From 112d0fa7d71bcdb3dc832c721765277f0cca9d22 Mon Sep 17 00:00:00 2001 From: ebembi-crdb Date: Wed, 6 May 2026 14:57:22 +0530 Subject: [PATCH] Vendor cockroachdb/cockroach assets and remove external dependencies MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Vendor static files (K8s manifests, Prometheus configs, Grafana dashboards, monitoring rules) into src/current/files/cockroach/ and generated docs into src/current/_includes/cockroach-generated/ so the docs site builds and serves without fetching from the cockroachdb/cockroach GitHub repo at build time or linking to it at read time. - Vendor 32 static files previously fetched via curl/wget - Vendor 88 generated doc files (8 files × 11 release branches) - Replace remote_include tags with local Jekyll includes - Replace remote_include version extractions with hardcoded values - Replace raw.githubusercontent.com download URLs with cockroachlabs.com paths - Unlink PR, issue, and commit reference-style and inline links - Update blob/tree links to point to vendored copies or plain text - Clean up release, repo, and comment references Co-Authored-By: Claude Opus 4.6 --- src/api/api-spec.json | 128 +- src/current/_data/menus.yml | 2 - .../_data/v24.2/metrics/child-metrics.yml | 2 +- .../_data/v24.3/metrics/child-metrics.yml | 2 +- .../metrics/multi-dimensional-metrics.yml | 2 +- .../metrics/multi-dimensional-metrics.yml | 2 +- .../metrics/multi-dimensional-metrics.yml | 2 +- .../metrics/multi-dimensional-metrics.yml | 2 +- .../metrics/multi-dimensional-metrics.yml | 2 +- .../metrics/multi-dimensional-metrics.yml | 2 +- .../release-23.1/eventlog.md | 3216 +++++++++++++ .../release-23.1/logformats.md | 550 +++ .../release-23.1/logging.md | 179 + .../release-23.1/settings/settings.html | 271 ++ .../release-23.1/sql/aggregates.md | 519 +++ .../release-23.1/sql/functions.md | 3623 +++++++++++++++ .../release-23.1/sql/operators.md | 610 +++ .../release-23.1/sql/window_functions.md | 
377 ++ .../release-23.2/eventlog.md | 3314 ++++++++++++++ .../release-23.2/logformats.md | 382 ++ .../release-23.2/logging.md | 179 + .../release-23.2/settings/settings.html | 293 ++ .../release-23.2/sql/aggregates.md | 531 +++ .../release-23.2/sql/functions.md | 3436 ++++++++++++++ .../release-23.2/sql/operators.md | 635 +++ .../release-23.2/sql/window_functions.md | 413 ++ .../release-24.1/eventlog.md | 3290 ++++++++++++++ .../release-24.1/logformats.md | 382 ++ .../release-24.1/logging.md | 179 + .../release-24.1/settings/settings.html | 310 ++ .../release-24.1/sql/aggregates.md | 579 +++ .../release-24.1/sql/functions.md | 3504 +++++++++++++++ .../release-24.1/sql/operators.md | 635 +++ .../release-24.1/sql/window_functions.md | 413 ++ .../release-24.2/eventlog.md | 3341 ++++++++++++++ .../release-24.2/logformats.md | 382 ++ .../release-24.2/logging.md | 179 + .../release-24.2/settings/settings.html | 315 ++ .../release-24.2/sql/aggregates.md | 579 +++ .../release-24.2/sql/functions.md | 3523 +++++++++++++++ .../release-24.2/sql/operators.md | 658 +++ .../release-24.2/sql/window_functions.md | 413 ++ .../release-24.3/eventlog.md | 3436 ++++++++++++++ .../release-24.3/logformats.md | 382 ++ .../release-24.3/logging.md | 179 + .../release-24.3/settings/settings.html | 371 ++ .../release-24.3/sql/aggregates.md | 579 +++ .../release-24.3/sql/functions.md | 3523 +++++++++++++++ .../release-24.3/sql/operators.md | 658 +++ .../release-24.3/sql/window_functions.md | 413 ++ .../release-25.1/eventlog.md | 3436 ++++++++++++++ .../release-25.1/logformats.md | 382 ++ .../release-25.1/logging.md | 179 + .../release-25.1/settings/settings.html | 371 ++ .../release-25.1/sql/aggregates.md | 579 +++ .../release-25.1/sql/functions.md | 3523 +++++++++++++++ .../release-25.1/sql/operators.md | 662 +++ .../release-25.1/sql/window_functions.md | 413 ++ .../release-25.2/eventlog.md | 3526 +++++++++++++++ .../release-25.2/logformats.md | 382 ++ .../release-25.2/logging.md | 179 + 
.../release-25.2/settings/settings.html | 383 ++ .../release-25.2/sql/aggregates.md | 579 +++ .../release-25.2/sql/functions.md | 3611 +++++++++++++++ .../release-25.2/sql/operators.md | 663 +++ .../release-25.2/sql/window_functions.md | 413 ++ .../release-25.3/eventlog.md | 3549 +++++++++++++++ .../release-25.3/logformats.md | 382 ++ .../release-25.3/logging.md | 179 + .../release-25.3/settings/settings.html | 380 ++ .../release-25.3/sql/aggregates.md | 579 +++ .../release-25.3/sql/functions.md | 3616 +++++++++++++++ .../release-25.3/sql/operators.md | 664 +++ .../release-25.3/sql/window_functions.md | 413 ++ .../release-25.4/eventlog.md | 3839 ++++++++++++++++ .../release-25.4/logformats.md | 382 ++ .../release-25.4/logging.md | 188 + .../release-25.4/settings/settings.html | 391 ++ .../release-25.4/sql/aggregates.md | 589 +++ .../release-25.4/sql/functions.md | 3669 +++++++++++++++ .../release-25.4/sql/operators.md | 684 +++ .../release-25.4/sql/window_functions.md | 431 ++ .../release-26.1/eventlog.md | 3874 ++++++++++++++++ .../release-26.1/logformats.md | 382 ++ .../release-26.1/logging.md | 188 + .../release-26.1/settings/settings.html | 396 ++ .../release-26.1/sql/aggregates.md | 589 +++ .../release-26.1/sql/functions.md | 3680 +++++++++++++++ .../release-26.1/sql/operators.md | 684 +++ .../release-26.1/sql/window_functions.md | 431 ++ .../release-26.2/eventlog.md | 4001 +++++++++++++++++ .../release-26.2/logformats.md | 382 ++ .../release-26.2/logging.md | 188 + .../release-26.2/settings/settings.html | 411 ++ .../release-26.2/sql/aggregates.md | 599 +++ .../release-26.2/sql/functions.md | 3732 +++++++++++++++ .../release-26.2/sql/operators.md | 684 +++ .../release-26.2/sql/window_functions.md | 431 ++ .../new-release-downloads-docker-image.md | 4 +- .../release-downloads-docker-image.md | 6 +- .../releases/v23.1/v23.1.0-alpha.1.md | 745 +-- .../releases/v23.1/v23.1.0-alpha.2.md | 614 +-- .../releases/v23.1/v23.1.0-alpha.3.md | 92 +- 
.../releases/v23.1/v23.1.0-alpha.4.md | 115 +- .../releases/v23.1/v23.1.0-alpha.5.md | 121 +- .../releases/v23.1/v23.1.0-alpha.6.md | 58 +- .../releases/v23.1/v23.1.0-alpha.7.md | 156 +- .../releases/v23.1/v23.1.0-alpha.8.md | 126 +- .../releases/v23.1/v23.1.0-alpha.9.md | 52 +- .../releases/v23.1/v23.1.0-beta.1.md | 59 +- .../releases/v23.1/v23.1.0-beta.2.md | 38 +- .../releases/v23.1/v23.1.0-beta.3.md | 54 +- .../_includes/releases/v23.1/v23.1.0-rc.1.md | 64 +- .../_includes/releases/v23.1/v23.1.0-rc.2.md | 3 +- .../_includes/releases/v23.1/v23.1.0.md | 58 +- .../_includes/releases/v23.1/v23.1.1.md | 3 +- .../_includes/releases/v23.1/v23.1.10.md | 3 +- .../_includes/releases/v23.1/v23.1.11.md | 156 +- .../_includes/releases/v23.1/v23.1.12.md | 185 +- .../_includes/releases/v23.1/v23.1.13.md | 73 +- .../_includes/releases/v23.1/v23.1.14.md | 83 +- .../_includes/releases/v23.1/v23.1.15.md | 71 +- .../_includes/releases/v23.1/v23.1.16.md | 3 +- .../_includes/releases/v23.1/v23.1.17.md | 90 +- .../_includes/releases/v23.1/v23.1.18.md | 39 +- .../_includes/releases/v23.1/v23.1.19.md | 3 +- .../_includes/releases/v23.1/v23.1.2.md | 367 +- .../_includes/releases/v23.1/v23.1.20.md | 15 +- .../_includes/releases/v23.1/v23.1.21.md | 47 +- .../_includes/releases/v23.1/v23.1.22.md | 53 +- .../_includes/releases/v23.1/v23.1.23.md | 63 +- .../_includes/releases/v23.1/v23.1.24.md | 40 +- .../_includes/releases/v23.1/v23.1.25.md | 63 +- .../_includes/releases/v23.1/v23.1.26.md | 21 +- .../_includes/releases/v23.1/v23.1.27.md | 3 +- .../_includes/releases/v23.1/v23.1.28.md | 41 +- .../_includes/releases/v23.1/v23.1.29.md | 67 +- .../_includes/releases/v23.1/v23.1.3.md | 143 +- .../_includes/releases/v23.1/v23.1.30.md | 31 +- .../_includes/releases/v23.1/v23.1.4.md | 46 +- .../_includes/releases/v23.1/v23.1.5.md | 87 +- .../_includes/releases/v23.1/v23.1.6.md | 9 +- .../_includes/releases/v23.1/v23.1.7.md | 49 +- .../_includes/releases/v23.1/v23.1.8.md | 9 +- 
.../_includes/releases/v23.1/v23.1.9.md | 284 +- .../releases/v23.2/v23.2.0-alpha.1.md | 1016 ++--- .../releases/v23.2/v23.2.0-alpha.2.md | 93 +- .../releases/v23.2/v23.2.0-alpha.3.md | 75 +- .../releases/v23.2/v23.2.0-alpha.4.md | 107 +- .../releases/v23.2/v23.2.0-alpha.5.md | 6 +- .../releases/v23.2/v23.2.0-alpha.6.md | 59 +- .../releases/v23.2/v23.2.0-alpha.7.md | 38 +- .../releases/v23.2/v23.2.0-beta.1.md | 15 +- .../releases/v23.2/v23.2.0-beta.2.md | 63 +- .../releases/v23.2/v23.2.0-beta.3.md | 35 +- .../_includes/releases/v23.2/v23.2.0-rc.1.md | 51 +- .../_includes/releases/v23.2/v23.2.0-rc.2.md | 6 +- .../_includes/releases/v23.2/v23.2.0.md | 40 +- .../_includes/releases/v23.2/v23.2.1.md | 170 +- .../_includes/releases/v23.2/v23.2.10.md | 64 +- .../_includes/releases/v23.2/v23.2.11.md | 9 +- .../_includes/releases/v23.2/v23.2.12.md | 72 +- .../_includes/releases/v23.2/v23.2.13.md | 72 +- .../_includes/releases/v23.2/v23.2.14.md | 3 +- .../_includes/releases/v23.2/v23.2.15.md | 4 +- .../_includes/releases/v23.2/v23.2.16.md | 91 +- .../_includes/releases/v23.2/v23.2.17.md | 59 +- .../_includes/releases/v23.2/v23.2.18.md | 3 +- .../_includes/releases/v23.2/v23.2.19.md | 66 +- .../_includes/releases/v23.2/v23.2.2.md | 3 +- .../_includes/releases/v23.2/v23.2.20.md | 26 +- .../_includes/releases/v23.2/v23.2.21.md | 15 +- .../_includes/releases/v23.2/v23.2.22.md | 15 +- .../_includes/releases/v23.2/v23.2.23.md | 3 +- .../_includes/releases/v23.2/v23.2.24.md | 4 +- .../_includes/releases/v23.2/v23.2.25.md | 18 +- .../_includes/releases/v23.2/v23.2.26.md | 15 +- .../_includes/releases/v23.2/v23.2.27.md | 18 +- .../_includes/releases/v23.2/v23.2.28.md | 3 +- .../_includes/releases/v23.2/v23.2.29.md | 3 +- .../_includes/releases/v23.2/v23.2.3.md | 106 +- .../_includes/releases/v23.2/v23.2.4.md | 47 +- .../_includes/releases/v23.2/v23.2.5.md | 51 +- .../_includes/releases/v23.2/v23.2.6.md | 116 +- .../_includes/releases/v23.2/v23.2.7.md | 71 +- 
.../_includes/releases/v23.2/v23.2.8.md | 6 +- .../_includes/releases/v23.2/v23.2.9.md | 72 +- .../releases/v24.1/v24.1.0-alpha.1.md | 496 +- .../releases/v24.1/v24.1.0-alpha.2.md | 78 +- .../releases/v24.1/v24.1.0-alpha.3.md | 43 +- .../releases/v24.1/v24.1.0-alpha.4.md | 91 +- .../releases/v24.1/v24.1.0-alpha.5.md | 90 +- .../releases/v24.1/v24.1.0-beta.1.md | 46 +- .../releases/v24.1/v24.1.0-beta.2.md | 6 +- .../releases/v24.1/v24.1.0-beta.3.md | 52 +- .../_includes/releases/v24.1/v24.1.0-rc.1.md | 39 +- .../_includes/releases/v24.1/v24.1.0-rc.2.md | 6 +- .../_includes/releases/v24.1/v24.1.0.md | 33 +- .../_includes/releases/v24.1/v24.1.1.md | 96 +- .../_includes/releases/v24.1/v24.1.10.md | 91 +- .../_includes/releases/v24.1/v24.1.11.md | 3 +- .../_includes/releases/v24.1/v24.1.12.md | 50 +- .../_includes/releases/v24.1/v24.1.13.md | 3 +- .../_includes/releases/v24.1/v24.1.14.md | 54 +- .../_includes/releases/v24.1/v24.1.15.md | 46 +- .../_includes/releases/v24.1/v24.1.16.md | 3 +- .../_includes/releases/v24.1/v24.1.17.md | 3 +- .../_includes/releases/v24.1/v24.1.18.md | 33 +- .../_includes/releases/v24.1/v24.1.19.md | 46 +- .../_includes/releases/v24.1/v24.1.2.md | 56 +- .../_includes/releases/v24.1/v24.1.20.md | 21 +- .../_includes/releases/v24.1/v24.1.21.md | 6 +- .../_includes/releases/v24.1/v24.1.22.md | 3 +- .../_includes/releases/v24.1/v24.1.23.md | 17 +- .../_includes/releases/v24.1/v24.1.24.md | 18 +- .../_includes/releases/v24.1/v24.1.25.md | 9 +- .../_includes/releases/v24.1/v24.1.26.md | 6 +- .../_includes/releases/v24.1/v24.1.27.md | 3 +- .../_includes/releases/v24.1/v24.1.28.md | 3 +- .../_includes/releases/v24.1/v24.1.3.md | 66 +- .../_includes/releases/v24.1/v24.1.4.md | 84 +- .../_includes/releases/v24.1/v24.1.5.md | 89 +- .../_includes/releases/v24.1/v24.1.6.md | 77 +- .../_includes/releases/v24.1/v24.1.7.md | 107 +- .../_includes/releases/v24.1/v24.1.8.md | 75 +- .../_includes/releases/v24.1/v24.1.9.md | 3 +- 
.../releases/v24.2/v24.2.0-alpha.1.md | 230 +- .../releases/v24.2/v24.2.0-alpha.2.md | 59 +- .../releases/v24.2/v24.2.0-beta.1.md | 65 +- .../releases/v24.2/v24.2.0-beta.2.md | 19 +- .../releases/v24.2/v24.2.0-beta.3.md | 6 +- .../_includes/releases/v24.2/v24.2.0-rc.1.md | 22 +- .../_includes/releases/v24.2/v24.2.0.md | 38 +- .../_includes/releases/v24.2/v24.2.1.md | 51 +- .../_includes/releases/v24.2/v24.2.10.md | 60 +- .../_includes/releases/v24.2/v24.2.2.md | 4 +- .../_includes/releases/v24.2/v24.2.3.md | 78 +- .../_includes/releases/v24.2/v24.2.4.md | 88 +- .../_includes/releases/v24.2/v24.2.5.md | 111 +- .../_includes/releases/v24.2/v24.2.6.md | 87 +- .../_includes/releases/v24.2/v24.2.7.md | 4 +- .../_includes/releases/v24.2/v24.2.8.md | 85 +- .../_includes/releases/v24.2/v24.2.9.md | 3 +- .../releases/v24.3/backward-incompatible.md | 3 +- .../_includes/releases/v24.3/deprecations.md | 3 +- .../releases/v24.3/v24.3.0-alpha.1.md | 335 +- .../releases/v24.3/v24.3.0-alpha.2.md | 58 +- .../releases/v24.3/v24.3.0-beta.1.md | 93 +- .../releases/v24.3/v24.3.0-beta.2.md | 22 +- .../releases/v24.3/v24.3.0-beta.3.md | 47 +- .../_includes/releases/v24.3/v24.3.0-rc.1.md | 64 +- .../_includes/releases/v24.3/v24.3.0.md | 33 +- .../_includes/releases/v24.3/v24.3.1.md | 75 +- .../_includes/releases/v24.3/v24.3.10.md | 3 +- .../_includes/releases/v24.3/v24.3.11.md | 3 +- .../_includes/releases/v24.3/v24.3.12.md | 68 +- .../_includes/releases/v24.3/v24.3.13.md | 3 +- .../_includes/releases/v24.3/v24.3.14.md | 63 +- .../_includes/releases/v24.3/v24.3.15.md | 40 +- .../_includes/releases/v24.3/v24.3.16.md | 23 +- .../_includes/releases/v24.3/v24.3.17.md | 3 +- .../_includes/releases/v24.3/v24.3.18.md | 3 +- .../_includes/releases/v24.3/v24.3.19.md | 36 +- .../_includes/releases/v24.3/v24.3.2.md | 3 +- .../_includes/releases/v24.3/v24.3.20.md | 31 +- .../_includes/releases/v24.3/v24.3.21.md | 9 +- .../_includes/releases/v24.3/v24.3.22.md | 3 +- 
.../_includes/releases/v24.3/v24.3.23.md | 15 +- .../_includes/releases/v24.3/v24.3.24.md | 3 +- .../_includes/releases/v24.3/v24.3.25.md | 9 +- .../_includes/releases/v24.3/v24.3.26.md | 3 +- .../_includes/releases/v24.3/v24.3.27.md | 6 +- .../_includes/releases/v24.3/v24.3.28.md | 3 +- .../_includes/releases/v24.3/v24.3.29.md | 15 +- .../_includes/releases/v24.3/v24.3.3.md | 123 +- .../_includes/releases/v24.3/v24.3.30.md | 6 +- .../_includes/releases/v24.3/v24.3.31.md | 3 +- .../_includes/releases/v24.3/v24.3.4.md | 3 +- .../_includes/releases/v24.3/v24.3.5.md | 82 +- .../_includes/releases/v24.3/v24.3.6.md | 3 +- .../_includes/releases/v24.3/v24.3.7.md | 58 +- .../_includes/releases/v24.3/v24.3.8.md | 3 +- .../_includes/releases/v24.3/v24.3.9.md | 65 +- .../releases/v25.1/backward-incompatible.md | 19 +- .../releases/v25.1/cluster-setting-changes.md | 30 +- .../_includes/releases/v25.1/deprecations.md | 3 +- .../releases/v25.1/upgrade-finalization.md | 8 +- .../releases/v25.1/v25.1.0-alpha.1.md | 222 +- .../releases/v25.1/v25.1.0-alpha.2.md | 160 +- .../releases/v25.1/v25.1.0-alpha.3.md | 67 +- .../releases/v25.1/v25.1.0-beta.1.md | 32 +- .../releases/v25.1/v25.1.0-beta.2.md | 24 +- .../releases/v25.1/v25.1.0-beta.3.md | 15 +- .../_includes/releases/v25.1/v25.1.0-rc.1.md | 39 +- .../_includes/releases/v25.1/v25.1.0.md | 1 - .../_includes/releases/v25.1/v25.1.1.md | 3 +- .../_includes/releases/v25.1/v25.1.10.md | 3 +- .../_includes/releases/v25.1/v25.1.2.md | 40 +- .../_includes/releases/v25.1/v25.1.3.md | 76 +- .../_includes/releases/v25.1/v25.1.4.md | 3 +- .../_includes/releases/v25.1/v25.1.5.md | 3 +- .../_includes/releases/v25.1/v25.1.6.md | 62 +- .../_includes/releases/v25.1/v25.1.7.md | 65 +- .../_includes/releases/v25.1/v25.1.8.md | 36 +- .../_includes/releases/v25.1/v25.1.9.md | 33 +- .../releases/v25.2/backward-incompatible.md | 13 +- .../releases/v25.2/cluster-setting-changes.md | 2 +- .../releases/v25.2/v25.2.0-alpha.1.md | 320 +- 
.../releases/v25.2/v25.2.0-alpha.2.md | 78 +- .../releases/v25.2/v25.2.0-alpha.3.md | 65 +- .../releases/v25.2/v25.2.0-beta.1.md | 49 +- .../releases/v25.2/v25.2.0-beta.2.md | 27 +- .../releases/v25.2/v25.2.0-beta.3.md | 29 +- .../_includes/releases/v25.2/v25.2.0-rc.1.md | 42 +- .../_includes/releases/v25.2/v25.2.1.md | 85 +- .../_includes/releases/v25.2/v25.2.10.md | 15 +- .../_includes/releases/v25.2/v25.2.11.md | 32 +- .../_includes/releases/v25.2/v25.2.12.md | 21 +- .../_includes/releases/v25.2/v25.2.13.md | 6 +- .../_includes/releases/v25.2/v25.2.14.md | 3 +- .../_includes/releases/v25.2/v25.2.15.md | 6 +- .../_includes/releases/v25.2/v25.2.16.md | 6 +- .../_includes/releases/v25.2/v25.2.17.md | 6 +- .../_includes/releases/v25.2/v25.2.2.md | 52 +- .../_includes/releases/v25.2/v25.2.3.md | 53 +- .../_includes/releases/v25.2/v25.2.4.md | 3 +- .../_includes/releases/v25.2/v25.2.5.md | 52 +- .../_includes/releases/v25.2/v25.2.6.md | 52 +- .../_includes/releases/v25.2/v25.2.7.md | 24 +- .../_includes/releases/v25.2/v25.2.8.md | 6 +- .../_includes/releases/v25.2/v25.2.9.md | 12 +- .../releases/v25.3/cluster-setting-changes.md | 17 +- .../_includes/releases/v25.3/deprecations.md | 10 +- .../releases/v25.3/v25.3.0-alpha.1.md | 246 +- .../releases/v25.3/v25.3.0-alpha.2.md | 64 +- .../releases/v25.3/v25.3.0-alpha.3.md | 27 +- .../releases/v25.3/v25.3.0-beta.1.md | 43 +- .../releases/v25.3/v25.3.0-beta.2.md | 9 +- .../releases/v25.3/v25.3.0-beta.3.md | 12 +- .../_includes/releases/v25.3/v25.3.0-rc.1.md | 39 +- .../_includes/releases/v25.3/v25.3.1.md | 31 +- .../_includes/releases/v25.3/v25.3.2.md | 52 +- .../_includes/releases/v25.3/v25.3.3.md | 37 +- .../_includes/releases/v25.3/v25.3.4.md | 3 +- .../_includes/releases/v25.3/v25.3.5.md | 18 +- .../_includes/releases/v25.3/v25.3.6.md | 31 +- .../_includes/releases/v25.3/v25.3.7.md | 31 +- .../releases/v25.4/backward-incompatible.md | 9 +- .../releases/v25.4/cluster-setting-changes.md | 56 +- 
.../_includes/releases/v25.4/deprecations.md | 10 +- .../releases/v25.4/v25.4.0-alpha.1.md | 468 +- .../releases/v25.4/v25.4.0-alpha.2.md | 39 +- .../releases/v25.4/v25.4.0-beta.1.md | 55 +- .../releases/v25.4/v25.4.0-beta.2.md | 48 +- .../releases/v25.4/v25.4.0-beta.3.md | 3 +- .../_includes/releases/v25.4/v25.4.0-rc.1.md | 33 +- .../_includes/releases/v25.4/v25.4.0.md | 14 +- .../_includes/releases/v25.4/v25.4.1.md | 69 +- .../_includes/releases/v25.4/v25.4.2.md | 23 +- .../_includes/releases/v25.4/v25.4.3.md | 47 +- .../_includes/releases/v25.4/v25.4.4.md | 33 +- .../_includes/releases/v25.4/v25.4.5.md | 9 +- .../_includes/releases/v25.4/v25.4.6.md | 12 +- .../_includes/releases/v25.4/v25.4.7.md | 28 +- .../_includes/releases/v25.4/v25.4.8.md | 3 +- .../_includes/releases/v25.4/v25.4.9.md | 6 +- .../releases/v26.1/backward-incompatible.md | 8 +- .../releases/v26.1/cluster-setting-changes.md | 16 +- .../_includes/releases/v26.1/deprecations.md | 6 +- .../releases/v26.1/v26.1.0-alpha.1.md | 322 +- .../releases/v26.1/v26.1.0-alpha.2.md | 96 +- .../releases/v26.1/v26.1.0-beta.1.md | 51 +- .../releases/v26.1/v26.1.0-beta.2.md | 33 +- .../releases/v26.1/v26.1.0-beta.3.md | 43 +- .../_includes/releases/v26.1/v26.1.0-rc.1.md | 42 +- .../_includes/releases/v26.1/v26.1.1.md | 46 +- .../_includes/releases/v26.1/v26.1.2.md | 33 +- .../_includes/releases/v26.1/v26.1.3.md | 3 +- .../releases/v26.2/backward-incompatible.md | 8 +- .../releases/v26.2/cluster-setting-changes.md | 16 +- .../_includes/releases/v26.2/deprecations.md | 4 +- .../releases/v26.2/v26.2.0-alpha.1.md | 430 +- .../releases/v26.2/v26.2.0-alpha.2.md | 132 +- .../releases/v26.2/v26.2.0-beta.1.md | 59 +- .../releases/v26.2/v26.2.0-beta.2.md | 21 +- .../releases/v26.2/v26.2.0-beta.3.md | 66 +- .../_includes/releases/v26.2/v26.2.0-rc.1.md | 9 +- .../known-limitations/copy-from-clients.md | 2 +- .../v20.2/known-limitations/copy-syntax.md | 8 +- .../known-limitations/old-multi-col-stats.md | 2 +- 
.../set-transaction-no-rollback.md | 2 +- .../single-col-stats-deletion.md | 2 +- .../stats-refresh-upgrade.md | 2 +- .../known-limitations/unordered-operations.md | 2 +- .../userfile-upload-non-recursive.md | 2 +- src/current/_includes/v20.2/misc/tooling.md | 18 +- .../kubernetes-prometheus-alertmanager.md | 18 +- .../start-cockroachdb-helm-secure.md | 2 +- .../start-cockroachdb-insecure.md | 14 +- .../start-cockroachdb-local-insecure.md | 8 +- .../orchestration/start-cockroachdb-secure.md | 4 +- .../orchestration/test-cluster-secure.md | 6 +- ...vepoints-and-high-priority-transactions.md | 2 +- .../v23.1/backward-incompatible/alpha.1.md | 26 +- .../_includes/v23.1/cdc/avro-udt-composite.md | 2 +- .../_includes/v23.1/cdc/csv-udt-composite.md | 2 +- .../_includes/v23.1/essential-metrics.md | 2 +- .../_includes/v23.1/faq/what-is-crdb.md | 2 +- .../alter-changefeed-cdc-queries.md | 2 +- .../cdc-execution-locality.md | 2 +- .../cdc-queries-column-families.md | 2 +- .../v23.1/known-limitations/cdc-queries.md | 4 +- .../_includes/v23.1/known-limitations/cdc.md | 8 +- .../changefeed-column-family-message.md | 2 +- .../v23.1/known-limitations/copy-syntax.md | 8 +- .../drop-owned-by-role-limitations.md | 2 +- .../forecasted-stats-limitations.md | 2 +- .../restore-tables-non-multi-reg.md | 2 +- .../v23.1/known-limitations/restore-udf.md | 2 +- .../set-transaction-no-rollback.md | 2 +- ...ow-backup-locality-incremental-location.md | 2 +- .../known-limitations/show-backup-symlink.md | 2 +- .../v23.1/known-limitations/sql-cursors.md | 14 +- .../stats-refresh-upgrade.md | 2 +- .../userfile-upload-non-recursive.md | 2 +- src/current/_includes/v23.1/misc/tooling.md | 22 +- .../start-cockroachdb-insecure.md | 14 +- .../start-cockroachdb-local-insecure.md | 8 +- .../orchestration/start-cockroachdb-secure.md | 4 +- .../orchestration/test-cluster-secure.md | 2 +- .../decommission-pre-flight-checks.md | 2 +- ...-materialized-views-inside-transactions.md | 2 +- 
...dexes-cannot-reference-computed-columns.md | 2 +- .../sql/expressions-as-on-conflict-targets.md | 2 +- .../_includes/v23.1/sql/jsonb-comparison.md | 2 +- ...timized-search-virtual-computed-columns.md | 2 +- .../v23.1/sql/materialized-views-no-stats.md | 2 +- ...vepoints-and-high-priority-transactions.md | 2 +- .../sql/select-for-update-limitations.md | 2 +- .../v23.2/backward-incompatible/alpha.1.md | 26 +- .../_includes/v23.2/cdc/avro-udt-composite.md | 2 +- .../_includes/v23.2/cdc/csv-udt-composite.md | 2 +- .../_includes/v23.2/essential-metrics.md | 2 +- .../_includes/v23.2/faq/what-is-crdb.md | 2 +- .../alter-changefeed-cdc-queries.md | 2 +- .../cdc-execution-locality.md | 2 +- .../cdc-queries-column-families.md | 2 +- .../v23.2/known-limitations/cdc-queries.md | 4 +- .../_includes/v23.2/known-limitations/cdc.md | 8 +- .../changefeed-column-family-message.md | 2 +- .../v23.2/known-limitations/copy-syntax.md | 8 +- .../drop-owned-by-role-limitations.md | 2 +- .../forecasted-stats-limitations.md | 2 +- .../generic-query-plan-limitations.md | 4 +- .../pcr-scheduled-changefeeds.md | 2 +- .../restore-tables-non-multi-reg.md | 2 +- .../v23.2/known-limitations/restore-udf.md | 2 +- .../row-level-ttl-limitations.md | 2 +- .../set-transaction-no-rollback.md | 2 +- ...ow-backup-locality-incremental-location.md | 2 +- .../known-limitations/show-backup-symlink.md | 2 +- .../v23.2/known-limitations/sql-cursors.md | 14 +- .../stats-refresh-upgrade.md | 2 +- .../userfile-upload-non-recursive.md | 2 +- src/current/_includes/v23.2/misc/tooling.md | 22 +- .../start-cockroachdb-insecure.md | 14 +- .../start-cockroachdb-local-insecure.md | 8 +- .../orchestration/start-cockroachdb-secure.md | 4 +- .../orchestration/test-cluster-secure.md | 2 +- .../decommission-pre-flight-checks.md | 2 +- ...-materialized-views-inside-transactions.md | 2 +- ...dexes-cannot-reference-computed-columns.md | 2 +- .../sql/expressions-as-on-conflict-targets.md | 2 +- 
.../_includes/v23.2/sql/jsonb-comparison.md | 2 +- ...timized-search-virtual-computed-columns.md | 2 +- .../v23.2/sql/materialized-views-no-stats.md | 2 +- ...vepoints-and-high-priority-transactions.md | 2 +- .../sql/unsupported-postgres-features.md | 4 +- .../v24.1/backward-incompatible/alpha.1.md | 26 +- .../_includes/v24.1/cdc/avro-udt-composite.md | 2 +- .../_includes/v24.1/cdc/csv-udt-composite.md | 2 +- .../_includes/v24.1/essential-metrics.md | 2 +- .../_includes/v24.1/faq/what-is-crdb.md | 2 +- .../v24.1/finalization-required/119894.md | 2 +- .../alter-changefeed-cdc-queries.md | 2 +- .../alter-changefeed-limitations.md | 2 +- .../alter-view-limitations.md | 2 +- .../known-limitations/aost-limitations.md | 2 +- ...-materialized-views-inside-transactions.md | 2 +- .../cdc-queries-column-families.md | 2 +- .../v24.1/known-limitations/cdc-queries.md | 4 +- .../_includes/v24.1/known-limitations/cdc.md | 8 +- .../changefeed-column-family-message.md | 2 +- .../v24.1/known-limitations/copy-syntax.md | 6 +- .../create-statistics-aost-limitation.md | 2 +- .../drop-column-partial-index.md | 2 +- .../drop-owned-by-limitations.md | 2 +- .../expression-index-limitations.md | 4 +- .../fast-cutback-latest-timestamp.md | 2 +- .../forecasted-stats-limitations.md | 2 +- .../full-text-search-unsupported.md | 2 +- .../generic-query-plan-limitations.md | 4 +- ...timized-search-virtual-computed-columns.md | 2 +- .../materialized-views-no-stats.md | 2 +- .../multiple-arbiter-indexes.md | 2 +- .../pcr-scheduled-changefeeds.md | 2 +- .../known-limitations/plpgsql-limitations.md | 46 +- .../read-committed-limitations.md | 8 +- .../restore-multiregion-match.md | 2 +- .../restore-tables-non-multi-reg.md | 2 +- .../v24.1/known-limitations/restore-udf.md | 2 +- .../known-limitations/routine-limitations.md | 14 +- .../row-level-ttl-limitations.md | 2 +- .../select-for-update-limitations.md | 2 +- .../set-transaction-no-rollback.md | 2 +- .../known-limitations/show-backup-symlink.md | 2 
+- .../v24.1/known-limitations/sql-cursors.md | 14 +- .../srid-4326-limitations.md | 2 +- .../stats-refresh-upgrade.md | 2 +- .../stored-proc-limitations.md | 2 +- .../trigram-unsupported-syntax.md | 2 +- .../known-limitations/udf-limitations.md | 14 +- .../vectorized-engine-limitations.md | 2 +- src/current/_includes/v24.1/misc/tooling.md | 22 +- .../start-cockroachdb-insecure.md | 14 +- .../start-cockroachdb-local-insecure.md | 8 +- .../orchestration/start-cockroachdb-secure.md | 4 +- .../orchestration/test-cluster-secure.md | 2 +- .../decommission-pre-flight-checks.md | 2 +- ...vepoints-and-high-priority-transactions.md | 2 +- .../sql/unsupported-postgres-features.md | 4 +- .../v24.2/backward-incompatible/alpha.1.md | 26 +- .../_includes/v24.2/cdc/avro-udt-composite.md | 2 +- .../_includes/v24.2/cdc/csv-udt-composite.md | 2 +- .../_includes/v24.2/essential-metrics.md | 2 +- .../_includes/v24.2/faq/what-is-crdb.md | 2 +- .../v24.2/finalization-required/119894.md | 2 +- .../alter-changefeed-cdc-queries.md | 2 +- .../alter-changefeed-limitations.md | 2 +- .../alter-view-limitations.md | 2 +- .../known-limitations/aost-limitations.md | 2 +- ...-materialized-views-inside-transactions.md | 2 +- .../cdc-queries-column-families.md | 2 +- .../v24.2/known-limitations/cdc-queries.md | 4 +- .../_includes/v24.2/known-limitations/cdc.md | 8 +- .../changefeed-column-family-message.md | 2 +- .../v24.2/known-limitations/copy-syntax.md | 6 +- .../create-statistics-aost-limitation.md | 2 +- .../drop-column-partial-index.md | 2 +- .../drop-owned-by-limitations.md | 2 +- .../expression-index-limitations.md | 4 +- .../forecasted-stats-limitations.md | 2 +- .../full-text-search-unsupported.md | 2 +- .../generic-query-plan-limitations.md | 4 +- ...timized-search-virtual-computed-columns.md | 2 +- .../materialized-views-no-stats.md | 2 +- .../multiple-arbiter-indexes.md | 2 +- .../known-limitations/plpgsql-limitations.md | 46 +- .../read-committed-limitations.md | 8 +- 
.../restore-multiregion-match.md | 2 +- .../restore-tables-non-multi-reg.md | 2 +- .../v24.2/known-limitations/restore-udf.md | 2 +- .../known-limitations/routine-limitations.md | 16 +- .../select-for-update-limitations.md | 2 +- .../set-transaction-no-rollback.md | 2 +- .../known-limitations/show-backup-symlink.md | 2 +- .../v24.2/known-limitations/sql-cursors.md | 14 +- .../srid-4326-limitations.md | 2 +- .../stats-refresh-upgrade.md | 2 +- .../stored-proc-limitations.md | 2 +- .../trigram-unsupported-syntax.md | 2 +- .../known-limitations/udf-limitations.md | 14 +- .../vectorized-engine-limitations.md | 2 +- src/current/_includes/v24.2/misc/tooling.md | 22 +- .../start-cockroachdb-insecure.md | 14 +- .../start-cockroachdb-local-insecure.md | 8 +- .../orchestration/start-cockroachdb-secure.md | 4 +- .../orchestration/test-cluster-secure.md | 2 +- .../decommission-pre-flight-checks.md | 2 +- ...vepoints-and-high-priority-transactions.md | 2 +- .../v24.3/backward-incompatible/alpha.1.md | 26 +- .../_includes/v24.3/cdc/avro-udt-composite.md | 2 +- .../_includes/v24.3/cdc/csv-udt-composite.md | 2 +- .../_includes/v24.3/essential-metrics.md | 2 +- .../_includes/v24.3/faq/what-is-crdb.md | 2 +- .../v24.3/finalization-required/119894.md | 2 +- .../alter-changefeed-cdc-queries.md | 2 +- .../alter-changefeed-limitations.md | 2 +- .../alter-view-limitations.md | 2 +- .../known-limitations/aost-limitations.md | 2 +- ...-materialized-views-inside-transactions.md | 2 +- .../cdc-queries-column-families.md | 2 +- .../v24.3/known-limitations/cdc-queries.md | 4 +- .../_includes/v24.3/known-limitations/cdc.md | 8 +- .../changefeed-column-family-message.md | 2 +- .../compression-level-kafka-config.md | 2 +- .../v24.3/known-limitations/copy-syntax.md | 6 +- .../create-statistics-aost-limitation.md | 2 +- .../drop-column-partial-index.md | 2 +- .../drop-owned-by-limitations.md | 2 +- .../drop-trigger-limitations.md | 2 +- .../expression-index-limitations.md | 4 +- 
.../forecasted-stats-limitations.md | 2 +- .../full-text-search-unsupported.md | 2 +- .../generic-query-plan-limitations.md | 4 +- .../known-limitations/ldr-column-families.md | 2 +- .../ldr-composite-primary.md | 2 +- .../v24.3/known-limitations/ldr-indexes.md | 2 +- .../v24.3/known-limitations/ldr-sequences.md | 2 +- .../v24.3/known-limitations/ldr-triggers.md | 2 +- .../v24.3/known-limitations/ldr-udfs.md | 2 +- ...timized-search-virtual-computed-columns.md | 2 +- .../materialized-views-no-stats.md | 2 +- .../multiple-arbiter-indexes.md | 2 +- .../known-limitations/plpgsql-limitations.md | 46 +- .../read-committed-limitations.md | 6 +- .../restore-multiregion-match.md | 2 +- .../restore-tables-non-multi-reg.md | 2 +- .../v24.3/known-limitations/restore-udf.md | 2 +- .../known-limitations/routine-limitations.md | 16 +- .../row-level-ttl-limitations.md | 2 +- .../select-for-update-limitations.md | 2 +- .../set-transaction-no-rollback.md | 2 +- .../known-limitations/show-backup-symlink.md | 2 +- .../v24.3/known-limitations/sql-cursors.md | 14 +- .../srid-4326-limitations.md | 2 +- .../stats-refresh-upgrade.md | 2 +- .../stored-proc-limitations.md | 2 +- .../known-limitations/trigger-limitations.md | 10 +- .../trigram-unsupported-syntax.md | 2 +- .../known-limitations/udf-limitations.md | 14 +- .../vectorized-engine-limitations.md | 2 +- src/current/_includes/v24.3/misc/tooling.md | 22 +- .../start-cockroachdb-insecure.md | 14 +- .../start-cockroachdb-local-insecure.md | 8 +- .../orchestration/start-cockroachdb-secure.md | 4 +- .../orchestration/test-cluster-secure.md | 2 +- .../decommission-pre-flight-checks.md | 2 +- ...vepoints-and-high-priority-transactions.md | 2 +- .../sql/unsupported-postgres-features.md | 4 +- .../v25.1/backward-incompatible/alpha.1.md | 26 +- .../_includes/v25.1/cdc/avro-udt-composite.md | 2 +- .../_includes/v25.1/cdc/csv-udt-composite.md | 2 +- .../_includes/v25.1/essential-metrics.md | 2 +- .../_includes/v25.1/faq/what-is-crdb.md | 2 +- 
.../v25.1/finalization-required/119894.md | 2 +- .../alter-changefeed-cdc-queries.md | 2 +- .../alter-changefeed-limitations.md | 2 +- .../alter-view-limitations.md | 2 +- .../known-limitations/aost-limitations.md | 2 +- ...-materialized-views-inside-transactions.md | 2 +- .../cdc-queries-column-families.md | 2 +- .../v25.1/known-limitations/cdc-queries.md | 4 +- .../_includes/v25.1/known-limitations/cdc.md | 8 +- .../changefeed-column-family-message.md | 2 +- .../v25.1/known-limitations/copy-syntax.md | 6 +- .../create-statistics-aost-limitation.md | 2 +- .../drop-column-partial-index.md | 2 +- .../drop-owned-by-limitations.md | 2 +- .../drop-trigger-limitations.md | 2 +- .../expression-index-limitations.md | 4 +- .../forecasted-stats-limitations.md | 2 +- .../full-text-search-unsupported.md | 2 +- .../generic-query-plan-limitations.md | 4 +- .../known-limitations/ldr-column-families.md | 2 +- .../ldr-composite-primary.md | 2 +- .../v25.1/known-limitations/ldr-indexes.md | 2 +- .../v25.1/known-limitations/ldr-sequences.md | 2 +- .../v25.1/known-limitations/ldr-triggers.md | 2 +- .../v25.1/known-limitations/ldr-udfs.md | 2 +- ...timized-search-virtual-computed-columns.md | 2 +- .../materialized-views-no-stats.md | 2 +- .../multiple-arbiter-indexes.md | 2 +- .../known-limitations/plpgsql-limitations.md | 46 +- .../read-committed-limitations.md | 6 +- .../restore-multiregion-match.md | 2 +- .../restore-tables-non-multi-reg.md | 2 +- .../v25.1/known-limitations/restore-udf.md | 2 +- .../known-limitations/routine-limitations.md | 16 +- .../select-for-update-limitations.md | 2 +- .../set-transaction-no-rollback.md | 2 +- .../known-limitations/show-backup-symlink.md | 2 +- .../v25.1/known-limitations/sql-cursors.md | 14 +- .../srid-4326-limitations.md | 2 +- .../stats-refresh-upgrade.md | 2 +- .../stored-proc-limitations.md | 2 +- .../known-limitations/trigger-limitations.md | 10 +- .../trigram-unsupported-syntax.md | 2 +- .../known-limitations/udf-limitations.md | 12 +- 
.../vectorized-engine-limitations.md | 2 +- src/current/_includes/v25.1/misc/tooling.md | 22 +- .../start-cockroachdb-insecure.md | 14 +- .../start-cockroachdb-local-insecure.md | 8 +- .../orchestration/start-cockroachdb-secure.md | 4 +- .../orchestration/test-cluster-secure.md | 2 +- .../decommission-pre-flight-checks.md | 2 +- ...vepoints-and-high-priority-transactions.md | 2 +- .../v25.2/backward-incompatible/alpha.1.md | 26 +- .../_includes/v25.2/cdc/avro-udt-composite.md | 2 +- .../_includes/v25.2/cdc/csv-udt-composite.md | 2 +- .../_includes/v25.2/essential-metrics.md | 2 +- .../_includes/v25.2/faq/what-is-crdb.md | 2 +- .../v25.2/finalization-required/119894.md | 2 +- .../alter-changefeed-cdc-queries.md | 2 +- .../alter-changefeed-limitations.md | 2 +- .../alter-view-limitations.md | 2 +- .../known-limitations/aost-limitations.md | 2 +- ...-materialized-views-inside-transactions.md | 2 +- .../cdc-queries-column-families.md | 2 +- .../v25.2/known-limitations/cdc-queries.md | 4 +- .../_includes/v25.2/known-limitations/cdc.md | 8 +- .../changefeed-column-family-message.md | 2 +- .../v25.2/known-limitations/copy-syntax.md | 6 +- .../create-statistics-aost-limitation.md | 2 +- .../drop-column-partial-index.md | 2 +- .../drop-owned-by-limitations.md | 2 +- .../drop-trigger-limitations.md | 2 +- .../expression-index-limitations.md | 4 +- .../forecasted-stats-limitations.md | 2 +- .../full-text-search-unsupported.md | 2 +- .../generic-query-plan-limitations.md | 4 +- .../known-limitations/jsonpath-limitations.md | 4 +- .../known-limitations/ldr-column-families.md | 2 +- .../ldr-composite-primary.md | 2 +- .../v25.2/known-limitations/ldr-indexes.md | 2 +- .../v25.2/known-limitations/ldr-sequences.md | 2 +- .../v25.2/known-limitations/ldr-triggers.md | 2 +- .../v25.2/known-limitations/ldr-udfs.md | 2 +- ...timized-search-virtual-computed-columns.md | 2 +- .../materialized-views-no-stats.md | 2 +- .../multiple-arbiter-indexes.md | 2 +- 
.../known-limitations/plpgsql-limitations.md | 44 +- .../read-committed-limitations.md | 6 +- .../restore-multiregion-match.md | 2 +- .../restore-tables-non-multi-reg.md | 2 +- .../v25.2/known-limitations/restore-udf.md | 2 +- .../rls-values-on-conflict-do-nothing.md | 2 +- .../known-limitations/rls-visibility-issue.md | 2 +- .../known-limitations/routine-limitations.md | 16 +- .../row-level-ttl-limitations.md | 2 +- .../select-for-update-limitations.md | 2 +- .../set-transaction-no-rollback.md | 2 +- .../known-limitations/show-backup-symlink.md | 2 +- .../v25.2/known-limitations/sql-cursors.md | 12 +- .../srid-4326-limitations.md | 2 +- .../stats-refresh-upgrade.md | 2 +- .../stored-proc-limitations.md | 2 +- .../known-limitations/trigger-limitations.md | 10 +- .../trigram-unsupported-syntax.md | 2 +- .../known-limitations/udf-limitations.md | 12 +- .../known-limitations/vector-limitations.md | 12 +- .../vectorized-engine-limitations.md | 2 +- src/current/_includes/v25.2/misc/tooling.md | 22 +- .../start-cockroachdb-insecure.md | 14 +- .../start-cockroachdb-local-insecure.md | 8 +- .../orchestration/start-cockroachdb-secure.md | 4 +- .../orchestration/test-cluster-secure.md | 2 +- .../decommission-pre-flight-checks.md | 2 +- ...vepoints-and-high-priority-transactions.md | 2 +- .../sql/unsupported-postgres-features.md | 4 +- .../v25.3/backward-incompatible/alpha.1.md | 26 +- .../_includes/v25.3/cdc/avro-udt-composite.md | 2 +- .../_includes/v25.3/cdc/csv-udt-composite.md | 2 +- .../_includes/v25.3/faq/what-is-crdb.md | 2 +- .../v25.3/finalization-required/119894.md | 2 +- .../alter-changefeed-cdc-queries.md | 2 +- .../alter-changefeed-limitations.md | 2 +- .../alter-view-limitations.md | 2 +- .../known-limitations/aost-limitations.md | 2 +- ...-materialized-views-inside-transactions.md | 2 +- .../cdc-queries-column-families.md | 2 +- .../v25.3/known-limitations/cdc-queries.md | 4 +- .../_includes/v25.3/known-limitations/cdc.md | 8 +- 
.../changefeed-column-family-message.md | 2 +- .../known-limitations/citext-limitations.md | 2 +- .../v25.3/known-limitations/copy-syntax.md | 6 +- .../create-statistics-aost-limitation.md | 2 +- .../distsql-heterogeneous-endianness.md | 2 +- .../drop-column-partial-index.md | 2 +- .../drop-owned-by-limitations.md | 2 +- .../drop-trigger-limitations.md | 2 +- .../enforce-home-region-limitations.md | 2 +- .../expression-index-limitations.md | 4 +- .../forecasted-stats-limitations.md | 2 +- .../full-text-search-unsupported.md | 2 +- .../generic-query-plan-limitations.md | 4 +- .../geospatial-heterogeneous-architectures.md | 2 +- .../known-limitations/jsonpath-limitations.md | 4 +- .../known-limitations/ldr-column-families.md | 2 +- .../ldr-composite-primary.md | 2 +- .../v25.3/known-limitations/ldr-indexes.md | 2 +- .../v25.3/known-limitations/ldr-sequences.md | 2 +- .../v25.3/known-limitations/ldr-triggers.md | 2 +- .../v25.3/known-limitations/ldr-udfs.md | 2 +- ...timized-search-virtual-computed-columns.md | 2 +- .../materialized-views-no-stats.md | 2 +- .../multiple-arbiter-indexes.md | 2 +- .../known-limitations/plpgsql-limitations.md | 44 +- .../read-committed-limitations.md | 6 +- .../restore-multiregion-match.md | 2 +- .../restore-tables-non-multi-reg.md | 2 +- .../v25.3/known-limitations/restore-udf.md | 2 +- .../rls-update-set-where-returning.md | 2 +- .../rls-values-on-conflict-do-nothing.md | 2 +- .../known-limitations/rls-visibility-issue.md | 2 +- .../known-limitations/routine-limitations.md | 16 +- .../select-for-update-limitations.md | 2 +- .../set-transaction-no-rollback.md | 2 +- .../known-limitations/show-backup-symlink.md | 2 +- .../v25.3/known-limitations/sql-cursors.md | 12 +- .../srid-4326-limitations.md | 2 +- .../stats-refresh-upgrade.md | 2 +- .../stored-proc-limitations.md | 2 +- .../known-limitations/trigger-limitations.md | 10 +- .../trigram-unsupported-syntax.md | 2 +- .../known-limitations/udf-limitations.md | 10 +- 
.../known-limitations/vector-limitations.md | 8 +- .../vectorized-engine-limitations.md | 2 +- src/current/_includes/v25.3/misc/tooling.md | 22 +- .../start-cockroachdb-insecure.md | 14 +- .../start-cockroachdb-local-insecure.md | 8 +- .../orchestration/start-cockroachdb-secure.md | 4 +- .../orchestration/test-cluster-secure.md | 2 +- .../decommission-pre-flight-checks.md | 2 +- ...vepoints-and-high-priority-transactions.md | 2 +- .../v25.4/backward-incompatible/alpha.1.md | 26 +- .../_includes/v25.4/cdc/avro-udt-composite.md | 2 +- .../_includes/v25.4/cdc/csv-udt-composite.md | 2 +- .../_includes/v25.4/faq/what-is-crdb.md | 2 +- .../v25.4/finalization-required/119894.md | 2 +- .../alter-changefeed-cdc-queries.md | 2 +- .../alter-changefeed-limitations.md | 2 +- .../alter-sequence-limitations.md | 2 +- .../alter-table-add-column-limitations.md | 2 +- .../alter-view-limitations.md | 2 +- .../known-limitations/aost-limitations.md | 2 +- ...-materialized-views-inside-transactions.md | 2 +- .../cdc-queries-column-families.md | 2 +- .../v25.4/known-limitations/cdc-queries.md | 4 +- .../_includes/v25.4/known-limitations/cdc.md | 8 +- .../changefeed-column-family-message.md | 2 +- .../known-limitations/citext-limitations.md | 2 +- .../known-limitations/collate-limitations.md | 2 +- .../comment-on-limitations.md | 2 +- .../composite-type-limitations.md | 4 +- .../v25.4/known-limitations/copy-syntax.md | 6 +- .../create-statistics-aost-limitation.md | 2 +- .../data-domiciling-limitations.md | 2 +- .../distsql-heterogeneous-endianness.md | 2 +- .../drop-column-partial-index.md | 2 +- .../drop-owned-by-limitations.md | 2 +- .../drop-trigger-limitations.md | 2 +- .../enforce-home-region-limitations.md | 2 +- .../expression-index-limitations.md | 4 +- .../forecasted-stats-limitations.md | 2 +- .../full-text-search-unsupported.md | 2 +- .../generic-query-plan-limitations.md | 4 +- .../geospatial-heterogeneous-architectures.md | 2 +- .../known-limitations/int-limitations.md | 2 
+- .../inverted-index-limitations.md | 6 +- .../known-limitations/jsonpath-limitations.md | 4 +- .../known-limitations/ldr-column-families.md | 2 +- .../ldr-composite-primary.md | 2 +- .../v25.4/known-limitations/ldr-indexes.md | 2 +- .../v25.4/known-limitations/ldr-sequences.md | 2 +- .../v25.4/known-limitations/ldr-triggers.md | 2 +- .../v25.4/known-limitations/ldr-udfs.md | 2 +- .../like-escape-performance.md | 2 +- ...timized-search-virtual-computed-columns.md | 2 +- .../materialized-views-no-stats.md | 2 +- .../max-row-size-limitations.md | 2 +- .../multiple-arbiter-indexes.md | 2 +- .../node-shutdown-limitations.md | 2 +- .../known-limitations/null-limitations.md | 2 +- .../online-schema-changes-limitations.md | 4 +- .../partition-limitations.md | 2 +- .../known-limitations/plpgsql-limitations.md | 44 +- .../read-committed-limitations.md | 8 +- .../restore-multiregion-match.md | 2 +- .../restore-tables-non-multi-reg.md | 2 +- .../v25.4/known-limitations/restore-udf.md | 2 +- .../rls-update-set-where-returning.md | 2 +- .../rls-values-on-conflict-do-nothing.md | 2 +- .../known-limitations/routine-limitations.md | 16 +- .../row-level-ttl-limitations.md | 2 +- .../savepoint-limitations.md | 2 +- .../select-for-update-limitations.md | 4 +- .../set-transaction-no-rollback.md | 2 +- .../known-limitations/show-backup-symlink.md | 2 +- .../known-limitations/spatial-limitations.md | 16 +- .../v25.4/known-limitations/sql-cursors.md | 12 +- .../sql-guardrails-limitations.md | 4 +- .../srid-4326-limitations.md | 2 +- .../stats-refresh-upgrade.md | 2 +- .../stored-proc-limitations.md | 4 +- .../subquery-mutations-limitations.md | 2 +- .../transaction-row-count-limitations.md | 2 +- .../known-limitations/trigger-limitations.md | 18 +- .../trigram-unsupported-syntax.md | 2 +- .../known-limitations/udf-limitations.md | 16 +- .../known-limitations/vector-limitations.md | 8 +- .../vectorized-engine-limitations.md | 2 +- .../known-limitations/view-limitations.md | 4 +- 
src/current/_includes/v25.4/misc/tooling.md | 22 +- .../start-cockroachdb-insecure.md | 14 +- .../start-cockroachdb-local-insecure.md | 8 +- .../orchestration/start-cockroachdb-secure.md | 4 +- .../orchestration/test-cluster-secure.md | 2 +- .../decommission-pre-flight-checks.md | 2 +- ...vepoints-and-high-priority-transactions.md | 2 +- .../sql/unsupported-postgres-features.md | 4 +- .../v26.1/backward-incompatible/alpha.1.md | 26 +- .../_includes/v26.1/cdc/avro-udt-composite.md | 2 +- .../_includes/v26.1/cdc/csv-udt-composite.md | 2 +- .../_includes/v26.1/faq/what-is-crdb.md | 2 +- .../v26.1/finalization-required/119894.md | 2 +- .../alter-changefeed-cdc-queries.md | 2 +- .../alter-changefeed-limitations.md | 2 +- .../alter-sequence-limitations.md | 2 +- .../alter-table-add-column-limitations.md | 2 +- .../alter-view-limitations.md | 2 +- .../known-limitations/aost-limitations.md | 2 +- ...-materialized-views-inside-transactions.md | 2 +- .../cdc-queries-column-families.md | 2 +- .../v26.1/known-limitations/cdc-queries.md | 4 +- .../_includes/v26.1/known-limitations/cdc.md | 8 +- .../changefeed-column-family-message.md | 2 +- .../known-limitations/citext-limitations.md | 2 +- .../known-limitations/collate-limitations.md | 2 +- .../comment-on-limitations.md | 2 +- .../composite-type-limitations.md | 4 +- .../v26.1/known-limitations/copy-syntax.md | 6 +- .../create-statistics-aost-limitation.md | 2 +- .../data-domiciling-limitations.md | 2 +- .../distsql-heterogeneous-endianness.md | 2 +- .../drop-column-partial-index.md | 2 +- .../drop-owned-by-limitations.md | 2 +- .../drop-trigger-limitations.md | 2 +- .../enforce-home-region-limitations.md | 2 +- .../expression-index-limitations.md | 4 +- .../forecasted-stats-limitations.md | 2 +- .../full-text-search-unsupported.md | 2 +- .../generic-query-plan-limitations.md | 4 +- .../geospatial-heterogeneous-architectures.md | 2 +- .../known-limitations/int-limitations.md | 2 +- .../inverted-index-limitations.md | 6 +- 
.../known-limitations/jsonpath-limitations.md | 4 +- .../known-limitations/ldr-column-families.md | 2 +- .../ldr-composite-primary.md | 2 +- .../v26.1/known-limitations/ldr-indexes.md | 2 +- .../v26.1/known-limitations/ldr-sequences.md | 2 +- .../v26.1/known-limitations/ldr-triggers.md | 2 +- .../v26.1/known-limitations/ldr-udfs.md | 2 +- .../like-escape-performance.md | 2 +- ...timized-search-virtual-computed-columns.md | 2 +- .../materialized-views-no-stats.md | 2 +- .../max-row-size-limitations.md | 2 +- .../multiple-arbiter-indexes.md | 2 +- .../node-shutdown-limitations.md | 2 +- .../known-limitations/null-limitations.md | 2 +- .../online-schema-changes-limitations.md | 4 +- .../partition-limitations.md | 2 +- .../known-limitations/plpgsql-limitations.md | 44 +- .../read-committed-limitations.md | 8 +- .../restore-multiregion-match.md | 2 +- .../restore-tables-non-multi-reg.md | 2 +- .../v26.1/known-limitations/restore-udf.md | 2 +- .../rls-update-set-where-returning.md | 2 +- .../rls-values-on-conflict-do-nothing.md | 2 +- .../known-limitations/routine-limitations.md | 20 +- .../row-level-ttl-limitations.md | 2 +- .../savepoint-limitations.md | 2 +- .../select-for-update-limitations.md | 4 +- .../set-transaction-no-rollback.md | 2 +- .../known-limitations/show-backup-symlink.md | 2 +- .../known-limitations/spatial-limitations.md | 16 +- .../v26.1/known-limitations/sql-cursors.md | 12 +- .../sql-guardrails-limitations.md | 4 +- .../srid-4326-limitations.md | 2 +- .../stats-refresh-upgrade.md | 2 +- .../stored-proc-limitations.md | 4 +- .../subquery-mutations-limitations.md | 2 +- .../transaction-row-count-limitations.md | 2 +- .../known-limitations/trigger-limitations.md | 18 +- .../trigram-unsupported-syntax.md | 2 +- .../known-limitations/udf-limitations.md | 16 +- .../known-limitations/vector-limitations.md | 8 +- .../vectorized-engine-limitations.md | 2 +- .../known-limitations/view-limitations.md | 6 +- src/current/_includes/v26.1/misc/tooling.md | 22 +- 
.../start-cockroachdb-insecure.md | 14 +- .../start-cockroachdb-local-insecure.md | 8 +- .../orchestration/start-cockroachdb-secure.md | 4 +- .../orchestration/test-cluster-secure.md | 2 +- .../decommission-pre-flight-checks.md | 2 +- ...vepoints-and-high-priority-transactions.md | 2 +- .../sql/unsupported-postgres-features.md | 4 +- .../v26.2/backward-incompatible/alpha.1.md | 26 +- .../_includes/v26.2/cdc/avro-udt-composite.md | 2 +- .../_includes/v26.2/cdc/csv-udt-composite.md | 2 +- .../_includes/v26.2/faq/what-is-crdb.md | 2 +- .../v26.2/finalization-required/119894.md | 2 +- .../active-session-history.md | 4 +- .../alter-changefeed-cdc-queries.md | 2 +- .../alter-changefeed-limitations.md | 2 +- .../alter-sequence-limitations.md | 2 +- .../alter-table-add-column-limitations.md | 2 +- .../alter-view-limitations.md | 2 +- .../known-limitations/aost-limitations.md | 2 +- ...-materialized-views-inside-transactions.md | 2 +- .../cdc-queries-column-families.md | 2 +- .../v26.2/known-limitations/cdc-queries.md | 4 +- .../_includes/v26.2/known-limitations/cdc.md | 8 +- .../changefeed-column-family-message.md | 2 +- .../known-limitations/citext-limitations.md | 2 +- .../known-limitations/collate-limitations.md | 2 +- .../comment-on-limitations.md | 2 +- .../composite-type-limitations.md | 4 +- .../v26.2/known-limitations/copy-syntax.md | 6 +- .../create-statistics-aost-limitation.md | 2 +- .../data-domiciling-limitations.md | 2 +- .../distsql-heterogeneous-endianness.md | 2 +- .../drop-column-partial-index.md | 2 +- .../drop-owned-by-limitations.md | 2 +- .../enforce-home-region-limitations.md | 2 +- .../expression-index-limitations.md | 4 +- .../forecasted-stats-limitations.md | 2 +- .../full-text-search-unsupported.md | 2 +- .../generic-query-plan-limitations.md | 4 +- .../geospatial-heterogeneous-architectures.md | 2 +- .../import-into-limitations.md | 2 +- .../known-limitations/int-limitations.md | 2 +- .../inverted-index-limitations.md | 6 +- 
.../known-limitations/jsonpath-limitations.md | 4 +- .../known-limitations/ldr-column-families.md | 2 +- .../ldr-composite-primary.md | 2 +- .../v26.2/known-limitations/ldr-indexes.md | 2 +- .../v26.2/known-limitations/ldr-sequences.md | 2 +- .../v26.2/known-limitations/ldr-triggers.md | 2 +- .../v26.2/known-limitations/ldr-udfs.md | 2 +- .../like-escape-performance.md | 2 +- ...timized-search-virtual-computed-columns.md | 2 +- .../materialized-views-no-stats.md | 2 +- .../max-row-size-limitations.md | 2 +- .../multiple-arbiter-indexes.md | 2 +- .../node-shutdown-limitations.md | 2 +- .../known-limitations/null-limitations.md | 2 +- .../online-schema-changes-limitations.md | 4 +- .../partition-limitations.md | 2 +- .../known-limitations/plpgsql-limitations.md | 44 +- .../read-committed-limitations.md | 8 +- .../restore-multiregion-match.md | 2 +- .../restore-tables-non-multi-reg.md | 2 +- .../v26.2/known-limitations/restore-udf.md | 2 +- .../v26.2/known-limitations/restore-zones.md | 2 +- .../rls-update-set-where-returning.md | 2 +- .../rls-values-on-conflict-do-nothing.md | 2 +- .../known-limitations/routine-limitations.md | 20 +- .../row-level-ttl-limitations.md | 2 +- .../savepoint-limitations.md | 2 +- .../select-for-update-limitations.md | 4 +- .../set-transaction-no-rollback.md | 2 +- .../known-limitations/show-backup-symlink.md | 2 +- .../known-limitations/spatial-limitations.md | 16 +- .../v26.2/known-limitations/sql-cursors.md | 12 +- .../sql-guardrails-limitations.md | 4 +- .../srid-4326-limitations.md | 2 +- .../statement-hints-limitations.md | 4 +- .../stats-refresh-upgrade.md | 2 +- .../stored-proc-limitations.md | 4 +- .../subquery-mutations-limitations.md | 2 +- .../transaction-row-count-limitations.md | 2 +- .../known-limitations/trigger-limitations.md | 12 +- .../trigram-unsupported-syntax.md | 2 +- .../known-limitations/udf-limitations.md | 16 +- .../known-limitations/vector-limitations.md | 8 +- .../vectorized-engine-limitations.md | 2 +- 
.../known-limitations/view-limitations.md | 4 +- src/current/_includes/v26.2/misc/tooling.md | 22 +- .../start-cockroachdb-insecure.md | 14 +- .../start-cockroachdb-local-insecure.md | 8 +- .../orchestration/start-cockroachdb-secure.md | 4 +- .../orchestration/test-cluster-secure.md | 2 +- .../decommission-pre-flight-checks.md | 2 +- ...vepoints-and-high-priority-transactions.md | 2 +- .../sql/unsupported-postgres-features.md | 4 +- src/current/advisories/a101963.md | 4 +- src/current/advisories/a102375.md | 4 +- src/current/advisories/a103220.md | 4 +- src/current/advisories/a104309.md | 8 +- src/current/advisories/a106617.md | 4 +- src/current/advisories/a110363.md | 4 +- src/current/advisories/a114393.md | 4 +- src/current/advisories/a122372.md | 6 +- src/current/advisories/a123371.md | 6 +- src/current/advisories/a131639.md | 4 +- src/current/advisories/a144650.md | 4 +- src/current/advisories/a151050.md | 4 +- src/current/advisories/a161317.md | 4 +- src/current/advisories/a162085.md | 4 +- src/current/advisories/a166122.md | 4 +- src/current/advisories/a190483.md | 4 +- src/current/advisories/a30821.md | 2 +- src/current/advisories/a42567.md | 2 +- src/current/advisories/a43870.md | 2 +- src/current/advisories/a44166.md | 2 +- src/current/advisories/a44299.md | 2 +- src/current/advisories/a44348.md | 2 +- src/current/advisories/a48860.md | 2 +- src/current/advisories/a50587.md | 2 +- src/current/advisories/a54418.md | 6 +- src/current/advisories/a56116.md | 3 +- src/current/advisories/a58932.md | 6 +- src/current/advisories/a62842.md | 9 +- src/current/advisories/a63162.md | 8 +- src/current/advisories/a64325.md | 6 +- src/current/advisories/a68005.md | 6 +- src/current/advisories/a69874.md | 4 +- src/current/advisories/a71002.md | 4 +- src/current/advisories/a71553.md | 4 +- src/current/advisories/a71655.md | 4 +- src/current/advisories/a72839.md | 2 +- src/current/advisories/a73024.md | 4 +- src/current/advisories/a73629.md | 4 +- 
src/current/advisories/a74385.md | 6 +- src/current/advisories/a74736.md | 4 +- src/current/advisories/a75758.md | 4 +- src/current/advisories/a76522.md | 4 +- src/current/advisories/a78681.md | 4 +- src/current/advisories/a79066.md | 4 +- src/current/advisories/a79281.md | 4 +- src/current/advisories/a79384.md | 4 +- src/current/advisories/a81315.md | 4 +- src/current/advisories/a81448.md | 4 +- src/current/advisories/a81968.md | 4 +- src/current/advisories/a82079.md | 4 +- src/current/advisories/a82309.md | 4 +- src/current/advisories/a82576.md | 4 +- src/current/advisories/a84144.md | 4 +- src/current/advisories/a88042.md | 8 +- src/current/advisories/a88407.md | 4 +- src/current/advisories/a88993.md | 4 +- src/current/advisories/a90146.md | 4 +- src/current/advisories/a93314.md | 4 +- src/current/advisories/a93398.md | 4 +- src/current/advisories/a96465.md | 6 +- src/current/advisories/a96924.md | 4 +- src/current/advisories/a97090.md | 4 +- src/current/advisories/a97178.md | 4 +- src/current/advisories/a97932.md | 4 +- src/current/advisories/a98779.md | 4 +- src/current/advisories/a99049.md | 4 +- src/current/advisories/a99561.md | 4 +- src/current/advisories/a99796.md | 4 +- .../bring-your-own-certs/client.yaml | 35 + .../cockroachdb-statefulset.yaml | 244 + .../cloud/kubernetes/client-secure.yaml | 48 + .../cloud/kubernetes/cluster-init.yaml | 19 + .../cockroachdb-statefulset-secure.yaml | 285 ++ .../kubernetes/cockroachdb-statefulset.yaml | 181 + .../cloud/kubernetes/example-app.yaml | 21 + .../cloud/kubernetes/multiregion/README.md | 86 + .../kubernetes/multiregion/client-secure.yaml | 27 + .../multiregion/cluster-init-secure.yaml | 28 + .../cockroachdb-statefulset-secure.yaml | 248 + .../cloud/kubernetes/multiregion/dns-lb.yaml | 23 + .../cockroachdb-statefulset-secure-eks.yaml | 286 ++ .../kubernetes/multiregion/eks/configmap.yaml | 41 + .../multiregion/eks/dns-lb-eks.yaml | 19 + .../multiregion/example-app-secure.yaml | 30 + 
.../multiregion/external-name-svc.yaml | 64 + .../cloud/kubernetes/multiregion/setup.py | 183 + .../cloud/kubernetes/multiregion/teardown.py | 53 + .../cockroachdb-statefulset-insecure.yaml | 215 + .../cockroachdb-statefulset-secure.yaml | 312 ++ .../kubernetes/prometheus/alert-rules.yaml | 205 + .../prometheus/alertmanager-config.yaml | 14 + .../kubernetes/prometheus/alertmanager.yaml | 27 + .../kubernetes/prometheus/prometheus.yaml | 94 + .../docs/RFCS/20160421_distributed_sql.md | 1517 +++++++ .../RFCS/20160706_expressive_zone_config.md | 241 + .../cockroach/docs/RFCS/20200331_enums.md | 398 ++ .../docs/RFCS/20200811_non_blocking_txns.md | 1096 +++++ .../RFCS/20230122_read_committed_isolation.md | 2471 ++++++++++ .../docs/tech-notes/admission_control.md | 343 ++ .../cockroach/docs/tech-notes/encoding.md | 561 +++ .../by-cluster/replication.json | 1308 ++++++ .../by-cluster/runtime.json | 987 ++++ .../grafana-dashboards/by-cluster/sql.json | 1922 ++++++++ .../by-cluster/storage.json | 1347 ++++++ .../files/cockroach/monitoring/prometheus.yml | 35 + .../monitoring/rules/aggregation.rules.yml | 109 + .../monitoring/rules/alerts.rules.yml | 157 + src/current/releases/downloads-archive.md | 4 +- src/current/releases/index.md | 2 +- src/current/v23.1/admission-control.md | 2 +- src/current/v23.1/alter-changefeed.md | 4 +- src/current/v23.1/alter-table.md | 2 +- src/current/v23.1/alter-view.md | 2 +- src/current/v23.1/architecture/sql-layer.md | 4 +- src/current/v23.1/cluster-settings.md | 2 +- .../v23.1/cluster-setup-troubleshooting.md | 2 +- .../v23.1/cockroachdb-feature-availability.md | 4 +- src/current/v23.1/common-table-expressions.md | 2 +- .../v23.1/configure-replication-zones.md | 4 +- src/current/v23.1/create-sequence.md | 2 +- src/current/v23.1/create-table.md | 2 +- src/current/v23.1/disaster-recovery.md | 2 +- src/current/v23.1/eventlog.md | 2 +- src/current/v23.1/example-apps.md | 2 +- src/current/v23.1/file-an-issue.md | 2 +- src/current/v23.1/fips.md | 
2 +- src/current/v23.1/foreign-key.md | 2 +- src/current/v23.1/full-text-search.md | 2 +- src/current/v23.1/functions-and-operators.md | 8 +- src/current/v23.1/install-client-drivers.md | 2 +- src/current/v23.1/install-cockroachdb-mac.md | 2 +- src/current/v23.1/intellij-idea.md | 2 +- src/current/v23.1/jsonb.md | 4 +- src/current/v23.1/known-limitations.md | 108 +- src/current/v23.1/kubernetes-performance.md | 6 +- src/current/v23.1/log-formats.md | 2 +- src/current/v23.1/logging.md | 2 +- src/current/v23.1/migrate-from-avro.md | 2 +- .../v23.1/monitor-cockroachdb-kubernetes.md | 16 +- .../monitor-cockroachdb-with-prometheus.md | 24 +- src/current/v23.1/monitoring-and-alerting.md | 12 +- ...ckroachdb-with-kubernetes-multi-cluster.md | 22 +- src/current/v23.1/partial-indexes.md | 6 +- src/current/v23.1/postgresql-compatibility.md | 12 +- src/current/v23.1/query-spatial-data.md | 2 +- src/current/v23.1/restore.md | 6 +- .../v23.1/schedule-cockroachdb-kubernetes.md | 2 +- src/current/v23.1/spatial-tutorial.md | 2 +- src/current/v23.1/sql-feature-support.md | 6 +- src/current/v23.1/sql-name-resolution.md | 2 +- src/current/v23.1/srid-4326.md | 2 +- src/current/v23.1/st_union.md | 2 +- src/current/v23.1/temporary-tables.md | 2 +- src/current/v23.1/trigram-indexes.md | 2 +- src/current/v23.1/user-defined-functions.md | 12 +- src/current/v23.1/vectorized-execution.md | 2 +- src/current/v23.1/views.md | 2 +- src/current/v23.2/admission-control.md | 4 +- src/current/v23.2/alter-changefeed.md | 4 +- src/current/v23.2/alter-table.md | 2 +- src/current/v23.2/alter-view.md | 2 +- src/current/v23.2/architecture/sql-layer.md | 4 +- ...changefeeds-in-multi-region-deployments.md | 2 +- src/current/v23.2/cluster-settings.md | 2 +- .../v23.2/cluster-setup-troubleshooting.md | 2 +- .../v23.2/cockroachdb-feature-availability.md | 4 +- src/current/v23.2/common-table-expressions.md | 2 +- .../v23.2/configure-replication-zones.md | 4 +- src/current/v23.2/create-sequence.md | 2 +- 
src/current/v23.2/create-table.md | 2 +- .../v23.2/disaster-recovery-planning.md | 2 +- src/current/v23.2/eventlog.md | 2 +- src/current/v23.2/example-apps.md | 2 +- src/current/v23.2/file-an-issue.md | 2 +- src/current/v23.2/fips.md | 2 +- src/current/v23.2/foreign-key.md | 2 +- src/current/v23.2/full-text-search.md | 2 +- src/current/v23.2/functions-and-operators.md | 8 +- src/current/v23.2/install-client-drivers.md | 2 +- src/current/v23.2/install-cockroachdb-mac.md | 2 +- src/current/v23.2/intellij-idea.md | 2 +- src/current/v23.2/jsonb.md | 4 +- src/current/v23.2/known-limitations.md | 142 +- src/current/v23.2/kubernetes-performance.md | 6 +- src/current/v23.2/log-formats.md | 2 +- src/current/v23.2/logging.md | 2 +- src/current/v23.2/migrate-from-avro.md | 2 +- .../v23.2/monitor-cockroachdb-kubernetes.md | 16 +- .../monitor-cockroachdb-with-prometheus.md | 24 +- src/current/v23.2/monitoring-and-alerting.md | 12 +- ...ckroachdb-with-kubernetes-multi-cluster.md | 22 +- src/current/v23.2/partial-indexes.md | 6 +- src/current/v23.2/postgresql-compatibility.md | 12 +- src/current/v23.2/query-spatial-data.md | 2 +- src/current/v23.2/read-committed.md | 4 +- src/current/v23.2/restore.md | 6 +- .../v23.2/schedule-cockroachdb-kubernetes.md | 2 +- src/current/v23.2/spatial-tutorial.md | 2 +- src/current/v23.2/sql-feature-support.md | 4 +- src/current/v23.2/sql-name-resolution.md | 2 +- src/current/v23.2/srid-4326.md | 2 +- src/current/v23.2/st_union.md | 2 +- src/current/v23.2/temporary-tables.md | 2 +- src/current/v23.2/trigram-indexes.md | 2 +- .../v23.2/upgrade-cockroach-version.md | 30 +- src/current/v23.2/user-defined-functions.md | 16 +- src/current/v23.2/vectorized-execution.md | 2 +- src/current/v23.2/views.md | 2 +- src/current/v24.1/admission-control.md | 4 +- src/current/v24.1/architecture/sql-layer.md | 4 +- src/current/v24.1/cluster-settings.md | 2 +- .../v24.1/cluster-setup-troubleshooting.md | 2 +- .../v24.1/cockroachdb-feature-availability.md | 4 +- 
src/current/v24.1/common-table-expressions.md | 2 +- .../v24.1/configure-replication-zones.md | 4 +- src/current/v24.1/create-sequence.md | 2 +- src/current/v24.1/create-table.md | 2 +- .../v24.1/disaster-recovery-planning.md | 2 +- src/current/v24.1/eventlog.md | 2 +- src/current/v24.1/example-apps.md | 2 +- src/current/v24.1/file-an-issue.md | 2 +- src/current/v24.1/fips.md | 2 +- src/current/v24.1/foreign-key.md | 2 +- src/current/v24.1/functions-and-operators.md | 8 +- src/current/v24.1/install-client-drivers.md | 2 +- src/current/v24.1/install-cockroachdb-mac.md | 2 +- src/current/v24.1/intellij-idea.md | 2 +- src/current/v24.1/known-limitations.md | 82 +- src/current/v24.1/kubernetes-performance.md | 6 +- src/current/v24.1/log-formats.md | 2 +- src/current/v24.1/logging.md | 2 +- src/current/v24.1/migrate-from-avro.md | 2 +- .../v24.1/monitor-cockroachdb-kubernetes.md | 16 +- .../monitor-cockroachdb-with-prometheus.md | 24 +- src/current/v24.1/monitoring-and-alerting.md | 12 +- ...ckroachdb-with-kubernetes-multi-cluster.md | 22 +- src/current/v24.1/postgresql-compatibility.md | 12 +- src/current/v24.1/query-spatial-data.md | 2 +- src/current/v24.1/read-committed.md | 4 +- src/current/v24.1/restore.md | 4 +- .../v24.1/schedule-cockroachdb-kubernetes.md | 2 +- src/current/v24.1/spatial-tutorial.md | 2 +- src/current/v24.1/sql-feature-support.md | 4 +- src/current/v24.1/sql-name-resolution.md | 2 +- src/current/v24.1/st_union.md | 2 +- src/current/v24.1/temporary-tables.md | 2 +- .../v24.1/upgrade-cockroach-version.md | 10 - src/current/v24.1/views.md | 2 +- src/current/v24.2/admission-control.md | 4 +- src/current/v24.2/architecture/sql-layer.md | 4 +- src/current/v24.2/cluster-settings.md | 2 +- .../v24.2/cluster-setup-troubleshooting.md | 2 +- .../v24.2/cockroachdb-feature-availability.md | 4 +- src/current/v24.2/common-table-expressions.md | 2 +- .../v24.2/configure-replication-zones.md | 4 +- src/current/v24.2/create-sequence.md | 2 +- 
src/current/v24.2/create-table.md | 2 +- .../v24.2/disaster-recovery-planning.md | 2 +- src/current/v24.2/eventlog.md | 2 +- src/current/v24.2/example-apps.md | 2 +- src/current/v24.2/file-an-issue.md | 2 +- src/current/v24.2/fips.md | 2 +- src/current/v24.2/foreign-key.md | 2 +- src/current/v24.2/functions-and-operators.md | 8 +- src/current/v24.2/install-client-drivers.md | 2 +- src/current/v24.2/install-cockroachdb-mac.md | 2 +- src/current/v24.2/intellij-idea.md | 2 +- src/current/v24.2/known-limitations.md | 68 +- src/current/v24.2/kubernetes-performance.md | 6 +- src/current/v24.2/log-formats.md | 2 +- src/current/v24.2/logging.md | 2 +- src/current/v24.2/migrate-from-avro.md | 2 +- .../v24.2/monitor-cockroachdb-kubernetes.md | 16 +- .../monitor-cockroachdb-with-prometheus.md | 24 +- src/current/v24.2/monitoring-and-alerting.md | 12 +- ...ckroachdb-with-kubernetes-multi-cluster.md | 22 +- src/current/v24.2/postgresql-compatibility.md | 12 +- src/current/v24.2/query-spatial-data.md | 2 +- src/current/v24.2/read-committed.md | 4 +- src/current/v24.2/restore.md | 4 +- .../v24.2/schedule-cockroachdb-kubernetes.md | 2 +- src/current/v24.2/spatial-tutorial.md | 2 +- src/current/v24.2/sql-feature-support.md | 4 +- src/current/v24.2/sql-name-resolution.md | 2 +- src/current/v24.2/st_union.md | 2 +- src/current/v24.2/temporary-tables.md | 2 +- .../v24.2/upgrade-cockroach-version.md | 10 - src/current/v24.2/views.md | 2 +- src/current/v24.3/admission-control.md | 4 +- src/current/v24.3/architecture/sql-layer.md | 4 +- src/current/v24.3/cluster-settings.md | 2 +- .../v24.3/cluster-setup-troubleshooting.md | 2 +- .../v24.3/cockroachdb-feature-availability.md | 4 +- src/current/v24.3/common-table-expressions.md | 2 +- .../v24.3/configure-replication-zones.md | 4 +- src/current/v24.3/create-sequence.md | 2 +- src/current/v24.3/create-table.md | 2 +- .../v24.3/disaster-recovery-planning.md | 2 +- src/current/v24.3/eventlog.md | 2 +- src/current/v24.3/example-apps.md | 2 +- 
src/current/v24.3/file-an-issue.md | 2 +- src/current/v24.3/fips.md | 2 +- src/current/v24.3/foreign-key.md | 2 +- src/current/v24.3/functions-and-operators.md | 8 +- src/current/v24.3/install-client-drivers.md | 2 +- src/current/v24.3/install-cockroachdb-mac.md | 2 +- src/current/v24.3/intellij-idea.md | 2 +- src/current/v24.3/known-limitations.md | 68 +- src/current/v24.3/kubernetes-performance.md | 6 +- src/current/v24.3/log-formats.md | 2 +- src/current/v24.3/logging.md | 2 +- src/current/v24.3/migrate-from-avro.md | 2 +- .../v24.3/monitor-cockroachdb-kubernetes.md | 16 +- .../monitor-cockroachdb-with-prometheus.md | 24 +- src/current/v24.3/monitoring-and-alerting.md | 12 +- ...ckroachdb-with-kubernetes-multi-cluster.md | 22 +- src/current/v24.3/postgresql-compatibility.md | 12 +- src/current/v24.3/query-spatial-data.md | 2 +- src/current/v24.3/read-committed.md | 4 +- src/current/v24.3/restore.md | 4 +- .../v24.3/schedule-cockroachdb-kubernetes.md | 2 +- src/current/v24.3/spatial-tutorial.md | 2 +- src/current/v24.3/sql-feature-support.md | 2 +- src/current/v24.3/sql-name-resolution.md | 2 +- src/current/v24.3/st_union.md | 2 +- src/current/v24.3/temporary-tables.md | 2 +- src/current/v24.3/views.md | 2 +- src/current/v25.1/admission-control.md | 4 +- src/current/v25.1/architecture/sql-layer.md | 4 +- src/current/v25.1/cluster-settings.md | 2 +- .../v25.1/cluster-setup-troubleshooting.md | 2 +- .../v25.1/cockroachdb-feature-availability.md | 4 +- src/current/v25.1/common-table-expressions.md | 2 +- .../v25.1/configure-replication-zones.md | 4 +- src/current/v25.1/create-sequence.md | 2 +- src/current/v25.1/create-table.md | 2 +- .../v25.1/disaster-recovery-planning.md | 2 +- src/current/v25.1/eventlog.md | 2 +- src/current/v25.1/example-apps.md | 2 +- src/current/v25.1/file-an-issue.md | 2 +- src/current/v25.1/fips.md | 2 +- src/current/v25.1/foreign-key.md | 2 +- src/current/v25.1/functions-and-operators.md | 8 +- src/current/v25.1/install-client-drivers.md | 
2 +- src/current/v25.1/install-cockroachdb-mac.md | 2 +- src/current/v25.1/intellij-idea.md | 2 +- src/current/v25.1/known-limitations.md | 68 +- src/current/v25.1/kubernetes-performance.md | 6 +- src/current/v25.1/log-formats.md | 2 +- src/current/v25.1/logging.md | 2 +- src/current/v25.1/migrate-from-avro.md | 2 +- .../v25.1/monitor-cockroachdb-kubernetes.md | 16 +- .../monitor-cockroachdb-with-prometheus.md | 24 +- src/current/v25.1/monitoring-and-alerting.md | 12 +- ...ckroachdb-with-kubernetes-multi-cluster.md | 22 +- src/current/v25.1/postgresql-compatibility.md | 12 +- src/current/v25.1/query-spatial-data.md | 2 +- src/current/v25.1/read-committed.md | 4 +- src/current/v25.1/restore.md | 4 +- .../v25.1/schedule-cockroachdb-kubernetes.md | 2 +- src/current/v25.1/spatial-tutorial.md | 2 +- src/current/v25.1/sql-feature-support.md | 2 +- src/current/v25.1/sql-name-resolution.md | 2 +- src/current/v25.1/st_union.md | 2 +- src/current/v25.1/temporary-tables.md | 2 +- src/current/v25.1/views.md | 2 +- src/current/v25.2/admission-control.md | 4 +- src/current/v25.2/architecture/sql-layer.md | 4 +- src/current/v25.2/cluster-settings.md | 2 +- .../v25.2/cluster-setup-troubleshooting.md | 2 +- .../v25.2/cockroachdb-feature-availability.md | 4 +- src/current/v25.2/common-table-expressions.md | 2 +- .../v25.2/configure-replication-zones.md | 4 +- src/current/v25.2/create-sequence.md | 2 +- src/current/v25.2/create-table.md | 2 +- .../v25.2/disaster-recovery-planning.md | 2 +- src/current/v25.2/eventlog.md | 2 +- src/current/v25.2/example-apps.md | 2 +- src/current/v25.2/file-an-issue.md | 2 +- src/current/v25.2/fips.md | 2 +- src/current/v25.2/foreign-key.md | 2 +- src/current/v25.2/functions-and-operators.md | 8 +- src/current/v25.2/install-client-drivers.md | 2 +- src/current/v25.2/install-cockroachdb-mac.md | 2 +- src/current/v25.2/intellij-idea.md | 2 +- src/current/v25.2/known-limitations.md | 68 +- src/current/v25.2/kubernetes-performance.md | 6 +- 
src/current/v25.2/log-formats.md | 2 +- src/current/v25.2/logging.md | 2 +- src/current/v25.2/migrate-from-avro.md | 2 +- .../v25.2/monitor-cockroachdb-kubernetes.md | 16 +- .../v25.2/monitor-cockroachdb-operator.md | 16 +- .../monitor-cockroachdb-with-prometheus.md | 24 +- src/current/v25.2/monitoring-and-alerting.md | 12 +- ...ckroachdb-with-kubernetes-multi-cluster.md | 22 +- src/current/v25.2/postgresql-compatibility.md | 12 +- src/current/v25.2/query-spatial-data.md | 2 +- src/current/v25.2/read-committed.md | 4 +- src/current/v25.2/restore.md | 4 +- .../v25.2/schedule-cockroachdb-kubernetes.md | 2 +- .../v25.2/schedule-cockroachdb-operator.md | 2 +- src/current/v25.2/spatial-tutorial.md | 2 +- src/current/v25.2/sql-feature-support.md | 2 +- src/current/v25.2/sql-name-resolution.md | 2 +- src/current/v25.2/st_union.md | 2 +- src/current/v25.2/temporary-tables.md | 2 +- src/current/v25.2/views.md | 2 +- src/current/v25.3/admission-control.md | 4 +- src/current/v25.3/architecture/sql-layer.md | 4 +- src/current/v25.3/cluster-settings.md | 2 +- .../v25.3/cluster-setup-troubleshooting.md | 2 +- .../v25.3/cockroachdb-feature-availability.md | 4 +- src/current/v25.3/common-table-expressions.md | 2 +- .../v25.3/configure-replication-zones.md | 4 +- src/current/v25.3/create-sequence.md | 2 +- src/current/v25.3/create-table.md | 2 +- .../v25.3/disaster-recovery-planning.md | 2 +- src/current/v25.3/eventlog.md | 2 +- src/current/v25.3/example-apps.md | 2 +- src/current/v25.3/file-an-issue.md | 2 +- src/current/v25.3/fips.md | 2 +- src/current/v25.3/foreign-key.md | 2 +- src/current/v25.3/functions-and-operators.md | 8 +- src/current/v25.3/install-client-drivers.md | 2 +- src/current/v25.3/install-cockroachdb-mac.md | 2 +- src/current/v25.3/intellij-idea.md | 2 +- src/current/v25.3/known-limitations.md | 68 +- src/current/v25.3/kubernetes-performance.md | 6 +- src/current/v25.3/log-formats.md | 2 +- src/current/v25.3/logging.md | 2 +- 
src/current/v25.3/migrate-from-avro.md | 2 +- .../v25.3/monitor-cockroachdb-kubernetes.md | 16 +- .../v25.3/monitor-cockroachdb-operator.md | 16 +- .../monitor-cockroachdb-with-prometheus.md | 24 +- src/current/v25.3/monitoring-and-alerting.md | 12 +- ...ckroachdb-with-kubernetes-multi-cluster.md | 22 +- src/current/v25.3/postgresql-compatibility.md | 12 +- src/current/v25.3/query-spatial-data.md | 2 +- src/current/v25.3/read-committed.md | 4 +- src/current/v25.3/restore.md | 4 +- .../v25.3/schedule-cockroachdb-kubernetes.md | 2 +- .../v25.3/schedule-cockroachdb-operator.md | 2 +- src/current/v25.3/spatial-tutorial.md | 2 +- src/current/v25.3/sql-feature-support.md | 2 +- src/current/v25.3/sql-name-resolution.md | 2 +- src/current/v25.3/st_union.md | 2 +- src/current/v25.3/temporary-tables.md | 2 +- src/current/v25.3/views.md | 2 +- src/current/v25.4/admission-control.md | 4 +- src/current/v25.4/architecture/sql-layer.md | 4 +- src/current/v25.4/cluster-settings.md | 2 +- .../v25.4/cluster-setup-troubleshooting.md | 4 +- .../v25.4/cockroachdb-feature-availability.md | 4 +- src/current/v25.4/common-table-expressions.md | 2 +- .../v25.4/configure-replication-zones.md | 4 +- src/current/v25.4/create-sequence.md | 2 +- .../v25.4/disaster-recovery-planning.md | 2 +- src/current/v25.4/eventlog.md | 2 +- src/current/v25.4/example-apps.md | 2 +- src/current/v25.4/file-an-issue.md | 2 +- src/current/v25.4/fips.md | 2 +- src/current/v25.4/foreign-key.md | 2 +- src/current/v25.4/functions-and-operators.md | 8 +- src/current/v25.4/install-client-drivers.md | 2 +- src/current/v25.4/install-cockroachdb-mac.md | 2 +- src/current/v25.4/intellij-idea.md | 2 +- src/current/v25.4/kubernetes-performance.md | 6 +- src/current/v25.4/log-formats.md | 2 +- src/current/v25.4/logging.md | 2 +- src/current/v25.4/migrate-from-avro.md | 2 +- .../v25.4/monitor-cockroachdb-kubernetes.md | 16 +- .../v25.4/monitor-cockroachdb-operator.md | 16 +- .../monitor-cockroachdb-with-prometheus.md | 24 +- 
src/current/v25.4/monitoring-and-alerting.md | 12 +- ...ckroachdb-with-kubernetes-multi-cluster.md | 22 +- src/current/v25.4/postgresql-compatibility.md | 12 +- src/current/v25.4/query-spatial-data.md | 2 +- src/current/v25.4/read-committed.md | 4 +- src/current/v25.4/restore.md | 4 +- .../v25.4/schedule-cockroachdb-kubernetes.md | 2 +- .../v25.4/schedule-cockroachdb-operator.md | 2 +- src/current/v25.4/spatial-tutorial.md | 2 +- src/current/v25.4/sql-feature-support.md | 2 +- src/current/v25.4/sql-name-resolution.md | 2 +- src/current/v25.4/st_union.md | 2 +- src/current/v25.4/temporary-tables.md | 2 +- src/current/v25.4/views.md | 2 +- src/current/v26.1/admission-control.md | 4 +- src/current/v26.1/architecture/sql-layer.md | 4 +- src/current/v26.1/cluster-settings.md | 2 +- .../v26.1/cluster-setup-troubleshooting.md | 4 +- .../v26.1/cockroachdb-feature-availability.md | 2 +- src/current/v26.1/common-table-expressions.md | 2 +- .../v26.1/configure-replication-zones.md | 4 +- src/current/v26.1/create-sequence.md | 2 +- .../v26.1/disaster-recovery-planning.md | 2 +- src/current/v26.1/eventlog.md | 2 +- src/current/v26.1/file-an-issue.md | 2 +- src/current/v26.1/fips.md | 2 +- src/current/v26.1/foreign-key.md | 2 +- src/current/v26.1/functions-and-operators.md | 8 +- src/current/v26.1/install-client-drivers.md | 2 +- src/current/v26.1/install-cockroachdb-mac.md | 2 +- src/current/v26.1/intellij-idea.md | 2 +- src/current/v26.1/kubernetes-performance.md | 6 +- src/current/v26.1/log-formats.md | 2 +- src/current/v26.1/logging.md | 2 +- src/current/v26.1/migrate-from-avro.md | 2 +- .../v26.1/monitor-cockroachdb-kubernetes.md | 16 +- .../v26.1/monitor-cockroachdb-operator.md | 16 +- .../monitor-cockroachdb-with-prometheus.md | 24 +- src/current/v26.1/monitoring-and-alerting.md | 12 +- ...ckroachdb-with-kubernetes-multi-cluster.md | 22 +- src/current/v26.1/postgresql-compatibility.md | 12 +- src/current/v26.1/query-spatial-data.md | 2 +- 
src/current/v26.1/read-committed.md | 4 +- src/current/v26.1/restore.md | 4 +- .../v26.1/schedule-cockroachdb-kubernetes.md | 2 +- .../v26.1/schedule-cockroachdb-operator.md | 2 +- src/current/v26.1/sql-feature-support.md | 2 +- src/current/v26.1/sql-name-resolution.md | 2 +- src/current/v26.1/st_union.md | 2 +- src/current/v26.1/temporary-tables.md | 2 +- src/current/v26.1/views.md | 2 +- src/current/v26.2/active-session-history.md | 2 +- src/current/v26.2/admission-control.md | 4 +- src/current/v26.2/architecture/sql-layer.md | 4 +- src/current/v26.2/cluster-settings.md | 2 +- .../v26.2/cluster-setup-troubleshooting.md | 4 +- .../v26.2/cockroachdb-feature-availability.md | 2 +- src/current/v26.2/common-table-expressions.md | 2 +- .../v26.2/configure-replication-zones.md | 4 +- src/current/v26.2/create-sequence.md | 2 +- .../v26.2/disaster-recovery-planning.md | 2 +- src/current/v26.2/eventlog.md | 2 +- src/current/v26.2/file-an-issue.md | 2 +- src/current/v26.2/fips.md | 2 +- src/current/v26.2/foreign-key.md | 2 +- src/current/v26.2/functions-and-operators.md | 8 +- src/current/v26.2/install-client-drivers.md | 2 +- src/current/v26.2/install-cockroachdb-mac.md | 2 +- src/current/v26.2/intellij-idea.md | 2 +- src/current/v26.2/kubernetes-performance.md | 6 +- src/current/v26.2/log-formats.md | 2 +- src/current/v26.2/logging.md | 2 +- src/current/v26.2/migrate-from-avro.md | 2 +- .../v26.2/monitor-cockroachdb-kubernetes.md | 16 +- .../v26.2/monitor-cockroachdb-operator.md | 16 +- .../monitor-cockroachdb-with-prometheus.md | 24 +- src/current/v26.2/monitoring-and-alerting.md | 12 +- ...ckroachdb-with-kubernetes-multi-cluster.md | 22 +- src/current/v26.2/postgresql-compatibility.md | 12 +- src/current/v26.2/query-spatial-data.md | 2 +- src/current/v26.2/read-committed.md | 4 +- src/current/v26.2/restore.md | 4 +- .../v26.2/schedule-cockroachdb-kubernetes.md | 2 +- .../v26.2/schedule-cockroachdb-operator.md | 2 +- src/current/v26.2/sql-feature-support.md | 4 +- 
src/current/v26.2/sql-name-resolution.md | 2 +- src/current/v26.2/st_union.md | 2 +- src/current/v26.2/temporary-tables.md | 2 +- src/current/v26.2/views.md | 2 +- 1648 files changed, 130626 insertions(+), 14765 deletions(-) create mode 100644 src/current/_includes/cockroach-generated/release-23.1/eventlog.md create mode 100644 src/current/_includes/cockroach-generated/release-23.1/logformats.md create mode 100644 src/current/_includes/cockroach-generated/release-23.1/logging.md create mode 100644 src/current/_includes/cockroach-generated/release-23.1/settings/settings.html create mode 100644 src/current/_includes/cockroach-generated/release-23.1/sql/aggregates.md create mode 100644 src/current/_includes/cockroach-generated/release-23.1/sql/functions.md create mode 100644 src/current/_includes/cockroach-generated/release-23.1/sql/operators.md create mode 100644 src/current/_includes/cockroach-generated/release-23.1/sql/window_functions.md create mode 100644 src/current/_includes/cockroach-generated/release-23.2/eventlog.md create mode 100644 src/current/_includes/cockroach-generated/release-23.2/logformats.md create mode 100644 src/current/_includes/cockroach-generated/release-23.2/logging.md create mode 100644 src/current/_includes/cockroach-generated/release-23.2/settings/settings.html create mode 100644 src/current/_includes/cockroach-generated/release-23.2/sql/aggregates.md create mode 100644 src/current/_includes/cockroach-generated/release-23.2/sql/functions.md create mode 100644 src/current/_includes/cockroach-generated/release-23.2/sql/operators.md create mode 100644 src/current/_includes/cockroach-generated/release-23.2/sql/window_functions.md create mode 100644 src/current/_includes/cockroach-generated/release-24.1/eventlog.md create mode 100644 src/current/_includes/cockroach-generated/release-24.1/logformats.md create mode 100644 src/current/_includes/cockroach-generated/release-24.1/logging.md create mode 100644 
src/current/_includes/cockroach-generated/release-24.1/settings/settings.html create mode 100644 src/current/_includes/cockroach-generated/release-24.1/sql/aggregates.md create mode 100644 src/current/_includes/cockroach-generated/release-24.1/sql/functions.md create mode 100644 src/current/_includes/cockroach-generated/release-24.1/sql/operators.md create mode 100644 src/current/_includes/cockroach-generated/release-24.1/sql/window_functions.md create mode 100644 src/current/_includes/cockroach-generated/release-24.2/eventlog.md create mode 100644 src/current/_includes/cockroach-generated/release-24.2/logformats.md create mode 100644 src/current/_includes/cockroach-generated/release-24.2/logging.md create mode 100644 src/current/_includes/cockroach-generated/release-24.2/settings/settings.html create mode 100644 src/current/_includes/cockroach-generated/release-24.2/sql/aggregates.md create mode 100644 src/current/_includes/cockroach-generated/release-24.2/sql/functions.md create mode 100644 src/current/_includes/cockroach-generated/release-24.2/sql/operators.md create mode 100644 src/current/_includes/cockroach-generated/release-24.2/sql/window_functions.md create mode 100644 src/current/_includes/cockroach-generated/release-24.3/eventlog.md create mode 100644 src/current/_includes/cockroach-generated/release-24.3/logformats.md create mode 100644 src/current/_includes/cockroach-generated/release-24.3/logging.md create mode 100644 src/current/_includes/cockroach-generated/release-24.3/settings/settings.html create mode 100644 src/current/_includes/cockroach-generated/release-24.3/sql/aggregates.md create mode 100644 src/current/_includes/cockroach-generated/release-24.3/sql/functions.md create mode 100644 src/current/_includes/cockroach-generated/release-24.3/sql/operators.md create mode 100644 src/current/_includes/cockroach-generated/release-24.3/sql/window_functions.md create mode 100644 src/current/_includes/cockroach-generated/release-25.1/eventlog.md create 
mode 100644 src/current/_includes/cockroach-generated/release-25.1/logformats.md create mode 100644 src/current/_includes/cockroach-generated/release-25.1/logging.md create mode 100644 src/current/_includes/cockroach-generated/release-25.1/settings/settings.html create mode 100644 src/current/_includes/cockroach-generated/release-25.1/sql/aggregates.md create mode 100644 src/current/_includes/cockroach-generated/release-25.1/sql/functions.md create mode 100644 src/current/_includes/cockroach-generated/release-25.1/sql/operators.md create mode 100644 src/current/_includes/cockroach-generated/release-25.1/sql/window_functions.md create mode 100644 src/current/_includes/cockroach-generated/release-25.2/eventlog.md create mode 100644 src/current/_includes/cockroach-generated/release-25.2/logformats.md create mode 100644 src/current/_includes/cockroach-generated/release-25.2/logging.md create mode 100644 src/current/_includes/cockroach-generated/release-25.2/settings/settings.html create mode 100644 src/current/_includes/cockroach-generated/release-25.2/sql/aggregates.md create mode 100644 src/current/_includes/cockroach-generated/release-25.2/sql/functions.md create mode 100644 src/current/_includes/cockroach-generated/release-25.2/sql/operators.md create mode 100644 src/current/_includes/cockroach-generated/release-25.2/sql/window_functions.md create mode 100644 src/current/_includes/cockroach-generated/release-25.3/eventlog.md create mode 100644 src/current/_includes/cockroach-generated/release-25.3/logformats.md create mode 100644 src/current/_includes/cockroach-generated/release-25.3/logging.md create mode 100644 src/current/_includes/cockroach-generated/release-25.3/settings/settings.html create mode 100644 src/current/_includes/cockroach-generated/release-25.3/sql/aggregates.md create mode 100644 src/current/_includes/cockroach-generated/release-25.3/sql/functions.md create mode 100644 src/current/_includes/cockroach-generated/release-25.3/sql/operators.md create 
mode 100644 src/current/_includes/cockroach-generated/release-25.3/sql/window_functions.md create mode 100644 src/current/_includes/cockroach-generated/release-25.4/eventlog.md create mode 100644 src/current/_includes/cockroach-generated/release-25.4/logformats.md create mode 100644 src/current/_includes/cockroach-generated/release-25.4/logging.md create mode 100644 src/current/_includes/cockroach-generated/release-25.4/settings/settings.html create mode 100644 src/current/_includes/cockroach-generated/release-25.4/sql/aggregates.md create mode 100644 src/current/_includes/cockroach-generated/release-25.4/sql/functions.md create mode 100644 src/current/_includes/cockroach-generated/release-25.4/sql/operators.md create mode 100644 src/current/_includes/cockroach-generated/release-25.4/sql/window_functions.md create mode 100644 src/current/_includes/cockroach-generated/release-26.1/eventlog.md create mode 100644 src/current/_includes/cockroach-generated/release-26.1/logformats.md create mode 100644 src/current/_includes/cockroach-generated/release-26.1/logging.md create mode 100644 src/current/_includes/cockroach-generated/release-26.1/settings/settings.html create mode 100644 src/current/_includes/cockroach-generated/release-26.1/sql/aggregates.md create mode 100644 src/current/_includes/cockroach-generated/release-26.1/sql/functions.md create mode 100644 src/current/_includes/cockroach-generated/release-26.1/sql/operators.md create mode 100644 src/current/_includes/cockroach-generated/release-26.1/sql/window_functions.md create mode 100644 src/current/_includes/cockroach-generated/release-26.2/eventlog.md create mode 100644 src/current/_includes/cockroach-generated/release-26.2/logformats.md create mode 100644 src/current/_includes/cockroach-generated/release-26.2/logging.md create mode 100644 src/current/_includes/cockroach-generated/release-26.2/settings/settings.html create mode 100644 src/current/_includes/cockroach-generated/release-26.2/sql/aggregates.md 
create mode 100644 src/current/_includes/cockroach-generated/release-26.2/sql/functions.md create mode 100644 src/current/_includes/cockroach-generated/release-26.2/sql/operators.md create mode 100644 src/current/_includes/cockroach-generated/release-26.2/sql/window_functions.md create mode 100644 src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/client.yaml create mode 100644 src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml create mode 100644 src/current/files/cockroach/cloud/kubernetes/client-secure.yaml create mode 100644 src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml create mode 100644 src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset-secure.yaml create mode 100644 src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml create mode 100644 src/current/files/cockroach/cloud/kubernetes/example-app.yaml create mode 100644 src/current/files/cockroach/cloud/kubernetes/multiregion/README.md create mode 100644 src/current/files/cockroach/cloud/kubernetes/multiregion/client-secure.yaml create mode 100644 src/current/files/cockroach/cloud/kubernetes/multiregion/cluster-init-secure.yaml create mode 100644 src/current/files/cockroach/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml create mode 100644 src/current/files/cockroach/cloud/kubernetes/multiregion/dns-lb.yaml create mode 100644 src/current/files/cockroach/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml create mode 100644 src/current/files/cockroach/cloud/kubernetes/multiregion/eks/configmap.yaml create mode 100644 src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml create mode 100644 src/current/files/cockroach/cloud/kubernetes/multiregion/example-app-secure.yaml create mode 100644 src/current/files/cockroach/cloud/kubernetes/multiregion/external-name-svc.yaml create mode 100644 
src/current/files/cockroach/cloud/kubernetes/multiregion/setup.py create mode 100644 src/current/files/cockroach/cloud/kubernetes/multiregion/teardown.py create mode 100644 src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml create mode 100644 src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml create mode 100644 src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml create mode 100644 src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager-config.yaml create mode 100644 src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml create mode 100644 src/current/files/cockroach/cloud/kubernetes/prometheus/prometheus.yaml create mode 100644 src/current/files/cockroach/docs/RFCS/20160421_distributed_sql.md create mode 100644 src/current/files/cockroach/docs/RFCS/20160706_expressive_zone_config.md create mode 100644 src/current/files/cockroach/docs/RFCS/20200331_enums.md create mode 100644 src/current/files/cockroach/docs/RFCS/20200811_non_blocking_txns.md create mode 100644 src/current/files/cockroach/docs/RFCS/20230122_read_committed_isolation.md create mode 100644 src/current/files/cockroach/docs/tech-notes/admission_control.md create mode 100644 src/current/files/cockroach/docs/tech-notes/encoding.md create mode 100644 src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/replication.json create mode 100644 src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/runtime.json create mode 100644 src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/sql.json create mode 100644 src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/storage.json create mode 100644 src/current/files/cockroach/monitoring/prometheus.yml create mode 100644 src/current/files/cockroach/monitoring/rules/aggregation.rules.yml create mode 100644 src/current/files/cockroach/monitoring/rules/alerts.rules.yml diff 
--git a/src/api/api-spec.json b/src/api/api-spec.json index dd649cbd260..1d7e2eed365 100644 --- a/src/api/api-spec.json +++ b/src/api/api-spec.json @@ -636,13 +636,13 @@ "$ref": "#/definitions/UUID" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "x-go-package": "cockroachdb/cockroach/pkg/server/serverpb" }, "ActiveQuery_Phase": { "type": "integer", "format": "int32", "title": "Phase of execution: `0` for PREPARING, `1` for EXECUTING. For more information see the doc \"SHOW QUERIES\".", - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "x-go-package": "cockroachdb/cockroach/pkg/server/serverpb" }, "Attributes": { "description": "Attributes specifies a list of arbitrary strings describing\nnode topology, store type, and machine capabilities.", @@ -656,7 +656,7 @@ "x-go-name": "Attrs" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/roachpb" + "x-go-package": "cockroachdb/cockroach/pkg/roachpb" }, "ClockTimestamp": { "description": "The statically typed version of a Timestamp with its\nSynthetic flag set to false.", @@ -681,12 +681,12 @@ "x-go-name": "Value" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/config/zonepb" + "x-go-package": "cockroachdb/cockroach/pkg/config/zonepb" }, "Constraint_Type": { "type": "integer", "format": "int32", - "x-go-package": "github.com/cockroachdb/cockroach/pkg/config/zonepb" + "x-go-package": "cockroachdb/cockroach/pkg/config/zonepb" }, "ConstraintsConjunction": { "description": "A set of constraints that need to be satisfied\ntogether by a replica (i.e., by the replica's store).", @@ -707,7 +707,7 @@ "x-go-name": "NumReplicas" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/config/zonepb" + "x-go-package": "cockroachdb/cockroach/pkg/config/zonepb" }, "DatabaseDetailsResponse_Grant": { "type": "object", @@ -726,7 +726,7 @@ "x-go-name": "User" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "x-go-package": 
"cockroachdb/cockroach/pkg/server/serverpb" }, "DatabasesResponse": { "type": "object", @@ -740,7 +740,7 @@ "x-go-name": "Databases" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "x-go-package": "cockroachdb/cockroach/pkg/server/serverpb" }, "EventsResponse": { "description": "A set of event log entries. This is always limited\nto the latest N entries (N is enforced in the associated endpoint).", @@ -754,7 +754,7 @@ "x-go-name": "Events" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "x-go-package": "cockroachdb/cockroach/pkg/server/serverpb" }, "EventsResponse_Event": { "type": "object", @@ -797,7 +797,7 @@ "x-go-name": "UniqueID" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "x-go-package": "cockroachdb/cockroach/pkg/server/serverpb" }, "GCPolicy": { "type": "object", @@ -810,7 +810,7 @@ "x-go-name": "TTLSeconds" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/config/zonepb" + "x-go-package": "cockroachdb/cockroach/pkg/config/zonepb" }, "Key": { "description": "Key is a custom type for a byte string in proto\nmessages which refer to Cockroach keys.", @@ -819,7 +819,7 @@ "type": "integer", "format": "uint8" }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/roachpb" + "x-go-package": "cockroachdb/cockroach/pkg/roachpb" }, "Lease": { "description": "Lease contains information about range leases including the\nexpiration and lease holder.", @@ -853,13 +853,13 @@ "$ref": "#/definitions/ClockTimestamp" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/roachpb" + "x-go-package": "cockroachdb/cockroach/pkg/roachpb" }, "LeaseAcquisitionType": { "description": "The type of lease acquisition event that\nresulted in the current lease.", "type": "integer", "format": "int32", - "x-go-package": "github.com/cockroachdb/cockroach/pkg/roachpb" + "x-go-package": "cockroachdb/cockroach/pkg/roachpb" }, "LeasePreference": { "description": "A preference 
about where range leases should be\nlocated.", @@ -873,13 +873,13 @@ "x-go-name": "Constraints" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/config/zonepb" + "x-go-package": "cockroachdb/cockroach/pkg/config/zonepb" }, "LeaseSequence": { "type": "integer", "format": "int64", "title": "A custom type for a lease sequence number.", - "x-go-package": "github.com/cockroachdb/cockroach/pkg/roachpb" + "x-go-package": "cockroachdb/cockroach/pkg/roachpb" }, "ListSessionsError": { "type": "object", @@ -894,7 +894,7 @@ "$ref": "#/definitions/NodeID" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "x-go-package": "cockroachdb/cockroach/pkg/server/serverpb" }, "ListSessionsResponse": { "type": "object", @@ -917,7 +917,7 @@ "x-go-name": "Sessions" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "x-go-package": "cockroachdb/cockroach/pkg/server/serverpb" }, "Locality": { "description": "An ordered set of key value Tiers that describe a node's\nlocation. The tier keys should be the same across all nodes.", @@ -931,13 +931,13 @@ "x-go-name": "Tiers" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/roachpb" + "x-go-package": "cockroachdb/cockroach/pkg/roachpb" }, "NodeID": { "description": "A custom type for a cockroach node ID. 
(not a raft node ID)\n0 is not a valid NodeID.", "type": "integer", "format": "int32", - "x-go-package": "github.com/cockroachdb/cockroach/pkg/roachpb" + "x-go-package": "cockroachdb/cockroach/pkg/roachpb" }, "PrettySpan": { "type": "object", @@ -951,7 +951,7 @@ "x-go-name": "StartKey" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "x-go-package": "cockroachdb/cockroach/pkg/server/serverpb" }, "RangeProblems": { "type": "object", @@ -992,7 +992,7 @@ "x-go-name": "Underreplicated" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "x-go-package": "cockroachdb/cockroach/pkg/server/serverpb" }, "RangeStatistics": { "description": "Describes statistics reported by a range. For internal use\nonly.", @@ -1011,7 +1011,7 @@ "x-go-name": "WritesPerSecond" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "x-go-package": "cockroachdb/cockroach/pkg/server/serverpb" }, "ReplicaDescriptor": { "description": "Describes a replica location by node ID\n(corresponds to a host:port via lookup on gossip network) and store\nID (identifies the device).", @@ -1030,20 +1030,20 @@ "$ref": "#/definitions/ReplicaType" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/roachpb" + "x-go-package": "cockroachdb/cockroach/pkg/roachpb" }, "ReplicaID": { "type": "integer", "format": "int32", "title": "A custom type for a range replica ID.", - "x-go-package": "github.com/cockroachdb/cockroach/pkg/roachpb" + "x-go-package": "cockroachdb/cockroach/pkg/roachpb" }, "ReplicaType": { "description": "All VOTER* types indicate a replica that participates in all raft activities,\nincluding voting for leadership and committing entries. Typically, this\nrequires a majority of voters to reach a decision. 
In a joint config, two\nseparate majorities are required: one from the set of replicas that have\neither type VOTER or VOTER_OUTGOING or VOTER_DEMOTING_{LEARNER, NON_VOTER},\nas well as that of the set of types VOTER and VOTER_INCOMING . For example,\nwhen type VOTER_FULL is assigned to replicas 1 and 2, while 3 is\nVOTER_OUTGOING and 4 is VOTER_INCOMING, then the two sets over which quorums\nneed to be achieved are {1,2,3} and {1,2,4}. Thus, {1,2} is a quorum of both,\n{1,3} is a quorum of the first but not the second, {1,4} is a quorum of the\nsecond but not the first, and {3,4} is a quorum of neither.", "type": "integer", "format": "int32", "title": "Identifies which raft activities a replica participates in. In\nnormal operation, VOTER_FULL, NON_VOTER, and LEARNER are the only used\nstates. However, atomic replication changes require a transition through a\n\"joint config\"; in this joint config, the VOTER_DEMOTING_{LEARNER, NON_VOTER}\nand VOTER_INCOMING types are used as well to denote voters which are being\ndowngraded to learners and newly added by the change, respectively. 
When\nbeing removed, a demoting voter is turning into a learner, which we prefer\nover a direct removal, which was used prior to v20.1 and uses the\nVOTER_OUTGOING type instead (see VersionChangeReplicasDemotion for details on\nwhy we're not doing that any more).", - "x-go-package": "github.com/cockroachdb/cockroach/pkg/roachpb" + "x-go-package": "cockroachdb/cockroach/pkg/roachpb" }, "Session": { "type": "object", @@ -1116,13 +1116,13 @@ "x-go-name": "Username" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "x-go-package": "cockroachdb/cockroach/pkg/server/serverpb" }, "StoreID": { "type": "integer", "format": "int32", "title": "StoreID is a custom type for a cockroach store ID.", - "x-go-package": "github.com/cockroachdb/cockroach/pkg/roachpb" + "x-go-package": "cockroachdb/cockroach/pkg/roachpb" }, "Subzone": { "type": "object", @@ -1142,7 +1142,7 @@ "x-go-name": "PartitionName" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/config/zonepb" + "x-go-package": "cockroachdb/cockroach/pkg/config/zonepb" }, "SubzoneSpan": { "type": "object", @@ -1160,7 +1160,7 @@ "x-go-name": "SubzoneIndex" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/config/zonepb" + "x-go-package": "cockroachdb/cockroach/pkg/config/zonepb" }, "TableDetailsResponse": { "description": "Grants, column names, and indexes for\na table.", @@ -1216,7 +1216,7 @@ "$ref": "#/definitions/ZoneConfigurationLevel" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "x-go-package": "cockroachdb/cockroach/pkg/server/serverpb" }, "TableDetailsResponse_Column": { "type": "object", @@ -1252,7 +1252,7 @@ "x-go-name": "Type" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "x-go-package": "cockroachdb/cockroach/pkg/server/serverpb" }, "TableDetailsResponse_Grant": { "type": "object", @@ -1272,7 +1272,7 @@ "x-go-name": "User" } }, - "x-go-package": 
"github.com/cockroachdb/cockroach/pkg/server/serverpb" + "x-go-package": "cockroachdb/cockroach/pkg/server/serverpb" }, "TableDetailsResponse_Index": { "type": "object", @@ -1314,7 +1314,7 @@ "x-go-name": "Unique" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "x-go-package": "cockroachdb/cockroach/pkg/server/serverpb" }, "Tier": { "type": "object", @@ -1331,7 +1331,7 @@ "x-go-name": "Value" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/roachpb" + "x-go-package": "cockroachdb/cockroach/pkg/roachpb" }, "Timestamp": { "type": "object", @@ -1355,7 +1355,7 @@ "x-go-name": "WallTime" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/util/hlc" + "x-go-package": "cockroachdb/cockroach/pkg/util/hlc" }, "TxnInfo": { "type": "object", @@ -1429,7 +1429,7 @@ "x-go-name": "TxnDescription" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "x-go-package": "cockroachdb/cockroach/pkg/server/serverpb" }, "UUID": { "type": "array", @@ -1438,7 +1438,7 @@ "type": "integer", "format": "uint8" }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/util/uuid" + "x-go-package": "cockroachdb/cockroach/pkg/util/uuid" }, "UnresolvedAddr": { "type": "object", @@ -1455,7 +1455,7 @@ "x-go-name": "NetworkField" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/util" + "x-go-package": "cockroachdb/cockroach/pkg/util" }, "UsersResponse": { "type": "object", @@ -1470,7 +1470,7 @@ "x-go-name": "Users" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "x-go-package": "cockroachdb/cockroach/pkg/server/serverpb" }, "UsersResponse_User": { "type": "object", @@ -1481,7 +1481,7 @@ "x-go-name": "Username" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "x-go-package": "cockroachdb/cockroach/pkg/server/serverpb" }, "Version": { "type": "object", @@ -1509,7 +1509,7 @@ "x-go-name": "Patch" } }, - "x-go-package": 
"github.com/cockroachdb/cockroach/pkg/roachpb" + "x-go-package": "cockroachdb/cockroach/pkg/roachpb" }, "ZoneConfig": { "description": "For internal use only.", @@ -1517,7 +1517,7 @@ "title": "ZoneConfig holds configuration that applies to one or more ranges.", "properties": { "constraints": { - "description": "Constrains which stores the replicas can be stored on. The\norder in which the constraints are stored is arbitrary and may change.\nhttps://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20160706_expressive_zone_config.md#constraint-system", + "description": "Constrains which stores the replicas can be stored on. The\norder in which the constraints are stored is arbitrary and may change.\nhttps://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20160706_expressive_zone_config.md#constraint-system", "type": "array", "items": { "$ref": "#/definitions/ConstraintsConjunction" @@ -1528,7 +1528,7 @@ "$ref": "#/definitions/GCPolicy" }, "global_reads": { - "description": "Whether transactions operating over the range(s)\nshould be configured to provide non-blocking behavior, meaning that reads\ncan be served consistently from all replicas and do not block on writes. In\nexchange, writes get pushed into the future and must wait on commit to\nensure linearizability. For more, see:\nhttps://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20200811_non_blocking_txns.md", + "description": "Whether transactions operating over the range(s)\nshould be configured to provide non-blocking behavior, meaning that reads\ncan be served consistently from all replicas and do not block on writes. In\nexchange, writes get pushed into the future and must wait on commit to\nensure linearizability. 
For more, see:\nhttps://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20200811_non_blocking_txns.md", "type": "boolean", "x-go-name": "GlobalReads" }, @@ -1602,14 +1602,14 @@ "x-go-name": "VoterConstraints" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/config/zonepb" + "x-go-package": "cockroachdb/cockroach/pkg/config/zonepb" }, "ZoneConfigurationLevel": { "description": "The object level at which the configuration is defined. This is needed\nbecause objects without a specifically indicated Zone Configuration will\ninherit the configuration of their \"parent\".", "type": "integer", "format": "int32", "title": "ZoneConfigurationLevel indicates, for objects with a Zone Configuration,\nthe object level at which the configuration is defined. This is needed\nbecause objects without a specifically indicated Zone Configuration will\ninherit the configuration of their 'parent'.\nValid levels are unknown (0), cluster (1), database (2), or table (3).", - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "x-go-package": "cockroachdb/cockroach/pkg/server/serverpb" }, "databaseDetailsResponse": { "type": "object", @@ -1622,7 +1622,7 @@ "x-go-name": "DescriptorID" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server" + "x-go-package": "cockroachdb/cockroach/pkg/server" }, "databaseGrantsResponse": { "type": "object", @@ -1643,7 +1643,7 @@ "x-go-name": "Next" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server" + "x-go-package": "cockroachdb/cockroach/pkg/server" }, "databaseTablesResponse": { "type": "object", @@ -1664,7 +1664,7 @@ "x-go-name": "TableNames" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server" + "x-go-package": "cockroachdb/cockroach/pkg/server" }, "databasesResponse": { "type": "object", @@ -1684,7 +1684,7 @@ "x-go-name": "Next" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server" + "x-go-package": "cockroachdb/cockroach/pkg/server" 
}, "eventsResponse": { "type": "object", @@ -1704,7 +1704,7 @@ "x-go-name": "Next" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server" + "x-go-package": "cockroachdb/cockroach/pkg/server" }, "hotRangesResponse": { "type": "object", @@ -1733,7 +1733,7 @@ "x-go-name": "Errors" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server" + "x-go-package": "cockroachdb/cockroach/pkg/server" }, "listSessionsResp": { "type": "object", @@ -1762,7 +1762,7 @@ } }, "x-go-name": "listSessionsResponse", - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server" + "x-go-package": "cockroachdb/cockroach/pkg/server" }, "loginResponse": { "type": "object", @@ -1773,7 +1773,7 @@ "x-go-name": "Session" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server" + "x-go-package": "cockroachdb/cockroach/pkg/server" }, "logoutResponse": { "type": "object", @@ -1784,7 +1784,7 @@ "x-go-name": "LoggedOut" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server" + "x-go-package": "cockroachdb/cockroach/pkg/server" }, "nodeRangeResponse": { "type": "object", @@ -1797,7 +1797,7 @@ "$ref": "#/definitions/rangeInfo" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server" + "x-go-package": "cockroachdb/cockroach/pkg/server" }, "nodeRangesResponse": { "type": "object", @@ -1818,7 +1818,7 @@ "x-go-name": "Ranges" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server" + "x-go-package": "cockroachdb/cockroach/pkg/server" }, "nodeStatus": { "type": "object", @@ -1895,7 +1895,7 @@ "x-go-name": "UpdatedAt" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server" + "x-go-package": "cockroachdb/cockroach/pkg/server" }, "nodesResponse": { "type": "object", @@ -1916,7 +1916,7 @@ "x-go-name": "Nodes" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server" + "x-go-package": "cockroachdb/cockroach/pkg/server" }, "rangeDescriptorInfo": { "description": "Contains a subset of fields from the 
Cockroach-internal\nrange descriptor that are safe to be returned from APIs.", @@ -1959,7 +1959,7 @@ "x-go-name": "StoreID" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server" + "x-go-package": "cockroachdb/cockroach/pkg/server" }, "rangeInfo": { "type": "object", @@ -2013,7 +2013,7 @@ "x-go-name": "Ticking" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server" + "x-go-package": "cockroachdb/cockroach/pkg/server" }, "rangeResponse": { "type": "object", @@ -2026,7 +2026,7 @@ "x-go-name": "Responses" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server" + "x-go-package": "cockroachdb/cockroach/pkg/server" }, "responseError": { "type": "object", @@ -2039,7 +2039,7 @@ "$ref": "#/definitions/NodeID" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server" + "x-go-package": "cockroachdb/cockroach/pkg/server" }, "tableDetailsResponse": { "title": "Response for tableDetails.", @@ -2064,7 +2064,7 @@ "x-go-name": "Users" } }, - "x-go-package": "github.com/cockroachdb/cockroach/pkg/server" + "x-go-package": "cockroachdb/cockroach/pkg/server" } }, "securityDefinitions": { diff --git a/src/current/_data/menus.yml b/src/current/_data/menus.yml index 62e78c29a37..dd2dfe20977 100644 --- a/src/current/_data/menus.yml +++ b/src/current/_data/menus.yml @@ -101,8 +101,6 @@ # resources_support: -# - name: GitHub -# url: https://github.com/cockroachdb/cockroach # - name: Slack # url: https://www.cockroachlabs.com/join-community/ # - name: Support Portal diff --git a/src/current/_data/v24.2/metrics/child-metrics.yml b/src/current/_data/v24.2/metrics/child-metrics.yml index afe92da0139..473d3c88f6b 100644 --- a/src/current/_data/v24.2/metrics/child-metrics.yml +++ b/src/current/_data/v24.2/metrics/child-metrics.yml @@ -1,6 +1,6 @@ # child-metrics.yml is a manually curated file of metrics that are included in the Child Metrics. 
# The metrics are in the order of appearance in the comment: -# https://github.com/cockroachdb/cockroach/issues/124343#issuecomment-2117886012 +# cockroach#124343 # The tenant.consumption.* metrics are not included because they only apply to serverless. # # The corresponding metrics-list.csv file was generated using the v24.1.0 binary with the following command: diff --git a/src/current/_data/v24.3/metrics/child-metrics.yml b/src/current/_data/v24.3/metrics/child-metrics.yml index afe92da0139..473d3c88f6b 100644 --- a/src/current/_data/v24.3/metrics/child-metrics.yml +++ b/src/current/_data/v24.3/metrics/child-metrics.yml @@ -1,6 +1,6 @@ # child-metrics.yml is a manually curated file of metrics that are included in the Child Metrics. # The metrics are in the order of appearance in the comment: -# https://github.com/cockroachdb/cockroach/issues/124343#issuecomment-2117886012 +# cockroach#124343 # The tenant.consumption.* metrics are not included because they only apply to serverless. # # The corresponding metrics-list.csv file was generated using the v24.1.0 binary with the following command: diff --git a/src/current/_data/v25.1/metrics/multi-dimensional-metrics.yml b/src/current/_data/v25.1/metrics/multi-dimensional-metrics.yml index d1b55ff5b1a..8cea8be7522 100644 --- a/src/current/_data/v25.1/metrics/multi-dimensional-metrics.yml +++ b/src/current/_data/v25.1/metrics/multi-dimensional-metrics.yml @@ -1,6 +1,6 @@ # multi-dimensional-metrics.yml is a manually curated file of metrics that are included in the Multi-Dimensional Metrics page. # The metrics are in the order of appearance in the comment: -# https://github.com/cockroachdb/cockroach/issues/124343#issuecomment-2117886012 +# cockroach#124343 # The tenant.consumption.* metrics are not included because they only apply to serverless. 
# # The corresponding metrics-list.csv file was generated using the cockroach binary with the following command: diff --git a/src/current/_data/v25.2/metrics/multi-dimensional-metrics.yml b/src/current/_data/v25.2/metrics/multi-dimensional-metrics.yml index a9d82024013..bad2be6397f 100644 --- a/src/current/_data/v25.2/metrics/multi-dimensional-metrics.yml +++ b/src/current/_data/v25.2/metrics/multi-dimensional-metrics.yml @@ -1,6 +1,6 @@ # multi-dimensional-metrics.yml is a manually curated file of metrics that are included in the Multi-Dimensional Metrics page. # The metrics are in the order of appearance in the comment: -# https://github.com/cockroachdb/cockroach/issues/124343#issuecomment-2117886012 +# cockroach#124343 # The tenant.consumption.* metrics are not included because they only apply to serverless. # # The corresponding metrics-list.csv file was generated using the cockroach binary with the following command: diff --git a/src/current/_data/v25.3/metrics/multi-dimensional-metrics.yml b/src/current/_data/v25.3/metrics/multi-dimensional-metrics.yml index a9d82024013..bad2be6397f 100644 --- a/src/current/_data/v25.3/metrics/multi-dimensional-metrics.yml +++ b/src/current/_data/v25.3/metrics/multi-dimensional-metrics.yml @@ -1,6 +1,6 @@ # multi-dimensional-metrics.yml is a manually curated file of metrics that are included in the Multi-Dimensional Metrics page. # The metrics are in the order of appearance in the comment: -# https://github.com/cockroachdb/cockroach/issues/124343#issuecomment-2117886012 +# cockroach#124343 # The tenant.consumption.* metrics are not included because they only apply to serverless. 
# # The corresponding metrics-list.csv file was generated using the cockroach binary with the following command: diff --git a/src/current/_data/v25.4/metrics/multi-dimensional-metrics.yml b/src/current/_data/v25.4/metrics/multi-dimensional-metrics.yml index a9d82024013..bad2be6397f 100644 --- a/src/current/_data/v25.4/metrics/multi-dimensional-metrics.yml +++ b/src/current/_data/v25.4/metrics/multi-dimensional-metrics.yml @@ -1,6 +1,6 @@ # multi-dimensional-metrics.yml is a manually curated file of metrics that are included in the Multi-Dimensional Metrics page. # The metrics are in the order of appearance in the comment: -# https://github.com/cockroachdb/cockroach/issues/124343#issuecomment-2117886012 +# cockroach#124343 # The tenant.consumption.* metrics are not included because they only apply to serverless. # # The corresponding metrics-list.csv file was generated using the cockroach binary with the following command: diff --git a/src/current/_data/v26.1/metrics/multi-dimensional-metrics.yml b/src/current/_data/v26.1/metrics/multi-dimensional-metrics.yml index a9d82024013..bad2be6397f 100644 --- a/src/current/_data/v26.1/metrics/multi-dimensional-metrics.yml +++ b/src/current/_data/v26.1/metrics/multi-dimensional-metrics.yml @@ -1,6 +1,6 @@ # multi-dimensional-metrics.yml is a manually curated file of metrics that are included in the Multi-Dimensional Metrics page. # The metrics are in the order of appearance in the comment: -# https://github.com/cockroachdb/cockroach/issues/124343#issuecomment-2117886012 +# cockroach#124343 # The tenant.consumption.* metrics are not included because they only apply to serverless. 
# # The corresponding metrics-list.csv file was generated using the cockroach binary with the following command: diff --git a/src/current/_data/v26.2/metrics/multi-dimensional-metrics.yml b/src/current/_data/v26.2/metrics/multi-dimensional-metrics.yml index a9d82024013..bad2be6397f 100644 --- a/src/current/_data/v26.2/metrics/multi-dimensional-metrics.yml +++ b/src/current/_data/v26.2/metrics/multi-dimensional-metrics.yml @@ -1,6 +1,6 @@ # multi-dimensional-metrics.yml is a manually curated file of metrics that are included in the Multi-Dimensional Metrics page. # The metrics are in the order of appearance in the comment: -# https://github.com/cockroachdb/cockroach/issues/124343#issuecomment-2117886012 +# cockroach#124343 # The tenant.consumption.* metrics are not included because they only apply to serverless. # # The corresponding metrics-list.csv file was generated using the cockroach binary with the following command: diff --git a/src/current/_includes/cockroach-generated/release-23.1/eventlog.md b/src/current/_includes/cockroach-generated/release-23.1/eventlog.md new file mode 100644 index 00000000000..1c12182df64 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-23.1/eventlog.md @@ -0,0 +1,3216 @@ +Certain notable events are reported using a structured format. +Commonly, these notable events are also copied to the table +`system.eventlog`, unless the cluster setting +`server.eventlog.enabled` is unset. + +Additionally, notable events are copied to specific external logging +channels in log messages, where they can be collected for further processing. + +The sections below document the possible notable event types +in this version of CockroachDB. For each event type, a table +documents the possible fields. A field may be omitted from +an event if its value is empty or zero. + +A field is also considered "Sensitive" if it may contain +application-specific information or personally identifiable information (PII). 
In that case, +the copy of the event sent to the external logging channel +will contain redaction markers in a format that is compatible +with the redaction facilities in [`cockroach debug zip`](cockroach-debug-zip.html) +and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html), +provided the `redactable` functionality is enabled on the logging sink. + +Events not documented on this page will have an unstructured format in log messages. + +## Cluster-level events + +Events in this category pertain to an entire cluster and are +not relative to any particular tenant. + +In a multi-tenant setup, the `system.eventlog` table for individual +tenants cannot contain a copy of cluster-level events; conversely, +the `system.eventlog` table in the system tenant cannot contain the +SQL-level events for individual tenants. + +Events in this category are logged to the `OPS` channel. + + +### `certs_reload` + +An event of type `certs_reload` is recorded when the TLS certificates are +reloaded/rotated from disk. + + +| Field | Description | Sensitive | +|--|--|--| +| `Success` | Whether the operation completed without errors. | no | +| `ErrorMessage` | If an error was encountered, the text of the error. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `node_decommissioned` + +An event of type `node_decommissioned` is recorded when a node is marked as +decommissioned. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. 
| no | + +### `node_decommissioning` + +An event of type `node_decommissioning` is recorded when a node is marked as +decommissioning. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_join` + +An event of type `node_join` is recorded when a node joins the cluster. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_recommissioned` + +An event of type `node_recommissioned` is recorded when a decommissioning node is +recommissioned. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_restart` + +An event of type `node_restart` is recorded when an existing node rejoins the cluster +after being offline. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. 
| no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_shutdown_connection_timeout` + +An event of type `node_shutdown_connection_timeout` is recorded when SQL connections remain open +during shutdown, after waiting for the server.shutdown.connections.timeout +to transpire. + + +| Field | Description | Sensitive | +|--|--|--| +| `Detail` | The detailed message, meant to be a human-understandable explanation. | no | +| `ConnectionsRemaining` | The number of connections still open after waiting for the client to close them. | no | +| `TimeoutMillis` | The amount of time the server waited for the client to close the connections, defined by server.shutdown.connections.timeout. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_shutdown_transaction_timeout` + +An event of type `node_shutdown_transaction_timeout` is recorded when SQL transactions remain open +during shutdown, after waiting for the server.shutdown.transactions.timeout +to transpire. + + +| Field | Description | Sensitive | +|--|--|--| +| `Detail` | The detailed message, meant to be a human-understandable explanation. | no | +| `ConnectionsRemaining` | The number of connections still running SQL transactions after waiting for the client to end them. | no | +| `TimeoutMillis` | The amount of time the server waited for the client to close the connections, defined by server.shutdown.transactions.timeout. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `tenant_shared_service_start` + +An event of type `tenant_shared_service_start` is recorded when a tenant server +is started inside the same process as the KV layer. + + +| Field | Description | Sensitive | +|--|--|--| +| `OK` | Whether the startup was successful. | no | +| `ErrorText` | If the startup failed, the text of the error. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `TenantID` | The ID of the tenant owning the service. | no | +| `InstanceID` | The ID of the server instance. | no | +| `TenantName` | The name of the tenant at the time the event was emitted. | yes | + +### `tenant_shared_service_stop` + +An event of type `tenant_shared_service_stop` is recorded when a tenant server +is shut down inside the same process as the KV layer. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `TenantID` | The ID of the tenant owning the service. | no | +| `InstanceID` | The ID of the server instance. | no | +| `TenantName` | The name of the tenant at the time the event was emitted. 
| yes | + +## Debugging events + +Events in this category pertain to debugging operations performed by +operators or (more commonly) Cockroach Labs employees. These operations can +e.g. directly access and mutate internal state, breaking system invariants. + +Events in this category are logged to the `OPS` channel. + + +### `debug_recover_replica` + +An event of type `debug_recover_replica` is recorded when unsafe loss of quorum recovery is performed. + + +| Field | Description | Sensitive | +|--|--|--| +| `RangeID` | | no | +| `StoreID` | | no | +| `SurvivorReplicaID` | | no | +| `UpdatedReplicaID` | | no | +| `StartKey` | | yes | +| `EndKey` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event originated. | no | +| `User` | The user which performed the operation. | yes | + +### `debug_send_kv_batch` + +An event of type `debug_send_kv_batch` is recorded when an arbitrary KV BatchRequest is submitted +to the cluster via the `debug send-kv-batch` CLI command. + + +| Field | Description | Sensitive | +|--|--|--| +| `BatchRequest` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event originated. | no | +| `User` | The user which performed the operation. | yes | + +## Health events + +Events in this category pertain to the health of one or more servers. + +Events in this category are logged to the `HEALTH` channel. + + +### `runtime_stats` + +An event of type `runtime_stats` is recorded every 10 seconds as server health metrics. + + +| Field | Description | Sensitive | +|--|--|--| +| `MemRSSBytes` | The process resident set size. 
Expressed as bytes. | no | +| `GoroutineCount` | The number of goroutines. | no | +| `MemStackSysBytes` | The stack system memory used. Expressed as bytes. | no | +| `GoAllocBytes` | The memory allocated by Go. Expressed as bytes. | no | +| `GoTotalBytes` | The total memory allocated by Go but not released. Expressed as bytes. | no | +| `GoStatsStaleness` | The staleness of the Go memory statistics. Expressed in seconds. | no | +| `HeapFragmentBytes` | The amount of heap fragmentation. Expressed as bytes. | no | +| `HeapReservedBytes` | The amount of heap reserved. Expressed as bytes. | no | +| `HeapReleasedBytes` | The amount of heap released. Expressed as bytes. | no | +| `CGoAllocBytes` | The memory allocated outside of Go. Expressed as bytes. | no | +| `CGoTotalBytes` | The total memory allocated outside of Go but not released. Expressed as bytes. | no | +| `CGoCallRate` | The total number of calls outside of Go over time. Expressed as operations per second. | no | +| `CPUUserPercent` | The user CPU percentage. | no | +| `CPUSysPercent` | The system CPU percentage. | no | +| `GCPausePercent` | The GC pause percentage. | no | +| `GCRunCount` | The total number of GC runs. | no | +| `NetHostRecvBytes` | The bytes received on all network interfaces since this process started. | no | +| `NetHostSendBytes` | The bytes sent on all network interfaces since this process started. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Job events + +Events in this category pertain to long-running jobs that are orchestrated by +a node's job registry. These system processes can create and/or modify stored +objects during the course of their execution. + +A job might choose to emit multiple events during its execution when +transitioning from one "state" to another. 
+Egs: IMPORT/RESTORE will emit events on job creation and successful +completion. If the job fails, events will be emitted on job creation, +failure, and successful revert. + +Events in this category are logged to the `OPS` channel. + + +### `import` + +An event of type `import` is recorded when an import job is created and successful completion. +If the job fails, events will be emitted on job creation, failure, and +successful revert. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `JobID` | The ID of the job that triggered the event. | no | +| `JobType` | The type of the job that triggered the event. | no | +| `Description` | A description of the job that triggered the event. Some jobs populate the description with an approximate representation of the SQL statement run to create the job. | yes | +| `User` | The user account that triggered the event. | yes | +| `DescriptorIDs` | The object descriptors affected by the job. Set to zero for operations that don't affect descriptors. | yes | +| `Status` | The status of the job that triggered the event. This allows the job to indicate which phase execution it is in when the event is triggered. | no | + +### `restore` + +An event of type `restore` is recorded when a restore job is created and successful completion. +If the job fails, events will be emitted on job creation, failure, and +successful revert. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `JobID` | The ID of the job that triggered the event. | no | +| `JobType` | The type of the job that triggered the event. | no | +| `Description` | A description of the job that triggered the event. 
Some jobs populate the description with an approximate representation of the SQL statement run to create the job. | yes | +| `User` | The user account that triggered the event. | yes | +| `DescriptorIDs` | The object descriptors affected by the job. Set to zero for operations that don't affect descriptors. | yes | +| `Status` | The status of the job that triggered the event. This allows the job to indicate which phase execution it is in when the event is triggered. | no | + +## Miscellaneous SQL events + +Events in this category report miscellaneous SQL events. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these miscellaneous events are +preserved in each tenant's own system.eventlog table. + +Events in this category are logged to the `DEV` channel. + + +### `set_cluster_setting` + +An event of type `set_cluster_setting` is recorded when a cluster setting is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SettingName` | The name of the affected cluster setting. | no | +| `Value` | The new value of the cluster setting. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `set_tenant_cluster_setting` + +An event of type `set_tenant_cluster_setting` is recorded when a cluster setting override +is changed, either for another tenant or for all tenants. + + +| Field | Description | Sensitive | +|--|--|--| +| `SettingName` | The name of the affected cluster setting. | no | +| `Value` | The new value of the cluster setting. | yes | +| `TenantId` | The target Tenant ID. Empty if targeting all tenants. | no | +| `AllTenants` | Whether the override applies to all tenants. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +## SQL Access Audit Events + +Events in this category are generated when a table has been +marked as audited via `ALTER TABLE ... EXPERIMENTAL_AUDIT SET`. + +Note: These events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SENSITIVE_ACCESS` channel. + + +### `admin_query` + +An event of type `admin_query` is recorded when a user with admin privileges (the user +is directly or indirectly a member of the admin role) executes a query. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. 
| no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `role_based_audit_event` + +An event of type `role_based_audit_event` is an audit event recorded when an executed query belongs to a user whose role +membership(s) correspond to any role that is enabled to emit an audit log via the sql.log.user_audit +cluster setting. + + +| Field | Description | Sensitive | +|--|--|--| +| `Role` | The configured audit role that emitted this log. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. 
Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `sensitive_table_access` + +An event of type `sensitive_table_access` is recorded when an access is performed to +a table marked as audited. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table being audited. | yes | +| `AccessMode` | How the table was accessed (r=read / rw=read/write). | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +## SQL Execution Log + +Events in this category report executed queries. 
+ +Note: These events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SQL_EXEC` channel. + + +### `query_execute` + +An event of type `query_execute` is recorded when a query is executed, +and the cluster setting `sql.trace.log_statement_execute` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. 
| partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +## SQL Logical Schema Changes + +Events in this category pertain to DDL (Data Definition Language) +operations performed by SQL statements that modify the SQL logical +schema. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `SQL_SCHEMA` channel. + + +### `alter_database_add_region` + +An event of type `alter_database_add_region` is recorded when a region is added to a database. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | yes | +| `RegionName` | The region being added. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. 
The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_drop_region` + +An event of type `alter_database_drop_region` is recorded when a region is dropped from a database. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | yes | +| `RegionName` | The region being dropped. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `alter_database_placement` + +An event of type `alter_database_placement` is recorded when the database placement is modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | yes | +| `Placement` | The new placement policy. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_primary_region` + +An event of type `alter_database_primary_region` is recorded when a primary region is added/modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | yes | +| `PrimaryRegionName` | The new primary region. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_set_zone_config_extension` + +An event of type `alter_database_set_zone_config_extension` is recorded when a zone config extension is changed. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + +### `alter_database_survival_goal` + +An event of type `alter_database_survival_goal` is recorded when the survival goal is modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | yes | +| `SurvivalGoal` | The new survival goal | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `alter_function_options` + +An event of type `alter_function_options` is recorded when a user-defined function's options are +altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the affected function. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_index` + +An event of type `alter_index` is recorded when an index is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | yes | +| `IndexName` | The name of the affected index. | yes | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_index_visible` + +An event of type `alter_index_visible` is recorded when an index's visibility is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | yes | +| `IndexName` | The name of the affected index. | yes | +| `NotVisible` | Set true if index is not visible. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. 
The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_sequence` + +An event of type `alter_sequence` is recorded when a sequence is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the affected sequence. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_table` + +An event of type `alter_table` is recorded when a table is altered. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | yes | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update, if any. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_type` + +EventAlterType is recorded when a user-defined type is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_column` + +An event of type `comment_on_column` is recorded when a column is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected column. | yes | +| `ColumnName` | The affected column. | yes | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_constraint` + +An event of type `comment_on_constraint` is recorded when a constraint is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected constraint. | yes | +| `ConstraintName` | The name of the affected constraint. | yes | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_database` + +An event of type `comment_on_database` is recorded when a database is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | yes | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_index` + +An event of type `comment_on_index` is recorded when an index is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | yes | +| `IndexName` | The name of the affected index. | yes | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_schema` + + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | Name of the affected schema. | yes | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_table` + +An event of type `comment_on_table` is recorded when a table is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | yes | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `convert_to_schema` + +An event of type `convert_to_schema` is recorded when a database is converted to a schema. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database being converted to a schema. | yes | +| `NewDatabaseParent` | The name of the parent database for the new schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_database` + +An event of type `create_database` is recorded when a database is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the new database. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_function` + +An event of type `create_function` is recorded when a user-defined function is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the created function. | yes | +| `IsReplace` | If the new function is a replace of an existing function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. 
The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_index` + +An event of type `create_index` is recorded when an index is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the new index. | yes | +| `IndexName` | The name of the new index. | yes | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_schema` + +An event of type `create_schema` is recorded when a schema is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the new schema. | yes | +| `Owner` | The name of the owner for the new schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_sequence` + +An event of type `create_sequence` is recorded when a sequence is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the new sequence. | yes | +| `Owner` | The name of the owner for the new sequence. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_statistics` + +An event of type `create_statistics` is recorded when statistics are collected for a +table. + +Events of this type are only collected when the cluster setting +`sql.stats.post_events.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table for which the statistics were created. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. 
The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_table` + +An event of type `create_table` is recorded when a table is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the new table. | yes | +| `Owner` | The name of the owner for the new table. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `create_type` + +An event of type `create_type` is recorded when a user-defined type is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the new type. | yes | +| `Owner` | The name of the owner for the new type. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_view` + +An event of type `create_view` is recorded when a view is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `ViewName` | The name of the new view. | yes | +| `Owner` | The name of the owner of the new view. | yes | +| `ViewQuery` | The SQL selection clause used to define the view. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_database` + +An event of type `drop_database` is recorded when a database is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | yes | +| `DroppedSchemaObjects` | The names of the schemas dropped by a cascade operation. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_function` + +An event of type `drop_function` is recorded when a user-defined function is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the affected function. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_index` + +An event of type `drop_index` is recorded when an index is dropped. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | yes | +| `IndexName` | The name of the affected index. | yes | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_schema` + +An event of type `drop_schema` is recorded when a schema is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_sequence` + +An event of type `drop_sequence` is recorded when a sequence is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the affected sequence. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. 
Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_table` + +An event of type `drop_table` is recorded when a table is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | yes | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_type` + +An event of type `drop_type` is recorded when a user-defined type is dropped. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_view` + +An event of type `drop_view` is recorded when a view is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `ViewName` | The name of the affected view. | yes | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `finish_schema_change` + +An event of type `finish_schema_change` is recorded when a previously initiated schema +change has completed. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `finish_schema_change_rollback` + +An event of type `finish_schema_change_rollback` is recorded when a previously +initiated schema change rollback has completed. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. 
Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `force_delete_table_data_entry` + + + +| Field | Description | Sensitive | +|--|--|--| +| `DescriptorID` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_database` + +An event of type `rename_database` is recorded when a database is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The old name of the affected database. | yes | +| `NewDatabaseName` | The new name of the affected database. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_function` + +An event of type `rename_function` is recorded when a user-defined function is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | The old name of the affected function. | yes | +| `NewFunctionName` | The new name of the affected function. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_schema` + +An event of type `rename_schema` is recorded when a schema is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The old name of the affected schema. | yes | +| `NewSchemaName` | The new name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_table` + +An event of type `rename_table` is recorded when a table, sequence or view is renamed. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The old name of the affected table. | yes | +| `NewTableName` | The new name of the affected table. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_type` + +An event of type `rename_type` is recorded when a user-defined type is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The old name of the affected type. | yes | +| `NewTypeName` | The new name of the affected type. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `reverse_schema_change` + +An event of type `reverse_schema_change` is recorded when an in-progress schema change +encounters a problem and is reversed. + + +| Field | Description | Sensitive | +|--|--|--| +| `Error` | The error encountered that caused the schema change to be reversed. The specific format of the error is variable and can change across releases without warning. | yes | +| `SQLSTATE` | The SQLSTATE code for the error. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `set_schema` + +An event of type `set_schema` is recorded when a table, view, sequence or type's schema is changed. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `DescriptorName` | The old name of the affected descriptor. | yes | +| `NewDescriptorName` | The new name of the affected descriptor. | yes | +| `DescriptorType` | The descriptor type being changed (table, view, sequence, type). | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `truncate_table` + +An event of type `truncate_table` is recorded when a table is truncated. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_delete_descriptor` + +An event of type `unsafe_delete_descriptor` is recorded when a descriptor is written +using crdb_internal.unsafe_delete_descriptor(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. + + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | yes | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. 
The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_delete_namespace_entry` + +An event of type `unsafe_delete_namespace_entry` is recorded when a namespace entry is +written using crdb_internal.unsafe_delete_namespace_entry(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. + + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | yes | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_upsert_descriptor` + +An event of type `unsafe_upsert_descriptor` is recorded when a descriptor is written +using crdb_internal.unsafe_upsert_descriptor(). + + +| Field | Description | Sensitive | +|--|--|--| +| `PreviousDescriptor` | | yes | +| `NewDescriptor` | | yes | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_upsert_namespace_entry` + +An event of type `unsafe_upsert_namespace_entry` is recorded when a namespace entry is +written using crdb_internal.unsafe_upsert_namespace_entry(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | yes | +| `PreviousID` | | no | +| `Force` | | no | +| `FailedValidation` | | no | +| `ValidationErrors` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +## SQL Privilege changes + +Events in this category pertain to DDL (Data Definition Language) +operations performed by SQL statements that modify the privilege +grants for stored objects. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `PRIVILEGES` channel. + + +### `alter_database_owner` + +An event of type `alter_database_owner` is recorded when a database's owner is changed. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database being affected. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_default_privileges` + +An event of type `alter_default_privileges` is recorded when default privileges are changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | yes | +| `RoleName` | Either role_name should be populated or for_all_roles should be true. The role having its default privileges altered. | yes | +| `ForAllRoles` | Identifies if FOR ALL ROLES is used. | no | +| `SchemaName` | The name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `alter_function_owner` + +An event of type `alter_function_owner` is recorded when the owner of a user-defined function is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | The name of the affected user-defined function. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_schema_owner` + +An event of type `alter_schema_owner` is recorded when a schema's owner is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_table_owner` + +An event of type `alter_table_owner` is recorded when the owner of a table, view or sequence is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected object. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_type_owner` + +An event of type `alter_type_owner` is recorded when the owner of a user-defined type is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | +| `Owner` | The name of the new owner. 
| yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `change_database_privilege` + +An event of type `change_database_privilege` is recorded when privileges are +added to / removed from a user for a database object. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. 
The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_function_privilege` + + + +| Field | Description | Sensitive | +|--|--|--| +| `FuncName` | The name of the affected function. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_schema_privilege` + +An event of type `change_schema_privilege` is recorded when privileges are added to / +removed from a user for a schema object. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. 
| yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_table_privilege` + +An event of type `change_table_privilege` is recorded when privileges are added to / removed +from a user for a table, sequence or view object. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_type_privilege` + +An event of type `change_type_privilege` is recorded when privileges are added to / +removed from a user for a type object. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +## SQL Session events + +Events in this category report SQL client connections +and sessions. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these miscellaneous events are +preserved in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `SESSIONS` channel. + + +### `client_authentication_failed` + +An event of type `client_authentication_failed` is reported when a client session +did not authenticate successfully. 
+ +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Reason` | The reason for the authentication failure. See below for possible values for type `AuthFailReason`. | no | +| `Detail` | The detailed error for the authentication failure. | partially | +| `Method` | The authentication method used. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_authentication_info` + +An event of type `client_authentication_info` is reported for intermediate +steps during the authentication process. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Method` | The authentication method used, once known. 
| no | +| `Info` | The authentication progress message. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_authentication_ok` + +An event of type `client_authentication_ok` is reported when a client session +was authenticated successfully. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Method` | The authentication method used. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. 
| no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_connection_end` + +An event of type `client_connection_end` is reported when a client connection +is closed. This is reported even when authentication +fails, and even for simple cancellation messages. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_connections.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Duration` | The duration of the connection in nanoseconds. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. 
| no | + +### `client_connection_start` + +An event of type `client_connection_start` is reported when a client connection +is established. This is reported even when authentication +fails, and even for simple cancellation messages. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_connections.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | + +### `client_session_end` + +An event of type `client_session_end` is reported when a client session +is completed. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Duration` | The duration of the connection in nanoseconds. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. 
| yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +## SQL Slow Query Log + +Events in this category report slow query execution. + +Note: these events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SQL_PERF` channel. + + +### `large_row` + +An event of type `large_row` is recorded when a statement tries to write a row larger than +cluster setting `sql.guardrails.max_row_size_log` to the database. Multiple +LargeRow events will be recorded for statements writing multiple large rows. +LargeRow events are recorded before the transaction commits, so in the case +of transaction abort there will not be a corresponding row in the database. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RowSize` | | no | +| `TableID` | | no | +| `FamilyID` | | no | +| `PrimaryKey` | | yes | + +### `slow_query` + +An event of type `slow_query` is recorded when a query triggers the "slow query" condition. 
+ +As of this writing, the condition requires: +- the cluster setting `sql.log.slow_query.latency_threshold` +set to a non-zero value, AND +- EITHER of the following conditions: +  - the actual age of the query exceeds the configured threshold; AND/OR +  - the query performs a full table/index scan AND the cluster setting +`sql.log.slow_query.experimental_full_table_scans.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. 
| partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `txn_rows_read_limit` + +An event of type `txn_rows_read_limit` is recorded when a transaction tries to read more rows than +cluster setting `sql.defaults.transaction_rows_read_log`. There will only be +a single record for a single transaction (unless it is retried) even if there +are more statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +### `txn_rows_written_limit` + +An event of type `txn_rows_written_limit` is recorded when a transaction tries to write more rows +than cluster setting `sql.defaults.transaction_rows_written_log`. There will +only be a single record for a single transaction (unless it is retried) even +if there are more mutation statements within the transaction that haven't +been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +## SQL Slow Query Log (Internal) + +Events in this category report slow query execution by +internal executors, i.e., when CockroachDB internally issues +SQL statements. + +Note: these events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SQL_INTERNAL_PERF` channel. + + +### `large_row_internal` + +An event of type `large_row_internal` is recorded when an internal query tries to write a row +larger than cluster settings `sql.guardrails.max_row_size_log` or +`sql.guardrails.max_row_size_err` to the database. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RowSize` | | no | +| `TableID` | | no | +| `FamilyID` | | no | +| `PrimaryKey` | | yes | + +### `slow_query_internal` + +An event of type `slow_query_internal` is recorded when a query triggers the "slow query" condition, +and the cluster setting `sql.log.slow_query.internal_queries.enabled` is +set. +See the documentation for the event type `slow_query` for details about +the "slow query" condition. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. 
| no | + +### `txn_rows_read_limit_internal` + +An event of type `txn_rows_read_limit_internal` is recorded when an internal transaction tries to +read more rows than cluster setting `sql.defaults.transaction_rows_read_log` +or `sql.defaults.transaction_rows_read_err`. There will only be a single +record for a single transaction (unless it is retried) even if there are more +mutation statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. 
| no | + +### `txn_rows_written_limit_internal` + +An event of type `txn_rows_written_limit_internal` is recorded when an internal transaction tries to +write more rows than cluster setting +`sql.defaults.transaction_rows_written_log` or +`sql.defaults.transaction_rows_written_err`. There will only be a single +record for a single transaction (unless it is retried) even if there are more +mutation statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. 
| no | + +## SQL User and Role operations + +Events in this category pertain to SQL statements that modify the +properties of users and roles. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `USER_ADMIN` channel. + + +### `alter_role` + +An event of type `alter_role` is recorded when a role is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the affected user/role. | yes | +| `Options` | The options set on the user/role. | no | +| `SetInfo` | Information corresponding to an ALTER ROLE SET statement. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_role` + +An event of type `create_role` is recorded when a role is created. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the new user/role. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_role` + +An event of type `drop_role` is recorded when a role is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the affected user/role. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `grant_role` + +An event of type `grant_role` is recorded when a role is granted. + + +| Field | Description | Sensitive | +|--|--|--| +| `GranteeRoles` | The roles being granted to. | yes | +| `Members` | The roles being granted. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `password_hash_converted` + +An event of type `password_hash_converted` is recorded when the password credentials +are automatically converted server-side. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the user/role whose credentials have been converted. | yes | +| `OldMethod` | The previous hash method. | no | +| `NewMethod` | The new hash method. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Storage telemetry events + + + +Events in this category are logged to the `TELEMETRY` channel. + + +### `level_stats` + +An event of type `level_stats` contains per-level statistics for an LSM. + + +| Field | Description | Sensitive | +|--|--|--| +| `Level` | level is the level ID in a LSM (e.g. level(L0) == 0, etc.) | no | +| `NumFiles` | num_files is the number of files in the level (gauge). | no | +| `SizeBytes` | size_bytes is the size of the level, in bytes (gauge). | no | +| `Score` | score is the compaction score of the level (gauge). | no | +| `BytesIn` | bytes_in is the number of bytes written to this level (counter). | no | +| `BytesIngested` | bytes_ingested is the number of bytes ingested into this level (counter). | no | +| `BytesMoved` | bytes_moved is the number of bytes moved into this level via a move-compaction (counter). | no | +| `BytesRead` | bytes_read is the number of bytes read from this level, during compactions (counter). | no | +| `BytesCompacted` | bytes_compacted is the number of bytes written to this level during compactions (counter). | no | +| `BytesFlushed` | bytes flushed is the number of bytes flushed to this level. This value is always zero for levels other than L0 (counter). 
| no | +| `TablesCompacted` | tables_compacted is the count of tables compacted into this level (counter). | no | +| `TablesFlushed` | tables_flushed is the count of tables flushed into this level (counter). | no | +| `TablesIngested` | tables_ingested is the count of tables ingested into this level (counter). | no | +| `TablesMoved` | tables_moved is the count of tables moved into this level via move-compactions (counter). | no | +| `NumSublevels` | num_sublevel is the count of sublevels for the level. This value is always zero for levels other than L0 (gauge). | no | + + + +### `store_stats` + +An event of type `store_stats` contains per store stats. + +Note that because stats are scoped to the lifetime of the process, counters +(and certain gauges) will be reset across node restarts. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeId` | node_id is the ID of the node. | no | +| `StoreId` | store_id is the ID of the store. | no | +| `Levels` | levels is a nested message containing per-level statistics. | yes | +| `CacheSize` | cache_size is the size of the cache for the store, in bytes (gauge). | no | +| `CacheCount` | cache_count is the number of items in the cache (gauge). | no | +| `CacheHits` | cache_hits is the number of cache hits (counter). | no | +| `CacheMisses` | cache_misses is the number of cache misses (counter). | no | +| `CompactionCountDefault` | compaction_count_default is the count of default compactions (counter). | no | +| `CompactionCountDeleteOnly` | compaction_count_delete_only is the count of delete-only compactions (counter). | no | +| `CompactionCountElisionOnly` | compaction_count_elision_only is the count of elision-only compactions (counter). | no | +| `CompactionCountMove` | compaction_count_move is the count of move-compactions (counter). | no | +| `CompactionCountRead` | compaction_count_read is the count of read-compactions (counter). 
| no | +| `CompactionCountRewrite` | compaction_count_rewrite is the count of rewrite-compactions (counter). | no | +| `CompactionNumInProgress` | compactions_num_in_progress is the number of compactions in progress (gauge). | no | +| `CompactionMarkedFiles` | compaction_marked_files is the count of files marked for compaction (gauge). | no | +| `FlushCount` | flush_count is the number of flushes (counter). | no | +| `FlushIngestCount` | | no | +| `FlushIngestTableCount` | | no | +| `FlushIngestTableBytes` | | no | +| `MemtableSize` | memtable_size is the total size allocated to all memtables and (large) batches, in bytes (gauge). | no | +| `MemtableCount` | memtable_count is the count of memtables (gauge). | no | +| `MemtableZombieCount` | memtable_zombie_count is the count of memtables no longer referenced by the current DB state, but still in use by an iterator (gauge). | no | +| `MemtableZombieSize` | memtable_zombie_size is the size, in bytes, of all zombie memtables (gauge). | no | +| `WalLiveCount` | wal_live_count is the count of live WAL files (gauge). | no | +| `WalLiveSize` | wal_live_size is the size, in bytes, of live data in WAL files. With WAL recycling, this value is less than the actual on-disk size of the WAL files (gauge). | no | +| `WalObsoleteCount` | wal_obsolete_count is the count of obsolete WAL files (gauge). | no | +| `WalObsoleteSize` | wal_obsolete_size is the size of obsolete WAL files, in bytes (gauge). | no | +| `WalPhysicalSize` | wal_physical_size is the size, in bytes, of the WAL files on disk (gauge). | no | +| `WalBytesIn` | wal_bytes_in is the number of logical bytes written to the WAL (counter). | no | +| `WalBytesWritten` | wal_bytes_written is the number of bytes written to the WAL (counter). | no | +| `TableObsoleteCount` | table_obsolete_count is the number of tables which are no longer referenced by the current DB state or any open iterators (gauge). 
| no | +| `TableObsoleteSize` | table_obsolete_size is the size, in bytes, of obsolete tables (gauge). | no | +| `TableZombieCount` | table_zombie_count is the number of tables no longer referenced by the current DB state, but are still in use by an open iterator (gauge). | no | +| `TableZombieSize` | table_zombie_size is the size, in bytes, of zombie tables (gauge). | no | +| `RangeKeySetsCount` | range_key_sets_count is the approximate count of internal range key sets in the store. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Telemetry events + + + +Events in this category are logged to the `TELEMETRY` channel. + + +### `captured_index_usage_stats` + +An event of type `captured_index_usage_stats` + + +| Field | Description | Sensitive | +|--|--|--| +| `TotalReadCount` | TotalReadCount is the number of times the index has been read. | no | +| `LastRead` | LastRead is the timestamp at which the index was last read. | no | +| `TableID` | TableID is the ID of the table on which the index was created. This is same as descpb.TableID and is unique within the cluster. | no | +| `IndexID` | IndexID is the ID of the index within the scope of the given table. | no | +| `DatabaseName` | DatabaseName is the name of the database in which the index was created. | no | +| `TableName` | TableName is the name of the table on which the index was created. | no | +| `IndexName` | IndexName is the name of the index within the scope of the given table. | no | +| `IndexType` | IndexType is the type of the index. Index types include "primary" and "secondary". | no | +| `IsUnique` | IsUnique indicates if the index has a UNIQUE constraint. | no | +| `IsInverted` | IsInverted indicates if the index is an inverted index. | no | +| `CreatedAt` | CreatedAt is the timestamp at which the index was created. 
| no | +| `SchemaName` | SchemaName is the name of the schema in which the index was created. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `changefeed_emitted_bytes` + +An event of type `changefeed_emitted_bytes` is an event representing the bytes emitted by a changefeed over an interval. + + +| Field | Description | Sensitive | +|--|--|--| +| `JobId` | The job id for enterprise changefeeds. | no | +| `EmittedBytes` | The number of bytes emitted. | no | +| `LoggingInterval` | The time period in nanoseconds between emitting telemetry events of this type (per-aggregator). | no | +| `Closing` | Flag to indicate that the changefeed is closing. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | + +### `changefeed_failed` + +An event of type `changefeed_failed` is an event for any Changefeed failure since the plan hook +was triggered. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `FailureType` | The reason / environment with which the changefeed failed (ex: connection_closed, changefeed_behind) | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | + +### `create_changefeed` + +An event of type `create_changefeed` is an event for any CREATE CHANGEFEED query that +successfully starts running. Failed CREATE statements will show up as +ChangefeedFailed events. + + +| Field | Description | Sensitive | +|--|--|--| +| `Transformation` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | + +### `hot_ranges_stats` + +An event of type `hot_ranges_stats` + + +| Field | Description | Sensitive | +|--|--|--| +| `RangeID` | | no | +| `Qps` | | no | +| `DatabaseName` | DatabaseName is the name of the database in which the index was created. 
| yes | +| `TableName` | TableName is the name of the table on which the index was created. | yes | +| `IndexName` | IndexName is the name of the index within the scope of the given table. | yes | +| `SchemaName` | SchemaName is the name of the schema in which the index was created. | yes | +| `LeaseholderNodeID` | LeaseholderNodeID indicates the Node ID that is the current leaseholder for the given range. | no | +| `WritesPerSecond` | Writes per second is the recent number of keys written per second on this range. | no | +| `ReadsPerSecond` | Reads per second is the recent number of keys read per second on this range. | no | +| `WriteBytesPerSecond` | Write bytes per second is the recent number of bytes written per second on this range. | no | +| `ReadBytesPerSecond` | Read bytes per second is the recent number of bytes read per second on this range. | no | +| `CPUTimePerSecond` | CPU time per second is the recent cpu usage in nanoseconds of this range. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `recovery_event` + +An event of type `recovery_event` is an event that is logged on every invocation of BACKUP, +RESTORE, and on every BACKUP schedule creation, with the appropriate subset +of fields populated depending on the type of event. This event is also +logged whenever a BACKUP and RESTORE job completes or fails. + + +| Field | Description | Sensitive | +|--|--|--| +| `RecoveryType` | RecoveryType is the type of recovery described by this event, which is one of - backup - scheduled_backup - create_schedule - restore

It can also be a job event corresponding to the recovery, which is one of - backup_job - scheduled_backup_job - restore_job | no | +| `TargetScope` | TargetScope is the largest scope of the targets that the user is backing up or restoring based on the following order: table < schema < database < full cluster. | no | +| `IsMultiregionTarget` | IsMultiregionTarget is true if any of the targets contain objects with multi-region primitives. | no | +| `TargetCount` | TargetCount is the number of targets in the BACKUP/RESTORE. | no | +| `DestinationSubdirType` | DestinationSubdirType is - latest: if using the latest subdir - standard: if using a date-based subdir - custom: if using a custom subdir that's not date-based | no | +| `DestinationStorageTypes` | DestinationStorageTypes are the types of storage that the user is backing up to or restoring from. | no | +| `DestinationAuthTypes` | DestinationAuthTypes are the types of authentication methods that the user is using to access the destination storage. | no | +| `IsLocalityAware` | IsLocalityAware indicates if the BACKUP or RESTORE is locality aware. | no | +| `AsOfInterval` | AsOfInterval is the time interval in nanoseconds between the statement timestamp and the timestamp resolved by the AS OF SYSTEM TIME expression. The interval is expressed in nanoseconds. | no | +| `WithRevisionHistory` | WithRevisionHistory is true if the BACKUP includes revision history. | no | +| `HasEncryptionPassphrase` | HasEncryptionPassphrase is true if the user provided an encryption passphrase to encrypt/decrypt their backup. | no | +| `KMSType` | KMSType is the type of KMS the user is using to encrypt/decrypt their backup. | no | +| `KMSCount` | KMSCount is the number of KMS the user is using. | no | +| `Options` | Options contain all the names of the options specified by the user in the BACKUP or RESTORE statement. For options that are accompanied by a value, only those with non-empty values will be present.

It's important to note that there are no option values anywhere in the event payload. Future changes to telemetry should refrain from adding values to the payload unless they are properly redacted. | no | +| `DebugPauseOn` | DebugPauseOn is the type of event that the restore should pause on for debugging purposes. Currently only "error" is supported. | no | +| `JobID` | JobID is the ID of the BACKUP/RESTORE job. | no | +| `ResultStatus` | ResultStatus indicates whether the job succeeded or failed. | no | +| `ErrorText` | ErrorText is the text of the error that caused the job to fail. | partially | +| `RecurringCron` | RecurringCron is the crontab for the incremental backup. | no | +| `FullBackupCron` | FullBackupCron is the crontab for the full backup. | no | +| `CustomFirstRunTime` | CustomFirstRunTime is the timestamp for the user configured first run time. Expressed as nanoseconds since the Unix epoch. | no | +| `OnExecutionFailure` | OnExecutionFailure describes the desired behavior if the schedule fails to execute. | no | +| `OnPreviousRunning` | OnPreviousRunning describes the desired behavior if the previously scheduled BACKUP is still running. | no | +| `IgnoreExistingBackup` | IgnoreExistingBackup is true iff the BACKUP schedule should still be created even if a backup is already present in its destination. | no | +| `ApplicationName` | The application name for the session where recovery event was created. | no | +| `NumRows` | NumRows is the number of rows successfully imported, backed up or restored. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `sampled_query` + +An event of type `sampled_query` is the SQL query event logged to the telemetry channel. It +contains common SQL event/execution details. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `SkippedQueries` | skipped_queries indicate how many SQL statements were not considered for sampling prior to this one. If the field is omitted, or its value is zero, this indicates that no statement was omitted since the last event. | no | +| `CostEstimate` | Cost of the query as estimated by the optimizer. | no | +| `Distribution` | The distribution of the DistSQL query plan (local, full, or partial). | no | +| `PlanGist` | The query's plan gist bytes as a base64 encoded string. | no | +| `SessionID` | SessionID is the ID of the session that initiated the query. | no | +| `Database` | Name of the database that initiated the query. | no | +| `StatementID` | Statement ID of the query. | no | +| `TransactionID` | Transaction ID of the query. | no | +| `StatementFingerprintID` | Statement fingerprint ID of the query. | no | +| `MaxFullScanRowsEstimate` | Maximum number of rows scanned by a full scan, as estimated by the optimizer. | no | +| `TotalScanRowsEstimate` | Total number of rows read by all scans in the query, as estimated by the optimizer. | no | +| `OutputRowsEstimate` | The number of rows output by the query, as estimated by the optimizer. | no | +| `StatsAvailable` | Whether table statistics were available to the optimizer when planning the query. | no | +| `NanosSinceStatsCollected` | The maximum number of nanoseconds that have passed since stats were collected on any table scanned by this query. | no | +| `BytesRead` | The number of bytes read from disk. | no | +| `RowsRead` | The number of rows read from disk. | no | +| `RowsWritten` | The number of rows written. | no | +| `InnerJoinCount` | The number of inner joins in the query plan. | no | +| `LeftOuterJoinCount` | The number of left (or right) outer joins in the query plan. | no | +| `FullOuterJoinCount` | The number of full outer joins in the query plan. | no | +| `SemiJoinCount` | The number of semi joins in the query plan. 
| no | +| `AntiJoinCount` | The number of anti joins in the query plan. | no | +| `IntersectAllJoinCount` | The number of intersect all joins in the query plan. | no | +| `ExceptAllJoinCount` | The number of except all joins in the query plan. | no | +| `HashJoinCount` | The number of hash joins in the query plan. | no | +| `CrossJoinCount` | The number of cross joins in the query plan. | no | +| `IndexJoinCount` | The number of index joins in the query plan. | no | +| `LookupJoinCount` | The number of lookup joins in the query plan. | no | +| `MergeJoinCount` | The number of merge joins in the query plan. | no | +| `InvertedJoinCount` | The number of inverted joins in the query plan. | no | +| `ApplyJoinCount` | The number of apply joins in the query plan. | no | +| `ZigZagJoinCount` | The number of zig zag joins in the query plan. | no | +| `ContentionNanos` | The duration of time in nanoseconds that the query experienced contention. | no | +| `Regions` | The regions of the nodes where SQL processors ran. | no | +| `NetworkBytesSent` | The number of network bytes sent by nodes for this query. | no | +| `MaxMemUsage` | The maximum amount of memory usage by nodes for this query. | no | +| `MaxDiskUsage` | The maximum amount of disk usage by nodes for this query. | no | +| `KVBytesRead` | The number of bytes read at the KV layer for this query. | no | +| `KVRowsRead` | The number of rows read at the KV layer for this query. | no | +| `NetworkMessages` | The number of network messages sent by nodes for this query. | no | +| `IndexRecommendations` | Generated index recommendations for this query. | no | +| `ScanCount` | The number of scans in the query plan. | no | +| `ScanWithStatsCount` | The number of scans using statistics (including forecasted statistics) in the query plan. | no | +| `ScanWithStatsForecastCount` | The number of scans using forecasted statistics in the query plan. 
| no | +| `TotalScanRowsWithoutForecastsEstimate` | Total number of rows read by all scans in the query, as estimated by the optimizer without using forecasts. | no | +| `NanosSinceStatsForecasted` | The greatest quantity of nanoseconds that have passed since the forecast time (or until the forecast time, if it is in the future, in which case it will be negative) for any table with forecasted stats scanned by this query. | no | +| `Indexes` | The list of indexes used by this query. | no | +| `CpuTimeNanos` | Collects the cumulative CPU time spent executing SQL operations in nanoseconds. Currently, it is only collected for statements without mutations that have a vectorized plan. | no | +| `KvGrpcCalls` | The number of grpc calls done to get data from KV nodes | no | +| `KvTimeNanos` | Cumulated time spent waiting for a KV request. This includes disk IO time and potentially network time (if any of the keys are not local). | no | +| `ServiceLatencyNanos` | The time to service the query, from start of parse to end of execute. | no | +| `OverheadLatencyNanos` | The difference between service latency and the sum of parse latency + plan latency + run latency . | no | +| `RunLatencyNanos` | The time to run the query and fetch or compute the result rows. | no | +| `PlanLatencyNanos` | The time to transform the AST into a logical query plan. | no | +| `IdleLatencyNanos` | The time between statement executions in a transaction | no | +| `ParseLatencyNanos` | The time to transform the SQL string into an abstract syntax tree (AST). | no | +| `MvccStepCount` | StepCount collects the number of times the iterator moved forward or backward over the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccStepCountInternal` | StepCountInternal collects the number of times the iterator moved forward or backward over LSM internal keys. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. 
| no | +| `MvccSeekCount` | SeekCount collects the number of times the iterator moved to a specific key/value pair in the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccSeekCountInternal` | SeekCountInternal collects the number of times the iterator moved to a specific LSM internal key. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccBlockBytes` | BlockBytes collects the bytes in the loaded SSTable data blocks. For details, see pebble.InternalIteratorStats. | no | +| `MvccBlockBytesInCache` | BlockBytesInCache collects the subset of BlockBytes in the block cache. For details, see pebble.InternalIteratorStats. | no | +| `MvccKeyBytes` | KeyBytes collects the bytes in keys that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccValueBytes` | ValueBytes collects the bytes in values that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccPointCount` | PointCount collects the count of point keys iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccPointsCoveredByRangeTombstones` | PointsCoveredByRangeTombstones collects the count of point keys that were iterated over that were covered by range tombstones. For details, see pebble.InternalIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeyCount` | RangeKeyCount collects the count of range keys encountered during iteration. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeyContainedPoints` | RangeKeyContainedPoints collects the count of point keys encountered within the bounds of a range key. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. 
| no | +| `MvccRangeKeySkippedPoints` | RangeKeySkippedPoints collects the count of the subset of ContainedPoints point keys that were skipped during iteration due to range-key masking. For details, see pkg/storage/engine.go, pebble.RangeKeyIteratorStats, and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `SchemaChangerMode` | SchemaChangerMode is the mode that was used to execute the schema change, if any. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. 
| partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `schema_descriptor` + +An event of type `schema_descriptor` is an event for schema telemetry, whose purpose is +to take periodic snapshots of the cluster's SQL schema and publish them in +the telemetry log channel. For all intents and purposes, the data in such a +snapshot can be thought of the outer join of certain system tables: +namespace, descriptor, and at some point perhaps zones, etc. + +Snapshots are too large to conveniently be published as a single log event, +so instead they're broken down into SchemaDescriptor events which +contain the data in one record of this outer join projection. These events +are prefixed by a header (a SchemaSnapshotMetadata event). + + +| Field | Description | Sensitive | +|--|--|--| +| `SnapshotID` | SnapshotID is the unique identifier of the snapshot that this event is part of. | no | +| `ParentDatabaseID` | ParentDatabaseID matches the same key column in system.namespace. | no | +| `ParentSchemaID` | ParentSchemaID matches the same key column in system.namespace. | no | +| `Name` | Name matches the same key column in system.namespace. | no | +| `DescID` | DescID matches the 'id' column in system.namespace and system.descriptor. | no | +| `Desc` | Desc matches the 'descriptor' column in system.descriptor. Some contents of the descriptor may be redacted to prevent leaking PII. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `schema_snapshot_metadata` + +An event of type `schema_snapshot_metadata` is an event describing a schema snapshot, which +is a set of SchemaDescriptor messages sharing the same SnapshotID. + + +| Field | Description | Sensitive | +|--|--|--| +| `SnapshotID` | SnapshotID is the unique identifier of this snapshot. | no | +| `NumRecords` | NumRecords is how many SchemaDescriptor events are in the snapshot. | no | +| `AsOfTimestamp` | AsOfTimestamp is when the snapshot was taken. This is equivalent to the timestamp given in the AS OF SYSTEM TIME clause when querying the namespace and descriptor tables in the system database. Expressed as nanoseconds since the Unix epoch. | no | +| `Errors` | Errors records any errors encountered when post-processing this snapshot, which includes the redaction of any potential PII. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Zone config events + +Events in this category pertain to zone configuration changes on +the SQL schema or system ranges. + +When zone configs apply to individual tables or other objects in a +SQL logical schema, they are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these zone config events are preserved +in each tenant's own `system.eventlog` table. + +When they apply to cluster-level ranges (e.g., the system zone config), +they are stored in the system tenant's own `system.eventlog` table. + +Events in this category are logged to the `OPS` channel. + + +### `remove_zone_config` + +An event of type `remove_zone_config` is recorded when a zone config is removed. 
+ + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + +### `set_zone_config` + +An event of type `set_zone_config` is recorded when a zone config is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `ResolvedOldConfig` | The string representation of the resolved old zone config. This is not necessarily the same as the zone config that was previously set -- as it includes the resolved values of the zone config options. In other words, a zone config that hasn't been properly "set" yet (and inherits from its parent) will have a resolved_old_config that has details of the values it inherits from its parent. 
This is particularly useful to get a proper diff between the old and new zone config. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + + + + +## Enumeration types + +### `AuthFailReason` + +AuthFailReason is the inventory of possible reasons for an +authentication failure. + + +| Value | Textual alias in code or documentation | Description | +|--|--|--| +| 0 | UNKNOWN | is reported when the reason is unknown. | +| 1 | USER_RETRIEVAL_ERROR | occurs when there was an internal error accessing the principals. | +| 2 | USER_NOT_FOUND | occurs when the principal is unknown. | +| 3 | LOGIN_DISABLED | occurs when the user does not have LOGIN privileges. 
| +| 4 | METHOD_NOT_FOUND | occurs when no HBA rule matches or the method does not exist. | +| 5 | PRE_HOOK_ERROR | occurs when the authentication handshake encountered a protocol error. | +| 6 | CREDENTIALS_INVALID | occurs when the client-provided credentials were invalid. | +| 7 | CREDENTIALS_EXPIRED | occurs when the credentials provided by the client are expired. | + + + diff --git a/src/current/_includes/cockroach-generated/release-23.1/logformats.md b/src/current/_includes/cockroach-generated/release-23.1/logformats.md new file mode 100644 index 00000000000..8d94fd08e1c --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-23.1/logformats.md @@ -0,0 +1,550 @@ + +The supported log output formats are documented below. + + +- [`crdb-v1`](#format-crdb-v1) + +- [`crdb-v1-count`](#format-crdb-v1-count) + +- [`crdb-v1-tty`](#format-crdb-v1-tty) + +- [`crdb-v1-tty-count`](#format-crdb-v1-tty-count) + +- [`crdb-v2`](#format-crdb-v2) + +- [`crdb-v2-tty`](#format-crdb-v2-tty) + +- [`json`](#format-json) + +- [`json-compact`](#format-json-compact) + +- [`json-fluent`](#format-json-fluent) + +- [`json-fluent-compact`](#format-json-fluent-compact) + + + +## Format `crdb-v1` + +This is the legacy file format used from CockroachDB v1.0. + +Each log entry is emitted using a common prefix, described below, +followed by: + +- The logging context tags enclosed between `[` and `]`, if any. It is possible + for this to be omitted if there were no context tags. +- the text of the log entry. + +Beware that the text of the log entry can span multiple lines. The following caveats apply: + + +- The text of the log entry can start with text enclosed between `[` and `]`. + If there were no logging tags to start with, it is not possible to distinguish between + logging context tag information and a `[...]` string in the main text of the + log entry. This means that this format is ambiguous. For an unambiguous alternative, + consider `crdb-v1-count`. 
+ +- The text of the log entry can embed arbitrary application-level strings, + including strings that represent log entries. In particular, an accident + of implementation can cause the common entry prefix (described below) + to also appear on a line of its own, as part of the payload of a previous + log entry. There is no automated way to recognize when this occurs. + Care must be taken by a human observer to recognize these situations. + +- The log entry parser provided by CockroachDB to read log files is faulty + and is unable to recognize the aforementioned pitfall; nor can it read + entries larger than 64KiB successfully. Generally, use of this internal + log entry parser is discouraged for entries written with this format. + +See the newer format `crdb-v2` for an alternative +without these limitations. + +### Header lines + +At the beginning of each file, a header is printed using a similar format as +regular log entries. This header reports when the file was created, +which parameters were used to start the server, the server identifiers +if known, and other metadata about the running process. + +- This header appears to be logged at severity `INFO` (with an `I` prefix + at the start of the line) even though it does not really have a severity. +- The header is printed unconditionally even when a filter is configured to + omit entries at the `INFO` level. + +### Common log entry prefix + +Each line of output starts with the following prefix: + + Lyymmdd hh:mm:ss.uuuuuu goid [chan@]file:line marker + +| Field | Description | +|-----------------|--------------------------------------------------------------------------------------------------------------------------------------| +| L | A single character, representing the [log level](logging.html#logging-levels-severities) (e.g., `I` for `INFO`). | +| yy | The year (zero padded; i.e., 2016 is `16`). | +| mm | The month (zero padded; i.e., May is `05`). | +| dd | The day (zero padded). 
| +| hh:mm:ss.uuuuuu | Time in hours, minutes and fractional seconds. Timezone is UTC. | +| goid | The goroutine id (omitted if zero for use by tests). | +| chan | The channel number (omitted if zero for backward compatibility). | +| file | The file name where the entry originated. | +| line | The line number where the entry originated. | +| marker | Redactability marker "⋮" (see below for details). | + +The redactability marker can be empty; in this case, its position in the common prefix is +a double ASCII space character which can be used to reliably identify this situation. + +If the marker "⋮" is present, the remainder of the log entry +contains delimiters (‹...›) around +fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) when log redaction is requested. + + +## Format `crdb-v1-count` + +This is an alternative, backward-compatible legacy file format used from CockroachDB v2.0. + +Each log entry is emitted using a common prefix, described below, +followed by the text of the log entry. + +Beware that the text of the log entry can span multiple lines. The following caveats apply: + + +- The text of the log entry can embed arbitrary application-level strings, + including strings that represent log entries. In particular, an accident + of implementation can cause the common entry prefix (described below) + to also appear on a line of its own, as part of the payload of a previous + log entry. There is no automated way to recognize when this occurs. + Care must be taken by a human observer to recognize these situations. + +- The log entry parser provided by CockroachDB to read log files is faulty + and is unable to recognize the aforementioned pitfall; nor can it read + entries larger than 64KiB successfully. 
Generally, use of this internal + log entry parser is discouraged for entries written with this format. + +See the newer format `crdb-v2` for an alternative +without these limitations. + +### Header lines + +At the beginning of each file, a header is printed using a similar format as +regular log entries. This header reports when the file was created, +which parameters were used to start the server, the server identifiers +if known, and other metadata about the running process. + +- This header appears to be logged at severity `INFO` (with an `I` prefix + at the start of the line) even though it does not really have a severity. +- The header is printed unconditionally even when a filter is configured to + omit entries at the `INFO` level. + +### Common log entry prefix + +Each line of output starts with the following prefix: + + Lyymmdd hh:mm:ss.uuuuuu goid [chan@]file:line marker tags counter + +| Field | Description | +|-----------------|--------------------------------------------------------------------------------------------------------------------------------------| +| L | A single character, representing the [log level](logging.html#logging-levels-severities) (e.g., `I` for `INFO`). | +| yy | The year (zero padded; i.e., 2016 is `16`). | +| mm | The month (zero padded; i.e., May is `05`). | +| dd | The day (zero padded). | +| hh:mm:ss.uuuuuu | Time in hours, minutes and fractional seconds. Timezone is UTC. | +| goid | The goroutine id (omitted if zero for use by tests). | +| chan | The channel number (omitted if zero for backward compatibility). | +| file | The file name where the entry originated. | +| line | The line number where the entry originated. | +| marker | Redactability marker ` + redactableIndicator + ` (see below for details). | +| tags | The logging tags, enclosed between `[` and `]`. May be absent. | +| counter | The entry counter. Always present. 
| + +The redactability marker can be empty; in this case, its position in the common prefix is +a double ASCII space character which can be used to reliably identify this situation. + +If the marker ` + redactableIndicator + ` is present, the remainder of the log entry +contains delimiters (‹...›) around +fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) when log redaction is requested. + + +## Format `crdb-v1-tty` + +Same textual format as `crdb-v1`. + +In addition, if the output stream happens to be a VT-compatible terminal, +and the flag `no-color` was *not* set in the configuration, the entries +are decorated using ANSI color codes. + +## Format `crdb-v1-tty-count` + +Same textual format as `crdb-v1-count`. + +In addition, if the output stream happens to be a VT-compatible terminal, +and the flag `no-color` was *not* set in the configuration, the entries +are decorated using ANSI color codes. + +## Format `crdb-v2` + +This is the main file format used from CockroachDB v21.1. + +Each log entry is emitted using a common prefix, described below, +followed by the text of the log entry. + +### Entry format + +Each line of output starts with the following prefix: + + Lyymmdd hh:mm:ss.uuuuuu goid [chan@]file:line marker [tags...] counter cont + +| Field | Description | +|-----------------|--------------------------------------------------------------------------------------------------------------------------------------| +| L | A single character, representing the [log level](logging.html#logging-levels-severities) (e.g., `I` for `INFO`). | +| yy | The year (zero padded; i.e., 2016 is `16`). | +| mm | The month (zero padded; i.e., May is `05`). | +| dd | The day (zero padded). | +| hh:mm:ss.uuuuuu | Time in hours, minutes and fractional seconds. Timezone is UTC. 
| +| goid | The goroutine id (zero when cannot be determined). | +| chan | The channel number (omitted if zero for backward compatibility). | +| file | The file name where the entry originated. Also see below. | +| line | The line number where the entry originated. | +| marker | Redactability marker "⋮" (see below for details). | +| tags | The logging tags, enclosed between `[` and `]`. See below. | +| counter | The optional entry counter (see below for details). | +| cont | Continuation mark for structured and multi-line entries. See below. | + +The `chan@` prefix before the file name indicates the logging channel, +and is omitted if the channel is `DEV`. + +The file name may be prefixed by the string `(gostd) ` to indicate +that the log entry was produced inside the Go standard library, instead +of a CockroachDB component. Entry parsers must be configured to ignore this prefix +when present. + +`marker` can be empty; in this case, its position in the common prefix is +a double ASCII space character which can be used to reliably identify this situation. +If the marker "⋮" is present, the remainder of the log entry +contains delimiters (‹...›) +around fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) +when log redaction is requested. + +The logging `tags` are enclosed between square brackets `[...]`, +and the syntax `[-]` is used when there are no logging tags +associated with the log entry. + +`counter` is numeric, and is incremented for every +log entry emitted to this sink. (There is thus one counter sequence per +sink.) For entries that do not have a counter value +associated (e.g., header entries in file sinks), the counter position +in the common prefix is empty: `tags` is then +followed by two ASCII space characters, instead of one space; the `counter`, +and another space. 
The presence of the two ASCII spaces indicates +reliably that no counter was present. + +`cont` is a format/continuation indicator: + +| Continuation indicator | ASCII | Description | +|------------------------|-------|--| +| space | 0x20 | Start of an unstructured entry. | +| equal sign, "=" | 0x3d | Start of a structured entry. | +| exclamation mark, "!" | 0x21 | Start of an embedded stack trace. | +| plus sign, "+" | 0x2b | Continuation of a multi-line entry. The payload contains a newline character at this position. | +| vertical bar | 0x7c | Continuation of a large entry. | + +### Examples + +Example single-line unstructured entry: + +~~~ +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 started with engine type ‹2› +~~~ + +Example multi-line unstructured entry: + +~~~ +I210116 21:49:17.083093 14 1@cli/start.go:690 ⋮ [-] 40 node startup completed: +I210116 21:49:17.083093 14 1@cli/start.go:690 ⋮ [-] 40 +CockroachDB node starting at 2021-01-16 21:49 (took 0.0s) +~~~ + +Example structured entry: + +~~~ +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 ={"Timestamp":1610833757080706620,"EventType":"node_restart"} +~~~ + +Example long entries broken up into multiple lines: + +~~~ +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.... +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 |aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +~~~ + +~~~ +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 ={"Timestamp":1610833757080706620,"EventTy... +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 |pe":"node_restart"} +~~~ + +### Backward-compatibility notes + +Entries in this format can be read by most `crdb-v1` log parsers, +in particular the one included in the DB console and +also the [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) +facility. 
+ +However, implementers of previous version parsers must +understand that the logging tags field is now always +included, and the lack of logging tags is included +by a tag string set to `[-]`. + +Likewise, the entry counter is now also always included, +and there is a special character after `counter` +to indicate whether the remainder of the line is a +structured entry, or a continuation of a previous entry. + +Finally, in the previous format, structured entries +were prefixed with the string `Structured entry: `. In +the new format, they are prefixed by the `=` continuation +indicator. + + +## Format `crdb-v2-tty` + +Same textual format as `crdb-v2`. + +In addition, if the output stream happens to be a VT-compatible terminal, +and the flag `no-color` was *not* set in the configuration, the entries +are decorated using ANSI color codes. + +## Format `json` + +This format emits log entries as a JSON payload. + +The JSON object is guaranteed to not contain unescaped newlines +or other special characters, and the entry as a whole is followed +by a newline character. This makes the format suitable for +processing over a stream unambiguously. + +Each entry contains at least the following fields: + +| Field | Description | +|-------|-------------| +| `file` | The name of the source file where the event was emitted. | +| `goroutine` | The identifier of the goroutine where the event was emitted. | +| `line` | The line number where the event was emitted in the source. | +| `redactable` | Whether the payload is redactable (see below for details). | +| `timestamp` | The timestamp at which the event was emitted on the logging channel. | +| `version` | The binary version with which the event was generated. 
| + + +After a couple of *header* entries written at the beginning of each log sink, +all subsequent log entries also contain the following fields: + +| Field | Description | +|---------------------|-------------| +| `channel` | The name of the logging channel where the event was sent. | +| `severity` | The severity of the event. | +| `channel_numeric` | The numeric identifier for the logging channel where the event was sent. | +| `entry_counter` | The entry number on this logging sink, relative to the last process restart. | +| `severity_numeric` | The numeric value of the severity of the event. | + + +Additionally, the following fields are conditionally present: + +| Field | Description | +|---------------------|-------------| +| `node_id` | The node ID where the event was generated, once known. Only reported for single-tenant or KV servers. | +| `cluster_id` | The cluster ID where the event was generated, once known. Only reported for single-tenant or KV servers. | +| `instance_id` | The SQL instance ID where the event was generated, once known. Only reported for multi-tenant SQL servers. | +| `tenant_id` | The SQL tenant ID where the event was generated, once known. Only reported for multi-tenant SQL servers. | +| `tags` | The logging context tags for the entry, if there were context tags. | +| `message` | For unstructured events, the flat text payload. | +| `event` | The logging event, if structured (see below for details). | +| `stacks` | Goroutine stacks, for fatal events. | + +When an entry is structured, the `event` field maps to a dictionary +whose structure is one of the documented structured events. See the [reference documentation](eventlog.html) +for structured events for a list of possible payloads. + +When the entry is marked as `redactable`, the `tags`, `message`, and/or `event` payloads +contain delimiters (‹...›) around +fields that are considered sensitive. 
These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) when log redaction is requested. + + + + +## Format `json-compact` + +This format emits log entries as a JSON payload. + +The JSON object is guaranteed to not contain unescaped newlines +or other special characters, and the entry as a whole is followed +by a newline character. This makes the format suitable for +processing over a stream unambiguously. + +Each entry contains at least the following fields: + +| Field | Description | +|-------|-------------| +| `f` | The name of the source file where the event was emitted. | +| `g` | The identifier of the goroutine where the event was emitted. | +| `l` | The line number where the event was emitted in the source. | +| `r` | Whether the payload is redactable (see below for details). | +| `t` | The timestamp at which the event was emitted on the logging channel. | +| `v` | The binary version with which the event was generated. | + + +After a couple of *header* entries written at the beginning of each log sink, +all subsequent log entries also contain the following fields: + +| Field | Description | +|---------------------|-------------| +| `C` | The name of the logging channel where the event was sent. | +| `sev` | The severity of the event. | +| `c` | The numeric identifier for the logging channel where the event was sent. | +| `n` | The entry number on this logging sink, relative to the last process restart. | +| `s` | The numeric value of the severity of the event. | + + +Additionally, the following fields are conditionally present: + +| Field | Description | +|---------------------|-------------| +| `N` | The node ID where the event was generated, once known. Only reported for single-tenant or KV servers. | +| `x` | The cluster ID where the event was generated, once known. Only reported for single-tenant of KV servers. 
| +| `q` | The SQL instance ID where the event was generated, once known. Only reported for multi-tenant SQL servers. | +| `T` | The SQL tenant ID where the event was generated, once known. Only reported for multi-tenant SQL servers. | +| `tags` | The logging context tags for the entry, if there were context tags. | +| `message` | For unstructured events, the flat text payload. | +| `event` | The logging event, if structured (see below for details). | +| `stacks` | Goroutine stacks, for fatal events. | + +When an entry is structured, the `event` field maps to a dictionary +whose structure is one of the documented structured events. See the [reference documentation](eventlog.html) +for structured events for a list of possible payloads. + +When the entry is marked as `redactable`, the `tags`, `message`, and/or `event` payloads +contain delimiters (‹...›) around +fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) when log redaction is requested. + + + + +## Format `json-fluent` + +This format emits log entries as a JSON payload. + +The JSON object is guaranteed to not contain unescaped newlines +or other special characters, and the entry as a whole is followed +by a newline character. This makes the format suitable for +processing over a stream unambiguously. + +Each entry contains at least the following fields: + +| Field | Description | +|-------|-------------| +| `tag` | A Fluent tag for the event, formed by the process name and the logging channel. | +| `file` | The name of the source file where the event was emitted. | +| `goroutine` | The identifier of the goroutine where the event was emitted. | +| `line` | The line number where the event was emitted in the source. | +| `redactable` | Whether the payload is redactable (see below for details). 
| +| `timestamp` | The timestamp at which the event was emitted on the logging channel. | +| `version` | The binary version with which the event was generated. | + + +After a couple of *header* entries written at the beginning of each log sink, +all subsequent log entries also contain the following fields: + +| Field | Description | +|---------------------|-------------| +| `channel` | The name of the logging channel where the event was sent. | +| `severity` | The severity of the event. | +| `channel_numeric` | The numeric identifier for the logging channel where the event was sent. | +| `entry_counter` | The entry number on this logging sink, relative to the last process restart. | +| `severity_numeric` | The numeric value of the severity of the event. | + + +Additionally, the following fields are conditionally present: + +| Field | Description | +|---------------------|-------------| +| `node_id` | The node ID where the event was generated, once known. Only reported for single-tenant or KV servers. | +| `cluster_id` | The cluster ID where the event was generated, once known. Only reported for single-tenant of KV servers. | +| `instance_id` | The SQL instance ID where the event was generated, once known. Only reported for multi-tenant SQL servers. | +| `tenant_id` | The SQL tenant ID where the event was generated, once known. Only reported for multi-tenant SQL servers. | +| `tags` | The logging context tags for the entry, if there were context tags. | +| `message` | For unstructured events, the flat text payload. | +| `event` | The logging event, if structured (see below for details). | +| `stacks` | Goroutine stacks, for fatal events. | + +When an entry is structured, the `event` field maps to a dictionary +whose structure is one of the documented structured events. See the [reference documentation](eventlog.html) +for structured events for a list of possible payloads. 
+ +When the entry is marked as `redactable`, the `tags`, `message`, and/or `event` payloads +contain delimiters (‹...›) around +fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) when log redaction is requested. + + + + +## Format `json-fluent-compact` + +This format emits log entries as a JSON payload. + +The JSON object is guaranteed to not contain unescaped newlines +or other special characters, and the entry as a whole is followed +by a newline character. This makes the format suitable for +processing over a stream unambiguously. + +Each entry contains at least the following fields: + +| Field | Description | +|-------|-------------| +| `tag` | A Fluent tag for the event, formed by the process name and the logging channel. | +| `f` | The name of the source file where the event was emitted. | +| `g` | The identifier of the goroutine where the event was emitted. | +| `l` | The line number where the event was emitted in the source. | +| `r` | Whether the payload is redactable (see below for details). | +| `t` | The timestamp at which the event was emitted on the logging channel. | +| `v` | The binary version with which the event was generated. | + + +After a couple of *header* entries written at the beginning of each log sink, +all subsequent log entries also contain the following fields: + +| Field | Description | +|---------------------|-------------| +| `C` | The name of the logging channel where the event was sent. | +| `sev` | The severity of the event. | +| `c` | The numeric identifier for the logging channel where the event was sent. | +| `n` | The entry number on this logging sink, relative to the last process restart. | +| `s` | The numeric value of the severity of the event. 
| + + +Additionally, the following fields are conditionally present: + +| Field | Description | +|---------------------|-------------| +| `N` | The node ID where the event was generated, once known. Only reported for single-tenant or KV servers. | +| `x` | The cluster ID where the event was generated, once known. Only reported for single-tenant of KV servers. | +| `q` | The SQL instance ID where the event was generated, once known. Only reported for multi-tenant SQL servers. | +| `T` | The SQL tenant ID where the event was generated, once known. Only reported for multi-tenant SQL servers. | +| `tags` | The logging context tags for the entry, if there were context tags. | +| `message` | For unstructured events, the flat text payload. | +| `event` | The logging event, if structured (see below for details). | +| `stacks` | Goroutine stacks, for fatal events. | + +When an entry is structured, the `event` field maps to a dictionary +whose structure is one of the documented structured events. See the [reference documentation](eventlog.html) +for structured events for a list of possible payloads. + +When the entry is marked as `redactable`, the `tags`, `message`, and/or `event` payloads +contain delimiters (‹...›) around +fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) when log redaction is requested. + + + + diff --git a/src/current/_includes/cockroach-generated/release-23.1/logging.md b/src/current/_includes/cockroach-generated/release-23.1/logging.md new file mode 100644 index 00000000000..81dd8bbe287 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-23.1/logging.md @@ -0,0 +1,179 @@ +## Logging levels (severities) + +### INFO + +The `INFO` severity is used for informational messages that do not +require action. 
+ +### WARNING + +The `WARNING` severity is used for situations which may require special handling, +where normal operation is expected to resume automatically. + +### ERROR + +The `ERROR` severity is used for situations that require special handling, +where normal operation could not proceed as expected. +Other operations can continue mostly unaffected. + +### FATAL + +The `FATAL` severity is used for situations that require an immediate, hard +server shutdown. A report is also sent to telemetry if telemetry +is enabled. + + +## Logging channels + +### `DEV` + +The `DEV` channel is used during development to collect log +details useful for troubleshooting that fall outside the +scope of other channels. It is also the default logging +channel for events not associated with a channel. + +This channel is special in that there are no constraints as to +what may or may not be logged on it. Conversely, users in +production deployments are invited to not collect `DEV` logs in +centralized logging facilities, because they likely contain +sensitive operational data. +See [Configure logs](configure-logs.html#dev-channel). + +### `OPS` + +The `OPS` channel is used to report "point" operational events, +initiated by user operators or automation: + + - Operator or system actions on server processes: process starts, + stops, shutdowns, crashes (if they can be logged), + including each time: command-line parameters, current version being run + - Actions that impact the topology of a cluster: node additions, + removals, decommissions, etc. 
+ - Job-related initiation or termination + - [Cluster setting](cluster-settings.html) changes + - [Zone configuration](configure-replication-zones.html) changes + +### `HEALTH` + +The `HEALTH` channel is used to report "background" operational +events, initiated by CockroachDB or reporting on automatic processes: + + - Current resource usage, including critical resource usage + - Node-node connection events, including connection errors and + gossip details + - Range and table leasing events + - Up- and down-replication, range unavailability + +### `STORAGE` + +The `STORAGE` channel is used to report low-level storage +layer events (RocksDB/Pebble). + +### `SESSIONS` + +The `SESSIONS` channel is used to report client network activity when enabled via +the `server.auth_log.sql_connections.enabled` and/or +`server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): + + - Connections opened/closed + - Authentication events: logins, failed attempts + - Session and query cancellation + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. + +### `SQL_SCHEMA` + +The `SQL_SCHEMA` channel is used to report changes to the +SQL logical schema, excluding privilege and ownership changes +(which are reported separately on the `PRIVILEGES` channel) and +zone configuration changes (which go to the `OPS` channel). + +This includes: + + - Database/schema/table/sequence/view/type creation + - Adding/removing/changing table columns + - Changing sequence parameters + +`SQL_SCHEMA` events generally comprise changes to the schema that affect the +functional behavior of client apps using stored objects. + +### `USER_ADMIN` + +The `USER_ADMIN` channel is used to report changes +in users and roles, including: + + - Users added/dropped + - Changes to authentication credentials (e.g., passwords, validity, etc.) 
+ - Role grants/revocations + - Role option grants/revocations + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. + +### `PRIVILEGES` + +The `PRIVILEGES` channel is used to report data +authorization changes, including: + + - Privilege grants/revocations on database, objects, etc. + - Object ownership changes + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. + +### `SENSITIVE_ACCESS` + +The `SENSITIVE_ACCESS` channel is used to report SQL +data access to sensitive data: + + - Data access audit events (when table audit is enabled via + [ALTER TABLE ... EXPERIMENTAL_AUDIT](alter-table.html#experimental_audit)) + - Data access audit events (when role-based audit is enabled via + [`sql.log.user_audit` cluster setting](role-based-audit-logging.html#syntax-of-audit-settings)) + - SQL statements executed by users with the admin role + - Operations that write to system tables + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. + +### `SQL_EXEC` + +The `SQL_EXEC` channel is used to report SQL execution on +behalf of client connections: + + - Logical SQL statement executions (when enabled via the + `sql.trace.log_statement_execute` [cluster setting](cluster-settings.html)) + - uncaught Go panic errors during the execution of a SQL statement. + +### `SQL_PERF` + +The `SQL_PERF` channel is used to report SQL executions +that are marked as "out of the ordinary" +to facilitate performance investigations. +This includes the SQL "slow query log". + +Arguably, this channel overlaps with `SQL_EXEC`. +However, we keep both channels separate for backward compatibility +with versions prior to v21.1, where the corresponding events +were redirected to separate files. + +### `SQL_INTERNAL_PERF` + +The `SQL_INTERNAL_PERF` channel is like the `SQL_PERF` channel, but is aimed at +helping developers of CockroachDB itself. 
It exists as a separate +channel so as to not pollute the `SQL_PERF` logging output with +internal troubleshooting details. + +### `TELEMETRY` + +The `TELEMETRY` channel reports telemetry events. Telemetry events describe +feature usage within CockroachDB and anonymizes any application- +specific data. + +### `KV_DISTRIBUTION` + +The `KV_DISTRIBUTION` channel is used to report data distribution events, such as moving +replicas between stores in the cluster, or adding (removing) replicas to +ranges. + diff --git a/src/current/_includes/cockroach-generated/release-23.1/settings/settings.html b/src/current/_includes/cockroach-generated/release-23.1/settings/settings.html new file mode 100644 index 00000000000..ad0688d7306 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-23.1/settings/settings.html @@ -0,0 +1,271 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
SettingTypeDefaultDescription
admission.disk_bandwidth_tokens.elastic.enabled
booleantruewhen true, and provisioned bandwidth for the disk corresponding to a store is configured, tokens for elastic work will be limited if disk bandwidth becomes a bottleneck
admission.epoch_lifo.enabled
booleanfalsewhen true, epoch-LIFO behavior is enabled when there is significant delay in admission
admission.epoch_lifo.epoch_closing_delta_duration
duration5msthe delta duration before closing an epoch, for epoch-LIFO admission control ordering
admission.epoch_lifo.epoch_duration
duration100msthe duration of an epoch, for epoch-LIFO admission control ordering
admission.epoch_lifo.queue_delay_threshold_to_switch_to_lifo
duration105msthe queue delay encountered by a (tenant,priority) for switching to epoch-LIFO ordering
admission.kv.enabled
booleantruewhen true, work performed by the KV layer is subject to admission control
admission.kv.stores.tenant_weights.enabled
booleanfalsewhen true, tenant weights are enabled for KV-stores admission control
admission.kv.tenant_weights.enabled
booleanfalsewhen true, tenant weights are enabled for KV admission control
admission.sql_kv_response.enabled
booleantruewhen true, work performed by the SQL layer when receiving a KV response is subject to admission control
admission.sql_sql_response.enabled
booleantruewhen true, work performed by the SQL layer when receiving a DistSQL response is subject to admission control
bulkio.backup.deprecated_full_backup_with_subdir.enabled
booleanfalsewhen true, a backup command with a user specified subdirectory will create a full backup at the subdirectory if no backup already exists at that subdirectory.
bulkio.backup.file_size
byte size128 MiBtarget size for individual data files produced during BACKUP
bulkio.backup.read_timeout
duration5m0samount of time after which a read attempt is considered timed out, which causes the backup to fail
bulkio.backup.read_with_priority_after
duration1m0samount of time since the read-as-of time above which a BACKUP should use priority when retrying reads
bulkio.stream_ingestion.minimum_flush_interval
duration5sthe minimum timestamp between flushes; flushes may still occur if internal buffers fill up
changefeed.aggregator.flush_jitter
float0jitter aggregator flushes as a fraction of min_checkpoint_frequency. This setting has no effect if min_checkpoint_frequency is set to 0.
changefeed.backfill.scan_request_size
integer524288the maximum number of bytes returned by each scan request
changefeed.balance_range_distribution.enable
booleanfalseif enabled, the ranges are balanced equally among all nodes
changefeed.batch_reduction_retry_enabled
booleanfalseif true, kafka changefeeds upon erroring on an oversized batch will attempt to resend the messages with progressively lower batch sizes
changefeed.event_consumer_worker_queue_size
integer16if changefeed.event_consumer_workers is enabled, this setting sets the maximum number of events which a worker can buffer
changefeed.event_consumer_workers
integer0the number of workers to use when processing events: <0 disables, 0 assigns a reasonable default, >0 assigns the setting value. for experimental/core changefeeds and changefeeds using parquet format, this is disabled
changefeed.fast_gzip.enabled
booleantrueuse fast gzip implementation
changefeed.lagging_ranges_polling_interval
duration1m0sthe polling rate at which lagging ranges are checked and corresponding metrics are updated. will be removed in v23.2 onwards
changefeed.lagging_ranges_threshold
duration3m0sspecifies the duration by which a range must be lagging behind the present to be considered as 'lagging' behind in metrics. will be removed in v23.2 in favor of a changefeed option
changefeed.node_throttle_config
stringspecifies node level throttling configuration for all changefeeds
changefeed.schema_feed.read_with_priority_after
duration1m0sretry with high priority if we were not able to read descriptors for too long; 0 disables
changefeed.sink_io_workers
integer0the number of workers used by changefeeds when sending requests to the sink (currently webhook only): <0 disables, 0 assigns a reasonable default, >0 assigns the setting value.
cloudstorage.azure.concurrent_upload_buffers
integer1controls the number of concurrent buffers that will be used by the Azure client when uploading chunks. Each buffer can buffer up to cloudstorage.write_chunk.size of memory during an upload
cloudstorage.http.custom_ca
stringcustom root CA (appended to system's default CAs) for verifying certificates when interacting with HTTPS storage
cloudstorage.timeout
duration10m0sthe timeout for import/export storage operations
cluster.organization
stringorganization name
cluster.preserve_downgrade_option
stringdisable (automatic or manual) cluster version upgrade from the specified version until reset
diagnostics.active_query_dumps.enabled
booleantrueexperimental: enable dumping of anonymized active queries to disk when node is under memory pressure
diagnostics.forced_sql_stat_reset.interval
duration2h0m0sinterval after which the reported SQL Stats are reset even if not collected by telemetry reporter. It has a max value of 24H.
diagnostics.reporting.enabled
booleantrueenable reporting diagnostic metrics to cockroach labs, but is ignored for Trial or Free licenses
diagnostics.reporting.interval
duration1h0m0sinterval at which diagnostics data should be reported
enterprise.license
stringthe encoded cluster license
external.graphite.endpoint
stringif nonempty, push server metrics to the Graphite or Carbon server at the specified host:port
external.graphite.interval
duration10sthe interval at which metrics are pushed to Graphite (if enabled)
feature.backup.enabled
booleantrueset to true to enable backups, false to disable; default is true
feature.changefeed.enabled
booleantrueset to true to enable changefeeds, false to disable; default is true
feature.export.enabled
booleantrueset to true to enable exports, false to disable; default is true
feature.import.enabled
booleantrueset to true to enable imports, false to disable; default is true
feature.restore.enabled
booleantrueset to true to enable restore, false to disable; default is true
feature.schema_change.enabled
booleantrueset to true to enable schema changes, false to disable; default is true
feature.stats.enabled
booleantrueset to true to enable CREATE STATISTICS/ANALYZE, false to disable; default is true
jobs.retention_time
duration336h0m0sthe amount of time for which records for completed jobs are retained
kv.allocator.lease_rebalance_threshold
float0.05minimum fraction away from the mean a store's lease count can be before it is considered for lease-transfers
kv.allocator.load_based_lease_rebalancing.enabled
booleantrueset to enable rebalancing of range leases based on load and latency
kv.allocator.load_based_rebalancing
enumerationleases and replicaswhether to rebalance based on the distribution of load across stores [off = 0, leases = 1, leases and replicas = 2]
kv.allocator.load_based_rebalancing.objective
enumerationcpuwhat objective does the cluster use to rebalance; if set to `qps` the cluster will attempt to balance qps among stores, if set to `cpu` the cluster will attempt to balance cpu usage among stores [qps = 0, cpu = 1]
kv.allocator.load_based_rebalancing_interval
duration1m0sthe rough interval at which each store will check for load-based lease / replica rebalancing opportunities
kv.allocator.qps_rebalance_threshold
float0.1minimum fraction away from the mean a store's QPS (such as queries per second) can be before it is considered overfull or underfull
kv.allocator.range_rebalance_threshold
float0.05minimum fraction away from the mean a store's range count can be before it is considered overfull or underfull
kv.allocator.store_cpu_rebalance_threshold
float0.1minimum fraction away from the mean a store's cpu usage can be before it is considered overfull or underfull
kv.bulk_io_write.max_rate
byte size1.0 TiBthe rate limit (bytes/sec) to use for writes to disk on behalf of bulk io ops
kv.bulk_sst.max_allowed_overage
byte size64 MiBif positive, allowed size in excess of target size for SSTs from export requests; export requests (i.e. BACKUP) may buffer up to the sum of kv.bulk_sst.target_size and kv.bulk_sst.max_allowed_overage in memory
kv.bulk_sst.target_size
byte size16 MiBtarget size for SSTs emitted from export requests; export requests (i.e. BACKUP) may buffer up to the sum of kv.bulk_sst.target_size and kv.bulk_sst.max_allowed_overage in memory
kv.closed_timestamp.follower_reads_enabled
booleantrueallow (all) replicas to serve consistent historical reads based on closed timestamp information
kv.log_range_and_node_events.enabled
booleantrueset to true to transactionally log range events (e.g., split, merge, add/remove voter/non-voter) into system.rangelog and node join and restart events into system.eventlog
kv.protectedts.reconciliation.interval
duration5m0sthe frequency for reconciling jobs with protected timestamp records
kv.range_split.by_load_enabled
booleantrueallow automatic splits of ranges based on where load is concentrated
kv.range_split.load_cpu_threshold
duration500msthe CPU use per second over which, the range becomes a candidate for load based splitting
kv.range_split.load_qps_threshold
integer2500the QPS over which, the range becomes a candidate for load based splitting
kv.rangefeed.enabled
booleanfalseif set, rangefeed registration is enabled
kv.rangefeed.range_stuck_threshold
duration1m0srestart rangefeeds if they don't emit anything for the specified threshold; 0 disables (kv.closed_timestamp.side_transport_interval takes precedence)
kv.replica_circuit_breaker.slow_replication_threshold
duration1m0sduration after which slow proposals trip the per-Replica circuit breaker (zero duration disables breakers)
kv.replica_stats.addsst_request_size_factor
integer50000the divisor that is applied to addsstable request sizes, then recorded in a leaseholders QPS; 0 means all requests are treated as cost 1
kv.replication_reports.interval
duration1m0sthe frequency for generating the replication_constraint_stats, replication_stats_report and replication_critical_localities reports (set to 0 to disable)
kv.snapshot_rebalance.max_rate
byte size32 MiBthe rate limit (bytes/sec) to use for rebalance and upreplication snapshots
kv.snapshot_recovery.max_rate
byte size32 MiBthe rate limit (bytes/sec) to use for recovery snapshots
kv.transaction.max_intents_bytes
integer4194304maximum number of bytes used to track locks in transactions
kv.transaction.max_refresh_spans_bytes
integer4194304maximum number of bytes used to track refresh spans in serializable transactions
kv.transaction.reject_over_max_intents_budget.enabled
booleanfalseif set, transactions that exceed their lock tracking budget (kv.transaction.max_intents_bytes) are rejected instead of having their lock spans imprecisely compressed
kvadmission.store.provisioned_bandwidth
byte size0 Bif set to a non-zero value, this is used as the provisioned bandwidth (in bytes/s), for each store. It can be over-ridden on a per-store basis using the --store flag
schedules.backup.gc_protection.enabled
booleantrueenable chaining of GC protection across backups run as part of a schedule
security.ocsp.mode
enumerationoffuse OCSP to check whether TLS certificates are revoked. If the OCSP server is unreachable, in strict mode all certificates will be rejected and in lax mode all certificates will be accepted. [off = 0, lax = 1, strict = 2]
security.ocsp.timeout
duration3stimeout before considering the OCSP server unreachable
server.auth_log.sql_connections.enabled
booleanfalseif set, log SQL client connect and disconnect events to the SESSIONS log channel (note: may hinder performance on loaded nodes)
server.auth_log.sql_sessions.enabled
booleanfalseif set, log verbose SQL session authentication events to the SESSIONS log channel (note: may hinder performance on loaded nodes). Session start and end events are always logged regardless of this setting; disable the SESSIONS log channel to suppress them.
server.authentication_cache.enabled
booleantrueenables a cache used during authentication to avoid lookups to system tables when retrieving per-user authentication-related information
server.child_metrics.enabled
booleanfalseenables the exporting of child metrics, additional prometheus time series with extra labels
server.client_cert_expiration_cache.capacity
integer1000the maximum number of client cert expirations stored
server.clock.forward_jump_check_enabled
booleanfalseif enabled, forward clock jumps > max_offset/2 will cause a panic
server.clock.persist_upper_bound_interval
duration0sthe interval between persisting the wall time upper bound of the clock. The clock does not generate a wall time greater than the persisted timestamp and will panic if it sees a wall time greater than this value. When cockroach starts, it waits for the wall time to catch-up till this persisted timestamp. This guarantees monotonic wall time across server restarts. Not setting this or setting a value of 0 disables this feature.
server.consistency_check.max_rate
byte size8.0 MiBthe rate limit (bytes/sec) to use for consistency checks; used in conjunction with server.consistency_check.interval to control the frequency of consistency checks. Note that setting this too high can negatively impact performance.
server.controller.default_tenant
stringsystemname of the tenant to use to serve requests when clients don't specify a tenant
server.eventlog.enabled
booleantrueif set, logged notable events are also stored in the table system.eventlog
server.eventlog.ttl
duration2160h0m0sif nonzero, entries in system.eventlog older than this duration are periodically purged
server.host_based_authentication.configuration
stringhost-based authentication configuration to use during connection authentication
server.hot_ranges_request.node.timeout
duration5m0sthe duration allowed for a single node to return hot range data before the request is cancelled; if set to 0, there is no timeout
server.hsts.enabled
booleanfalseif true, HSTS headers will be sent along with all HTTP requests. The headers will contain a max-age setting of one year. Browsers honoring the header will always use HTTPS to access the DB Console. Ensure that TLS is correctly configured prior to enabling.
server.http.base_path
string/path to redirect the user to upon successful login
server.identity_map.configuration
stringsystem-identity to database-username mappings
server.log_gc.max_deletions_per_cycle
integer1000the maximum number of entries to delete on each purge of log-like system tables
server.log_gc.period
duration1h0m0sthe period at which log-like system tables are checked for old entries
server.max_connections_per_gateway
integer-1the maximum number of non-superuser SQL connections per gateway allowed at a given time (note: this will only limit future connection attempts and will not affect already established connections). Negative values result in unlimited number of connections. Superusers are not affected by this limit.
server.max_open_transactions_per_gateway
integer-1the maximum number of open SQL transactions per gateway allowed at a given time. Negative values result in unlimited number of connections. Superusers are not affected by this limit.
server.oidc_authentication.autologin
booleanfalseif true, logged-out visitors to the DB Console will be automatically redirected to the OIDC login endpoint
server.oidc_authentication.button_text
stringLog in with your OIDC providertext to show on button on DB Console login page to login with your OIDC provider (only shown if OIDC is enabled)
server.oidc_authentication.claim_json_key
stringsets JSON key of principal to extract from payload after OIDC authentication completes (usually email or sid)
server.oidc_authentication.client_id
stringsets OIDC client id
server.oidc_authentication.client_secret
stringsets OIDC client secret
server.oidc_authentication.enabled
booleanfalseenables or disables OIDC login for the DB Console
server.oidc_authentication.principal_regex
string(.+)regular expression to apply to extracted principal (see claim_json_key setting) to translate to SQL user (golang regex format, must include 1 grouping to extract)
server.oidc_authentication.provider_url
stringsets OIDC provider URL ({provider_url}/.well-known/openid-configuration must resolve)
server.oidc_authentication.redirect_url
stringhttps://localhost:8080/oidc/v1/callbacksets OIDC redirect URL via a URL string or a JSON string containing a required `redirect_urls` key with an object that maps from region keys to URL strings (URLs should point to your load balancer and must route to the path /oidc/v1/callback)
server.oidc_authentication.scopes
stringopenidsets OIDC scopes to include with authentication request (space delimited list of strings, required to start with `openid`)
server.rangelog.ttl
duration720h0m0sif nonzero, entries in system.rangelog older than this duration are periodically purged
server.secondary_tenants.redact_trace.enabled
booleantruecontrols if server side traces are redacted for tenant operations
server.shutdown.connection_wait
duration0sthe maximum amount of time a server waits for all SQL connections to be closed before proceeding with a drain. (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)
server.shutdown.drain_wait
duration0sthe amount of time a server waits in an unready state before proceeding with a drain (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting. --drain-wait is to specify the duration of the whole draining process, while server.shutdown.drain_wait is to set the wait time for health probes to notice that the node is not ready.)
server.shutdown.lease_transfer_wait
duration5sthe timeout for a single iteration of the range lease transfer phase of draining (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)
server.shutdown.query_wait
duration10sthe timeout for waiting for active queries to finish during a drain (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)
server.time_until_store_dead
duration5m0sthe time after which if there is no new gossiped information about a store, it is considered dead
server.user_login.cert_password_method.auto_scram_promotion.enabled
booleantruewhether to automatically promote cert-password authentication to use SCRAM
server.user_login.downgrade_scram_stored_passwords_to_bcrypt.enabled
booleantrueif server.user_login.password_encryption=crdb-bcrypt, this controls whether to automatically re-encode stored passwords using scram-sha-256 to crdb-bcrypt
server.user_login.min_password_length
integer1the minimum length accepted for passwords set in cleartext via SQL. Note that a value lower than 1 is ignored: passwords cannot be empty in any case.
server.user_login.password_encryption
enumerationscram-sha-256which hash method to use to encode cleartext passwords passed via ALTER/CREATE USER/ROLE WITH PASSWORD [crdb-bcrypt = 2, scram-sha-256 = 3]
server.user_login.password_hashes.default_cost.crdb_bcrypt
integer10the hashing cost to use when storing passwords supplied as cleartext by SQL clients with the hashing method crdb-bcrypt (allowed range: 4-31)
server.user_login.password_hashes.default_cost.scram_sha_256
integer10610the hashing cost to use when storing passwords supplied as cleartext by SQL clients with the hashing method scram-sha-256 (allowed range: 4096-240000000000)
server.user_login.rehash_scram_stored_passwords_on_cost_change.enabled
booleantrueif server.user_login.password_hashes.default_cost.scram_sha_256 differs from the cost in a stored hash, this controls whether to automatically re-encode stored passwords using scram-sha-256 with the new default cost
server.user_login.timeout
duration10stimeout after which client authentication times out if some system range is unavailable (0 = no timeout)
server.user_login.upgrade_bcrypt_stored_passwords_to_scram.enabled
booleantrueif server.user_login.password_encryption=scram-sha-256, this controls whether to automatically re-encode stored passwords using crdb-bcrypt to scram-sha-256
server.web_session.purge.ttl
duration1h0m0sif nonzero, entries in system.web_sessions older than this duration are periodically purged
server.web_session_timeout
duration168h0m0sthe duration that a newly created web session will be valid
sql.auth.change_own_password.enabled
booleanfalsecontrols whether a user is allowed to change their own password, even if they have no other privileges
sql.auth.createrole_allows_grant_role_membership.enabled
booleanfalseif set, users with CREATEROLE privilege can grant/revoke membership in roles
sql.auth.grant_option_for_owner.enabled
booleantruedetermines whether the GRANT OPTION for privileges is implicitly given to the owner of an object
sql.auth.grant_option_inheritance.enabled
booleantruedetermines whether the GRANT OPTION for privileges is inherited through role membership
sql.auth.modify_cluster_setting_applies_to_all.enabled
booleantruea bool which indicates whether MODIFYCLUSTERSETTING is able to set all cluster settings or only settings with the sql.defaults prefix (deprecated)
sql.auth.resolve_membership_single_scan.enabled
booleantruedetermines whether to populate the role membership cache with a single scan
sql.closed_session_cache.capacity
integer1000the maximum number of sessions in the cache
sql.closed_session_cache.time_to_live
integer3600the maximum time to live, in seconds
sql.contention.event_store.capacity
byte size64 MiBthe in-memory storage capacity per-node of contention event store
sql.contention.event_store.duration_threshold
duration0sminimum contention duration to cause the contention events to be collected into crdb_internal.transaction_contention_events
sql.contention.txn_id_cache.max_size
byte size64 MiBthe maximum byte size TxnID cache will use (set to 0 to disable)
sql.cross_db_fks.enabled
booleanfalseif true, creating foreign key references across databases is allowed
sql.cross_db_sequence_owners.enabled
booleanfalseif true, creating sequences owned by tables from other databases is allowed
sql.cross_db_sequence_references.enabled
booleanfalseif true, sequences referenced by tables from other databases are allowed
sql.cross_db_views.enabled
booleanfalseif true, creating views that refer to other databases is allowed
sql.defaults.cost_scans_with_default_col_size.enabled
booleanfalsesetting to true uses the same size for all columns to compute scan cost
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.datestyle
enumerationiso, mdydefault value for DateStyle session setting [iso, mdy = 0, iso, dmy = 1, iso, ymd = 2]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.default_hash_sharded_index_bucket_count
integer16used as bucket count if bucket count is not specified in hash sharded index definition
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.default_int_size
integer8the size, in bytes, of an INT type
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.disallow_full_table_scans.enabled
booleanfalsesetting to true rejects queries that have planned a full table scan
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.distsql
enumerationautodefault distributed SQL execution mode [off = 0, auto = 1, on = 2, always = 3]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.experimental_alter_column_type.enabled
booleanfalsedefault value for experimental_alter_column_type session setting; enables the use of ALTER COLUMN TYPE for general conversions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.experimental_distsql_planning
enumerationoffdefault experimental_distsql_planning mode; enables experimental opt-driven DistSQL planning [off = 0, on = 1]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.experimental_enable_unique_without_index_constraints.enabled
booleanfalsedefault value for experimental_enable_unique_without_index_constraints session setting;disables unique without index constraints by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.experimental_implicit_column_partitioning.enabled
booleanfalsedefault value for experimental_enable_temp_tables; allows for the use of implicit column partitioning
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.experimental_temporary_tables.enabled
booleanfalsedefault value for experimental_enable_temp_tables; allows for use of temporary tables by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.foreign_key_cascades_limit
integer10000default value for foreign_key_cascades_limit session setting; limits the number of cascading operations that run as part of a single query
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.idle_in_session_timeout
duration0sdefault value for the idle_in_session_timeout session setting; controls the duration a session is permitted to idle before the session is terminated; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.idle_in_transaction_session_timeout
duration0sdefault value for the idle_in_transaction_session_timeout; controls the duration a session is permitted to idle in a transaction before the session is terminated; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.implicit_select_for_update.enabled
booleantruedefault value for enable_implicit_select_for_update session setting; enables FOR UPDATE locking during the row-fetch phase of mutation statements
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.insert_fast_path.enabled
booleantruedefault value for enable_insert_fast_path session setting; enables a specialized insert path
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.intervalstyle
enumerationpostgresdefault value for IntervalStyle session setting [postgres = 0, iso_8601 = 1, sql_standard = 2]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.large_full_scan_rows
float1000default value for large_full_scan_rows session setting which determines the maximum table size allowed for a full scan when disallow_full_table_scans is set to true
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.locality_optimized_partitioned_index_scan.enabled
booleantruedefault value for locality_optimized_partitioned_index_scan session setting; enables searching for rows in the current region before searching remote regions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.lock_timeout
duration0sdefault value for the lock_timeout session setting; controls the duration a query is permitted to wait while attempting to acquire a lock on a key or while blocking on an existing lock in order to perform a non-locking read on a key; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.on_update_rehome_row.enabled
booleantruedefault value for on_update_rehome_row; enables ON UPDATE rehome_row() expressions to trigger on updates
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.optimizer_use_histograms.enabled
booleantruedefault value for optimizer_use_histograms session setting; enables usage of histograms in the optimizer by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.optimizer_use_multicol_stats.enabled
booleantruedefault value for optimizer_use_multicol_stats session setting; enables usage of multi-column stats in the optimizer by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.override_alter_primary_region_in_super_region.enabled
booleanfalsedefault value for override_alter_primary_region_in_super_region; allows for altering the primary region even if the primary region is a member of a super region
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.override_multi_region_zone_config.enabled
booleanfalsedefault value for override_multi_region_zone_config; allows for overriding the zone configs of a multi-region table or database
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.prefer_lookup_joins_for_fks.enabled
booleanfalsedefault value for prefer_lookup_joins_for_fks session setting; causes foreign key operations to use lookup joins when possible
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.primary_region
stringif not empty, all databases created without a PRIMARY REGION will implicitly have the given PRIMARY REGION
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.reorder_joins_limit
integer8default number of joins to reorder
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.require_explicit_primary_keys.enabled
booleanfalsedefault value for requiring explicit primary keys in CREATE TABLE statements
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.results_buffer.size
byte size16 KiBdefault size of the buffer that accumulates results for a statement or a batch of statements before they are sent to the client. This can be overridden on an individual connection with the 'results_buffer_size' parameter. Note that auto-retries generally only happen while no results have been delivered to the client, so reducing this size can increase the number of retriable errors a client receives. On the other hand, increasing the buffer size can increase the delay until the client receives the first result row. Updating the setting only affects new connections. Setting to 0 disables any buffering.
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.serial_normalization
enumerationrowiddefault handling of SERIAL in table definitions [rowid = 0, virtual_sequence = 1, sql_sequence = 2, sql_sequence_cached = 3, unordered_rowid = 4]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.statement_timeout
duration0sdefault value for the statement_timeout; default value for the statement_timeout session setting; controls the duration a query is permitted to run before it is canceled; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.stub_catalog_tables.enabled
booleantruedefault value for stub_catalog_tables session setting
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.super_regions.enabled
booleanfalsedefault value for enable_super_regions; allows for the usage of super regions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.transaction_rows_read_err
integer0the limit for the number of rows read by a SQL transaction which - once exceeded - will fail the transaction (or will trigger a logging event to SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.transaction_rows_read_log
integer0the threshold for the number of rows read by a SQL transaction which - once exceeded - will trigger a logging event to SQL_PERF (or SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.transaction_rows_written_err
integer0the limit for the number of rows written by a SQL transaction which - once exceeded - will fail the transaction (or will trigger a logging event to SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.transaction_rows_written_log
integer0the threshold for the number of rows written by a SQL transaction which - once exceeded - will trigger a logging event to SQL_PERF (or SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.use_declarative_schema_changer
enumerationondefault value for use_declarative_schema_changer session setting; disables new schema changer by default [off = 0, on = 1, unsafe = 2, unsafe_always = 3]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.vectorize
enumerationondefault vectorize mode [on = 0, on = 2, experimental_always = 3, off = 4]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.defaults.zigzag_join.enabled
booleantruedefault value for enable_zigzag_join session setting; allows use of zig-zag join by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
sql.distsql.temp_storage.workmem
byte size64 MiBmaximum amount of memory in bytes a processor can use before falling back to temp storage
sql.guardrails.max_row_size_err
byte size512 MiBmaximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an error is returned; use 0 to disable
sql.guardrails.max_row_size_log
byte size64 MiBmaximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an event is logged to SQL_PERF (or SQL_INTERNAL_PERF if the mutating statement was internal); use 0 to disable
sql.hash_sharded_range_pre_split.max
integer16max pre-split ranges to have when adding hash sharded index to an existing table
sql.insights.anomaly_detection.enabled
booleantrueenable per-fingerprint latency recording and anomaly detection
sql.insights.anomaly_detection.latency_threshold
duration50msstatements must surpass this threshold to trigger anomaly detection and identification
sql.insights.anomaly_detection.memory_limit
byte size1.0 MiBthe maximum amount of memory allowed for tracking statement latencies
sql.insights.execution_insights_capacity
integer1000the size of the per-node store of execution insights
sql.insights.high_retry_count.threshold
integer10the number of retries a slow statement must have undergone for its high retry count to be highlighted as a potential problem
sql.insights.latency_threshold
duration100msamount of time after which an executing statement is considered slow. Use 0 to disable.
sql.log.slow_query.experimental_full_table_scans.enabled
booleanfalsewhen set to true, statements that perform a full table/index scan will be logged to the slow query log even if they do not meet the latency threshold. Must have the slow query log enabled for this setting to have any effect.
sql.log.slow_query.internal_queries.enabled
booleanfalsewhen set to true, internal queries which exceed the slow query log threshold are logged to a separate log. Must have the slow query log enabled for this setting to have any effect.
sql.log.slow_query.latency_threshold
duration0swhen set to non-zero, log statements whose service latency exceeds the threshold to a secondary logger on each node
sql.log.user_audit
stringuser/role-based audit logging configuration. An enterprise license is required for this cluster setting to take effect.
sql.log.user_audit.reduced_config.enabled
booleanfalseenables logic to compute a reduced audit configuration, computing the audit configuration only once at session start instead of at each SQL event. The tradeoff with the increase in performance (~5%), is that changes to the audit configuration (user role memberships/cluster setting) are not reflected within session. Users will need to start a new session to see these changes in their auditing behaviour.
sql.metrics.index_usage_stats.enabled
booleantruecollect per index usage statistics
sql.metrics.max_mem_reported_stmt_fingerprints
integer100000the maximum number of reported statement fingerprints stored in memory
sql.metrics.max_mem_reported_txn_fingerprints
integer100000the maximum number of reported transaction fingerprints stored in memory
sql.metrics.max_mem_stmt_fingerprints
integer100000the maximum number of statement fingerprints stored in memory
sql.metrics.max_mem_txn_fingerprints
integer100000the maximum number of transaction fingerprints stored in memory
sql.metrics.statement_details.dump_to_logs
booleanfalsedump collected statement statistics to node logs when periodically cleared
sql.metrics.statement_details.enabled
booleantruecollect per-statement query statistics
sql.metrics.statement_details.gateway_node.enabled
booleantruesave the gateway node for each statement fingerprint. If false, the value will be stored as 0.
sql.metrics.statement_details.index_recommendation_collection.enabled
booleantruegenerate an index recommendation for each fingerprint ID
sql.metrics.statement_details.max_mem_reported_idx_recommendations
integer5000the maximum number of reported index recommendation info stored in memory
sql.metrics.statement_details.plan_collection.enabled
booleanfalseperiodically save a logical plan for each fingerprint
sql.metrics.statement_details.plan_collection.period
duration5m0sthe time until a new logical plan is collected
sql.metrics.statement_details.threshold
duration0sminimum execution time to cause statement statistics to be collected. If configured, no transaction stats are collected.
sql.metrics.transaction_details.enabled
booleantruecollect per-application transaction statistics
sql.multiple_modifications_of_table.enabled
booleanfalseif true, allow statements containing multiple INSERT ON CONFLICT, UPSERT, UPDATE, or DELETE subqueries modifying the same table, at the risk of data corruption if the same row is modified multiple times by a single statement (multiple INSERT subqueries without ON CONFLICT cannot cause corruption and are always allowed)
sql.multiregion.drop_primary_region.enabled
booleantrueallows dropping the PRIMARY REGION of a database if it is the last region
sql.notices.enabled
booleantrueenable notices in the server/client protocol being sent
sql.optimizer.uniqueness_checks_for_gen_random_uuid.enabled
booleanfalseif enabled, uniqueness checks may be planned for mutations of UUID columns updated with gen_random_uuid(); otherwise, uniqueness is assumed due to near-zero collision probability
sql.schema.telemetry.recurrence
string@weeklycron-tab recurrence for SQL schema telemetry job
sql.show_ranges_deprecated_behavior.enabled
booleantrueif set, SHOW RANGES and crdb_internal.ranges{_no_leases} behave with deprecated pre-v23.1 semantics. NB: the new SHOW RANGES interface has richer WITH options than pre-v23.1 SHOW RANGES.
sql.spatial.experimental_box2d_comparison_operators.enabled
booleanfalseenables the use of certain experimental box2d comparison operators
sql.stats.activity.persisted_rows.max
integer200000maximum number of rows of statement and transaction activity that will be persisted in the system tables
sql.stats.automatic_collection.enabled
booleantrueautomatic statistics collection mode
sql.stats.automatic_collection.fraction_stale_rows
float0.2target fraction of stale rows per table that will trigger a statistics refresh
sql.stats.automatic_collection.min_stale_rows
integer500target minimum number of stale rows per table that will trigger a statistics refresh
sql.stats.cleanup.recurrence
string@hourlycron-tab recurrence for SQL Stats cleanup job
sql.stats.flush.enabled
booleantrueif set, SQL execution statistics are periodically flushed to disk
sql.stats.flush.interval
duration10m0sthe interval at which SQL execution statistics are flushed to disk, this value must be less than or equal to 1 hour
sql.stats.forecasts.enabled
booleantruewhen true, enables generation of statistics forecasts by default for all tables
sql.stats.forecasts.max_decrease
float0the most a prediction is allowed to decrease, expressed as the minimum ratio of the prediction to the lowest prior observation
sql.stats.forecasts.min_goodness_of_fit
float0.95the minimum R² (goodness of fit) measurement required from all predictive models to use a forecast
sql.stats.forecasts.min_observations
integer3the minimum number of observed statistics required to produce a statistics forecast
sql.stats.histogram_buckets.count
integer200maximum number of histogram buckets to build during table statistics collection
sql.stats.histogram_collection.enabled
booleantruehistogram collection mode
sql.stats.histogram_samples.count
integer10000number of rows sampled for histogram construction during table statistics collection
sql.stats.multi_column_collection.enabled
booleantruemulti-column statistics collection mode
sql.stats.non_default_columns.min_retention_period
duration24h0m0sminimum retention period for table statistics collected on non-default columns
sql.stats.persisted_rows.max
integer1000000maximum number of rows of statement and transaction statistics that will be persisted in the system tables before compaction begins
sql.stats.post_events.enabled
booleanfalseif set, an event is logged for every CREATE STATISTICS job
sql.stats.response.max
integer20000the maximum number of statements and transaction stats returned in a CombinedStatements request
sql.stats.response.show_internal.enabled
booleanfalsecontrols if statistics for internal executions should be returned by the CombinedStatements and if internal sessions should be returned by the ListSessions endpoints. These endpoints are used to display statistics on the SQL Activity pages
sql.stats.system_tables.enabled
booleantruewhen true, enables use of statistics on system tables by the query optimizer
sql.stats.system_tables_autostats.enabled
booleantruewhen true, enables automatic collection of statistics on system tables
sql.telemetry.query_sampling.enabled
booleanfalsewhen set to true, executed queries will emit an event on the telemetry logging channel
sql.telemetry.query_sampling.internal.enabled
booleanfalsewhen set to true, internal queries will be sampled in telemetry logging
sql.temp_object_cleaner.cleanup_interval
duration30m0show often to clean up orphaned temporary objects
sql.temp_object_cleaner.wait_interval
duration30m0show long after creation a temporary object will be cleaned up
sql.trace.log_statement_execute
booleanfalseset to true to enable logging of executed statements
sql.trace.session_eventlog.enabled
booleanfalseset to true to enable session tracing; note that enabling this may have a negative performance impact
sql.trace.stmt.enable_threshold
duration0senables tracing on all statements; statements executing for longer than this duration will have their trace logged (set to 0 to disable); note that enabling this may have a negative performance impact; this setting applies to individual statements within a transaction and is therefore finer-grained than sql.trace.txn.enable_threshold
sql.trace.txn.enable_threshold
duration0senables tracing on all transactions; transactions open for longer than this duration will have their trace logged (set to 0 to disable); note that enabling this may have a negative performance impact; this setting is coarser-grained than sql.trace.stmt.enable_threshold because it applies to all statements within a transaction as well as client communication (e.g. retries)
sql.ttl.default_delete_batch_size
integer100default amount of rows to delete in a single query during a TTL job
sql.ttl.default_delete_rate_limit
integer100default delete rate limit (rows per second) per node for each TTL job. Use 0 to signify no rate limit.
sql.ttl.default_select_batch_size
integer500default amount of rows to select in a single query during a TTL job
sql.ttl.job.enabled
booleantruewhether the TTL job is enabled
sql.txn_fingerprint_id_cache.capacity
integer100the maximum number of txn fingerprint IDs stored
storage.max_sync_duration
duration20smaximum duration for disk operations; any operations that take longer than this setting trigger a warning log entry or process crash
storage.max_sync_duration.fatal.enabled
booleantrueif true, fatal the process when a disk operation exceeds storage.max_sync_duration
storage.value_blocks.enabled
booleantrueset to true to enable writing of value blocks in sstables
timeseries.storage.enabled
booleantrueif set, periodic timeseries data is stored within the cluster; disabling is not recommended unless you are storing the data elsewhere
timeseries.storage.resolution_10s.ttl
duration240h0m0sthe maximum age of time series data stored at the 10 second resolution. Data older than this is subject to rollup and deletion.
timeseries.storage.resolution_30m.ttl
duration2160h0m0sthe maximum age of time series data stored at the 30 minute resolution. Data older than this is subject to deletion.
trace.debug.enable
booleanfalseif set, traces for recent requests can be seen at https://<ui>/debug/requests
trace.jaeger.agent
stringthe address of a Jaeger agent to receive traces using the Jaeger UDP Thrift protocol, as <host>:<port>. If no port is specified, 6381 will be used.
trace.opentelemetry.collector
stringaddress of an OpenTelemetry trace collector to receive traces using the otel gRPC protocol, as <host>:<port>. If no port is specified, 4317 will be used.
trace.snapshot.rate
duration0sif non-zero, interval at which background trace snapshots are captured
trace.span_registry.enabled
booleantrueif set, ongoing traces can be seen at https://<ui>/#/debug/tracez
trace.zipkin.collector
stringthe address of a Zipkin instance to receive traces, as <host>:<port>. If no port is specified, 9411 will be used.
version
version23.1set the active cluster version in the format '<major>.<minor>'
diff --git a/src/current/_includes/cockroach-generated/release-23.1/sql/aggregates.md b/src/current/_includes/cockroach-generated/release-23.1/sql/aggregates.md new file mode 100644 index 00000000000..3488d264159 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-23.1/sql/aggregates.md @@ -0,0 +1,519 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_agg(arg1: bool) → bool[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bytes) → bytes[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: date) → date[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: decimal) → decimal[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: float) → float[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: inet) → inet[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: int) → int[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: interval) → interval[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: string) → string[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: time) → time[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamp) → timestamp[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamptz) → timestamptz[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: uuid) → uuid[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: anyenum) → anyenum[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: box2d) → box2d[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geography) → geography[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geometry) → geometry[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: jsonb) → jsonb[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: oid) → oid[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timetz) → timetz[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: tuple) → tuple[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: varbit) → varbit[]

Aggregates the selected values into an array.

+
Immutable
array_cat_agg(arg1: bool[]) → bool[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: bytes[]) → bytes[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: date[]) → date[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: decimal[]) → decimal[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: float[]) → float[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: inet[]) → inet[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: int[]) → int[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: interval[]) → interval[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: string[]) → string[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: time[]) → time[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timestamp[]) → timestamp[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timestamptz[]) → timestamptz[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: uuid[]) → uuid[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: anyenum[]) → anyenum[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: box2d[]) → box2d[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: geography[]) → geography[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: geometry[]) → geometry[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: jsonb[]) → jsonb[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: oid[]) → oid[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timetz[]) → timetz[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: tuple[]) → tuple[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: varbit[]) → varbit[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
avg(arg1: decimal) → decimal

Calculates the average of the selected values.

+
Immutable
avg(arg1: float) → float

Calculates the average of the selected values.

+
Immutable
avg(arg1: int) → decimal

Calculates the average of the selected values.

+
Immutable
avg(arg1: interval) → interval

Calculates the average of the selected values.

+
Immutable
bit_and(arg1: int) → int

Calculates the bitwise AND of all non-null input values, or null if none.

+
Immutable
bit_and(arg1: varbit) → varbit

Calculates the bitwise AND of all non-null input values, or null if none.

+
Immutable
bit_or(arg1: int) → int

Calculates the bitwise OR of all non-null input values, or null if none.

+
Immutable
bit_or(arg1: varbit) → varbit

Calculates the bitwise OR of all non-null input values, or null if none.

+
Immutable
bool_and(arg1: bool) → bool

Calculates the boolean value of ANDing all selected values.

+
Immutable
bool_or(arg1: bool) → bool

Calculates the boolean value of ORing all selected values.

+
Immutable
concat_agg(arg1: bytes) → bytes

Concatenates all selected values.

+
Immutable
concat_agg(arg1: string) → string

Concatenates all selected values.

+
Immutable
corr(arg1: decimal, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: decimal, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: decimal, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
count(arg1: anyelement) → int

Calculates the number of selected elements.

+
Immutable
count_rows() → int

Calculates the number of rows.

+
Immutable
covar_pop(arg1: decimal, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: decimal, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: decimal, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
every(arg1: bool) → bool

Calculates the boolean value of ANDing all selected values.

+
Immutable
json_agg(arg1: anyelement) → jsonb

Aggregates values as a JSON or JSONB array.

+
Stable
json_object_agg(arg1: string, arg2: anyelement) → jsonb

Aggregates values as a JSON or JSONB object.

+
Stable
jsonb_agg(arg1: anyelement) → jsonb

Aggregates values as a JSON or JSONB array.

+
Stable
jsonb_object_agg(arg1: string, arg2: anyelement) → jsonb

Aggregates values as a JSON or JSONB object.

+
Stable
max(arg1: bool) → bool

Identifies the maximum selected value.

+
Immutable
max(arg1: bytes) → bytes

Identifies the maximum selected value.

+
Immutable
max(arg1: date) → date

Identifies the maximum selected value.

+
Immutable
max(arg1: decimal) → decimal

Identifies the maximum selected value.

+
Immutable
max(arg1: float) → float

Identifies the maximum selected value.

+
Immutable
max(arg1: inet) → inet

Identifies the maximum selected value.

+
Immutable
max(arg1: int) → int

Identifies the maximum selected value.

+
Immutable
max(arg1: interval) → interval

Identifies the maximum selected value.

+
Immutable
max(arg1: string) → string

Identifies the maximum selected value.

+
Immutable
max(arg1: time) → time

Identifies the maximum selected value.

+
Immutable
max(arg1: timestamp) → timestamp

Identifies the maximum selected value.

+
Immutable
max(arg1: timestamptz) → timestamptz

Identifies the maximum selected value.

+
Immutable
max(arg1: uuid) → uuid

Identifies the maximum selected value.

+
Immutable
max(arg1: anyenum) → anyenum

Identifies the maximum selected value.

+
Immutable
max(arg1: box2d) → box2d

Identifies the maximum selected value.

+
Immutable
max(arg1: collatedstring{*}) → collatedstring{*}

Identifies the maximum selected value.

+
Immutable
max(arg1: geography) → geography

Identifies the maximum selected value.

+
Immutable
max(arg1: geometry) → geometry

Identifies the maximum selected value.

+
Immutable
max(arg1: jsonb) → jsonb

Identifies the maximum selected value.

+
Immutable
max(arg1: oid) → oid

Identifies the maximum selected value.

+
Immutable
max(arg1: timetz) → timetz

Identifies the maximum selected value.

+
Immutable
max(arg1: varbit) → varbit

Identifies the maximum selected value.

+
Immutable
min(arg1: bool) → bool

Identifies the minimum selected value.

+
Immutable
min(arg1: bytes) → bytes

Identifies the minimum selected value.

+
Immutable
min(arg1: date) → date

Identifies the minimum selected value.

+
Immutable
min(arg1: decimal) → decimal

Identifies the minimum selected value.

+
Immutable
min(arg1: float) → float

Identifies the minimum selected value.

+
Immutable
min(arg1: inet) → inet

Identifies the minimum selected value.

+
Immutable
min(arg1: int) → int

Identifies the minimum selected value.

+
Immutable
min(arg1: interval) → interval

Identifies the minimum selected value.

+
Immutable
min(arg1: string) → string

Identifies the minimum selected value.

+
Immutable
min(arg1: time) → time

Identifies the minimum selected value.

+
Immutable
min(arg1: timestamp) → timestamp

Identifies the minimum selected value.

+
Immutable
min(arg1: timestamptz) → timestamptz

Identifies the minimum selected value.

+
Immutable
min(arg1: uuid) → uuid

Identifies the minimum selected value.

+
Immutable
min(arg1: anyenum) → anyenum

Identifies the minimum selected value.

+
Immutable
min(arg1: box2d) → box2d

Identifies the minimum selected value.

+
Immutable
min(arg1: collatedstring{*}) → collatedstring{*}

Identifies the minimum selected value.

+
Immutable
min(arg1: geography) → geography

Identifies the minimum selected value.

+
Immutable
min(arg1: geometry) → geometry

Identifies the minimum selected value.

+
Immutable
min(arg1: jsonb) → jsonb

Identifies the minimum selected value.

+
Immutable
min(arg1: oid) → oid

Identifies the minimum selected value.

+
Immutable
min(arg1: timetz) → timetz

Identifies the minimum selected value.

+
Immutable
min(arg1: varbit) → varbit

Identifies the minimum selected value.

+
Immutable
percentile_cont(arg1: float) → float

Continuous percentile: returns a float corresponding to the specified fraction in the ordering, interpolating between adjacent input floats if needed.

+
Immutable
percentile_cont(arg1: float) → interval

Continuous percentile: returns an interval corresponding to the specified fraction in the ordering, interpolating between adjacent input intervals if needed.

+
Immutable
percentile_cont(arg1: float[]) → float[]

Continuous percentile: returns floats corresponding to the specified fractions in the ordering, interpolating between adjacent input floats if needed.

+
Immutable
percentile_cont(arg1: float[]) → interval[]

Continuous percentile: returns intervals corresponding to the specified fractions in the ordering, interpolating between adjacent input intervals if needed.

+
Immutable
percentile_disc(arg1: float) → anyelement

Discrete percentile: returns the first input value whose position in the ordering equals or exceeds the specified fraction.

+
Immutable
percentile_disc(arg1: float[]) → anyelement

Discrete percentile: returns input values whose position in the ordering equals or exceeds the specified fractions.

+
Immutable
regr_avgx(arg1: decimal, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: decimal, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: decimal, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_count(arg1: decimal, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: decimal, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: decimal, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_intercept(arg1: decimal, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: decimal, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: decimal, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_r2(arg1: decimal, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: decimal, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: decimal, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_slope(arg1: decimal, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: decimal, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: decimal, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_sxx(arg1: decimal, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: decimal, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: decimal, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
sqrdiff(arg1: decimal) → decimal

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
sqrdiff(arg1: float) → float

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
sqrdiff(arg1: int) → decimal

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
st_collect(arg1: geometry) → geometry

Collects geometries into a GeometryCollection or multi-type as appropriate.

+
Immutable
st_extent(arg1: geometry) → box2d

Forms a Box2D that encapsulates all provided geometries.

+
Immutable
st_makeline(arg1: geometry) → geometry

Forms a LineString from Point, MultiPoint or LineStrings. Other shapes will be ignored.

+
Immutable
st_memcollect(arg1: geometry) → geometry

Collects geometries into a GeometryCollection or multi-type as appropriate.

+
Immutable
st_memunion(arg1: geometry) → geometry

Applies a spatial union to the geometries provided.

+
Immutable
st_union(arg1: geometry) → geometry

Applies a spatial union to the geometries provided.

+
Immutable
stddev(arg1: decimal) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev(arg1: float) → float

Calculates the standard deviation of the selected values.

+
Immutable
stddev(arg1: int) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: decimal) → decimal

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: float) → float

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: int) → decimal

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: decimal) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: float) → float

Calculates the standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: int) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
string_agg(arg1: bytes, arg2: bytes) → bytes

Concatenates all selected values using the provided delimiter.

+
Immutable
string_agg(arg1: string, arg2: string) → string

Concatenates all selected values using the provided delimiter.

+
Immutable
sum(arg1: decimal) → decimal

Calculates the sum of the selected values.

+
Immutable
sum(arg1: float) → float

Calculates the sum of the selected values.

+
Immutable
sum(arg1: int) → decimal

Calculates the sum of the selected values.

+
Immutable
sum(arg1: interval) → interval

Calculates the sum of the selected values.

+
Immutable
sum_int(arg1: int) → int

Calculates the sum of the selected values.

+
Immutable
var_pop(arg1: decimal) → decimal

Calculates the population variance of the selected values.

+
Immutable
var_pop(arg1: float) → float

Calculates the population variance of the selected values.

+
Immutable
var_pop(arg1: int) → decimal

Calculates the population variance of the selected values.

+
Immutable
var_samp(arg1: decimal) → decimal

Calculates the variance of the selected values.

+
Immutable
var_samp(arg1: float) → float

Calculates the variance of the selected values.

+
Immutable
var_samp(arg1: int) → decimal

Calculates the variance of the selected values.

+
Immutable
variance(arg1: decimal) → decimal

Calculates the variance of the selected values.

+
Immutable
variance(arg1: float) → float

Calculates the variance of the selected values.

+
Immutable
variance(arg1: int) → decimal

Calculates the variance of the selected values.

+
Immutable
xor_agg(arg1: bytes) → bytes

Calculates the bitwise XOR of the selected values.

+
Immutable
xor_agg(arg1: int) → int

Calculates the bitwise XOR of the selected values.

+
Immutable
+ diff --git a/src/current/_includes/cockroach-generated/release-23.1/sql/functions.md b/src/current/_includes/cockroach-generated/release-23.1/sql/functions.md new file mode 100644 index 00000000000..16f11b043c3 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-23.1/sql/functions.md @@ -0,0 +1,3623 @@ +### Array functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_append(array: bool[], elem: bool) → bool[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: bytes[], elem: bytes) → bytes[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: date[], elem: date) → date[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: decimal[], elem: decimal) → decimal[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: float[], elem: float) → float[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: inet[], elem: inet) → inet[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: int[], elem: int) → int[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: interval[], elem: interval) → interval[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: string[], elem: string) → string[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: time[], elem: time) → time[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timestamp[], elem: timestamp) → timestamp[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timestamptz[], elem: timestamptz) → timestamptz[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: uuid[], elem: uuid) → uuid[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: anyenum[], elem: anyenum) → anyenum[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: box2d[], elem: box2d) → box2d[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: geography[], elem: geography) → geography[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: geometry[], elem: geometry) → geometry[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: jsonb[], elem: jsonb) → jsonb[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: oid[], elem: oid) → oid[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timetz[], elem: timetz) → timetz[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: tuple[], elem: tuple) → tuple[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: varbit[], elem: varbit) → varbit[]

Appends elem to array, returning the result.

+
Immutable
array_cat(left: bool[], right: bool[]) → bool[]

Appends two arrays.

+
Immutable
array_cat(left: bytes[], right: bytes[]) → bytes[]

Appends two arrays.

+
Immutable
array_cat(left: date[], right: date[]) → date[]

Appends two arrays.

+
Immutable
array_cat(left: decimal[], right: decimal[]) → decimal[]

Appends two arrays.

+
Immutable
array_cat(left: float[], right: float[]) → float[]

Appends two arrays.

+
Immutable
array_cat(left: inet[], right: inet[]) → inet[]

Appends two arrays.

+
Immutable
array_cat(left: int[], right: int[]) → int[]

Appends two arrays.

+
Immutable
array_cat(left: interval[], right: interval[]) → interval[]

Appends two arrays.

+
Immutable
array_cat(left: string[], right: string[]) → string[]

Appends two arrays.

+
Immutable
array_cat(left: time[], right: time[]) → time[]

Appends two arrays.

+
Immutable
array_cat(left: timestamp[], right: timestamp[]) → timestamp[]

Appends two arrays.

+
Immutable
array_cat(left: timestamptz[], right: timestamptz[]) → timestamptz[]

Appends two arrays.

+
Immutable
array_cat(left: uuid[], right: uuid[]) → uuid[]

Appends two arrays.

+
Immutable
array_cat(left: anyenum[], right: anyenum[]) → anyenum[]

Appends two arrays.

+
Immutable
array_cat(left: box2d[], right: box2d[]) → box2d[]

Appends two arrays.

+
Immutable
array_cat(left: geography[], right: geography[]) → geography[]

Appends two arrays.

+
Immutable
array_cat(left: geometry[], right: geometry[]) → geometry[]

Appends two arrays.

+
Immutable
array_cat(left: jsonb[], right: jsonb[]) → jsonb[]

Appends two arrays.

+
Immutable
array_cat(left: oid[], right: oid[]) → oid[]

Appends two arrays.

+
Immutable
array_cat(left: timetz[], right: timetz[]) → timetz[]

Appends two arrays.

+
Immutable
array_cat(left: tuple[], right: tuple[]) → tuple[]

Appends two arrays.

+
Immutable
array_cat(left: varbit[], right: varbit[]) → varbit[]

Appends two arrays.

+
Immutable
array_length(input: anyelement[], array_dimension: int) → int

Calculates the length of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
array_lower(input: anyelement[], array_dimension: int) → int

Calculates the minimum value of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
array_position(array: bool[], elem: bool) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: bytes[], elem: bytes) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: date[], elem: date) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: decimal[], elem: decimal) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: float[], elem: float) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: inet[], elem: inet) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: int[], elem: int) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: interval[], elem: interval) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: string[], elem: string) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: time[], elem: time) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timestamp[], elem: timestamp) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timestamptz[], elem: timestamptz) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: uuid[], elem: uuid) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: anyenum[], elem: anyenum) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: box2d[], elem: box2d) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: geography[], elem: geography) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: geometry[], elem: geometry) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: jsonb[], elem: jsonb) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: oid[], elem: oid) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timetz[], elem: timetz) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: tuple[], elem: tuple) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: varbit[], elem: varbit) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_positions(array: bool[], elem: bool) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: bytes[], elem: bytes) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: date[], elem: date) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: decimal[], elem: decimal) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: float[], elem: float) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: inet[], elem: inet) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: int[], elem: int) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: interval[], elem: interval) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: string[], elem: string) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: time[], elem: time) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timestamp[], elem: timestamp) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timestamptz[], elem: timestamptz) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: uuid[], elem: uuid) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: anyenum[], elem: anyenum) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: box2d[], elem: box2d) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: geography[], elem: geography) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: geometry[], elem: geometry) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: jsonb[], elem: jsonb) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: oid[], elem: oid) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timetz[], elem: timetz) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: tuple[], elem: tuple) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: varbit[], elem: varbit) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_prepend(elem: bool, array: bool[]) → bool[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: bytes, array: bytes[]) → bytes[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: date, array: date[]) → date[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: decimal, array: decimal[]) → decimal[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: float, array: float[]) → float[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: inet, array: inet[]) → inet[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: int, array: int[]) → int[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: interval, array: interval[]) → interval[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: string, array: string[]) → string[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: time, array: time[]) → time[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timestamp, array: timestamp[]) → timestamp[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timestamptz, array: timestamptz[]) → timestamptz[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: uuid, array: uuid[]) → uuid[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: anyenum, array: anyenum[]) → anyenum[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: box2d, array: box2d[]) → box2d[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: geography, array: geography[]) → geography[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: geometry, array: geometry[]) → geometry[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: jsonb, array: jsonb[]) → jsonb[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: oid, array: oid[]) → oid[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timetz, array: timetz[]) → timetz[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: tuple, array: tuple[]) → tuple[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: varbit, array: varbit[]) → varbit[]

Prepends elem to array, returning the result.

+
Immutable
array_remove(array: bool[], elem: bool) → bool[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: bytes[], elem: bytes) → bytes[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: date[], elem: date) → date[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: decimal[], elem: decimal) → decimal[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: float[], elem: float) → float[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: inet[], elem: inet) → inet[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: int[], elem: int) → int[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: interval[], elem: interval) → interval[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: string[], elem: string) → string[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: time[], elem: time) → time[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timestamp[], elem: timestamp) → timestamp[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timestamptz[], elem: timestamptz) → timestamptz[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: uuid[], elem: uuid) → uuid[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: anyenum[], elem: anyenum) → anyenum[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: box2d[], elem: box2d) → box2d[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: geography[], elem: geography) → geography[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: geometry[], elem: geometry) → geometry[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: jsonb[], elem: jsonb) → jsonb[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: oid[], elem: oid) → oid[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timetz[], elem: timetz) → timetz[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: tuple[], elem: tuple) → tuple[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: varbit[], elem: varbit) → varbit[]

Remove from array all elements equal to elem.

+
Immutable
array_replace(array: bool[], toreplace: bool, replacewith: bool) → bool[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: bytes[], toreplace: bytes, replacewith: bytes) → bytes[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: date[], toreplace: date, replacewith: date) → date[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: decimal[], toreplace: decimal, replacewith: decimal) → decimal[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: float[], toreplace: float, replacewith: float) → float[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: inet[], toreplace: inet, replacewith: inet) → inet[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: int[], toreplace: int, replacewith: int) → int[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: interval[], toreplace: interval, replacewith: interval) → interval[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: string[], toreplace: string, replacewith: string) → string[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: time[], toreplace: time, replacewith: time) → time[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timestamp[], toreplace: timestamp, replacewith: timestamp) → timestamp[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timestamptz[], toreplace: timestamptz, replacewith: timestamptz) → timestamptz[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: uuid[], toreplace: uuid, replacewith: uuid) → uuid[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: anyenum[], toreplace: anyenum, replacewith: anyenum) → anyenum[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: box2d[], toreplace: box2d, replacewith: box2d) → box2d[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: geography[], toreplace: geography, replacewith: geography) → geography[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: geometry[], toreplace: geometry, replacewith: geometry) → geometry[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: jsonb[], toreplace: jsonb, replacewith: jsonb) → jsonb[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: oid[], toreplace: oid, replacewith: oid) → oid[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timetz[], toreplace: timetz, replacewith: timetz) → timetz[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: tuple[], toreplace: tuple, replacewith: tuple) → tuple[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: varbit[], toreplace: varbit, replacewith: varbit) → varbit[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_to_string(input: anyelement[], delim: string) → string

Join an array into a string with a delimiter.

+
Stable
array_to_string(input: anyelement[], delimiter: string, null: string) → string

Join an array into a string with a delimiter, replacing NULLs with a null string.

+
Stable
array_upper(input: anyelement[], array_dimension: int) → int

Calculates the maximum value of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
cardinality(input: anyelement[]) → int

Calculates the number of elements contained in input

+
Immutable
crdb_internal.merge_aggregated_stmt_metadata(input: jsonb[]) → jsonb

Merge an array of AggregatedStatementMetadata into a single JSONB object

+
Immutable
crdb_internal.merge_statement_stats(input: jsonb[]) → jsonb

Merge an array of appstatspb.StatementStatistics into a single JSONB object

+
Immutable
crdb_internal.merge_stats_metadata(input: jsonb[]) → jsonb

Merge an array of StmtStatsMetadata into a single JSONB object

+
Immutable
crdb_internal.merge_transaction_stats(input: jsonb[]) → jsonb

Merge an array of appstatspb.TransactionStatistics into a single JSONB object

+
Immutable
jsonb_array_to_string_array(input: jsonb) → string[]

Convert a JSONB array into a string array.

+
Immutable
string_to_array(str: string, delimiter: string) → string[]

Split a string into components on a delimiter.

+
Immutable
string_to_array(str: string, delimiter: string, null: string) → string[]

Split a string into components on a delimiter with a specified string to consider NULL.

+
Immutable
+ +### BOOL functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
ilike_escape(unescaped: string, pattern: string, escape: string) → bool

Matches case insensitively unescaped with pattern using escape as an escape token.

+
Immutable
inet_contained_by_or_equals(val: inet, container: inet) → bool

Test for subnet inclusion or equality, using only the network parts of the addresses. The host part of the addresses is ignored.

+
Immutable
inet_contains_or_equals(container: inet, val: inet) → bool

Test for subnet inclusion or equality, using only the network parts of the addresses. The host part of the addresses is ignored.

+
Immutable
inet_same_family(val: inet, val: inet) → bool

Checks if two IP addresses are of the same IP family.

+
Immutable
like_escape(unescaped: string, pattern: string, escape: string) → bool

Matches unescaped with pattern using escape as an escape token.

+
Immutable
not_ilike_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped not matches case insensitively with pattern using escape as an escape token.

+
Immutable
not_like_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped not matches with pattern using escape as an escape token.

+
Immutable
not_similar_to_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped not matches with pattern using escape as an escape token.

+
Immutable
+ +### Comparison functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
greatest(anyelement...) → anyelement

Returns the element with the greatest value.

+
Immutable
least(anyelement...) → anyelement

Returns the element with the lowest value.

+
Immutable
num_nonnulls(anyelement...) → int

Returns the number of nonnull arguments.

+
Immutable
num_nulls(anyelement...) → int

Returns the number of null arguments.

+
Immutable
+ +### Cryptographic functions + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
crypt(password: string, salt: string) → string

Generates a hash based on a password and salt. The hash algorithm and number of rounds if applicable are encoded in the salt.

+
Immutable
digest(data: bytes, type: string) → bytes

Computes a binary hash of the given data. type is the algorithm to use (md5, sha1, sha224, sha256, sha384, or sha512).

+
Immutable
digest(data: string, type: string) → bytes

Computes a binary hash of the given data. type is the algorithm to use (md5, sha1, sha224, sha256, sha384, or sha512).

+
Immutable
gen_salt(type: string) → string

Generates a salt for input into the crypt function using the default number of rounds.

+
Volatile
gen_salt(type: string, iter_count: int) → string

Generates a salt for input into the crypt function using iter_count number of rounds.

+
Volatile
hmac(data: bytes, key: bytes, type: string) → bytes

Calculates hashed MAC for data with key key. type is the same as in digest().

+
Immutable
hmac(data: string, key: string, type: string) → bytes

Calculates hashed MAC for data with key key. type is the same as in digest().

+
Immutable
+ +### DECIMAL functions + + + + + +
Function → ReturnsDescriptionVolatility
hlc_to_timestamp(hlc: decimal) → timestamptz

Returns a TimestampTZ representation of a CockroachDB HLC in decimal form.

+

Note that a TimestampTZ has less precision than a CockroachDB HLC. It is intended as +a convenience function to display HLCs in a print-friendly form. Use the decimal +value if you rely on the HLC for accuracy.

+
Immutable
+ +### Date and time functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
age(end: timestamptz, begin: timestamptz) → interval

Calculates the interval between begin and end, normalized into years, months and days.

+

Note this may not be an accurate time span since years and months are normalized +from days, and years and months are out of context. To avoid normalizing days into +months and years, use the timestamptz subtraction operator.

+
Immutable
age(val: timestamptz) → interval

Calculates the interval between val and the current time, normalized into years, months and days.

+

Note this may not be an accurate time span since years and months are normalized +from days, and years and months are out of context. To avoid normalizing days into +months and years, use now() - timestamptz.

+
Stable
clock_timestamp() → timestamp

Returns the current system time on one of the cluster nodes.

+
Volatile
clock_timestamp() → timestamptz

Returns the current system time on one of the cluster nodes.

+
Volatile
current_date() → date

Returns the date of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
current_timestamp(precision: int) → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp(precision: int) → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp(precision: int) → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
date_part(element: string, input: date) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: interval) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, +month, day, hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: time) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: timestamp) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: timestamptz) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Stable
date_part(element: string, input: timetz) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Immutable
date_trunc(element: string, input: date) → timestamptz

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: interval) → interval

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: time) → interval

Truncates input to precision element. Sets all fields that are less +significant than element to zero.

+

Compatible elements: hour, minute, second, millisecond, microsecond.

+
Immutable
date_trunc(element: string, input: timestamp) → timestamp

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Immutable
date_trunc(element: string, input: timestamptz) → timestamptz

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: timestamptz, timezone: string) → timestamptz

Truncates input to precision element in the specified timezone. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
experimental_follower_read_timestamp() → timestamptz

Same as follower_read_timestamp. This name is deprecated.

+
Volatile
experimental_strftime(input: date, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strftime(input: timestamp, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strftime(input: timestamptz, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strptime(input: string, format: string) → timestamptz

Returns input as a timestamptz using format (which uses standard strptime formatting).

+
Immutable
extract(element: string, input: date) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: interval) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, +month, day, hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: time) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: timestamp) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: timestamptz) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Stable
extract(element: string, input: timetz) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Immutable
extract_duration(element: string, input: interval) → int

Extracts element from input. +Compatible elements: hour, minute, second, millisecond, microsecond. +This is deprecated in favor of extract which supports duration.

+
Immutable
follower_read_timestamp() → timestamptz

Returns a timestamp which is very likely to be safe to perform +against a follower replica.

+

This function is intended to be used with an AS OF SYSTEM TIME clause to perform +historical reads against a time which is recent but sufficiently old for reads +to be performed against the closest replica as opposed to the currently +leaseholder for a given range.

+

Note that this function requires an enterprise license on a CCL distribution to +return a result that is less likely the closest replica. It is otherwise +hardcoded as -4.8s from the statement time, which may not result in reading from the +nearest replica.

+
Volatile
localtimestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtimestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp(precision: int) → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp(precision: int) → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtimestamp(precision: int) → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
make_date(year: int, month: int, day: int) → date

Create date (formatted according to ISO 8601) from year, month, and day fields (negative years signify BC).

+
Immutable
make_timestamp(year: int, month: int, day: int, hour: int, min: int, sec: float) → timestamp

Create timestamp (formatted according to ISO 8601) from year, month, day, hour, minute, and seconds fields (negative years signify BC).

+
Immutable
make_timestamptz(year: int, month: int, day: int, hour: int, min: int, sec: float) → timestamptz

Create timestamp (formatted according to ISO 8601) with time zone from year, month, day, hour, minute and seconds fields (negative years signify BC). If timezone is not specified, the current time zone is used.

+
Stable
make_timestamptz(year: int, month: int, day: int, hour: int, min: int, sec: float, timezone: string) → timestamptz

Create timestamp (formatted according to ISO 8601) with time zone from year, month, day, hour, minute and seconds fields (negative years signify BC). If timezone is not specified, the current time zone is used.

+
Stable
now() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
now() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
now() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
overlaps(s1: date, e1: date, s2: date, e2: date) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: date, e1: interval, s2: date, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: time, e1: interval, s2: time, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: time, e1: time, s2: time, e2: time) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamp, e1: interval, s2: timestamp, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamp, e1: timestamp, s2: timestamp, e2: timestamp) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamptz, e1: interval, s2: timestamptz, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Stable
overlaps(s1: timestamptz, e1: timestamptz, s2: timestamptz, e2: timestamptz) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timetz, e1: interval, s2: timetz, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timetz, e1: timetz, s2: timetz, e2: timetz) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
statement_timestamp() → timestamp

Returns the start time of the current statement.

+
Stable
statement_timestamp() → timestamptz

Returns the start time of the current statement.

+
Stable
strftime(input: date, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strftime(input: timestamp, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strftime(input: timestamptz, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strptime(input: string, format: string) → timestamptz

Returns input as a timestamptz using format (which uses standard strptime formatting).

+
Immutable
timeofday() → string

Returns the current system time on one of the cluster nodes as a string.

+
Stable
timezone(timezone: string, time: time) → timetz

Treat given time without time zone as located in the specified time zone.

+
Stable
timezone(timezone: string, timestamp: timestamp) → timestamptz

Treat given time stamp without time zone as located in the specified time zone.

+
Immutable
timezone(timezone: string, timestamptz: timestamptz) → timestamp

Convert given time stamp with time zone to the new time zone, with no time zone designation.

+
Immutable
timezone(timezone: string, timestamptz_string: string) → timestamp

Convert given time stamp with time zone to the new time zone, with no time zone designation.

+
Stable
timezone(timezone: string, timetz: timetz) → timetz

Convert given time with time zone to the new time zone.

+
Stable
to_char(date: date) → string

Convert a date to a string assuming the ISO, MDY DateStyle.

+
Immutable
to_char(date: date, format: string) → string

Convert a date to a string using the given format.

+
Stable
to_char(interval: interval) → string

Convert an interval to a string assuming the Postgres IntervalStyle.

+
Immutable
to_char(interval: interval, format: string) → string

Convert an interval to a string using the given format.

+
Stable
to_char(timestamp: timestamp) → string

Convert a timestamp to a string assuming the ISO, MDY DateStyle.

+
Immutable
to_char(timestamp: timestamp, format: string) → string

Convert a timestamp to a string using the given format.

+
Stable
to_char(timestamptz: timestamptz, format: string) → string

Convert a timestamp with time zone to a string using the given format.

+
Stable
to_timestamp(timestamp: float) → timestamptz

Convert Unix epoch (seconds since 1970-01-01 00:00:00+00) to timestamp with time zone.

+
Immutable
transaction_timestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
transaction_timestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
transaction_timestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
with_max_staleness(max_staleness: interval) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp within the staleness +bound that allows execution of the reads at the nearest available replica without blocking.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_max_staleness(max_staleness: interval, nearest_only: bool) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp within the staleness +bound that allows execution of the reads at the nearest available replica without blocking.

+

If nearest_only is set to true, reads that cannot be served using the nearest +available replica will error.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_min_timestamp(min_timestamp: timestamptz) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp before the min_timestamp +that allows execution of the reads at the nearest available replica without blocking.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_min_timestamp(min_timestamp: timestamptz, nearest_only: bool) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp before the min_timestamp +that allows execution of the reads at the nearest available replica without blocking.

+

If nearest_only is set to true, reads that cannot be served using the nearest +available replica will error.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
+ +### Enum functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
enum_first(val: anyenum) → anyenum

Returns the first value of the input enum type.

+
Stable
enum_last(val: anyenum) → anyenum

Returns the last value of the input enum type.

+
Stable
enum_range(lower: anyenum, upper: anyenum) → anyenum[]

Returns all values of the input enum in an ordered array between the two arguments (inclusive).

+
Stable
enum_range(val: anyenum) → anyenum[]

Returns all values of the input enum in an ordered array.

+
Stable
+ +### FLOAT functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
abs(val: decimal) → decimal

Calculates the absolute value of val.

+
Immutable
abs(val: float) → float

Calculates the absolute value of val.

+
Immutable
abs(val: int) → int

Calculates the absolute value of val.

+
Immutable
acos(val: float) → float

Calculates the inverse cosine of val.

+
Immutable
acosd(val: float) → float

Calculates the inverse cosine of val with the result in degrees.

+
Immutable
acosh(val: float) → float

Calculates the inverse hyperbolic cosine of val.

+
Immutable
asin(val: float) → float

Calculates the inverse sine of val.

+
Immutable
asind(val: float) → float

Calculates the inverse sine of val with the result in degrees.

+
Immutable
asinh(val: float) → float

Calculates the inverse hyperbolic sine of val.

+
Immutable
atan(val: float) → float

Calculates the inverse tangent of val.

+
Immutable
atan2(x: float, y: float) → float

Calculates the inverse tangent of x/y.

+
Immutable
atan2d(x: float, y: float) → float

Calculates the inverse tangent of x/y with the result in degrees.

+
Immutable
atand(val: float) → float

Calculates the inverse tangent of val with the result in degrees.

+
Immutable
atanh(val: float) → float

Calculates the inverse hyperbolic tangent of val.

+
Immutable
cbrt(val: decimal) → decimal

Calculates the cube root (∛) of val.

+
Immutable
cbrt(val: float) → float

Calculates the cube root (∛) of val.

+
Immutable
ceil(val: decimal) → decimal

Calculates the smallest integer not smaller than val.

+
Immutable
ceil(val: float) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceil(val: int) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: decimal) → decimal

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: float) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: int) → float

Calculates the smallest integer not smaller than val.

+
Immutable
cos(val: float) → float

Calculates the cosine of val.

+
Immutable
cosd(val: float) → float

Calculates the cosine of val where val is in degrees.

+
Immutable
cosh(val: float) → float

Calculates the hyperbolic cosine of val.

+
Immutable
cot(val: float) → float

Calculates the cotangent of val.

+
Immutable
cotd(val: float) → float

Calculates the cotangent of val where val is in degrees.

+
Immutable
degrees(val: float) → float

Converts val as a radian value to a degree value.

+
Immutable
div(x: decimal, y: decimal) → decimal

Calculates the integer quotient of x/y.

+
Immutable
div(x: float, y: float) → float

Calculates the integer quotient of x/y.

+
Immutable
div(x: int, y: int) → int

Calculates the integer quotient of x/y.

+
Immutable
exp(val: decimal) → decimal

Calculates e ^ val.

+
Immutable
exp(val: float) → float

Calculates e ^ val.

+
Immutable
floor(val: decimal) → decimal

Calculates the largest integer not greater than val.

+
Immutable
floor(val: float) → float

Calculates the largest integer not greater than val.

+
Immutable
floor(val: int) → float

Calculates the largest integer not greater than val.

+
Immutable
isnan(val: decimal) → bool

Returns true if val is NaN, false otherwise.

+
Immutable
isnan(val: float) → bool

Returns true if val is NaN, false otherwise.

+
Immutable
ln(val: decimal) → decimal

Calculates the natural log of val.

+
Immutable
ln(val: float) → float

Calculates the natural log of val.

+
Immutable
log(b: decimal, x: decimal) → decimal

Calculates the base b log of x.

+
Immutable
log(b: float, x: float) → float

Calculates the base b log of x.

+
Immutable
log(val: decimal) → decimal

Calculates the base 10 log of val.

+
Immutable
log(val: float) → float

Calculates the base 10 log of val.

+
Immutable
mod(x: decimal, y: decimal) → decimal

Calculates x%y.

+
Immutable
mod(x: float, y: float) → float

Calculates x%y.

+
Immutable
mod(x: int, y: int) → int

Calculates x%y.

+
Immutable
pi() → float

Returns the value for pi (3.141592653589793).

+
Immutable
pow(x: decimal, y: decimal) → decimal

Calculates x^y.

+
Immutable
pow(x: float, y: float) → float

Calculates x^y.

+
Immutable
pow(x: int, y: int) → int

Calculates x^y.

+
Immutable
power(x: decimal, y: decimal) → decimal

Calculates x^y.

+
Immutable
power(x: float, y: float) → float

Calculates x^y.

+
Immutable
power(x: int, y: int) → int

Calculates x^y.

+
Immutable
radians(val: float) → float

Converts val as a degree value to a radians value.

+
Immutable
random() → float

Returns a random floating-point number between 0 (inclusive) and 1 (exclusive). Note that the value contains at most 53 bits of randomness.

+
Volatile
round(input: decimal, decimal_accuracy: int) → decimal

Keeps decimal_accuracy number of figures to the right of the zero position in input using half away from zero rounding. If decimal_accuracy is not in the range -2^31…(2^31-1), the results are undefined.

+
Immutable
round(input: float, decimal_accuracy: int) → float

Keeps decimal_accuracy number of figures to the right of the zero position in input using half to even (banker’s) rounding.

+
Immutable
round(val: decimal) → decimal

Rounds val to the nearest integer, half away from zero: round(+/-2.4) = +/-2, round(+/-2.5) = +/-3.

+
Immutable
round(val: float) → float

Rounds val to the nearest integer using half to even (banker’s) rounding.

+
Immutable
sign(val: decimal) → decimal

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sign(val: float) → float

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sign(val: int) → int

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sin(val: float) → float

Calculates the sine of val.

+
Immutable
sind(val: float) → float

Calculates the sine of val where val is in degrees.

+
Immutable
sinh(val: float) → float

Calculates the hyperbolic sine of val.

+
Immutable
sqrt(val: decimal) → decimal

Calculates the square root of val.

+
Immutable
sqrt(val: float) → float

Calculates the square root of val.

+
Immutable
tan(val: float) → float

Calculates the tangent of val.

+
Immutable
tand(val: float) → float

Calculates the tangent of val where val is in degrees.

+
Immutable
tanh(val: float) → float

Calculates the hyperbolic tangent of val.

+
Immutable
trunc(val: decimal) → decimal

Truncates the decimal values of val.

+
Immutable
trunc(val: decimal, scale: int) → decimal

Truncate val to scale decimal places

+
Immutable
trunc(val: float) → float

Truncates the decimal values of val.

+
Immutable
+ +### Full Text Search functions + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
phraseto_tsquery(config: string, text: string) → tsquery

Converts text to a tsquery, normalizing words according to the specified configuration. The <-> operator is inserted between each token in the input.

+
Immutable
phraseto_tsquery(text: string) → tsquery

Converts text to a tsquery, normalizing words according to the default configuration. The <-> operator is inserted between each token in the input.

+
Stable
plainto_tsquery(config: string, text: string) → tsquery

Converts text to a tsquery, normalizing words according to the specified configuration. The & operator is inserted between each token in the input.

+
Immutable
plainto_tsquery(text: string) → tsquery

Converts text to a tsquery, normalizing words according to the default configuration. The & operator is inserted between each token in the input.

+
Stable
to_tsquery(config: string, text: string) → tsquery

Converts the input text into a tsquery by normalizing each word in the input according to the specified configuration. The input must already be formatted like a tsquery, in other words, subsequent tokens must be connected by a tsquery operator (&, |, <->, !).

+
Immutable
to_tsquery(text: string) → tsquery

Converts the input text into a tsquery by normalizing each word in the input according to the default configuration. The input must already be formatted like a tsquery, in other words, subsequent tokens must be connected by a tsquery operator (&, |, <->, !).

+
Stable
to_tsvector(config: string, text: string) → tsvector

Converts text to a tsvector, normalizing words according to the specified configuration. Position information is included in the result.

+
Immutable
to_tsvector(text: string) → tsvector

Converts text to a tsvector, normalizing words according to the default configuration. Position information is included in the result.

+
Stable
ts_parse(parser_name: string, document: string) → tuple{int AS tokid, string AS token}

ts_parse parses the given document and returns a series of records, one for each token produced by parsing. Each record includes a tokid showing the assigned token type and a token which is the text of the token.

+
Stable
ts_rank(vector: tsvector, query: tsquery) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(vector: tsvector, query: tsquery, normalization: int) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(weights: float[], vector: tsvector, query: tsquery) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(weights: float[], vector: tsvector, query: tsquery, normalization: int) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
+ +### Fuzzy String Matching functions + + + + + + + +
Function → ReturnsDescriptionVolatility
levenshtein(source: string, target: string) → int

Calculates the Levenshtein distance between two strings. Maximum input length is 255 characters.

+
Immutable
levenshtein(source: string, target: string, ins_cost: int, del_cost: int, sub_cost: int) → int

Calculates the Levenshtein distance between two strings. The cost parameters specify how much to charge for each edit operation. Maximum input length is 255 characters.

+
Immutable
soundex(source: string) → string

Convert a string to its Soundex code.

+
Immutable
+ +### ID generation functions + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
experimental_uuid_v4() → bytes

Returns a UUID.

+
Volatile
gen_random_ulid() → uuid

Generates a random ULID and returns it as a value of UUID type.

+
Volatile
gen_random_uuid() → uuid

Generates a random version 4 UUID, and returns it as a value of UUID type.

+
Volatile
unique_rowid() → int

Returns a unique ID used by CockroachDB to generate unique row IDs if a Primary Key isn’t defined for the table. The value is a combination of the insert timestamp and the ID of the node executing the statement, which guarantees this combination is globally unique. However, there can be gaps and the order is not completely guaranteed.

+
Volatile
unordered_unique_rowid() → int

Returns a unique ID. The value is a combination of the insert timestamp and the ID of the node executing the statement, which guarantees this combination is globally unique. Because of the way it is generated, there is no ordering.

+
Volatile
uuid_generate_v1() → uuid

Generates a version 1 UUID, and returns it as a value of UUID type. To avoid exposing the server’s real MAC address, this uses a random MAC address and a timestamp. Essentially, this is an alias for uuid_generate_v1mc.

+
Volatile
uuid_generate_v1mc() → uuid

Generates a version 1 UUID, and returns it as a value of UUID type. This uses a random MAC address and a timestamp.

+
Volatile
uuid_generate_v3(namespace: uuid, name: string) → uuid

Generates a version 3 UUID in the given namespace using the specified input name, with md5 as the hashing method. The namespace should be one of the special constants produced by the uuid_ns_*() functions.

+
Immutable
uuid_generate_v4() → uuid

Generates a random version 4 UUID, and returns it as a value of UUID type.

+
Volatile
uuid_generate_v5(namespace: uuid, name: string) → uuid

Generates a version 5 UUID in the given namespace using the specified input name. This is similar to a version 3 UUID, except it uses SHA-1 for hashing.

+
Immutable
uuid_nil() → uuid

Returns a nil UUID constant.

+
Immutable
uuid_ns_dns() → uuid

Returns a constant designating the DNS namespace for UUIDs.

+
Immutable
uuid_ns_oid() → uuid

Returns a constant designating the ISO object identifier (OID) namespace for UUIDs. These are unrelated to the OID type used internally in the database.

+
Immutable
uuid_ns_url() → uuid

Returns a constant designating the URL namespace for UUIDs.

+
Immutable
uuid_ns_x500() → uuid

Returns a constant designating the X.500 distinguished name (DN) namespace for UUIDs.

+
Immutable
uuid_v4() → bytes

Returns a UUID.

+
Volatile
+ +### INET functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
abbrev(val: inet) → string

Converts the combined IP address and prefix length to an abbreviated display format as text. For INET types, this will omit the prefix length if it’s not the default (32 for IPv4, 128 for IPv6).

+

For example, abbrev('192.168.1.2/24') returns '192.168.1.2/24'

+
Immutable
broadcast(val: inet) → inet

Gets the broadcast address for the network address represented by the value.

+

For example, broadcast('192.168.1.2/24') returns '192.168.1.255/24'

+
Immutable
family(val: inet) → int

Extracts the IP family of the value; 4 for IPv4, 6 for IPv6.

+

For example, family('::1') returns 6

+
Immutable
host(val: inet) → string

Extracts the address part of the combined address/prefixlen value as text.

+

For example, host('192.168.1.2/16') returns '192.168.1.2'

+
Immutable
hostmask(val: inet) → inet

Creates an IP host mask corresponding to the prefix length in the value.

+

For example, hostmask('192.168.1.2/16') returns '0.0.255.255'

+
Immutable
masklen(val: inet) → int

Retrieves the prefix length stored in the value.

+

For example, masklen('192.168.1.2/16') returns 16

+
Immutable
netmask(val: inet) → inet

Creates an IP network mask corresponding to the prefix length in the value.

+

For example, netmask('192.168.1.2/16') returns '255.255.0.0'

+
Immutable
set_masklen(val: inet, prefixlen: int) → inet

Sets the prefix length of val to prefixlen.

+

For example, set_masklen('192.168.1.2', 16) returns '192.168.1.2/16'.

+
Immutable
+ +### INT functions + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
crc32c(bytes...) → int

Calculates the CRC-32 hash using the Castagnoli polynomial.

+
Leakproof
crc32c(string...) → int

Calculates the CRC-32 hash using the Castagnoli polynomial.

+
Leakproof
crc32ieee(bytes...) → int

Calculates the CRC-32 hash using the IEEE polynomial.

+
Leakproof
crc32ieee(string...) → int

Calculates the CRC-32 hash using the IEEE polynomial.

+
Leakproof
fnv32(bytes...) → int

Calculates the 32-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv32(string...) → int

Calculates the 32-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv32a(bytes...) → int

Calculates the 32-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv32a(string...) → int

Calculates the 32-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv64(bytes...) → int

Calculates the 64-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv64(string...) → int

Calculates the 64-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv64a(bytes...) → int

Calculates the 64-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv64a(string...) → int

Calculates the 64-bit FNV-1a hash value of a set of values.

+
Leakproof
width_bucket(operand: decimal, b1: decimal, b2: decimal, count: int) → int

return the bucket number to which operand would be assigned in a histogram having count equal-width buckets spanning the range b1 to b2.

+
Immutable
width_bucket(operand: int, b1: int, b2: int, count: int) → int

return the bucket number to which operand would be assigned in a histogram having count equal-width buckets spanning the range b1 to b2.

+
Immutable
width_bucket(operand: anyelement, thresholds: anyelement[]) → int

return the bucket number to which operand would be assigned given an array listing the lower bounds of the buckets; returns 0 for an input less than the first lower bound; the thresholds array must be sorted, smallest first, or unexpected results will be obtained

+
Immutable
+ +### JSONB functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_to_json(array: anyelement[]) → jsonb

Returns the array as JSON or JSONB.

+
Stable
array_to_json(array: anyelement[], pretty_bool: bool) → jsonb

Returns the array as JSON or JSONB.

+
Stable
crdb_internal.job_payload_type(data: bytes) → string

Reads the type from the jobspb.Payload protocol message.

+
Immutable
crdb_internal.json_to_pb(pbname: string, json: jsonb) → bytes

Convert JSONB data to protocol message bytes

+
Immutable
crdb_internal.pb_to_json(pbname: string, data: bytes) → jsonb

Converts protocol message to its JSONB representation.

+
Immutable
crdb_internal.pb_to_json(pbname: string, data: bytes, emit_defaults: bool) → jsonb

Converts protocol message to its JSONB representation.

+
Immutable
crdb_internal.pb_to_json(pbname: string, data: bytes, emit_defaults: bool, emit_redacted: bool) → jsonb

Converts protocol message to its JSONB representation.

+
Immutable
json_array_elements(input: jsonb) → jsonb

Expands a JSON array to a set of JSON values.

+
Immutable
json_array_elements_text(input: jsonb) → string

Expands a JSON array to a set of text values.

+
Immutable
json_array_length(json: jsonb) → int

Returns the number of elements in the outermost JSON or JSONB array.

+
Immutable
json_build_array(anyelement...) → jsonb

Builds a possibly-heterogeneously-typed JSON or JSONB array out of a variadic argument list.

+
Stable
json_build_object(anyelement...) → jsonb

Builds a JSON object out of a variadic argument list.

+
Stable
json_each(input: jsonb) → tuple{string AS key, jsonb AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs.

+
Immutable
json_each_text(input: jsonb) → tuple{string AS key, string AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs. The returned values will be of type text.

+
Immutable
json_extract_path(jsonb, string...) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
json_extract_path_text(jsonb, string...) → string

Returns the JSON value as text pointed to by the variadic arguments.

+
Immutable
json_object(keys: string[], values: string[]) → jsonb

This form of json_object takes keys and values pairwise from two separate arrays. In all other respects it is identical to the one-argument form.

+
Immutable
json_object(texts: string[]) → jsonb

Builds a JSON or JSONB object out of a text array. The array must have exactly one dimension with an even number of members, in which case they are taken as alternating key/value pairs.

+
Immutable
json_populate_record(base: anyelement, from_json: jsonb) → anyelement

Expands the object in from_json to a row whose columns match the record type defined by base.

+
Stable
json_populate_recordset(base: anyelement, from_json: jsonb) → anyelement

Expands the outermost array of objects in from_json to a set of rows whose columns match the record type defined by base

+
Stable
json_remove_path(val: jsonb, path: string[]) → jsonb

Remove the specified path from the JSON object.

+
Immutable
json_set(val: jsonb, path: string[], to: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
json_set(val: jsonb, path: string[], to: jsonb, create_missing: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If create_missing is false, new keys will not be inserted to objects and values will not be prepended or appended to arrays.

+
Immutable
json_strip_nulls(from_json: jsonb) → jsonb

Returns from_json with all object fields that have null values omitted. Other null values are untouched.

+
Immutable
json_typeof(val: jsonb) → string

Returns the type of the outermost JSON value as a text string.

+
Immutable
json_valid(string: string) → bool

Returns whether the given string is a valid JSON or not

+
Immutable
jsonb_array_elements(input: jsonb) → jsonb

Expands a JSON array to a set of JSON values.

+
Immutable
jsonb_array_elements_text(input: jsonb) → string

Expands a JSON array to a set of text values.

+
Immutable
jsonb_array_length(json: jsonb) → int

Returns the number of elements in the outermost JSON or JSONB array.

+
Immutable
jsonb_build_array(anyelement...) → jsonb

Builds a possibly-heterogeneously-typed JSON or JSONB array out of a variadic argument list.

+
Stable
jsonb_build_object(anyelement...) → jsonb

Builds a JSON object out of a variadic argument list.

+
Stable
jsonb_each(input: jsonb) → tuple{string AS key, jsonb AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs.

+
Immutable
jsonb_each_text(input: jsonb) → tuple{string AS key, string AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs. The returned values will be of type text.

+
Immutable
jsonb_exists_any(json: jsonb, array: string[]) → bool

Returns whether any of the strings in the text array exist as top-level keys or array elements

+
Immutable
jsonb_extract_path(jsonb, string...) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
jsonb_extract_path_text(jsonb, string...) → string

Returns the JSON value as text pointed to by the variadic arguments.

+
Immutable
jsonb_insert(target: jsonb, path: string[], new_val: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments. new_val will be inserted before path target.

+
Immutable
jsonb_insert(target: jsonb, path: string[], new_val: jsonb, insert_after: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If insert_after is true (default is false), new_val will be inserted after path target.

+
Immutable
jsonb_object(keys: string[], values: string[]) → jsonb

This form of json_object takes keys and values pairwise from two separate arrays. In all other respects it is identical to the one-argument form.

+
Immutable
jsonb_object(texts: string[]) → jsonb

Builds a JSON or JSONB object out of a text array. The array must have exactly one dimension with an even number of members, in which case they are taken as alternating key/value pairs.

+
Immutable
jsonb_populate_record(base: anyelement, from_json: jsonb) → anyelement

Expands the object in from_json to a row whose columns match the record type defined by base.

+
Stable
jsonb_populate_recordset(base: anyelement, from_json: jsonb) → anyelement

Expands the outermost array of objects in from_json to a set of rows whose columns match the record type defined by base

+
Stable
jsonb_pretty(val: jsonb) → string

Returns the given JSON value as a STRING indented and with newlines.

+
Immutable
jsonb_set(val: jsonb, path: string[], to: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
jsonb_set(val: jsonb, path: string[], to: jsonb, create_missing: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If create_missing is false, new keys will not be inserted to objects and values will not be prepended or appended to arrays.

+
Immutable
jsonb_strip_nulls(from_json: jsonb) → jsonb

Returns from_json with all object fields that have null values omitted. Other null values are untouched.

+
Immutable
jsonb_typeof(val: jsonb) → string

Returns the type of the outermost JSON value as a text string.

+
Immutable
row_to_json(row: tuple) → jsonb

Returns the row as a JSON object.

+
Stable
to_json(val: anyelement) → jsonb

Returns the value as JSON or JSONB.

+
Stable
to_jsonb(val: anyelement) → jsonb

Returns the value as JSON or JSONB.

+
Stable
+ +### Multi-region functions + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
crdb_internal.filter_multiregion_fields_from_zone_config_sql(val: string) → string

Takes in a CONFIGURE ZONE SQL statement and returns a modified +SQL statement omitting multi-region related zone configuration fields. +If the CONFIGURE ZONE statement can be inferred by the database’s or +table’s zone configuration this will return NULL.

+
Stable
crdb_internal.reset_multi_region_zone_configs_for_database(id: int) → bool

Resets the zone configuration for a multi-region database to +match its original state. No-ops if the given database ID is not multi-region +enabled.

+
Volatile
crdb_internal.reset_multi_region_zone_configs_for_table(id: int) → bool

Resets the zone configuration for a multi-region table to +match its original state. No-ops if the given table ID is not a multi-region +table.

+
Volatile
crdb_internal.validate_multi_region_zone_configs() → bool

Validates that all multi-region zone configurations are correctly set up +for the current database, including all tables, indexes and partitions underneath. +Returns an error if validation fails. This builtin uses un-leased versions of +each descriptor, requiring extra round trips.

+
Volatile
default_to_database_primary_region(val: string) → string

Returns the given region if the region has been added to the current database. +Otherwise, this will return the primary region of the current database. +This will error if the current database is not a multi-region database.

+
Stable
gateway_region() → string

Returns the region of the connection’s current node as defined by +the locality flag on node startup. Returns an error if no region is set.

+
Stable
rehome_row() → string

Returns the region of the connection’s current node as defined by +the locality flag on node startup. Returns an error if no region is set.

+
Stable
+ +### STRING[] functions + + + + + + +
Function → ReturnsDescriptionVolatility
regexp_split_to_array(string: string, pattern: string) → string[]

Split string using a POSIX regular expression as the delimiter.

+
Immutable
regexp_split_to_array(string: string, pattern: string, flags: string) → string[]

Split string using a POSIX regular expression as the delimiter with flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
+ +### Sequence functions + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
currval(sequence_name: string) → int

Returns the latest value obtained with nextval for this sequence in this session.

+
Volatile
currval(sequence_name: regclass) → int

Returns the latest value obtained with nextval for this sequence in this session.

+
Volatile
lastval() → int

Return value most recently obtained with nextval in this session.

+
Volatile
nextval(sequence_name: string) → int

Advances the given sequence and returns its new value.

+
Volatile
nextval(sequence_name: regclass) → int

Advances the given sequence and returns its new value.

+
Volatile
setval(sequence_name: string, value: int) → int

Set the given sequence’s current value. The next call to nextval will return value + Increment

+
Volatile
setval(sequence_name: string, value: int, is_called: bool) → int

Set the given sequence’s current value. If is_called is false, the next call to nextval will return value; otherwise value + Increment.

+
Volatile
setval(sequence_name: regclass, value: int) → int

Set the given sequence’s current value. The next call to nextval will return value + Increment

+
Volatile
setval(sequence_name: regclass, value: int, is_called: bool) → int

Set the given sequence’s current value. If is_called is false, the next call to nextval will return value; otherwise value + Increment.

+
Volatile
+ +### Set-returning functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
aclexplode(aclitems: string[]) → tuple{oid AS grantor, oid AS grantee, string AS privilege_type, bool AS is_grantable}

Produces a virtual table containing aclitem stuff (returns no rows as this feature is unsupported in CockroachDB)

+
Stable
crdb_internal.scan(span: bytes[]) → tuple{bytes AS key, bytes AS value, string AS ts}

Returns the raw keys and values from the specified span

+
Stable
crdb_internal.scan(start_key: bytes, end_key: bytes) → tuple{bytes AS key, bytes AS value, string AS ts}

Returns the raw keys and values with their timestamp from the specified span

+
Stable
crdb_internal.tenant_span_stats() → tuple{int AS database_id, int AS table_id, int AS range_count, int AS approximate_disk_bytes, int AS live_bytes, int AS total_bytes, float AS live_percentage}

Returns statistics (range count, disk size, live range bytes, total range bytes, live range byte percentage) for all of the tenant’s tables.

+
Stable
crdb_internal.tenant_span_stats(database_id: int) → tuple{int AS database_id, int AS table_id, int AS range_count, int AS approximate_disk_bytes, int AS live_bytes, int AS total_bytes, float AS live_percentage}

Returns statistics (range count, disk size, live range bytes, total range bytes, live range byte percentage) for tables of the provided database id.

+
Stable
crdb_internal.tenant_span_stats(database_id: int, table_id: int) → tuple{int AS database_id, int AS table_id, int AS range_count, int AS approximate_disk_bytes, int AS live_bytes, int AS total_bytes, float AS live_percentage}

Returns statistics (range count, disk size, live range bytes, total range bytes, live range byte percentage) for the provided table id.

+
Stable
crdb_internal.testing_callback(name: string) → int

For internal CRDB testing only. The function calls a callback identified by name registered with the server by the test.

+
Volatile
crdb_internal.unary_table() → tuple

Produces a virtual table containing a single row with no values.

+

This function is used only by CockroachDB’s developers for testing purposes.

+
Volatile
generate_series(start: int, end: int) → int

Produces a virtual table containing the integer values from start to end, inclusive.

+
Immutable
generate_series(start: int, end: int, step: int) → int

Produces a virtual table containing the integer values from start to end, inclusive, by increment of step.

+
Immutable
generate_series(start: timestamp, end: timestamp, step: interval) → timestamp

Produces a virtual table containing the timestamp values from start to end, inclusive, by increment of step.

+
Immutable
generate_series(start: timestamptz, end: timestamptz, step: interval) → timestamptz

Produces a virtual table containing the timestampTZ values from start to end, inclusive, by increment of step.

+
Immutable
generate_subscripts(array: anyelement[]) → int

Returns a series comprising the given array’s subscripts.

+
Immutable
generate_subscripts(array: anyelement[], dim: int) → int

Returns a series comprising the given array’s subscripts.

+
Immutable
generate_subscripts(array: anyelement[], dim: int, reverse: bool) → int

Returns a series comprising the given array’s subscripts.

+

When reverse is true, the series is returned in reverse order.

+
Immutable
information_schema._pg_expandarray(input: anyelement[]) → tuple{anyelement AS x, int AS n}

Returns the input array as a set of rows with an index

+
Immutable
json_object_keys(input: jsonb) → string

Returns sorted set of keys in the outermost JSON object.

+
Immutable
json_to_record(input: jsonb) → tuple

Builds an arbitrary record from a JSON object.

+
Stable
json_to_recordset(input: jsonb) → tuple

Builds an arbitrary set of records from a JSON array of objects.

+
Stable
jsonb_object_keys(input: jsonb) → string

Returns sorted set of keys in the outermost JSON object.

+
Immutable
jsonb_to_record(input: jsonb) → tuple

Builds an arbitrary record from a JSON object.

+
Stable
jsonb_to_recordset(input: jsonb) → tuple

Builds an arbitrary set of records from a JSON array of objects.

+
Stable
pg_get_keywords() → tuple{string AS word, string AS catcode, string AS catdesc}

Produces a virtual table containing the keywords known to the SQL parser.

+
Immutable
pg_options_to_table(options: string[]) → tuple{string AS option_name, string AS option_value}

Converts the options array format to a table.

+
Stable
regexp_split_to_table(string: string, pattern: string) → string

Split string using a POSIX regular expression as the delimiter.

+
Immutable
regexp_split_to_table(string: string, pattern: string, flags: string) → string

Split string using a POSIX regular expression as the delimiter with flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
unnest(anyelement[], anyelement[], anyelement[]...) → tuple{anyelement AS unnest, anyelement AS unnest, anyelement AS unnest}

Returns the input arrays as a set of rows

+
Immutable
unnest(input: anyelement[]) → anyelement

Returns the input array as a set of rows

+
Immutable
+ +### Spatial functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
_st_contains(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no points of geometry_b lie in the exterior of geometry_a, and there is at least one point in the interior of geometry_b that lies in the interior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_containsproperly(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_b intersects the interior of geometry_a but not the boundary or exterior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_coveredby(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_a is outside geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_coveredby(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_covers(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_b is outside geography_a.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_covers(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_crosses(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a has some - but not all - interior points in common with geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dfullywithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, inclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than or equal to distance units.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dfullywithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, exclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than distance units.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_equals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is spatially equal to geometry_b, i.e. ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = true.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_intersects(geography_a: geography, geography_b: geography) → bool

Returns true if geography_a shares any portion of space with geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_intersects(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_overlaps(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a intersects but does not completely contain geometry_b, or vice versa. “Does not completely” implies ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = false.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_touches(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if the only points in common between geometry_a and geometry_b are on the boundary. Note points do not touch other points.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_within(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is completely inside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
addgeometrycolumn(catalog_name: string, schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(catalog_name: string, schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
geometrytype(geometry: geometry) → string

Returns the type of geometry as a string.

+

This function utilizes the GEOS module.

+
Immutable
geomfromewkb(val: bytes) → geometry

Returns the Geometry from an EWKB representation.

+
Immutable
geomfromewkt(val: string) → geometry

Returns the Geometry from an EWKT representation.

+
Immutable
postgis_addbbox(geometry: geometry) → geometry

Compatibility placeholder function with PostGIS. This does not perform any operation on the Geometry.

+
Immutable
postgis_dropbbox(geometry: geometry) → geometry

Compatibility placeholder function with PostGIS. This does not perform any operation on the Geometry.

+
Immutable
postgis_extensions_upgrade() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_full_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_geos_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_getbbox(geometry: geometry) → box2d

Returns a box2d encapsulating the given Geometry.

+
Immutable
postgis_hasbbox(geometry: geometry) → bool

Returns whether a given Geometry has a bounding box. False for points and empty geometries; always true otherwise.

+
Immutable
postgis_lib_build_date() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_lib_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_liblwgeom_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_libxml_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_proj_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_build_date() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_installed() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_released() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_wagyu_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
st_addmeasure(geometry: geometry, start: float, end: float) → geometry

Returns a copy of a LineString or MultiLineString with measure coordinates linearly interpolated between the specified start and end values. Any existing M coordinates will be overwritten.

+
Immutable
st_addpoint(line_string: geometry, point: geometry) → geometry

Adds a Point to the end of a LineString.

+
Immutable
st_addpoint(line_string: geometry, point: geometry, index: int) → geometry

Adds a Point to a LineString at the given 0-based index (-1 to append).

+
Immutable
st_affine(geometry: geometry, a: float, b: float, c: float, d: float, e: float, f: float, g: float, h: float, i: float, x_off: float, y_off: float, z_off: float) → geometry

Applies a 3D affine transformation to the given geometry.

+

The matrix transformation will be applied as follows for each coordinate: +/ a b c x_off \ / x
+| d e f y_off | | y | +| g h i z_off | | z | +\ 0 0 0 1 / \ 0 /

+
Immutable
st_affine(geometry: geometry, a: float, b: float, d: float, e: float, x_off: float, y_off: float) → geometry

Applies a 2D affine transformation to the given geometry.

+

The matrix transformation will be applied as follows for each coordinate: +/ a b x_off \ / x
+| d e y_off | | y | +\ 0 0 1 / \ 0 /

+
Immutable
st_angle(line1: geometry, line2: geometry) → float

Returns the clockwise angle between two LINESTRING geometries, treating them as vectors between their start- and endpoints. Returns NULL if any vectors have 0 length.

+
Immutable
st_angle(point1: geometry, point2: geometry, point3: geometry) → float

Returns the clockwise angle between the vectors formed by point2,point1 and point2,point3. The arguments must be POINT geometries. Returns NULL if any vectors have 0 length.

+
Immutable
st_angle(point1: geometry, point2: geometry, point3: geometry, point4: geometry) → float

Returns the clockwise angle between the vectors formed by point1,point2 and point3,point4. The arguments must be POINT geometries. Returns NULL if any vectors have 0 length.

+
Immutable
st_area(geography: geography) → float

Returns the area of the given geography in meters^2. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_area(geography: geography, use_spheroid: bool) → float

Returns the area of the given geography in meters^2.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_area(geometry: geometry) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_area(geometry_str: string) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_area2d(geometry: geometry) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_asbinary(geography: geography) → bytes

Returns the WKB representation of a given Geography.

+
Immutable
st_asbinary(geography: geography, xdr_or_ndr: string) → bytes

Returns the WKB representation of a given Geography. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_asbinary(geometry: geometry) → bytes

Returns the WKB representation of a given Geometry.

+
Immutable
st_asbinary(geometry: geometry, xdr_or_ndr: string) → bytes

Returns the WKB representation of a given Geometry. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_asencodedpolyline(geometry: geometry) → string

Returns the geometry as an Encoded Polyline. +This format is used by Google Maps with precision=5 and by Open Source Routing Machine with precision=5 and 6. +Preserves 5 decimal places.

+
Immutable
st_asencodedpolyline(geometry: geometry, precision: int4) → string

Returns the geometry as an Encoded Polyline. +This format is used by Google Maps with precision=5 and by Open Source Routing Machine with precision=5 and 6. +Precision specifies how many decimal places will be preserved in Encoded Polyline. Value should be the same on encoding and decoding, or coordinates will be incorrect.

+
Immutable
st_asewkb(geography: geography) → bytes

Returns the EWKB representation of a given Geography.

+
Immutable
st_asewkb(geometry: geometry) → bytes

Returns the EWKB representation of a given Geometry.

+
Immutable
st_asewkt(geography: geography) → string

Returns the EWKT representation of a given Geography. A default of 15 decimal digits is used.

+
Immutable
st_asewkt(geography: geography, max_decimal_digits: int) → string

Returns the EWKT representation of a given Geography. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_asewkt(geometry: geometry) → string

Returns the EWKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+
Immutable
st_asewkt(geometry: geometry, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_asewkt(geometry_str: string) → string

Returns the EWKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asewkt(geometry_str: string, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geography: geography) → string

Returns the GeoJSON representation of a given Geography. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(geography: geography, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geography with max_decimal_digits output for each coordinate value.

+
Immutable
st_asgeojson(geography: geography, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geography with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option (default for Geography)
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326
  • +
+
Immutable
st_asgeojson(geometry: geometry) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(geometry: geometry, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+
Immutable
st_asgeojson(geometry: geometry, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326 (default for Geometry)
  • +
+
Immutable
st_asgeojson(geometry_str: string) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geometry_str: string, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geometry_str: string, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326 (default for Geometry)
  • +
+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(row: tuple) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(row: tuple, geo_column: string) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. Coordinates have a maximum of 9 decimal digits.

+
Stable
st_asgeojson(row: tuple, geo_column: string, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. max_decimal_digits will be output for each coordinate value.

+
Stable
st_asgeojson(row: tuple, geo_column: string, max_decimal_digits: int, pretty: bool) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. max_decimal_digits will be output for each coordinate value. Output will be pretty printed in JSON if pretty is true.

+
Stable
st_ashexewkb(geography: geography) → string

Returns the EWKB representation in hex of a given Geography.

+
Immutable
st_ashexewkb(geography: geography, xdr_or_ndr: string) → string

Returns the EWKB representation in hex of a given Geography. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_ashexewkb(geometry: geometry) → string

Returns the EWKB representation in hex of a given Geometry.

+
Immutable
st_ashexewkb(geometry: geometry, xdr_or_ndr: string) → string

Returns the EWKB representation in hex of a given Geometry. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_ashexwkb(geography: geography) → string

Returns the WKB representation in hex of a given Geography.

+
Immutable
st_ashexwkb(geometry: geometry) → string

Returns the WKB representation in hex of a given Geometry.

+
Immutable
st_askml(geography: geography) → string

Returns the KML representation of a given Geography.

+
Immutable
st_askml(geometry: geometry) → string

Returns the KML representation of a given Geometry.

+
Immutable
st_askml(geometry_str: string) → string

Returns the KML representation of a given Geometry.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_astext(geography: geography) → string

Returns the WKT representation of a given Geography. A default of 15 decimal digits is used.

+
Immutable
st_astext(geography: geography, max_decimal_digits: int) → string

Returns the WKT representation of a given Geography. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_astext(geometry: geometry) → string

Returns the WKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+
Immutable
st_astext(geometry: geometry, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_astext(geometry_str: string) → string

Returns the WKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_astext(geometry_str: string, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int, precision_z: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int, precision_z: int, precision_m: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_azimuth(geography_a: geography, geography_b: geography) → float

Returns the azimuth in radians of the segment defined by the given point geographies, or NULL if the two points are coincident. It is solved using the Inverse geodesic problem.

+

The azimuth angle is referenced from north, and is positive clockwise: North = 0; East = π/2; South = π; West = 3π/2.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_azimuth(geometry_a: geometry, geometry_b: geometry) → float

Returns the azimuth in radians of the segment defined by the given point geometries, or NULL if the two points are coincident.

+

The azimuth angle is referenced from north, and is positive clockwise: North = 0; East = π/2; South = π; West = 3π/2.

+
Immutable
st_boundary(geometry: geometry) → geometry

Returns the closure of the combinatorial boundary of this Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_box2dfromgeohash(geohash: string) → box2d

Return a Box2D from a GeoHash string with max precision.

+
Immutable
st_box2dfromgeohash(geohash: string, precision: int) → box2d

Return a Box2D from a GeoHash string with supplied precision.

+
Immutable
st_buffer(geography: geography, distance: float) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geography: geography, distance: float, buffer_style_params: string) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geography: geography, distance: float, quad_segs: int) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geometry: geometry, distance: decimal) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float, buffer_style_params: string) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float, quad_segs: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry_str: string, distance: decimal) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float, buffer_style_params: string) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float, quad_segs: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_centroid(geography: geography) → geography

Returns the centroid of given geography. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_centroid(geography: geography, use_spheroid: bool) → geography

Returns the centroid of given geography.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_centroid(geometry: geometry) → geometry

Returns the centroid of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_centroid(geometry_str: string) → geometry

Returns the centroid of the given geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_clipbybox2d(geometry: geometry, box2d: box2d) → geometry

Clips the geometry to conform to the bounding box specified by box2d.

+
Immutable
st_closestpoint(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the 2-dimensional point on geometry_a that is closest to geometry_b. This is the first point of the shortest line.

+
Immutable
st_collectionextract(geometry: geometry, type: int) → geometry

Given a collection, returns a multitype consisting only of elements of the specified type. If there are no elements of the given type, an EMPTY geometry is returned. Types are specified as 1=POINT, 2=LINESTRING, 3=POLYGON - other types are not supported.

+
Immutable
st_collectionhomogenize(geometry: geometry) → geometry

Returns the “simplest” representation of a collection’s contents. Collections of a single type will be returned as an appropriate multitype, or a singleton if it only contains a single geometry.

+
Immutable
st_combinebbox(box2d: box2d, geometry: geometry) → box2d

Combines the current bounding box with the bounding box of the Geometry.

+
Immutable
st_contains(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no points of geometry_b lie in the exterior of geometry_a, and there is at least one point in the interior of geometry_b that lies in the interior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_containsproperly(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_b intersects the interior of geometry_a but not the boundary or exterior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_convexhull(geometry: geometry) → geometry

Returns a geometry that represents the Convex Hull of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_coorddim(geometry: geometry) → int

Returns the number of coordinate dimensions of a given Geometry.

+
Immutable
st_coveredby(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_a is outside geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_coveredby(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_coveredby(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_covers(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_b is outside geography_a.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_covers(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_covers(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_crosses(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a has some - but not all - interior points in common with geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dfullywithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, inclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than or equal to distance units.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dfullywithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, exclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than distance units.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_difference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the difference of two Geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_dimension(geometry: geometry) → int

Returns the number of topological dimensions of a given Geometry.

+
Immutable
st_disjoint(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a does not overlap, touch or is within geometry_b.

+

This function utilizes the GEOS module.

+
Immutable
st_distance(geography_a: geography, geography_b: geography) → float

Returns the distance in meters between geography_a and geography_b. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_distance(geography_a: geography, geography_b: geography, use_spheroid: bool) → float

Returns the distance in meters between geography_a and geography_b.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_distance(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance between the given geometries.

+
Immutable
st_distance(geometry_a_str: string, geometry_b_str: string) → float

Returns the distance between the given geometries.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_distancesphere(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance in meters between geometry_a and geometry_b assuming the coordinates represent lng/lat points on a sphere.

+

This function utilizes the S2 library for spherical calculations.

+
Immutable
st_distancespheroid(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance in meters between geometry_a and geometry_b assuming the coordinates represent lng/lat points on a spheroid.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_dwithin(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geometry_a_str: string, geometry_b_str: string, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geometry_a_str: string, geometry_b_str: string, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_endpoint(geometry: geometry) → geometry

Returns the last point of a geometry which has shape LineString. Returns NULL if the geometry is not a LineString.

+
Immutable
st_envelope(box2d: box2d) → geometry

Returns a bounding geometry for the given box.

+
Immutable
st_envelope(geometry: geometry) → geometry

Returns a bounding envelope for the given geometry.

+

For geometries which have a POINT or LINESTRING bounding box (i.e. is a single point +or a horizontal or vertical line), a POINT or LINESTRING is returned. Otherwise, the +returned POLYGON will be ordered Bottom Left, Top Left, Top Right, Bottom Right, +Bottom Left.

+
Immutable
st_equals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is spatially equal to geometry_b, i.e. ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = true.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_estimatedextent(schema_name: string, table_name: string, geocolumn_name: string) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+
Stable
st_estimatedextent(schema_name: string, table_name: string, geocolumn_name: string, parent_only: bool) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+

The parent_only boolean is always ignored.

+
Stable
st_estimatedextent(table_name: string, geocolumn_name: string) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+
Stable
st_expand(box2d: box2d, delta: float) → box2d

Extends the box2d by delta units across all dimensions.

+
Immutable
st_expand(box2d: box2d, delta_x: float, delta_y: float) → box2d

Extends the box2d by delta_x units in the x dimension and delta_y units in the y dimension.

+
Immutable
st_expand(geometry: geometry, delta: float) → geometry

Extends the bounding box represented by the geometry by delta units across all dimensions, returning a Polygon representing the new bounding box.

+
Immutable
st_expand(geometry: geometry, delta_x: float, delta_y: float) → geometry

Extends the bounding box represented by the geometry by delta_x units in the x dimension and delta_y units in the y dimension, returning a Polygon representing the new bounding box.

+
Immutable
st_exteriorring(geometry: geometry) → geometry

Returns the exterior ring of a Polygon as a LineString. Returns NULL if the shape is not a Polygon.

+
Immutable
st_flipcoordinates(geometry: geometry) → geometry

Returns a new geometry with the X and Y axes flipped.

+
Immutable
st_force2d(geometry: geometry) → geometry

Returns a Geometry that is forced into XY layout with any Z or M dimensions discarded.

+
Immutable
st_force3d(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3d(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3dm(geometry: geometry) → geometry

Returns a Geometry that is forced into XYM layout. If a M coordinate doesn’t exist, it will be set to 0. If a Z coordinate is present, it will be discarded.

+
Immutable
st_force3dm(geometry: geometry, defaultM: float) → geometry

Returns a Geometry that is forced into XYM layout. If a M coordinate doesn’t exist, it will be set to the specified default M value. If a Z coordinate is present, it will be discarded.

+
Immutable
st_force3dz(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3dz(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate is present, it will be discarded.

+
Immutable
st_force4d(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZM layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate doesn’t exist, it will be set to 0.

+
Immutable
st_force4d(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate doesn’t exist, it will be set to 0.

+
Immutable
st_force4d(geometry: geometry, defaultZ: float, defaultM: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified Z value. If a M coordinate doesn’t exist, it will be set to the specified M value.

+
Immutable
st_forcecollection(geometry: geometry) → geometry

Converts the geometry into a GeometryCollection.

+
Immutable
st_forcepolygonccw(geometry: geometry) → geometry

Returns a Geometry where all Polygon objects have exterior rings in the counter-clockwise orientation and interior rings in the clockwise orientation. Non-Polygon objects are unchanged.

+
Immutable
st_forcepolygoncw(geometry: geometry) → geometry

Returns a Geometry where all Polygon objects have exterior rings in the clockwise orientation and interior rings in the counter-clockwise orientation. Non-Polygon objects are unchanged.

+
Immutable
st_frechetdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the Frechet distance between the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_frechetdistance(geometry_a: geometry, geometry_b: geometry, densify_frac: float) → float

Returns the Frechet distance between the given geometries, with the given segment densification (range 0.0-1.0, -1 to disable).

+

Smaller densify_frac gives a more accurate Fréchet distance. However, the computation time and memory usage increases with the square of the number of subsegments.

+

This function utilizes the GEOS module.

+
Immutable
st_generatepoints(geometry: geometry, npoints: int4) → geometry

Generates pseudo-random points until the requested number are found within the input area. Uses system time as a seed. The requested number of points must not be larger than 65336.

+
Volatile
st_generatepoints(geometry: geometry, npoints: int4, seed: int4) → geometry

Generates pseudo-random points until the requested number are found within the input area. The requested number of points must not be larger than 65336.

+
Immutable
st_geogfromewkb(val: bytes) → geography

Returns the Geography from an EWKB representation.

+
Immutable
st_geogfromewkt(val: string) → geography

Returns the Geography from an EWKT representation.

+
Immutable
st_geogfromgeojson(val: string) → geography

Returns the Geography from a GeoJSON representation.

+
Immutable
st_geogfromgeojson(val: jsonb) → geography

Returns the Geography from a GeoJSON representation.

+
Immutable
st_geogfromtext(str: string, srid: int) → geography

Returns the Geography from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geogfromtext(val: string) → geography

Returns the Geography from a WKT or EWKT representation.

+
Immutable
st_geogfromwkb(bytes: bytes, srid: int) → geography

Returns the Geography from a WKB (or EWKB) representation with the given SRID set.

+
Immutable
st_geogfromwkb(val: bytes) → geography

Returns the Geography from a WKB (or EWKB) representation.

+
Immutable
st_geographyfromtext(str: string, srid: int) → geography

Returns the Geography from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geographyfromtext(val: string) → geography

Returns the Geography from a WKT or EWKT representation.

+
Immutable
st_geohash(geography: geography) → string

Returns a GeoHash representation of the geography with full precision if a point is provided, or with variable precision based on the size of the feature.

+
Immutable
st_geohash(geography: geography, precision: int) → string

Returns a GeoHash representation of the geography with the supplied precision.

+
Immutable
st_geohash(geometry: geometry) → string

Returns a GeoHash representation of the geometry with full precision if a point is provided, or with variable precision based on the size of the feature. This will error if any coordinates are outside the bounds of longitude/latitude.

+
Immutable
st_geohash(geometry: geometry, precision: int) → string

Returns a GeoHash representation of the geometry with the supplied precision. This will error if any coordinates are outside the bounds of longitude/latitude.

+
Immutable
st_geomcollfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not GeometryCollection, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geomcollfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geomcollfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geomcollfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geometryfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geometryfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_geometryn(geometry: geometry, n: int) → geometry

Returns the n-th Geometry (1-indexed). Returns NULL if out of bounds.

+
Immutable
st_geometrytype(geometry: geometry) → string

Returns the type of geometry as a string prefixed with ST_.

+

This function utilizes the GEOS module.

+
Immutable
st_geomfromewkb(val: bytes) → geometry

Returns the Geometry from an EWKB representation.

+
Immutable
st_geomfromewkt(val: string) → geometry

Returns the Geometry from an EWKT representation.

+
Immutable
st_geomfromgeohash(geohash: string) → geometry

Return a POLYGON Geometry from a GeoHash string with max precision.

+
Immutable
st_geomfromgeohash(geohash: string, precision: int) → geometry

Return a POLYGON Geometry from a GeoHash string with supplied precision.

+
Immutable
st_geomfromgeojson(val: string) → geometry

Returns the Geometry from a GeoJSON representation.

+
Immutable
st_geomfromgeojson(val: jsonb) → geometry

Returns the Geometry from a GeoJSON representation.

+
Immutable
st_geomfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geomfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_geomfromwkb(bytes: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with the given SRID set.

+
Immutable
st_geomfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation.

+
Immutable
st_hasarc(geometry: geometry) → bool

Returns whether there is a CIRCULARSTRING in the geometry.

+
Immutable
st_hausdorffdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the Hausdorff distance between the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_hausdorffdistance(geometry_a: geometry, geometry_b: geometry, densify_frac: float) → float

Returns the Hausdorff distance between the given geometries, with the given segment densification (range 0.0-1.0).

+

This function utilizes the GEOS module.

+
Immutable
st_interiorringn(geometry: geometry, n: int) → geometry

Returns the n-th (1-indexed) interior ring of a Polygon as a LineString. Returns NULL if the shape is not a Polygon, or the ring does not exist.

+
Immutable
st_intersection(geography_a: geography, geography_b: geography) → geography

Returns the point intersections of the given geographies.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+

This function utilizes the GEOS module.

+
Immutable
st_intersection(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the point intersections of the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_intersection(geometry_a_str: string, geometry_b_str: string) → geometry

Returns the point intersections of the given geometries.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_intersects(geography_a: geography, geography_b: geography) → bool

Returns true if geography_a shares any portion of space with geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_intersects(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_intersects(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_isclosed(geometry: geometry) → bool

Returns whether the geometry is closed as defined by whether the start and end points are coincident. Points are considered closed, empty geometries are not. For collections and multi-types, all members must be closed, as must all polygon rings.

+
Immutable
st_iscollection(geometry: geometry) → bool

Returns whether the geometry is of a collection type (including multi-types).

+
Immutable
st_isempty(geometry: geometry) → bool

Returns whether the geometry is empty.

+
Immutable
st_ispolygonccw(geometry: geometry) → bool

Returns whether the Polygon objects inside the Geometry have exterior rings in the counter-clockwise orientation and interior rings in the clockwise orientation. Non-Polygon objects are considered counter-clockwise.

+
Immutable
st_ispolygoncw(geometry: geometry) → bool

Returns whether the Polygon objects inside the Geometry have exterior rings in the clockwise orientation and interior rings in the counter-clockwise orientation. Non-Polygon objects are considered clockwise.

+
Immutable
st_isring(geometry: geometry) → bool

Returns whether the geometry is a single linestring that is closed and simple, as defined by ST_IsClosed and ST_IsSimple.

+

This function utilizes the GEOS module.

+
Immutable
st_issimple(geometry: geometry) → bool

Returns true if the geometry has no anomalous geometric points, e.g. that it intersects with or lies tangent to itself.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalid(geometry: geometry) → bool

Returns whether the geometry is valid as defined by the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalid(geometry: geometry, flags: int) → bool

Returns whether the geometry is valid.

+

For flags=0, validity is defined by the OGC spec.

+

For flags=1, validity considers self-intersecting rings forming holes as valid as per ESRI. This is not valid under OGC and CRDB spatial operations may not operate correctly.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidreason(geometry: geometry) → string

Returns a string containing the reason the geometry is invalid along with the point of interest, or “Valid Geometry” if it is valid. Validity is defined by the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidreason(geometry: geometry, flags: int) → string

Returns the reason the geometry is invalid or “Valid Geometry” if it is valid.

+

For flags=0, validity is defined by the OGC spec.

+

For flags=1, validity considers self-intersecting rings forming holes as valid as per ESRI. This is not valid under OGC and CRDB spatial operations may not operate correctly.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidtrajectory(geometry: geometry) → bool

Returns whether the geometry encodes a valid trajectory.

+

Note the geometry must be a LineString with M coordinates.

+
Immutable
st_length(geography: geography) → float

Returns the length of the given geography in meters. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_length(geography: geography, use_spheroid: bool) → float

Returns the length of the given geography in meters.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_length(geometry: geometry) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+
Immutable
st_length(geometry_str: string) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_length2d(geometry: geometry) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+
Immutable
st_linecrossingdirection(linestring_a: geometry, linestring_b: geometry) → int

Returns an integer value defining behavior of crossing of lines: +0: lines do not cross, +-1: linestring_b crosses linestring_a from right to left, +1: linestring_b crosses linestring_a from left to right, +-2: linestring_b crosses linestring_a multiple times from right to left, +2: linestring_b crosses linestring_a multiple times from left to right, +-3: linestring_b crosses linestring_a multiple times from left to left, +3: linestring_b crosses linestring_a multiple times from right to right.

+

Note that the top vertex of the segment touching another line does not count as a crossing, but the bottom vertex of segment touching another line is considered a crossing.

+
Immutable
st_linefromencodedpolyline(encoded_polyline: string) → geometry

Creates a LineString from an Encoded Polyline string.

+

Returns valid results only if the polyline was encoded with 5 decimal places.

+

See http://developers.google.com/maps/documentation/utilities/polylinealgorithm

+
Immutable
st_linefromencodedpolyline(encoded_polyline: string, precision: int4) → geometry

Creates a LineString from an Encoded Polyline string.

+

Precision specifies how many decimal places will be preserved in Encoded Polyline. Value should be the same on encoding and decoding, or coordinates will be incorrect.

+

See http://developers.google.com/maps/documentation/utilities/polylinealgorithm

+
Immutable
st_linefrommultipoint(geometry: geometry) → geometry

Creates a LineString from a MultiPoint geometry.

+
Immutable
st_linefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not LineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_linefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_lineinterpolatepoint(geometry: geometry, fraction: float) → geometry

Returns a point along the given LineString which is at given fraction of LineString’s total length.

+

This function utilizes the GEOS module.

+
Immutable
st_lineinterpolatepoints(geometry: geometry, fraction: float) → geometry

Returns one or more points along the LineString which is at an integral multiples of given fraction of LineString’s total length.

+

Note If the result has zero or one points, it will be returned as a POINT. If it has two or more points, it will be returned as a MULTIPOINT.

+

This function utilizes the GEOS module.

+
Immutable
st_lineinterpolatepoints(geometry: geometry, fraction: float, repeat: bool) → geometry

Returns one or more points along the LineString which is at an integral multiples of given fraction of LineString’s total length. If repeat is false (default true) then it returns first point.

+

Note If the result has zero or one points, it will be returned as a POINT. If it has two or more points, it will be returned as a MULTIPOINT.

+

This function utilizes the GEOS module.

+
Immutable
st_linelocatepoint(line: geometry, point: geometry) → float

Returns a float between 0 and 1 representing the location of the closest point on LineString to the given Point, as a fraction of total 2d line length.

+
Immutable
st_linemerge(geometry: geometry) → geometry

Returns a LineString or MultiLineString by joining together constituents of a MultiLineString with matching endpoints. If the input is not a MultiLineString or LineString, an empty GeometryCollection is returned.

+

This function utilizes the GEOS module.

+
Immutable
st_linestringfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not LineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_linestringfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linestringfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linestringfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linesubstring(linestring: geometry, start_fraction: decimal, end_fraction: decimal) → geometry

Return a linestring being a substring of the input one starting and ending at the given fractions of total 2D length. Second and third arguments are float8 values between 0 and 1.

+
Immutable
st_linesubstring(linestring: geometry, start_fraction: float, end_fraction: float) → geometry

Return a linestring being a substring of the input one starting and ending at the given fractions of total 2D length. Second and third arguments are float8 values between 0 and 1.

+
Immutable
st_longestline(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the LineString that corresponds to the max distance across every pair of points comprising the given geometries.

+

Note if geometries are the same, it will return the LineString with the maximum distance between the geometry’s vertexes. The function will return the longest line that was discovered first when comparing maximum distances if more than one is found.

+
Immutable
st_m(geometry: geometry) → float

Returns the M coordinate of a geometry if it is a Point.

+
Immutable
st_makebox2d(geometry_a: geometry, geometry_b: geometry) → box2d

Creates a box2d from two points. Errors if arguments are not two non-empty points.

+
Immutable
st_makeenvelope(xmin: float, ymin: float, xmax: float, ymax: float) → geometry

Creates a rectangular Polygon from the minimum and maximum values for X and Y with SRID 0.

+
Immutable
st_makeenvelope(xmin: float, ymin: float, xmax: float, ymax: float, srid: int) → geometry

Creates a rectangular Polygon from the minimum and maximum values for X and Y with the given SRID.

+
Immutable
st_makepoint(x: float, y: float) → geometry

Returns a new Point with the given X and Y coordinates.

+
Immutable
st_makepoint(x: float, y: float, z: float) → geometry

Returns a new Point with the given X, Y, and Z coordinates.

+
Immutable
st_makepoint(x: float, y: float, z: float, m: float) → geometry

Returns a new Point with the given X, Y, Z, and M coordinates.

+
Immutable
st_makepointm(x: float, y: float, m: float) → geometry

Returns a new Point with the given X, Y, and M coordinates.

+
Immutable
st_makepolygon(geometry: geometry) → geometry

Returns a new Polygon with the given outer LineString.

+
Immutable
st_makepolygon(outer: geometry, interior: anyelement[]) → geometry

Returns a new Polygon with the given outer LineString and interior (hole) LineString(s).

+
Immutable
st_makevalid(geometry: geometry) → geometry

Returns a valid form of the given geometry according to the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_maxdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the maximum distance across every pair of points comprising the given geometries. Note if the geometries are the same, it will return the maximum distance between the geometry’s vertexes.

+
Immutable
st_memsize(geometry: geometry) → int

Returns the amount of memory space (in bytes) the geometry takes.

+
Immutable
st_minimumboundingcircle(geometry: geometry) → geometry

Returns the smallest circle polygon that can fully contain a geometry.

+
Immutable
st_minimumboundingcircle(geometry: geometry, num_segs: int) → geometry

Returns the smallest circle polygon that can fully contain a geometry.

+
Immutable
st_minimumboundingradius(geometry: geometry) → tuple{geometry AS center, float AS radius}

Returns a record containing the center point and radius of the smallest circle that can fully contain the given geometry.

+
Immutable
st_minimumclearance(geometry: geometry) → float

Returns the minimum distance a vertex can move before producing an invalid geometry. Returns Infinity if no minimum clearance can be found (e.g. for a single point).

+
Immutable
st_minimumclearanceline(geometry: geometry) → geometry

Returns a LINESTRING spanning the minimum distance a vertex can move before producing an invalid geometry. If no minimum clearance can be found (e.g. for a single point), an empty LINESTRING is returned.

+
Immutable
st_mlinefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mlinefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mlinefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mlinefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mpointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mpointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpolyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mpolyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_mpolyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_mpolyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multi(geometry: geometry) → geometry

Returns the geometry as a new multi-geometry, e.g. converts a POINT to a MULTIPOINT. If the input is already a multitype or collection, it is returned as is.

+
Immutable
st_multilinefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multilinefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multilinestringfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multipointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipolyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipolyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipolygonfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_ndims(geometry: geometry) → int

Returns the number of coordinate dimensions of a given Geometry.

+
Immutable
st_node(geometry: geometry) → geometry

Adds a node on a geometry for each intersection. Resulting geometry is always a MultiLineString.

+
Immutable
st_normalize(geometry: geometry) → geometry

Returns the geometry in its normalized form.

+

This function utilizes the GEOS module.

+
Immutable
st_npoints(geometry: geometry) → int

Returns the number of points in a given Geometry. Works for any shape type.

+
Immutable
st_nrings(geometry: geometry) → int

Returns the number of rings in a Polygon Geometry. Returns 0 if the shape is not a Polygon.

+
Immutable
st_numgeometries(geometry: geometry) → int

Returns the number of shapes inside a given Geometry.

+
Immutable
st_numinteriorring(geometry: geometry) → int

Returns the number of interior rings in a Polygon Geometry. Returns NULL if the shape is not a Polygon.

+
Immutable
st_numinteriorrings(geometry: geometry) → int

Returns the number of interior rings in a Polygon Geometry. Returns NULL if the shape is not a Polygon.

+
Immutable
st_numpoints(geometry: geometry) → int

Returns the number of points in a LineString. Returns NULL if the Geometry is not a LineString.

+
Immutable
st_orderingequals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is exactly equal to geometry_b, having all coordinates in the same order, as well as the same type, SRID, bounding box, and so on.

+
Immutable
st_orientedenvelope(geometry: geometry) → geometry

Returns a minimum rotated rectangle enclosing a geometry. +Note that more than one minimum rotated rectangle may exist. +May return a Point or LineString in the case of degenerate inputs.

+
Immutable
st_overlaps(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a intersects but does not completely contain geometry_b, or vice versa. “Does not completely” implies ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = false.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_perimeter(geography: geography) → float

Returns the perimeter of the given geography in meters. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_perimeter(geography: geography, use_spheroid: bool) → float

Returns the perimeter of the given geography in meters.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_perimeter(geometry: geometry) → float

Returns the perimeter of the given geometry.

+

Note ST_Perimeter is only valid for Polygon - use ST_Length for LineString.

+

This function utilizes the GEOS module.

+
Immutable
st_perimeter2d(geometry: geometry) → float

Returns the perimeter of the given geometry.

+

Note ST_Perimeter is only valid for Polygon - use ST_Length for LineString.

+

This function utilizes the GEOS module.

+
Immutable
st_point(x: float, y: float) → geometry

Returns a new Point with the given X and Y coordinates.

+
Immutable
st_pointfromgeohash(geohash: string) → geometry

Return a POINT Geometry from a GeoHash string with max precision.

+
Immutable
st_pointfromgeohash(geohash: string, precision: int) → geometry

Return a POINT Geometry from a GeoHash string with supplied precision.

+
Immutable
st_pointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Point, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_pointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointinsidecircle(geometry: geometry, x_coord: float, y_coord: float, radius: float) → bool

Returns true if the geometry is a point and is inside the circle. Returns false otherwise.

+
Immutable
st_pointn(geometry: geometry, n: int) → geometry

Returns the n-th Point of a LineString (1-indexed). Returns NULL if out of bounds or not a LineString.

+
Immutable
st_pointonsurface(geometry: geometry) → geometry

Returns a point that intersects with the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_points(geometry: geometry) → geometry

Returns all coordinates in the given Geometry as a MultiPoint, including duplicates.

+
Immutable
st_polyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Polygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_polyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygon(geometry: geometry, srid: int) → geometry

Returns a new Polygon from the given LineString and sets its SRID. It is equivalent to ST_MakePolygon with a single argument followed by ST_SetSRID.

+
Immutable
st_polygonfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Polygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_polygonfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygonfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygonfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_project(geography: geography, distance: float, azimuth: float) → geography

Returns a point projected from a start point along a geodesic using a given distance and azimuth (bearing). +This is known as the direct geodesic problem.

+

The distance is given in meters. Negative values are supported.

+

The azimuth (also known as heading or bearing) is given in radians. It is measured clockwise from true north (azimuth zero). +East is azimuth π/2 (90 degrees); south is azimuth π (180 degrees); west is azimuth 3π/2 (270 degrees). +Negative azimuth values and values greater than 2π (360 degrees) are supported.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry) → string

Returns the DE-9IM spatial relation between geometry_a and geometry_b.

+

This function utilizes the GEOS module.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry, bnr: int) → string

Returns the DE-9IM spatial relation between geometry_a and geometry_b using the given boundary node rule (1:OGC/MOD2, 2:Endpoint, 3:MultivalentEndpoint, 4:MonovalentEndpoint).

+

This function utilizes the GEOS module.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry, pattern: string) → bool

Returns whether the DE-9IM spatial relation between geometry_a and geometry_b matches the DE-9IM pattern.

+

This function utilizes the GEOS module.

+
Immutable
st_relatematch(intersection_matrix: string, pattern: string) → bool

Returns whether the given DE-9IM intersection matrix satisfies the given pattern.

+
Immutable
st_removepoint(line_string: geometry, index: int) → geometry

Removes the Point at the given 0-based index and returns the modified LineString geometry.

+
Immutable
st_removerepeatedpoints(geometry: geometry) → geometry

Returns a geometry with repeated points removed.

+
Immutable
st_removerepeatedpoints(geometry: geometry, tolerance: float) → geometry

Returns a geometry with repeated points removed, within the given distance tolerance.

+
Immutable
st_reverse(geometry: geometry) → geometry

Returns a modified geometry by reversing the order of its vertices.

+
Immutable
st_rotate(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated around the origin by a rotation angle.

+
Immutable
st_rotate(g: geometry, angle_radians: float, origin_point: geometry) → geometry

Returns a modified Geometry whose coordinates are rotated around the provided origin by a rotation angle.

+
Immutable
st_rotate(g: geometry, angle_radians: float, origin_x: float, origin_y: float) → geometry

Returns a modified Geometry whose coordinates are rotated around the provided origin by a rotation angle.

+
Immutable
st_rotatex(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the x axis by a rotation angle.

+
Immutable
st_rotatey(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the y axis by a rotation angle.

+
Immutable
st_rotatez(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the z axis by a rotation angle.

+
Immutable
st_s2covering(geography: geography) → geography

Returns a geography which represents the S2 covering used by the index using the default index configuration.

+
Immutable
st_s2covering(geography: geography, settings: string) → geography

Returns a geography which represents the S2 covering used by the index using the index configuration specified by the settings parameter.

+

The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX ... WITH (...), e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using SELECT ST_S2Covering(geography, 's2_max_level=15,s2_level_mod=3').

+
Immutable
st_s2covering(geometry: geometry) → geometry

Returns a geometry which represents the S2 covering used by the index using the default index configuration.

+
Immutable
st_s2covering(geometry: geometry, settings: string) → geometry

Returns a geometry which represents the S2 covering used by the index using the index configuration specified by the settings parameter.

+

The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX ... WITH (...), e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using SELECT ST_S2Covering(geometry, 's2_max_level=15,s2_level_mod=3')

+
Immutable
st_scale(g: geometry, factor: geometry) → geometry

Returns a modified Geometry scaled by taking in a Geometry as the factor.

+
Immutable
st_scale(g: geometry, factor: geometry, origin: geometry) → geometry

Returns a modified Geometry scaled by the Geometry factor relative to a false origin.

+
Immutable
st_scale(geometry: geometry, x_factor: float, y_factor: float) → geometry

Returns a modified Geometry scaled by the given factors.

+
Immutable
st_segmentize(geography: geography, max_segment_length_meters: float) → geography

Returns a modified Geography having no segment longer than the given max_segment_length meters.

+

The calculations are done on a sphere.

+

This function utilizes the S2 library for spherical calculations.

+
Immutable
st_segmentize(geometry: geometry, max_segment_length: float) → geometry

Returns a modified Geometry having no segment longer than the given max_segment_length. Length units are in units of spatial reference.

+
Immutable
st_setpoint(line_string: geometry, index: int, point: geometry) → geometry

Sets the Point at the given 0-based index and returns the modified LineString geometry.

+
Immutable
st_setsrid(geography: geography, srid: int) → geography

Sets a Geography to a new SRID without transforming the coordinates.

+
Immutable
st_setsrid(geometry: geometry, srid: int) → geometry

Sets a Geometry to a new SRID without transforming the coordinates.

+
Immutable
st_sharedpaths(geometry_a: geometry, geometry_b: geometry) → geometry

Returns a collection containing paths shared by the two input geometries.

+

Those going in the same direction are in the first element of the collection, those going in the opposite direction are in the second element. The paths themselves are given in the direction of the first geometry.

+
Immutable
st_shiftlongitude(geometry: geometry) → geometry

Returns a modified version of a geometry in which the longitude (X coordinate) of each point is incremented by 360 if it is <0 and decremented by 360 if it is >180. The result is only meaningful if the coordinates are in longitude/latitude.

+
Immutable
st_shortestline(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the LineString corresponding to the minimum distance across every pair of points comprising the given geometries.

+

Note if geometries are the same, it will return the LineString with the minimum distance between the geometry’s vertexes. The function will return the shortest line that was discovered first when comparing minimum distances if more than one is found.

+
Immutable
st_simplify(geometry: geometry, tolerance: float) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm.

+

This function utilizes the GEOS module.

+
Immutable
st_simplify(geometry: geometry, tolerance: float, preserve_collapsed: bool) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm, retaining objects that would be too small given the tolerance if preserve_collapsed is set to true.

+
Immutable
st_simplifypreservetopology(geometry: geometry, tolerance: float) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm, avoiding the creation of invalid geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_snap(input: geometry, target: geometry, tolerance: float) → geometry

Snaps the vertices and segments of the input geometry to the target geometry’s vertices. Tolerance is used to control where snapping is performed. The result geometry is the input geometry with the vertices snapped. If no snapping occurs then the input geometry is returned unchanged.

+
Immutable
st_snaptogrid(geometry: geometry, origin: geometry, size_x: float, size_y: float, size_z: float, size_m: float) → geometry

Snap a geometry to a grid defined by the given origin and X, Y, Z, and M cell sizes. Any dimension with a 0 cell size will not be snapped.

+
Immutable
st_snaptogrid(geometry: geometry, origin_x: float, origin_y: float, size_x: float, size_y: float) → geometry

Snap a geometry to a grid of with X coordinates snapped to size_x and Y coordinates snapped to size_y based on an origin of (origin_x, origin_y).

+
Immutable
st_snaptogrid(geometry: geometry, size: float) → geometry

Snap a geometry to a grid of the given size. The specified size is only used to snap X and Y coordinates.

+
Immutable
st_snaptogrid(geometry: geometry, size_x: float, size_y: float) → geometry

Snap a geometry to a grid of with X coordinates snapped to size_x and Y coordinates snapped to size_y.

+
Immutable
st_srid(geography: geography) → int

Returns the Spatial Reference Identifier (SRID) for the ST_Geography as defined in spatial_ref_sys table.

+
Immutable
st_srid(geometry: geometry) → int

Returns the Spatial Reference Identifier (SRID) for the ST_Geometry as defined in spatial_ref_sys table.

+
Immutable
st_startpoint(geometry: geometry) → geometry

Returns the first point of a geometry which has shape LineString. Returns NULL if the geometry is not a LineString.

+
Immutable
st_subdivide(geometry: geometry) → geometry

Returns a geometry divided into parts, where each part contains no more than 256 vertices.

+
Immutable
st_subdivide(geometry: geometry, max_vertices: int4) → geometry

Returns a geometry divided into parts, where each part contains no more than the number of vertices provided.

+
Immutable
st_summary(geography: geography) → string

Returns a text summary of the contents of the geography.

+

Flags shown in square brackets after the geometry type have the following meaning:

+
    +
  • M: has M coordinate
  • +
  • Z: has Z coordinate
  • +
  • B: has a cached bounding box
  • +
  • G: is geography
  • +
  • S: has spatial reference system
  • +
+
Immutable
st_summary(geometry: geometry) → string

Returns a text summary of the contents of the geometry.

+

Flags shown in square brackets after the geometry type have the following meaning:

+
    +
  • M: has M coordinate
  • +
  • Z: has Z coordinate
  • +
  • B: has a cached bounding box
  • +
  • G: is geography
  • +
  • S: has spatial reference system
  • +
+
Immutable
st_swapordinates(geometry: geometry, swap_ordinate_string: string) → geometry

Returns a version of the given geometry with the given ordinates swapped. The swap_ordinate_string parameter is a 2-character string naming the ordinates to swap. Valid names are: x, y, z and m.

+
Immutable
st_symdifference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the symmetric difference of both geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_symmetricdifference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the symmetric difference of both geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_touches(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if the only points in common between geometry_a and geometry_b are on the boundary. Note points do not touch other points.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_transform(geometry: geometry, from_proj_text: string, srid: int) → geometry

Transforms a geometry into the coordinate reference system assuming the from_proj_text to the new to_proj_text by projecting its coordinates. The supplied SRID is set on the new geometry.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, from_proj_text: string, to_proj_text: string) → geometry

Transforms a geometry into the coordinate reference system assuming the from_proj_text to the new to_proj_text by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, srid: int) → geometry

Transforms a geometry into the given SRID coordinate reference system by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, to_proj_text: string) → geometry

Transforms a geometry into the coordinate reference system referenced by the projection text by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_translate(g: geometry, delta_x: float, delta_y: float) → geometry

Returns a modified Geometry translated by the given deltas.

+
Immutable
st_translate(g: geometry, delta_x: float, delta_y: float, delta_z: float) → geometry

Returns a modified Geometry translated by the given deltas.

+
Immutable
st_transscale(geometry: geometry, delta_x: float, delta_y: float, x_factor: float, y_factor: float) → geometry

Translates the geometry using the deltaX and deltaY args, then scales it using the XFactor, YFactor args, working in 2D only.

+
Immutable
st_unaryunion(geometry: geometry) → geometry

Returns a union of the components for any geometry or geometry collection provided. Dissolves boundaries of a multipolygon.

+
Immutable
st_voronoilines(geometry: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoilines(geometry: geometry, tolerance: float) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoilines(geometry: geometry, tolerance: float, extend_to: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoipolygons(geometry: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_voronoipolygons(geometry: geometry, tolerance: float) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_voronoipolygons(geometry: geometry, tolerance: float, extend_to: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_within(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is completely inside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_wkbtosql(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation.

+
Immutable
st_wkttosql(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_x(geometry: geometry) → float

Returns the X coordinate of a geometry if it is a Point.

+
Immutable
st_xmax(box2d: box2d) → float

Returns the maximum X ordinate of a box2d.

+
Immutable
st_xmax(geometry: geometry) → float

Returns the maximum X ordinate of a geometry.

+
Immutable
st_xmin(box2d: box2d) → float

Returns the minimum X ordinate of a box2d.

+
Immutable
st_xmin(geometry: geometry) → float

Returns the minimum X ordinate of a geometry.

+
Immutable
st_y(geometry: geometry) → float

Returns the Y coordinate of a geometry if it is a Point.

+
Immutable
st_ymax(box2d: box2d) → float

Returns the maximum Y ordinate of a box2d.

+
Immutable
st_ymax(geometry: geometry) → float

Returns the maximum Y ordinate of a geometry.

+
Immutable
st_ymin(box2d: box2d) → float

Returns the minimum Y ordinate of a box2d.

+
Immutable
st_ymin(geometry: geometry) → float

Returns the minimum Y ordinate of a geometry.

+
Immutable
st_z(geometry: geometry) → float

Returns the Z coordinate of a geometry if it is a Point.

+
Immutable
st_zmflag(geometry: geometry) → int2

Returns a code based on the ZM coordinate dimension of a geometry (XY = 0, XYM = 1, XYZ = 2, XYZM = 3).

+
Immutable
+ +### String and byte functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
ascii(val: string) → int

Returns the character code of the first character in val. Despite the name, the function supports Unicode too.

+
Immutable
bit_length(val: bytes) → int

Calculates the number of bits used to represent val.

+
Immutable
bit_length(val: string) → int

Calculates the number of bits used to represent val.

+
Immutable
bit_length(val: varbit) → int

Calculates the number of bits used to represent val.

+
Immutable
btrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the beginning or end of input (applies recursively).

+

For example, btrim('doggie', 'eod') returns ggi.

+
Immutable
btrim(val: string) → string

Removes all spaces from the beginning and end of val.

+
Immutable
char_length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
char_length(val: string) → int

Calculates the number of characters in val.

+
Immutable
character_length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
character_length(val: string) → int

Calculates the number of characters in val.

+
Immutable
chr(val: int) → string

Returns the character with the code given in val. Inverse function of ascii().

+
Immutable
compress(data: bytes, codec: string) → bytes

Compress data with the specified codec (‘gzip’, ‘lz4’, ‘snappy’, ‘zstd’).

+
Immutable
concat(string...) → string

Concatenates a comma-separated list of strings.

+
Immutable
concat_ws(string...) → string

Uses the first argument as a separator between the concatenation of the subsequent arguments.

+

For example concat_ws('!','wow','great') returns wow!great.

+
Immutable
convert_from(str: bytes, enc: string) → string

Decode the bytes in str into a string using encoding enc. Supports encodings ‘UTF8’ and ‘LATIN1’.

+
Immutable
convert_to(str: string, enc: string) → bytes

Encode the string str as a byte array using encoding enc. Supports encodings ‘UTF8’ and ‘LATIN1’.

+
Immutable
crdb_internal.decode_external_plan_gist(gist: string) → string

Returns rows of output similar to EXPLAIN from a gist such as those found in planGists element of the statistics column of the statement_statistics table without attempting to resolve tables or indexes.

+
Volatile
crdb_internal.decode_plan_gist(gist: string) → string

Returns rows of output similar to EXPLAIN from a gist such as those found in planGists element of the statistics column of the statement_statistics table.

+
Volatile
crdb_internal.gen_rand_ident(name_pattern: string, count: int) → string

Returns random SQL identifiers.

+

gen_rand_ident(pattern, count) is an alias for gen_rand_ident(pattern, count, ‘’). See the documentation of the other gen_rand_ident overload for details.

+
Volatile
crdb_internal.gen_rand_ident(name_pattern: string, count: int, parameters: jsonb) → string

Returns count random SQL identifiers that resemble the name_pattern.

+

The last argument is a JSONB object containing the following optional fields:

+
    +
  • “seed”: the seed to use for the pseudo-random generator (default: random).
  • +
  • “number”: whether to add a number to the generated names (default true). When enabled, occurrences of the character ‘#’ in the name pattern are replaced by the number. If ‘#’ is not present, the number is added at the end.
  • +
  • “noise”: whether to add noise to the generated names (default true). It adds a non-zero probability for each of the probability options below left to zero. (To enable noise generally but disable one type of noise, set its probability to -1.)
  • +
  • “punctuate”: probability of adding punctuation.
  • +
  • “fmt”: probability of adding random Go/C formatting directives.
  • +
  • “escapes”: probability of adding random escape sequences.
  • +
  • “quote”: probability of adding single or double quotes.
  • +
  • “emote”: probability of adding emojis.
  • +
  • “space”: probability of adding simple spaces.
  • +
  • “whitespace”: probability of adding complex whitespace.
  • +
  • “capitals”: probability of using capital letters. Note: the name pattern must contain ASCII letters already for capital letters to be used.
  • +
  • “diacritics”: probability of adding diacritics.
  • +
  • “diacritic_depth”: max number of diacritics to add at a time (default 1).
  • +
  • “zalgo”: special option that overrides diacritics and diacritic_depth (default false).
  • +
+
Volatile
crdb_internal.show_create_all_schemas(database_name: string) → string

Returns rows of CREATE schema statements. The output can be used to recreate a database.

+
Volatile
crdb_internal.show_create_all_tables(database_name: string) → string

Returns rows of CREATE table statements followed by ALTER table statements that add table constraints. The rows are ordered by dependencies. All foreign keys are added after the creation of the table in the alter statements. It is not recommended to perform this operation on a database with many tables. The output can be used to recreate a database.

+
Volatile
crdb_internal.show_create_all_types(database_name: string) → string

Returns rows of CREATE type statements. The output can be used to recreate a database.

+
Volatile
decode(text: string, format: string) → bytes

Decodes data using format (hex / escape / base64).

+
Immutable
decompress(data: bytes, codec: string) → bytes

Decompress data with the specified codec (‘gzip’, ‘lz4’, ‘snappy’, ‘zstd’).

+
Immutable
difference(source: string, target: string) → int

Convert two strings to their Soundex codes and then reports the number of matching code positions.

+
Immutable
encode(data: bytes, format: string) → string

Encodes data using format (hex / escape / base64).

+
Immutable
format(string, anyelement...) → string

Interprets the first argument as a format string similar to C sprintf and interpolates the remaining arguments.

+
Stable
from_ip(val: bytes) → string

Converts the byte string representation of an IP to its character string representation.

+
Immutable
from_uuid(val: bytes) → string

Converts the byte string representation of a UUID to its character string representation.

+
Immutable
get_bit(bit_string: varbit, index: int) → int

Extracts a bit at given index in the bit array.

+
Immutable
get_bit(byte_string: bytes, index: int) → int

Extracts a bit at the given index in the byte array.

+
Immutable
get_byte(byte_string: bytes, index: int) → int

Extracts a byte at the given index in the byte array.

+
Immutable
initcap(val: string) → string

Capitalizes the first letter of val.

+
Immutable
left(input: bytes, return_set: int) → bytes

Returns the first return_set bytes from input.

+
Immutable
left(input: string, return_set: int) → string

Returns the first return_set characters from input.

+
Immutable
length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
length(val: string) → int

Calculates the number of characters in val.

+
Immutable
length(val: varbit) → int

Calculates the number of bits in val.

+
Immutable
lower(val: string) → string

Converts all characters in val to their lower-case equivalents.

+
Immutable
lpad(string: string, length: int) → string

Pads string to length by adding ’ ’ to the left of string. If string is longer than length it is truncated.

+
Immutable
lpad(string: string, length: int, fill: string) → string

Pads string by adding fill to the left of string to make it length. If string is longer than length it is truncated.

+
Immutable
ltrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the beginning (left-hand side) of input (applies recursively).

+

For example, ltrim('doggie', 'od') returns ggie.

+
Immutable
ltrim(val: string) → string

Removes all spaces from the beginning (left-hand side) of val.

+
Immutable
md5(bytes...) → string

Calculates the MD5 hash value of a set of values.

+
Leakproof
md5(string...) → string

Calculates the MD5 hash value of a set of values.

+
Leakproof
octet_length(val: bytes) → int

Calculates the number of bytes used to represent val.

+
Immutable
octet_length(val: string) → int

Calculates the number of bytes used to represent val.

+
Immutable
octet_length(val: varbit) → int

Calculates the number of bits used to represent val.

+
Immutable
overlay(input: string, overlay_val: string, start_pos: int) → string

Replaces characters in input with overlay_val starting at start_pos (begins at 1).

+

For example, overlay('doggie', 'CAT', 2) returns dCATie.

+
Immutable
overlay(input: string, overlay_val: string, start_pos: int, end_pos: int) → string

Deletes the characters in input between start_pos and end_pos (count starts at 1), and then insert overlay_val at start_pos.

+
Immutable
parse_date(string: string, datestyle: string) → date

Parses a date assuming it is in format specified by DateStyle.

+
Immutable
parse_date(val: string) → date

Parses a date assuming it is in MDY format.

+
Immutable
parse_ident(qualified_identifier: string) → string[]

Splits qualified_identifier into an array of identifiers, removing any quoting of individual identifiers. Extra characters after the last identifier are considered an error.

+
Immutable
parse_ident(qualified_identifier: string, strict: bool) → string[]

Splits qualified_identifier into an array of identifiers, removing any quoting of individual identifiers. If strict is false, then extra characters after the last identifier are ignored.

+
Immutable
parse_interval(string: string, style: string) → interval

Convert a string to an interval using the given IntervalStyle.

+
Immutable
parse_interval(val: string) → interval

Convert a string to an interval assuming the Postgres IntervalStyle.

+
Immutable
parse_time(string: string, timestyle: string) → time

Parses a time assuming the date (if any) is in format specified by DateStyle.

+
Immutable
parse_time(val: string) → time

Parses a time assuming the date (if any) is in MDY format.

+
Immutable
parse_timestamp(string: string, datestyle: string) → timestamp

Convert a string containing an absolute timestamp to the corresponding timestamp assuming dates formatted using the given DateStyle.

+
Immutable
parse_timestamp(val: string) → timestamp

Convert a string containing an absolute timestamp to the corresponding timestamp assuming dates are in MDY format.

+
Immutable
parse_timetz(string: string, timestyle: string) → timetz

Parses a timetz assuming the date (if any) is in format specified by DateStyle.

+
Immutable
parse_timetz(val: string) → timetz

Parses a timetz assuming the date (if any) is in MDY format.

+
Immutable
prettify_statement(statement: string, line_width: int, align_mode: int, case_mode: int) → string

Prettifies a statement using a user-configured pretty-printing config. Align mode values range from 0 - 3, representing no, partial, full, and extra alignment respectively. Case mode values range between 0 - 1, representing lower casing and upper casing respectively.

+
Immutable
prettify_statement(val: string) → string

Prettifies a statement using the default pretty-printing config.

+
Immutable
quote_ident(val: string) → string

Return val suitably quoted to serve as identifier in a SQL statement.

+
Immutable
quote_literal(val: string) → string

Return val suitably quoted to serve as string literal in a SQL statement.

+
Immutable
quote_literal(val: anyelement) → string

Coerce val to a string and then quote it as a literal.

+
Stable
quote_nullable(val: string) → string

Coerce val to a string and then quote it as a literal. If val is NULL, returns ‘NULL’.

+
Immutable
quote_nullable(val: anyelement) → string

Coerce val to a string and then quote it as a literal. If val is NULL, returns ‘NULL’.

+
Stable
regexp_extract(input: string, regex: string) → string

Returns the first match for the Regular Expression regex in input.

+
Immutable
regexp_replace(input: string, regex: string, replace: string) → string

Replaces matches for the Regular Expression regex in input with the Regular Expression replace.

+
Immutable
regexp_replace(input: string, regex: string, replace: string, flags: string) → string

Replaces matches for the regular expression regex in input with the regular expression replace using flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
repeat(input: string, repeat_counter: int) → string

Concatenates input repeat_counter number of times.

+

For example, repeat('dog', 2) returns dogdog.

+
Immutable
replace(input: string, find: string, replace: string) → string

Replaces all occurrences of find with replace in input.

+
Immutable
reverse(val: string) → string

Reverses the order of the string’s characters.

+
Immutable
right(input: bytes, return_set: int) → bytes

Returns the last return_set bytes from input.

+
Immutable
right(input: string, return_set: int) → string

Returns the last return_set characters from input.

+
Immutable
rpad(string: string, length: int) → string

Pads string to length by adding ’ ’ to the right of string. If string is longer than length it is truncated.

+
Immutable
rpad(string: string, length: int, fill: string) → string

Pads string to length by adding fill to the right of string. If string is longer than length it is truncated.

+
Immutable
rtrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the end (right-hand side) of input (applies recursively).

+

For example, rtrim('doggie', 'ei') returns dogg.

+
Immutable
rtrim(val: string) → string

Removes all spaces from the end (right-hand side) of val.

+
Immutable
set_bit(bit_string: varbit, index: int, to_set: int) → varbit

Updates a bit at given index in the bit array.

+
Immutable
set_bit(byte_string: bytes, index: int, to_set: int) → bytes

Updates a bit at the given index in the byte array.

+
Immutable
set_byte(byte_string: bytes, index: int, to_set: int) → bytes

Updates a byte at the given index in the byte array.

+
Immutable
sha1(bytes...) → string

Calculates the SHA1 hash value of a set of values.

+
Leakproof
sha1(string...) → string

Calculates the SHA1 hash value of a set of values.

+
Leakproof
sha224(bytes...) → string

Calculates the SHA224 hash value of a set of values.

+
Leakproof
sha224(string...) → string

Calculates the SHA224 hash value of a set of values.

+
Leakproof
sha256(bytes...) → string

Calculates the SHA256 hash value of a set of values.

+
Leakproof
sha256(string...) → string

Calculates the SHA256 hash value of a set of values.

+
Leakproof
sha384(bytes...) → string

Calculates the SHA384 hash value of a set of values.

+
Leakproof
sha384(string...) → string

Calculates the SHA384 hash value of a set of values.

+
Leakproof
sha512(bytes...) → string

Calculates the SHA512 hash value of a set of values.

+
Leakproof
sha512(string...) → string

Calculates the SHA512 hash value of a set of values.

+
Leakproof
similar_escape(pattern: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern.

+
Immutable
similar_escape(pattern: string, escape: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern using escape as an escape token.

+
Immutable
similar_to_escape(pattern: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern.

+
Immutable
similar_to_escape(pattern: string, escape: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern using escape as an escape token.

+
Immutable
similar_to_escape(unescaped: string, pattern: string, escape: string) → bool

Matches unescaped with pattern using escape as an escape token.

+
Immutable
split_part(input: string, delimiter: string, return_index_pos: int) → string

Splits input on delimiter and return the value in the return_index_pos position (starting at 1).

+

For example, split_part('123.456.789.0','.',3) returns 789.

+
Immutable
strpos(input: bytes, find: bytes) → int

Calculates the position where the byte subarray find begins in input.

+
Immutable
strpos(input: string, find: string) → int

Calculates the position where the string find begins in input.

+

For example, strpos('doggie', 'gie') returns 4.

+
Immutable
strpos(input: varbit, find: varbit) → int

Calculates the position where the bit subarray find begins in input.

+
Immutable
substr(input: bytes, start_pos: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: bytes, start_pos: int, length: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substr(input: string, regex: string) → string

Returns a substring of input that matches the regular expression regex.

+
Immutable
substr(input: string, regex: string, escape_char: string) → string

Returns a substring of input that matches the regular expression regex using escape_char as your escape character instead of \.

+
Immutable
substr(input: string, start_pos: int) → string

Returns a substring of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: string, start_pos: int, length: int) → string

Returns a substring of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substr(input: varbit, start_pos: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: varbit, start_pos: int, length: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: bytes, start_pos: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: bytes, start_pos: int, length: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: string, regex: string) → string

Returns a substring of input that matches the regular expression regex.

+
Immutable
substring(input: string, regex: string, escape_char: string) → string

Returns a substring of input that matches the regular expression regex using escape_char as your escape character instead of \.

+
Immutable
substring(input: string, start_pos: int) → string

Returns a substring of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: string, start_pos: int, length: int) → string

Returns a substring of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: varbit, start_pos: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: varbit, start_pos: int, length: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
to_char_with_style(date: date, datestyle: string) → string

Convert a date to a string assuming the string is formatted using the given DateStyle.

+
Immutable
to_char_with_style(interval: interval, style: string) → string

Convert an interval to a string using the given IntervalStyle.

+
Immutable
to_char_with_style(timestamp: timestamp, datestyle: string) → string

Convert a timestamp to a string assuming the string is formatted using the given DateStyle.

+
Immutable
to_english(val: int) → string

This function enunciates the value of its argument using English cardinals.

+
Immutable
to_hex(val: bytes) → string

Converts val to its hexadecimal representation.

+
Immutable
to_hex(val: int) → string

Converts val to its hexadecimal representation.

+
Immutable
to_hex(val: string) → string

Converts val to its hexadecimal representation.

+
Immutable
to_ip(val: string) → bytes

Converts the character string representation of an IP to its byte string representation.

+
Immutable
to_uuid(val: string) → bytes

Converts the character string representation of a UUID to its byte string representation.

+
Immutable
translate(input: string, find: string, replace: string) → string

In input, replaces the first character from find with the first character in replace; repeat for each character in find.

+

For example, translate('doggie', 'dog', '123'); returns 1233ie.

+
Immutable
ulid_to_uuid(val: string) → uuid

Converts a ULID string to its UUID-encoded representation.

+
Immutable
unaccent(val: string) → string

Removes accents (diacritic signs) from the text provided in val.

+
Immutable
upper(val: string) → string

Converts all characters in val to their upper-case equivalents.

+
Immutable
+ +### System info functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cluster_logical_timestamp() → decimal

Returns the logical time of the current transaction as +a CockroachDB HLC in decimal form.

+

Note that uses of this function disable server-side optimizations and +may increase either contention or retry errors, or both.

+
Volatile
crdb_internal.active_version() → jsonb

Returns the current active cluster version.

+
Volatile
crdb_internal.approximate_timestamp(timestamp: decimal) → timestamp

Converts the crdb_internal_mvcc_timestamp column into an approximate timestamp.

+
Immutable
crdb_internal.assignment_cast(val: anyelement, type: anyelement) → anyelement

This function is used internally to perform assignment casts during mutations.

+
Stable
crdb_internal.check_consistency(stats_only: bool, start_key: bytes, end_key: bytes) → tuple{int AS range_id, bytes AS start_key, string AS start_key_pretty, string AS status, string AS detail, interval AS duration}

Runs a consistency check on ranges touching the specified key range. an empty start or end key is treated as the minimum and maximum possible, respectively. stats_only should only be set to false when targeting a small number of ranges to avoid overloading the cluster. Each returned row contains the range ID, the status (a roachpb.CheckConsistencyResponse_Status), and verbose detail.

+

Example usage:

+

SELECT * FROM crdb_internal.check_consistency(true, b'\x02', b'\x04')

+
Volatile
crdb_internal.check_password_hash_format(password: bytes) → string

This function checks whether a string is a precomputed password hash. Returns the hash algorithm.

+
Immutable
crdb_internal.cluster_id() → uuid

Returns the logical cluster ID for this tenant.

+
Stable
crdb_internal.cluster_name() → string

Returns the cluster name.

+
Volatile
crdb_internal.cluster_setting_encoded_default(setting: string) → string

Returns the encoded default value of the given cluster setting.

+
Immutable
crdb_internal.create_join_token() → string

Creates a join token for use when adding a new node to a secure cluster.

+
Volatile
crdb_internal.create_session_revival_token() → bytes

Generate a token that can be used to create a new session for the current user.

+
Volatile
crdb_internal.create_sql_schema_telemetry_job() → int

This function is used to create a schema telemetry job instance.

+
Volatile
crdb_internal.decode_cluster_setting(setting: string, value: string) → string

Decodes the given encoded value for a cluster setting.

+
Immutable
crdb_internal.deserialize_session(session: bytes) → bool

This function deserializes the serialized variables into the current session.

+
Volatile
crdb_internal.encode_key(table_id: int, index_id: int, row_tuple: anyelement) → bytes

Generate the key for a row on a particular table and index.

+
Stable
crdb_internal.fingerprint(span: bytes[], start_time: decimal, all_revisions: bool) → int

This function is used only by CockroachDB’s developers for testing purposes.

+
Stable
crdb_internal.fingerprint(span: bytes[], start_time: timestamptz, all_revisions: bool) → int

This function is used only by CockroachDB’s developers for testing purposes.

+
Stable
crdb_internal.fingerprint(span: bytes[], stripped: bool) → int

This function is used only by CockroachDB’s developers for testing purposes.

+
Stable
crdb_internal.force_assertion_error(msg: string) → int

This function is used only by CockroachDB’s developers for testing purposes.

+
Volatile
crdb_internal.force_error(errorCode: string, msg: string) → int

This function is used only by CockroachDB’s developers for testing purposes.

+
Volatile
crdb_internal.force_log_fatal(msg: string) → int

This function is used only by CockroachDB’s developers for testing purposes.

+
Volatile
crdb_internal.force_panic(msg: string) → int

This function is used only by CockroachDB’s developers for testing purposes.

+
Volatile
crdb_internal.force_panic(msg: string, mode: string) → int

This function is used only by CockroachDB’s developers for testing purposes.

+
Volatile
crdb_internal.force_retry(val: interval) → int

This function is used only by CockroachDB’s developers for testing purposes.

+
Volatile
crdb_internal.generate_test_objects(names: string, counts: int[]) → jsonb

Generates a number of objects whose name follow the provided pattern.

+

generate_test_objects(pat, counts) is an alias for +generate_test_objects(’{“names”:pat, “counts”:counts}’::jsonb)

+
Volatile
crdb_internal.generate_test_objects(names: string, number: int) → jsonb

Generates a number of objects whose name follow the provided pattern.

+

generate_test_objects(pat, num) is an alias for +generate_test_objects(’{“names”:pat, “counts”:[num]}’::jsonb)

+
Volatile
crdb_internal.generate_test_objects(parameters: jsonb) → jsonb

Generates a number of objects whose name follow the provided pattern.

+

Parameters:

+
    +
  • “names”: pattern to use to name the generated objects (default: +“test”).
  • +
  • “counts”: counts of generated objects (default: [10]).
  • +
  • “dry_run”: prepare the schema but do not actually write it +(default: false).
  • +
  • “seed”: random seed to use (default: auto).
  • +
  • “randomize_columns”: whether to randomize the column names on tables +(default: true).
  • +
  • “table_templates”: table templates to use. +If the last part of “names” is “_”, the name of the template +will be used as base pattern during name generation for tables. +Otherwise, the last part of “names” will be used as pattern. +If no table templates are specified, a simple template is used.
  • +
  • “name_gen”: configuration for the name generation, see below.
  • +
+

Name generation options:

+
    +
  • “number”: whether to add a number to the generated names (default true). +When enabled, occurrences of the character ‘#’ in the name pattern are +replaced by the number. If ‘#’ is not present, the number is added at the end.
  • +
  • “noise”: whether to add noise to the generated names (default true). +It adds a non-zero probability for each of the probability options below left to zero. +(To enable noise generally but disable one type of noise, set its probability to -1.)
  • +
  • “punctuate”: probability of adding punctuation.
  • +
  • “fmt”: probability of adding random Go/C formatting directives.
  • +
  • “escapes”: probability of adding random escape sequences.
  • +
  • “quote”: probability of adding single or double quotes.
  • +
  • “emote”: probability of adding emojis.
  • +
  • “space”: probability of adding simple spaces.
  • +
  • “whitespace”: probability of adding complex whitespace.
  • +
  • “capitals”: probability of using capital letters. +Note: the name pattern must contain ASCII letters already for capital letters to be used.
  • +
  • “diacritics”: probability of adding diacritics.
  • +
  • “diacritic_depth”: max number of diacritics to add at a time (default 1).
  • +
  • “zalgo”: special option that overrides diacritics and diacritic_depth (default false).
  • +
+
Volatile
crdb_internal.get_database_id(name: string) → intStable
crdb_internal.get_namespace_id(parent_id: int, name: string) → intStable
crdb_internal.get_namespace_id(parent_id: int, parent_schema_id: int, name: string) → intStable
crdb_internal.get_vmodule() → string

Returns the vmodule configuration on the gateway node processing this request.

+
Volatile
crdb_internal.get_zone_config(namespace_id: int) → bytesStable
crdb_internal.has_role_option(option: string) → bool

Returns whether the current user has the specified role option

+
Stable
crdb_internal.index_span(table_id: int, index_id: int) → bytes[]

This function returns the span that contains the keys for the given index.

+
Leakproof
crdb_internal.is_admin() → bool

Retrieves the current user’s admin status.

+
Stable
crdb_internal.is_at_least_version(version: string) → bool

Returns true if the cluster version is not older than the argument.

+
Volatile
crdb_internal.is_constraint_active(table_name: string, constraint_name: string) → bool

This function is used to determine if a given constraint is currently +active for the current transaction.

+
Volatile
crdb_internal.lease_holder(key: bytes) → int

This function is used to fetch the leaseholder corresponding to a request key

+
Volatile
crdb_internal.list_sql_keys_in_range(range_id: int) → tuple{string AS key, string AS value, string AS ts}

Returns all SQL K/V pairs within the requested range.

+
Volatile
crdb_internal.locality_value(key: string) → string

Returns the value of the specified locality key.

+
Stable
crdb_internal.no_constant_folding(input: anyelement) → anyelement

This function is used only by CockroachDB’s developers for testing purposes.

+
Volatile
crdb_internal.node_executable_version() → string

Returns the version of CockroachDB this node is running.

+
Volatile
crdb_internal.node_id() → int

Returns the node ID.

+
Stable
crdb_internal.notice(msg: string) → int

This function is used only by CockroachDB’s developers for testing purposes.

+
Volatile
crdb_internal.notice(severity: string, msg: string) → int

This function is used only by CockroachDB’s developers for testing purposes.

+
Volatile
crdb_internal.num_geo_inverted_index_entries(table_id: int, index_id: int, val: geography) → int

This function is used only by CockroachDB’s developers for testing purposes.

+
Stable
crdb_internal.num_geo_inverted_index_entries(table_id: int, index_id: int, val: geometry) → int

This function is used only by CockroachDB’s developers for testing purposes.

+
Stable
crdb_internal.num_inverted_index_entries(val: string, version: int) → int

This function is used only by CockroachDB’s developers for testing purposes.

+
Stable
crdb_internal.num_inverted_index_entries(val: anyelement[]) → int

This function is used only by CockroachDB’s developers for testing purposes.

+
Stable
crdb_internal.num_inverted_index_entries(val: anyelement[], version: int) → int

This function is used only by CockroachDB’s developers for testing purposes.

+
Stable
crdb_internal.num_inverted_index_entries(val: jsonb) → int

This function is used only by CockroachDB’s developers for testing purposes.

+
Stable
crdb_internal.num_inverted_index_entries(val: jsonb, version: int) → int

This function is used only by CockroachDB’s developers for testing purposes.

+
Stable
crdb_internal.num_inverted_index_entries(val: tsvector, version: int) → int

This function is used only by CockroachDB’s developers for testing purposes.

+
Stable
crdb_internal.payloads_for_span(span_id: int) → tuple{string AS payload_type, jsonb AS payload_jsonb}

Returns the payload(s) of the requested span and all its children.

+
Volatile
crdb_internal.payloads_for_trace(trace_id: int) → tuple{int AS span_id, string AS payload_type, jsonb AS payload_jsonb}

Returns the payload(s) of the requested trace.

+
Volatile
crdb_internal.pretty_key(raw_key: bytes, skip_fields: int) → string

This function is used only by CockroachDB’s developers for testing purposes.

+
Immutable
crdb_internal.pretty_span(raw_key_start: bytes, raw_key_end: bytes, skip_fields: int) → string

This function is used only by CockroachDB’s developers for testing purposes.

+
Immutable
crdb_internal.range_stats(key: bytes) → jsonb

This function is used to retrieve range statistics information as a JSON object.

+
Volatile
crdb_internal.read_file(uri: string) → bytes

Read the content of the file at the supplied external storage URI

+
Volatile
crdb_internal.repair_ttl_table_scheduled_job(oid: oid) → void

Repairs the scheduled job for a TTL table if it is missing.

+
Volatile
crdb_internal.request_statement_bundle(stmtFingerprint: string, samplingProbability: float, minExecutionLatency: interval, expiresAfter: interval) → bool

Used to request statement bundle for a given statement fingerprint +that has execution latency greater than the ‘minExecutionLatency’. If the +‘expiresAfter’ argument is empty, then the statement bundle request never +expires until the statement bundle is collected

+
Volatile
crdb_internal.reset_activity_tables() → bool

This function is used to clear the {statement|transaction} activity system tables.

+
Volatile
crdb_internal.reset_index_usage_stats() → bool

This function is used to clear the collected index usage statistics.

+
Volatile
crdb_internal.reset_sql_stats() → bool

This function is used to clear the collected SQL statistics.

+
Volatile
crdb_internal.revalidate_unique_constraint(table_name: string, constraint_name: string) → void

This function is used to revalidate the given unique constraint in the given +table. Returns an error if validation fails.

+
Volatile
crdb_internal.revalidate_unique_constraints_in_all_tables() → void

This function is used to revalidate all unique constraints in tables +in the current database. Returns an error if validation fails.

+
Volatile
crdb_internal.revalidate_unique_constraints_in_table(table_name: string) → void

This function is used to revalidate all unique constraints in the given +table. Returns an error if validation fails.

+
Volatile
crdb_internal.round_decimal_values(val: decimal, scale: int) → decimal

This function is used internally to round decimal values during mutations.

+
Immutable
crdb_internal.round_decimal_values(val: decimal[], scale: int) → decimal[]

This function is used internally to round decimal array values during mutations.

+
Stable
crdb_internal.schedule_sql_stats_compaction() → bool

This function is used to start a SQL stats compaction job.

+
Volatile
crdb_internal.serialize_session() → bytes

This function serializes the variables in the current session.

+
Volatile
crdb_internal.set_trace_verbose(trace_id: int, verbosity: bool) → bool

Returns true if root span was found and verbosity was set, false otherwise.

+
Volatile
crdb_internal.set_vmodule(vmodule_string: string) → int

Set the equivalent of the --vmodule flag on the gateway node processing this request; it affords control over the logging verbosity of different files. Example syntax: crdb_internal.set_vmodule('recordio=2,file=1,gfs*=3'). Reset with: crdb_internal.set_vmodule(''). Raising the verbosity can severely affect performance.

+
Volatile
crdb_internal.table_span(table_id: int) → bytes[]

This function returns the span that contains the keys for the given table.

+
Leakproof
crdb_internal.trace_id() → int

Returns the current trace ID or an error if no trace is open.

+
Volatile
crdb_internal.unsafe_clear_gossip_info(key: string) → bool

This function is used only by CockroachDB’s developers for testing purposes.

+
Volatile
crdb_internal.validate_session_revival_token(token: bytes) → bool

Validate a token that was created by create_session_revival_token. Intended for testing.

+
Volatile
crdb_internal.validate_ttl_scheduled_jobs() → void

Validate all TTL tables have a valid scheduled job attached.

+
Volatile
crdb_internal.void_func() → void

This function is used only by CockroachDB’s developers for testing purposes.

+
Volatile
crdb_internal.write_file(data: bytes, uri: string) → int

Write the content passed to a file at the supplied external storage URI

+
Volatile
current_database() → string

Returns the current database.

+
Stable
current_schema() → string

Returns the current schema.

+
Stable
current_schemas(include_pg_catalog: bool) → string[]

Returns the valid schemas in the search path.

+
Stable
current_user() → string

Returns the current user. This function is provided for compatibility with PostgreSQL.

+
Stable
session_user() → string

Returns the session user. This function is provided for compatibility with PostgreSQL.

+
Stable
to_regclass(text: string) → regtype

Translates a textual relation name to its OID

+
Stable
to_regnamespace(text: string) → regtype

Translates a textual schema name to its OID

+
Stable
to_regproc(text: string) → regtype

Translates a textual function or procedure name to its OID

+
Stable
to_regprocedure(text: string) → regtype

Translates a textual function or procedure name (with argument types) to its OID

+
Stable
to_regrole(text: string) → regtype

Translates a textual role name to its OID

+
Stable
to_regtype(text: string) → regtype

Translates a textual type name to its OID

+
Stable
version() → string

Returns the node’s version of CockroachDB.

+
Volatile
+ +### System repair functions + + + + + + +
Function → ReturnsDescriptionVolatility
crdb_internal.force_delete_table_data(id: int) → bool

This function can be used to clear the data belonging to a table, when the table cannot be dropped.

+
Volatile
crdb_internal.repair_catalog_corruption(descriptor_id: int, corruption: string) → bool

repair_catalog_corruption(descriptor_id,corruption) attempts to repair corrupt records in system tables associated with that descriptor id

+
Volatile
+ +### TIMETZ functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
current_time() → time

Returns the current transaction’s time with no time zone.

+
Stable
current_time() → timetz

Returns the current transaction’s time with time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
current_time(precision: int) → time

Returns the current transaction’s time with no time zone.

+
Stable
current_time(precision: int) → timetz

Returns the current transaction’s time with time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime() → time

Returns the current transaction’s time with no time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime() → timetz

Returns the current transaction’s time with time zone.

+
Stable
localtime(precision: int) → time

Returns the current transaction’s time with no time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime(precision: int) → timetz

Returns the current transaction’s time with time zone.

+
Stable
+ +### Trigrams functions + + + + + + +
Function → ReturnsDescriptionVolatility
show_trgm(input: string) → string[]

Returns an array of all the trigrams in the given string.

+
Immutable
similarity(left: string, right: string) → float

Returns a number that indicates how similar the two arguments are. The range of the result is zero (indicating that the two strings are completely dissimilar) to one (indicating that the two strings are identical).

+
Immutable
+ +### UUID functions + + + + + +
Function → ReturnsDescriptionVolatility
uuid_to_ulid(val: uuid) → string

Converts a UUID-encoded ULID to its string representation.

+
Immutable
+ +### Compatibility functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
col_description(table_oid: oid, column_number: int) → string

Returns the comment for a table column, which is specified by the OID of its table and its column number. (obj_description cannot be used for table columns, since columns do not have OIDs of their own.)

+
Stable
current_setting(setting_name: string) → string

System info

+
Stable
current_setting(setting_name: string, missing_ok: bool) → string

System info

+
Stable
format_type(type_oid: oid, typemod: int) → string

Returns the SQL name of a data type that is identified by its type OID and possibly a type modifier. Currently, the type modifier is ignored.

+
Stable
getdatabaseencoding() → string

Returns the current encoding name used by the database.

+
Stable
has_any_column_privilege(table: string, privilege: string) → bool

Returns whether or not the current user has privileges for any column of table.

+
Stable
has_any_column_privilege(table: oid, privilege: string) → bool

Returns whether or not the current user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: string, table: string, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: string, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: oid, table: string, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: oid, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_column_privilege(table: string, column: int, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: string, column: string, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: oid, column: int, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: oid, column: string, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(user: string, table: string, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: string, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: oid, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: oid, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: string, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: string, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: oid, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: oid, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_database_privilege(database: string, privilege: string) → bool

Returns whether or not the current user has privileges for database.

+
Stable
has_database_privilege(database: oid, privilege: string) → bool

Returns whether or not the current user has privileges for database.

+
Stable
has_database_privilege(user: string, database: string, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: string, database: oid, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: oid, database: string, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: oid, database: oid, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_foreign_data_wrapper_privilege(fdw: string, privilege: string) → bool

Returns whether or not the current user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(fdw: oid, privilege: string) → bool

Returns whether or not the current user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: string, fdw: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: string, fdw: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: oid, fdw: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: oid, fdw: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_function_privilege(function: string, privilege: string) → bool

Returns whether or not the current user has privileges for function.

+
Stable
has_function_privilege(function: oid, privilege: string) → bool

Returns whether or not the current user has privileges for function.

+
Stable
has_function_privilege(user: string, function: string, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: string, function: oid, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: oid, function: string, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: oid, function: oid, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_language_privilege(language: string, privilege: string) → bool

Returns whether or not the current user has privileges for language.

+
Stable
has_language_privilege(language: oid, privilege: string) → bool

Returns whether or not the current user has privileges for language.

+
Stable
has_language_privilege(user: string, language: string, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: string, language: oid, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: oid, language: string, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: oid, language: oid, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_schema_privilege(schema: string, privilege: string) → bool

Returns whether or not the current user has privileges for schema.

+
Stable
has_schema_privilege(schema: oid, privilege: string) → bool

Returns whether or not the current user has privileges for schema.

+
Stable
has_schema_privilege(user: string, schema: string, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: string, schema: oid, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: oid, schema: string, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: oid, schema: oid, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_sequence_privilege(sequence: string, privilege: string) → bool

Returns whether or not the current user has privileges for sequence.

+
Stable
has_sequence_privilege(sequence: oid, privilege: string) → bool

Returns whether or not the current user has privileges for sequence.

+
Stable
has_sequence_privilege(user: string, sequence: string, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: string, sequence: oid, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: oid, sequence: string, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: oid, sequence: oid, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_server_privilege(server: string, privilege: string) → bool

Returns whether or not the current user has privileges for foreign server.

+
Stable
has_server_privilege(server: oid, privilege: string) → bool

Returns whether or not the current user has privileges for foreign server.

+
Stable
has_server_privilege(user: string, server: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: string, server: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: oid, server: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: oid, server: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_table_privilege(table: string, privilege: string) → bool

Returns whether or not the current user has privileges for table.

+
Stable
has_table_privilege(table: oid, privilege: string) → bool

Returns whether or not the current user has privileges for table.

+
Stable
has_table_privilege(user: string, table: string, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: string, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: oid, table: string, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: oid, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_tablespace_privilege(tablespace: string, privilege: string) → bool

Returns whether or not the current user has privileges for tablespace.

+
Stable
has_tablespace_privilege(tablespace: oid, privilege: string) → bool

Returns whether or not the current user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: string, tablespace: string, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: string, tablespace: oid, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: oid, tablespace: string, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: oid, tablespace: oid, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_type_privilege(type: string, privilege: string) → bool

Returns whether or not the current user has privileges for type.

+
Stable
has_type_privilege(type: oid, privilege: string) → bool

Returns whether or not the current user has privileges for type.

+
Stable
has_type_privilege(user: string, type: string, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: string, type: oid, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: oid, type: string, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: oid, type: oid, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
information_schema._pg_numeric_precision(typid: oid, typmod: int4) → int

Returns the precision of the given type with type modifier

+
Immutable
information_schema._pg_numeric_precision_radix(typid: oid, typmod: int4) → int

Returns the radix of the given type with type modifier

+
Immutable
information_schema._pg_numeric_scale(typid: oid, typmod: int4) → int

Returns the scale of the given type with type modifier

+
Immutable
obj_description(object_oid: oid) → string

Returns the comment for a database object specified by its OID alone. This is deprecated since there is no guarantee that OIDs are unique across different system catalogs; therefore, the wrong comment might be returned.

+
Stable
obj_description(object_oid: oid, catalog_name: string) → string

Returns the comment for a database object specified by its OID and the name of the containing system catalog. For example, obj_description(123456, ‘pg_class’) would retrieve the comment for the table with OID 123456.

+
Stable
oidvectortypes(vector: oidvector) → string

Generates a comma separated string of type names from an oidvector.

+
Stable
pg_backend_pid() → int

Returns a numerical ID attached to this session. This ID is part of the query cancellation key used by the wire protocol. This function was only added for compatibility, and unlike in Postgres, the returned value does not correspond to a real process ID.

+
Stable
pg_collation_for(str: anyelement) → string

Returns the collation of the argument

+
Stable
pg_column_is_updatable(reloid: oid, attnum: int2, include_triggers: bool) → bool

Returns whether the given column can be updated.

+
Stable
pg_column_size(anyelement...) → int

Return size in bytes of the column provided as an argument

+
Immutable
pg_function_is_visible(oid: oid) → bool

Returns whether the function with the given OID belongs to one of the schemas on the search path.

+
Stable
pg_get_function_arguments(func_oid: oid) → string

Returns the argument list (with defaults) necessary to identify a function, in the form it would need to appear in within CREATE FUNCTION.

+
Stable
pg_get_function_identity_arguments(func_oid: oid) → string

Returns the argument list (without defaults) necessary to identify a function, in the form it would need to appear in within ALTER FUNCTION, for instance.

+
Stable
pg_get_function_result(func_oid: oid) → string

Returns the types of the result of the specified function.

+
Stable
pg_get_functiondef(func_oid: oid) → string

For user-defined functions, returns the definition of the specified function. For builtin functions, returns the name of the function.

+
Stable
pg_get_indexdef(index_oid: oid) → string

Gets the CREATE INDEX command for index

+
Stable
pg_get_indexdef(index_oid: oid, column_no: int, pretty_bool: bool) → string

Gets the CREATE INDEX command for index, or definition of just one index column when given a non-zero column number

+
Stable
pg_get_serial_sequence(table_name: string, column_name: string) → string

Returns the name of the sequence used by the given column_name in the table table_name.

+
Stable
pg_get_viewdef(view_oid: oid) → string

Returns the CREATE statement for an existing view.

+
Stable
pg_get_viewdef(view_oid: oid, pretty_bool: bool) → string

Returns the CREATE statement for an existing view.

+
Stable
pg_has_role(role: string, privilege: string) → bool

Returns whether or not the current user has privileges for role.

+
Stable
pg_has_role(role: oid, privilege: string) → bool

Returns whether or not the current user has privileges for role.

+
Stable
pg_has_role(user: string, role: string, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: string, role: oid, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: oid, role: string, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: oid, role: oid, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_is_other_temp_schema(oid: oid) → bool

Returns true if the given OID is the OID of another session’s temporary schema. (This can be useful, for example, to exclude other sessions’ temporary tables from a catalog display.)

+
Stable
pg_my_temp_schema() → oid

Returns the OID of the current session’s temporary schema, or zero if it has none (because it has not created any temporary tables).

+
Stable
pg_relation_is_updatable(reloid: oid, include_triggers: bool) → int4

Returns the update events the relation supports.

+
Stable
pg_sleep(seconds: float) → bool

pg_sleep makes the current session’s process sleep until seconds seconds have elapsed. seconds is a value of type double precision, so fractional-second delays can be specified.

+
Volatile
pg_table_is_visible(oid: oid) → bool

Returns whether the table with the given OID belongs to one of the schemas on the search path.

+
Stable
pg_type_is_visible(oid: oid) → bool

Returns whether the type with the given OID belongs to one of the schemas on the search path.

+
Stable
set_config(setting_name: string, new_value: string, is_local: bool) → string

System info

+
Volatile
shobj_description(object_oid: oid, catalog_name: string) → string

Returns the comment for a shared database object specified by its OID and the name of the containing system catalog. This is just like obj_description except that it is used for retrieving comments on shared objects (e.g. databases).

+
Stable
+ diff --git a/src/current/_includes/cockroach-generated/release-23.1/sql/operators.md b/src/current/_includes/cockroach-generated/release-23.1/sql/operators.md new file mode 100644 index 00000000000..e7c7f1e20f3 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-23.1/sql/operators.md @@ -0,0 +1,610 @@ + + + + + +
#Return
int # intint
varbit # varbitvarbit
+ + + + +
#>Return
jsonb #> string[]jsonb
+ + + + +
#>>Return
jsonb #>> string[]string
+ + + + + + + + + +
%Return
decimal % decimaldecimal
decimal % intdecimal
float % floatfloat
int % decimaldecimal
int % intint
string % stringbool
+ + + + + + +
&Return
inet & inetinet
int & intint
varbit & varbitvarbit
+ + + + + + + + + +
&&Return
anyelement && anyelementbool
box2d && box2dbool
box2d && geometrybool
geometry && box2dbool
geometry && geometrybool
inet && inetbool
+ + + + + + + + + + + + + + +
*Return
decimal * decimaldecimal
decimal * intdecimal
decimal * intervalinterval
float * floatfloat
float * intervalinterval
int * decimaldecimal
int * intint
int * intervalinterval
interval * decimalinterval
interval * floatinterval
interval * intinterval
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Return
+decimaldecimal
+floatfloat
+intint
+intervalinterval
date + intdate
date + intervaltimestamp
date + timetimestamp
date + timetztimestamptz
decimal + decimaldecimal
decimal + intdecimal
float + floatfloat
inet + intinet
int + datedate
int + decimaldecimal
int + inetinet
int + intint
interval + datetimestamp
interval + intervalinterval
interval + timetime
interval + timestamptimestamp
interval + timestamptztimestamptz
interval + timetztimetz
time + datetimestamp
time + intervaltime
timestamp + intervaltimestamp
timestamptz + intervaltimestamptz
timetz + datetimestamptz
timetz + intervaltimetz
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
-Return
-decimaldecimal
-floatfloat
-intint
-intervalinterval
date - dateint
date - intdate
date - intervaltimestamp
date - timetimestamp
decimal - decimaldecimal
decimal - intdecimal
float - floatfloat
inet - inetint
inet - intinet
int - decimaldecimal
int - intint
interval - intervalinterval
jsonb - intjsonb
jsonb - stringjsonb
jsonb - string[]jsonb
time - intervaltime
time - timeinterval
timestamp - intervaltimestamp
timestamp - timestampinterval
timestamp - timestamptzinterval
timestamptz - intervaltimestamptz
timestamptz - timestampinterval
timestamptz - timestamptzinterval
timetz - intervaltimetz
+ + + + + +
->Return
jsonb -> intjsonb
jsonb -> stringjsonb
+ + + + + +
->>Return
jsonb ->> intstring
jsonb ->> stringstring
+ + + + + + + + + + +
/Return
decimal / decimaldecimal
decimal / intdecimal
float / floatfloat
int / decimaldecimal
int / intdecimal
interval / floatinterval
interval / intinterval
+ + + + + + + + +
//Return
decimal // decimaldecimal
decimal // intdecimal
float // floatfloat
int // decimaldecimal
int // intint
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
<Return
anyenum < anyenumbool
bool < boolbool
bool[] < bool[]bool
box2d < box2dbool
bytes < bytesbool
bytes[] < bytes[]bool
collatedstring < collatedstringbool
date < datebool
date < timestampbool
date < timestamptzbool
date[] < date[]bool
decimal < decimalbool
decimal < floatbool
decimal < intbool
decimal[] < decimal[]bool
float < decimalbool
float < floatbool
float < intbool
float[] < float[]bool
geography < geographybool
geometry < geometrybool
inet < inetbool
inet[] < inet[]bool
int < decimalbool
int < floatbool
int < intbool
int < oidbool
int[] < int[]bool
interval < intervalbool
interval[] < interval[]bool
jsonb < jsonbbool
oid < intbool
oid < oidbool
string < stringbool
string[] < string[]bool
time < timebool
time < timetzbool
time[] < time[]bool
timestamp < datebool
timestamp < timestampbool
timestamp < timestamptzbool
timestamp[] < timestamp[]bool
timestamptz < datebool
timestamptz < timestampbool
timestamptz < timestamptzbool
timestamptz < timestamptzbool
timetz < timebool
timetz < timetzbool
tuple < tuplebool
uuid < uuidbool
uuid[] < uuid[]bool
varbit < varbitbool
+ + + + + + +
<<Return
inet << inetbool
int << intint
varbit << intvarbit
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
<=Return
anyenum <= anyenumbool
bool <= boolbool
bool[] <= bool[]bool
box2d <= box2dbool
bytes <= bytesbool
bytes[] <= bytes[]bool
collatedstring <= collatedstringbool
date <= datebool
date <= timestampbool
date <= timestamptzbool
date[] <= date[]bool
decimal <= decimalbool
decimal <= floatbool
decimal <= intbool
decimal[] <= decimal[]bool
float <= decimalbool
float <= floatbool
float <= intbool
float[] <= float[]bool
geography <= geographybool
geometry <= geometrybool
inet <= inetbool
inet[] <= inet[]bool
int <= decimalbool
int <= floatbool
int <= intbool
int <= oidbool
int[] <= int[]bool
interval <= intervalbool
interval[] <= interval[]bool
jsonb <= jsonbbool
oid <= intbool
oid <= oidbool
string <= stringbool
string[] <= string[]bool
time <= timebool
time <= timetzbool
time[] <= time[]bool
timestamp <= datebool
timestamp <= timestampbool
timestamp <= timestamptzbool
timestamp[] <= timestamp[]bool
timestamptz <= datebool
timestamptz <= timestampbool
timestamptz <= timestamptzbool
timestamptz <= timestamptzbool
timetz <= timebool
timetz <= timetzbool
tuple <= tuplebool
uuid <= uuidbool
uuid[] <= uuid[]bool
varbit <= varbitbool
+ + + + + +
<@Return
anyelement <@ anyelementbool
jsonb <@ jsonbbool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
=Return
anyenum = anyenumbool
bool = boolbool
bool[] = bool[]bool
box2d = box2dbool
bytes = bytesbool
bytes[] = bytes[]bool
collatedstring = collatedstringbool
date = datebool
date = timestampbool
date = timestamptzbool
date[] = date[]bool
decimal = decimalbool
decimal = floatbool
decimal = intbool
decimal[] = decimal[]bool
float = decimalbool
float = floatbool
float = intbool
float[] = float[]bool
geography = geographybool
geometry = geometrybool
inet = inetbool
inet[] = inet[]bool
int = decimalbool
int = floatbool
int = intbool
int = oidbool
int[] = int[]bool
interval = intervalbool
interval[] = interval[]bool
jsonb = jsonbbool
oid = intbool
oid = oidbool
string = stringbool
string[] = string[]bool
time = timebool
time = timetzbool
time[] = time[]bool
timestamp = datebool
timestamp = timestampbool
timestamp = timestamptzbool
timestamp[] = timestamp[]bool
timestamptz = datebool
timestamptz = timestampbool
timestamptz = timestamptzbool
timestamptz = timestamptzbool
timetz = timebool
timetz = timetzbool
tsquery = tsquerybool
tsvector = tsvectorbool
tuple = tuplebool
uuid = uuidbool
uuid[] = uuid[]bool
varbit = varbitbool
+ + + + + + +
>>Return
inet >> inetbool
int >> intint
varbit >> intvarbit
+ + + + +
?Return
jsonb ? stringbool
+ + + + +
?&Return
jsonb ?& string[]bool
+ + + + +
?|Return
jsonb ?| string[]bool
+ + + + + +
@>Return
anyelement @> anyelementbool
jsonb @> jsonbbool
+ + + + + +
@@Return
tsquery @@ tsvectorbool
tsvector @@ tsquerybool
+ + + + +
ILIKEReturn
string ILIKE stringbool
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
INReturn
anyenum IN tuplebool
bool IN tuplebool
box2d IN tuplebool
bytes IN tuplebool
collatedstring IN tuplebool
date IN tuplebool
decimal IN tuplebool
float IN tuplebool
geography IN tuplebool
geometry IN tuplebool
inet IN tuplebool
int IN tuplebool
interval IN tuplebool
jsonb IN tuplebool
oid IN tuplebool
string IN tuplebool
time IN tuplebool
timestamp IN tuplebool
timestamptz IN tuplebool
timetz IN tuplebool
tuple IN tuplebool
uuid IN tuplebool
varbit IN tuplebool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
IS NOT DISTINCT FROMReturn
anyelement IS NOT DISTINCT FROM unknownbool
anyenum IS NOT DISTINCT FROM anyenumbool
bool IS NOT DISTINCT FROM boolbool
bool[] IS NOT DISTINCT FROM bool[]bool
box2d IS NOT DISTINCT FROM box2dbool
bytes IS NOT DISTINCT FROM bytesbool
bytes[] IS NOT DISTINCT FROM bytes[]bool
collatedstring IS NOT DISTINCT FROM collatedstringbool
date IS NOT DISTINCT FROM datebool
date IS NOT DISTINCT FROM timestampbool
date IS NOT DISTINCT FROM timestamptzbool
date[] IS NOT DISTINCT FROM date[]bool
decimal IS NOT DISTINCT FROM decimalbool
decimal IS NOT DISTINCT FROM floatbool
decimal IS NOT DISTINCT FROM intbool
decimal[] IS NOT DISTINCT FROM decimal[]bool
float IS NOT DISTINCT FROM decimalbool
float IS NOT DISTINCT FROM floatbool
float IS NOT DISTINCT FROM intbool
float[] IS NOT DISTINCT FROM float[]bool
geography IS NOT DISTINCT FROM geographybool
geometry IS NOT DISTINCT FROM geometrybool
inet IS NOT DISTINCT FROM inetbool
inet[] IS NOT DISTINCT FROM inet[]bool
int IS NOT DISTINCT FROM decimalbool
int IS NOT DISTINCT FROM floatbool
int IS NOT DISTINCT FROM intbool
int IS NOT DISTINCT FROM oidbool
int[] IS NOT DISTINCT FROM int[]bool
interval IS NOT DISTINCT FROM intervalbool
interval[] IS NOT DISTINCT FROM interval[]bool
jsonb IS NOT DISTINCT FROM jsonbbool
oid IS NOT DISTINCT FROM intbool
oid IS NOT DISTINCT FROM oidbool
string IS NOT DISTINCT FROM stringbool
string[] IS NOT DISTINCT FROM string[]bool
time IS NOT DISTINCT FROM timebool
time IS NOT DISTINCT FROM timetzbool
time[] IS NOT DISTINCT FROM time[]bool
timestamp IS NOT DISTINCT FROM datebool
timestamp IS NOT DISTINCT FROM timestampbool
timestamp IS NOT DISTINCT FROM timestamptzbool
timestamp[] IS NOT DISTINCT FROM timestamp[]bool
timestamptz IS NOT DISTINCT FROM datebool
timestamptz IS NOT DISTINCT FROM timestampbool
timestamptz IS NOT DISTINCT FROM timestamptzbool
timestamptz IS NOT DISTINCT FROM timestamptzbool
timetz IS NOT DISTINCT FROM timebool
timetz IS NOT DISTINCT FROM timetzbool
tsquery IS NOT DISTINCT FROM tsquerybool
tsvector IS NOT DISTINCT FROM tsvectorbool
tuple IS NOT DISTINCT FROM tuplebool
unknown IS NOT DISTINCT FROM unknownbool
unknown IS NOT DISTINCT FROM voidbool
uuid IS NOT DISTINCT FROM uuidbool
uuid[] IS NOT DISTINCT FROM uuid[]bool
varbit IS NOT DISTINCT FROM varbitbool
void IS NOT DISTINCT FROM unknownbool
+ + + + +
LIKEReturn
string LIKE stringbool
+ + + + +
SIMILAR TOReturn
string SIMILAR TO stringbool
+ + + + + + + + +
^Return
decimal ^ decimaldecimal
decimal ^ intdecimal
float ^ floatfloat
int ^ decimaldecimal
int ^ intint
+ + + + + + +
|Return
inet | inetinet
int | intint
varbit | varbitvarbit
+ + + + + +
|/Return
|/decimaldecimal
|/floatfloat
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
||Return
bool || bool[]bool[]
bool || stringstring
bool[] || boolbool[]
bool[] || bool[]bool[]
box2d || box2dbox2d
box2d || stringstring
bytes || bytesbytes
bytes || bytes[]bytes[]
bytes[] || bytesbytes[]
bytes[] || bytes[]bytes[]
date || date[]date[]
date || stringstring
date[] || datedate[]
date[] || date[]date[]
decimal || decimal[]decimal[]
decimal || stringstring
decimal[] || decimaldecimal[]
decimal[] || decimal[]decimal[]
float || float[]float[]
float || stringstring
float[] || floatfloat[]
float[] || float[]float[]
geography || geographygeography
geography || stringstring
geometry || geometrygeometry
geometry || stringstring
inet || inet[]inet[]
inet || stringstring
inet[] || inetinet[]
inet[] || inet[]inet[]
int || int[]int[]
int || stringstring
int[] || intint[]
int[] || int[]int[]
interval || interval[]interval[]
interval || stringstring
interval[] || intervalinterval[]
interval[] || interval[]interval[]
jsonb || jsonbjsonb
jsonb || stringstring
oid || oidoid
oid || stringstring
string || boolstring
string || box2dstring
string || datestring
string || decimalstring
string || floatstring
string || geographystring
string || geometrystring
string || inetstring
string || intstring
string || intervalstring
string || jsonbstring
string || oidstring
string || stringstring
string || string[]string[]
string || timestring
string || timestampstring
string || timestamptzstring
string || timetzstring
string || tuplestring
string || uuidstring
string || varbitstring
string[] || stringstring[]
string[] || string[]string[]
time || stringstring
time || time[]time[]
time[] || timetime[]
time[] || time[]time[]
timestamp || stringstring
timestamp || timestamp[]timestamp[]
timestamp[] || timestamptimestamp[]
timestamp[] || timestamp[]timestamp[]
timestamptz || stringstring
timestamptz || timestamptztimestamptz
timestamptz || timestamptztimestamptz
timestamptz || timestamptztimestamptz
timetz || stringstring
timetz || timetztimetz
tuple || stringstring
uuid || stringstring
uuid || uuid[]uuid[]
uuid[] || uuiduuid[]
uuid[] || uuid[]uuid[]
varbit || stringstring
varbit || varbitvarbit
+ + + + + +
||/Return
||/decimaldecimal
||/floatfloat
+ + + + + + + + + + + +
~Return
~inetinet
~intint
~varbitvarbit
box2d ~ box2dbool
box2d ~ geometrybool
geometry ~ box2dbool
geometry ~ geometrybool
string ~ stringbool
+ + + + +
~*Return
string ~* stringbool
diff --git a/src/current/_includes/cockroach-generated/release-23.1/sql/window_functions.md b/src/current/_includes/cockroach-generated/release-23.1/sql/window_functions.md new file mode 100644 index 00000000000..aee326add40 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-23.1/sql/window_functions.md @@ -0,0 +1,377 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cume_dist() → float

Calculates the relative rank of the current row: (number of rows preceding or peer with current row) / (total rows).

+
Immutable
dense_rank() → int

Calculates the rank of the current row without gaps; this function counts peer groups.

+
Immutable
first_value(val: bool) → bool

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: bytes) → bytes

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: date) → date

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: decimal) → decimal

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: float) → float

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: inet) → inet

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: int) → int

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: interval) → interval

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: string) → string

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: time) → time

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timestamp) → timestamp

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timestamptz) → timestamptz

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: uuid) → uuid

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: box2d) → box2d

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: geography) → geography

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: geometry) → geometry

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: jsonb) → jsonb

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: oid) → oid

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timetz) → timetz

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: varbit) → varbit

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
lag(val: bool) → bool

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: bool, n: int) → bool

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: bool, n: int, default: bool) → bool

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: bytes) → bytes

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: bytes, n: int) → bytes

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: bytes, n: int, default: bytes) → bytes

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: date) → date

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: date, n: int) → date

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: date, n: int, default: date) → date

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: decimal) → decimal

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: decimal, n: int) → decimal

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: decimal, n: int, default: decimal) → decimal

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: float) → float

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: float, n: int) → float

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: float, n: int, default: float) → float

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: inet) → inet

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: inet, n: int) → inet

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: inet, n: int, default: inet) → inet

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: int) → int

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: int, n: int) → int

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: int, n: int, default: int) → int

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: interval) → interval

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: interval, n: int) → interval

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: interval, n: int, default: interval) → interval

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: string) → string

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: string, n: int) → string

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: string, n: int, default: string) → string

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: time) → time

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: time, n: int) → time

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: time, n: int, default: time) → time

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timestamp) → timestamp

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timestamp, n: int, default: timestamp) → timestamp

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timestamptz) → timestamptz

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timestamptz, n: int, default: timestamptz) → timestamptz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: uuid) → uuid

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: uuid, n: int) → uuid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: uuid, n: int, default: uuid) → uuid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: box2d) → box2d

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: box2d, n: int) → box2d

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: box2d, n: int, default: box2d) → box2d

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: geography) → geography

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: geography, n: int) → geography

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: geography, n: int, default: geography) → geography

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: geometry) → geometry

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: geometry, n: int) → geometry

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: geometry, n: int, default: geometry) → geometry

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: jsonb) → jsonb

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: jsonb, n: int, default: jsonb) → jsonb

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: oid) → oid

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: oid, n: int) → oid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: oid, n: int, default: oid) → oid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timetz) → timetz

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timetz, n: int) → timetz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timetz, n: int, default: timetz) → timetz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: varbit) → varbit

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: varbit, n: int) → varbit

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: varbit, n: int, default: varbit) → varbit

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
last_value(val: bool) → bool

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: bytes) → bytes

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: date) → date

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: decimal) → decimal

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: float) → float

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: inet) → inet

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: int) → int

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: interval) → interval

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: string) → string

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: time) → time

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timestamp) → timestamp

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timestamptz) → timestamptz

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: uuid) → uuid

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: box2d) → box2d

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: geography) → geography

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: geometry) → geometry

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: jsonb) → jsonb

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: oid) → oid

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timetz) → timetz

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: varbit) → varbit

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
lead(val: bool) → bool

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: bool, n: int) → bool

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: bool, n: int, default: bool) → bool

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: bytes) → bytes

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: bytes, n: int) → bytes

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: bytes, n: int, default: bytes) → bytes

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: date) → date

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: date, n: int) → date

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: date, n: int, default: date) → date

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: decimal) → decimal

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: decimal, n: int) → decimal

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: decimal, n: int, default: decimal) → decimal

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: float) → float

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: float, n: int) → float

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: float, n: int, default: float) → float

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: inet) → inet

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: inet, n: int) → inet

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: inet, n: int, default: inet) → inet

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: int) → int

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: int, n: int) → int

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: int, n: int, default: int) → int

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: interval) → interval

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: interval, n: int) → interval

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: interval, n: int, default: interval) → interval

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: string) → string

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: string, n: int) → string

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: string, n: int, default: string) → string

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: time) → time

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: time, n: int) → time

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: time, n: int, default: time) → time

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timestamp) → timestamp

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timestamp, n: int, default: timestamp) → timestamp

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timestamptz) → timestamptz

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timestamptz, n: int, default: timestamptz) → timestamptz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: uuid) → uuid

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: uuid, n: int) → uuid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: uuid, n: int, default: uuid) → uuid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: box2d) → box2d

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: box2d, n: int) → box2d

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: box2d, n: int, default: box2d) → box2d

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: geography) → geography

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: geography, n: int) → geography

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: geography, n: int, default: geography) → geography

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: geometry) → geometry

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: geometry, n: int) → geometry

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: geometry, n: int, default: geometry) → geometry

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: jsonb) → jsonb

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: jsonb, n: int, default: jsonb) → jsonb

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: oid) → oid

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: oid, n: int) → oid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: oid, n: int, default: oid) → oid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timetz) → timetz

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timetz, n: int) → timetz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timetz, n: int, default: timetz) → timetz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: varbit) → varbit

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: varbit, n: int) → varbit

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: varbit, n: int, default: varbit) → varbit

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
nth_value(val: bool, n: int) → bool

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: bytes, n: int) → bytes

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: date, n: int) → date

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: decimal, n: int) → decimal

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: float, n: int) → float

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: inet, n: int) → inet

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: int, n: int) → int

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: interval, n: int) → interval

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: string, n: int) → string

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: time, n: int) → time

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: uuid, n: int) → uuid

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: box2d, n: int) → box2d

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: geography, n: int) → geography

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: geometry, n: int) → geometry

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: oid, n: int) → oid

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timetz, n: int) → timetz

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: varbit, n: int) → varbit

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
ntile(n: int) → int

Calculates an integer ranging from 1 to n, dividing the partition as equally as possible.

+
Immutable
percent_rank() → float

Calculates the relative rank of the current row: (rank - 1) / (total rows - 1).

+
Immutable
rank() → int

Calculates the rank of the current row with gaps; same as row_number of its first peer.

+
Immutable
row_number() → int

Calculates the number of the current row within its partition, counting from 1.

+
Immutable
+ diff --git a/src/current/_includes/cockroach-generated/release-23.2/eventlog.md b/src/current/_includes/cockroach-generated/release-23.2/eventlog.md new file mode 100644 index 00000000000..d5a6d773c29 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-23.2/eventlog.md @@ -0,0 +1,3314 @@ +Certain notable events are reported using a structured format. +Commonly, these notable events are also copied to the table +`system.eventlog`, unless the cluster setting +`server.eventlog.enabled` is unset. + +Additionally, notable events are copied to specific external logging +channels in log messages, where they can be collected for further processing. + +The sections below document the possible notable event types +in this version of CockroachDB. For each event type, a table +documents the possible fields. A field may be omitted from +an event if its value is empty or zero. + +A field is also considered "Sensitive" if it may contain +application-specific information or personally identifiable information (PII). In that case, +the copy of the event sent to the external logging channel +will contain redaction markers in a format that is compatible +with the redaction facilities in [`cockroach debug zip`](cockroach-debug-zip.html) +and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html), +provided the `redactable` functionality is enabled on the logging sink. + +Events not documented on this page will have an unstructured format in log messages. + +## Cluster-level events + +Events in this category pertain to an entire cluster and are +not relative to any particular tenant. + +In a multi-tenant setup, the `system.eventlog` table for individual +tenants cannot contain a copy of cluster-level events; conversely, +the `system.eventlog` table in the system tenant cannot contain the +SQL-level events for individual tenants. + +Events in this category are logged to the `OPS` channel. 
+ + +### `certs_reload` + +An event of type `certs_reload` is recorded when the TLS certificates are +reloaded/rotated from disk. + + +| Field | Description | Sensitive | +|--|--|--| +| `Success` | Whether the operation completed without errors. | no | +| `ErrorMessage` | If an error was encountered, the text of the error. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `node_decommissioned` + +An event of type `node_decommissioned` is recorded when a node is marked as +decommissioned. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_decommissioning` + +An event of type `node_decommissioning` is recorded when a node is marked as +decommissioning. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_join` + +An event of type `node_join` is recorded when a node joins the cluster. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. 
| no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_recommissioned` + +An event of type `node_recommissioned` is recorded when a decommissioning node is +recommissioned. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_restart` + +An event of type `node_restart` is recorded when an existing node rejoins the cluster +after being offline. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_shutdown_connection_timeout` + +An event of type `node_shutdown_connection_timeout` is recorded when SQL connections remain open +during shutdown, after waiting for the server.shutdown.connections.timeout +to transpire. + + +| Field | Description | Sensitive | +|--|--|--| +| `Detail` | The detailed message, meant to be a human-understandable explanation. | no | +| `ConnectionsRemaining` | The number of connections still open after waiting for the client to close them. | no | +| `TimeoutMillis` | The amount of time the server waited for the client to close the connections, defined by server.shutdown.connections.timeout. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_shutdown_transaction_timeout` + +An event of type `node_shutdown_transaction_timeout` is recorded when SQL transactions remain open +during shutdown, after waiting for the server.shutdown.transactions.timeout +to transpire. + + +| Field | Description | Sensitive | +|--|--|--| +| `Detail` | The detailed message, meant to be a human-understandable explanation. | no | +| `ConnectionsRemaining` | The number of connections still running SQL transactions after waiting for the client to end them. | no | +| `TimeoutMillis` | The amount of time the server waited for the client to close the connections, defined by server.shutdown.transactions.timeout. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `tenant_shared_service_start` + +An event of type `tenant_shared_service_start` is recorded when a tenant server +is started inside the same process as the KV layer. + + +| Field | Description | Sensitive | +|--|--|--| +| `OK` | Whether the startup was successful. | no | +| `ErrorText` | If the startup failed, the text of the error. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `NodeID` | The node ID where the event was originated. | no | +| `TenantID` | The ID of the tenant owning the service. | no | +| `InstanceID` | The ID of the server instance. | no | +| `TenantName` | The name of the tenant at the time the event was emitted. | yes | + +### `tenant_shared_service_stop` + +An event of type `tenant_shared_service_stop` is recorded when a tenant server +is shut down inside the same process as the KV layer. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `TenantID` | The ID of the tenant owning the service. | no | +| `InstanceID` | The ID of the server instance. | no | +| `TenantName` | The name of the tenant at the time the event was emitted. | yes | + +## Debugging events + +Events in this category pertain to debugging operations performed by +operators or (more commonly) Cockroach Labs employees. These operations can +e.g. directly access and mutate internal state, breaking system invariants. + +Events in this category are logged to the `OPS` channel. + + +### `debug_recover_replica` + +An event of type `debug_recover_replica` is recorded when unsafe loss of quorum recovery is performed. + + +| Field | Description | Sensitive | +|--|--|--| +| `RangeID` | | no | +| `StoreID` | | no | +| `SurvivorReplicaID` | | no | +| `UpdatedReplicaID` | | no | +| `StartKey` | | yes | +| `EndKey` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event originated. | no | +| `User` | The user which performed the operation. 
| yes | + +### `debug_send_kv_batch` + +An event of type `debug_send_kv_batch` is recorded when an arbitrary KV BatchRequest is submitted +to the cluster via the `debug send-kv-batch` CLI command. + + +| Field | Description | Sensitive | +|--|--|--| +| `BatchRequest` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event originated. | no | +| `User` | The user which performed the operation. | yes | + +## Health events + +Events in this category pertain to the health of one or more servers. + +Events in this category are logged to the `HEALTH` channel. + + +### `runtime_stats` + +An event of type `runtime_stats` is recorded every 10 seconds as server health metrics. + + +| Field | Description | Sensitive | +|--|--|--| +| `MemRSSBytes` | The process resident set size. Expressed as bytes. | no | +| `GoroutineCount` | The number of goroutines. | no | +| `MemStackSysBytes` | The stack system memory used. Expressed as bytes. | no | +| `GoAllocBytes` | The memory allocated by Go. Expressed as bytes. | no | +| `GoTotalBytes` | The total memory allocated by Go but not released. Expressed as bytes. | no | +| `GoStatsStaleness` | The staleness of the Go memory statistics. Expressed in seconds. | no | +| `HeapFragmentBytes` | The amount of heap fragmentation. Expressed as bytes. | no | +| `HeapReservedBytes` | The amount of heap reserved. Expressed as bytes. | no | +| `HeapReleasedBytes` | The amount of heap released. Expressed as bytes. | no | +| `CGoAllocBytes` | The memory allocated outside of Go. Expressed as bytes. | no | +| `CGoTotalBytes` | The total memory allocated outside of Go but not released. Expressed as bytes. | no | +| `CGoCallRate` | The total number of calls outside of Go over time. Expressed as operations per second. 
| no | +| `CPUUserPercent` | The user CPU percentage. | no | +| `CPUSysPercent` | The system CPU percentage. | no | +| `GCPausePercent` | The GC pause percentage. | no | +| `GCRunCount` | The total number of GC runs. | no | +| `NetHostRecvBytes` | The bytes received on all network interfaces since this process started. | no | +| `NetHostSendBytes` | The bytes sent on all network interfaces since this process started. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Job events + +Events in this category pertain to long-running jobs that are orchestrated by +a node's job registry. These system processes can create and/or modify stored +objects during the course of their execution. + +A job might choose to emit multiple events during its execution when +transitioning from one "state" to another. +Egs: IMPORT/RESTORE will emit events on job creation and successful +completion. If the job fails, events will be emitted on job creation, +failure, and successful revert. + +Events in this category are logged to the `OPS` channel. + + +### `import` + +An event of type `import` is recorded when an import job is created and successful completion. +If the job fails, events will be emitted on job creation, failure, and +successful revert. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `JobID` | The ID of the job that triggered the event. | no | +| `JobType` | The type of the job that triggered the event. | no | +| `Description` | A description of the job that triggered the event. Some jobs populate the description with an approximate representation of the SQL statement run to create the job. 
| yes | +| `User` | The user account that triggered the event. | yes | +| `DescriptorIDs` | The object descriptors affected by the job. Set to zero for operations that don't affect descriptors. | yes | +| `Status` | The status of the job that triggered the event. This allows the job to indicate which phase execution it is in when the event is triggered. | no | + +### `restore` + +An event of type `restore` is recorded when a restore job is created and successful completion. +If the job fails, events will be emitted on job creation, failure, and +successful revert. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `JobID` | The ID of the job that triggered the event. | no | +| `JobType` | The type of the job that triggered the event. | no | +| `Description` | A description of the job that triggered the event. Some jobs populate the description with an approximate representation of the SQL statement run to create the job. | yes | +| `User` | The user account that triggered the event. | yes | +| `DescriptorIDs` | The object descriptors affected by the job. Set to zero for operations that don't affect descriptors. | yes | +| `Status` | The status of the job that triggered the event. This allows the job to indicate which phase execution it is in when the event is triggered. | no | + +## Miscellaneous SQL events + +Events in this category report miscellaneous SQL events. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these miscellaneous events are +preserved in each tenant's own system.eventlog table. + +Events in this category are logged to the `DEV` channel. + + +### `set_cluster_setting` + +An event of type `set_cluster_setting` is recorded when a cluster setting is changed. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `SettingName` | The name of the affected cluster setting. | no | +| `Value` | The new value of the cluster setting. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `set_tenant_cluster_setting` + +An event of type `set_tenant_cluster_setting` is recorded when a cluster setting override +is changed, either for another tenant or for all tenants. + + +| Field | Description | Sensitive | +|--|--|--| +| `SettingName` | The name of the affected cluster setting. | no | +| `Value` | The new value of the cluster setting. | yes | +| `TenantId` | The target Tenant ID. Empty if targeting all tenants. | no | +| `AllTenants` | Whether the override applies to all tenants. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. 
Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +## SQL Access Audit Events + +Events in this category are generated when a table has been +marked as audited via `ALTER TABLE ... EXPERIMENTAL_AUDIT SET`. + +Note: These events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SENSITIVE_ACCESS` channel. + + +### `admin_query` + +An event of type `admin_query` is recorded when a user with admin privileges (the user +is directly or indirectly a member of the admin role) executes a query. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. 
| no | + +### `role_based_audit_event` + +An event of type `role_based_audit_event` is an audit event recorded when an executed query belongs to a user whose role +membership(s) correspond to any role that is enabled to emit an audit log via the sql.log.user_audit +cluster setting. + + +| Field | Description | Sensitive | +|--|--|--| +| `Role` | The configured audit role that emitted this log. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. 
| partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `sensitive_table_access` + +An event of type `sensitive_table_access` is recorded when an access is performed to +a table marked as audited. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table being audited. | yes | +| `AccessMode` | How the table was accessed (r=read / rw=read/write). | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +## SQL Execution Log + +Events in this category report executed queries. + +Note: These events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SQL_EXEC` channel. + + +### `query_execute` + +An event of type `query_execute` is recorded when a query is executed, +and the cluster setting `sql.log.all_statements.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +## SQL Logical Schema Changes + +Events in this category pertain to DDL (Data Definition Language) +operations performed by SQL statements that modify the SQL logical +schema. + +They are relative to a particular SQL tenant. 
+In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `SQL_SCHEMA` channel. + + +### `alter_database_add_region` + +An event of type `alter_database_add_region` is recorded when a region is added to a database. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | yes | +| `RegionName` | The region being added. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_drop_region` + +An event of type `alter_database_drop_region` is recorded when a region is dropped from a database. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | yes | +| `RegionName` | The region being dropped. 
| yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_placement` + +An event of type `alter_database_placement` is recorded when the database placement is modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | yes | +| `Placement` | The new placement policy. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. 
The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_primary_region` + +An event of type `alter_database_primary_region` is recorded when a primary region is added/modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | yes | +| `PrimaryRegionName` | The new primary region. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_set_zone_config_extension` + +An event of type `alter_database_set_zone_config_extension` is recorded when a zone config extension is changed. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + +### `alter_database_survival_goal` + +An event of type `alter_database_survival_goal` is recorded when the survival goal is modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. 
| yes | +| `SurvivalGoal` | The new survival goal | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_function_options` + +An event of type `alter_function_options` is recorded when a user-defined function's options are +altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the affected function. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_index` + +An event of type `alter_index` is recorded when an index is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | yes | +| `IndexName` | The name of the affected index. | yes | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_index_visible` + +An event of type `alter_index_visible` is recorded when an index's visibility is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | yes | +| `IndexName` | The name of the affected index. | yes | +| `NotVisible` | Set true if index is not visible. NOTE: THIS FIELD IS DEPRECATED in favor of invisibility. | no | +| `Invisibility` | The new invisibility of the affected index. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `alter_sequence` + +An event of type `alter_sequence` is recorded when a sequence is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the affected sequence. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_table` + +An event of type `alter_table` is recorded when a table is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | yes | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update, if any. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_type` + +An event of type `alter_type` is recorded when a user-defined type is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. 
Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_column` + +An event of type `comment_on_column` is recorded when a column is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected column. | yes | +| `ColumnName` | The affected column. | yes | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `comment_on_constraint` + +An event of type `comment_on_constraint` is recorded when a constraint is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected constraint. | yes | +| `ConstraintName` | The name of the affected constraint. | yes | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_database` + +An event of type `comment_on_database` is recorded when a database is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | yes | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_index` + +An event of type `comment_on_index` is recorded when an index is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | yes | +| `IndexName` | The name of the affected index. | yes | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_schema` + + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | Name of the affected schema. | yes | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_table` + +An event of type `comment_on_table` is recorded when a table is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | yes | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `convert_to_schema` + +An event of type `convert_to_schema` is recorded when a database is converted to a schema. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database being converted to a schema. 
| yes | +| `NewDatabaseParent` | The name of the parent database for the new schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_database` + +An event of type `create_database` is recorded when a database is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the new database. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_function` + +An event of type `create_function` is recorded when a user-defined function is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the created function. | yes | +| `IsReplace` | If the new function is a replace of an existing function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_index` + +An event of type `create_index` is recorded when an index is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the new index. | yes | +| `IndexName` | The name of the new index. | yes | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_schema` + +An event of type `create_schema` is recorded when a schema is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the new schema. | yes | +| `Owner` | The name of the owner for the new schema. 
| yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_sequence` + +An event of type `create_sequence` is recorded when a sequence is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the new sequence. | yes | +| `Owner` | The name of the owner for the new sequence. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. 
The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_statistics` + +An event of type `create_statistics` is recorded when statistics are collected for a +table. + +Events of this type are only collected when the cluster setting +`sql.stats.post_events.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table for which the statistics were created. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_table` + +An event of type `create_table` is recorded when a table is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the new table. | yes | +| `Owner` | The name of the owner for the new table. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_type` + +An event of type `create_type` is recorded when a user-defined type is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the new type. | yes | +| `Owner` | The name of the owner for the new type. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. 
Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_view` + +An event of type `create_view` is recorded when a view is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `ViewName` | The name of the new view. | yes | +| `Owner` | The name of the owner of the new view. | yes | +| `ViewQuery` | The SQL selection clause used to define the view. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. 
The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_database` + +An event of type `drop_database` is recorded when a database is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | yes | +| `DroppedSchemaObjects` | The names of the schemas dropped by a cascade operation. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `drop_function` + +An event of type `drop_function` is recorded when a user-defined function is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the dropped function. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_index` + +An event of type `drop_index` is recorded when an index is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | yes | +| `IndexName` | The name of the affected index. | yes | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. 
Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_schema` + +An event of type `drop_schema` is recorded when a schema is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_sequence` + +An event of type `drop_sequence` is recorded when a sequence is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the affected sequence. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_table` + +An event of type `drop_table` is recorded when a table is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. 
| yes | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_type` + +An event of type `drop_type` is recorded when a user-defined type is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_view` + +An event of type `drop_view` is recorded when a view is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `ViewName` | The name of the affected view. | yes | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `finish_schema_change` + +An event of type `finish_schema_change` is recorded when a previously initiated schema +change has completed. + + +| Field | Description | Sensitive | +|--|--|--| +| `LatencyNanos` | The amount of time the schema change job took to complete. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `finish_schema_change_rollback` + +An event of type `finish_schema_change_rollback` is recorded when a previously +initiated schema change rollback has completed. + + +| Field | Description | Sensitive | +|--|--|--| +| `LatencyNanos` | The amount of time the schema change job took to rollback. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. 
| no | + +### `force_delete_table_data_entry` + + + +| Field | Description | Sensitive | +|--|--|--| +| `DescriptorID` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_database` + +An event of type `rename_database` is recorded when a database is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The old name of the affected database. | yes | +| `NewDatabaseName` | The new name of the affected database. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_function` + +An event of type `rename_function` is recorded when a user-defined function is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | The old name of the affected function. | yes | +| `NewFunctionName` | The new name of the affected function. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_schema` + +An event of type `rename_schema` is recorded when a schema is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The old name of the affected schema. | yes | +| `NewSchemaName` | The new name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_table` + +An event of type `rename_table` is recorded when a table, sequence or view is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The old name of the affected table. 
| yes | +| `NewTableName` | The new name of the affected table. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_type` + +An event of type `rename_type` is recorded when a user-defined type is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The old name of the affected type. | yes | +| `NewTypeName` | The new name of the affected type. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `reverse_schema_change` + +An event of type `reverse_schema_change` is recorded when an in-progress schema change +encounters a problem and is reversed. + + +| Field | Description | Sensitive | +|--|--|--| +| `Error` | The error encountered that caused the schema change to be reversed. The specific format of the error is variable and can change across releases without warning. | yes | +| `SQLSTATE` | The SQLSTATE code for the error. | no | +| `LatencyNanos` | The amount of time the schema change job took before being reverted. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `set_schema` + +An event of type `set_schema` is recorded when a table, view, sequence or type's schema is changed. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `DescriptorName` | The old name of the affected descriptor. | yes | +| `NewDescriptorName` | The new name of the affected descriptor. | yes | +| `DescriptorType` | The descriptor type being changed (table, view, sequence, type). | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `truncate_table` + +An event of type `truncate_table` is recorded when a table is truncated. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_delete_descriptor` + +An event of type `unsafe_delete_descriptor` is recorded when a descriptor is written +using crdb_internal.unsafe_delete_descriptor(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. + + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | yes | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. 
The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_delete_namespace_entry` + +An event of type `unsafe_delete_namespace_entry` is recorded when a namespace entry is +written using crdb_internal.unsafe_delete_namespace_entry(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. + + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | yes | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_upsert_descriptor` + +An event of type `unsafe_upsert_descriptor` is recorded when a descriptor is written +using crdb_internal.unsafe_upsert_descriptor(). + + +| Field | Description | Sensitive | +|--|--|--| +| `PreviousDescriptor` | | yes | +| `NewDescriptor` | | yes | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_upsert_namespace_entry` + +An event of type `unsafe_upsert_namespace_entry` is recorded when a namespace entry is +written using crdb_internal.unsafe_upsert_namespace_entry(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | yes | +| `PreviousID` | | no | +| `Force` | | no | +| `FailedValidation` | | no | +| `ValidationErrors` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +## SQL Privilege changes + +Events in this category pertain to DDL (Data Definition Language) +operations performed by SQL statements that modify the privilege +grants for stored objects. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `PRIVILEGES` channel. + + +### `alter_database_owner` + +An event of type `alter_database_owner` is recorded when a database's owner is changed. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database being affected. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_default_privileges` + +An event of type `alter_default_privileges` is recorded when default privileges are changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | yes | +| `RoleName` | Either role_name should be populated or for_all_roles should be true. The role having its default privileges altered. | yes | +| `ForAllRoles` | Identifies if FOR ALL ROLES is used. | no | +| `SchemaName` | The name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `alter_function_owner` + +An event of type `alter_function_owner` is recorded when the owner of a user-defined function is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | The name of the affected user-defined function. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_schema_owner` + +An event of type `alter_schema_owner` is recorded when a schema's owner is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no |
+| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes |
+
+### `alter_table_owner`
+
+An event of type `alter_table_owner` is recorded when the owner of a table, view or sequence is changed.
+
+
+| Field | Description | Sensitive |
+|--|--|--|
+| `TableName` | The name of the affected object. | yes |
+| `Owner` | The name of the new owner. | yes |
+
+
+#### Common fields
+
+| Field | Description | Sensitive |
+|--|--|--|
+| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no |
+| `EventType` | The type of the event. | no |
+| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially |
+| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no |
+| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends |
+| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no |
+| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no |
+| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes |
+
+### `alter_type_owner`
+
+An event of type `alter_type_owner` is recorded when the owner of a user-defined type is changed.
+
+
+| Field | Description | Sensitive |
+|--|--|--|
+| `TypeName` | The name of the affected type. | yes |
+| `Owner` | The name of the new owner. 
| yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `change_database_privilege` + +An event of type `change_database_privilege` is recorded when privileges are +added to / removed from a user for a database object. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. 
The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_function_privilege` + + + +| Field | Description | Sensitive | +|--|--|--| +| `FuncName` | The name of the affected function. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_schema_privilege` + +An event of type `change_schema_privilege` is recorded when privileges are added to / +removed from a user for a schema object. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. 
| yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_table_privilege` + +An event of type `change_table_privilege` is recorded when privileges are added to / removed +from a user for a table, sequence or view object. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_type_privilege` + +An event of type `change_type_privilege` is recorded when privileges are added to / +removed from a user for a type object. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +## SQL Session events + +Events in this category report SQL client connections +and sessions. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these miscellaneous events are +preserved in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `SESSIONS` channel. + + +### `client_authentication_failed` + +An event of type `client_authentication_failed` is reported when a client session +did not authenticate successfully. 
+ +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Reason` | The reason for the authentication failure. See below for possible values for type `AuthFailReason`. | no | +| `Detail` | The detailed error for the authentication failure. | partially | +| `Method` | The authentication method used. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_authentication_info` + +An event of type `client_authentication_info` is reported for intermediate +steps during the authentication process. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Method` | The authentication method used, once known. 
| no | +| `Info` | The authentication progress message. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_authentication_ok` + +An event of type `client_authentication_ok` is reported when a client session +was authenticated successfully. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Method` | The authentication method used. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. 
| no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_connection_end` + +An event of type `client_connection_end` is reported when a client connection +is closed. This is reported even when authentication +fails, and even for simple cancellation messages. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_connections.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Duration` | The duration of the connection in nanoseconds. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. 
| no | + +### `client_connection_start` + +An event of type `client_connection_start` is reported when a client connection +is established. This is reported even when authentication +fails, and even for simple cancellation messages. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_connections.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | + +### `client_session_end` + +An event of type `client_session_end` is reported when a client session +is completed. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Duration` | The duration of the connection in nanoseconds. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. 
| yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +## SQL Slow Query Log + +Events in this category report slow query execution. + +Note: these events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SQL_PERF` channel. + + +### `large_row` + +An event of type `large_row` is recorded when a statement tries to write a row larger than +cluster setting `sql.guardrails.max_row_size_log` to the database. Multiple +LargeRow events will be recorded for statements writing multiple large rows. +LargeRow events are recorded before the transaction commits, so in the case +of transaction abort there will not be a corresponding row in the database. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RowSize` | | no | +| `TableID` | | no | +| `FamilyID` | | no | +| `PrimaryKey` | | yes | + +### `slow_query` + +An event of type `slow_query` is recorded when a query triggers the "slow query" condition. 
+ +As of this writing, the condition requires: +- the cluster setting `sql.log.slow_query.latency_threshold` +set to a non-zero value, AND +- EITHER of the following conditions: +- the actual age of the query exceeds the configured threshold; AND/OR +- the query performs a full table/index scan AND the cluster setting +`sql.log.slow_query.experimental_full_table_scans.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. 
| partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `txn_rows_read_limit` + +An event of type `txn_rows_read_limit` is recorded when a transaction tries to read more rows than +cluster setting `sql.defaults.transaction_rows_read_log`. There will only be +a single record for a single transaction (unless it is retried) even if there +are more statement within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +### `txn_rows_written_limit` + +An event of type `txn_rows_written_limit` is recorded when a transaction tries to write more rows +than cluster setting `sql.defaults.transaction_rows_written_log`. There will +only be a single record for a single transaction (unless it is retried) even +if there are more mutation statements within the transaction that haven't +been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +## SQL Slow Query Log (Internal) + +Events in this category report slow query execution by +internal executors, i.e., when CockroachDB internally issues +SQL statements. + +Note: these events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SQL_INTERNAL_PERF` channel. + + +### `large_row_internal` + +An event of type `large_row_internal` is recorded when an internal query tries to write a row +larger than cluster settings `sql.guardrails.max_row_size_log` or +`sql.guardrails.max_row_size_err` to the database. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RowSize` | | no | +| `TableID` | | no | +| `FamilyID` | | no | +| `PrimaryKey` | | yes | + +### `slow_query_internal` + +An event of type `slow_query_internal` is recorded when a query triggers the "slow query" condition, +and the cluster setting `sql.log.slow_query.internal_queries.enabled` is +set. +See the documentation for the event type `slow_query` for details about +the "slow query" condition. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. 
| no | + +### `txn_rows_read_limit_internal` + +An event of type `txn_rows_read_limit_internal` is recorded when an internal transaction tries to +read more rows than cluster setting `sql.defaults.transaction_rows_read_log` +or `sql.defaults.transaction_rows_read_err`. There will only be a single +record for a single transaction (unless it is retried) even if there are more +mutation statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. 
| no | + +### `txn_rows_written_limit_internal` + +An event of type `txn_rows_written_limit_internal` is recorded when an internal transaction tries to +write more rows than cluster setting +`sql.defaults.transaction_rows_written_log` or +`sql.defaults.transaction_rows_written_err`. There will only be a single +record for a single transaction (unless it is retried) even if there are more +mutation statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. 
| no | + +## SQL User and Role operations + +Events in this category pertain to SQL statements that modify the +properties of users and roles. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `USER_ADMIN` channel. + + +### `alter_role` + +An event of type `alter_role` is recorded when a role is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the affected user/role. | yes | +| `Options` | The options set on the user/role. | no | +| `SetInfo` | Information corresponding to an ALTER ROLE SET statement. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_role` + +An event of type `create_role` is recorded when a role is created. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the new user/role. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_role` + +An event of type `drop_role` is recorded when a role is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the affected user/role. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `grant_role` + +An event of type `grant_role` is recorded when a role is granted. + + +| Field | Description | Sensitive | +|--|--|--| +| `GranteeRoles` | The roles being granted to. | yes | +| `Members` | The roles being granted. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `password_hash_converted` + +An event of type `password_hash_converted` is recorded when the password credentials +are automatically converted server-side. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the user/role whose credentials have been converted. | yes | +| `OldMethod` | The previous hash method. | no | +| `NewMethod` | The new hash method. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Storage telemetry events + + + +Events in this category are logged to the `TELEMETRY` channel. + + +### `level_stats` + +An event of type `level_stats` contains per-level statistics for an LSM. + + +| Field | Description | Sensitive | +|--|--|--| +| `Level` | level is the level ID in a LSM (e.g. level(L0) == 0, etc.) | no | +| `NumFiles` | num_files is the number of files in the level (gauge). | no | +| `SizeBytes` | size_bytes is the size of the level, in bytes (gauge). | no | +| `Score` | score is the compaction score of the level (gauge). | no | +| `BytesIn` | bytes_in is the number of bytes written to this level (counter). | no | +| `BytesIngested` | bytes_ingested is the number of bytes ingested into this level (counter). | no | +| `BytesMoved` | bytes_moved is the number of bytes moved into this level via a move-compaction (counter). | no | +| `BytesRead` | bytes_read is the number of bytes read from this level, during compactions (counter). | no | +| `BytesCompacted` | bytes_compacted is the number of bytes written to this level during compactions (counter). | no | +| `BytesFlushed` | bytes flushed is the number of bytes flushed to this level. This value is always zero for levels other than L0 (counter). 
| no | +| `TablesCompacted` | tables_compacted is the count of tables compacted into this level (counter). | no | +| `TablesFlushed` | tables_flushed is the count of tables flushed into this level (counter). | no | +| `TablesIngested` | tables_ingested is the count of tables ingested into this level (counter). | no | +| `TablesMoved` | tables_moved is the count of tables moved into this level via move-compactions (counter). | no | +| `NumSublevels` | num_sublevel is the count of sublevels for the level. This value is always zero for levels other than L0 (gauge). | no | + + + +### `store_stats` + +An event of type `store_stats` contains per store stats. + +Note that because stats are scoped to the lifetime of the process, counters +(and certain gauges) will be reset across node restarts. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeId` | node_id is the ID of the node. | no | +| `StoreId` | store_id is the ID of the store. | no | +| `Levels` | levels is a nested message containing per-level statistics. | yes | +| `CacheSize` | cache_size is the size of the cache for the store, in bytes (gauge). | no | +| `CacheCount` | cache_count is the number of items in the cache (gauge). | no | +| `CacheHits` | cache_hits is the number of cache hits (counter). | no | +| `CacheMisses` | cache_misses is the number of cache misses (counter). | no | +| `CompactionCountDefault` | compaction_count_default is the count of default compactions (counter). | no | +| `CompactionCountDeleteOnly` | compaction_count_delete_only is the count of delete-only compactions (counter). | no | +| `CompactionCountElisionOnly` | compaction_count_elision_only is the count of elision-only compactions (counter). | no | +| `CompactionCountMove` | compaction_count_move is the count of move-compactions (counter). | no | +| `CompactionCountRead` | compaction_count_read is the count of read-compactions (counter). 
| no | +| `CompactionCountRewrite` | compaction_count_rewrite is the count of rewrite-compactions (counter). | no | +| `CompactionNumInProgress` | compactions_num_in_progress is the number of compactions in progress (gauge). | no | +| `CompactionMarkedFiles` | compaction_marked_files is the count of files marked for compaction (gauge). | no | +| `FlushCount` | flush_count is the number of flushes (counter). | no | +| `FlushIngestCount` | | no | +| `FlushIngestTableCount` | | no | +| `FlushIngestTableBytes` | | no | +| `IngestCount` | ingest_count is the number of successful ingest operations (counter). | no | +| `MemtableSize` | memtable_size is the total size allocated to all memtables and (large) batches, in bytes (gauge). | no | +| `MemtableCount` | memtable_count is the count of memtables (gauge). | no | +| `MemtableZombieCount` | memtable_zombie_count is the count of memtables no longer referenced by the current DB state, but still in use by an iterator (gauge). | no | +| `MemtableZombieSize` | memtable_zombie_size is the size, in bytes, of all zombie memtables (gauge). | no | +| `WalLiveCount` | wal_live_count is the count of live WAL files (gauge). | no | +| `WalLiveSize` | wal_live_size is the size, in bytes, of live data in WAL files. With WAL recycling, this value is less than the actual on-disk size of the WAL files (gauge). | no | +| `WalObsoleteCount` | wal_obsolete_count is the count of obsolete WAL files (gauge). | no | +| `WalObsoleteSize` | wal_obsolete_size is the size of obsolete WAL files, in bytes (gauge). | no | +| `WalPhysicalSize` | wal_physical_size is the size, in bytes, of the WAL files on disk (gauge). | no | +| `WalBytesIn` | wal_bytes_in is the number of logical bytes written to the WAL (counter). | no | +| `WalBytesWritten` | wal_bytes_written is the number of bytes written to the WAL (counter). 
| no | +| `TableObsoleteCount` | table_obsolete_count is the number of tables which are no longer referenced by the current DB state or any open iterators (gauge). | no | +| `TableObsoleteSize` | table_obsolete_size is the size, in bytes, of obsolete tables (gauge). | no | +| `TableZombieCount` | table_zombie_count is the number of tables no longer referenced by the current DB state, but are still in use by an open iterator (gauge). | no | +| `TableZombieSize` | table_zombie_size is the size, in bytes, of zombie tables (gauge). | no | +| `RangeKeySetsCount` | range_key_sets_count is the approximate count of internal range key sets in the store. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Telemetry events + + + +Events in this category are logged to the `TELEMETRY` channel. + + +### `captured_index_usage_stats` + +An event of type `captured_index_usage_stats` + + +| Field | Description | Sensitive | +|--|--|--| +| `TotalReadCount` | TotalReadCount is the number of times the index has been read. | no | +| `LastRead` | LastRead is the timestamp at which the index was last read. | no | +| `TableID` | TableID is the ID of the table on which the index was created. This is same as descpb.TableID and is unique within the cluster. | no | +| `IndexID` | IndexID is the ID of the index within the scope of the given table. | no | +| `DatabaseName` | DatabaseName is the name of the database in which the index was created. | no | +| `TableName` | TableName is the name of the table on which the index was created. | no | +| `IndexName` | IndexName is the name of the index within the scope of the given table. | no | +| `IndexType` | IndexType is the type of the index. Index types include "primary" and "secondary". | no | +| `IsUnique` | IsUnique indicates if the index has a UNIQUE constraint. 
| no | +| `IsInverted` | IsInverted indicates if the index is an inverted index. | no | +| `CreatedAt` | CreatedAt is the timestamp at which the index was created. | no | +| `SchemaName` | SchemaName is the name of the schema in which the index was created. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `changefeed_emitted_bytes` + +An event of type `changefeed_emitted_bytes` is an event representing the bytes emitted by a changefeed over an interval. + + +| Field | Description | Sensitive | +|--|--|--| +| `JobId` | The job id for enterprise changefeeds. | no | +| `EmittedBytes` | The number of bytes emitted. | no | +| `EmittedMessages` | The number of messages emitted. | no | +| `LoggingInterval` | The time period in nanoseconds between emitting telemetry events of this type (per-aggregator). | no | +| `Closing` | Flag to indicate that the changefeed is closing. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | + +### `changefeed_failed` + +An event of type `changefeed_failed` is an event for any Changefeed failure since the plan hook +was triggered. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `FailureType` | The reason / environment with which the changefeed failed (ex: connection_closed, changefeed_behind) | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | + +### `create_changefeed` + +An event of type `create_changefeed` is an event for any CREATE CHANGEFEED query that +successfully starts running. Failed CREATE statements will show up as +ChangefeedFailed events. + + +| Field | Description | Sensitive | +|--|--|--| +| `Transformation` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | + +### `hot_ranges_stats` + +An event of type `hot_ranges_stats` + + +| Field | Description | Sensitive | +|--|--|--| +| `RangeID` | | no | +| `Qps` | | no | +| `DatabaseName` | DatabaseName is the name of the database in which the index was created. 
| yes | +| `TableName` | TableName is the name of the table on which the index was created. | yes | +| `IndexName` | IndexName is the name of the index within the scope of the given table. | yes | +| `SchemaName` | SchemaName is the name of the schema in which the index was created. | yes | +| `LeaseholderNodeID` | LeaseholderNodeID indicates the Node ID that is the current leaseholder for the given range. | no | +| `WritesPerSecond` | Writes per second is the recent number of keys written per second on this range. | no | +| `ReadsPerSecond` | Reads per second is the recent number of keys read per second on this range. | no | +| `WriteBytesPerSecond` | Write bytes per second is the recent number of bytes written per second on this range. | no | +| `ReadBytesPerSecond` | Read bytes per second is the recent number of bytes read per second on this range. | no | +| `CPUTimePerSecond` | CPU time per second is the recent cpu usage in nanoseconds of this range. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `m_v_c_c_iterator_stats` + +Internal storage iteration statistics for a single execution. + + +| Field | Description | Sensitive | +|--|--|--| +| `StepCount` | StepCount collects the number of times the iterator moved forward or backward over the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `StepCountInternal` | StepCountInternal collects the number of times the iterator moved forward or backward over LSM internal keys. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `SeekCount` | SeekCount collects the number of times the iterator moved to a specific key/value pair in the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. 
| no | +| `SeekCountInternal` | SeekCountInternal collects the number of times the iterator moved to a specific LSM internal key. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `BlockBytes` | BlockBytes collects the bytes in the loaded SSTable data blocks. For details, see pebble.InternalIteratorStats. | no | +| `BlockBytesInCache` | BlockBytesInCache collects the subset of BlockBytes in the block cache. For details, see pebble.InternalIteratorStats. | no | +| `KeyBytes` | KeyBytes collects the bytes in keys that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `ValueBytes` | ValueBytes collects the bytes in values that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `PointCount` | PointCount collects the count of point keys iterated over. For details, see pebble.InternalIteratorStats. | no | +| `PointsCoveredByRangeTombstones` | PointsCoveredByRangeTombstones collects the count of point keys that were iterated over that were covered by range tombstones. For details, see pebble.InternalIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeyCount` | RangeKeyCount collects the count of range keys encountered during iteration. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeyContainedPoints` | RangeKeyContainedPoints collects the count of point keys encountered within the bounds of a range key. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeySkippedPoints` | RangeKeySkippedPoints collects the count of the subset of ContainedPoints point keys that were skipped during iteration due to range-key masking. For details, see pkg/storage/engine.go, pebble.RangeKeyIteratorStats, and docs/tech-notes/mvcc-range-tombstones.md. 
| no | + + + +### `recovery_event` + +An event of type `recovery_event` is an event that is logged on every invocation of BACKUP, +RESTORE, and on every BACKUP schedule creation, with the appropriate subset +of fields populated depending on the type of event. This event is also +logged whenever a BACKUP and RESTORE job completes or fails. + + +| Field | Description | Sensitive | +|--|--|--| +| `RecoveryType` | RecoveryType is the type of recovery described by this event, which is one of - backup - scheduled_backup - create_schedule - restore

It can also be a job event corresponding to the recovery, which is one of - backup_job - scheduled_backup_job - restore_job | no | +| `TargetScope` | TargetScope is the largest scope of the targets that the user is backing up or restoring based on the following order: table < schema < database < full cluster. | no | +| `IsMultiregionTarget` | IsMultiregionTarget is true if any of the targets contain objects with multi-region primitives. | no | +| `TargetCount` | TargetCount is the number of targets in the BACKUP/RESTORE. | no | +| `DestinationSubdirType` | DestinationSubdirType is - latest: if using the latest subdir - standard: if using a date-based subdir - custom: if using a custom subdir that's not date-based | no | +| `DestinationStorageTypes` | DestinationStorageTypes are the types of storage that the user is backing up to or restoring from. | no | +| `DestinationAuthTypes` | DestinationAuthTypes are the types of authentication methods that the user is using to access the destination storage. | no | +| `IsLocalityAware` | IsLocalityAware indicates if the BACKUP or RESTORE is locality aware. | no | +| `AsOfInterval` | AsOfInterval is the time interval in nanoseconds between the statement timestamp and the timestamp resolved by the AS OF SYSTEM TIME expression. The interval is expressed in nanoseconds. | no | +| `WithRevisionHistory` | WithRevisionHistory is true if the BACKUP includes revision history. | no | +| `HasEncryptionPassphrase` | HasEncryptionPassphrase is true if the user provided an encryption passphrase to encrypt/decrypt their backup. | no | +| `KMSType` | KMSType is the type of KMS the user is using to encrypt/decrypt their backup. | no | +| `KMSCount` | KMSCount is the number of KMS the user is using. | no | +| `Options` | Options contain all the names of the options specified by the user in the BACKUP or RESTORE statement. For options that are accompanied by a value, only those with non-empty values will be present.

It's important to note that there are no option values anywhere in the event payload. Future changes to telemetry should refrain from adding values to the payload unless they are properly redacted. | no | +| `DebugPauseOn` | DebugPauseOn is the type of event that the restore should pause on for debugging purposes. Currently only "error" is supported. | no | +| `JobID` | JobID is the ID of the BACKUP/RESTORE job. | no | +| `ResultStatus` | ResultStatus indicates whether the job succeeded or failed. | no | +| `ErrorText` | ErrorText is the text of the error that caused the job to fail. | partially | +| `RecurringCron` | RecurringCron is the crontab for the incremental backup. | no | +| `FullBackupCron` | FullBackupCron is the crontab for the full backup. | no | +| `CustomFirstRunTime` | CustomFirstRunTime is the timestamp for the user configured first run time. Expressed as nanoseconds since the Unix epoch. | no | +| `OnExecutionFailure` | OnExecutionFailure describes the desired behavior if the schedule fails to execute. | no | +| `OnPreviousRunning` | OnPreviousRunning describes the desired behavior if the previously scheduled BACKUP is still running. | no | +| `IgnoreExistingBackup` | IgnoreExistingBackup is true iff the BACKUP schedule should still be created even if a backup is already present in its destination. | no | +| `ApplicationName` | The application name for the session where recovery event was created. | no | +| `NumRows` | NumRows is the number of rows successfully imported, backed up or restored. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `sampled_exec_stats` + +An event of type `sampled_exec_stats` contains execution statistics that apply to both statements +and transactions. These stats as a whole are collected using a sampling approach. 
+These exec stats are meant to contain the same fields as ExecStats in +apps_stats.proto but are for a single execution rather than aggregated executions. +Fields in this struct should be updated in sync with apps_stats.proto. + + +| Field | Description | Sensitive | +|--|--|--| +| `NetworkBytes` | NetworkBytes collects the number of bytes sent over the network. | no | +| `MaxMemUsage` | MaxMemUsage collects the maximum memory usage that occurred on a node. | no | +| `ContentionTime` | ContentionTime collects the time in seconds statements in the transaction spent contending. | no | +| `NetworkMessages` | NetworkMessages collects the number of messages that were sent over the network. | no | +| `MaxDiskUsage` | MaxDiskUsage collects the maximum temporary disk usage that occurred. This is set in cases where a query had to spill to disk, e.g. when performing a large sort where not all of the tuples fit in memory. | no | +| `CPUSQLNanos` | CPUSQLNanos collects the CPU time spent executing SQL operations in nanoseconds. Currently, it is only collected for statements without mutations that have a vectorized plan. | no | +| `MVCCIteratorStats` | Internal storage iteration statistics. | yes | + + + +### `sampled_query` + +An event of type `sampled_query` is the SQL query event logged to the telemetry channel. It +contains common SQL event/execution details. + + +| Field | Description | Sensitive | +|--|--|--| +| `SkippedQueries` | skipped_queries indicate how many SQL statements were not considered for sampling prior to this one. If the field is omitted, or its value is zero, this indicates that no statement was omitted since the last event. | no | +| `CostEstimate` | Cost of the query as estimated by the optimizer. | no | +| `Distribution` | The distribution of the DistSQL query plan (local, full, or partial). | no | +| `PlanGist` | The query's plan gist bytes as a base64 encoded string. | no | +| `SessionID` | SessionID is the ID of the session that initiated the query. 
| no | +| `Database` | Name of the database that initiated the query. | no | +| `StatementID` | Statement ID of the query. | no | +| `TransactionID` | Transaction ID of the query. | no | +| `MaxFullScanRowsEstimate` | Maximum number of rows scanned by a full scan, as estimated by the optimizer. | no | +| `TotalScanRowsEstimate` | Total number of rows read by all scans in the query, as estimated by the optimizer. | no | +| `OutputRowsEstimate` | The number of rows output by the query, as estimated by the optimizer. | no | +| `StatsAvailable` | Whether table statistics were available to the optimizer when planning the query. | no | +| `NanosSinceStatsCollected` | The maximum number of nanoseconds that have passed since stats were collected on any table scanned by this query. | no | +| `BytesRead` | The number of bytes read from disk. | no | +| `RowsRead` | The number of rows read from disk. | no | +| `RowsWritten` | The number of rows written. | no | +| `InnerJoinCount` | The number of inner joins in the query plan. | no | +| `LeftOuterJoinCount` | The number of left (or right) outer joins in the query plan. | no | +| `FullOuterJoinCount` | The number of full outer joins in the query plan. | no | +| `SemiJoinCount` | The number of semi joins in the query plan. | no | +| `AntiJoinCount` | The number of anti joins in the query plan. | no | +| `IntersectAllJoinCount` | The number of intersect all joins in the query plan. | no | +| `ExceptAllJoinCount` | The number of except all joins in the query plan. | no | +| `HashJoinCount` | The number of hash joins in the query plan. | no | +| `CrossJoinCount` | The number of cross joins in the query plan. | no | +| `IndexJoinCount` | The number of index joins in the query plan. | no | +| `LookupJoinCount` | The number of lookup joins in the query plan. | no | +| `MergeJoinCount` | The number of merge joins in the query plan. | no | +| `InvertedJoinCount` | The number of inverted joins in the query plan. 
| no | +| `ApplyJoinCount` | The number of apply joins in the query plan. | no | +| `ZigZagJoinCount` | The number of zig zag joins in the query plan. | no | +| `ContentionNanos` | The duration of time in nanoseconds that the query experienced contention. | no | +| `Regions` | The regions of the nodes where SQL processors ran. | no | +| `NetworkBytesSent` | The number of network bytes sent by nodes for this query. | no | +| `MaxMemUsage` | The maximum amount of memory usage by nodes for this query. | no | +| `MaxDiskUsage` | The maximum amount of disk usage by nodes for this query. | no | +| `KVBytesRead` | The number of bytes read at the KV layer for this query. | no | +| `KVPairsRead` | The number of key-value pairs read at the KV layer for this query. | no | +| `KVRowsRead` | The number of rows read at the KV layer for this query. | no | +| `NetworkMessages` | The number of network messages sent by nodes for this query. | no | +| `IndexRecommendations` | Generated index recommendations for this query. | no | +| `ScanCount` | The number of scans in the query plan. | no | +| `ScanWithStatsCount` | The number of scans using statistics (including forecasted statistics) in the query plan. | no | +| `ScanWithStatsForecastCount` | The number of scans using forecasted statistics in the query plan. | no | +| `TotalScanRowsWithoutForecastsEstimate` | Total number of rows read by all scans in the query, as estimated by the optimizer without using forecasts. | no | +| `NanosSinceStatsForecasted` | The greatest quantity of nanoseconds that have passed since the forecast time (or until the forecast time, if it is in the future, in which case it will be negative) for any table with forecasted stats scanned by this query. | no | +| `Indexes` | The list of indexes used by this query. | no | +| `CpuTimeNanos` | Collects the cumulative CPU time spent executing SQL operations in nanoseconds. 
Currently, it is only collected for statements without mutations that have a vectorized plan. | no |
+| `KvGrpcCalls` | The number of grpc calls done to get data from KV nodes | no |
+| `KvTimeNanos` | Cumulated time spent waiting for a KV request. This includes disk IO time and potentially network time (if any of the keys are not local). | no |
+| `ServiceLatencyNanos` | The time to service the query, from start of parse to end of execute. | no |
+| `OverheadLatencyNanos` | The difference between service latency and the sum of parse latency + plan latency + run latency . | no |
+| `RunLatencyNanos` | The time to run the query and fetch or compute the result rows. | no |
+| `PlanLatencyNanos` | The time to transform the AST into a logical query plan. | no |
+| `IdleLatencyNanos` | The time between statement executions in a transaction | no |
+| `ParseLatencyNanos` | The time to transform the SQL string into an abstract syntax tree (AST). | no |
+| `MvccStepCount` | StepCount collects the number of times the iterator moved forward or backward over the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no |
+| `MvccStepCountInternal` | StepCountInternal collects the number of times the iterator moved forward or backward over LSM internal keys. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no |
+| `MvccSeekCount` | SeekCount collects the number of times the iterator moved to a specific key/value pair in the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no |
+| `MvccSeekCountInternal` | SeekCountInternal collects the number of times the iterator moved to a specific LSM internal key. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no |
+| `MvccBlockBytes` | BlockBytes collects the bytes in the loaded SSTable data blocks. For details, see pebble.InternalIteratorStats. 
| no | +| `MvccBlockBytesInCache` | BlockBytesInCache collects the subset of BlockBytes in the block cache. For details, see pebble.InternalIteratorStats. | no | +| `MvccKeyBytes` | KeyBytes collects the bytes in keys that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccValueBytes` | ValueBytes collects the bytes in values that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccPointCount` | PointCount collects the count of point keys iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccPointsCoveredByRangeTombstones` | PointsCoveredByRangeTombstones collects the count of point keys that were iterated over that were covered by range tombstones. For details, see pebble.InternalIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeyCount` | RangeKeyCount collects the count of range keys encountered during iteration. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeyContainedPoints` | RangeKeyContainedPoints collects the count of point keys encountered within the bounds of a range key. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeySkippedPoints` | RangeKeySkippedPoints collects the count of the subset of ContainedPoints point keys that were skipped during iteration due to range-key masking. For details, see pkg/storage/engine.go, pebble.RangeKeyIteratorStats, and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `SchemaChangerMode` | SchemaChangerMode is the mode that was used to execute the schema change, if any. | no | +| `SQLInstanceIDs` | SQLInstanceIDs is a list of all the SQL instances used in this statement's execution. | no | +| `StatementFingerprintID` | Statement fingerprint ID of the query. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. 
| no |
+| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no |
+| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no |
+
+### `sampled_transaction`
+
+An event of type `sampled_transaction` is the event logged to telemetry at the end of transaction execution.
+
+
+| Field | Description | Sensitive |
+|--|--|--|
+| `User` | User is the user account that triggered the transaction. The special usernames `root` and `node` are not considered sensitive. | depends |
+| `ApplicationName` | ApplicationName is the application name for the session where the transaction was executed. This is included in the event to ease filtering of logging output by application. | no |
+| `TxnCounter` | TxnCounter is the sequence number of the SQL transaction inside its session. | no |
+| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no |
+| `TransactionID` | TransactionID is the id of the transaction. | no |
+| `Committed` | Committed indicates if the transaction committed successfully. We want to include this value even if it is false. | no |
+| `ImplicitTxn` | ImplicitTxn indicates if the transaction was an implicit one. We want to include this value even if it is false. | no |
+| `StartTimeUnixNanos` | StartTimeUnixNanos is the time the transaction was started. Expressed as unix time in nanoseconds. | no |
+| `EndTimeUnixNanos` | EndTimeUnixNanos is the time the transaction finished (either committed or aborted). Expressed as unix time in nanoseconds. | no |
+| `ServiceLatNanos` | ServiceLatNanos is the time to service the whole transaction, from start to end of execution. | no |
+| `SQLSTATE` | SQLSTATE is the SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no |
+| `ErrorText` | ErrorText is the text of the error if any. | partially |
+| `NumRetries` | NumRetries is the number of times the txn was retried automatically by the server. 
| no | +| `LastAutoRetryReason` | LastAutoRetryReason is a string containing the reason for the last automatic retry. | partially | +| `NumRows` | NumRows is the total number of rows returned across all statements. | no | +| `RetryLatNanos` | RetryLatNanos is the amount of time spent retrying the transaction. | no | +| `CommitLatNanos` | CommitLatNanos is the amount of time spent committing the transaction after all statement operations. | no | +| `IdleLatNanos` | IdleLatNanos is the amount of time spent waiting for the client to send statements while the transaction is open. | no | +| `BytesRead` | BytesRead is the number of bytes read from disk. | no | +| `RowsRead` | RowsRead is the number of rows read from disk. | no | +| `RowsWritten` | RowsWritten is the number of rows written to disk. | no | +| `SampledExecStats` | SampledExecStats is a nested field containing execution statistics. This field will be omitted if the stats were not sampled. | yes | +| `SkippedTransactions` | SkippedTransactions is the number of transactions that were skipped as part of sampling prior to this one. We only count skipped transactions when telemetry logging is enabled and the sampling mode is set to "transaction". | no | +| `TransactionFingerprintID` | TransactionFingerprintID is the fingerprint ID of the transaction. This can be used to find the transaction in the console. | no | +| `StatementFingerprintIDs` | StatementFingerprintIDs is an array of statement fingerprint IDs belonging to this transaction. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `schema_descriptor` + +An event of type `schema_descriptor` is an event for schema telemetry, whose purpose is +to take periodic snapshots of the cluster's SQL schema and publish them in +the telemetry log channel. 
For all intents and purposes, the data in such a
+snapshot can be thought of as the outer join of certain system tables:
+namespace, descriptor, and at some point perhaps zones, etc.
+
+Snapshots are too large to conveniently be published as a single log event,
+so instead they're broken down into SchemaDescriptor events which
+contain the data in one record of this outer join projection. These events
+are prefixed by a header (a SchemaSnapshotMetadata event).
+
+
+| Field | Description | Sensitive |
+|--|--|--|
+| `SnapshotID` | SnapshotID is the unique identifier of the snapshot that this event is part of. | no |
+| `ParentDatabaseID` | ParentDatabaseID matches the same key column in system.namespace. | no |
+| `ParentSchemaID` | ParentSchemaID matches the same key column in system.namespace. | no |
+| `Name` | Name matches the same key column in system.namespace. | no |
+| `DescID` | DescID matches the 'id' column in system.namespace and system.descriptor. | no |
+| `Desc` | Desc matches the 'descriptor' column in system.descriptor. Some contents of the descriptor may be redacted to prevent leaking PII. | no |
+
+
+#### Common fields
+
+| Field | Description | Sensitive |
+|--|--|--|
+| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no |
+| `EventType` | The type of the event. | no |
+
+### `schema_snapshot_metadata`
+
+An event of type `schema_snapshot_metadata` is an event describing a schema snapshot, which
+is a set of SchemaDescriptor messages sharing the same SnapshotID.
+
+
+| Field | Description | Sensitive |
+|--|--|--|
+| `SnapshotID` | SnapshotID is the unique identifier of this snapshot. | no |
+| `NumRecords` | NumRecords is how many SchemaDescriptor events are in the snapshot. | no |
+| `AsOfTimestamp` | AsOfTimestamp is when the snapshot was taken. This is equivalent to the timestamp given in the AS OF SYSTEM TIME clause when querying the namespace and descriptor tables in the system database. 
Expressed as nanoseconds since the Unix epoch. | no | +| `Errors` | Errors records any errors encountered when post-processing this snapshot, which includes the redaction of any potential PII. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Zone config events + +Events in this category pertain to zone configuration changes on +the SQL schema or system ranges. + +When zone configs apply to individual tables or other objects in a +SQL logical schema, they are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these zone config events are preserved +in each tenant's own `system.eventlog` table. + +When they apply to cluster-level ranges (e.g., the system zone config), +they are stored in the system tenant's own `system.eventlog` table. + +Events in this category are logged to the `OPS` channel. + + +### `remove_zone_config` + +An event of type `remove_zone_config` is recorded when a zone config is removed. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + +### `set_zone_config` + +An event of type `set_zone_config` is recorded when a zone config is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `ResolvedOldConfig` | The string representation of the resolved old zone config. This is not necessarily the same as the zone config that was previously set -- as it includes the resolved values of the zone config options. In other words, a zone config that hasn't been properly "set" yet (and inherits from its parent) will have a resolved_old_config that has details of the values it inherits from its parent. This is particularly useful to get a proper diff between the old and new zone config. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. 
Set to zero for operations that don't affect descriptors. | no |
+| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no |
+| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes |
+| `Target` | The target object of the zone config change. | yes |
+| `Config` | The applied zone config in YAML format. | yes |
+| `Options` | The SQL representation of the applied zone config options. | yes |
+
+
+
+
+## Enumeration types
+
+### `AuthFailReason`
+
+AuthFailReason is the inventory of possible reasons for an
+authentication failure.
+
+
+| Value | Textual alias in code or documentation | Description |
+|--|--|--|
+| 0 | UNKNOWN | is reported when the reason is unknown. |
+| 1 | USER_RETRIEVAL_ERROR | occurs when there was an internal error accessing the principals. |
+| 2 | USER_NOT_FOUND | occurs when the principal is unknown. |
+| 3 | LOGIN_DISABLED | occurs when the user does not have LOGIN privileges. |
+| 4 | METHOD_NOT_FOUND | occurs when no HBA rule matches or the method does not exist. |
+| 5 | PRE_HOOK_ERROR | occurs when the authentication handshake encountered a protocol error. |
+| 6 | CREDENTIALS_INVALID | occurs when the client-provided credentials were invalid. |
+| 7 | CREDENTIALS_EXPIRED | occurs when the credentials provided by the client are expired. |
+| 8 | NO_REPLICATION_ROLEOPTION | occurs when the connection requires a replication role option, but the user does not have it. |
+
+
+
diff --git a/src/current/_includes/cockroach-generated/release-23.2/logformats.md b/src/current/_includes/cockroach-generated/release-23.2/logformats.md
new file mode 100644
index 00000000000..89580c9fc15
--- /dev/null
+++ b/src/current/_includes/cockroach-generated/release-23.2/logformats.md
@@ -0,0 +1,382 @@
+
+The supported log output formats are documented below. 
+ + +- [`crdb-v1`](#format-crdb-v1) + +- [`crdb-v1-count`](#format-crdb-v1-count) + +- [`crdb-v1-tty`](#format-crdb-v1-tty) + +- [`crdb-v1-tty-count`](#format-crdb-v1-tty-count) + +- [`crdb-v2`](#format-crdb-v2) + +- [`crdb-v2-tty`](#format-crdb-v2-tty) + +- [`json`](#format-json) + +- [`json-compact`](#format-json-compact) + +- [`json-fluent`](#format-json-fluent) + +- [`json-fluent-compact`](#format-json-fluent-compact) + + + +## Format `crdb-v1` + + +This is a legacy file format used from CockroachDB v1.0. + +Each log entry is emitted using a common prefix, described below, followed by: + +- The logging context tags enclosed between `[` and `]`, if any. It is possible + for this to be omitted if there were no context tags. +- Optionally, a counter column, if the option 'show-counter' is enabled. See below for details. +- the text of the log entry. + +Beware that the text of the log entry can span multiple lines. +The following caveats apply: + +- The text of the log entry can start with text enclosed between `[` and `]`. + If there were no logging tags to start with, it is not possible to distinguish between + logging context tag information and a `[...]` string in the main text of the + log entry. This means that this format is ambiguous. + + To remove this ambiguity, you can use the option 'show-counter'. + +- The text of the log entry can embed arbitrary application-level strings, + including strings that represent log entries. In particular, an accident + of implementation can cause the common entry prefix (described below) + to also appear on a line of its own, as part of the payload of a previous + log entry. There is no automated way to recognize when this occurs. + Care must be taken by a human observer to recognize these situations. + +- The log entry parser provided by CockroachDB to read log files is faulty + and is unable to recognize the aforementioned pitfall; nor can it read + entries larger than 64KiB successfully. 
Generally, use of this internal + log entry parser is discouraged for entries written with this format. + +See the newer format `crdb-v2` for an alternative +without these limitations. + +### Header lines + +At the beginning of each file, a header is printed using a similar format as +regular log entries. This header reports when the file was created, +which parameters were used to start the server, the server identifiers +if known, and other metadata about the running process. + +- This header appears to be logged at severity `INFO` (with an `I` prefix + at the start of the line) even though it does not really have a severity. +- The header is printed unconditionally even when a filter is configured to + omit entries at the `INFO` level. + +### Common log entry prefix + +Each line of output starts with the following prefix: + + Lyymmdd hh:mm:ss.uuuuuu goid [chan@]file:line marker tags counter + +Reminder, the tags may be omitted; and the counter is only printed if the option +'show-counter' is specified. + +| Field | Description | +|-----------------|--------------------------------------------------------------------------------------------------------------------------------------| +| L | A single character, representing the [log level](logging.html#logging-levels-severities) (e.g., `I` for `INFO`). | +| yy | The year (zero padded; i.e., 2016 is `16`). | +| mm | The month (zero padded; i.e., May is `05`). | +| dd | The day (zero padded). | +| hh:mm:ss.uuuuuu | Time in hours, minutes and fractional seconds. Timezone is UTC. | +| goid | The goroutine id (omitted if zero for use by tests). | +| chan | The channel number (omitted if zero for backward compatibility). | +| file | The file name where the entry originated. | +| line | The line number where the entry originated. | +| marker | Redactability marker ` + redactableIndicator + ` (see below for details). | +| tags | The logging tags, enclosed between `[` and `]`. May be absent. 
| +| counter | The entry counter. Only included if 'show-counter' is enabled. | + +The redactability marker can be empty; in this case, its position in the common prefix is +a double ASCII space character which can be used to reliably identify this situation. + +If the marker ` + redactableIndicator + ` is present, the remainder of the log entry +contains delimiters (‹...›) around +fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) when log redaction is requested. + + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `show-counter` | Whether to include the counter column in the line header. Without it, the format may be ambiguous due to the optionality of tags. | +| `colors` | The color profile to use. Possible values: none, auto, ansi, 256color. Default is auto. | +| `timezone` | The timezone to use for the timestamp column. The value can be any timezone name recognized by the Go standard library. Default is `UTC` | + + + +## Format `crdb-v1-count` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: true` +- `colors: none` + + +## Format `crdb-v1-tty` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: false` +- `colors: auto` + + +## Format `crdb-v1-tty-count` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: true` +- `colors: auto` + + +## Format `crdb-v2` + +This is the main file format used from CockroachDB v21.1. + +Each log entry is emitted using a common prefix, described below, +followed by the text of the log entry. + +### Entry format + +Each line of output starts with the following prefix: + + Lyymmdd hh:mm:ss.uuuuuu goid [chan@]file:line marker [tags...] 
counter cont + +| Field | Description | +|-----------------|--------------------------------------------------------------------------------------------------------------------------------------| +| L | A single character, representing the [log level](logging.html#logging-levels-severities) (e.g., `I` for `INFO`). | +| yy | The year (zero padded; i.e., 2016 is `16`). | +| mm | The month (zero padded; i.e., May is `05`). | +| dd | The day (zero padded). | +| hh:mm:ss.uuuuuu | Time in hours, minutes and fractional seconds. Timezone is UTC. | +| goid | The goroutine id (zero when cannot be determined). | +| chan | The channel number (omitted if zero for backward compatibility). | +| file | The file name where the entry originated. Also see below. | +| line | The line number where the entry originated. | +| marker | Redactability marker "⋮" (see below for details). | +| tags | The logging tags, enclosed between `[` and `]`. See below. | +| counter | The optional entry counter (see below for details). | +| cont | Continuation mark for structured and multi-line entries. See below. | + +The `chan@` prefix before the file name indicates the logging channel, +and is omitted if the channel is `DEV`. + +The file name may be prefixed by the string `(gostd) ` to indicate +that the log entry was produced inside the Go standard library, instead +of a CockroachDB component. Entry parsers must be configured to ignore this prefix +when present. + +`marker` can be empty; in this case, its position in the common prefix is +a double ASCII space character which can be used to reliably identify this situation. +If the marker "⋮" is present, the remainder of the log entry +contains delimiters (‹...›) +around fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) +when log redaction is requested. 
+
+The logging `tags` are enclosed between square brackets `[...]`,
+and the syntax `[-]` is used when there are no logging tags
+associated with the log entry.
+
+`counter` is numeric, and is incremented for every
+log entry emitted to this sink. (There is thus one counter sequence per
+sink.) For entries that do not have a counter value
+associated (e.g., header entries in file sinks), the counter position
+in the common prefix is empty: `tags` is then
+followed by two ASCII space characters, instead of one space; the `counter`,
+and another space. The presence of the two ASCII spaces indicates
+reliably that no counter was present.
+
+`cont` is a format/continuation indicator:
+
+| Continuation indicator | ASCII | Description |
+|------------------------|-------|--|
+| space | 0x20 | Start of an unstructured entry. |
+| equal sign, "=" | 0x3d | Start of a structured entry. |
+| exclamation mark, "!" | 0x21 | Start of an embedded stack trace. |
+| plus sign, "+" | 0x2b | Continuation of a multi-line entry. The payload contains a newline character at this position. |
+| vertical bar | 0x7c | Continuation of a large entry. |
+
+### Examples
+
+Example single-line unstructured entry:
+
+~~~
+I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 started with engine type ‹2›
+~~~
+
+Example multi-line unstructured entry:
+
+~~~
+I210116 21:49:17.083093 14 1@cli/start.go:690 ⋮ [-] 40 node startup completed:
+I210116 21:49:17.083093 14 1@cli/start.go:690 ⋮ [-] 40 +CockroachDB node starting at 2021-01-16 21:49 (took 0.0s)
+~~~
+
+Example structured entry:
+
+~~~
+I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 ={"Timestamp":1610833757080706620,"EventType":"node_restart"}
+~~~
+
+Example long entries broken up into multiple lines:
+
+~~~
+I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.... 
+I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 |aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +~~~ + +~~~ +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 ={"Timestamp":1610833757080706620,"EventTy... +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 |pe":"node_restart"} +~~~ + +### Backward-compatibility notes + +Entries in this format can be read by most `crdb-v1` log parsers, +in particular the one included in the DB console and +also the [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) +facility. + +However, implementers of previous version parsers must +understand that the logging tags field is now always +included, and the lack of logging tags is included +by a tag string set to `[-]`. + +Likewise, the entry counter is now also always included, +and there is a special character after `counter` +to indicate whether the remainder of the line is a +structured entry, or a continuation of a previous entry. + +Finally, in the previous format, structured entries +were prefixed with the string `Structured entry: `. In +the new format, they are prefixed by the `=` continuation +indicator. + + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `colors` | The color profile to use. Possible values: none, auto, ansi, 256color. Default is auto. | +| `timezone` | The timezone to use for the timestamp column. The value can be any timezone name recognized by the Go standard library. Default is `UTC` | + + + +## Format `crdb-v2-tty` + +This format name is an alias for 'crdb-v2' with +the following format option defaults: + +- `colors: auto` + + +## Format `json` + +This format emits log entries as a JSON payload. + +The JSON object is guaranteed to not contain unescaped newlines +or other special characters, and the entry as a whole is followed +by a newline character. This makes the format suitable for +processing over a stream unambiguously. 
+ +Each entry contains at least the following fields: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `tag` | `tag` | (Only if the option `fluent-tag: true` is given.) A Fluent tag for the event, formed by the process name and the logging channel. | +| `d` | `datetime` | The pretty-printed date/time of the event timestamp, if enabled via options. | +| `f` | `file` | The name of the source file where the event was emitted. | +| `g` | `goroutine` | The identifier of the goroutine where the event was emitted. | +| `l` | `line` | The line number where the event was emitted in the source. | +| `r` | `redactable` | Whether the payload is redactable (see below for details). | +| `t` | `timestamp` | The timestamp at which the event was emitted on the logging channel. | +| `v` | `version` | The binary version with which the event was generated. | + + +After a couple of *header* entries written at the beginning of each log sink, +all subsequent log entries also contain the following fields: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `C` | `channel` | The name of the logging channel where the event was sent. | +| `sev` | `severity` | The severity of the event. | +| `c` | `channel_numeric` | The numeric identifier for the logging channel where the event was sent. | +| `n` | `entry_counter` | The entry number on this logging sink, relative to the last process restart. | +| `s` | `severity_numeric` | The numeric value of the severity of the event. | + + +Additionally, the following fields are conditionally present: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `N` | `node_id` | The node ID where the event was generated, once known. 
Only reported for single-tenant or KV servers. | +| `x` | `cluster_id` | The cluster ID where the event was generated, once known. Only reported for single-tenant or KV servers. | +| `q` | `instance_id` | The SQL instance ID where the event was generated, once known. | +| `T` | `tenant_id` | The SQL tenant ID where the event was generated, once known. | +| `V` | `tenant_name` | The SQL virtual cluster where the event was generated, once known. | +| `tags` | `tags` | The logging context tags for the entry, if there were context tags. | +| `message` | `message` | For unstructured events, the flat text payload. | +| `event` | `event` | The logging event, if structured (see below for details). | +| `stacks` | `stacks` | Goroutine stacks, for fatal events. | + +When an entry is structured, the `event` field maps to a dictionary +whose structure is one of the documented structured events. See the [reference documentation](eventlog.html) +for structured events for a list of possible payloads. + +When the entry is marked as `redactable`, the `tags`, `message`, and/or `event` payloads +contain delimiters (‹...›) around +fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) when log redaction is requested. + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `datetime-format` | The format to use for the `datetime` field. The value can be one of `none`, `iso8601`/`rfc3339` (synonyms), or `rfc1123`. Default is `none`. | +| `datetime-timezone` | The timezone to use for the `datetime` field. The value can be any timezone name recognized by the Go standard library. Default is `UTC` | +| `tag-style` | The tags to include in the envelope. The value can be `compact` (one letter tags) or `verbose` (long-form tags). Default is `verbose`. 
| +| `fluent-tag` | Whether to produce an additional field called `tag` for Fluent compatibility. Default is `false`. | + + + +## Format `json-compact` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: false` +- `tag-style: compact` + + +## Format `json-fluent` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: true` +- `tag-style: verbose` + + +## Format `json-fluent-compact` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: true` +- `tag-style: compact` + + diff --git a/src/current/_includes/cockroach-generated/release-23.2/logging.md b/src/current/_includes/cockroach-generated/release-23.2/logging.md new file mode 100644 index 00000000000..8b628dc2dd9 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-23.2/logging.md @@ -0,0 +1,179 @@ +## Logging levels (severities) + +### INFO + +The `INFO` severity is used for informational messages that do not +require action. + +### WARNING + +The `WARNING` severity is used for situations which may require special handling, +where normal operation is expected to resume automatically. + +### ERROR + +The `ERROR` severity is used for situations that require special handling, +where normal operation could not proceed as expected. +Other operations can continue mostly unaffected. + +### FATAL + +The `FATAL` severity is used for situations that require an immediate, hard +server shutdown. A report is also sent to telemetry if telemetry +is enabled. + + +## Logging channels + +### `DEV` + +The `DEV` channel is used during development to collect log +details useful for troubleshooting that fall outside the +scope of other channels. It is also the default logging +channel for events not associated with a channel. + +This channel is special in that there are no constraints as to +what may or may not be logged on it. 
Conversely, users in +production deployments are invited to not collect `DEV` logs in +centralized logging facilities, because they likely contain +sensitive operational data. +See [Configure logs](configure-logs.html#dev-channel). + +### `OPS` + +The `OPS` channel is used to report "point" operational events, +initiated by user operators or automation: + + - Operator or system actions on server processes: process starts, + stops, shutdowns, crashes (if they can be logged), + including each time: command-line parameters, current version being run + - Actions that impact the topology of a cluster: node additions, + removals, decommissions, etc. + - Job-related initiation or termination + - [Cluster setting](cluster-settings.html) changes + - [Zone configuration](configure-replication-zones.html) changes + +### `HEALTH` + +The `HEALTH` channel is used to report "background" operational +events, initiated by CockroachDB or reporting on automatic processes: + + - Current resource usage, including critical resource usage + - Node-node connection events, including connection errors and + gossip details + - Range and table leasing events + - Up- and down-replication, range unavailability + +### `STORAGE` + +The `STORAGE` channel is used to report low-level storage +layer events (RocksDB/Pebble). + +### `SESSIONS` + +The `SESSIONS` channel is used to report client network activity when enabled via +the `server.auth_log.sql_connections.enabled` and/or +`server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): + + - Connections opened/closed + - Authentication events: logins, failed attempts + - Session and query cancellation + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. 
+ +### `SQL_SCHEMA` + +The `SQL_SCHEMA` channel is used to report changes to the +SQL logical schema, excluding privilege and ownership changes +(which are reported separately on the `PRIVILEGES` channel) and +zone configuration changes (which go to the `OPS` channel). + +This includes: + + - Database/schema/table/sequence/view/type creation + - Adding/removing/changing table columns + - Changing sequence parameters + +`SQL_SCHEMA` events generally comprise changes to the schema that affect the +functional behavior of client apps using stored objects. + +### `USER_ADMIN` + +The `USER_ADMIN` channel is used to report changes +in users and roles, including: + + - Users added/dropped + - Changes to authentication credentials (e.g., passwords, validity, etc.) + - Role grants/revocations + - Role option grants/revocations + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. + +### `PRIVILEGES` + +The `PRIVILEGES` channel is used to report data +authorization changes, including: + + - Privilege grants/revocations on database, objects, etc. + - Object ownership changes + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. + +### `SENSITIVE_ACCESS` + +The `SENSITIVE_ACCESS` channel is used to report SQL +data access to sensitive data: + + - Data access audit events (when table audit is enabled via + [ALTER TABLE ... EXPERIMENTAL_AUDIT](alter-table.html#experimental_audit)) + - Data access audit events (when role-based audit is enabled via + [`sql.log.user_audit` cluster setting](role-based-audit-logging.html#syntax-of-audit-settings)) + - SQL statements executed by users with the admin role + - Operations that write to system tables + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. 
+ +### `SQL_EXEC` + +The `SQL_EXEC` channel is used to report SQL execution on +behalf of client connections: + + - Logical SQL statement executions (when enabled via the + `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) + - uncaught Go panic errors during the execution of a SQL statement. + +### `SQL_PERF` + +The `SQL_PERF` channel is used to report SQL executions +that are marked as "out of the ordinary" +to facilitate performance investigations. +This includes the SQL "slow query log". + +Arguably, this channel overlaps with `SQL_EXEC`. +However, we keep both channels separate for backward compatibility +with versions prior to v21.1, where the corresponding events +were redirected to separate files. + +### `SQL_INTERNAL_PERF` + +The `SQL_INTERNAL_PERF` channel is like the `SQL_PERF` channel, but is aimed at +helping developers of CockroachDB itself. It exists as a separate +channel so as to not pollute the `SQL_PERF` logging output with +internal troubleshooting details. + +### `TELEMETRY` + +The `TELEMETRY` channel reports telemetry events. Telemetry events describe +feature usage within CockroachDB and anonymizes any application- +specific data. + +### `KV_DISTRIBUTION` + +The `KV_DISTRIBUTION` channel is used to report data distribution events, such as moving +replicas between stores in the cluster, or adding (removing) replicas to +ranges. 
+ diff --git a/src/current/_includes/cockroach-generated/release-23.2/settings/settings.html b/src/current/_includes/cockroach-generated/release-23.2/settings/settings.html new file mode 100644 index 00000000000..561114c87ca --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-23.2/settings/settings.html @@ -0,0 +1,293 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
SettingTypeDefaultDescriptionSupported Deployments
admission.disk_bandwidth_tokens.elastic.enabled
booleantruewhen true, and provisioned bandwidth for the disk corresponding to a store is configured, tokens for elastic work will be limited if disk bandwidth becomes a bottleneckDedicated/Self-Hosted
admission.epoch_lifo.enabled
booleanfalsewhen true, epoch-LIFO behavior is enabled when there is significant delay in admissionServerless/Dedicated/Self-Hosted
admission.epoch_lifo.epoch_closing_delta_duration
duration5msthe delta duration before closing an epoch, for epoch-LIFO admission control orderingServerless/Dedicated/Self-Hosted
admission.epoch_lifo.epoch_duration
duration100msthe duration of an epoch, for epoch-LIFO admission control orderingServerless/Dedicated/Self-Hosted
admission.epoch_lifo.queue_delay_threshold_to_switch_to_lifo
duration105msthe queue delay encountered by a (tenant,priority) for switching to epoch-LIFO orderingServerless/Dedicated/Self-Hosted
admission.kv.enabled
booleantruewhen true, work performed by the KV layer is subject to admission controlDedicated/Self-Hosted
admission.sql_kv_response.enabled
booleantruewhen true, work performed by the SQL layer when receiving a KV response is subject to admission controlServerless/Dedicated/Self-Hosted
admission.sql_sql_response.enabled
booleantruewhen true, work performed by the SQL layer when receiving a DistSQL response is subject to admission controlServerless/Dedicated/Self-Hosted
bulkio.backup.deprecated_full_backup_with_subdir.enabled
booleanfalsewhen true, a backup command with a user specified subdirectory will create a full backup at the subdirectory if no backup already exists at that subdirectoryServerless/Dedicated/Self-Hosted
bulkio.backup.file_size
byte size128 MiBtarget size for individual data files produced during BACKUPServerless/Dedicated/Self-Hosted
bulkio.backup.read_timeout
duration5m0samount of time after which a read attempt is considered timed out, which causes the backup to failServerless/Dedicated/Self-Hosted
bulkio.backup.read_with_priority_after
duration1m0samount of time since the read-as-of time above which a BACKUP should use priority when retrying readsServerless/Dedicated/Self-Hosted
physical_replication.consumer.minimum_flush_interval
(alias: bulkio.stream_ingestion.minimum_flush_interval)
duration5sthe minimum timestamp between flushes; flushes may still occur if internal buffers fill upDedicated/Self-Hosted
changefeed.aggregator.flush_jitter
float0jitter aggregator flushes as a fraction of min_checkpoint_frequency. This setting has no effect if min_checkpoint_frequency is set to 0.Serverless/Dedicated/Self-Hosted
changefeed.backfill.concurrent_scan_requests
integer0number of concurrent scan requests per node issued during a backfillServerless/Dedicated/Self-Hosted
changefeed.backfill.scan_request_size
integer524288the maximum number of bytes returned by each scan requestServerless/Dedicated/Self-Hosted
changefeed.balance_range_distribution.enabled
(alias: changefeed.balance_range_distribution.enable)
booleanfalseif enabled, the ranges are balanced equally among all nodes. Note that this is supported only in export mode with initial_scan=only.Serverless/Dedicated/Self-Hosted
changefeed.batch_reduction_retry.enabled
(alias: changefeed.batch_reduction_retry_enabled)
booleanfalseif true, kafka changefeeds upon erroring on an oversized batch will attempt to resend the messages with progressively lower batch sizesServerless/Dedicated/Self-Hosted
changefeed.event_consumer_worker_queue_size
integer16if changefeed.event_consumer_workers is enabled, this setting sets the maximum number of events which a worker can bufferServerless/Dedicated/Self-Hosted
changefeed.event_consumer_workers
integer0the number of workers to use when processing events: <0 disables, 0 assigns a reasonable default, >0 assigns the setting value. for experimental/core changefeeds and changefeeds using parquet format, this is disabledServerless/Dedicated/Self-Hosted
changefeed.fast_gzip.enabled
booleantrueuse fast gzip implementationServerless/Dedicated/Self-Hosted
changefeed.frontier_highwater_lag_checkpoint_threshold
duration10m0scontrols the maximum the high-water mark is allowed to lag behind the leading spans of the frontier before per-span checkpointing is enabled; if 0, checkpointing due to high-water lag is disabledServerless/Dedicated/Self-Hosted
changefeed.memory.per_changefeed_limit
byte size512 MiBcontrols amount of data that can be buffered per changefeedServerless/Dedicated/Self-Hosted
changefeed.min_highwater_advance
duration0sminimum amount of time the changefeed high water mark must advance for it to be eligible for checkpointing; Default of 0 will checkpoint every time frontier advances, as long as the rate of checkpointing keeps up with the rate of frontier changesServerless/Dedicated/Self-Hosted
changefeed.node_throttle_config
stringspecifies node level throttling configuration for all changefeedsServerless/Dedicated/Self-Hosted
changefeed.protect_timestamp.max_age
duration96h0m0sfail the changefeed if the protected timestamp age exceeds this threshold; 0 disables expirationServerless/Dedicated/Self-Hosted
changefeed.protect_timestamp_interval
duration10m0scontrols how often the changefeed forwards its protected timestamp to the resolved timestampServerless/Dedicated/Self-Hosted
changefeed.schema_feed.read_with_priority_after
duration1m0sretry with high priority if we were not able to read descriptors for too long; 0 disablesServerless/Dedicated/Self-Hosted
changefeed.sink_io_workers
integer0the number of workers used by changefeeds when sending requests to the sink (currently the batching versions of webhook, pubsub, and kafka sinks that are enabled by changefeed.new_<sink type>_sink_enabled only): <0 disables, 0 assigns a reasonable default, >0 assigns the setting valueServerless/Dedicated/Self-Hosted
cloudstorage.azure.concurrent_upload_buffers
integer1controls the number of concurrent buffers that will be used by the Azure client when uploading chunks. Each buffer can buffer up to cloudstorage.write_chunk.size of memory during an uploadServerless/Dedicated/Self-Hosted
cloudstorage.http.custom_ca
stringcustom root CA (appended to system's default CAs) for verifying certificates when interacting with HTTPS storageServerless/Dedicated/Self-Hosted
cloudstorage.timeout
duration10m0sthe timeout for import/export storage operationsServerless/Dedicated/Self-Hosted
cluster.auto_upgrade.enabled
booleantruedisable automatic cluster version upgrade until resetServerless/Dedicated/Self-Hosted
cluster.organization
stringorganization nameServerless/Dedicated/Self-Hosted (read-only)
cluster.preserve_downgrade_option
stringdisable (automatic or manual) cluster version upgrade from the specified version until resetServerless/Dedicated/Self-Hosted
diagnostics.active_query_dumps.enabled
booleantrueexperimental: enable dumping of anonymized active queries to disk when node is under memory pressureServerless/Dedicated/Self-Hosted (read-only)
diagnostics.forced_sql_stat_reset.interval
duration2h0m0sinterval after which the reported SQL Stats are reset even if not collected by telemetry reporter. It has a max value of 24H.Serverless/Dedicated/Self-Hosted
diagnostics.memory_monitoring_dumps.enabled
booleantrueenable dumping of memory monitoring state at the same time as heap profiles are takenServerless/Dedicated/Self-Hosted (read-only)
diagnostics.reporting.enabled
booleantrueenable reporting diagnostic metrics to cockroach labs, but is ignored for Trial or Free licensesServerless/Dedicated/Self-Hosted
diagnostics.reporting.interval
duration1h0m0sinterval at which diagnostics data should be reportedServerless/Dedicated/Self-Hosted
enterprise.license
stringthe encoded cluster licenseServerless/Dedicated/Self-Hosted (read-only)
external.graphite.endpoint
stringif nonempty, push server metrics to the Graphite or Carbon server at the specified host:portServerless/Dedicated/Self-Hosted
external.graphite.interval
duration10sthe interval at which metrics are pushed to Graphite (if enabled)Serverless/Dedicated/Self-Hosted
feature.backup.enabled
booleantrueset to true to enable backups, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.changefeed.enabled
booleantrueset to true to enable changefeeds, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.export.enabled
booleantrueset to true to enable exports, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.import.enabled
booleantrueset to true to enable imports, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.restore.enabled
booleantrueset to true to enable restore, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.schema_change.enabled
booleantrueset to true to enable schema changes, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.stats.enabled
booleantrueset to true to enable CREATE STATISTICS/ANALYZE, false to disable; default is trueServerless/Dedicated/Self-Hosted
jobs.retention_time
duration336h0m0sthe amount of time for which records for completed jobs are retainedServerless/Dedicated/Self-Hosted
kv.allocator.lease_rebalance_threshold
float0.05minimum fraction away from the mean a store's lease count can be before it is considered for lease-transfersDedicated/Self-Hosted
kv.allocator.load_based_lease_rebalancing.enabled
booleantrueset to enable rebalancing of range leases based on load and latencyDedicated/Self-Hosted
kv.allocator.load_based_rebalancing
enumerationleases and replicaswhether to rebalance based on the distribution of load across stores [off = 0, leases = 1, leases and replicas = 2]Dedicated/Self-Hosted
kv.allocator.load_based_rebalancing.objective
enumerationcpuwhat objective does the cluster use to rebalance; if set to `qps` the cluster will attempt to balance qps among stores, if set to `cpu` the cluster will attempt to balance cpu usage among stores [qps = 0, cpu = 1]Dedicated/Self-Hosted
kv.allocator.load_based_rebalancing_interval
duration1m0sthe rough interval at which each store will check for load-based lease / replica rebalancing opportunitiesDedicated/Self-Hosted
kv.allocator.qps_rebalance_threshold
float0.1minimum fraction away from the mean a store's QPS (such as queries per second) can be before it is considered overfull or underfullDedicated/Self-Hosted
kv.allocator.range_rebalance_threshold
float0.05minimum fraction away from the mean a store's range count can be before it is considered overfull or underfullDedicated/Self-Hosted
kv.allocator.store_cpu_rebalance_threshold
float0.1minimum fraction away from the mean a store's cpu usage can be before it is considered overfull or underfullDedicated/Self-Hosted
kv.bulk_io_write.max_rate
byte size1.0 TiBthe rate limit (bytes/sec) to use for writes to disk on behalf of bulk io opsDedicated/Self-Hosted
kv.bulk_sst.max_allowed_overage
byte size64 MiBif positive, allowed size in excess of target size for SSTs from export requests; export requests (i.e. BACKUP) may buffer up to the sum of kv.bulk_sst.target_size and kv.bulk_sst.max_allowed_overage in memoryDedicated/Self-Hosted
kv.bulk_sst.target_size
byte size16 MiBtarget size for SSTs emitted from export requests; export requests (i.e. BACKUP) may buffer up to the sum of kv.bulk_sst.target_size and kv.bulk_sst.max_allowed_overage in memoryServerless/Dedicated/Self-Hosted (read-only)
kv.closed_timestamp.follower_reads.enabled
(alias: kv.closed_timestamp.follower_reads_enabled)
booleantrueallow (all) replicas to serve consistent historical reads based on closed timestamp informationServerless/Dedicated/Self-Hosted (read-only)
kv.closed_timestamp.lead_for_global_reads_override
duration0sif nonzero, overrides the lead time that global_read ranges use to publish closed timestampsServerless/Dedicated/Self-Hosted (read-only)
kv.closed_timestamp.side_transport_interval
duration200msthe interval at which the closed timestamp side-transport attempts to advance each range's closed timestamp; set to 0 to disable the side-transportServerless/Dedicated/Self-Hosted (read-only)
kv.closed_timestamp.target_duration
duration3sif nonzero, attempt to provide closed timestamp notifications for timestamps trailing cluster time by approximately this durationServerless/Dedicated/Self-Hosted (read-only)
kv.log_range_and_node_events.enabled
booleantrueset to true to transactionally log range events (e.g., split, merge, add/remove voter/non-voter) into system.rangelog and node join and restart events into system.eventlogDedicated/Self-Hosted
kv.protectedts.reconciliation.interval
duration5m0sthe frequency for reconciling jobs with protected timestamp recordsServerless/Dedicated/Self-Hosted (read-only)
kv.range_split.by_load.enabled
(alias: kv.range_split.by_load_enabled)
booleantrueallow automatic splits of ranges based on where load is concentratedDedicated/Self-Hosted
kv.range_split.load_cpu_threshold
duration500msthe CPU use per second over which, the range becomes a candidate for load based splittingDedicated/Self-Hosted
kv.range_split.load_qps_threshold
integer2500the QPS over which, the range becomes a candidate for load based splittingDedicated/Self-Hosted
kv.rangefeed.client.stream_startup_rate
integer100controls the rate per second the client will initiate new rangefeed stream for a single range; 0 implies unlimitedServerless/Dedicated/Self-Hosted
kv.rangefeed.closed_timestamp_refresh_interval
duration3sthe interval at which closed-timestamp updates are delivered to rangefeeds; set to 0 to use kv.closed_timestamp.side_transport_intervalServerless/Dedicated/Self-Hosted (read-only)
kv.rangefeed.enabled
booleanfalseif set, rangefeed registration is enabledServerless/Dedicated/Self-Hosted (read-only)
kv.rangefeed.range_stuck_threshold
duration1m0srestart rangefeeds if they don't emit anything for the specified threshold; 0 disables (kv.rangefeed.closed_timestamp_refresh_interval takes precedence)Serverless/Dedicated/Self-Hosted
kv.replica_circuit_breaker.slow_replication_threshold
duration1m0sduration after which slow proposals trip the per-Replica circuit breaker (zero duration disables breakers)Dedicated/Self-Hosted
kv.replica_stats.addsst_request_size_factor
integer50000the divisor that is applied to addsstable request sizes, then recorded in a leaseholders QPS; 0 means all requests are treated as cost 1Dedicated/Self-Hosted
kv.replication_reports.interval
duration1m0sthe frequency for generating the replication_constraint_stats, replication_stats_report and replication_critical_localities reports (set to 0 to disable)Dedicated/Self-Hosted
kv.snapshot_rebalance.max_rate
byte size32 MiBthe rate limit (bytes/sec) to use for rebalance and upreplication snapshotsDedicated/Self-Hosted
kv.snapshot_receiver.excise.enabled
booleanfalseset to true to use the experimental and unstable excise operation instead of range deletions for KV snapshotsDedicated/Self-Hosted
kv.transaction.max_intents_bytes
integer4194304maximum number of bytes used to track locks in transactionsServerless/Dedicated/Self-Hosted
kv.transaction.max_refresh_spans_bytes
integer4194304maximum number of bytes used to track refresh spans in serializable transactionsServerless/Dedicated/Self-Hosted
kv.transaction.reject_over_max_intents_budget.enabled
booleanfalseif set, transactions that exceed their lock tracking budget (kv.transaction.max_intents_bytes) are rejected instead of having their lock spans imprecisely compressedServerless/Dedicated/Self-Hosted
kvadmission.store.provisioned_bandwidth
byte size0 Bif set to a non-zero value, this is used as the provisioned bandwidth (in bytes/s), for each store. It can be over-ridden on a per-store basis using the --store flagDedicated/Self-Hosted
schedules.backup.gc_protection.enabled
booleantrueenable chaining of GC protection across backups run as part of a scheduleServerless/Dedicated/Self-Hosted
security.ocsp.mode
enumerationoffuse OCSP to check whether TLS certificates are revoked. If the OCSP server is unreachable, in strict mode all certificates will be rejected and in lax mode all certificates will be accepted. [off = 0, lax = 1, strict = 2]Serverless/Dedicated/Self-Hosted
security.ocsp.timeout
duration3stimeout before considering the OCSP server unreachableServerless/Dedicated/Self-Hosted
server.auth_log.sql_connections.enabled
booleanfalseif set, log SQL client connect and disconnect events to the SESSIONS log channel (note: may hinder performance on loaded nodes)Serverless/Dedicated/Self-Hosted
server.auth_log.sql_sessions.enabled
booleanfalseif set, log verbose SQL session authentication events to the SESSIONS log channel (note: may hinder performance on loaded nodes). Session start and end events are always logged regardless of this setting; disable the SESSIONS log channel to suppress them.Serverless/Dedicated/Self-Hosted
server.authentication_cache.enabled
booleantrueenables a cache used during authentication to avoid lookups to system tables when retrieving per-user authentication-related informationServerless/Dedicated/Self-Hosted
server.child_metrics.enabled
booleanfalseenables the exporting of child metrics, additional prometheus time series with extra labelsServerless/Dedicated/Self-Hosted
server.client_cert_expiration_cache.capacity
integer1000the maximum number of client cert expirations storedServerless/Dedicated/Self-Hosted
server.clock.forward_jump_check.enabled
(alias: server.clock.forward_jump_check_enabled)
booleanfalseif enabled, forward clock jumps > max_offset/2 will cause a panicServerless/Dedicated/Self-Hosted
server.clock.persist_upper_bound_interval
duration0sthe interval between persisting the wall time upper bound of the clock. The clock does not generate a wall time greater than the persisted timestamp and will panic if it sees a wall time greater than this value. When cockroach starts, it waits for the wall time to catch-up till this persisted timestamp. This guarantees monotonic wall time across server restarts. Not setting this or setting a value of 0 disables this feature.Serverless/Dedicated/Self-Hosted
server.consistency_check.max_rate
byte size8.0 MiBthe rate limit (bytes/sec) to use for consistency checks; used in conjunction with server.consistency_check.interval to control the frequency of consistency checks. Note that setting this too high can negatively impact performance.Dedicated/Self-Hosted
server.eventlog.enabled
booleantrueif set, logged notable events are also stored in the table system.eventlogServerless/Dedicated/Self-Hosted
server.eventlog.ttl
duration2160h0m0sif nonzero, entries in system.eventlog older than this duration are periodically purgedServerless/Dedicated/Self-Hosted
server.host_based_authentication.configuration
stringhost-based authentication configuration to use during connection authenticationServerless/Dedicated/Self-Hosted
server.hot_ranges_request.node.timeout
duration5m0sthe duration allowed for a single node to return hot range data before the request is cancelled; if set to 0, there is no timeoutServerless/Dedicated/Self-Hosted
server.hsts.enabled
booleanfalseif true, HSTS headers will be sent along with all HTTP requests. The headers will contain a max-age setting of one year. Browsers honoring the header will always use HTTPS to access the DB Console. Ensure that TLS is correctly configured prior to enabling.Serverless/Dedicated/Self-Hosted
server.http.base_path
string/path to redirect the user to upon successful loginServerless/Dedicated/Self-Hosted
server.identity_map.configuration
stringsystem-identity to database-username mappingsServerless/Dedicated/Self-Hosted
server.log_gc.max_deletions_per_cycle
integer1000the maximum number of entries to delete on each purge of log-like system tablesServerless/Dedicated/Self-Hosted
server.log_gc.period
duration1h0m0sthe period at which log-like system tables are checked for old entriesServerless/Dedicated/Self-Hosted
server.max_connections_per_gateway
integer-1the maximum number of SQL connections per gateway allowed at a given time (note: this will only limit future connection attempts and will not affect already established connections). Negative values result in unlimited number of connections. Superusers are not affected by this limit.Serverless/Dedicated/Self-Hosted
server.max_open_transactions_per_gateway
integer-1the maximum number of open SQL transactions per gateway allowed at a given time. Negative values result in unlimited number of connections. Superusers are not affected by this limit.Serverless/Dedicated/Self-Hosted
server.oidc_authentication.autologin.enabled
(alias: server.oidc_authentication.autologin)
booleanfalseif true, logged-out visitors to the DB Console will be automatically redirected to the OIDC login endpointServerless/Dedicated/Self-Hosted
server.oidc_authentication.button_text
stringLog in with your OIDC providertext to show on button on DB Console login page to login with your OIDC provider (only shown if OIDC is enabled)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.claim_json_key
stringsets JSON key of principal to extract from payload after OIDC authentication completes (usually email or sid)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.client_id
stringsets OIDC client idServerless/Dedicated/Self-Hosted
server.oidc_authentication.client_secret
stringsets OIDC client secretServerless/Dedicated/Self-Hosted
server.oidc_authentication.enabled
booleanfalseenables or disables OIDC login for the DB ConsoleServerless/Dedicated/Self-Hosted
server.oidc_authentication.principal_regex
string(.+)regular expression to apply to extracted principal (see claim_json_key setting) to translate to SQL user (golang regex format, must include 1 grouping to extract)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.provider_url
stringsets OIDC provider URL ({provider_url}/.well-known/openid-configuration must resolve)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.redirect_url
stringhttps://localhost:8080/oidc/v1/callbacksets OIDC redirect URL via a URL string or a JSON string containing a required `redirect_urls` key with an object that maps from region keys to URL strings (URLs should point to your load balancer and must route to the path /oidc/v1/callback)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.scopes
stringopenidsets OIDC scopes to include with authentication request (space delimited list of strings, required to start with `openid`)Serverless/Dedicated/Self-Hosted
server.rangelog.ttl
duration720h0m0sif nonzero, entries in system.rangelog older than this duration are periodically purgedDedicated/Self-Hosted
server.redact_sensitive_settings.enabled
booleanfalseenables or disables the redaction of sensitive settings in the output of SHOW CLUSTER SETTINGS and SHOW ALL CLUSTER SETTINGS for users without the MODIFYCLUSTERSETTING privilegeServerless/Dedicated/Self-Hosted
server.shutdown.connections.timeout
(alias: server.shutdown.connection_wait)
duration0sthe maximum amount of time a server waits for all SQL connections to be closed before proceeding with a drain. (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Serverless/Dedicated/Self-Hosted
server.shutdown.initial_wait
(alias: server.shutdown.drain_wait)
duration0sthe amount of time a server waits in an unready state before proceeding with a drain (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting. --drain-wait is to specify the duration of the whole draining process, while server.shutdown.initial_wait is to set the wait time for health probes to notice that the node is not ready.)Serverless/Dedicated/Self-Hosted
server.shutdown.lease_transfer_iteration.timeout
(alias: server.shutdown.lease_transfer_wait)
duration5sthe timeout for a single iteration of the range lease transfer phase of draining (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Dedicated/Self-Hosted
server.shutdown.transactions.timeout
(alias: server.shutdown.query_wait)
duration10sthe timeout for waiting for active transactions to finish during a drain (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Serverless/Dedicated/Self-Hosted
server.time_until_store_dead
duration5m0sthe time after which if there is no new gossiped information about a store, it is considered deadServerless/Dedicated/Self-Hosted
server.user_login.cert_password_method.auto_scram_promotion.enabled
booleantruewhether to automatically promote cert-password authentication to use SCRAMServerless/Dedicated/Self-Hosted
server.user_login.downgrade_scram_stored_passwords_to_bcrypt.enabled
booleantrueif server.user_login.password_encryption=crdb-bcrypt, this controls whether to automatically re-encode stored passwords using scram-sha-256 to crdb-bcryptServerless/Dedicated/Self-Hosted
server.user_login.min_password_length
integer1the minimum length accepted for passwords set in cleartext via SQL. Note that a value lower than 1 is ignored: passwords cannot be empty in any case.Serverless/Dedicated/Self-Hosted
server.user_login.password_encryption
enumerationscram-sha-256which hash method to use to encode cleartext passwords passed via ALTER/CREATE USER/ROLE WITH PASSWORD [crdb-bcrypt = 2, scram-sha-256 = 3]Serverless/Dedicated/Self-Hosted
server.user_login.password_hashes.default_cost.crdb_bcrypt
integer10the hashing cost to use when storing passwords supplied as cleartext by SQL clients with the hashing method crdb-bcrypt (allowed range: 4-31)Serverless/Dedicated/Self-Hosted
server.user_login.password_hashes.default_cost.scram_sha_256
integer10610the hashing cost to use when storing passwords supplied as cleartext by SQL clients with the hashing method scram-sha-256 (allowed range: 4096-240000000000)Serverless/Dedicated/Self-Hosted
server.user_login.rehash_scram_stored_passwords_on_cost_change.enabled
booleantrueif server.user_login.password_hashes.default_cost.scram_sha_256 differs from the cost in a stored hash, this controls whether to automatically re-encode stored passwords using scram-sha-256 with the new default costServerless/Dedicated/Self-Hosted
server.user_login.timeout
duration10stimeout after which client authentication times out if some system range is unavailable (0 = no timeout)Serverless/Dedicated/Self-Hosted
server.user_login.upgrade_bcrypt_stored_passwords_to_scram.enabled
booleantrueif server.user_login.password_encryption=scram-sha-256, this controls whether to automatically re-encode stored passwords using crdb-bcrypt to scram-sha-256Serverless/Dedicated/Self-Hosted
server.web_session.purge.ttl
duration1h0m0sif nonzero, entries in system.web_sessions older than this duration are periodically purgedServerless/Dedicated/Self-Hosted
server.web_session.timeout
(alias: server.web_session_timeout)
duration168h0m0sthe duration that a newly created web session will be validServerless/Dedicated/Self-Hosted
spanconfig.bounds.enabled
booleantruedictates whether span config bounds are consulted when serving span configs for secondary tenantsDedicated/Self-Hosted
spanconfig.range_coalescing.system.enabled
(alias: spanconfig.storage_coalesce_adjacent.enabled)
booleantruecollapse adjacent ranges with the same span configs, for the ranges specific to the system tenantDedicated/Self-Hosted
spanconfig.range_coalescing.application.enabled
(alias: spanconfig.tenant_coalesce_adjacent.enabled)
booleantruecollapse adjacent ranges with the same span configs across all secondary tenant keyspacesDedicated/Self-Hosted
sql.auth.change_own_password.enabled
booleanfalsecontrols whether a user is allowed to change their own password, even if they have no other privilegesServerless/Dedicated/Self-Hosted
sql.auth.grant_option_for_owner.enabled
booleantruedetermines whether the GRANT OPTION for privileges is implicitly given to the owner of an objectServerless/Dedicated/Self-Hosted
sql.auth.grant_option_inheritance.enabled
booleantruedetermines whether the GRANT OPTION for privileges is inherited through role membershipServerless/Dedicated/Self-Hosted
sql.auth.public_schema_create_privilege.enabled
booleantruedetermines whether to grant all users the CREATE privileges on the public schema when it is createdServerless/Dedicated/Self-Hosted
sql.auth.resolve_membership_single_scan.enabled
booleantruedetermines whether to populate the role membership cache with a single scanServerless/Dedicated/Self-Hosted
sql.closed_session_cache.capacity
integer1000the maximum number of sessions in the cacheServerless/Dedicated/Self-Hosted
sql.closed_session_cache.time_to_live
integer3600the maximum time to live, in secondsServerless/Dedicated/Self-Hosted
sql.contention.event_store.capacity
byte size64 MiBthe in-memory storage capacity per-node of contention event storeServerless/Dedicated/Self-Hosted
sql.contention.event_store.duration_threshold
duration0sminimum contention duration to cause the contention events to be collected into crdb_internal.transaction_contention_eventsServerless/Dedicated/Self-Hosted
sql.contention.txn_id_cache.max_size
byte size64 MiBthe maximum byte size TxnID cache will use (set to 0 to disable)Serverless/Dedicated/Self-Hosted
sql.cross_db_fks.enabled
booleanfalseif true, creating foreign key references across databases is allowedServerless/Dedicated/Self-Hosted
sql.cross_db_sequence_owners.enabled
booleanfalseif true, creating sequences owned by tables from other databases is allowedServerless/Dedicated/Self-Hosted
sql.cross_db_sequence_references.enabled
booleanfalseif true, sequences referenced by tables from other databases are allowedServerless/Dedicated/Self-Hosted
sql.cross_db_views.enabled
booleanfalseif true, creating views that refer to other databases is allowedServerless/Dedicated/Self-Hosted
sql.defaults.cost_scans_with_default_col_size.enabled
booleanfalsesetting to true uses the same size for all columns to compute scan cost
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.datestyle
enumerationiso, mdydefault value for DateStyle session setting [iso, mdy = 0, iso, dmy = 1, iso, ymd = 2]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.default_hash_sharded_index_bucket_count
integer16used as bucket count if bucket count is not specified in hash sharded index definition
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.default_int_size
integer8the size, in bytes, of an INT type
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.disallow_full_table_scans.enabled
booleanfalsesetting to true rejects queries that have planned a full table scan
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.distsql
enumerationautodefault distributed SQL execution mode [off = 0, auto = 1, on = 2, always = 3]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_alter_column_type.enabled
booleanfalsedefault value for experimental_alter_column_type session setting; enables the use of ALTER COLUMN TYPE for general conversions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_distsql_planning
enumerationoffdefault experimental_distsql_planning mode; enables experimental opt-driven DistSQL planning [off = 0, on = 1]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_enable_unique_without_index_constraints.enabled
booleanfalsedefault value for experimental_enable_unique_without_index_constraints session setting; disables unique without index constraints by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_implicit_column_partitioning.enabled
booleanfalsedefault value for experimental_enable_temp_tables; allows for the use of implicit column partitioning
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_temporary_tables.enabled
booleanfalsedefault value for experimental_enable_temp_tables; allows for use of temporary tables by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.foreign_key_cascades_limit
integer10000default value for foreign_key_cascades_limit session setting; limits the number of cascading operations that run as part of a single query
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.idle_in_session_timeout
duration0sdefault value for the idle_in_session_timeout session setting; controls the duration a session is permitted to idle before the session is terminated; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.idle_in_transaction_session_timeout
duration0sdefault value for the idle_in_transaction_session_timeout; controls the duration a session is permitted to idle in a transaction before the session is terminated; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.implicit_select_for_update.enabled
booleantruedefault value for enable_implicit_select_for_update session setting; enables FOR UPDATE locking during the row-fetch phase of mutation statements
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.insert_fast_path.enabled
booleantruedefault value for enable_insert_fast_path session setting; enables a specialized insert path
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.intervalstyle
enumerationpostgresdefault value for IntervalStyle session setting [postgres = 0, iso_8601 = 1, sql_standard = 2]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.large_full_scan_rows
float1000default value for large_full_scan_rows session setting which determines the maximum table size allowed for a full scan when disallow_full_table_scans is set to true
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.locality_optimized_partitioned_index_scan.enabled
booleantruedefault value for locality_optimized_partitioned_index_scan session setting; enables searching for rows in the current region before searching remote regions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.lock_timeout
duration0sdefault value for the lock_timeout session setting; controls the duration a query is permitted to wait while attempting to acquire a lock on a key or while blocking on an existing lock in order to perform a non-locking read on a key; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.on_update_rehome_row.enabled
booleantruedefault value for on_update_rehome_row; enables ON UPDATE rehome_row() expressions to trigger on updates
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.optimizer_use_histograms.enabled
booleantruedefault value for optimizer_use_histograms session setting; enables usage of histograms in the optimizer by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.optimizer_use_multicol_stats.enabled
booleantruedefault value for optimizer_use_multicol_stats session setting; enables usage of multi-column stats in the optimizer by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.override_alter_primary_region_in_super_region.enabled
booleanfalsedefault value for override_alter_primary_region_in_super_region; allows for altering the primary region even if the primary region is a member of a super region
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.override_multi_region_zone_config.enabled
booleanfalsedefault value for override_multi_region_zone_config; allows for overriding the zone configs of a multi-region table or database
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.prefer_lookup_joins_for_fks.enabled
booleanfalsedefault value for prefer_lookup_joins_for_fks session setting; causes foreign key operations to use lookup joins when possible
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.primary_region
stringif not empty, all databases created without a PRIMARY REGION will implicitly have the given PRIMARY REGION
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.reorder_joins_limit
integer8default number of joins to reorder
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.require_explicit_primary_keys.enabled
booleanfalsedefault value for requiring explicit primary keys in CREATE TABLE statements
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.results_buffer.size
byte size16 KiBdefault size of the buffer that accumulates results for a statement or a batch of statements before they are sent to the client. This can be overridden on an individual connection with the 'results_buffer_size' parameter. Note that auto-retries generally only happen while no results have been delivered to the client, so reducing this size can increase the number of retriable errors a client receives. On the other hand, increasing the buffer size can increase the delay until the client receives the first result row. Updating the setting only affects new connections. Setting to 0 disables any buffering.
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.serial_normalization
enumerationrowiddefault handling of SERIAL in table definitions [rowid = 0, virtual_sequence = 1, sql_sequence = 2, sql_sequence_cached = 3, unordered_rowid = 4]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.statement_timeout
duration0sdefault value for the statement_timeout session setting; controls the duration a query is permitted to run before it is canceled; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.stub_catalog_tables.enabled
booleantruedefault value for stub_catalog_tables session setting
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.super_regions.enabled
booleanfalsedefault value for enable_super_regions; allows for the usage of super regions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.transaction_rows_read_err
integer0the limit for the number of rows read by a SQL transaction which - once exceeded - will fail the transaction (or will trigger a logging event to SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.transaction_rows_read_log
integer0the threshold for the number of rows read by a SQL transaction which - once exceeded - will trigger a logging event to SQL_PERF (or SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.transaction_rows_written_err
integer0the limit for the number of rows written by a SQL transaction which - once exceeded - will fail the transaction (or will trigger a logging event to SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.transaction_rows_written_log
integer0the threshold for the number of rows written by a SQL transaction which - once exceeded - will trigger a logging event to SQL_PERF (or SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.use_declarative_schema_changer
enumerationondefault value for use_declarative_schema_changer session setting; disables new schema changer by default [off = 0, on = 1, unsafe = 2, unsafe_always = 3]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.vectorize
enumerationondefault vectorize mode [on = 0, on = 2, experimental_always = 3, off = 4]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.zigzag_join.enabled
booleanfalsedefault value for enable_zigzag_join session setting; disallows use of zig-zag join by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.distsql.temp_storage.workmem
byte size64 MiBmaximum amount of memory in bytes a processor can use before falling back to temp storageServerless/Dedicated/Self-Hosted
sql.guardrails.max_row_size_err
byte size512 MiBmaximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an error is returned; use 0 to disableServerless/Dedicated/Self-Hosted
sql.guardrails.max_row_size_log
byte size64 MiBmaximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an event is logged to SQL_PERF (or SQL_INTERNAL_PERF if the mutating statement was internal); use 0 to disableServerless/Dedicated/Self-Hosted
sql.hash_sharded_range_pre_split.max
integer16max pre-split ranges to have when adding hash sharded index to an existing tableServerless/Dedicated/Self-Hosted
sql.index_recommendation.drop_unused_duration
duration168h0m0sthe index unused duration at which we begin to recommend dropping the indexServerless/Dedicated/Self-Hosted
sql.insights.anomaly_detection.enabled
booleantrueenable per-fingerprint latency recording and anomaly detectionServerless/Dedicated/Self-Hosted
sql.insights.anomaly_detection.latency_threshold
duration50msstatements must surpass this threshold to trigger anomaly detection and identificationServerless/Dedicated/Self-Hosted
sql.insights.anomaly_detection.memory_limit
byte size1.0 MiBthe maximum amount of memory allowed for tracking statement latenciesServerless/Dedicated/Self-Hosted
sql.insights.execution_insights_capacity
integer1000the size of the per-node store of execution insightsServerless/Dedicated/Self-Hosted
sql.insights.high_retry_count.threshold
integer10the number of retries a slow statement must have undergone for its high retry count to be highlighted as a potential problemServerless/Dedicated/Self-Hosted
sql.insights.latency_threshold
duration100msamount of time after which an executing statement is considered slow. Use 0 to disable.Serverless/Dedicated/Self-Hosted
sql.log.slow_query.experimental_full_table_scans.enabled
booleanfalsewhen set to true, statements that perform a full table/index scan will be logged to the slow query log even if they do not meet the latency threshold. Must have the slow query log enabled for this setting to have any effect.Serverless/Dedicated/Self-Hosted
sql.log.slow_query.internal_queries.enabled
booleanfalsewhen set to true, internal queries which exceed the slow query log threshold are logged to a separate log. Must have the slow query log enabled for this setting to have any effect.Serverless/Dedicated/Self-Hosted
sql.log.slow_query.latency_threshold
duration0swhen set to non-zero, log statements whose service latency exceeds the threshold to a secondary logger on each nodeServerless/Dedicated/Self-Hosted
sql.log.user_audit
stringuser/role-based audit logging configuration. An enterprise license is required for this cluster setting to take effect.Serverless/Dedicated/Self-Hosted
sql.log.user_audit.reduced_config.enabled
booleanfalseenables logic to compute a reduced audit configuration, computing the audit configuration only once at session start instead of at each SQL event. The tradeoff with the increase in performance (~5%), is that changes to the audit configuration (user role memberships/cluster setting) are not reflected within session. Users will need to start a new session to see these changes in their auditing behaviour.Serverless/Dedicated/Self-Hosted
sql.metrics.index_usage_stats.enabled
booleantruecollect per index usage statisticsServerless/Dedicated/Self-Hosted
sql.metrics.max_mem_reported_stmt_fingerprints
integer100000the maximum number of reported statement fingerprints stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.max_mem_reported_txn_fingerprints
integer100000the maximum number of reported transaction fingerprints stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.max_mem_stmt_fingerprints
integer100000the maximum number of statement fingerprints stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.max_mem_txn_fingerprints
integer100000the maximum number of transaction fingerprints stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.dump_to_logs.enabled
(alias: sql.metrics.statement_details.dump_to_logs)
booleanfalsedump collected statement statistics to node logs when periodically clearedServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.enabled
booleantruecollect per-statement query statisticsServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.gateway_node.enabled
booleanfalsesave the gateway node for each statement fingerprint. If false, the value will be stored as 0.Serverless/Dedicated/Self-Hosted
sql.metrics.statement_details.index_recommendation_collection.enabled
booleantruegenerate an index recommendation for each fingerprint IDServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.max_mem_reported_idx_recommendations
integer5000the maximum number of reported index recommendation info stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.plan_collection.enabled
booleanfalseperiodically save a logical plan for each fingerprintServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.plan_collection.period
duration5m0sthe time until a new logical plan is collectedServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.threshold
duration0sminimum execution time to cause statement statistics to be collected. If configured, no transaction stats are collected.Serverless/Dedicated/Self-Hosted
sql.metrics.transaction_details.enabled
booleantruecollect per-application transaction statisticsServerless/Dedicated/Self-Hosted
sql.multiple_modifications_of_table.enabled
booleanfalseif true, allow statements containing multiple INSERT ON CONFLICT, UPSERT, UPDATE, or DELETE subqueries modifying the same table, at the risk of data corruption if the same row is modified multiple times by a single statement (multiple INSERT subqueries without ON CONFLICT cannot cause corruption and are always allowed)Serverless/Dedicated/Self-Hosted
sql.multiregion.drop_primary_region.enabled
booleantrueallows dropping the PRIMARY REGION of a database if it is the last regionServerless/Dedicated/Self-Hosted
sql.notices.enabled
booleantrueenable notices in the server/client protocol being sentServerless/Dedicated/Self-Hosted
sql.optimizer.uniqueness_checks_for_gen_random_uuid.enabled
booleanfalseif enabled, uniqueness checks may be planned for mutations of UUID columns updated with gen_random_uuid(); otherwise, uniqueness is assumed due to near-zero collision probabilityServerless/Dedicated/Self-Hosted
sql.schema.telemetry.recurrence
string@weeklycron-tab recurrence for SQL schema telemetry jobServerless/Dedicated/Self-Hosted (read-only)
sql.show_ranges_deprecated_behavior.enabled
booleanfalseif set, SHOW RANGES and crdb_internal.ranges{_no_leases} behave with deprecated pre-v23.1 semantics. NB: the new SHOW RANGES interface has richer WITH options than pre-v23.1 SHOW RANGES.Serverless/Dedicated/Self-Hosted
sql.spatial.experimental_box2d_comparison_operators.enabled
booleanfalseenables the use of certain experimental box2d comparison operatorsServerless/Dedicated/Self-Hosted
sql.stats.activity.persisted_rows.max
integer200000maximum number of rows of statement and transaction activity that will be persisted in the system tablesServerless/Dedicated/Self-Hosted
sql.stats.automatic_collection.enabled
booleantrueautomatic statistics collection modeServerless/Dedicated/Self-Hosted
sql.stats.automatic_collection.fraction_stale_rows
float0.2target fraction of stale rows per table that will trigger a statistics refreshServerless/Dedicated/Self-Hosted
sql.stats.automatic_collection.min_stale_rows
integer500target minimum number of stale rows per table that will trigger a statistics refreshServerless/Dedicated/Self-Hosted
sql.stats.cleanup.recurrence
string@hourlycron-tab recurrence for SQL Stats cleanup jobServerless/Dedicated/Self-Hosted
sql.stats.flush.enabled
booleantrueif set, SQL execution statistics are periodically flushed to diskServerless/Dedicated/Self-Hosted
sql.stats.flush.interval
duration10m0sthe interval at which SQL execution statistics are flushed to disk, this value must be less than or equal to 1 hourServerless/Dedicated/Self-Hosted
sql.stats.forecasts.enabled
booleantruewhen true, enables generation of statistics forecasts by default for all tablesServerless/Dedicated/Self-Hosted
sql.stats.forecasts.max_decrease
float0.3333333333333333the most a prediction is allowed to decrease, expressed as the minimum ratio of the prediction to the lowest prior observationServerless/Dedicated/Self-Hosted
sql.stats.forecasts.min_goodness_of_fit
float0.95the minimum R² (goodness of fit) measurement required from all predictive models to use a forecastServerless/Dedicated/Self-Hosted
sql.stats.forecasts.min_observations
integer3the minimum number of observed statistics required to produce a statistics forecastServerless/Dedicated/Self-Hosted
sql.stats.histogram_buckets.count
integer200maximum number of histogram buckets to build during table statistics collectionServerless/Dedicated/Self-Hosted
sql.stats.histogram_collection.enabled
booleantruehistogram collection modeServerless/Dedicated/Self-Hosted
sql.stats.histogram_samples.count
integer10000number of rows sampled for histogram construction during table statistics collectionServerless/Dedicated/Self-Hosted
sql.stats.multi_column_collection.enabled
booleantruemulti-column statistics collection modeServerless/Dedicated/Self-Hosted
sql.stats.non_default_columns.min_retention_period
duration24h0m0sminimum retention period for table statistics collected on non-default columnsServerless/Dedicated/Self-Hosted
sql.stats.non_indexed_json_histograms.enabled
booleantrueset to true to collect table statistics histograms on non-indexed JSON columnsServerless/Dedicated/Self-Hosted
sql.stats.persisted_rows.max
integer1000000maximum number of rows of statement and transaction statistics that will be persisted in the system tables before compaction beginsServerless/Dedicated/Self-Hosted
sql.stats.post_events.enabled
booleanfalseif set, an event is logged for every CREATE STATISTICS jobServerless/Dedicated/Self-Hosted
sql.stats.response.max
integer20000the maximum number of statements and transaction stats returned in a CombinedStatements requestServerless/Dedicated/Self-Hosted
sql.stats.response.show_internal.enabled
booleanfalsecontrols if statistics for internal executions should be returned by the CombinedStatements and if internal sessions should be returned by the ListSessions endpoints. These endpoints are used to display statistics on the SQL Activity pagesServerless/Dedicated/Self-Hosted
sql.stats.system_tables.enabled
booleantruewhen true, enables use of statistics on system tables by the query optimizerServerless/Dedicated/Self-Hosted
sql.stats.system_tables_autostats.enabled
booleantruewhen true, enables automatic collection of statistics on system tablesServerless/Dedicated/Self-Hosted
sql.stats.virtual_computed_columns.enabled
booleanfalseset to true to collect table statistics on virtual computed columnsServerless/Dedicated/Self-Hosted
sql.telemetry.query_sampling.enabled
booleanfalsewhen set to true, executed queries will emit an event on the telemetry logging channelServerless/Dedicated/Self-Hosted
sql.telemetry.query_sampling.internal.enabled
booleanfalsewhen set to true, internal queries will be sampled in telemetry loggingServerless/Dedicated/Self-Hosted
sql.telemetry.query_sampling.max_event_frequency
integer8the max event frequency (events per second) at which we sample executions for telemetry, note that it is recommended that this value shares a log-line limit of 10 logs per second on the telemetry pipeline with all other telemetry events. If sampling mode is set to 'transaction', this value is ignored.Serverless/Dedicated/Self-Hosted
sql.telemetry.transaction_sampling.max_event_frequency
integer8the max event frequency (events per second) at which we sample transactions for telemetry. If sampling mode is set to 'statement', this setting is ignored. In practice, this means that we only sample a transaction if 1/max_event_frequency seconds have elapsed since the last transaction was sampled.Serverless/Dedicated/Self-Hosted
sql.telemetry.transaction_sampling.statement_events_per_transaction.max
integer50the maximum number of statement events to log for every sampled transaction. Note that statements that are logged by force do not adhere to this limit.Serverless/Dedicated/Self-Hosted
sql.temp_object_cleaner.cleanup_interval
duration30m0show often to clean up orphaned temporary objectsServerless/Dedicated/Self-Hosted
sql.temp_object_cleaner.wait_interval
duration30m0show long after creation a temporary object will be cleaned upServerless/Dedicated/Self-Hosted
sql.log.all_statements.enabled
(alias: sql.trace.log_statement_execute)
booleanfalseset to true to enable logging of all executed statementsServerless/Dedicated/Self-Hosted
sql.trace.session_eventlog.enabled
booleanfalseset to true to enable session tracing; note that enabling this may have a negative performance impactServerless/Dedicated/Self-Hosted
sql.trace.stmt.enable_threshold
duration0senables tracing on all statements; statements executing for longer than this duration will have their trace logged (set to 0 to disable); note that enabling this may have a negative performance impact; this setting applies to individual statements within a transaction and is therefore finer-grained than sql.trace.txn.enable_thresholdServerless/Dedicated/Self-Hosted
sql.trace.txn.enable_threshold
duration0senables tracing on all transactions; transactions open for longer than this duration will have their trace logged (set to 0 to disable); note that enabling this may have a negative performance impact; this setting is coarser-grained than sql.trace.stmt.enable_threshold because it applies to all statements within a transaction as well as client communication (e.g. retries)Serverless/Dedicated/Self-Hosted
sql.ttl.changefeed_replication.disabled
booleanfalseif true, deletes issued by TTL will not be replicated via changefeedsServerless/Dedicated/Self-Hosted
sql.ttl.default_delete_batch_size
integer100default amount of rows to delete in a single query during a TTL jobServerless/Dedicated/Self-Hosted
sql.ttl.default_delete_rate_limit
integer100default delete rate limit (rows per second) per node for each TTL job. Use 0 to signify no rate limit.Serverless/Dedicated/Self-Hosted
sql.ttl.default_select_batch_size
integer500default amount of rows to select in a single query during a TTL jobServerless/Dedicated/Self-Hosted
sql.ttl.default_select_rate_limit
integer0default select rate limit (rows per second) per node for each TTL job. Use 0 to signify no rate limit.Serverless/Dedicated/Self-Hosted
sql.ttl.job.enabled
booleantruewhether the TTL job is enabledServerless/Dedicated/Self-Hosted
sql.txn.read_committed_isolation.enabled
booleanfalseset to true to allow transactions to use the READ COMMITTED isolation level if specified by BEGIN/SET commandsServerless/Dedicated/Self-Hosted
sql.txn_fingerprint_id_cache.capacity
integer100the maximum number of txn fingerprint IDs storedServerless/Dedicated/Self-Hosted
storage.experimental.eventually_file_only_snapshots.enabled
booleanfalseset to true to use eventually-file-only-snapshots even when kv.snapshot_receiver.excise.enabled is falseDedicated/Self-Hosted
storage.ingest_split.enabled
booleanfalseset to true to use ingest-time splitting to lower write-amplification (experimental)Dedicated/Self-Hosted
storage.max_sync_duration
duration20smaximum duration for disk operations; any operations that take longer than this setting trigger a warning log entry or process crashServerless/Dedicated/Self-Hosted (read-only)
storage.max_sync_duration.fatal.enabled
booleantrueif true, fatal the process when a disk operation exceeds storage.max_sync_durationServerless/Dedicated/Self-Hosted
storage.value_blocks.enabled
booleantrueset to true to enable writing of value blocks in sstablesServerless/Dedicated/Self-Hosted
timeseries.storage.enabled
booleantrueif set, periodic timeseries data is stored within the cluster; disabling is not recommended unless you are storing the data elsewhereDedicated/Self-Hosted
timeseries.storage.resolution_10s.ttl
duration240h0m0sthe maximum age of time series data stored at the 10 second resolution. Data older than this is subject to rollup and deletion.Serverless/Dedicated/Self-Hosted (read-only)
timeseries.storage.resolution_30m.ttl
duration2160h0m0sthe maximum age of time series data stored at the 30 minute resolution. Data older than this is subject to deletion.Serverless/Dedicated/Self-Hosted (read-only)
trace.debug_http_endpoint.enabled
(alias: trace.debug.enable)
booleanfalseif set, traces for recent requests can be seen at https://<ui>/debug/requestsServerless/Dedicated/Self-Hosted
trace.opentelemetry.collector
stringaddress of an OpenTelemetry trace collector to receive traces using the otel gRPC protocol, as <host>:<port>. If no port is specified, 4317 will be used.Serverless/Dedicated/Self-Hosted
trace.snapshot.rate
duration0sif non-zero, interval at which background trace snapshots are capturedServerless/Dedicated/Self-Hosted
trace.span_registry.enabled
booleantrueif set, ongoing traces can be seen at https://<ui>/#/debug/tracezServerless/Dedicated/Self-Hosted
trace.zipkin.collector
stringthe address of a Zipkin instance to receive traces, as <host>:<port>. If no port is specified, 9411 will be used.Serverless/Dedicated/Self-Hosted
ui.database_locality_metadata.enabled
booleantrueif enabled shows extended locality data about databases and tables in DB Console which can be expensive to computeServerless/Dedicated/Self-Hosted
ui.display_timezone
enumerationetc/utcthe timezone used to format timestamps in the ui [etc/utc = 0, america/new_york = 1]Serverless/Dedicated/Self-Hosted
version
version23.2set the active cluster version in the format '<major>.<minor>'Serverless/Dedicated/Self-Hosted
diff --git a/src/current/_includes/cockroach-generated/release-23.2/sql/aggregates.md b/src/current/_includes/cockroach-generated/release-23.2/sql/aggregates.md new file mode 100644 index 00000000000..fe8a7b0a0bb --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-23.2/sql/aggregates.md @@ -0,0 +1,531 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_agg(arg1: bool) → bool[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bytes) → bytes[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: date) → date[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: decimal) → decimal[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: float) → float[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: inet) → inet[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: int) → int[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: interval) → interval[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: string) → string[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: time) → time[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamp) → timestamp[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamptz) → timestamptz[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: uuid) → uuid[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: anyenum) → anyenum[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: box2d) → box2d[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geography) → geography[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geometry) → geometry[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: jsonb) → jsonb[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: oid) → oid[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: pg_lsn) → pg_lsn[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: refcursor) → refcursor[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timetz) → timetz[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: tuple) → tuple[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: varbit) → varbit[]

Aggregates the selected values into an array.

+
Immutable
array_cat_agg(arg1: bool[]) → bool[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: bytes[]) → bytes[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: date[]) → date[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: decimal[]) → decimal[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: float[]) → float[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: inet[]) → inet[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: int[]) → int[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: interval[]) → interval[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: string[]) → string[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: time[]) → time[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timestamp[]) → timestamp[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timestamptz[]) → timestamptz[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: uuid[]) → uuid[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: anyenum[]) → anyenum[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: box2d[]) → box2d[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: geography[]) → geography[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: geometry[]) → geometry[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: jsonb[]) → jsonb[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: oid[]) → oid[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: pg_lsn[]) → pg_lsn[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: refcursor[]) → refcursor[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timetz[]) → timetz[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: tuple[]) → tuple[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: varbit[]) → varbit[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
avg(arg1: decimal) → decimal

Calculates the average of the selected values.

+
Immutable
avg(arg1: float) → float

Calculates the average of the selected values.

+
Immutable
avg(arg1: int) → decimal

Calculates the average of the selected values.

+
Immutable
avg(arg1: interval) → interval

Calculates the average of the selected values.

+
Immutable
bit_and(arg1: int) → int

Calculates the bitwise AND of all non-null input values, or null if none.

+
Immutable
bit_and(arg1: varbit) → varbit

Calculates the bitwise AND of all non-null input values, or null if none.

+
Immutable
bit_or(arg1: int) → int

Calculates the bitwise OR of all non-null input values, or null if none.

+
Immutable
bit_or(arg1: varbit) → varbit

Calculates the bitwise OR of all non-null input values, or null if none.

+
Immutable
bool_and(arg1: bool) → bool

Calculates the boolean value of ANDing all selected values.

+
Immutable
bool_or(arg1: bool) → bool

Calculates the boolean value of ORing all selected values.

+
Immutable
concat_agg(arg1: bytes) → bytes

Concatenates all selected values.

+
Immutable
concat_agg(arg1: string) → string

Concatenates all selected values.

+
Immutable
corr(arg1: decimal, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: decimal, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: decimal, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
count(arg1: anyelement) → int

Calculates the number of selected elements.

+
Immutable
count_rows() → int

Calculates the number of rows.

+
Immutable
covar_pop(arg1: decimal, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: decimal, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: decimal, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
every(arg1: bool) → bool

Calculates the boolean value of ANDing all selected values.

+
Immutable
json_agg(arg1: anyelement) → jsonb

Aggregates values as a JSON or JSONB array.

+
Stable
json_object_agg(arg1: string, arg2: anyelement) → jsonb

Aggregates values as a JSON or JSONB object.

+
Stable
jsonb_agg(arg1: anyelement) → jsonb

Aggregates values as a JSON or JSONB array.

+
Stable
jsonb_object_agg(arg1: string, arg2: anyelement) → jsonb

Aggregates values as a JSON or JSONB object.

+
Stable
max(arg1: bool) → bool

Identifies the maximum selected value.

+
Immutable
max(arg1: bytes) → bytes

Identifies the maximum selected value.

+
Immutable
max(arg1: date) → date

Identifies the maximum selected value.

+
Immutable
max(arg1: decimal) → decimal

Identifies the maximum selected value.

+
Immutable
max(arg1: float) → float

Identifies the maximum selected value.

+
Immutable
max(arg1: inet) → inet

Identifies the maximum selected value.

+
Immutable
max(arg1: int) → int

Identifies the maximum selected value.

+
Immutable
max(arg1: interval) → interval

Identifies the maximum selected value.

+
Immutable
max(arg1: string) → string

Identifies the maximum selected value.

+
Immutable
max(arg1: time) → time

Identifies the maximum selected value.

+
Immutable
max(arg1: timestamp) → timestamp

Identifies the maximum selected value.

+
Immutable
max(arg1: timestamptz) → timestamptz

Identifies the maximum selected value.

+
Immutable
max(arg1: uuid) → uuid

Identifies the maximum selected value.

+
Immutable
max(arg1: anyenum) → anyenum

Identifies the maximum selected value.

+
Immutable
max(arg1: box2d) → box2d

Identifies the maximum selected value.

+
Immutable
max(arg1: collatedstring{*}) → collatedstring{*}

Identifies the maximum selected value.

+
Immutable
max(arg1: geography) → geography

Identifies the maximum selected value.

+
Immutable
max(arg1: geometry) → geometry

Identifies the maximum selected value.

+
Immutable
max(arg1: jsonb) → jsonb

Identifies the maximum selected value.

+
Immutable
max(arg1: oid) → oid

Identifies the maximum selected value.

+
Immutable
max(arg1: pg_lsn) → pg_lsn

Identifies the maximum selected value.

+
Immutable
max(arg1: timetz) → timetz

Identifies the maximum selected value.

+
Immutable
max(arg1: varbit) → varbit

Identifies the maximum selected value.

+
Immutable
min(arg1: bool) → bool

Identifies the minimum selected value.

+
Immutable
min(arg1: bytes) → bytes

Identifies the minimum selected value.

+
Immutable
min(arg1: date) → date

Identifies the minimum selected value.

+
Immutable
min(arg1: decimal) → decimal

Identifies the minimum selected value.

+
Immutable
min(arg1: float) → float

Identifies the minimum selected value.

+
Immutable
min(arg1: inet) → inet

Identifies the minimum selected value.

+
Immutable
min(arg1: int) → int

Identifies the minimum selected value.

+
Immutable
min(arg1: interval) → interval

Identifies the minimum selected value.

+
Immutable
min(arg1: string) → string

Identifies the minimum selected value.

+
Immutable
min(arg1: time) → time

Identifies the minimum selected value.

+
Immutable
min(arg1: timestamp) → timestamp

Identifies the minimum selected value.

+
Immutable
min(arg1: timestamptz) → timestamptz

Identifies the minimum selected value.

+
Immutable
min(arg1: uuid) → uuid

Identifies the minimum selected value.

+
Immutable
min(arg1: anyenum) → anyenum

Identifies the minimum selected value.

+
Immutable
min(arg1: box2d) → box2d

Identifies the minimum selected value.

+
Immutable
min(arg1: collatedstring{*}) → collatedstring{*}

Identifies the minimum selected value.

+
Immutable
min(arg1: geography) → geography

Identifies the minimum selected value.

+
Immutable
min(arg1: geometry) → geometry

Identifies the minimum selected value.

+
Immutable
min(arg1: jsonb) → jsonb

Identifies the minimum selected value.

+
Immutable
min(arg1: oid) → oid

Identifies the minimum selected value.

+
Immutable
min(arg1: pg_lsn) → pg_lsn

Identifies the minimum selected value.

+
Immutable
min(arg1: timetz) → timetz

Identifies the minimum selected value.

+
Immutable
min(arg1: varbit) → varbit

Identifies the minimum selected value.

+
Immutable
percentile_cont(arg1: float) → float

Continuous percentile: returns a float corresponding to the specified fraction in the ordering, interpolating between adjacent input floats if needed.

+
Immutable
percentile_cont(arg1: float) → interval

Continuous percentile: returns an interval corresponding to the specified fraction in the ordering, interpolating between adjacent input intervals if needed.

+
Immutable
percentile_cont(arg1: float[]) → float[]

Continuous percentile: returns floats corresponding to the specified fractions in the ordering, interpolating between adjacent input floats if needed.

+
Immutable
percentile_cont(arg1: float[]) → interval[]

Continuous percentile: returns intervals corresponding to the specified fractions in the ordering, interpolating between adjacent input intervals if needed.

+
Immutable
percentile_disc(arg1: float) → anyelement

Discrete percentile: returns the first input value whose position in the ordering equals or exceeds the specified fraction.

+
Immutable
percentile_disc(arg1: float[]) → anyelement

Discrete percentile: returns input values whose position in the ordering equals or exceeds the specified fractions.

+
Immutable
regr_avgx(arg1: decimal, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: decimal, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: decimal, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_count(arg1: decimal, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: decimal, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: decimal, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_intercept(arg1: decimal, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: decimal, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: decimal, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_r2(arg1: decimal, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: decimal, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: decimal, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_slope(arg1: decimal, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: decimal, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: decimal, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_sxx(arg1: decimal, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: decimal, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: decimal, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
sqrdiff(arg1: decimal) → decimal

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
sqrdiff(arg1: float) → float

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
sqrdiff(arg1: int) → decimal

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
st_collect(arg1: geometry) → geometry

Collects geometries into a GeometryCollection or multi-type as appropriate.

+
Immutable
st_extent(arg1: geometry) → box2d

Forms a Box2D that encapsulates all provided geometries.

+
Immutable
st_makeline(arg1: geometry) → geometry

Forms a LineString from Point, MultiPoint or LineStrings. Other shapes will be ignored.

+
Immutable
st_memcollect(arg1: geometry) → geometry

Collects geometries into a GeometryCollection or multi-type as appropriate.

+
Immutable
st_memunion(arg1: geometry) → geometry

Applies a spatial union to the geometries provided.

+
Immutable
st_union(arg1: geometry) → geometry

Applies a spatial union to the geometries provided.

+
Immutable
stddev(arg1: decimal) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev(arg1: float) → float

Calculates the standard deviation of the selected values.

+
Immutable
stddev(arg1: int) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: decimal) → decimal

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: float) → float

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: int) → decimal

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: decimal) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: float) → float

Calculates the standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: int) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
string_agg(arg1: bytes, arg2: bytes) → bytes

Concatenates all selected values using the provided delimiter.

+
Immutable
string_agg(arg1: string, arg2: string) → string

Concatenates all selected values using the provided delimiter.

+
Immutable
sum(arg1: decimal) → decimal

Calculates the sum of the selected values.

+
Immutable
sum(arg1: float) → float

Calculates the sum of the selected values.

+
Immutable
sum(arg1: int) → decimal

Calculates the sum of the selected values.

+
Immutable
sum(arg1: interval) → interval

Calculates the sum of the selected values.

+
Immutable
sum_int(arg1: int) → int

Calculates the sum of the selected values.

+
Immutable
var_pop(arg1: decimal) → decimal

Calculates the population variance of the selected values.

+
Immutable
var_pop(arg1: float) → float

Calculates the population variance of the selected values.

+
Immutable
var_pop(arg1: int) → decimal

Calculates the population variance of the selected values.

+
Immutable
var_samp(arg1: decimal) → decimal

Calculates the variance of the selected values.

+
Immutable
var_samp(arg1: float) → float

Calculates the variance of the selected values.

+
Immutable
var_samp(arg1: int) → decimal

Calculates the variance of the selected values.

+
Immutable
variance(arg1: decimal) → decimal

Calculates the variance of the selected values.

+
Immutable
variance(arg1: float) → float

Calculates the variance of the selected values.

+
Immutable
variance(arg1: int) → decimal

Calculates the variance of the selected values.

+
Immutable
xor_agg(arg1: bytes) → bytes

Calculates the bitwise XOR of the selected values.

+
Immutable
xor_agg(arg1: int) → int

Calculates the bitwise XOR of the selected values.

+
Immutable
+ diff --git a/src/current/_includes/cockroach-generated/release-23.2/sql/functions.md b/src/current/_includes/cockroach-generated/release-23.2/sql/functions.md new file mode 100644 index 00000000000..370dfb4cfcd --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-23.2/sql/functions.md @@ -0,0 +1,3436 @@ +### Array functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_append(array: bool[], elem: bool) → bool[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: bytes[], elem: bytes) → bytes[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: date[], elem: date) → date[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: decimal[], elem: decimal) → decimal[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: float[], elem: float) → float[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: inet[], elem: inet) → inet[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: int[], elem: int) → int[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: interval[], elem: interval) → interval[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: string[], elem: string) → string[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: time[], elem: time) → time[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timestamp[], elem: timestamp) → timestamp[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timestamptz[], elem: timestamptz) → timestamptz[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: uuid[], elem: uuid) → uuid[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: anyenum[], elem: anyenum) → anyenum[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: box2d[], elem: box2d) → box2d[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: geography[], elem: geography) → geography[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: geometry[], elem: geometry) → geometry[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: jsonb[], elem: jsonb) → jsonb[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: oid[], elem: oid) → oid[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: pg_lsn[], elem: pg_lsn) → pg_lsn[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: refcursor[], elem: refcursor) → refcursor[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timetz[], elem: timetz) → timetz[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: tuple[], elem: tuple) → tuple[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: varbit[], elem: varbit) → varbit[]

Appends elem to array, returning the result.

+
Immutable
array_cat(left: bool[], right: bool[]) → bool[]

Appends two arrays.

+
Immutable
array_cat(left: bytes[], right: bytes[]) → bytes[]

Appends two arrays.

+
Immutable
array_cat(left: date[], right: date[]) → date[]

Appends two arrays.

+
Immutable
array_cat(left: decimal[], right: decimal[]) → decimal[]

Appends two arrays.

+
Immutable
array_cat(left: float[], right: float[]) → float[]

Appends two arrays.

+
Immutable
array_cat(left: inet[], right: inet[]) → inet[]

Appends two arrays.

+
Immutable
array_cat(left: int[], right: int[]) → int[]

Appends two arrays.

+
Immutable
array_cat(left: interval[], right: interval[]) → interval[]

Appends two arrays.

+
Immutable
array_cat(left: string[], right: string[]) → string[]

Appends two arrays.

+
Immutable
array_cat(left: time[], right: time[]) → time[]

Appends two arrays.

+
Immutable
array_cat(left: timestamp[], right: timestamp[]) → timestamp[]

Appends two arrays.

+
Immutable
array_cat(left: timestamptz[], right: timestamptz[]) → timestamptz[]

Appends two arrays.

+
Immutable
array_cat(left: uuid[], right: uuid[]) → uuid[]

Appends two arrays.

+
Immutable
array_cat(left: anyenum[], right: anyenum[]) → anyenum[]

Appends two arrays.

+
Immutable
array_cat(left: box2d[], right: box2d[]) → box2d[]

Appends two arrays.

+
Immutable
array_cat(left: geography[], right: geography[]) → geography[]

Appends two arrays.

+
Immutable
array_cat(left: geometry[], right: geometry[]) → geometry[]

Appends two arrays.

+
Immutable
array_cat(left: jsonb[], right: jsonb[]) → jsonb[]

Appends two arrays.

+
Immutable
array_cat(left: oid[], right: oid[]) → oid[]

Appends two arrays.

+
Immutable
array_cat(left: pg_lsn[], right: pg_lsn[]) → pg_lsn[]

Appends two arrays.

+
Immutable
array_cat(left: refcursor[], right: refcursor[]) → refcursor[]

Appends two arrays.

+
Immutable
array_cat(left: timetz[], right: timetz[]) → timetz[]

Appends two arrays.

+
Immutable
array_cat(left: tuple[], right: tuple[]) → tuple[]

Appends two arrays.

+
Immutable
array_cat(left: varbit[], right: varbit[]) → varbit[]

Appends two arrays.

+
Immutable
array_length(input: anyelement[], array_dimension: int) → int

Calculates the length of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
array_lower(input: anyelement[], array_dimension: int) → int

Calculates the minimum value of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
array_position(array: bool[], elem: bool) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: bytes[], elem: bytes) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: date[], elem: date) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: decimal[], elem: decimal) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: float[], elem: float) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: inet[], elem: inet) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: int[], elem: int) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: interval[], elem: interval) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: string[], elem: string) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: time[], elem: time) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timestamp[], elem: timestamp) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timestamptz[], elem: timestamptz) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: uuid[], elem: uuid) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: anyenum[], elem: anyenum) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: box2d[], elem: box2d) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: geography[], elem: geography) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: geometry[], elem: geometry) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: jsonb[], elem: jsonb) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: oid[], elem: oid) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: pg_lsn[], elem: pg_lsn) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: refcursor[], elem: refcursor) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timetz[], elem: timetz) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: tuple[], elem: tuple) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: varbit[], elem: varbit) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_positions(array: bool[], elem: bool) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: bytes[], elem: bytes) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: date[], elem: date) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: decimal[], elem: decimal) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: float[], elem: float) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: inet[], elem: inet) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: int[], elem: int) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: interval[], elem: interval) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: string[], elem: string) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: time[], elem: time) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timestamp[], elem: timestamp) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timestamptz[], elem: timestamptz) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: uuid[], elem: uuid) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: anyenum[], elem: anyenum) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: box2d[], elem: box2d) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: geography[], elem: geography) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: geometry[], elem: geometry) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: jsonb[], elem: jsonb) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: oid[], elem: oid) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: pg_lsn[], elem: pg_lsn) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: refcursor[], elem: refcursor) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timetz[], elem: timetz) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: tuple[], elem: tuple) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: varbit[], elem: varbit) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_prepend(elem: bool, array: bool[]) → bool[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: bytes, array: bytes[]) → bytes[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: date, array: date[]) → date[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: decimal, array: decimal[]) → decimal[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: float, array: float[]) → float[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: inet, array: inet[]) → inet[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: int, array: int[]) → int[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: interval, array: interval[]) → interval[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: string, array: string[]) → string[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: time, array: time[]) → time[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timestamp, array: timestamp[]) → timestamp[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timestamptz, array: timestamptz[]) → timestamptz[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: uuid, array: uuid[]) → uuid[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: anyenum, array: anyenum[]) → anyenum[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: box2d, array: box2d[]) → box2d[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: geography, array: geography[]) → geography[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: geometry, array: geometry[]) → geometry[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: jsonb, array: jsonb[]) → jsonb[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: oid, array: oid[]) → oid[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: pg_lsn, array: pg_lsn[]) → pg_lsn[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: refcursor, array: refcursor[]) → refcursor[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timetz, array: timetz[]) → timetz[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: tuple, array: tuple[]) → tuple[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: varbit, array: varbit[]) → varbit[]

Prepends elem to array, returning the result.

+
Immutable
array_remove(array: bool[], elem: bool) → bool[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: bytes[], elem: bytes) → bytes[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: date[], elem: date) → date[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: decimal[], elem: decimal) → decimal[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: float[], elem: float) → float[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: inet[], elem: inet) → inet[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: int[], elem: int) → int[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: interval[], elem: interval) → interval[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: string[], elem: string) → string[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: time[], elem: time) → time[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timestamp[], elem: timestamp) → timestamp[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timestamptz[], elem: timestamptz) → timestamptz[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: uuid[], elem: uuid) → uuid[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: anyenum[], elem: anyenum) → anyenum[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: box2d[], elem: box2d) → box2d[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: geography[], elem: geography) → geography[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: geometry[], elem: geometry) → geometry[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: jsonb[], elem: jsonb) → jsonb[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: oid[], elem: oid) → oid[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: pg_lsn[], elem: pg_lsn) → pg_lsn[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: refcursor[], elem: refcursor) → refcursor[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timetz[], elem: timetz) → timetz[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: tuple[], elem: tuple) → tuple[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: varbit[], elem: varbit) → varbit[]

Remove from array all elements equal to elem.

+
Immutable
array_replace(array: bool[], toreplace: bool, replacewith: bool) → bool[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: bytes[], toreplace: bytes, replacewith: bytes) → bytes[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: date[], toreplace: date, replacewith: date) → date[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: decimal[], toreplace: decimal, replacewith: decimal) → decimal[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: float[], toreplace: float, replacewith: float) → float[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: inet[], toreplace: inet, replacewith: inet) → inet[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: int[], toreplace: int, replacewith: int) → int[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: interval[], toreplace: interval, replacewith: interval) → interval[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: string[], toreplace: string, replacewith: string) → string[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: time[], toreplace: time, replacewith: time) → time[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timestamp[], toreplace: timestamp, replacewith: timestamp) → timestamp[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timestamptz[], toreplace: timestamptz, replacewith: timestamptz) → timestamptz[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: uuid[], toreplace: uuid, replacewith: uuid) → uuid[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: anyenum[], toreplace: anyenum, replacewith: anyenum) → anyenum[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: box2d[], toreplace: box2d, replacewith: box2d) → box2d[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: geography[], toreplace: geography, replacewith: geography) → geography[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: geometry[], toreplace: geometry, replacewith: geometry) → geometry[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: jsonb[], toreplace: jsonb, replacewith: jsonb) → jsonb[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: oid[], toreplace: oid, replacewith: oid) → oid[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: pg_lsn[], toreplace: pg_lsn, replacewith: pg_lsn) → pg_lsn[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: refcursor[], toreplace: refcursor, replacewith: refcursor) → refcursor[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timetz[], toreplace: timetz, replacewith: timetz) → timetz[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: tuple[], toreplace: tuple, replacewith: tuple) → tuple[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: varbit[], toreplace: varbit, replacewith: varbit) → varbit[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_to_string(input: anyelement[], delim: string) → string

Join an array into a string with a delimiter.

+
Stable
array_to_string(input: anyelement[], delimiter: string, null: string) → string

Join an array into a string with a delimiter, replacing NULLs with a null string.

+
Stable
array_upper(input: anyelement[], array_dimension: int) → int

Calculates the maximum value of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
cardinality(input: anyelement[]) → int

Calculates the number of elements contained in input.

+
Immutable
jsonb_array_to_string_array(input: jsonb) → string[]

Convert a JSONB array into a string array.

+
Immutable
string_to_array(str: string, delimiter: string) → string[]

Split a string into components on a delimiter.

+
Immutable
string_to_array(str: string, delimiter: string, null: string) → string[]

Split a string into components on a delimiter with a specified string to consider NULL.

+
Immutable
+ +### BOOL functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
ilike_escape(unescaped: string, pattern: string, escape: string) → bool

Matches unescaped case-insensitively with pattern using escape as an escape token.

+
Immutable
inet_contained_by_or_equals(val: inet, container: inet) → bool

Test for subnet inclusion or equality, using only the network parts of the addresses. The host part of the addresses is ignored.

+
Immutable
inet_contains_or_equals(container: inet, val: inet) → bool

Test for subnet inclusion or equality, using only the network parts of the addresses. The host part of the addresses is ignored.

+
Immutable
inet_same_family(val: inet, val: inet) → bool

Checks if two IP addresses are of the same IP family.

+
Immutable
like_escape(unescaped: string, pattern: string, escape: string) → bool

Matches unescaped with pattern using escape as an escape token.

+
Immutable
not_ilike_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped does not match case-insensitively with pattern using escape as an escape token.

+
Immutable
not_like_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped does not match with pattern using escape as an escape token.

+
Immutable
not_similar_to_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped does not match with pattern using escape as an escape token.

+
Immutable
+ +### Comparison functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
greatest(anyelement...) → anyelement

Returns the element with the greatest value.

+
Immutable
least(anyelement...) → anyelement

Returns the element with the lowest value.

+
Immutable
num_nonnulls(anyelement...) → int

Returns the number of nonnull arguments.

+
Immutable
num_nulls(anyelement...) → int

Returns the number of null arguments.

+
Immutable
+ +### Cryptographic functions + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
crypt(password: string, salt: string) → string

Generates a hash based on a password and salt. The hash algorithm and number of rounds if applicable are encoded in the salt.

+
Immutable
decrypt(data: bytes, key: bytes, type: string) → bytes

Decrypt data with key using the cipher method specified by type.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
decrypt_iv(data: bytes, key: bytes, iv: bytes, type: string) → bytes

Decrypt data with key using the cipher method specified by type. If the mode is CBC, the provided iv will be used. Otherwise, it will be ignored.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
digest(data: bytes, type: string) → bytes

Computes a binary hash of the given data. type is the algorithm to use (md5, sha1, sha224, sha256, sha384, or sha512).

+
Immutable
digest(data: string, type: string) → bytes

Computes a binary hash of the given data. type is the algorithm to use (md5, sha1, sha224, sha256, sha384, or sha512).

+
Immutable
encrypt(data: bytes, key: bytes, type: string) → bytes

Encrypt data with key using the cipher method specified by type.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
encrypt_iv(data: bytes, key: bytes, iv: bytes, type: string) → bytes

Encrypt data with key using the cipher method specified by type. If the mode is CBC, the provided iv will be used. Otherwise, it will be ignored.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
gen_random_bytes(count: int) → bytes

Returns count cryptographically strong random bytes. At most 1024 bytes can be extracted at a time.

+
Volatile
gen_salt(type: string) → string

Generates a salt for input into the crypt function using the default number of rounds.

+
Volatile
gen_salt(type: string, iter_count: int) → string

Generates a salt for input into the crypt function using iter_count number of rounds.

+
Volatile
hmac(data: bytes, key: bytes, type: string) → bytes

Calculates hashed MAC for data with key key. type is the same as in digest().

+
Immutable
hmac(data: string, key: string, type: string) → bytes

Calculates hashed MAC for data with key key. type is the same as in digest().

+
Immutable
+ +### DECIMAL functions + + + + + +
Function → ReturnsDescriptionVolatility
hlc_to_timestamp(hlc: decimal) → timestamptz

Returns a TimestampTZ representation of a CockroachDB HLC in decimal form.

+

Note that a TimestampTZ has less precision than a CockroachDB HLC. It is intended as +a convenience function to display HLCs in a print-friendly form. Use the decimal +value if you rely on the HLC for accuracy.

+
Immutable
+ +### Date and time functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
age(end: timestamptz, begin: timestamptz) → interval

Calculates the interval between begin and end, normalized into years, months and days.

+

Note this may not be an accurate time span since years and months are normalized +from days, and years and months are out of context. To avoid normalizing days into +months and years, use the timestamptz subtraction operator.

+
Immutable
age(val: timestamptz) → interval

Calculates the interval between val and the current time, normalized into years, months and days.

+

Note this may not be an accurate time span since years and months are normalized +from days, and years and months are out of context. To avoid normalizing days into +months and years, use now() - timestamptz.

+
Stable
clock_timestamp() → timestamp

Returns the current system time on one of the cluster nodes.

+
Volatile
clock_timestamp() → timestamptz

Returns the current system time on one of the cluster nodes.

+
Volatile
current_date() → date

Returns the date of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
current_timestamp(precision: int) → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp(precision: int) → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp(precision: int) → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
date_part(element: string, input: date) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: interval) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, +month, day, hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: time) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: timestamp) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: timestamptz) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Stable
date_part(element: string, input: timetz) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Immutable
date_trunc(element: string, input: date) → timestamptz

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: interval) → interval

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: time) → interval

Truncates input to precision element. Sets all fields that are less +significant than element to zero.

+

Compatible elements: hour, minute, second, millisecond, microsecond.

+
Immutable
date_trunc(element: string, input: timestamp) → timestamp

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Immutable
date_trunc(element: string, input: timestamptz) → timestamptz

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: timestamptz, timezone: string) → timestamptz

Truncates input to precision element in the specified timezone. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
experimental_follower_read_timestamp() → timestamptz

Same as follower_read_timestamp. This name is deprecated.

+
Volatile
experimental_strftime(input: date, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strftime(input: timestamp, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strftime(input: timestamptz, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strptime(input: string, format: string) → timestamptz

Returns input as a timestamptz using format (which uses standard strptime formatting).

+
Immutable
extract(element: string, input: date) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: interval) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, +month, day, hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: time) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: timestamp) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: timestamptz) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Stable
extract(element: string, input: timetz) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Immutable
extract_duration(element: string, input: interval) → int

Extracts element from input. +Compatible elements: hour, minute, second, millisecond, microsecond. +This is deprecated in favor of extract which supports duration.

+
Immutable
follower_read_timestamp() → timestamptz

Returns a timestamp which is very likely to be safe to perform +against a follower replica.

+

This function is intended to be used with an AS OF SYSTEM TIME clause to perform +historical reads against a time which is recent but sufficiently old for reads +to be performed against the closest replica as opposed to the currently +leaseholder for a given range.

+

Note that this function requires an enterprise license on a CCL distribution to +return a result that is less likely the closest replica. It is otherwise +hardcoded as -4.8s from the statement time, which may not result in reading from the +nearest replica.

+
Volatile
localtimestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtimestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp(precision: int) → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp(precision: int) → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtimestamp(precision: int) → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
make_date(year: int, month: int, day: int) → date

Create date (formatted according to ISO 8601) from year, month, and day fields (negative years signify BC).

+
Immutable
make_timestamp(year: int, month: int, day: int, hour: int, min: int, sec: float) → timestamp

Create timestamp (formatted according to ISO 8601) from year, month, day, hour, minute, and seconds fields (negative years signify BC).

+
Immutable
make_timestamptz(year: int, month: int, day: int, hour: int, min: int, sec: float) → timestamptz

Create timestamp (formatted according to ISO 8601) with time zone from year, month, day, hour, minute and seconds fields (negative years signify BC). If timezone is not specified, the current time zone is used.

+
Stable
make_timestamptz(year: int, month: int, day: int, hour: int, min: int, sec: float, timezone: string) → timestamptz

Create timestamp (formatted according to ISO 8601) with time zone from year, month, day, hour, minute and seconds fields (negative years signify BC). If timezone is not specified, the current time zone is used.

+
Stable
now() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
now() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
now() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
overlaps(s1: date, e1: date, s2: date, e2: date) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: date, e1: interval, s2: date, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: time, e1: interval, s2: time, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: time, e1: time, s2: time, e2: time) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamp, e1: interval, s2: timestamp, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamp, e1: timestamp, s2: timestamp, e2: timestamp) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamptz, e1: interval, s2: timestamptz, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Stable
overlaps(s1: timestamptz, e1: timestamptz, s2: timestamptz, e2: timestamptz) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timetz, e1: interval, s2: timetz, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timetz, e1: timetz, s2: timetz, e2: timetz) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
statement_timestamp() → timestamp

Returns the start time of the current statement.

+
Stable
statement_timestamp() → timestamptz

Returns the start time of the current statement.

+
Stable
strftime(input: date, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strftime(input: timestamp, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strftime(input: timestamptz, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strptime(input: string, format: string) → timestamptz

Returns input as a timestamptz using format (which uses standard strptime formatting).

+
Immutable
timeofday() → string

Returns the current system time on one of the cluster nodes as a string.

+
Stable
timezone(timezone: string, time: time) → timetz

Treat given time without time zone as located in the specified time zone.

+
Stable
timezone(timezone: string, timestamp: timestamp) → timestamptz

Treat given time stamp without time zone as located in the specified time zone.

+
Immutable
timezone(timezone: string, timestamptz: timestamptz) → timestamp

Convert given time stamp with time zone to the new time zone, with no time zone designation.

+
Immutable
timezone(timezone: string, timestamptz_string: string) → timestamp

Convert given time stamp with time zone to the new time zone, with no time zone designation.

+
Stable
timezone(timezone: string, timetz: timetz) → timetz

Convert given time with time zone to the new time zone.

+
Stable
to_char(date: date) → string

Convert a date to a string assuming the ISO, MDY DateStyle.

+
Immutable
to_char(date: date, format: string) → string

Convert a date to a string using the given format.

+
Stable
to_char(interval: interval) → string

Convert an interval to a string assuming the Postgres IntervalStyle.

+
Immutable
to_char(interval: interval, format: string) → string

Convert an interval to a string using the given format.

+
Stable
to_char(timestamp: timestamp) → string

Convert a timestamp to a string assuming the ISO, MDY DateStyle.

+
Immutable
to_char(timestamp: timestamp, format: string) → string

Convert a timestamp to a string using the given format.

+
Stable
to_char(timestamptz: timestamptz, format: string) → string

Convert a timestamp with time zone to a string using the given format.

+
Stable
to_timestamp(timestamp: float) → timestamptz

Convert Unix epoch (seconds since 1970-01-01 00:00:00+00) to timestamp with time zone.

+
Immutable
transaction_timestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
transaction_timestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
transaction_timestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
with_max_staleness(max_staleness: interval) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp within the staleness +bound that allows execution of the reads at the nearest available replica without blocking.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_max_staleness(max_staleness: interval, nearest_only: bool) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp within the staleness +bound that allows execution of the reads at the nearest available replica without blocking.

+

If nearest_only is set to true, reads that cannot be served using the nearest +available replica will error.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_min_timestamp(min_timestamp: timestamptz) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp before the min_timestamp +that allows execution of the reads at the nearest available replica without blocking.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_min_timestamp(min_timestamp: timestamptz, nearest_only: bool) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp before the min_timestamp +that allows execution of the reads at the nearest available replica without blocking.

+

If nearest_only is set to true, reads that cannot be served using the nearest +available replica will error.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
+ +### Enum functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
enum_first(val: anyenum) → anyenum

Returns the first value of the input enum type.

+
Stable
enum_last(val: anyenum) → anyenum

Returns the last value of the input enum type.

+
Stable
enum_range(lower: anyenum, upper: anyenum) → anyenum[]

Returns all values of the input enum in an ordered array between the two arguments (inclusive).

+
Stable
enum_range(val: anyenum) → anyenum[]

Returns all values of the input enum in an ordered array.

+
Stable
+ +### FLOAT functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
abs(val: decimal) → decimal

Calculates the absolute value of val.

+
Immutable
abs(val: float) → float

Calculates the absolute value of val.

+
Immutable
abs(val: int) → int

Calculates the absolute value of val.

+
Immutable
acos(val: float) → float

Calculates the inverse cosine of val.

+
Immutable
acosd(val: float) → float

Calculates the inverse cosine of val with the result in degrees

+
Immutable
acosh(val: float) → float

Calculates the inverse hyperbolic cosine of val.

+
Immutable
asin(val: float) → float

Calculates the inverse sine of val.

+
Immutable
asind(val: float) → float

Calculates the inverse sine of val with the result in degrees.

+
Immutable
asinh(val: float) → float

Calculates the inverse hyperbolic sine of val.

+
Immutable
atan(val: float) → float

Calculates the inverse tangent of val.

+
Immutable
atan2(x: float, y: float) → float

Calculates the inverse tangent of x/y.

+
Immutable
atan2d(x: float, y: float) → float

Calculates the inverse tangent of x/y with the result in degrees

+
Immutable
atand(val: float) → float

Calculates the inverse tangent of val with the result in degrees.

+
Immutable
atanh(val: float) → float

Calculates the inverse hyperbolic tangent of val.

+
Immutable
cbrt(val: decimal) → decimal

Calculates the cube root (∛) of val.

+
Immutable
cbrt(val: float) → float

Calculates the cube root (∛) of val.

+
Immutable
ceil(val: decimal) → decimal

Calculates the smallest integer not smaller than val.

+
Immutable
ceil(val: float) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceil(val: int) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: decimal) → decimal

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: float) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: int) → float

Calculates the smallest integer not smaller than val.

+
Immutable
cos(val: float) → float

Calculates the cosine of val.

+
Immutable
cosd(val: float) → float

Calculates the cosine of val where val is in degrees.

+
Immutable
cosh(val: float) → float

Calculates the hyperbolic cosine of val.

+
Immutable
cot(val: float) → float

Calculates the cotangent of val.

+
Immutable
cotd(val: float) → float

Calculates the cotangent of val where val is in degrees.

+
Immutable
degrees(val: float) → float

Converts val as a radian value to a degree value.

+
Immutable
div(x: decimal, y: decimal) → decimal

Calculates the integer quotient of x/y.

+
Immutable
div(x: float, y: float) → float

Calculates the integer quotient of x/y.

+
Immutable
div(x: int, y: int) → int

Calculates the integer quotient of x/y.

+
Immutable
exp(val: decimal) → decimal

Calculates e ^ val.

+
Immutable
exp(val: float) → float

Calculates e ^ val.

+
Immutable
floor(val: decimal) → decimal

Calculates the largest integer not greater than val.

+
Immutable
floor(val: float) → float

Calculates the largest integer not greater than val.

+
Immutable
floor(val: int) → float

Calculates the largest integer not greater than val.

+
Immutable
isnan(val: decimal) → bool

Returns true if val is NaN, false otherwise.

+
Immutable
isnan(val: float) → bool

Returns true if val is NaN, false otherwise.

+
Immutable
ln(val: decimal) → decimal

Calculates the natural log of val.

+
Immutable
ln(val: float) → float

Calculates the natural log of val.

+
Immutable
log(b: decimal, x: decimal) → decimal

Calculates the base b log of x.

+
Immutable
log(b: float, x: float) → float

Calculates the base b log of x.

+
Immutable
log(val: decimal) → decimal

Calculates the base 10 log of val.

+
Immutable
log(val: float) → float

Calculates the base 10 log of val.

+
Immutable
mod(x: decimal, y: decimal) → decimal

Calculates x%y.

+
Immutable
mod(x: float, y: float) → float

Calculates x%y.

+
Immutable
mod(x: int, y: int) → int

Calculates x%y.

+
Immutable
pi() → float

Returns the value for pi (3.141592653589793).

+
Immutable
pow(x: decimal, y: decimal) → decimal

Calculates x^y.

+
Immutable
pow(x: float, y: float) → float

Calculates x^y.

+
Immutable
pow(x: int, y: int) → int

Calculates x^y.

+
Immutable
power(x: decimal, y: decimal) → decimal

Calculates x^y.

+
Immutable
power(x: float, y: float) → float

Calculates x^y.

+
Immutable
power(x: int, y: int) → int

Calculates x^y.

+
Immutable
radians(val: float) → float

Converts val as a degree value to a radians value.

+
Immutable
random() → float

Returns a random floating-point number between 0 (inclusive) and 1 (exclusive). Note that the value contains at most 53 bits of randomness.

+
Volatile
round(input: decimal, decimal_accuracy: int) → decimal

Keeps decimal_accuracy number of figures to the right of the zero position in input using half away from zero rounding. If decimal_accuracy is not in the range -2^31…(2^31-1), the results are undefined.

+
Immutable
round(input: float, decimal_accuracy: int) → float

Keeps decimal_accuracy number of figures to the right of the zero position in input using half to even (banker’s) rounding.

+
Immutable
round(val: decimal) → decimal

Rounds val to the nearest integer, half away from zero: round(+/-2.4) = +/-2, round(+/-2.5) = +/-3.

+
Immutable
round(val: float) → float

Rounds val to the nearest integer using half to even (banker’s) rounding.

+
Immutable
sign(val: decimal) → decimal

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sign(val: float) → float

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sign(val: int) → int

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sin(val: float) → float

Calculates the sine of val.

+
Immutable
sind(val: float) → float

Calculates the sine of val where val is in degrees.

+
Immutable
sinh(val: float) → float

Calculates the hyperbolic sine of val.

+
Immutable
sqrt(val: decimal) → decimal

Calculates the square root of val.

+
Immutable
sqrt(val: float) → float

Calculates the square root of val.

+
Immutable
tan(val: float) → float

Calculates the tangent of val.

+
Immutable
tand(val: float) → float

Calculates the tangent of val where val is in degrees.

+
Immutable
tanh(val: float) → float

Calculates the hyperbolic tangent of val.

+
Immutable
trunc(val: decimal) → decimal

Truncates the decimal values of val.

+
Immutable
trunc(val: decimal, scale: int) → decimal

Truncates val to scale decimal places.

+
Immutable
trunc(val: float) → float

Truncates the decimal values of val.

+
Immutable
+ +### Full Text Search functions + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
phraseto_tsquery(config: string, text: string) → tsquery

Converts text to a tsquery, normalizing words according to the specified configuration. The <-> operator is inserted between each token in the input.

+
Immutable
phraseto_tsquery(text: string) → tsquery

Converts text to a tsquery, normalizing words according to the default configuration. The <-> operator is inserted between each token in the input.

+
Stable
plainto_tsquery(config: string, text: string) → tsquery

Converts text to a tsquery, normalizing words according to the specified configuration. The & operator is inserted between each token in the input.

+
Immutable
plainto_tsquery(text: string) → tsquery

Converts text to a tsquery, normalizing words according to the default configuration. The & operator is inserted between each token in the input.

+
Stable
to_tsquery(config: string, text: string) → tsquery

Converts the input text into a tsquery by normalizing each word in the input according to the specified configuration. The input must already be formatted like a tsquery, in other words, subsequent tokens must be connected by a tsquery operator (&, |, <->, !).

+
Immutable
to_tsquery(text: string) → tsquery

Converts the input text into a tsquery by normalizing each word in the input according to the default configuration. The input must already be formatted like a tsquery, in other words, subsequent tokens must be connected by a tsquery operator (&, |, <->, !).

+
Stable
to_tsvector(config: string, text: string) → tsvector

Converts text to a tsvector, normalizing words according to the specified configuration. Position information is included in the result.

+
Immutable
to_tsvector(text: string) → tsvector

Converts text to a tsvector, normalizing words according to the default configuration. Position information is included in the result.

+
Stable
ts_parse(parser_name: string, document: string) → tuple{int AS tokid, string AS token}

ts_parse parses the given document and returns a series of records, one for each token produced by parsing. Each record includes a tokid showing the assigned token type and a token which is the text of the token.

+
Stable
ts_rank(vector: tsvector, query: tsquery) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(vector: tsvector, query: tsquery, normalization: int) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(weights: float[], vector: tsvector, query: tsquery) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(weights: float[], vector: tsvector, query: tsquery, normalization: int) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
+ +### Fuzzy String Matching functions + + + + + + + +
Function → ReturnsDescriptionVolatility
levenshtein(source: string, target: string) → int

Calculates the Levenshtein distance between two strings. Maximum input length is 255 characters.

+
Immutable
levenshtein(source: string, target: string, ins_cost: int, del_cost: int, sub_cost: int) → int

Calculates the Levenshtein distance between two strings. The cost parameters specify how much to charge for each edit operation. Maximum input length is 255 characters.

+
Immutable
soundex(source: string) → string

Convert a string to its Soundex code.

+
Immutable
+ +### ID generation functions + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
experimental_uuid_v4() → bytes

Returns a UUID.

+
Volatile
gen_random_ulid() → uuid

Generates a random ULID and returns it as a value of UUID type.

+
Volatile
gen_random_uuid() → uuid

Generates a random version 4 UUID, and returns it as a value of UUID type.

+
Volatile
unique_rowid() → int

Returns a unique ID used by CockroachDB to generate unique row IDs if a Primary Key isn’t defined for the table. The value is a combination of the insert timestamp and the ID of the node executing the statement, which guarantees this combination is globally unique. However, there can be gaps and the order is not completely guaranteed.

+
Volatile
unordered_unique_rowid() → int

Returns a unique ID. The value is a combination of the insert timestamp (bit-reversed) and the ID of the node executing the statement, which guarantees this combination is globally unique. The way it is generated is statistically likely to not have any ordering relative to previously generated values.

+
Volatile
uuid_generate_v1() → uuid

Generates a version 1 UUID, and returns it as a value of UUID type. To avoid exposing the server’s real MAC address, this uses a random MAC address and a timestamp. Essentially, this is an alias for uuid_generate_v1mc.

+
Volatile
uuid_generate_v1mc() → uuid

Generates a version 1 UUID, and returns it as a value of UUID type. This uses a random MAC address and a timestamp.

+
Volatile
uuid_generate_v3(namespace: uuid, name: string) → uuid

Generates a version 3 UUID in the given namespace using the specified input name, with md5 as the hashing method. The namespace should be one of the special constants produced by the uuid_ns_*() functions.

+
Immutable
uuid_generate_v4() → uuid

Generates a random version 4 UUID, and returns it as a value of UUID type.

+
Volatile
uuid_generate_v5(namespace: uuid, name: string) → uuid

Generates a version 5 UUID in the given namespace using the specified input name. This is similar to a version 3 UUID, except it uses SHA-1 for hashing.

+
Immutable
uuid_nil() → uuid

Returns a nil UUID constant.

+
Immutable
uuid_ns_dns() → uuid

Returns a constant designating the DNS namespace for UUIDs.

+
Immutable
uuid_ns_oid() → uuid

Returns a constant designating the ISO object identifier (OID) namespace for UUIDs. These are unrelated to the OID type used internally in the database.

+
Immutable
uuid_ns_url() → uuid

Returns a constant designating the URL namespace for UUIDs.

+
Immutable
uuid_ns_x500() → uuid

Returns a constant designating the X.500 distinguished name (DN) namespace for UUIDs.

+
Immutable
uuid_v4() → bytes

Returns a UUID.

+
Volatile
+ +### INET functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
abbrev(val: inet) → string

Converts the combined IP address and prefix length to an abbreviated display format as text. For INET types, this will omit the prefix length if it’s not the default (32 for IPv4, 128 for IPv6)

+

For example, abbrev('192.168.1.2/24') returns '192.168.1.2/24'

+
Immutable
broadcast(val: inet) → inet

Gets the broadcast address for the network address represented by the value.

+

For example, broadcast('192.168.1.2/24') returns '192.168.1.255/24'

+
Immutable
family(val: inet) → int

Extracts the IP family of the value; 4 for IPv4, 6 for IPv6.

+

For example, family('::1') returns 6

+
Immutable
host(val: inet) → string

Extracts the address part of the combined address/prefixlen value as text.

+

For example, host('192.168.1.2/16') returns '192.168.1.2'

+
Immutable
hostmask(val: inet) → inet

Creates an IP host mask corresponding to the prefix length in the value.

+

For example, hostmask('192.168.1.2/16') returns '0.0.255.255'

+
Immutable
masklen(val: inet) → int

Retrieves the prefix length stored in the value.

+

For example, masklen('192.168.1.2/16') returns 16

+
Immutable
netmask(val: inet) → inet

Creates an IP network mask corresponding to the prefix length in the value.

+

For example, netmask('192.168.1.2/16') returns '255.255.0.0'

+
Immutable
set_masklen(val: inet, prefixlen: int) → inet

Sets the prefix length of val to prefixlen.

+

For example, set_masklen('192.168.1.2', 16) returns '192.168.1.2/16'.

+
Immutable
+ +### INT functions + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
crc32c(bytes...) → int

Calculates the CRC-32 hash using the Castagnoli polynomial.

+
Leakproof
crc32c(string...) → int

Calculates the CRC-32 hash using the Castagnoli polynomial.

+
Leakproof
crc32ieee(bytes...) → int

Calculates the CRC-32 hash using the IEEE polynomial.

+
Leakproof
crc32ieee(string...) → int

Calculates the CRC-32 hash using the IEEE polynomial.

+
Leakproof
fnv32(bytes...) → int

Calculates the 32-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv32(string...) → int

Calculates the 32-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv32a(bytes...) → int

Calculates the 32-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv32a(string...) → int

Calculates the 32-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv64(bytes...) → int

Calculates the 64-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv64(string...) → int

Calculates the 64-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv64a(bytes...) → int

Calculates the 64-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv64a(string...) → int

Calculates the 64-bit FNV-1a hash value of a set of values.

+
Leakproof
width_bucket(operand: decimal, b1: decimal, b2: decimal, count: int) → int

return the bucket number to which operand would be assigned in a histogram having count equal-width buckets spanning the range b1 to b2.

+
Immutable
width_bucket(operand: int, b1: int, b2: int, count: int) → int

return the bucket number to which operand would be assigned in a histogram having count equal-width buckets spanning the range b1 to b2.

+
Immutable
width_bucket(operand: anyelement, thresholds: anyelement[]) → int

return the bucket number to which operand would be assigned given an array listing the lower bounds of the buckets; returns 0 for an input less than the first lower bound; the thresholds array must be sorted, smallest first, or unexpected results will be obtained

+
Immutable
+ +### JSONB functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_to_json(array: anyelement[]) → jsonb

Returns the array as JSON or JSONB.

+
Stable
array_to_json(array: anyelement[], pretty_bool: bool) → jsonb

Returns the array as JSON or JSONB.

+
Stable
json_array_elements(input: jsonb) → jsonb

Expands a JSON array to a set of JSON values.

+
Immutable
json_array_elements_text(input: jsonb) → string

Expands a JSON array to a set of text values.

+
Immutable
json_array_length(json: jsonb) → int

Returns the number of elements in the outermost JSON or JSONB array.

+
Immutable
json_build_array(anyelement...) → jsonb

Builds a possibly-heterogeneously-typed JSON or JSONB array out of a variadic argument list.

+
Stable
json_build_object(anyelement...) → jsonb

Builds a JSON object out of a variadic argument list.

+
Stable
json_each(input: jsonb) → tuple{string AS key, jsonb AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs.

+
Immutable
json_each_text(input: jsonb) → tuple{string AS key, string AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs. The returned values will be of type text.

+
Immutable
json_extract_path(jsonb, string...) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
json_extract_path_text(jsonb, string...) → string

Returns the JSON value as text pointed to by the variadic arguments.

+
Immutable
json_object(keys: string[], values: string[]) → jsonb

This form of json_object takes keys and values pairwise from two separate arrays. In all other respects it is identical to the one-argument form.

+
Immutable
json_object(texts: string[]) → jsonb

Builds a JSON or JSONB object out of a text array. The array must have exactly one dimension with an even number of members, in which case they are taken as alternating key/value pairs.

+
Immutable
json_populate_record(base: anyelement, from_json: jsonb) → anyelement

Expands the object in from_json to a row whose columns match the record type defined by base.

+
Stable
json_populate_recordset(base: anyelement, from_json: jsonb) → anyelement

Expands the outermost array of objects in from_json to a set of rows whose columns match the record type defined by base

+
Stable
json_remove_path(val: jsonb, path: string[]) → jsonb

Remove the specified path from the JSON object.

+
Immutable
json_set(val: jsonb, path: string[], to: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
json_set(val: jsonb, path: string[], to: jsonb, create_missing: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If create_missing is false, new keys will not be inserted to objects and values will not be prepended or appended to arrays.

+
Immutable
json_strip_nulls(from_json: jsonb) → jsonb

Returns from_json with all object fields that have null values omitted. Other null values are untouched.

+
Immutable
json_typeof(val: jsonb) → string

Returns the type of the outermost JSON value as a text string.

+
Immutable
json_valid(string: string) → bool

Returns whether the given string is valid JSON.

+
Immutable
jsonb_array_elements(input: jsonb) → jsonb

Expands a JSON array to a set of JSON values.

+
Immutable
jsonb_array_elements_text(input: jsonb) → string

Expands a JSON array to a set of text values.

+
Immutable
jsonb_array_length(json: jsonb) → int

Returns the number of elements in the outermost JSON or JSONB array.

+
Immutable
jsonb_build_array(anyelement...) → jsonb

Builds a possibly-heterogeneously-typed JSON or JSONB array out of a variadic argument list.

+
Stable
jsonb_build_object(anyelement...) → jsonb

Builds a JSON object out of a variadic argument list.

+
Stable
jsonb_each(input: jsonb) → tuple{string AS key, jsonb AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs.

+
Immutable
jsonb_each_text(input: jsonb) → tuple{string AS key, string AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs. The returned values will be of type text.

+
Immutable
jsonb_exists_any(json: jsonb, array: string[]) → bool

Returns whether any of the strings in the text array exist as top-level keys or array elements

+
Immutable
jsonb_extract_path(jsonb, string...) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
jsonb_extract_path_text(jsonb, string...) → string

Returns the JSON value as text pointed to by the variadic arguments.

+
Immutable
jsonb_insert(target: jsonb, path: string[], new_val: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments. new_val will be inserted before path target.

+
Immutable
jsonb_insert(target: jsonb, path: string[], new_val: jsonb, insert_after: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If insert_after is true (default is false), new_val will be inserted after path target.

+
Immutable
jsonb_object(keys: string[], values: string[]) → jsonb

This form of json_object takes keys and values pairwise from two separate arrays. In all other respects it is identical to the one-argument form.

+
Immutable
jsonb_object(texts: string[]) → jsonb

Builds a JSON or JSONB object out of a text array. The array must have exactly one dimension with an even number of members, in which case they are taken as alternating key/value pairs.

+
Immutable
jsonb_populate_record(base: anyelement, from_json: jsonb) → anyelement

Expands the object in from_json to a row whose columns match the record type defined by base.

+
Stable
jsonb_populate_recordset(base: anyelement, from_json: jsonb) → anyelement

Expands the outermost array of objects in from_json to a set of rows whose columns match the record type defined by base

+
Stable
jsonb_pretty(val: jsonb) → string

Returns the given JSON value as a STRING indented and with newlines.

+
Immutable
jsonb_set(val: jsonb, path: string[], to: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
jsonb_set(val: jsonb, path: string[], to: jsonb, create_missing: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If create_missing is false, new keys will not be inserted to objects and values will not be prepended or appended to arrays.

+
Immutable
jsonb_strip_nulls(from_json: jsonb) → jsonb

Returns from_json with all object fields that have null values omitted. Other null values are untouched.

+
Immutable
jsonb_typeof(val: jsonb) → string

Returns the type of the outermost JSON value as a text string.

+
Immutable
row_to_json(row: tuple) → jsonb

Returns the row as a JSON object.

+
Stable
to_json(val: anyelement) → jsonb

Returns the value as JSON or JSONB.

+
Stable
to_jsonb(val: anyelement) → jsonb

Returns the value as JSON or JSONB.

+
Stable
+ +### Multi-region functions + + + + + + + +
Function → ReturnsDescriptionVolatility
default_to_database_primary_region(val: string) → string

Returns the given region if the region has been added to the current database. +Otherwise, this will return the primary region of the current database. +This will error if the current database is not a multi-region database.

+
Stable
gateway_region() → string

Returns the region of the connection’s current node as defined by +the locality flag on node startup. Returns an error if no region is set.

+
Stable
rehome_row() → string

Returns the region of the connection’s current node as defined by +the locality flag on node startup. Returns an error if no region is set.

+
Stable
+ +### STRING[] functions + + + + + + +
Function → ReturnsDescriptionVolatility
regexp_split_to_array(string: string, pattern: string) → string[]

Split string using a POSIX regular expression as the delimiter.

+
Immutable
regexp_split_to_array(string: string, pattern: string, flags: string) → string[]

Split string using a POSIX regular expression as the delimiter with flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
+ +### Sequence functions + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
currval(sequence_name: string) → int

Returns the latest value obtained with nextval for this sequence in this session.

+
Volatile
currval(sequence_name: regclass) → int

Returns the latest value obtained with nextval for this sequence in this session.

+
Volatile
lastval() → int

Return value most recently obtained with nextval in this session.

+
Volatile
nextval(sequence_name: string) → int

Advances the given sequence and returns its new value.

+
Volatile
nextval(sequence_name: regclass) → int

Advances the given sequence and returns its new value.

+
Volatile
setval(sequence_name: string, value: int) → int

Set the given sequence’s current value. The next call to nextval will return value + Increment

+
Volatile
setval(sequence_name: string, value: int, is_called: bool) → int

Set the given sequence’s current value. If is_called is false, the next call to nextval will return value; otherwise value + Increment.

+
Volatile
setval(sequence_name: regclass, value: int) → int

Set the given sequence’s current value. The next call to nextval will return value + Increment

+
Volatile
setval(sequence_name: regclass, value: int, is_called: bool) → int

Set the given sequence’s current value. If is_called is false, the next call to nextval will return value; otherwise value + Increment.

+
Volatile
+ +### Set-returning functions + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
aclexplode(aclitems: string[]) → tuple{oid AS grantor, oid AS grantee, string AS privilege_type, bool AS is_grantable}

Produces a virtual table containing aclitem stuff (returns no rows as this feature is unsupported in CockroachDB)

+
Stable
generate_series(start: int, end: int) → int

Produces a virtual table containing the integer values from start to end, inclusive.

+
Immutable
generate_series(start: int, end: int, step: int) → int

Produces a virtual table containing the integer values from start to end, inclusive, by increment of step.

+
Immutable
generate_series(start: timestamp, end: timestamp, step: interval) → timestamp

Produces a virtual table containing the timestamp values from start to end, inclusive, by increment of step.

+
Immutable
generate_series(start: timestamptz, end: timestamptz, step: interval) → timestamptz

Produces a virtual table containing the timestampTZ values from start to end, inclusive, by increment of step.

+
Immutable
generate_subscripts(array: anyelement[]) → int

Returns a series comprising the given array’s subscripts.

+
Immutable
generate_subscripts(array: anyelement[], dim: int) → int

Returns a series comprising the given array’s subscripts.

+
Immutable
generate_subscripts(array: anyelement[], dim: int, reverse: bool) → int

Returns a series comprising the given array’s subscripts.

+

When reverse is true, the series is returned in reverse order.

+
Immutable
information_schema._pg_expandarray(input: anyelement[]) → tuple{anyelement AS x, int AS n}

Returns the input array as a set of rows with an index

+
Immutable
json_object_keys(input: jsonb) → string

Returns sorted set of keys in the outermost JSON object.

+
Immutable
json_to_record(input: jsonb) → tuple

Builds an arbitrary record from a JSON object.

+
Stable
json_to_recordset(input: jsonb) → tuple

Builds an arbitrary set of records from a JSON array of objects.

+
Stable
jsonb_object_keys(input: jsonb) → string

Returns sorted set of keys in the outermost JSON object.

+
Immutable
jsonb_to_record(input: jsonb) → tuple

Builds an arbitrary record from a JSON object.

+
Stable
jsonb_to_recordset(input: jsonb) → tuple

Builds an arbitrary set of records from a JSON array of objects.

+
Stable
pg_get_keywords() → tuple{string AS word, string AS catcode, string AS catdesc}

Produces a virtual table containing the keywords known to the SQL parser.

+
Immutable
pg_options_to_table(options: string[]) → tuple{string AS option_name, string AS option_value}

Converts the options array format to a table.

+
Stable
regexp_split_to_table(string: string, pattern: string) → string

Split string using a POSIX regular expression as the delimiter.

+
Immutable
regexp_split_to_table(string: string, pattern: string, flags: string) → string

Split string using a POSIX regular expression as the delimiter with flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
unnest(anyelement[], anyelement[], anyelement[]...) → tuple{anyelement AS unnest, anyelement AS unnest, anyelement AS unnest}

Returns the input arrays as a set of rows

+
Immutable
unnest(input: anyelement[]) → anyelement

Returns the input array as a set of rows

+
Immutable
workload_index_recs() → string

Returns set of index recommendations

+
Immutable
workload_index_recs(timestamptz: timestamptz) → string

Returns set of index recommendations

+
Immutable
+ +### Spatial functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
_st_contains(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no points of geometry_b lie in the exterior of geometry_a, and there is at least one point in the interior of geometry_b that lies in the interior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_containsproperly(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_b intersects the interior of geometry_a but not the boundary or exterior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_coveredby(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_a is outside geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_coveredby(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_covers(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_b is outside geography_a.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_covers(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_crosses(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a has some - but not all - interior points in common with geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dfullywithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, inclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than or equal to distance units.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dfullywithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, exclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than distance units.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_equals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is spatially equal to geometry_b, i.e. ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = true.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_intersects(geography_a: geography, geography_b: geography) → bool

Returns true if geography_a shares any portion of space with geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_intersects(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_overlaps(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a intersects but does not completely contain geometry_b, or vice versa. “Does not completely” implies ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = false.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_touches(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if the only points in common between geometry_a and geometry_b are on the boundary. Note points do not touch other points.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_within(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is completely inside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
addgeometrycolumn(catalog_name: string, schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(catalog_name: string, schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
geometrytype(geometry: geometry) → string

Returns the type of geometry as a string.

+

This function utilizes the GEOS module.

+
Immutable
geomfromewkb(val: bytes) → geometry

Returns the Geometry from an EWKB representation.

+
Immutable
geomfromewkt(val: string) → geometry

Returns the Geometry from an EWKT representation.

+
Immutable
postgis_addbbox(geometry: geometry) → geometry

Compatibility placeholder function with PostGIS. This does not perform any operation on the Geometry.

+
Immutable
postgis_dropbbox(geometry: geometry) → geometry

Compatibility placeholder function with PostGIS. This does not perform any operation on the Geometry.

+
Immutable
postgis_extensions_upgrade() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_full_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_geos_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_getbbox(geometry: geometry) → box2d

Returns a box2d encapsulating the given Geometry.

+
Immutable
postgis_hasbbox(geometry: geometry) → bool

Returns whether a given Geometry has a bounding box. False for points and empty geometries; always true otherwise.

+
Immutable
postgis_lib_build_date() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_lib_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_liblwgeom_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_libxml_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_proj_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_build_date() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_installed() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_released() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_wagyu_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
st_addmeasure(geometry: geometry, start: float, end: float) → geometry

Returns a copy of a LineString or MultiLineString with measure coordinates linearly interpolated between the specified start and end values. Any existing M coordinates will be overwritten.

+
Immutable
st_addpoint(line_string: geometry, point: geometry) → geometry

Adds a Point to the end of a LineString.

+
Immutable
st_addpoint(line_string: geometry, point: geometry, index: int) → geometry

Adds a Point to a LineString at the given 0-based index (-1 to append).

+
Immutable
st_affine(geometry: geometry, a: float, b: float, c: float, d: float, e: float, f: float, g: float, h: float, i: float, x_off: float, y_off: float, z_off: float) → geometry

Applies a 3D affine transformation to the given geometry.

+

The matrix transformation will be applied as follows for each coordinate: +/ a b c x_off \ / x
+| d e f y_off | | y | +| g h i z_off | | z | +\ 0 0 0 1 / \ 0 /

+
Immutable
st_affine(geometry: geometry, a: float, b: float, d: float, e: float, x_off: float, y_off: float) → geometry

Applies a 2D affine transformation to the given geometry.

+

The matrix transformation will be applied as follows for each coordinate: +/ a b x_off \ / x
+| d e y_off | | y | +\ 0 0 1 / \ 0 /

+
Immutable
st_angle(line1: geometry, line2: geometry) → float

Returns the clockwise angle between two LINESTRING geometries, treating them as vectors between their start- and endpoints. Returns NULL if any vectors have 0 length.

+
Immutable
st_angle(point1: geometry, point2: geometry, point3: geometry) → float

Returns the clockwise angle between the vectors formed by point2,point1 and point2,point3. The arguments must be POINT geometries. Returns NULL if any vectors have 0 length.

+
Immutable
st_angle(point1: geometry, point2: geometry, point3: geometry, point4: geometry) → float

Returns the clockwise angle between the vectors formed by point1,point2 and point3,point4. The arguments must be POINT geometries. Returns NULL if any vectors have 0 length.

+
Immutable
st_area(geography: geography) → float

Returns the area of the given geography in meters^2. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_area(geography: geography, use_spheroid: bool) → float

Returns the area of the given geography in meters^2.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_area(geometry: geometry) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_area(geometry_str: string) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_area2d(geometry: geometry) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_asbinary(geography: geography) → bytes

Returns the WKB representation of a given Geography.

+
Immutable
st_asbinary(geography: geography, xdr_or_ndr: string) → bytes

Returns the WKB representation of a given Geography. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_asbinary(geometry: geometry) → bytes

Returns the WKB representation of a given Geometry.

+
Immutable
st_asbinary(geometry: geometry, xdr_or_ndr: string) → bytes

Returns the WKB representation of a given Geometry. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_asencodedpolyline(geometry: geometry) → string

Returns the geometry as an Encoded Polyline. +This format is used by Google Maps with precision=5 and by Open Source Routing Machine with precision=5 and 6. +Preserves 5 decimal places.

+
Immutable
st_asencodedpolyline(geometry: geometry, precision: int4) → string

Returns the geometry as an Encoded Polyline. +This format is used by Google Maps with precision=5 and by Open Source Routing Machine with precision=5 and 6. +Precision specifies how many decimal places will be preserved in Encoded Polyline. Value should be the same on encoding and decoding, or coordinates will be incorrect.

+
Immutable
st_asewkb(geography: geography) → bytes

Returns the EWKB representation of a given Geography.

+
Immutable
st_asewkb(geometry: geometry) → bytes

Returns the EWKB representation of a given Geometry.

+
Immutable
st_asewkt(geography: geography) → string

Returns the EWKT representation of a given Geography. A default of 15 decimal digits is used.

+
Immutable
st_asewkt(geography: geography, max_decimal_digits: int) → string

Returns the EWKT representation of a given Geography. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_asewkt(geometry: geometry) → string

Returns the EWKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+
Immutable
st_asewkt(geometry: geometry, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_asewkt(geometry_str: string) → string

Returns the EWKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asewkt(geometry_str: string, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geography: geography) → string

Returns the GeoJSON representation of a given Geography. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(geography: geography, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geography with max_decimal_digits output for each coordinate value.

+
Immutable
st_asgeojson(geography: geography, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geography with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option (default for Geography)
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326
  • +
+
Immutable
st_asgeojson(geometry: geometry) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(geometry: geometry, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+
Immutable
st_asgeojson(geometry: geometry, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326 (default for Geometry)
  • +
+
Immutable
st_asgeojson(geometry_str: string) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geometry_str: string, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geometry_str: string, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326 (default for Geometry)
  • +
+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(row: tuple) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(row: tuple, geo_column: string) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. Coordinates have a maximum of 9 decimal digits.

+
Stable
st_asgeojson(row: tuple, geo_column: string, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. max_decimal_digits will be output for each coordinate value.

+
Stable
st_asgeojson(row: tuple, geo_column: string, max_decimal_digits: int, pretty: bool) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. max_decimal_digits will be output for each coordinate value. Output will be pretty printed in JSON if pretty is true.

+
Stable
st_ashexewkb(geography: geography) → string

Returns the EWKB representation in hex of a given Geography.

+
Immutable
st_ashexewkb(geography: geography, xdr_or_ndr: string) → string

Returns the EWKB representation in hex of a given Geography. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_ashexewkb(geometry: geometry) → string

Returns the EWKB representation in hex of a given Geometry.

+
Immutable
st_ashexewkb(geometry: geometry, xdr_or_ndr: string) → string

Returns the EWKB representation in hex of a given Geometry. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_ashexwkb(geography: geography) → string

Returns the WKB representation in hex of a given Geography.

+
Immutable
st_ashexwkb(geometry: geometry) → string

Returns the WKB representation in hex of a given Geometry.

+
Immutable
st_askml(geography: geography) → string

Returns the KML representation of a given Geography.

+
Immutable
st_askml(geometry: geometry) → string

Returns the KML representation of a given Geometry.

+
Immutable
st_askml(geometry_str: string) → string

Returns the KML representation of a given Geometry.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds. +Uses 256 as the buffer size in tile coordinate space for geometry clipping. +Uses 4096 as the tile extent size in tile coordinate space.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be clipped and transformed.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds. +Uses 256 as the buffer size in tile coordinate space for geometry clipping.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be clipped and transformed.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int, buffer: int) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be clipped and transformed.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int, buffer: int, clip: bool) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds if required.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be transformed, and clipped if required.

+
Immutable
st_astext(geography: geography) → string

Returns the WKT representation of a given Geography. A default of 15 decimal digits is used.

+
Immutable
st_astext(geography: geography, max_decimal_digits: int) → string

Returns the WKT representation of a given Geography. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_astext(geometry: geometry) → string

Returns the WKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+
Immutable
st_astext(geometry: geometry, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_astext(geometry_str: string) → string

Returns the WKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_astext(geometry_str: string, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int, precision_z: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int, precision_z: int, precision_m: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_azimuth(geography_a: geography, geography_b: geography) → float

Returns the azimuth in radians of the segment defined by the given point geographies, or NULL if the two points are coincident. It is solved using the Inverse geodesic problem.

+

The azimuth is the angle referenced from north, and is positive clockwise: North = 0; East = π/2; South = π; West = 3π/2.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_azimuth(geometry_a: geometry, geometry_b: geometry) → float

Returns the azimuth in radians of the segment defined by the given point geometries, or NULL if the two points are coincident.

+

The azimuth is the angle referenced from north, and is positive clockwise: North = 0; East = π/2; South = π; West = 3π/2.

+
Immutable
st_bdpolyfromtext(str: string, srid: int) → geometry

Returns a Polygon from multilinestring WKT with a SRID. If the input is not a multilinestring an error will be thrown.

+
Immutable
st_boundary(geometry: geometry) → geometry

Returns the closure of the combinatorial boundary of this Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_box2dfromgeohash(geohash: string) → box2d

Return a Box2D from a GeoHash string with max precision.

+
Immutable
st_box2dfromgeohash(geohash: string, precision: int) → box2d

Return a Box2D from a GeoHash string with supplied precision.

+
Immutable
st_buffer(geography: geography, distance: float) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geography: geography, distance: float, buffer_style_params: string) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geography: geography, distance: float, quad_segs: int) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geometry: geometry, distance: decimal) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float, buffer_style_params: string) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float, quad_segs: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry_str: string, distance: decimal) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float, buffer_style_params: string) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float, quad_segs: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_centroid(geography: geography) → geography

Returns the centroid of given geography. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_centroid(geography: geography, use_spheroid: bool) → geography

Returns the centroid of given geography.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_centroid(geometry: geometry) → geometry

Returns the centroid of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_centroid(geometry_str: string) → geometry

Returns the centroid of the given geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_clipbybox2d(geometry: geometry, box2d: box2d) → geometry

Clips the geometry to conform to the bounding box specified by box2d.

+
Immutable
st_closestpoint(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the 2-dimensional point on geometry_a that is closest to geometry_b. This is the first point of the shortest line.

+
Immutable
st_collectionextract(geometry: geometry, type: int) → geometry

Given a collection, returns a multitype consisting only of elements of the specified type. If there are no elements of the given type, an EMPTY geometry is returned. Types are specified as 1=POINT, 2=LINESTRING, 3=POLYGON - other types are not supported.

+
Immutable
st_collectionhomogenize(geometry: geometry) → geometry

Returns the “simplest” representation of a collection’s contents. Collections of a single type will be returned as an appropriate multitype, or a singleton if it only contains a single geometry.

+
Immutable
st_combinebbox(box2d: box2d, geometry: geometry) → box2d

Combines the current bounding box with the bounding box of the Geometry.

+
Immutable
st_contains(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no points of geometry_b lie in the exterior of geometry_a, and there is at least one point in the interior of geometry_b that lies in the interior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_containsproperly(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_b intersects the interior of geometry_a but not the boundary or exterior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_convexhull(geometry: geometry) → geometry

Returns a geometry that represents the Convex Hull of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_coorddim(geometry: geometry) → int

Returns the number of coordinate dimensions of a given Geometry.

+
Immutable
st_coveredby(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_a is outside geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_coveredby(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_coveredby(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_covers(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_b is outside geography_a.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_covers(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_covers(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_crosses(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a has some - but not all - interior points in common with geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dfullywithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, inclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than or equal to distance units.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dfullywithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, exclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than distance units.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_difference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the difference of two Geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_dimension(geometry: geometry) → int

Returns the number of topological dimensions of a given Geometry.

+
Immutable
st_disjoint(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a does not overlap, touch or is within geometry_b.

+

This function utilizes the GEOS module.

+
Immutable
st_distance(geography_a: geography, geography_b: geography) → float

Returns the distance in meters between geography_a and geography_b. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_distance(geography_a: geography, geography_b: geography, use_spheroid: bool) → float

Returns the distance in meters between geography_a and geography_b.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_distance(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance between the given geometries.

+
Immutable
st_distance(geometry_a_str: string, geometry_b_str: string) → float

Returns the distance between the given geometries.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_distancesphere(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance in meters between geometry_a and geometry_b assuming the coordinates represent lng/lat points on a sphere.

+

This function utilizes the S2 library for spherical calculations.

+
Immutable
st_distancespheroid(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance in meters between geometry_a and geometry_b assuming the coordinates represent lng/lat points on a spheroid.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_dwithin(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geometry_a_str: string, geometry_b_str: string, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geometry_a_str: string, geometry_b_str: string, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_endpoint(geometry: geometry) → geometry

Returns the last point of a geometry which has shape LineString. Returns NULL if the geometry is not a LineString.

+
Immutable
st_envelope(box2d: box2d) → geometry

Returns a bounding geometry for the given box.

+
Immutable
st_envelope(geometry: geometry) → geometry

Returns a bounding envelope for the given geometry.

+

For geometries which have a POINT or LINESTRING bounding box (i.e. is a single point +or a horizontal or vertical line), a POINT or LINESTRING is returned. Otherwise, the +returned POLYGON will be ordered Bottom Left, Top Left, Top Right, Bottom Right, +Bottom Left.

+
Immutable
st_equals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is spatially equal to geometry_b, i.e. ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = true.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_estimatedextent(schema_name: string, table_name: string, geocolumn_name: string) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+
Stable
st_estimatedextent(schema_name: string, table_name: string, geocolumn_name: string, parent_only: bool) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+

The parent_only boolean is always ignored.

+
Stable
st_estimatedextent(table_name: string, geocolumn_name: string) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+
Stable
st_expand(box2d: box2d, delta: float) → box2d

Extends the box2d by delta units across all dimensions.

+
Immutable
st_expand(box2d: box2d, delta_x: float, delta_y: float) → box2d

Extends the box2d by delta_x units in the x dimension and delta_y units in the y dimension.

+
Immutable
st_expand(geometry: geometry, delta: float) → geometry

Extends the bounding box represented by the geometry by delta units across all dimensions, returning a Polygon representing the new bounding box.

+
Immutable
st_expand(geometry: geometry, delta_x: float, delta_y: float) → geometry

Extends the bounding box represented by the geometry by delta_x units in the x dimension and delta_y units in the y dimension, returning a Polygon representing the new bounding box.

+
Immutable
st_exteriorring(geometry: geometry) → geometry

Returns the exterior ring of a Polygon as a LineString. Returns NULL if the shape is not a Polygon.

+
Immutable
st_flipcoordinates(geometry: geometry) → geometry

Returns a new geometry with the X and Y axes flipped.

+
Immutable
st_force2d(geometry: geometry) → geometry

Returns a Geometry that is forced into XY layout with any Z or M dimensions discarded.

+
Immutable
st_force3d(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3d(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3dm(geometry: geometry) → geometry

Returns a Geometry that is forced into XYM layout. If a M coordinate doesn’t exist, it will be set to 0. If a Z coordinate is present, it will be discarded.

+
Immutable
st_force3dm(geometry: geometry, defaultM: float) → geometry

Returns a Geometry that is forced into XYM layout. If a M coordinate doesn’t exist, it will be set to the specified default M value. If a Z coordinate is present, it will be discarded.

+
Immutable
st_force3dz(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3dz(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate is present, it will be discarded.

+
Immutable
st_force4d(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZM layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate doesn’t exist, it will be set to 0.

+
Immutable
st_force4d(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate doesn’t exist, it will be set to 0.

+
Immutable
st_force4d(geometry: geometry, defaultZ: float, defaultM: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified Z value. If a M coordinate doesn’t exist, it will be set to the specified M value.

+
Immutable
st_forcecollection(geometry: geometry) → geometry

Converts the geometry into a GeometryCollection.

+
Immutable
st_forcepolygonccw(geometry: geometry) → geometry

Returns a Geometry where all Polygon objects have exterior rings in the counter-clockwise orientation and interior rings in the clockwise orientation. Non-Polygon objects are unchanged.

+
Immutable
st_forcepolygoncw(geometry: geometry) → geometry

Returns a Geometry where all Polygon objects have exterior rings in the clockwise orientation and interior rings in the counter-clockwise orientation. Non-Polygon objects are unchanged.

+
Immutable
st_frechetdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the Frechet distance between the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_frechetdistance(geometry_a: geometry, geometry_b: geometry, densify_frac: float) → float

Returns the Frechet distance between the given geometries, with the given segment densification (range 0.0-1.0, -1 to disable).

+

Smaller densify_frac gives a more accurate Fréchet distance. However, the computation time and memory usage increases with the square of the number of subsegments.

+

This function utilizes the GEOS module.

+
Immutable
st_generatepoints(geometry: geometry, npoints: int4) → geometry

Generates pseudo-random points until the requested number are found within the input area. Uses system time as a seed. +The requested number of points must be not larger than 65336.

+
Volatile
st_generatepoints(geometry: geometry, npoints: int4, seed: int4) → geometry

Generates pseudo-random points until the requested number are found within the input area. +The requested number of points must be not larger than 65336.

+
Immutable
st_geogfromewkb(val: bytes) → geography

Returns the Geography from an EWKB representation.

+
Immutable
st_geogfromewkt(val: string) → geography

Returns the Geography from an EWKT representation.

+
Immutable
st_geogfromgeojson(val: string) → geography

Returns the Geography from a GeoJSON representation.

+
Immutable
st_geogfromgeojson(val: jsonb) → geography

Returns the Geography from a GeoJSON representation.

+
Immutable
st_geogfromtext(str: string, srid: int) → geography

Returns the Geography from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geogfromtext(val: string) → geography

Returns the Geography from a WKT or EWKT representation.

+
Immutable
st_geogfromwkb(bytes: bytes, srid: int) → geography

Returns the Geography from a WKB (or EWKB) representation with the given SRID set.

+
Immutable
st_geogfromwkb(val: bytes) → geography

Returns the Geography from a WKB (or EWKB) representation.

+
Immutable
st_geographyfromtext(str: string, srid: int) → geography

Returns the Geography from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geographyfromtext(val: string) → geography

Returns the Geography from a WKT or EWKT representation.

+
Immutable
st_geohash(geography: geography) → string

Returns a GeoHash representation of the geography with full precision if a point is provided, or with variable precision based on the size of the feature.

+
Immutable
st_geohash(geography: geography, precision: int) → string

Returns a GeoHash representation of the geography with the supplied precision.

+
Immutable
st_geohash(geometry: geometry) → string

Returns a GeoHash representation of the geometry with full precision if a point is provided, or with variable precision based on the size of the feature. This will error if any coordinates are outside the bounds of longitude/latitude.

+
Immutable
st_geohash(geometry: geometry, precision: int) → string

Returns a GeoHash representation of the geometry with the supplied precision. This will error if any coordinates are outside the bounds of longitude/latitude.

+
Immutable
st_geomcollfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not GeometryCollection, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geomcollfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geomcollfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geomcollfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geometryfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geometryfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_geometryn(geometry: geometry, n: int) → geometry

Returns the n-th Geometry (1-indexed). Returns NULL if out of bounds.

+
Immutable
st_geometrytype(geometry: geometry) → string

Returns the type of geometry as a string prefixed with ST_.

+

This function utilizes the GEOS module.

+
Immutable
st_geomfromewkb(val: bytes) → geometry

Returns the Geometry from an EWKB representation.

+
Immutable
st_geomfromewkt(val: string) → geometry

Returns the Geometry from an EWKT representation.

+
Immutable
st_geomfromgeohash(geohash: string) → geometry

Return a POLYGON Geometry from a GeoHash string with max precision.

+
Immutable
st_geomfromgeohash(geohash: string, precision: int) → geometry

Return a POLYGON Geometry from a GeoHash string with supplied precision.

+
Immutable
st_geomfromgeojson(val: string) → geometry

Returns the Geometry from a GeoJSON representation.

+
Immutable
st_geomfromgeojson(val: jsonb) → geometry

Returns the Geometry from a GeoJSON representation.

+
Immutable
st_geomfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geomfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_geomfromwkb(bytes: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with the given SRID set.

+
Immutable
st_geomfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation.

+
Immutable
st_hasarc(geometry: geometry) → bool

Returns whether there is a CIRCULARSTRING in the geometry.

+
Immutable
st_hausdorffdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the Hausdorff distance between the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_hausdorffdistance(geometry_a: geometry, geometry_b: geometry, densify_frac: float) → float

Returns the Hausdorff distance between the given geometries, with the given segment densification (range 0.0-1.0).

+

This function utilizes the GEOS module.

+
Immutable
st_interiorringn(geometry: geometry, n: int) → geometry

Returns the n-th (1-indexed) interior ring of a Polygon as a LineString. Returns NULL if the shape is not a Polygon, or the ring does not exist.

+
Immutable
st_intersection(geography_a: geography, geography_b: geography) → geography

Returns the point intersections of the given geographies.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+

This function utilizes the GEOS module.

+
Immutable
st_intersection(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the point intersections of the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_intersection(geometry_a_str: string, geometry_b_str: string) → geometry

Returns the point intersections of the given geometries.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_intersects(geography_a: geography, geography_b: geography) → bool

Returns true if geography_a shares any portion of space with geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_intersects(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_intersects(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_isclosed(geometry: geometry) → bool

Returns whether the geometry is closed as defined by whether the start and end points are coincident. Points are considered closed, empty geometries are not. For collections and multi-types, all members must be closed, as must all polygon rings.

+
Immutable
st_iscollection(geometry: geometry) → bool

Returns whether the geometry is of a collection type (including multi-types).

+
Immutable
st_isempty(geometry: geometry) → bool

Returns whether the geometry is empty.

+
Immutable
st_ispolygonccw(geometry: geometry) → bool

Returns whether the Polygon objects inside the Geometry have exterior rings in the counter-clockwise orientation and interior rings in the clockwise orientation. Non-Polygon objects are considered counter-clockwise.

+
Immutable
st_ispolygoncw(geometry: geometry) → bool

Returns whether the Polygon objects inside the Geometry have exterior rings in the clockwise orientation and interior rings in the counter-clockwise orientation. Non-Polygon objects are considered clockwise.

+
Immutable
st_isring(geometry: geometry) → bool

Returns whether the geometry is a single linestring that is closed and simple, as defined by ST_IsClosed and ST_IsSimple.

+

This function utilizes the GEOS module.

+
Immutable
st_issimple(geometry: geometry) → bool

Returns true if the geometry has no anomalous geometric points, e.g. that it intersects with or lies tangent to itself.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalid(geometry: geometry) → bool

Returns whether the geometry is valid as defined by the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalid(geometry: geometry, flags: int) → bool

Returns whether the geometry is valid.

+

For flags=0, validity is defined by the OGC spec.

+

For flags=1, validity considers self-intersecting rings forming holes as valid as per ESRI. This is not valid under OGC and CRDB spatial operations may not operate correctly.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidreason(geometry: geometry) → string

Returns a string containing the reason the geometry is invalid along with the point of interest, or “Valid Geometry” if it is valid. Validity is defined by the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidreason(geometry: geometry, flags: int) → string

Returns the reason the geometry is invalid or “Valid Geometry” if it is valid.

+

For flags=0, validity is defined by the OGC spec.

+

For flags=1, validity considers self-intersecting rings forming holes as valid as per ESRI. This is not valid under OGC and CRDB spatial operations may not operate correctly.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidtrajectory(geometry: geometry) → bool

Returns whether the geometry encodes a valid trajectory.

+

Note the geometry must be a LineString with M coordinates.

+
Immutable
st_length(geography: geography) → float

Returns the length of the given geography in meters. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_length(geography: geography, use_spheroid: bool) → float

Returns the length of the given geography in meters.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_length(geometry: geometry) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+
Immutable
st_length(geometry_str: string) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_length2d(geometry: geometry) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+
Immutable
st_linecrossingdirection(linestring_a: geometry, linestring_b: geometry) → int

Returns an integer value defining behavior of crossing of lines: +0: lines do not cross, +-1: linestring_b crosses linestring_a from right to left, +1: linestring_b crosses linestring_a from left to right, +-2: linestring_b crosses linestring_a multiple times from right to left, +2: linestring_b crosses linestring_a multiple times from left to right, +-3: linestring_b crosses linestring_a multiple times from left to left, +3: linestring_b crosses linestring_a multiple times from right to right.

+

Note that the top vertex of the segment touching another line does not count as a crossing, but the bottom vertex of segment touching another line is considered a crossing.

+
Immutable
st_linefromencodedpolyline(encoded_polyline: string) → geometry

Creates a LineString from an Encoded Polyline string.

+

Returns valid results only if the polyline was encoded with 5 decimal places.

+

See http://developers.google.com/maps/documentation/utilities/polylinealgorithm

+
Immutable
st_linefromencodedpolyline(encoded_polyline: string, precision: int4) → geometry

Creates a LineString from an Encoded Polyline string.

+

Precision specifies how many decimal places will be preserved in Encoded Polyline. Value should be the same on encoding and decoding, or coordinates will be incorrect.

+

See http://developers.google.com/maps/documentation/utilities/polylinealgorithm

+
Immutable
st_linefrommultipoint(geometry: geometry) → geometry

Creates a LineString from a MultiPoint geometry.

+
Immutable
st_linefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not LineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_linefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_lineinterpolatepoint(geometry: geometry, fraction: float) → geometry

Returns a point along the given LineString which is at given fraction of LineString’s total length.

+

This function utilizes the GEOS module.

+
Immutable
st_lineinterpolatepoints(geometry: geometry, fraction: float) → geometry

Returns one or more points along the LineString which are at integral multiples of the given fraction of the LineString’s total length.

+

Note If the result has zero or one points, it will be returned as a POINT. If it has two or more points, it will be returned as a MULTIPOINT.

+

This function utilizes the GEOS module.

+
Immutable
st_lineinterpolatepoints(geometry: geometry, fraction: float, repeat: bool) → geometry

Returns one or more points along the LineString which is at an integral multiples of given fraction of LineString’s total length. If repeat is false (default true) then it returns the first point.

+

Note If the result has zero or one points, it will be returned as a POINT. If it has two or more points, it will be returned as a MULTIPOINT.

+

This function utilizes the GEOS module.

+
Immutable
st_linelocatepoint(line: geometry, point: geometry) → float

Returns a float between 0 and 1 representing the location of the closest point on LineString to the given Point, as a fraction of total 2d line length.

+
Immutable
st_linemerge(geometry: geometry) → geometry

Returns a LineString or MultiLineString by joining together constituents of a MultiLineString with matching endpoints. If the input is not a MultiLineString or LineString, an empty GeometryCollection is returned.

+

This function utilizes the GEOS module.

+
Immutable
st_linestringfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not LineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_linestringfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linestringfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linestringfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linesubstring(linestring: geometry, start_fraction: decimal, end_fraction: decimal) → geometry

Return a linestring being a substring of the input one starting and ending at the given fractions of total 2D length. Second and third arguments are float8 values between 0 and 1.

+
Immutable
st_linesubstring(linestring: geometry, start_fraction: float, end_fraction: float) → geometry

Return a linestring being a substring of the input one starting and ending at the given fractions of total 2D length. Second and third arguments are float8 values between 0 and 1.

+
Immutable
st_longestline(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the LineString corresponding to the max distance across every pair of points comprising the given geometries.

+

Note if geometries are the same, it will return the LineString with the maximum distance between the geometry’s vertexes. The function will return the longest line that was discovered first when comparing maximum distances if more than one is found.

+
Immutable
st_m(geometry: geometry) → float

Returns the M coordinate of a geometry if it is a Point.

+
Immutable
st_makebox2d(geometry_a: geometry, geometry_b: geometry) → box2d

Creates a box2d from two points. Errors if arguments are not two non-empty points.

+
Immutable
st_makeenvelope(xmin: float, ymin: float, xmax: float, ymax: float) → geometry

Creates a rectangular Polygon from the minimum and maximum values for X and Y with SRID 0.

+
Immutable
st_makeenvelope(xmin: float, ymin: float, xmax: float, ymax: float, srid: int) → geometry

Creates a rectangular Polygon from the minimum and maximum values for X and Y with the given SRID.

+
Immutable
st_makepoint(x: float, y: float) → geometry

Returns a new Point with the given X and Y coordinates.

+
Immutable
st_makepoint(x: float, y: float, z: float) → geometry

Returns a new Point with the given X, Y, and Z coordinates.

+
Immutable
st_makepoint(x: float, y: float, z: float, m: float) → geometry

Returns a new Point with the given X, Y, Z, and M coordinates.

+
Immutable
st_makepointm(x: float, y: float, m: float) → geometry

Returns a new Point with the given X, Y, and M coordinates.

+
Immutable
st_makepolygon(geometry: geometry) → geometry

Returns a new Polygon with the given outer LineString.

+
Immutable
st_makepolygon(outer: geometry, interior: anyelement[]) → geometry

Returns a new Polygon with the given outer LineString and interior (hole) LineString(s).

+
Immutable
st_makevalid(geometry: geometry) → geometry

Returns a valid form of the given geometry according to the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_maxdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the maximum distance across every pair of points comprising the given geometries. Note if the geometries are the same, it will return the maximum distance between the geometry’s vertexes.

+
Immutable
st_memsize(geometry: geometry) → int

Returns the amount of memory space (in bytes) the geometry takes.

+
Immutable
st_minimumboundingcircle(geometry: geometry) → geometry

Returns the smallest circle polygon that can fully contain a geometry.

+
Immutable
st_minimumboundingcircle(geometry: geometry, num_segs: int) → geometry

Returns the smallest circle polygon that can fully contain a geometry.

+
Immutable
st_minimumboundingradius(geometry: geometry) → tuple{geometry AS center, float AS radius}

Returns a record containing the center point and radius of the smallest circle that can fully contain the given geometry.

+
Immutable
st_minimumclearance(geometry: geometry) → float

Returns the minimum distance a vertex can move before producing an invalid geometry. Returns Infinity if no minimum clearance can be found (e.g. for a single point).

+
Immutable
st_minimumclearanceline(geometry: geometry) → geometry

Returns a LINESTRING spanning the minimum distance a vertex can move before producing an invalid geometry. If no minimum clearance can be found (e.g. for a single point), an empty LINESTRING is returned.

+
Immutable
st_mlinefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mlinefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mlinefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mlinefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mpointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mpointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpolyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mpolyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_mpolyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_mpolyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multi(geometry: geometry) → geometry

Returns the geometry as a new multi-geometry, e.g. converts a POINT to a MULTIPOINT. If the input is already a multitype or collection, it is returned as is.

+
Immutable
st_multilinefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multilinefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multilinestringfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multipointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipolyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipolyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipolygonfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_ndims(geometry: geometry) → int

Returns the number of coordinate dimensions of a given Geometry.

+
Immutable
st_node(geometry: geometry) → geometry

Adds a node on a geometry for each intersection. Resulting geometry is always a MultiLineString.

+
Immutable
st_normalize(geometry: geometry) → geometry

Returns the geometry in its normalized form.

+

This function utilizes the GEOS module.

+
Immutable
st_npoints(geometry: geometry) → int

Returns the number of points in a given Geometry. Works for any shape type.

+
Immutable
st_nrings(geometry: geometry) → int

Returns the number of rings in a Polygon Geometry. Returns 0 if the shape is not a Polygon.

+
Immutable
st_numgeometries(geometry: geometry) → int

Returns the number of shapes inside a given Geometry.

+
Immutable
st_numinteriorring(geometry: geometry) → int

Returns the number of interior rings in a Polygon Geometry. Returns NULL if the shape is not a Polygon.

+
Immutable
st_numinteriorrings(geometry: geometry) → int

Returns the number of interior rings in a Polygon Geometry. Returns NULL if the shape is not a Polygon.

+
Immutable
st_numpoints(geometry: geometry) → int

Returns the number of points in a LineString. Returns NULL if the Geometry is not a LineString.

+
Immutable
st_orderingequals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is exactly equal to geometry_b, having all coordinates in the same order, as well as the same type, SRID, bounding box, and so on.

+
Immutable
st_orientedenvelope(geometry: geometry) → geometry

Returns a minimum rotated rectangle enclosing a geometry. +Note that more than one minimum rotated rectangle may exist. +May return a Point or LineString in the case of degenerate inputs.

+
Immutable
st_overlaps(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a intersects but does not completely contain geometry_b, or vice versa. “Does not completely” implies ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = false.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_perimeter(geography: geography) → float

Returns the perimeter of the given geography in meters. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_perimeter(geography: geography, use_spheroid: bool) → float

Returns the perimeter of the given geography in meters.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_perimeter(geometry: geometry) → float

Returns the perimeter of the given geometry.

+

Note ST_Perimeter is only valid for Polygon - use ST_Length for LineString.

+

This function utilizes the GEOS module.

+
Immutable
st_perimeter2d(geometry: geometry) → float

Returns the perimeter of the given geometry.

+

Note ST_Perimeter is only valid for Polygon - use ST_Length for LineString.

+

This function utilizes the GEOS module.

+
Immutable
st_point(x: float, y: float) → geometry

Returns a new Point with the given X and Y coordinates.

+
Immutable
st_pointfromgeohash(geohash: string) → geometry

Return a POINT Geometry from a GeoHash string with max precision.

+
Immutable
st_pointfromgeohash(geohash: string, precision: int) → geometry

Return a POINT Geometry from a GeoHash string with supplied precision.

+
Immutable
st_pointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Point, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_pointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointinsidecircle(geometry: geometry, x_coord: float, y_coord: float, radius: float) → bool

Returns true if the geometry is a point and is inside the circle. Returns false otherwise.

+
Immutable
st_pointn(geometry: geometry, n: int) → geometry

Returns the n-th Point of a LineString (1-indexed). Returns NULL if out of bounds or not a LineString.

+
Immutable
st_pointonsurface(geometry: geometry) → geometry

Returns a point that intersects with the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_points(geometry: geometry) → geometry

Returns all coordinates in the given Geometry as a MultiPoint, including duplicates.

+
Immutable
st_polyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Polygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_polyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygon(geometry: geometry, srid: int) → geometry

Returns a new Polygon from the given LineString and sets its SRID. It is equivalent to ST_MakePolygon with a single argument followed by ST_SetSRID.

+
Immutable
st_polygonfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Polygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_polygonfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygonfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygonfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_project(geography: geography, distance: float, azimuth: float) → geography

Returns a point projected from a start point along a geodesic using a given distance and azimuth (bearing). +This is known as the direct geodesic problem.

+

The distance is given in meters. Negative values are supported.

+

The azimuth (also known as heading or bearing) is given in radians. It is measured clockwise from true north (azimuth zero). +East is azimuth π/2 (90 degrees); south is azimuth π (180 degrees); west is azimuth 3π/2 (270 degrees). +Negative azimuth values and values greater than 2π (360 degrees) are supported.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry) → string

Returns the DE-9IM spatial relation between geometry_a and geometry_b.

+

This function utilizes the GEOS module.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry, bnr: int) → string

Returns the DE-9IM spatial relation between geometry_a and geometry_b using the given boundary node rule (1:OGC/MOD2, 2:Endpoint, 3:MultivalentEndpoint, 4:MonovalentEndpoint).

+

This function utilizes the GEOS module.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry, pattern: string) → bool

Returns whether the DE-9IM spatial relation between geometry_a and geometry_b matches the DE-9IM pattern.

+

This function utilizes the GEOS module.

+
Immutable
st_relatematch(intersection_matrix: string, pattern: string) → bool

Returns whether the given DE-9IM intersection matrix satisfies the given pattern.

+
Immutable
st_removepoint(line_string: geometry, index: int) → geometry

Removes the Point at the given 0-based index and returns the modified LineString geometry.

+
Immutable
st_removerepeatedpoints(geometry: geometry) → geometry

Returns a geometry with repeated points removed.

+
Immutable
st_removerepeatedpoints(geometry: geometry, tolerance: float) → geometry

Returns a geometry with repeated points removed, within the given distance tolerance.

+
Immutable
st_reverse(geometry: geometry) → geometry

Returns a modified geometry by reversing the order of its vertices.

+
Immutable
st_rotate(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated around the origin by a rotation angle.

+
Immutable
st_rotate(g: geometry, angle_radians: float, origin_point: geometry) → geometry

Returns a modified Geometry whose coordinates are rotated around the provided origin by a rotation angle.

+
Immutable
st_rotate(g: geometry, angle_radians: float, origin_x: float, origin_y: float) → geometry

Returns a modified Geometry whose coordinates are rotated around the provided origin by a rotation angle.

+
Immutable
st_rotatex(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the x axis by a rotation angle.

+
Immutable
st_rotatey(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the y axis by a rotation angle.

+
Immutable
st_rotatez(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the z axis by a rotation angle.

+
Immutable
st_s2covering(geography: geography) → geography

Returns a geography which represents the S2 covering used by the index using the default index configuration.

+
Immutable
st_s2covering(geography: geography, settings: string) → geography

Returns a geography which represents the S2 covering used by the index using the index configuration specified by the settings parameter.

+

The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX ... WITH (...), e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using SELECT ST_S2Covering(geography, 's2_max_level=15,s2_level_mod=3').

+
Immutable
st_s2covering(geometry: geometry) → geometry

Returns a geometry which represents the S2 covering used by the index using the default index configuration.

+
Immutable
st_s2covering(geometry: geometry, settings: string) → geometry

Returns a geometry which represents the S2 covering used by the index using the index configuration specified by the settings parameter.

+

The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX ... WITH (...), e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using SELECT ST_S2Covering(geometry, 's2_max_level=15,s2_level_mod=3')

+
Immutable
st_scale(g: geometry, factor: geometry) → geometry

Returns a modified Geometry scaled by taking in a Geometry as the factor.

+
Immutable
st_scale(g: geometry, factor: geometry, origin: geometry) → geometry

Returns a modified Geometry scaled by the Geometry factor relative to a false origin.

+
Immutable
st_scale(geometry: geometry, x_factor: float, y_factor: float) → geometry

Returns a modified Geometry scaled by the given factors.

+
Immutable
st_segmentize(geography: geography, max_segment_length_meters: float) → geography

Returns a modified Geography having no segment longer than the given max_segment_length meters.

+

The calculations are done on a sphere.

+

This function utilizes the S2 library for spherical calculations.

+
Immutable
st_segmentize(geometry: geometry, max_segment_length: float) → geometry

Returns a modified Geometry having no segment longer than the given max_segment_length. Length units are in units of spatial reference.

+
Immutable
st_setpoint(line_string: geometry, index: int, point: geometry) → geometry

Sets the Point at the given 0-based index and returns the modified LineString geometry.

+
Immutable
st_setsrid(geography: geography, srid: int) → geography

Sets a Geography to a new SRID without transforming the coordinates.

+
Immutable
st_setsrid(geometry: geometry, srid: int) → geometry

Sets a Geometry to a new SRID without transforming the coordinates.

+
Immutable
st_sharedpaths(geometry_a: geometry, geometry_b: geometry) → geometry

Returns a collection containing paths shared by the two input geometries.

+

Those going in the same direction are in the first element of the collection, +those going in the opposite direction are in the second element. +The paths themselves are given in the direction of the first geometry.

+
Immutable
st_shiftlongitude(geometry: geometry) → geometry

Returns a modified version of a geometry in which the longitude (X coordinate) of each point is incremented by 360 if it is <0 and decremented by 360 if it is >180. The result is only meaningful if the coordinates are in longitude/latitude.

+
Immutable
st_shortestline(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the LineString corresponding to the minimum distance across every pair of points comprising the given geometries.

+

Note if geometries are the same, it will return the LineString with the minimum distance between the geometry’s vertexes. The function will return the shortest line that was discovered first when comparing minimum distances if more than one is found.

+
Immutable
st_simplify(geometry: geometry, tolerance: float) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm.

+

This function utilizes the GEOS module.

+
Immutable
st_simplify(geometry: geometry, tolerance: float, preserve_collapsed: bool) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm, retaining objects that would be too small given the tolerance if preserve_collapsed is set to true.

+
Immutable
st_simplifypreservetopology(geometry: geometry, tolerance: float) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm, avoiding the creation of invalid geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_snap(input: geometry, target: geometry, tolerance: float) → geometry

Snaps the vertices and segments of the input geometry to the target geometry’s vertices. +Tolerance is used to control where snapping is performed. The result geometry is the input geometry with the vertices snapped. +If no snapping occurs then the input geometry is returned unchanged.

+
Immutable
st_snaptogrid(geometry: geometry, origin: geometry, size_x: float, size_y: float, size_z: float, size_m: float) → geometry

Snap a geometry to a grid defined by the given origin and X, Y, Z, and M cell sizes. Any dimension with a 0 cell size will not be snapped.

+
Immutable
st_snaptogrid(geometry: geometry, origin_x: float, origin_y: float, size_x: float, size_y: float) → geometry

Snap a geometry to a grid with X coordinates snapped to size_x and Y coordinates snapped to size_y based on an origin of (origin_x, origin_y).

+
Immutable
st_snaptogrid(geometry: geometry, size: float) → geometry

Snap a geometry to a grid of the given size. The specified size is only used to snap X and Y coordinates.

+
Immutable
st_snaptogrid(geometry: geometry, size_x: float, size_y: float) → geometry

Snap a geometry to a grid with X coordinates snapped to size_x and Y coordinates snapped to size_y.

+
Immutable
st_srid(geography: geography) → int

Returns the Spatial Reference Identifier (SRID) for the ST_Geography as defined in spatial_ref_sys table.

+
Immutable
st_srid(geometry: geometry) → int

Returns the Spatial Reference Identifier (SRID) for the ST_Geometry as defined in spatial_ref_sys table.

+
Immutable
st_startpoint(geometry: geometry) → geometry

Returns the first point of a geometry which has shape LineString. Returns NULL if the geometry is not a LineString.

+
Immutable
st_subdivide(geometry: geometry) → geometry

Returns a geometry divided into parts, where each part contains no more than 256 vertices.

+
Immutable
st_subdivide(geometry: geometry, max_vertices: int4) → geometry

Returns a geometry divided into parts, where each part contains no more than the number of vertices provided.

+
Immutable
st_summary(geography: geography) → string

Returns a text summary of the contents of the geography.

+

Flags shown in square brackets after the geometry type have the following meaning:

+
    +
  • M: has M coordinate
  • +
  • Z: has Z coordinate
  • +
  • B: has a cached bounding box
  • +
  • G: is geography
  • +
  • S: has spatial reference system
  • +
+
Immutable
st_summary(geometry: geometry) → string

Returns a text summary of the contents of the geometry.

+

Flags shown in square brackets after the geometry type have the following meaning:

+
    +
  • M: has M coordinate
  • +
  • Z: has Z coordinate
  • +
  • B: has a cached bounding box
  • +
  • G: is geography
  • +
  • S: has spatial reference system
  • +
+
Immutable
st_swapordinates(geometry: geometry, swap_ordinate_string: string) → geometry

Returns a version of the given geometry with given ordinates swapped. +The swap_ordinate_string parameter is a 2-character string naming the ordinates to swap. Valid names are: x, y, z and m.

+
Immutable
st_symdifference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the symmetric difference of both geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_symmetricdifference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the symmetric difference of both geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_touches(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if the only points in common between geometry_a and geometry_b are on the boundary. Note points do not touch other points.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_transform(geometry: geometry, from_proj_text: string, srid: int) → geometry

Transforms a geometry into the coordinate reference system assuming the from_proj_text to the new to_proj_text by projecting its coordinates. The supplied SRID is set on the new geometry.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, from_proj_text: string, to_proj_text: string) → geometry

Transforms a geometry into the coordinate reference system assuming the from_proj_text to the new to_proj_text by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, srid: int) → geometry

Transforms a geometry into the given SRID coordinate reference system by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, to_proj_text: string) → geometry

Transforms a geometry into the coordinate reference system referenced by the projection text by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_translate(g: geometry, delta_x: float, delta_y: float) → geometry

Returns a modified Geometry translated by the given deltas.

+
Immutable
st_translate(g: geometry, delta_x: float, delta_y: float, delta_z: float) → geometry

Returns a modified Geometry translated by the given deltas.

+
Immutable
st_transscale(geometry: geometry, delta_x: float, delta_y: float, x_factor: float, y_factor: float) → geometry

Translates the geometry using the deltaX and deltaY args, then scales it using the XFactor, YFactor args, working in 2D only.

+
Immutable
st_unaryunion(geometry: geometry) → geometry

Returns a union of the components for any geometry or geometry collection provided. Dissolves boundaries of a multipolygon.

+
Immutable
st_voronoilines(geometry: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoilines(geometry: geometry, tolerance: float) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoilines(geometry: geometry, tolerance: float, extend_to: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoipolygons(geometry: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_voronoipolygons(geometry: geometry, tolerance: float) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_voronoipolygons(geometry: geometry, tolerance: float, extend_to: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_within(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is completely inside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_wkbtosql(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation.

+
Immutable
st_wkttosql(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_x(geometry: geometry) → float

Returns the X coordinate of a geometry if it is a Point.

+
Immutable
st_xmax(box2d: box2d) → float

Returns the maximum X ordinate of a box2d.

+
Immutable
st_xmax(geometry: geometry) → float

Returns the maximum X ordinate of a geometry.

+
Immutable
st_xmin(box2d: box2d) → float

Returns the minimum X ordinate of a box2d.

+
Immutable
st_xmin(geometry: geometry) → float

Returns the minimum X ordinate of a geometry.

+
Immutable
st_y(geometry: geometry) → float

Returns the Y coordinate of a geometry if it is a Point.

+
Immutable
st_ymax(box2d: box2d) → float

Returns the maximum Y ordinate of a box2d.

+
Immutable
st_ymax(geometry: geometry) → float

Returns the maximum Y ordinate of a geometry.

+
Immutable
st_ymin(box2d: box2d) → float

Returns the minimum Y ordinate of a box2d.

+
Immutable
st_ymin(geometry: geometry) → float

Returns the minimum Y ordinate of a geometry.

+
Immutable
st_z(geometry: geometry) → float

Returns the Z coordinate of a geometry if it is a Point.

+
Immutable
st_zmflag(geometry: geometry) → int2

Returns a code based on the ZM coordinate dimension of a geometry (XY = 0, XYM = 1, XYZ = 2, XYZM = 3).

+
Immutable
+ +### String and byte functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
ascii(val: string) → int

Returns the character code of the first character in val. Despite the name, the function supports Unicode too.

+
Immutable
bit_length(val: bytes) → int

Calculates the number of bits used to represent val.

+
Immutable
bit_length(val: string) → int

Calculates the number of bits used to represent val.

+
Immutable
bit_length(val: varbit) → int

Calculates the number of bits used to represent val.

+
Immutable
bitmask_and(a: string, b: string) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: string, b: varbit) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: varbit, b: string) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: varbit, b: varbit) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: string, b: string) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: string, b: varbit) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: varbit, b: string) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: varbit, b: varbit) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: string, b: string) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: string, b: varbit) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: varbit, b: string) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: varbit, b: varbit) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
btrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the beginning or end of input (applies recursively).

+

For example, btrim('doggie', 'eod') returns ggi.

+
Immutable
btrim(val: string) → string

Removes all spaces from the beginning and end of val.

+
Immutable
char_length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
char_length(val: string) → int

Calculates the number of characters in val.

+
Immutable
character_length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
character_length(val: string) → int

Calculates the number of characters in val.

+
Immutable
chr(val: int) → string

Returns the character with the code given in val. Inverse function of ascii().

+
Immutable
compress(data: bytes, codec: string) → bytes

Compress data with the specified codec (‘gzip’, ‘lz4’, ‘snappy’, ‘zstd’).

+
Immutable
concat(string...) → string

Concatenates a comma-separated list of strings.

+
Immutable
concat_ws(string...) → string

Uses the first argument as a separator between the concatenation of the subsequent arguments.

+

For example concat_ws('!','wow','great') returns wow!great.

+
Immutable
convert_from(str: bytes, enc: string) → string

Decode the bytes in str into a string using encoding enc. Supports encodings ‘UTF8’ and ‘LATIN1’.

+
Immutable
convert_to(str: string, enc: string) → bytes

Encode the string str as a byte array using encoding enc. Supports encodings ‘UTF8’ and ‘LATIN1’.

+
Immutable
decode(text: string, format: string) → bytes

Decodes data using format (hex / escape / base64).

+
Immutable
decompress(data: bytes, codec: string) → bytes

Decompress data with the specified codec (‘gzip’, ‘lz4’, ‘snappy’, ‘zstd’).

+
Immutable
difference(source: string, target: string) → int

Convert two strings to their Soundex codes and then reports the number of matching code positions.

+
Immutable
encode(data: bytes, format: string) → string

Encodes data using format (hex / escape / base64).

+
Immutable
format(string, anyelement...) → string

Interprets the first argument as a format string similar to C sprintf and interpolates the remaining arguments.

+
Stable
from_ip(val: bytes) → string

Converts the byte string representation of an IP to its character string representation.

+
Immutable
from_uuid(val: bytes) → string

Converts the byte string representation of a UUID to its character string representation.

+
Immutable
get_bit(bit_string: varbit, index: int) → int

Extracts a bit at given index in the bit array.

+
Immutable
get_bit(byte_string: bytes, index: int) → int

Extracts a bit at the given index in the byte array.

+
Immutable
get_byte(byte_string: bytes, index: int) → int

Extracts a byte at the given index in the byte array.

+
Immutable
initcap(val: string) → string

Capitalizes the first letter of val.

+
Immutable
left(input: bytes, return_set: int) → bytes

Returns the first return_set bytes from input.

+
Immutable
left(input: string, return_set: int) → string

Returns the first return_set characters from input.

+
Immutable
length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
length(val: string) → int

Calculates the number of characters in val.

+
Immutable
length(val: varbit) → int

Calculates the number of bits in val.

+
Immutable
lower(val: string) → string

Converts all characters in val to their lower-case equivalents.

+
Immutable
lpad(string: string, length: int) → string

Pads string to length by adding ’ ’ to the left of string. If string is longer than length it is truncated.

+
Immutable
lpad(string: string, length: int, fill: string) → string

Pads string by adding fill to the left of string to make it length. If string is longer than length it is truncated.

+
Immutable
ltrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the beginning (left-hand side) of input (applies recursively).

+

For example, ltrim('doggie', 'od') returns ggie.

+
Immutable
ltrim(val: string) → string

Removes all spaces from the beginning (left-hand side) of val.

+
Immutable
md5(bytes...) → string

Calculates the MD5 hash value of a set of values.

+
Leakproof
md5(string...) → string

Calculates the MD5 hash value of a set of values.

+
Leakproof
octet_length(val: bytes) → int

Calculates the number of bytes used to represent val.

+
Immutable
octet_length(val: string) → int

Calculates the number of bytes used to represent val.

+
Immutable
octet_length(val: varbit) → int

Calculates the number of bits used to represent val.

+
Immutable
overlay(input: string, overlay_val: string, start_pos: int) → string

Replaces characters in input with overlay_val starting at start_pos (begins at 1).

+

For example, overlay('doggie', 'CAT', 2) returns dCATie.

+
Immutable
overlay(input: string, overlay_val: string, start_pos: int, end_pos: int) → string

Deletes the characters in input between start_pos and end_pos (count starts at 1), and then insert overlay_val at start_pos.

+
Immutable
parse_date(string: string, datestyle: string) → date

Parses a date assuming it is in format specified by DateStyle.

+
Immutable
parse_date(val: string) → date

Parses a date assuming it is in MDY format.

+
Immutable
parse_ident(qualified_identifier: string) → string[]

Splits qualified_identifier into an array of identifiers, removing any quoting of individual identifiers. Extra characters after the last identifier are considered an error.

+
Immutable
parse_ident(qualified_identifier: string, strict: bool) → string[]

Splits qualified_identifier into an array of identifiers, removing any quoting of individual identifiers. If strict is false, then extra characters after the last identifier are ignored.

+
Immutable
parse_interval(string: string, style: string) → interval

Convert a string to an interval using the given IntervalStyle.

+
Immutable
parse_interval(val: string) → interval

Convert a string to an interval assuming the Postgres IntervalStyle.

+
Immutable
parse_time(string: string, timestyle: string) → time

Parses a time assuming the date (if any) is in format specified by DateStyle.

+
Immutable
parse_time(val: string) → time

Parses a time assuming the date (if any) is in MDY format.

+
Immutable
parse_timestamp(string: string, datestyle: string) → timestamp

Convert a string containing an absolute timestamp to the corresponding timestamp assuming dates formatted using the given DateStyle.

+
Immutable
parse_timestamp(val: string) → timestamp

Convert a string containing an absolute timestamp to the corresponding timestamp assuming dates are in MDY format.

+
Immutable
parse_timetz(string: string, timestyle: string) → timetz

Parses a timetz assuming the date (if any) is in format specified by DateStyle.

+
Immutable
parse_timetz(val: string) → timetz

Parses a timetz assuming the date (if any) is in MDY format.

+
Immutable
prettify_statement(statement: string, line_width: int, align_mode: int, case_mode: int) → string

Prettifies a statement using a user-configured pretty-printing config. +Align mode values range from 0 - 3, representing no, partial, full, and extra alignment respectively. +Case mode values range between 0 - 1, representing lower casing and upper casing respectively.

+
Immutable
prettify_statement(val: string) → string

Prettifies a statement using the default pretty-printing config.

+
Immutable
quote_ident(val: string) → string

Return val suitably quoted to serve as identifier in a SQL statement.

+
Immutable
quote_literal(val: string) → string

Return val suitably quoted to serve as string literal in a SQL statement.

+
Immutable
quote_literal(val: anyelement) → string

Coerce val to a string and then quote it as a literal.

+
Stable
quote_nullable(val: string) → string

Coerce val to a string and then quote it as a literal. If val is NULL, returns ‘NULL’.

+
Immutable
quote_nullable(val: anyelement) → string

Coerce val to a string and then quote it as a literal. If val is NULL, returns ‘NULL’.

+
Stable
regexp_extract(input: string, regex: string) → string

Returns the first match for the Regular Expression regex in input.

+
Immutable
regexp_replace(input: string, regex: string, replace: string) → string

Replaces matches for the Regular Expression regex in input with the Regular Expression replace.

+
Immutable
regexp_replace(input: string, regex: string, replace: string, flags: string) → string

Replaces matches for the regular expression regex in input with the regular expression replace using flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
repeat(input: string, repeat_counter: int) → string

Concatenates input repeat_counter number of times.

+

For example, repeat('dog', 2) returns dogdog.

+
Immutable
replace(input: string, find: string, replace: string) → string

Replaces all occurrences of find with replace in input

+
Immutable
reverse(val: string) → string

Reverses the order of the string’s characters.

+
Immutable
right(input: bytes, return_set: int) → bytes

Returns the last return_set bytes from input.

+
Immutable
right(input: string, return_set: int) → string

Returns the last return_set characters from input.

+
Immutable
rpad(string: string, length: int) → string

Pads string to length by adding ’ ’ to the right of string. If string is longer than length it is truncated.

+
Immutable
rpad(string: string, length: int, fill: string) → string

Pads string to length by adding fill to the right of string. If string is longer than length it is truncated.

+
Immutable
rtrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the end (right-hand side) of input (applies recursively).

+

For example, rtrim('doggie', 'ei') returns dogg.

+
Immutable
rtrim(val: string) → string

Removes all spaces from the end (right-hand side) of val.

+
Immutable
set_bit(bit_string: varbit, index: int, to_set: int) → varbit

Updates a bit at given index in the bit array.

+
Immutable
set_bit(byte_string: bytes, index: int, to_set: int) → bytes

Updates a bit at the given index in the byte array.

+
Immutable
set_byte(byte_string: bytes, index: int, to_set: int) → bytes

Updates a byte at the given index in the byte array.

+
Immutable
sha1(bytes...) → string

Calculates the SHA1 hash value of a set of values.

+
Leakproof
sha1(string...) → string

Calculates the SHA1 hash value of a set of values.

+
Leakproof
sha224(bytes...) → string

Calculates the SHA224 hash value of a set of values.

+
Leakproof
sha224(string...) → string

Calculates the SHA224 hash value of a set of values.

+
Leakproof
sha256(bytes...) → string

Calculates the SHA256 hash value of a set of values.

+
Leakproof
sha256(string...) → string

Calculates the SHA256 hash value of a set of values.

+
Leakproof
sha384(bytes...) → string

Calculates the SHA384 hash value of a set of values.

+
Leakproof
sha384(string...) → string

Calculates the SHA384 hash value of a set of values.

+
Leakproof
sha512(bytes...) → string

Calculates the SHA512 hash value of a set of values.

+
Leakproof
sha512(string...) → string

Calculates the SHA512 hash value of a set of values.

+
Leakproof
similar_escape(pattern: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern.

+
Immutable
similar_escape(pattern: string, escape: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern using escape as an escape token.

+
Immutable
similar_to_escape(pattern: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern.

+
Immutable
similar_to_escape(pattern: string, escape: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern using escape as an escape token.

+
Immutable
similar_to_escape(unescaped: string, pattern: string, escape: string) → bool

Matches unescaped with pattern using escape as an escape token.

+
Immutable
split_part(input: string, delimiter: string, return_index_pos: int) → string

Splits input on delimiter and return the value in the return_index_pos position (starting at 1).

+

For example, split_part('123.456.789.0','.',3) returns 789.

+
Immutable
strpos(input: bytes, find: bytes) → int

Calculates the position where the byte subarray find begins in input.

+
Immutable
strpos(input: string, find: string) → int

Calculates the position where the string find begins in input.

+

For example, strpos('doggie', 'gie') returns 4.

+
Immutable
strpos(input: varbit, find: varbit) → int

Calculates the position where the bit subarray find begins in input.

+
Immutable
substr(input: bytes, start_pos: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: bytes, start_pos: int, length: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substr(input: string, regex: string) → string

Returns a substring of input that matches the regular expression regex.

+
Immutable
substr(input: string, regex: string, escape_char: string) → string

Returns a substring of input that matches the regular expression regex using escape_char as your escape character instead of \.

+
Immutable
substr(input: string, start_pos: int) → string

Returns a substring of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: string, start_pos: int, length: int) → string

Returns a substring of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substr(input: varbit, start_pos: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: varbit, start_pos: int, length: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: bytes, start_pos: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: bytes, start_pos: int, length: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: string, regex: string) → string

Returns a substring of input that matches the regular expression regex.

+
Immutable
substring(input: string, regex: string, escape_char: string) → string

Returns a substring of input that matches the regular expression regex using escape_char as your escape character instead of \.

+
Immutable
substring(input: string, start_pos: int) → string

Returns a substring of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: string, start_pos: int, length: int) → string

Returns a substring of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: varbit, start_pos: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: varbit, start_pos: int, length: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
to_char_with_style(date: date, datestyle: string) → string

Convert a date to a string assuming the string is formatted using the given DateStyle.

+
Immutable
to_char_with_style(interval: interval, style: string) → string

Convert an interval to a string using the given IntervalStyle.

+
Immutable
to_char_with_style(timestamp: timestamp, datestyle: string) → string

Convert a timestamp to a string assuming the string is formatted using the given DateStyle.

+
Immutable
to_english(val: int) → string

This function enunciates the value of its argument using English cardinals.

+
Immutable
to_hex(val: bytes) → string

Converts val to its hexadecimal representation.

+
Immutable
to_hex(val: int) → string

Converts val to its hexadecimal representation.

+
Immutable
to_hex(val: string) → string

Converts val to its hexadecimal representation.

+
Immutable
to_ip(val: string) → bytes

Converts the character string representation of an IP to its byte string representation.

+
Immutable
to_uuid(val: string) → bytes

Converts the character string representation of a UUID to its byte string representation.

+
Immutable
translate(input: string, find: string, replace: string) → string

In input, replaces the first character from find with the first character in replace; repeat for each character in find.

+

For example, translate('doggie', 'dog', '123'); returns 1233ie.

+
Immutable
ulid_to_uuid(val: string) → uuid

Converts a ULID string to its UUID-encoded representation.

+
Immutable
unaccent(val: string) → string

Removes accents (diacritic signs) from the text provided in val.

+
Immutable
upper(val: string) → string

Converts all characters in val to their upper-case equivalents.

+
Immutable
+ +### System info functions + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cluster_logical_timestamp() → decimal

Returns the logical time of the current transaction as +a CockroachDB HLC in decimal form.

+

Note that uses of this function disable server-side optimizations and +may increase either contention or retry errors, or both.

+

Returns an error if run in a transaction with an isolation level weaker than SERIALIZABLE.

+
Volatile
current_database() → string

Returns the current database.

+
Stable
current_schema() → string

Returns the current schema.

+
Stable
current_schemas(include_pg_catalog: bool) → string[]

Returns the valid schemas in the search path.

+
Stable
current_user() → string

Returns the current user. This function is provided for compatibility with PostgreSQL.

+
Stable
session_user() → string

Returns the session user. This function is provided for compatibility with PostgreSQL.

+
Stable
to_regclass(text: string) → regtype

Translates a textual relation name to its OID

+
Stable
to_regnamespace(text: string) → regtype

Translates a textual schema name to its OID

+
Stable
to_regproc(text: string) → regtype

Translates a textual function or procedure name to its OID

+
Stable
to_regprocedure(text: string) → regtype

Translates a textual function or procedure name (with argument types) to its OID

+
Stable
to_regrole(text: string) → regtype

Translates a textual role name to its OID

+
Stable
to_regtype(text: string) → regtype

Translates a textual type name to its OID

+
Stable
version() → string

Returns the node’s version of CockroachDB.

+
Volatile
+ +### TIMETZ functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
current_time() → time

Returns the current transaction’s time with no time zone.

+
Stable
current_time() → timetz

Returns the current transaction’s time with time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
current_time(precision: int) → time

Returns the current transaction’s time with no time zone.

+
Stable
current_time(precision: int) → timetz

Returns the current transaction’s time with time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime() → time

Returns the current transaction’s time with no time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime() → timetz

Returns the current transaction’s time with time zone.

+
Stable
localtime(precision: int) → time

Returns the current transaction’s time with no time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime(precision: int) → timetz

Returns the current transaction’s time with time zone.

+
Stable
+ +### Trigrams functions + + + + + + +
Function → ReturnsDescriptionVolatility
show_trgm(input: string) → string[]

Returns an array of all the trigrams in the given string.

+
Immutable
similarity(left: string, right: string) → float

Returns a number that indicates how similar the two arguments are. The range of the result is zero (indicating that the two strings are completely dissimilar) to one (indicating that the two strings are identical).

+
Immutable
+ +### UUID functions + + + + + +
Function → ReturnsDescriptionVolatility
uuid_to_ulid(val: uuid) → string

Converts a UUID-encoded ULID to its string representation.

+
Immutable
+ +### Compatibility functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
col_description(table_oid: oid, column_number: int) → string

Returns the comment for a table column, which is specified by the OID of its table and its column number. (obj_description cannot be used for table columns, since columns do not have OIDs of their own.)

+
Stable
current_setting(setting_name: string) → string

System info

+
Stable
current_setting(setting_name: string, missing_ok: bool) → string

System info

+
Stable
format_type(type_oid: oid, typemod: int) → string

Returns the SQL name of a data type that is identified by its type OID and possibly a type modifier. Currently, the type modifier is ignored.

+
Stable
getdatabaseencoding() → string

Returns the current encoding name used by the database.

+
Stable
has_any_column_privilege(table: string, privilege: string) → bool

Returns whether or not the current user has privileges for any column of table.

+
Stable
has_any_column_privilege(table: oid, privilege: string) → bool

Returns whether or not the current user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: string, table: string, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: string, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: oid, table: string, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: oid, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_column_privilege(table: string, column: int, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: string, column: string, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: oid, column: int, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: oid, column: string, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(user: string, table: string, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: string, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: oid, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: oid, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: string, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: string, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: oid, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: oid, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_database_privilege(database: string, privilege: string) → bool

Returns whether or not the current user has privileges for database.

+
Stable
has_database_privilege(database: oid, privilege: string) → bool

Returns whether or not the current user has privileges for database.

+
Stable
has_database_privilege(user: string, database: string, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: string, database: oid, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: oid, database: string, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: oid, database: oid, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_foreign_data_wrapper_privilege(fdw: string, privilege: string) → bool

Returns whether or not the current user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(fdw: oid, privilege: string) → bool

Returns whether or not the current user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: string, fdw: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: string, fdw: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: oid, fdw: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: oid, fdw: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_function_privilege(function: string, privilege: string) → bool

Returns whether or not the current user has privileges for function.

+
Stable
has_function_privilege(function: oid, privilege: string) → bool

Returns whether or not the current user has privileges for function.

+
Stable
has_function_privilege(user: string, function: string, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: string, function: oid, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: oid, function: string, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: oid, function: oid, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_language_privilege(language: string, privilege: string) → bool

Returns whether or not the current user has privileges for language.

+
Stable
has_language_privilege(language: oid, privilege: string) → bool

Returns whether or not the current user has privileges for language.

+
Stable
has_language_privilege(user: string, language: string, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: string, language: oid, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: oid, language: string, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: oid, language: oid, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_schema_privilege(schema: string, privilege: string) → bool

Returns whether or not the current user has privileges for schema.

+
Stable
has_schema_privilege(schema: oid, privilege: string) → bool

Returns whether or not the current user has privileges for schema.

+
Stable
has_schema_privilege(user: string, schema: string, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: string, schema: oid, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: oid, schema: string, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: oid, schema: oid, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_sequence_privilege(sequence: string, privilege: string) → bool

Returns whether or not the current user has privileges for sequence.

+
Stable
has_sequence_privilege(sequence: oid, privilege: string) → bool

Returns whether or not the current user has privileges for sequence.

+
Stable
has_sequence_privilege(user: string, sequence: string, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: string, sequence: oid, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: oid, sequence: string, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: oid, sequence: oid, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_server_privilege(server: string, privilege: string) → bool

Returns whether or not the current user has privileges for foreign server.

+
Stable
has_server_privilege(server: oid, privilege: string) → bool

Returns whether or not the current user has privileges for foreign server.

+
Stable
has_server_privilege(user: string, server: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: string, server: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: oid, server: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: oid, server: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_table_privilege(table: string, privilege: string) → bool

Returns whether or not the current user has privileges for table.

+
Stable
has_table_privilege(table: oid, privilege: string) → bool

Returns whether or not the current user has privileges for table.

+
Stable
has_table_privilege(user: string, table: string, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: string, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: oid, table: string, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: oid, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_tablespace_privilege(tablespace: string, privilege: string) → bool

Returns whether or not the current user has privileges for tablespace.

+
Stable
has_tablespace_privilege(tablespace: oid, privilege: string) → bool

Returns whether or not the current user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: string, tablespace: string, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: string, tablespace: oid, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: oid, tablespace: string, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: oid, tablespace: oid, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_type_privilege(type: string, privilege: string) → bool

Returns whether or not the current user has privileges for type.

+
Stable
has_type_privilege(type: oid, privilege: string) → bool

Returns whether or not the current user has privileges for type.

+
Stable
has_type_privilege(user: string, type: string, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: string, type: oid, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: oid, type: string, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: oid, type: oid, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
information_schema._pg_numeric_precision(typid: oid, typmod: int4) → int

Returns the precision of the given type with type modifier

+
Immutable
information_schema._pg_numeric_precision_radix(typid: oid, typmod: int4) → int

Returns the radix of the given type with type modifier

+
Immutable
information_schema._pg_numeric_scale(typid: oid, typmod: int4) → int

Returns the scale of the given type with type modifier

+
Immutable
nameconcatoid(name: string, oid: oid) → name

Used in the information_schema to produce specific_name columns, which are supposed to be unique per schema. The result is the same as ($1::text || ‘_’ || $2::text)::name except that, if it would not fit in 63 characters, we make it do so by truncating the name input (not the oid).

+
Immutable
obj_description(object_oid: oid) → string

Returns the comment for a database object specified by its OID alone. This is deprecated since there is no guarantee that OIDs are unique across different system catalogs; therefore, the wrong comment might be returned.

+
Stable
obj_description(object_oid: oid, catalog_name: string) → string

Returns the comment for a database object specified by its OID and the name of the containing system catalog. For example, obj_description(123456, ‘pg_class’) would retrieve the comment for the table with OID 123456.

+
Stable
oidvectortypes(vector: oidvector) → string

Generates a comma separated string of type names from an oidvector.

+
Stable
pg_backend_pid() → int

Returns a numerical ID attached to this session. This ID is part of the query cancellation key used by the wire protocol. This function was only added for compatibility, and unlike in Postgres, the returned value does not correspond to a real process ID.

+
Stable
pg_collation_for(str: anyelement) → string

Returns the collation of the argument

+
Stable
pg_column_is_updatable(reloid: oid, attnum: int2, include_triggers: bool) → bool

Returns whether the given column can be updated.

+
Stable
pg_column_size(anyelement...) → int

Return size in bytes of the column provided as an argument

+
Immutable
pg_function_is_visible(oid: oid) → bool

Returns whether the function with the given OID belongs to one of the schemas on the search path.

+
Stable
pg_get_function_arg_default(func_oid: oid, arg_num: int4) → string

Get textual representation of a function argument’s default value. The second argument of this function is the argument number among all arguments (i.e. proallargtypes, not proargtypes), starting with 1, because that’s how information_schema.sql uses it. Currently, this always returns NULL, since CockroachDB does not support default values.

+
Stable
pg_get_function_arguments(func_oid: oid) → string

Returns the argument list (with defaults) necessary to identify a function, in the form it would need to appear in within CREATE FUNCTION.

+
Stable
pg_get_function_identity_arguments(func_oid: oid) → string

Returns the argument list (without defaults) necessary to identify a function, in the form it would need to appear in within ALTER FUNCTION, for instance.

+
Stable
pg_get_function_result(func_oid: oid) → string

Returns the types of the result of the specified function.

+
Stable
pg_get_functiondef(func_oid: oid) → string

For user-defined functions, returns the definition of the specified function. For builtin functions, returns the name of the function.

+
Stable
pg_get_indexdef(index_oid: oid) → string

Gets the CREATE INDEX command for index

+
Stable
pg_get_indexdef(index_oid: oid, column_no: int, pretty_bool: bool) → string

Gets the CREATE INDEX command for index, or definition of just one index column when given a non-zero column number

+
Stable
pg_get_serial_sequence(table_name: string, column_name: string) → string

Returns the name of the sequence used by the given column_name in the table table_name.

+
Stable
pg_get_viewdef(view_oid: oid) → string

Returns the CREATE statement for an existing view.

+
Stable
pg_get_viewdef(view_oid: oid, pretty_bool: bool) → string

Returns the CREATE statement for an existing view.

+
Stable
pg_has_role(role: string, privilege: string) → bool

Returns whether or not the current user has privileges for role.

+
Stable
pg_has_role(role: oid, privilege: string) → bool

Returns whether or not the current user has privileges for role.

+
Stable
pg_has_role(user: string, role: string, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: string, role: oid, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: oid, role: string, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: oid, role: oid, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_is_other_temp_schema(oid: oid) → bool

Returns true if the given OID is the OID of another session’s temporary schema. (This can be useful, for example, to exclude other sessions’ temporary tables from a catalog display.)

+
Stable
pg_my_temp_schema() → oid

Returns the OID of the current session’s temporary schema, or zero if it has none (because it has not created any temporary tables).

+
Stable
pg_relation_is_updatable(reloid: oid, include_triggers: bool) → int4

Returns the update events the relation supports.

+
Stable
pg_sequence_last_value(sequence_oid: oid) → int

Returns the last value generated by a sequence, or NULL if the sequence has not been used yet.

+
Volatile
pg_sleep(seconds: float) → bool

pg_sleep makes the current session’s process sleep until seconds seconds have elapsed. seconds is a value of type double precision, so fractional-second delays can be specified.

+
Volatile
pg_table_is_visible(oid: oid) → bool

Returns whether the table with the given OID belongs to one of the schemas on the search path.

+
Stable
pg_type_is_visible(oid: oid) → bool

Returns whether the type with the given OID belongs to one of the schemas on the search path.

+
Stable
set_config(setting_name: string, new_value: string, is_local: bool) → string

System info

+
Volatile
shobj_description(object_oid: oid, catalog_name: string) → string

Returns the comment for a shared database object specified by its OID and the name of the containing system catalog. This is just like obj_description except that it is used for retrieving comments on shared objects (e.g. databases).

+
Stable
+ diff --git a/src/current/_includes/cockroach-generated/release-23.2/sql/operators.md b/src/current/_includes/cockroach-generated/release-23.2/sql/operators.md new file mode 100644 index 00000000000..dde5d133a6c --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-23.2/sql/operators.md @@ -0,0 +1,635 @@ + + + + + +
#Return
int # intint
varbit # varbitvarbit
+ + + + +
#>Return
jsonb #> string[]jsonb
+ + + + +
#>>Return
jsonb #>> string[]string
+ + + + + + + + + +
%Return
decimal % decimaldecimal
decimal % intdecimal
float % floatfloat
int % decimaldecimal
int % intint
string % stringbool
+ + + + + + +
&Return
inet & inetinet
int & intint
varbit & varbitvarbit
+ + + + + + + + + +
&&Return
anyelement && anyelementbool
box2d && box2dbool
box2d && geometrybool
geometry && box2dbool
geometry && geometrybool
inet && inetbool
+ + + + + + + + + + + + + + +
*Return
decimal * decimaldecimal
decimal * intdecimal
decimal * intervalinterval
float * floatfloat
float * intervalinterval
int * decimaldecimal
int * intint
int * intervalinterval
interval * decimalinterval
interval * floatinterval
interval * intinterval
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Return
+decimaldecimal
+floatfloat
+intint
+intervalinterval
date + intdate
date + intervaltimestamp
date + timetimestamp
date + timetztimestamptz
decimal + decimaldecimal
decimal + intdecimal
decimal + pg_lsnpg_lsn
float + floatfloat
inet + intinet
int + datedate
int + decimaldecimal
int + inetinet
int + intint
interval + datetimestamp
interval + intervalinterval
interval + timetime
interval + timestamptimestamp
interval + timestamptztimestamptz
interval + timetztimetz
pg_lsn + decimalpg_lsn
time + datetimestamp
time + intervaltime
timestamp + intervaltimestamp
timestamptz + intervaltimestamptz
timetz + datetimestamptz
timetz + intervaltimetz
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
-Return
-decimaldecimal
-floatfloat
-intint
-intervalinterval
date - dateint
date - intdate
date - intervaltimestamp
date - timetimestamp
decimal - decimaldecimal
decimal - intdecimal
float - floatfloat
inet - inetint
inet - intinet
int - decimaldecimal
int - intint
interval - intervalinterval
jsonb - intjsonb
jsonb - stringjsonb
jsonb - string[]jsonb
pg_lsn - decimalpg_lsn
pg_lsn - pg_lsndecimal
time - intervaltime
time - timeinterval
timestamp - intervaltimestamp
timestamp - timestampinterval
timestamp - timestamptzinterval
timestamptz - intervaltimestamptz
timestamptz - timestampinterval
timestamptz - timestamptzinterval
timetz - intervaltimetz
+ + + + + +
->Return
jsonb -> intjsonb
jsonb -> stringjsonb
+ + + + + +
->>Return
jsonb ->> intstring
jsonb ->> stringstring
+ + + + + + + + + + +
/Return
decimal / decimaldecimal
decimal / intdecimal
float / floatfloat
int / decimaldecimal
int / intdecimal
interval / floatinterval
interval / intinterval
+ + + + + + + + +
//Return
decimal // decimaldecimal
decimal // intdecimal
float // floatfloat
int // decimaldecimal
int // intint
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
<Return
anyenum < anyenumbool
bool < boolbool
bool[] < bool[]bool
box2d < box2dbool
bpchar < bpcharbool
bytes < bytesbool
bytes[] < bytes[]bool
collatedstring < collatedstringbool
date < datebool
date < timestampbool
date < timestamptzbool
date[] < date[]bool
decimal < decimalbool
decimal < floatbool
decimal < intbool
decimal[] < decimal[]bool
float < decimalbool
float < floatbool
float < intbool
float[] < float[]bool
geography < geographybool
geometry < geometrybool
inet < inetbool
inet[] < inet[]bool
int < decimalbool
int < floatbool
int < intbool
int < oidbool
int[] < int[]bool
interval < intervalbool
interval[] < interval[]bool
jsonb < jsonbbool
oid < intbool
oid < oidbool
pg_lsn < pg_lsnbool
refcursor < refcursorbool
string < stringbool
string[] < string[]bool
time < timebool
time < timetzbool
time[] < time[]bool
timestamp < datebool
timestamp < timestampbool
timestamp < timestamptzbool
timestamp[] < timestamp[]bool
timestamptz < datebool
timestamptz < timestampbool
timestamptz < timestamptzbool
timestamptz < timestamptzbool
timetz < timebool
timetz < timetzbool
tuple < tuplebool
uuid < uuidbool
uuid[] < uuid[]bool
varbit < varbitbool
+ + + + + + +
<<Return
inet << inetbool
int << intint
varbit << intvarbit
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
<=Return
anyenum <= anyenumbool
bool <= boolbool
bool[] <= bool[]bool
box2d <= box2dbool
bpchar <= bpcharbool
bytes <= bytesbool
bytes[] <= bytes[]bool
collatedstring <= collatedstringbool
date <= datebool
date <= timestampbool
date <= timestamptzbool
date[] <= date[]bool
decimal <= decimalbool
decimal <= floatbool
decimal <= intbool
decimal[] <= decimal[]bool
float <= decimalbool
float <= floatbool
float <= intbool
float[] <= float[]bool
geography <= geographybool
geometry <= geometrybool
inet <= inetbool
inet[] <= inet[]bool
int <= decimalbool
int <= floatbool
int <= intbool
int <= oidbool
int[] <= int[]bool
interval <= intervalbool
interval[] <= interval[]bool
jsonb <= jsonbbool
oid <= intbool
oid <= oidbool
pg_lsn <= pg_lsnbool
refcursor <= refcursorbool
string <= stringbool
string[] <= string[]bool
time <= timebool
time <= timetzbool
time[] <= time[]bool
timestamp <= datebool
timestamp <= timestampbool
timestamp <= timestamptzbool
timestamp[] <= timestamp[]bool
timestamptz <= datebool
timestamptz <= timestampbool
timestamptz <= timestamptzbool
timestamptz <= timestamptzbool
timetz <= timebool
timetz <= timetzbool
tuple <= tuplebool
uuid <= uuidbool
uuid[] <= uuid[]bool
varbit <= varbitbool
+ + + + + +
<@Return
anyelement <@ anyelementbool
jsonb <@ jsonbbool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
=Return
anyenum = anyenumbool
bool = boolbool
bool[] = bool[]bool
box2d = box2dbool
bpchar = bpcharbool
bytes = bytesbool
bytes[] = bytes[]bool
collatedstring = collatedstringbool
date = datebool
date = timestampbool
date = timestamptzbool
date[] = date[]bool
decimal = decimalbool
decimal = floatbool
decimal = intbool
decimal[] = decimal[]bool
float = decimalbool
float = floatbool
float = intbool
float[] = float[]bool
geography = geographybool
geometry = geometrybool
inet = inetbool
inet[] = inet[]bool
int = decimalbool
int = floatbool
int = intbool
int = oidbool
int[] = int[]bool
interval = intervalbool
interval[] = interval[]bool
jsonb = jsonbbool
oid = intbool
oid = oidbool
pg_lsn = pg_lsnbool
refcursor = refcursorbool
string = stringbool
string[] = string[]bool
time = timebool
time = timetzbool
time[] = time[]bool
timestamp = datebool
timestamp = timestampbool
timestamp = timestamptzbool
timestamp[] = timestamp[]bool
timestamptz = datebool
timestamptz = timestampbool
timestamptz = timestamptzbool
timestamptz = timestamptzbool
timetz = timebool
timetz = timetzbool
tsquery = tsquerybool
tsvector = tsvectorbool
tuple = tuplebool
uuid = uuidbool
uuid[] = uuid[]bool
varbit = varbitbool
+ + + + + + +
>>Return
inet >> inetbool
int >> intint
varbit >> intvarbit
+ + + + +
?Return
jsonb ? stringbool
+ + + + +
?&Return
jsonb ?& string[]bool
+ + + + +
?|Return
jsonb ?| string[]bool
+ + + + + +
@>Return
anyelement @> anyelementbool
jsonb @> jsonbbool
+ + + + + +
@@Return
tsquery @@ tsvectorbool
tsvector @@ tsquerybool
+ + + + +
ILIKEReturn
string ILIKE stringbool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
INReturn
anyenum IN tuplebool
bool IN tuplebool
box2d IN tuplebool
bpchar IN tuplebool
bytes IN tuplebool
collatedstring IN tuplebool
date IN tuplebool
decimal IN tuplebool
float IN tuplebool
geography IN tuplebool
geometry IN tuplebool
inet IN tuplebool
int IN tuplebool
interval IN tuplebool
jsonb IN tuplebool
oid IN tuplebool
pg_lsn IN tuplebool
refcursor IN tuplebool
string IN tuplebool
time IN tuplebool
timestamp IN tuplebool
timestamptz IN tuplebool
timetz IN tuplebool
tuple IN tuplebool
uuid IN tuplebool
varbit IN tuplebool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
IS NOT DISTINCT FROMReturn
anyelement IS NOT DISTINCT FROM unknownbool
anyenum IS NOT DISTINCT FROM anyenumbool
bool IS NOT DISTINCT FROM boolbool
bool[] IS NOT DISTINCT FROM bool[]bool
box2d IS NOT DISTINCT FROM box2dbool
bpchar IS NOT DISTINCT FROM bpcharbool
bytes IS NOT DISTINCT FROM bytesbool
bytes[] IS NOT DISTINCT FROM bytes[]bool
collatedstring IS NOT DISTINCT FROM collatedstringbool
date IS NOT DISTINCT FROM datebool
date IS NOT DISTINCT FROM timestampbool
date IS NOT DISTINCT FROM timestamptzbool
date[] IS NOT DISTINCT FROM date[]bool
decimal IS NOT DISTINCT FROM decimalbool
decimal IS NOT DISTINCT FROM floatbool
decimal IS NOT DISTINCT FROM intbool
decimal[] IS NOT DISTINCT FROM decimal[]bool
float IS NOT DISTINCT FROM decimalbool
float IS NOT DISTINCT FROM floatbool
float IS NOT DISTINCT FROM intbool
float[] IS NOT DISTINCT FROM float[]bool
geography IS NOT DISTINCT FROM geographybool
geometry IS NOT DISTINCT FROM geometrybool
inet IS NOT DISTINCT FROM inetbool
inet[] IS NOT DISTINCT FROM inet[]bool
int IS NOT DISTINCT FROM decimalbool
int IS NOT DISTINCT FROM floatbool
int IS NOT DISTINCT FROM intbool
int IS NOT DISTINCT FROM oidbool
int[] IS NOT DISTINCT FROM int[]bool
interval IS NOT DISTINCT FROM intervalbool
interval[] IS NOT DISTINCT FROM interval[]bool
jsonb IS NOT DISTINCT FROM jsonbbool
oid IS NOT DISTINCT FROM intbool
oid IS NOT DISTINCT FROM oidbool
pg_lsn IS NOT DISTINCT FROM pg_lsnbool
refcursor IS NOT DISTINCT FROM refcursorbool
string IS NOT DISTINCT FROM stringbool
string[] IS NOT DISTINCT FROM string[]bool
time IS NOT DISTINCT FROM timebool
time IS NOT DISTINCT FROM timetzbool
time[] IS NOT DISTINCT FROM time[]bool
timestamp IS NOT DISTINCT FROM datebool
timestamp IS NOT DISTINCT FROM timestampbool
timestamp IS NOT DISTINCT FROM timestamptzbool
timestamp[] IS NOT DISTINCT FROM timestamp[]bool
timestamptz IS NOT DISTINCT FROM datebool
timestamptz IS NOT DISTINCT FROM timestampbool
timestamptz IS NOT DISTINCT FROM timestamptzbool
timestamptz IS NOT DISTINCT FROM timestamptzbool
timetz IS NOT DISTINCT FROM timebool
timetz IS NOT DISTINCT FROM timetzbool
tsquery IS NOT DISTINCT FROM tsquerybool
tsvector IS NOT DISTINCT FROM tsvectorbool
tuple IS NOT DISTINCT FROM tuplebool
unknown IS NOT DISTINCT FROM unknownbool
unknown IS NOT DISTINCT FROM voidbool
uuid IS NOT DISTINCT FROM uuidbool
uuid[] IS NOT DISTINCT FROM uuid[]bool
varbit IS NOT DISTINCT FROM varbitbool
void IS NOT DISTINCT FROM unknownbool
+ + + + +
LIKEReturn
string LIKE stringbool
+ + + + +
SIMILAR TOReturn
string SIMILAR TO stringbool
+ + + + + + + + +
^Return
decimal ^ decimaldecimal
decimal ^ intdecimal
float ^ floatfloat
int ^ decimaldecimal
int ^ intint
+ + + + + + +
|Return
inet | inetinet
int | intint
varbit | varbitvarbit
+ + + + + +
|/Return
|/decimaldecimal
|/floatfloat
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
||Return
bool || bool[]bool[]
bool || stringstring
bool[] || boolbool[]
bool[] || bool[]bool[]
box2d || box2dbox2d
box2d || stringstring
bytes || bytesbytes
bytes || bytes[]bytes[]
bytes[] || bytesbytes[]
bytes[] || bytes[]bytes[]
date || date[]date[]
date || stringstring
date[] || datedate[]
date[] || date[]date[]
decimal || decimal[]decimal[]
decimal || stringstring
decimal[] || decimaldecimal[]
decimal[] || decimal[]decimal[]
float || float[]float[]
float || stringstring
float[] || floatfloat[]
float[] || float[]float[]
geography || geographygeography
geography || stringstring
geometry || geometrygeometry
geometry || stringstring
inet || inet[]inet[]
inet || stringstring
inet[] || inetinet[]
inet[] || inet[]inet[]
int || int[]int[]
int || stringstring
int[] || intint[]
int[] || int[]int[]
interval || interval[]interval[]
interval || stringstring
interval[] || intervalinterval[]
interval[] || interval[]interval[]
jsonb || jsonbjsonb
jsonb || stringstring
oid || oidoid
oid || stringstring
pg_lsn || pg_lsnpg_lsn
pg_lsn || stringstring
refcursor || refcursorrefcursor
refcursor || stringstring
string || boolstring
string || box2dstring
string || datestring
string || decimalstring
string || floatstring
string || geographystring
string || geometrystring
string || inetstring
string || intstring
string || intervalstring
string || jsonbstring
string || oidstring
string || pg_lsnstring
string || refcursorstring
string || stringstring
string || string[]string[]
string || timestring
string || timestampstring
string || timestamptzstring
string || timetzstring
string || tuplestring
string || uuidstring
string || varbitstring
string[] || stringstring[]
string[] || string[]string[]
time || stringstring
time || time[]time[]
time[] || timetime[]
time[] || time[]time[]
timestamp || stringstring
timestamp || timestamp[]timestamp[]
timestamp[] || timestamptimestamp[]
timestamp[] || timestamp[]timestamp[]
timestamptz || stringstring
timestamptz || timestamptztimestamptz
timestamptz || timestamptztimestamptz
timestamptz || timestamptztimestamptz
timetz || stringstring
timetz || timetztimetz
tuple || stringstring
uuid || stringstring
uuid || uuid[]uuid[]
uuid[] || uuiduuid[]
uuid[] || uuid[]uuid[]
varbit || stringstring
varbit || varbitvarbit
+ + + + + +
||/Return
||/decimaldecimal
||/floatfloat
+ + + + + + + + + + + +
~Return
~inetinet
~intint
~varbitvarbit
box2d ~ box2dbool
box2d ~ geometrybool
geometry ~ box2dbool
geometry ~ geometrybool
string ~ stringbool
+ + + + +
~*Return
string ~* stringbool
diff --git a/src/current/_includes/cockroach-generated/release-23.2/sql/window_functions.md b/src/current/_includes/cockroach-generated/release-23.2/sql/window_functions.md new file mode 100644 index 00000000000..e1032ff82de --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-23.2/sql/window_functions.md @@ -0,0 +1,413 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cume_dist() → float

Calculates the relative rank of the current row: (number of rows preceding or peer with current row) / (total rows).

+
Immutable
dense_rank() → int

Calculates the rank of the current row without gaps; this function counts peer groups.

+
Immutable
first_value(val: bool) → bool

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: bytes) → bytes

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: date) → date

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: decimal) → decimal

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: float) → float

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: inet) → inet

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: int) → int

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: interval) → interval

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: string) → string

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: time) → time

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timestamp) → timestamp

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timestamptz) → timestamptz

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: uuid) → uuid

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: box2d) → box2d

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: geography) → geography

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: geometry) → geometry

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: jsonb) → jsonb

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: oid) → oid

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: pg_lsn) → pg_lsn

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: refcursor) → refcursor

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timetz) → timetz

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: varbit) → varbit

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
lag(val: bool) → bool

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: bool, n: int) → bool

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: bool, n: int, default: bool) → bool

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: bytes) → bytes

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: bytes, n: int) → bytes

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: bytes, n: int, default: bytes) → bytes

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: date) → date

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: date, n: int) → date

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: date, n: int, default: date) → date

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: decimal) → decimal

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: decimal, n: int) → decimal

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: decimal, n: int, default: decimal) → decimal

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: float) → float

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: float, n: int) → float

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: float, n: int, default: float) → float

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: inet) → inet

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: inet, n: int) → inet

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: inet, n: int, default: inet) → inet

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: int) → int

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: int, n: int) → int

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: int, n: int, default: int) → int

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: interval) → interval

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: interval, n: int) → interval

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: interval, n: int, default: interval) → interval

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: string) → string

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: string, n: int) → string

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: string, n: int, default: string) → string

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: time) → time

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: time, n: int) → time

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: time, n: int, default: time) → time

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timestamp) → timestamp

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timestamp, n: int, default: timestamp) → timestamp

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timestamptz) → timestamptz

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timestamptz, n: int, default: timestamptz) → timestamptz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: uuid) → uuid

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: uuid, n: int) → uuid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: uuid, n: int, default: uuid) → uuid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: box2d) → box2d

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: box2d, n: int) → box2d

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: box2d, n: int, default: box2d) → box2d

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: geography) → geography

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: geography, n: int) → geography

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: geography, n: int, default: geography) → geography

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: geometry) → geometry

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: geometry, n: int) → geometry

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: geometry, n: int, default: geometry) → geometry

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: jsonb) → jsonb

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: jsonb, n: int, default: jsonb) → jsonb

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: oid) → oid

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: oid, n: int) → oid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: oid, n: int, default: oid) → oid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: pg_lsn) → pg_lsn

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: pg_lsn, n: int, default: pg_lsn) → pg_lsn

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: refcursor) → refcursor

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: refcursor, n: int, default: refcursor) → refcursor

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timetz) → timetz

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timetz, n: int) → timetz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timetz, n: int, default: timetz) → timetz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: varbit) → varbit

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: varbit, n: int) → varbit

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: varbit, n: int, default: varbit) → varbit

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
last_value(val: bool) → bool

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: bytes) → bytes

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: date) → date

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: decimal) → decimal

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: float) → float

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: inet) → inet

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: int) → int

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: interval) → interval

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: string) → string

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: time) → time

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timestamp) → timestamp

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timestamptz) → timestamptz

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: uuid) → uuid

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: box2d) → box2d

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: geography) → geography

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: geometry) → geometry

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: jsonb) → jsonb

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: oid) → oid

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: pg_lsn) → pg_lsn

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: refcursor) → refcursor

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timetz) → timetz

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: varbit) → varbit

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
lead(val: bool) → bool

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: bool, n: int) → bool

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: bool, n: int, default: bool) → bool

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: bytes) → bytes

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: bytes, n: int) → bytes

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: bytes, n: int, default: bytes) → bytes

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: date) → date

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: date, n: int) → date

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: date, n: int, default: date) → date

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: decimal) → decimal

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: decimal, n: int) → decimal

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: decimal, n: int, default: decimal) → decimal

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: float) → float

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: float, n: int) → float

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: float, n: int, default: float) → float

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: inet) → inet

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: inet, n: int) → inet

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: inet, n: int, default: inet) → inet

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: int) → int

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: int, n: int) → int

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: int, n: int, default: int) → int

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: interval) → interval

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: interval, n: int) → interval

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: interval, n: int, default: interval) → interval

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: string) → string

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: string, n: int) → string

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: string, n: int, default: string) → string

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: time) → time

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: time, n: int) → time

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: time, n: int, default: time) → time

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timestamp) → timestamp

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timestamp, n: int, default: timestamp) → timestamp

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timestamptz) → timestamptz

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timestamptz, n: int, default: timestamptz) → timestamptz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: uuid) → uuid

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: uuid, n: int) → uuid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: uuid, n: int, default: uuid) → uuid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: box2d) → box2d

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: box2d, n: int) → box2d

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: box2d, n: int, default: box2d) → box2d

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: geography) → geography

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: geography, n: int) → geography

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: geography, n: int, default: geography) → geography

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: geometry) → geometry

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: geometry, n: int) → geometry

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: geometry, n: int, default: geometry) → geometry

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: jsonb) → jsonb

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: jsonb, n: int, default: jsonb) → jsonb

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: oid) → oid

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: oid, n: int) → oid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: oid, n: int, default: oid) → oid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: pg_lsn) → pg_lsn

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: pg_lsn, n: int, default: pg_lsn) → pg_lsn

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: refcursor) → refcursor

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: refcursor, n: int, default: refcursor) → refcursor

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timetz) → timetz

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timetz, n: int) → timetz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timetz, n: int, default: timetz) → timetz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: varbit) → varbit

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: varbit, n: int) → varbit

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: varbit, n: int, default: varbit) → varbit

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
nth_value(val: bool, n: int) → bool

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: bytes, n: int) → bytes

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: date, n: int) → date

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: decimal, n: int) → decimal

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: float, n: int) → float

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: inet, n: int) → inet

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: int, n: int) → int

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: interval, n: int) → interval

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: string, n: int) → string

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: time, n: int) → time

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: uuid, n: int) → uuid

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: box2d, n: int) → box2d

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: geography, n: int) → geography

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: geometry, n: int) → geometry

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: oid, n: int) → oid

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timetz, n: int) → timetz

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: varbit, n: int) → varbit

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
ntile(n: int) → int

Calculates an integer ranging from 1 to n, dividing the partition as equally as possible.

+
Immutable
percent_rank() → float

Calculates the relative rank of the current row: (rank - 1) / (total rows - 1).

+
Immutable
rank() → int

Calculates the rank of the current row with gaps; same as row_number of its first peer.

+
Immutable
row_number() → int

Calculates the number of the current row within its partition, counting from 1.

+
Immutable
+ diff --git a/src/current/_includes/cockroach-generated/release-24.1/eventlog.md b/src/current/_includes/cockroach-generated/release-24.1/eventlog.md new file mode 100644 index 00000000000..f7012dc84be --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-24.1/eventlog.md @@ -0,0 +1,3290 @@ +Certain notable events are reported using a structured format. +Commonly, these notable events are also copied to the table +`system.eventlog`, unless the cluster setting +`server.eventlog.enabled` is unset. + +Additionally, notable events are copied to specific external logging +channels in log messages, where they can be collected for further processing. + +The sections below document the possible notable event types +in this version of CockroachDB. For each event type, a table +documents the possible fields. A field may be omitted from +an event if its value is empty or zero. + +A field is also considered "Sensitive" if it may contain +application-specific information or personally identifiable information (PII). In that case, +the copy of the event sent to the external logging channel +will contain redaction markers in a format that is compatible +with the redaction facilities in [`cockroach debug zip`](cockroach-debug-zip.html) +and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html), +provided the `redactable` functionality is enabled on the logging sink. + +Events not documented on this page will have an unstructured format in log messages. + +## Cluster-level events + +Events in this category pertain to an entire cluster and are +not relative to any particular tenant. + +In a multi-tenant setup, the `system.eventlog` table for individual +tenants cannot contain a copy of cluster-level events; conversely, +the `system.eventlog` table in the system tenant cannot contain the +SQL-level events for individual tenants. + +Events in this category are logged to the `OPS` channel. 
+ + +### `certs_reload` + +An event of type `certs_reload` is recorded when the TLS certificates are +reloaded/rotated from disk. + + +| Field | Description | Sensitive | +|--|--|--| +| `Success` | Whether the operation completed without errors. | no | +| `ErrorMessage` | If an error was encountered, the text of the error. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `node_decommissioned` + +An event of type `node_decommissioned` is recorded when a node is marked as +decommissioned. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_decommissioning` + +An event of type `node_decommissioning` is recorded when a node is marked as +decommissioning. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_join` + +An event of type `node_join` is recorded when a node joins the cluster. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. 
| no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_recommissioned` + +An event of type `node_recommissioned` is recorded when a decommissioning node is +recommissioned. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_restart` + +An event of type `node_restart` is recorded when an existing node rejoins the cluster +after being offline. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_shutdown_connection_timeout` + +An event of type `node_shutdown_connection_timeout` is recorded when SQL connections remain open +during shutdown, after waiting for the server.shutdown.connections.timeout +to transpire. + + +| Field | Description | Sensitive | +|--|--|--| +| `Detail` | The detailed message, meant to be a human-understandable explanation. | no | +| `ConnectionsRemaining` | The number of connections still open after waiting for the client to close them. | no | +| `TimeoutMillis` | The amount of time the server waited for the client to close the connections, defined by server.shutdown.connections.timeout. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_shutdown_transaction_timeout` + +An event of type `node_shutdown_transaction_timeout` is recorded when SQL transactions remain open +during shutdown, after waiting for the server.shutdown.transactions.timeout +to transpire. + + +| Field | Description | Sensitive | +|--|--|--| +| `Detail` | The detailed message, meant to be a human-understandable explanation. | no | +| `ConnectionsRemaining` | The number of connections still running SQL transactions after waiting for the client to end them. | no | +| `TimeoutMillis` | The amount of time the server waited for the client to close the connections, defined by server.shutdown.transactions.timeout. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `tenant_shared_service_start` + +An event of type `tenant_shared_service_start` is recorded when a tenant server +is started inside the same process as the KV layer. + + +| Field | Description | Sensitive | +|--|--|--| +| `OK` | Whether the startup was successful. | no | +| `ErrorText` | If the startup failed, the text of the error. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `NodeID` | The node ID where the event was originated. | no | +| `TenantID` | The ID of the tenant owning the service. | no | +| `InstanceID` | The ID of the server instance. | no | +| `TenantName` | The name of the tenant at the time the event was emitted. | yes | + +### `tenant_shared_service_stop` + +An event of type `tenant_shared_service_stop` is recorded when a tenant server +is shut down inside the same process as the KV layer. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `TenantID` | The ID of the tenant owning the service. | no | +| `InstanceID` | The ID of the server instance. | no | +| `TenantName` | The name of the tenant at the time the event was emitted. | yes | + +## Debugging events + +Events in this category pertain to debugging operations performed by +operators or (more commonly) Cockroach Labs employees. These operations can +e.g. directly access and mutate internal state, breaking system invariants. + +Events in this category are logged to the `OPS` channel. + + +### `debug_recover_replica` + +An event of type `debug_recover_replica` is recorded when unsafe loss of quorum recovery is performed. + + +| Field | Description | Sensitive | +|--|--|--| +| `RangeID` | | no | +| `StoreID` | | no | +| `SurvivorReplicaID` | | no | +| `UpdatedReplicaID` | | no | +| `StartKey` | | yes | +| `EndKey` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event originated. | no | +| `User` | The user which performed the operation. 
| yes | + +### `debug_send_kv_batch` + +An event of type `debug_send_kv_batch` is recorded when an arbitrary KV BatchRequest is submitted +to the cluster via the `debug send-kv-batch` CLI command. + + +| Field | Description | Sensitive | +|--|--|--| +| `BatchRequest` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event originated. | no | +| `User` | The user which performed the operation. | yes | + +## Health events + +Events in this category pertain to the health of one or more servers. + +Events in this category are logged to the `HEALTH` channel. + + +### `runtime_stats` + +An event of type `runtime_stats` is recorded every 10 seconds as server health metrics. + + +| Field | Description | Sensitive | +|--|--|--| +| `MemRSSBytes` | The process resident set size. Expressed as bytes. | no | +| `GoroutineCount` | The number of goroutines. | no | +| `MemStackSysBytes` | The stack system memory used. Expressed as bytes. | no | +| `GoAllocBytes` | The memory allocated by Go. Expressed as bytes. | no | +| `GoTotalBytes` | The total memory allocated by Go but not released. Expressed as bytes. | no | +| `GoStatsStaleness` | The staleness of the Go memory statistics. Expressed in seconds. | no | +| `HeapFragmentBytes` | The amount of heap fragmentation. Expressed as bytes. | no | +| `HeapReservedBytes` | The amount of heap reserved. Expressed as bytes. | no | +| `HeapReleasedBytes` | The amount of heap released. Expressed as bytes. | no | +| `CGoAllocBytes` | The memory allocated outside of Go. Expressed as bytes. | no | +| `CGoTotalBytes` | The total memory allocated outside of Go but not released. Expressed as bytes. | no | +| `CGoCallRate` | The total number of calls outside of Go over time. Expressed as operations per second. 
| no | +| `CPUUserPercent` | The user CPU percentage. | no | +| `CPUSysPercent` | The system CPU percentage. | no | +| `GCPausePercent` | The GC pause percentage. | no | +| `GCRunCount` | The total number of GC runs. | no | +| `NetHostRecvBytes` | The bytes received on all network interfaces since this process started. | no | +| `NetHostSendBytes` | The bytes sent on all network interfaces since this process started. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Job events + +Events in this category pertain to long-running jobs that are orchestrated by +a node's job registry. These system processes can create and/or modify stored +objects during the course of their execution. + +A job might choose to emit multiple events during its execution when +transitioning from one "state" to another. +Egs: IMPORT/RESTORE will emit events on job creation and successful +completion. If the job fails, events will be emitted on job creation, +failure, and successful revert. + +Events in this category are logged to the `OPS` channel. + + +### `import` + +An event of type `import` is recorded when an import job is created and successful completion. +If the job fails, events will be emitted on job creation, failure, and +successful revert. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `JobID` | The ID of the job that triggered the event. | no | +| `JobType` | The type of the job that triggered the event. | no | +| `Description` | A description of the job that triggered the event. Some jobs populate the description with an approximate representation of the SQL statement run to create the job. 
| yes | +| `User` | The user account that triggered the event. | yes | +| `DescriptorIDs` | The object descriptors affected by the job. Set to zero for operations that don't affect descriptors. | yes | +| `Status` | The status of the job that triggered the event. This allows the job to indicate which phase execution it is in when the event is triggered. | no | + +### `restore` + +An event of type `restore` is recorded when a restore job is created and successful completion. +If the job fails, events will be emitted on job creation, failure, and +successful revert. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `JobID` | The ID of the job that triggered the event. | no | +| `JobType` | The type of the job that triggered the event. | no | +| `Description` | A description of the job that triggered the event. Some jobs populate the description with an approximate representation of the SQL statement run to create the job. | yes | +| `User` | The user account that triggered the event. | yes | +| `DescriptorIDs` | The object descriptors affected by the job. Set to zero for operations that don't affect descriptors. | yes | +| `Status` | The status of the job that triggered the event. This allows the job to indicate which phase execution it is in when the event is triggered. | no | + +## Miscellaneous SQL events + +Events in this category report miscellaneous SQL events. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these miscellaneous events are +preserved in each tenant's own system.eventlog table. + +Events in this category are logged to the `OPS` channel. + + +### `set_cluster_setting` + +An event of type `set_cluster_setting` is recorded when a cluster setting is changed. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `SettingName` | The name of the affected cluster setting. | no | +| `Value` | The new value of the cluster setting. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `set_tenant_cluster_setting` + +An event of type `set_tenant_cluster_setting` is recorded when a cluster setting override +is changed, either for another tenant or for all tenants. + + +| Field | Description | Sensitive | +|--|--|--| +| `SettingName` | The name of the affected cluster setting. | no | +| `Value` | The new value of the cluster setting. | yes | +| `TenantId` | The target Tenant ID. Empty if targeting all tenants. | no | +| `AllTenants` | Whether the override applies to all tenants. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. 
Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +## SQL Access Audit Events + +Events in this category are generated when a table has been +marked as audited via `ALTER TABLE ... EXPERIMENTAL_AUDIT SET`. + +Note: These events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SENSITIVE_ACCESS` channel. + + +### `admin_query` + +An event of type `admin_query` is recorded when a user with admin privileges (the user +is directly or indirectly a member of the admin role) executes a query. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. 
| no | + +### `role_based_audit_event` + +An event of type `role_based_audit_event` is an audit event recorded when an executed query belongs to a user whose role +membership(s) correspond to any role that is enabled to emit an audit log via the sql.log.user_audit +cluster setting. + + +| Field | Description | Sensitive | +|--|--|--| +| `Role` | The configured audit role that emitted this log. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. 
| partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `sensitive_table_access` + +An event of type `sensitive_table_access` is recorded when an access is performed to +a table marked as audited. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table being audited. | yes | +| `AccessMode` | How the table was accessed (r=read / rw=read/write). | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +## SQL Execution Log + +Events in this category report executed queries. + +Note: These events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SQL_EXEC` channel. + + +### `query_execute` + +An event of type `query_execute` is recorded when a query is executed, +and the cluster setting `sql.log.all_statements.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +## SQL Logical Schema Changes + +Events in this category pertain to DDL (Data Definition Language) +operations performed by SQL statements that modify the SQL logical +schema. + +They are relative to a particular SQL tenant. 
+In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `SQL_SCHEMA` channel. + + +### `alter_database_add_region` + +An event of type `alter_database_add_region` is recorded when a region is added to a database. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | yes | +| `RegionName` | The region being added. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_drop_region` + +An event of type `alter_database_drop_region` is recorded when a region is dropped from a database. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | yes | +| `RegionName` | The region being dropped. 
| yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_placement` + +An event of type `alter_database_placement` is recorded when the database placement is modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | yes | +| `Placement` | The new placement policy. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. 
The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_primary_region` + +An event of type `alter_database_primary_region` is recorded when a primary region is added/modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | yes | +| `PrimaryRegionName` | The new primary region. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_set_zone_config_extension` + +An event of type `alter_database_set_zone_config_extension` is recorded when a zone config extension is changed. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + +### `alter_database_survival_goal` + +An event of type `alter_database_survival_goal` is recorded when the survival goal is modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. 
| yes | +| `SurvivalGoal` | The new survival goal | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_function_options` + +An event of type `alter_function_options` is recorded when a user-defined function's options are +altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the affected function. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_index` + +An event of type `alter_index` is recorded when an index is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | yes | +| `IndexName` | The name of the affected index. | yes | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_index_visible` + +An event of type `alter_index_visible` is recorded when an index's visibility is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | yes | +| `IndexName` | The name of the affected index. | yes | +| `NotVisible` | Set true if index is not visible. NOTE: THIS FIELD IS DEPRECATED in favor of invisibility. | no | +| `Invisibility` | The new invisibility of the affected index. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `alter_sequence` + +An event of type `alter_sequence` is recorded when a sequence is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the affected sequence. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_table` + +An event of type `alter_table` is recorded when a table is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | yes | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update, if any. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_type` + +An event of type `alter_type` is recorded when a user-defined type is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. 
Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_column` + +An event of type `comment_on_column` is recorded when a column is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected column. | yes | +| `ColumnName` | The affected column. | yes | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `comment_on_constraint` + +An event of type `comment_on_constraint` is recorded when a constraint is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected constraint. | yes | +| `ConstraintName` | The name of the affected constraint. | yes | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_database` + +An event of type `comment_on_database` is recorded when a database is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | yes | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_index` + +An event of type `comment_on_index` is recorded when an index is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | yes | +| `IndexName` | The name of the affected index. | yes | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_schema` + + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | Name of the affected schema. | yes | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_table` + +An event of type `comment_on_table` is recorded when a table is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | yes | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_database` + +An event of type `create_database` is recorded when a database is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the new database. 
| yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_function` + +An event of type `create_function` is recorded when a user-defined function is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the created function. | yes | +| `IsReplace` | If the new function is a replace of an existing function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_index` + +An event of type `create_index` is recorded when an index is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the new index. | yes | +| `IndexName` | The name of the new index. | yes | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_schema` + +An event of type `create_schema` is recorded when a schema is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the new schema. | yes | +| `Owner` | The name of the owner for the new schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_sequence` + +An event of type `create_sequence` is recorded when a sequence is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the new sequence. | yes | +| `Owner` | The name of the owner for the new sequence. 
| yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_statistics` + +An event of type `create_statistics` is recorded when statistics are collected for a +table. + +Events of this type are only collected when the cluster setting +`sql.stats.post_events.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table for which the statistics were created. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_table` + +An event of type `create_table` is recorded when a table is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the new table. | yes | +| `Owner` | The name of the owner for the new table. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_type` + +An event of type `create_type` is recorded when a user-defined type is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the new type. | yes | +| `Owner` | The name of the owner for the new type. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_view` + +An event of type `create_view` is recorded when a view is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `ViewName` | The name of the new view. | yes | +| `Owner` | The name of the owner of the new view. | yes | +| `ViewQuery` | The SQL selection clause used to define the view. 
| yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_database` + +An event of type `drop_database` is recorded when a database is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | yes | +| `DroppedSchemaObjects` | The names of the schemas dropped by a cascade operation. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_function` + +An event of type `drop_function` is recorded when a user-defined function is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the dropped function. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_index` + +An event of type `drop_index` is recorded when an index is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | yes | +| `IndexName` | The name of the affected index. | yes | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_schema` + +An event of type `drop_schema` is recorded when a schema is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. 
| yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_sequence` + +An event of type `drop_sequence` is recorded when a sequence is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the affected sequence. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_table` + +An event of type `drop_table` is recorded when a table is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | yes | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_type` + +An event of type `drop_type` is recorded when a user-defined type is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_view` + +An event of type `drop_view` is recorded when a view is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `ViewName` | The name of the affected view. | yes | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `finish_schema_change` + +An event of type `finish_schema_change` is recorded when a previously initiated schema +change has completed. + + +| Field | Description | Sensitive | +|--|--|--| +| `LatencyNanos` | The amount of time the schema change job took to complete. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `finish_schema_change_rollback` + +An event of type `finish_schema_change_rollback` is recorded when a previously +initiated schema change rollback has completed. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `LatencyNanos` | The amount of time the schema change job took to roll back. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `force_delete_table_data_entry` + + + +| Field | Description | Sensitive | +|--|--|--| +| `DescriptorID` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `rename_database` + +An event of type `rename_database` is recorded when a database is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The old name of the affected database. | yes | +| `NewDatabaseName` | The new name of the affected database. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_function` + +An event of type `rename_function` is recorded when a user-defined function is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | The old name of the affected function. | yes | +| `NewFunctionName` | The new name of the affected function. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_schema` + +An event of type `rename_schema` is recorded when a schema is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The old name of the affected schema. | yes | +| `NewSchemaName` | The new name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_table` + +An event of type `rename_table` is recorded when a table, sequence or view is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The old name of the affected table. | yes | +| `NewTableName` | The new name of the affected table. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_type` + +An event of type `rename_type` is recorded when a user-defined type is renamed. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The old name of the affected type. | yes | +| `NewTypeName` | The new name of the affected type. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `reverse_schema_change` + +An event of type `reverse_schema_change` is recorded when an in-progress schema change +encounters a problem and is reversed. + + +| Field | Description | Sensitive | +|--|--|--| +| `Error` | The error encountered that caused the schema change to be reversed. The specific format of the error is variable and can change across releases without warning. | yes | +| `SQLSTATE` | The SQLSTATE code for the error. | no | +| `LatencyNanos` | The amount of time the schema change job took before being reverted. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. 
Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `set_schema` + +An event of type `set_schema` is recorded when a table, view, sequence or type's schema is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DescriptorName` | The old name of the affected descriptor. | yes | +| `NewDescriptorName` | The new name of the affected descriptor. | yes | +| `DescriptorType` | The descriptor type being changed (table, view, sequence, type). | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `truncate_table` + +An event of type `truncate_table` is recorded when a table is truncated. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_delete_descriptor` + +An event of type `unsafe_delete_descriptor` is recorded when a descriptor is written +using crdb_internal.unsafe_delete_descriptor(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. + + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | yes | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_delete_namespace_entry` + +An event of type `unsafe_delete_namespace_entry` is recorded when a namespace entry is +written using crdb_internal.unsafe_delete_namespace_entry(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. + + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | yes | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_upsert_descriptor` + +An event of type `unsafe_upsert_descriptor` is recorded when a descriptor is written +using crdb_internal.unsafe_upsert_descriptor(). + + +| Field | Description | Sensitive | +|--|--|--| +| `PreviousDescriptor` | | yes | +| `NewDescriptor` | | yes | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_upsert_namespace_entry` + +An event of type `unsafe_upsert_namespace_entry` is recorded when a namespace entry is +written using crdb_internal.unsafe_upsert_namespace_entry(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. + + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | yes | +| `PreviousID` | | no | +| `Force` | | no | +| `FailedValidation` | | no | +| `ValidationErrors` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +## SQL Privilege changes + +Events in this category pertain to DDL (Data Definition Language) +operations performed by SQL statements that modify the privilege +grants for stored objects. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `PRIVILEGES` channel. + + +### `alter_database_owner` + +An event of type `alter_database_owner` is recorded when a database's owner is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database being affected. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `alter_default_privileges` + +An event of type `alter_default_privileges` is recorded when default privileges are changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | yes | +| `RoleName` | Either role_name should be populated or for_all_roles should be true. The role having its default privileges altered. | yes | +| `ForAllRoles` | Identifies if FOR ALL ROLES is used. | no | +| `SchemaName` | The name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. 
| no | + +### `alter_function_owner` + +An event of type `alter_function_owner` is recorded when the owner of a user-defined function is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | The name of the affected user-defined function. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_schema_owner` + +An event of type `alter_schema_owner` is recorded when a schema's owner is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_table_owner` + +An event of type `alter_table_owner` is recorded when the owner of a table, view or sequence is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected object. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_type_owner` + +An event of type `alter_type_owner` is recorded when the owner of a user-defined type is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `change_database_privilege` + +An event of type `change_database_privilege` is recorded when privileges are +added to / removed from a user for a database object. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_function_privilege` + + + +| Field | Description | Sensitive | +|--|--|--| +| `FuncName` | The name of the affected function. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_schema_privilege` + +An event of type `change_schema_privilege` is recorded when privileges are added to / +removed from a user for a schema object. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_table_privilege` + +An event of type `change_table_privilege` is recorded when privileges are added to / removed +from a user for a table, sequence or view object. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_type_privilege` + +An event of type `change_type_privilege` is recorded when privileges are added to / +removed from a user for a type object. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +## SQL Session events + +Events in this category report SQL client connections +and sessions. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these miscellaneous events are +preserved in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `SESSIONS` channel. + + +### `client_authentication_failed` + +An event of type `client_authentication_failed` is reported when a client session +did not authenticate successfully. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Reason` | The reason for the authentication failure. See below for possible values for type `AuthFailReason`. | no | +| `Detail` | The detailed error for the authentication failure. | partially | +| `Method` | The authentication method used. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. 
| no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_authentication_info` + +An event of type `client_authentication_info` is reported for intermediate +steps during the authentication process. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Method` | The authentication method used, once known. | no | +| `Info` | The authentication progress message. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. 
This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_authentication_ok` + +An event of type `client_authentication_ok` is reported when a client session +was authenticated successfully. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Method` | The authentication method used. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_connection_end` + +An event of type `client_connection_end` is reported when a client connection +is closed. This is reported even when authentication +fails, and even for simple cancellation messages. 
+ +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_connections.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Duration` | The duration of the connection in nanoseconds. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | + +### `client_connection_start` + +An event of type `client_connection_start` is reported when a client connection +is established. This is reported even when authentication +fails, and even for simple cancellation messages. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_connections.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. 
| no | + +### `client_session_end` + +An event of type `client_session_end` is reported when a client session +is completed. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Duration` | The duration of the connection in nanoseconds. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +## SQL Slow Query Log + +Events in this category report slow query execution. + +Note: these events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SQL_PERF` channel. 
+ + +### `large_row` + +An event of type `large_row` is recorded when a statement tries to write a row larger than +cluster setting `sql.guardrails.max_row_size_log` to the database. Multiple +LargeRow events will be recorded for statements writing multiple large rows. +LargeRow events are recorded before the transaction commits, so in the case +of transaction abort there will not be a corresponding row in the database. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RowSize` | | no | +| `TableID` | | no | +| `FamilyID` | | no | +| `PrimaryKey` | | yes | + +### `slow_query` + +An event of type `slow_query` is recorded when a query triggers the "slow query" condition. + +As of this writing, the condition requires: +- the cluster setting `sql.log.slow_query.latency_threshold` +set to a non-zero value, AND +- EITHER of the following conditions: +- the actual age of the query exceeds the configured threshold; AND/OR +- the query performs a full table/index scan AND the cluster setting +`sql.log.slow_query.experimental_full_table_scans.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `txn_rows_read_limit` + +An event of type `txn_rows_read_limit` is recorded when a transaction tries to read more rows than +cluster setting `sql.defaults.transaction_rows_read_log`. There will only be +a single record for a single transaction (unless it is retried) even if there +are more statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +### `txn_rows_written_limit` + +An event of type `txn_rows_written_limit` is recorded when a transaction tries to write more rows +than cluster setting `sql.defaults.transaction_rows_written_log`. There will +only be a single record for a single transaction (unless it is retried) even +if there are more mutation statements within the transaction that haven't +been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +## SQL Slow Query Log (Internal) + +Events in this category report slow query execution by +internal executors, i.e., when CockroachDB internally issues +SQL statements. + +Note: these events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SQL_INTERNAL_PERF` channel. + + +### `large_row_internal` + +An event of type `large_row_internal` is recorded when an internal query tries to write a row +larger than cluster settings `sql.guardrails.max_row_size_log` or +`sql.guardrails.max_row_size_err` to the database. 
+ + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RowSize` | | no | +| `TableID` | | no | +| `FamilyID` | | no | +| `PrimaryKey` | | yes | + +### `slow_query_internal` + +An event of type `slow_query_internal` is recorded when a query triggers the "slow query" condition, +and the cluster setting `sql.log.slow_query.internal_queries.enabled` is +set. +See the documentation for the event type `slow_query` for details about +the "slow query" condition. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. 
For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `txn_rows_read_limit_internal` + +An event of type `txn_rows_read_limit_internal` is recorded when an internal transaction tries to +read more rows than cluster setting `sql.defaults.transaction_rows_read_log` +or `sql.defaults.transaction_rows_read_err`. There will only be a single +record for a single transaction (unless it is retried) even if there are more +mutation statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. 
The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +### `txn_rows_written_limit_internal` + +An event of type `txn_rows_written_limit_internal` is recorded when an internal transaction tries to +write more rows than cluster setting +`sql.defaults.transaction_rows_written_log` or +`sql.defaults.transaction_rows_written_err`. There will only be a single +record for a single transaction (unless it is retried) even if there are more +mutation statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. 
The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +## SQL User and Role operations + +Events in this category pertain to SQL statements that modify the +properties of users and roles. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `USER_ADMIN` channel. + + +### `alter_role` + +An event of type `alter_role` is recorded when a role is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the affected user/role. | yes | +| `Options` | The options set on the user/role. | no | +| `SetInfo` | Information corresponding to an ALTER ROLE SET statement. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_role` + +An event of type `create_role` is recorded when a role is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the new user/role. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_role` + +An event of type `drop_role` is recorded when a role is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the affected user/role. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `grant_role` + +An event of type `grant_role` is recorded when a role is granted. + + +| Field | Description | Sensitive | +|--|--|--| +| `GranteeRoles` | The roles being granted to. | yes | +| `Members` | The roles being granted. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `password_hash_converted` + +An event of type `password_hash_converted` is recorded when the password credentials +are automatically converted server-side. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the user/role whose credentials have been converted. | yes | +| `OldMethod` | The previous hash method. | no | +| `NewMethod` | The new hash method. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Storage telemetry events + + + +Events in this category are logged to the `TELEMETRY` channel. + + +### `level_stats` + +An event of type `level_stats` contains per-level statistics for an LSM. + + +| Field | Description | Sensitive | +|--|--|--| +| `Level` | level is the level ID in a LSM (e.g. level(L0) == 0, etc.) | no | +| `NumFiles` | num_files is the number of files in the level (gauge). 
| no | +| `SizeBytes` | size_bytes is the size of the level, in bytes (gauge). | no | +| `Score` | score is the compaction score of the level (gauge). | no | +| `BytesIn` | bytes_in is the number of bytes written to this level (counter). | no | +| `BytesIngested` | bytes_ingested is the number of bytes ingested into this level (counter). | no | +| `BytesMoved` | bytes_moved is the number of bytes moved into this level via a move-compaction (counter). | no | +| `BytesRead` | bytes_read is the number of bytes read from this level, during compactions (counter). | no | +| `BytesCompacted` | bytes_compacted is the number of bytes written to this level during compactions (counter). | no | +| `BytesFlushed` | bytes flushed is the number of bytes flushed to this level. This value is always zero for levels other than L0 (counter). | no | +| `TablesCompacted` | tables_compacted is the count of tables compacted into this level (counter). | no | +| `TablesFlushed` | tables_flushed is the count of tables flushed into this level (counter). | no | +| `TablesIngested` | tables_ingested is the count of tables ingested into this level (counter). | no | +| `TablesMoved` | tables_moved is the count of tables moved into this level via move-compactions (counter). | no | +| `NumSublevels` | num_sublevel is the count of sublevels for the level. This value is always zero for levels other than L0 (gauge). | no | + + + +### `store_stats` + +An event of type `store_stats` contains per store stats. + +Note that because stats are scoped to the lifetime of the process, counters +(and certain gauges) will be reset across node restarts. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeId` | node_id is the ID of the node. | no | +| `StoreId` | store_id is the ID of the store. | no | +| `Levels` | levels is a nested message containing per-level statistics. | yes | +| `CacheSize` | cache_size is the size of the cache for the store, in bytes (gauge). 
| no | +| `CacheCount` | cache_count is the number of items in the cache (gauge). | no | +| `CacheHits` | cache_hits is the number of cache hits (counter). | no | +| `CacheMisses` | cache_misses is the number of cache misses (counter). | no | +| `CompactionCountDefault` | compaction_count_default is the count of default compactions (counter). | no | +| `CompactionCountDeleteOnly` | compaction_count_delete_only is the count of delete-only compactions (counter). | no | +| `CompactionCountElisionOnly` | compaction_count_elision_only is the count of elision-only compactions (counter). | no | +| `CompactionCountMove` | compaction_count_move is the count of move-compactions (counter). | no | +| `CompactionCountRead` | compaction_count_read is the count of read-compactions (counter). | no | +| `CompactionCountRewrite` | compaction_count_rewrite is the count of rewrite-compactions (counter). | no | +| `CompactionNumInProgress` | compactions_num_in_progress is the number of compactions in progress (gauge). | no | +| `CompactionMarkedFiles` | compaction_marked_files is the count of files marked for compaction (gauge). | no | +| `FlushCount` | flush_count is the number of flushes (counter). | no | +| `FlushIngestCount` | | no | +| `FlushIngestTableCount` | | no | +| `FlushIngestTableBytes` | | no | +| `IngestCount` | ingest_count is the number of successful ingest operations (counter). | no | +| `MemtableSize` | memtable_size is the total size allocated to all memtables and (large) batches, in bytes (gauge). | no | +| `MemtableCount` | memtable_count is the count of memtables (gauge). | no | +| `MemtableZombieCount` | memtable_zombie_count is the count of memtables no longer referenced by the current DB state, but still in use by an iterator (gauge). | no | +| `MemtableZombieSize` | memtable_zombie_size is the size, in bytes, of all zombie memtables (gauge). | no | +| `WalLiveCount` | wal_live_count is the count of live WAL files (gauge). 
| no | +| `WalLiveSize` | wal_live_size is the size, in bytes, of live data in WAL files. With WAL recycling, this value is less than the actual on-disk size of the WAL files (gauge). | no | +| `WalObsoleteCount` | wal_obsolete_count is the count of obsolete WAL files (gauge). | no | +| `WalObsoleteSize` | wal_obsolete_size is the size of obsolete WAL files, in bytes (gauge). | no | +| `WalPhysicalSize` | wal_physical_size is the size, in bytes, of the WAL files on disk (gauge). | no | +| `WalBytesIn` | wal_bytes_in is the number of logical bytes written to the WAL (counter). | no | +| `WalBytesWritten` | wal_bytes_written is the number of bytes written to the WAL (counter). | no | +| `TableObsoleteCount` | table_obsolete_count is the number of tables which are no longer referenced by the current DB state or any open iterators (gauge). | no | +| `TableObsoleteSize` | table_obsolete_size is the size, in bytes, of obsolete tables (gauge). | no | +| `TableZombieCount` | table_zombie_count is the number of tables no longer referenced by the current DB state, but are still in use by an open iterator (gauge). | no | +| `TableZombieSize` | table_zombie_size is the size, in bytes, of zombie tables (gauge). | no | +| `RangeKeySetsCount` | range_key_sets_count is the approximate count of internal range key sets in the store. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Telemetry events + + + +Events in this category are logged to the `TELEMETRY` channel. + + +### `captured_index_usage_stats` + +An event of type `captured_index_usage_stats` + + +| Field | Description | Sensitive | +|--|--|--| +| `TotalReadCount` | TotalReadCount is the number of times the index has been read. | no | +| `LastRead` | LastRead is the timestamp at which the index was last read. 
| no | +| `TableID` | TableID is the ID of the table on which the index was created. This is same as descpb.TableID and is unique within the cluster. | no | +| `IndexID` | IndexID is the ID of the index within the scope of the given table. | no | +| `DatabaseName` | DatabaseName is the name of the database in which the index was created. | no | +| `TableName` | TableName is the name of the table on which the index was created. | no | +| `IndexName` | IndexName is the name of the index within the scope of the given table. | no | +| `IndexType` | IndexType is the type of the index. Index types include "primary" and "secondary". | no | +| `IsUnique` | IsUnique indicates if the index has a UNIQUE constraint. | no | +| `IsInverted` | IsInverted indicates if the index is an inverted index. | no | +| `CreatedAt` | CreatedAt is the timestamp at which the index was created. | no | +| `SchemaName` | SchemaName is the name of the schema in which the index was created. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `changefeed_emitted_bytes` + +An event of type `changefeed_emitted_bytes` is an event representing the bytes emitted by a changefeed over an interval. + + +| Field | Description | Sensitive | +|--|--|--| +| `JobId` | The job id for enterprise changefeeds. | no | +| `EmittedBytes` | The number of bytes emitted. | no | +| `EmittedMessages` | The number of messages emitted. | no | +| `LoggingInterval` | The time period in nanoseconds between emitting telemetry events of this type (per-aggregator). | no | +| `Closing` | Flag to indicate that the changefeed is closing. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | + +### `changefeed_failed` + +An event of type `changefeed_failed` is an event for any Changefeed failure since the plan hook +was triggered. + + +| Field | Description | Sensitive | +|--|--|--| +| `FailureType` | The reason / environment with which the changefeed failed (ex: connection_closed, changefeed_behind) | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | + +### `create_changefeed` + +An event of type `create_changefeed` is an event for any CREATE CHANGEFEED query that +successfully starts running. Failed CREATE statements will show up as +ChangefeedFailed events. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `Transformation` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | + +### `hot_ranges_stats` + +An event of type `hot_ranges_stats` + + +| Field | Description | Sensitive | +|--|--|--| +| `RangeID` | | no | +| `Qps` | | no | +| `DatabaseName` | DatabaseName is the name of the database in which the index was created. | yes | +| `TableName` | TableName is the name of the table on which the index was created. | yes | +| `IndexName` | IndexName is the name of the index within the scope of the given table. | yes | +| `SchemaName` | SchemaName is the name of the schema in which the index was created. | yes | +| `LeaseholderNodeID` | LeaseholderNodeID indicates the Node ID that is the current leaseholder for the given range. | no | +| `WritesPerSecond` | Writes per second is the recent number of keys written per second on this range. | no | +| `ReadsPerSecond` | Reads per second is the recent number of keys read per second on this range. | no | +| `WriteBytesPerSecond` | Write bytes per second is the recent number of bytes written per second on this range. | no | +| `ReadBytesPerSecond` | Read bytes per second is the recent number of bytes read per second on this range. | no | +| `CPUTimePerSecond` | CPU time per second is the recent cpu usage in nanoseconds of this range. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `m_v_c_c_iterator_stats` + +Internal storage iteration statistics for a single execution. + + +| Field | Description | Sensitive | +|--|--|--| +| `StepCount` | StepCount collects the number of times the iterator moved forward or backward over the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `StepCountInternal` | StepCountInternal collects the number of times the iterator moved forward or backward over LSM internal keys. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `SeekCount` | SeekCount collects the number of times the iterator moved to a specific key/value pair in the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `SeekCountInternal` | SeekCountInternal collects the number of times the iterator moved to a specific LSM internal key. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `BlockBytes` | BlockBytes collects the bytes in the loaded SSTable data blocks. For details, see pebble.InternalIteratorStats. | no | +| `BlockBytesInCache` | BlockBytesInCache collects the subset of BlockBytes in the block cache. For details, see pebble.InternalIteratorStats. | no | +| `KeyBytes` | KeyBytes collects the bytes in keys that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `ValueBytes` | ValueBytes collects the bytes in values that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `PointCount` | PointCount collects the count of point keys iterated over. For details, see pebble.InternalIteratorStats. 
| no | +| `PointsCoveredByRangeTombstones` | PointsCoveredByRangeTombstones collects the count of point keys that were iterated over that were covered by range tombstones. For details, see pebble.InternalIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeyCount` | RangeKeyCount collects the count of range keys encountered during iteration. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeyContainedPoints` | RangeKeyContainedPoints collects the count of point keys encountered within the bounds of a range key. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeySkippedPoints` | RangeKeySkippedPoints collects the count of the subset of ContainedPoints point keys that were skipped during iteration due to range-key masking. For details, see pkg/storage/engine.go, pebble.RangeKeyIteratorStats, and docs/tech-notes/mvcc-range-tombstones.md. | no | + + + +### `recovery_event` + +An event of type `recovery_event` is an event that is logged on every invocation of BACKUP, +RESTORE, and on every BACKUP schedule creation, with the appropriate subset +of fields populated depending on the type of event. This event is also +logged whenever a BACKUP and RESTORE job completes or fails. + + +| Field | Description | Sensitive | +|--|--|--| +| `RecoveryType` | RecoveryType is the type of recovery described by this event, which is one of - backup - scheduled_backup - create_schedule - restore

It can also be a job event corresponding to the recovery, which is one of - backup_job - scheduled_backup_job - restore_job | no | +| `TargetScope` | TargetScope is the largest scope of the targets that the user is backing up or restoring based on the following order: table < schema < database < full cluster. | no | +| `IsMultiregionTarget` | IsMultiregionTarget is true if any of the targets contain objects with multi-region primitives. | no | +| `TargetCount` | TargetCount is the number of targets in the BACKUP/RESTORE. | no | +| `DestinationSubdirType` | DestinationSubdirType is - latest: if using the latest subdir - standard: if using a date-based subdir - custom: if using a custom subdir that's not date-based | no | +| `DestinationStorageTypes` | DestinationStorageTypes are the types of storage that the user is backing up to or restoring from. | no | +| `DestinationAuthTypes` | DestinationAuthTypes are the types of authentication methods that the user is using to access the destination storage. | no | +| `IsLocalityAware` | IsLocalityAware indicates if the BACKUP or RESTORE is locality aware. | no | +| `AsOfInterval` | AsOfInterval is the time interval in nanoseconds between the statement timestamp and the timestamp resolved by the AS OF SYSTEM TIME expression. The interval is expressed in nanoseconds. | no | +| `WithRevisionHistory` | WithRevisionHistory is true if the BACKUP includes revision history. | no | +| `HasEncryptionPassphrase` | HasEncryptionPassphrase is true if the user provided an encryption passphrase to encrypt/decrypt their backup. | no | +| `KMSType` | KMSType is the type of KMS the user is using to encrypt/decrypt their backup. | no | +| `KMSCount` | KMSCount is the number of KMS the user is using. | no | +| `Options` | Options contain all the names of the options specified by the user in the BACKUP or RESTORE statement. For options that are accompanied by a value, only those with non-empty values will be present.

It's important to note that there are no option values anywhere in the event payload. Future changes to telemetry should refrain from adding values to the payload unless they are properly redacted. | no | +| `DebugPauseOn` | DebugPauseOn is the type of event that the restore should pause on for debugging purposes. Currently only "error" is supported. | no | +| `JobID` | JobID is the ID of the BACKUP/RESTORE job. | no | +| `ResultStatus` | ResultStatus indicates whether the job succeeded or failed. | no | +| `ErrorText` | ErrorText is the text of the error that caused the job to fail. | partially | +| `RecurringCron` | RecurringCron is the crontab for the incremental backup. | no | +| `FullBackupCron` | FullBackupCron is the crontab for the full backup. | no | +| `CustomFirstRunTime` | CustomFirstRunTime is the timestamp for the user configured first run time. Expressed as nanoseconds since the Unix epoch. | no | +| `OnExecutionFailure` | OnExecutionFailure describes the desired behavior if the schedule fails to execute. | no | +| `OnPreviousRunning` | OnPreviousRunning describes the desired behavior if the previously scheduled BACKUP is still running. | no | +| `IgnoreExistingBackup` | IgnoreExistingBackup is true iff the BACKUP schedule should still be created even if a backup is already present in its destination. | no | +| `ApplicationName` | The application name for the session where recovery event was created. | no | +| `NumRows` | NumRows is the number of rows successfully imported, backed up or restored. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `sampled_exec_stats` + +An event of type `sampled_exec_stats` contains execution statistics that apply to both statements +and transactions. These stats as a whole are collected using a sampling approach. 
+These exec stats are meant to contain the same fields as ExecStats in +apps_stats.proto but are for a single execution rather than aggregated executions. +Fields in this struct should be updated in sync with apps_stats.proto. + + +| Field | Description | Sensitive | +|--|--|--| +| `NetworkBytes` | NetworkBytes collects the number of bytes sent over the network. | no | +| `MaxMemUsage` | MaxMemUsage collects the maximum memory usage that occurred on a node. | no | +| `ContentionTime` | ContentionTime collects the time in seconds statements in the transaction spent contending. | no | +| `NetworkMessages` | NetworkMessages collects the number of messages that were sent over the network. | no | +| `MaxDiskUsage` | MaxDiskUsage collects the maximum temporary disk usage that occurred. This is set in cases where a query had to spill to disk, e.g. when performing a large sort where not all of the tuples fit in memory. | no | +| `CPUSQLNanos` | CPUSQLNanos collects the CPU time spent executing SQL operations in nanoseconds. Currently, it is only collected for statements without mutations that have a vectorized plan. | no | +| `MVCCIteratorStats` | Internal storage iteration statistics. | yes | + + + +### `sampled_query` + +An event of type `sampled_query` is the SQL query event logged to the telemetry channel. It +contains common SQL event/execution details. + + +| Field | Description | Sensitive | +|--|--|--| +| `SkippedQueries` | skipped_queries indicate how many SQL statements were not considered for sampling prior to this one. If the field is omitted, or its value is zero, this indicates that no statement was omitted since the last event. | no | +| `CostEstimate` | Cost of the query as estimated by the optimizer. | no | +| `Distribution` | The distribution of the DistSQL query plan (local, full, or partial). | no | +| `PlanGist` | The query's plan gist bytes as a base64 encoded string. | no | +| `SessionID` | SessionID is the ID of the session that initiated the query. 
| no | +| `Database` | Name of the database that initiated the query. | no | +| `StatementID` | Statement ID of the query. | no | +| `TransactionID` | Transaction ID of the query. | no | +| `MaxFullScanRowsEstimate` | Maximum number of rows scanned by a full scan, as estimated by the optimizer. | no | +| `TotalScanRowsEstimate` | Total number of rows read by all scans in the query, as estimated by the optimizer. | no | +| `OutputRowsEstimate` | The number of rows output by the query, as estimated by the optimizer. | no | +| `StatsAvailable` | Whether table statistics were available to the optimizer when planning the query. | no | +| `NanosSinceStatsCollected` | The maximum number of nanoseconds that have passed since stats were collected on any table scanned by this query. | no | +| `BytesRead` | The number of bytes read from disk. | no | +| `RowsRead` | The number of rows read from disk. | no | +| `RowsWritten` | The number of rows written. | no | +| `InnerJoinCount` | The number of inner joins in the query plan. | no | +| `LeftOuterJoinCount` | The number of left (or right) outer joins in the query plan. | no | +| `FullOuterJoinCount` | The number of full outer joins in the query plan. | no | +| `SemiJoinCount` | The number of semi joins in the query plan. | no | +| `AntiJoinCount` | The number of anti joins in the query plan. | no | +| `IntersectAllJoinCount` | The number of intersect all joins in the query plan. | no | +| `ExceptAllJoinCount` | The number of except all joins in the query plan. | no | +| `HashJoinCount` | The number of hash joins in the query plan. | no | +| `CrossJoinCount` | The number of cross joins in the query plan. | no | +| `IndexJoinCount` | The number of index joins in the query plan. | no | +| `LookupJoinCount` | The number of lookup joins in the query plan. | no | +| `MergeJoinCount` | The number of merge joins in the query plan. | no | +| `InvertedJoinCount` | The number of inverted joins in the query plan. 
| no | +| `ApplyJoinCount` | The number of apply joins in the query plan. | no | +| `ZigZagJoinCount` | The number of zig zag joins in the query plan. | no | +| `ContentionNanos` | The duration of time in nanoseconds that the query experienced contention. | no | +| `Regions` | The regions of the nodes where SQL processors ran. | no | +| `NetworkBytesSent` | The number of network bytes sent by nodes for this query. | no | +| `MaxMemUsage` | The maximum amount of memory usage by nodes for this query. | no | +| `MaxDiskUsage` | The maximum amount of disk usage by nodes for this query. | no | +| `KVBytesRead` | The number of bytes read at the KV layer for this query. | no | +| `KVPairsRead` | The number of key-value pairs read at the KV layer for this query. | no | +| `KVRowsRead` | The number of rows read at the KV layer for this query. | no | +| `NetworkMessages` | The number of network messages sent by nodes for this query. | no | +| `IndexRecommendations` | Generated index recommendations for this query. | no | +| `ScanCount` | The number of scans in the query plan. | no | +| `ScanWithStatsCount` | The number of scans using statistics (including forecasted statistics) in the query plan. | no | +| `ScanWithStatsForecastCount` | The number of scans using forecasted statistics in the query plan. | no | +| `TotalScanRowsWithoutForecastsEstimate` | Total number of rows read by all scans in the query, as estimated by the optimizer without using forecasts. | no | +| `NanosSinceStatsForecasted` | The greatest quantity of nanoseconds that have passed since the forecast time (or until the forecast time, if it is in the future, in which case it will be negative) for any table with forecasted stats scanned by this query. | no | +| `Indexes` | The list of indexes used by this query. | no | +| `CpuTimeNanos` | Collects the cumulative CPU time spent executing SQL operations in nanoseconds. 
Currently, it is only collected for statements without mutations that have a vectorized plan. | no | +| `KvGrpcCalls` | The number of grpc calls done to get data from KV nodes | no | +| `KvTimeNanos` | Cumulated time spent waiting for a KV request. This includes disk IO time and potentially network time (if any of the keys are not local). | no | +| `ServiceLatencyNanos` | The time to service the query, from start of parse to end of execute. | no | +| `OverheadLatencyNanos` | The difference between service latency and the sum of parse latency + plan latency + run latency . | no | +| `RunLatencyNanos` | The time to run the query and fetch or compute the result rows. | no | +| `PlanLatencyNanos` | The time to transform the AST into a logical query plan. | no | +| `IdleLatencyNanos` | The time between statement executions in a transaction | no | +| `ParseLatencyNanos` | The time to transform the SQL string into an abstract syntax tree (AST). | no | +| `MvccStepCount` | StepCount collects the number of times the iterator moved forward or backward over the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccStepCountInternal` | StepCountInternal collects the number of times the iterator moved forward or backward over LSM internal keys. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccSeekCount` | SeekCount collects the number of times the iterator moved to a specific key/value pair in the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccSeekCountInternal` | SeekCountInternal collects the number of times the iterator moved to a specific LSM internal key. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccBlockBytes` | BlockBytes collects the bytes in the loaded SSTable data blocks. For details, see pebble.InternalIteratorStats. 
| no | +| `MvccBlockBytesInCache` | BlockBytesInCache collects the subset of BlockBytes in the block cache. For details, see pebble.InternalIteratorStats. | no | +| `MvccKeyBytes` | KeyBytes collects the bytes in keys that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccValueBytes` | ValueBytes collects the bytes in values that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccPointCount` | PointCount collects the count of point keys iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccPointsCoveredByRangeTombstones` | PointsCoveredByRangeTombstones collects the count of point keys that were iterated over that were covered by range tombstones. For details, see pebble.InternalIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeyCount` | RangeKeyCount collects the count of range keys encountered during iteration. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeyContainedPoints` | RangeKeyContainedPoints collects the count of point keys encountered within the bounds of a range key. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeySkippedPoints` | RangeKeySkippedPoints collects the count of the subset of ContainedPoints point keys that were skipped during iteration due to range-key masking. For details, see pkg/storage/engine.go, pebble.RangeKeyIteratorStats, and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `SchemaChangerMode` | SchemaChangerMode is the mode that was used to execute the schema change, if any. | no | +| `SQLInstanceIDs` | SQLInstanceIDs is a list of all the SQL instances used in this statement's execution. | no | +| `StatementFingerprintID` | Statement fingerprint ID of the query. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. 
| no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `sampled_transaction` + +An event of type `sampled_transaction` is the event logged to telemetry at the end of transaction execution. + + +| Field | Description | Sensitive | +|--|--|--| +| `User` | User is the user account that triggered the transaction. The special usernames `root` and `node` are not considered sensitive. | depends | +| `ApplicationName` | ApplicationName is the application name for the session where the transaction was executed. This is included in the event to ease filtering of logging output by application. | no | +| `TxnCounter` | TxnCounter is the sequence number of the SQL transaction inside its session. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `TransactionID` | TransactionID is the id of the transaction. | no | +| `Committed` | Committed indicates if the transaction committed successfully. We want to include this value even if it is false. | no | +| `ImplicitTxn` | ImplicitTxn indicates if the transaction was an implicit one. We want to include this value even if it is false. | no | +| `StartTimeUnixNanos` | StartTimeUnixNanos is the time the transaction was started. Expressed as unix time in nanoseconds. | no | +| `EndTimeUnixNanos` | EndTimeUnixNanos is the time the transaction finished (either committed or aborted). Expressed as unix time in nanoseconds. | no | +| `ServiceLatNanos` | ServiceLatNanos is the time to service the whole transaction, from start to end of execution. | no | +| `SQLSTATE` | SQLSTATE is the SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | ErrorText is the text of the error if any. | partially | +| `NumRetries` | NumRetries is the number of times the txn was retried automatically by the server. 
| no | +| `LastAutoRetryReason` | LastAutoRetryReason is a string containing the reason for the last automatic retry. | partially | +| `NumRows` | NumRows is the total number of rows returned across all statements. | no | +| `RetryLatNanos` | RetryLatNanos is the amount of time spent retrying the transaction. | no | +| `CommitLatNanos` | CommitLatNanos is the amount of time spent committing the transaction after all statement operations. | no | +| `IdleLatNanos` | IdleLatNanos is the amount of time spent waiting for the client to send statements while the transaction is open. | no | +| `BytesRead` | BytesRead is the number of bytes read from disk. | no | +| `RowsRead` | RowsRead is the number of rows read from disk. | no | +| `RowsWritten` | RowsWritten is the number of rows written to disk. | no | +| `SampledExecStats` | SampledExecStats is a nested field containing execution statistics. This field will be omitted if the stats were not sampled. | yes | +| `SkippedTransactions` | SkippedTransactions is the number of transactions that were skipped as part of sampling prior to this one. We only count skipped transactions when telemetry logging is enabled and the sampling mode is set to "transaction". | no | +| `TransactionFingerprintID` | TransactionFingerprintID is the fingerprint ID of the transaction. This can be used to find the transaction in the console. | no | +| `StatementFingerprintIDs` | StatementFingerprintIDs is an array of statement fingerprint IDs belonging to this transaction. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `schema_descriptor` + +An event of type `schema_descriptor` is an event for schema telemetry, whose purpose is +to take periodic snapshots of the cluster's SQL schema and publish them in +the telemetry log channel. 
For all intents and purposes, the data in such a +snapshot can be thought of as the outer join of certain system tables: +namespace, descriptor, and at some point perhaps zones, etc. + +Snapshots are too large to conveniently be published as a single log event, +so instead they're broken down into SchemaDescriptor events which +contain the data in one record of this outer join projection. These events +are prefixed by a header (a SchemaSnapshotMetadata event). + + +| Field | Description | Sensitive | +|--|--|--| +| `SnapshotID` | SnapshotID is the unique identifier of the snapshot that this event is part of. | no | +| `ParentDatabaseID` | ParentDatabaseID matches the same key column in system.namespace. | no | +| `ParentSchemaID` | ParentSchemaID matches the same key column in system.namespace. | no | +| `Name` | Name matches the same key column in system.namespace. | no | +| `DescID` | DescID matches the 'id' column in system.namespace and system.descriptor. | no | +| `Desc` | Desc matches the 'descriptor' column in system.descriptor. Some contents of the descriptor may be redacted to prevent leaking PII. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `schema_snapshot_metadata` + +An event of type `schema_snapshot_metadata` is an event describing a schema snapshot, which +is a set of SchemaDescriptor messages sharing the same SnapshotID. + + +| Field | Description | Sensitive | +|--|--|--| +| `SnapshotID` | SnapshotID is the unique identifier of this snapshot. | no | +| `NumRecords` | NumRecords is how many SchemaDescriptor events are in the snapshot. | no | +| `AsOfTimestamp` | AsOfTimestamp is when the snapshot was taken. This is equivalent to the timestamp given in the AS OF SYSTEM TIME clause when querying the namespace and descriptor tables in the system database. 
Expressed as nanoseconds since the Unix epoch. | no | +| `Errors` | Errors records any errors encountered when post-processing this snapshot, which includes the redaction of any potential PII. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Zone config events + +Events in this category pertain to zone configuration changes on +the SQL schema or system ranges. + +When zone configs apply to individual tables or other objects in a +SQL logical schema, they are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these zone config events are preserved +in each tenant's own `system.eventlog` table. + +When they apply to cluster-level ranges (e.g., the system zone config), +they are stored in the system tenant's own `system.eventlog` table. + +Events in this category are logged to the `OPS` channel. + + +### `remove_zone_config` + +An event of type `remove_zone_config` is recorded when a zone config is removed. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + +### `set_zone_config` + +An event of type `set_zone_config` is recorded when a zone config is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `ResolvedOldConfig` | The string representation of the resolved old zone config. This is not necessarily the same as the zone config that was previously set -- as it includes the resolved values of the zone config options. In other words, a zone config that hasn't been properly "set" yet (and inherits from its parent) will have a resolved_old_config that has details of the values it inherits from its parent. This is particularly useful to get a proper diff between the old and new zone config. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. 
Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + + + + +## Enumeration types + +### `AuthFailReason` + +AuthFailReason is the inventory of possible reasons for an +authentication failure. + + +| Value | Textual alias in code or documentation | Description | +|--|--|--| +| 0 | UNKNOWN | is reported when the reason is unknown. | +| 1 | USER_RETRIEVAL_ERROR | occurs when there was an internal error accessing the principals. | +| 2 | USER_NOT_FOUND | occurs when the principal is unknown. | +| 3 | LOGIN_DISABLED | occurs when the user does not have LOGIN privileges. | +| 4 | METHOD_NOT_FOUND | occurs when no HBA rule matches or the method does not exist. | +| 5 | PRE_HOOK_ERROR | occurs when the authentication handshake encountered a protocol error. | +| 6 | CREDENTIALS_INVALID | occurs when the client-provided credentials were invalid. | +| 7 | CREDENTIALS_EXPIRED | occur when the credentials provided by the client are expired. | +| 8 | NO_REPLICATION_ROLEOPTION | occurs when the connection requires a replication role option, but the user does not have it. | + + + diff --git a/src/current/_includes/cockroach-generated/release-24.1/logformats.md b/src/current/_includes/cockroach-generated/release-24.1/logformats.md new file mode 100644 index 00000000000..89580c9fc15 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-24.1/logformats.md @@ -0,0 +1,382 @@ + +The supported log output formats are documented below. 
+ + +- [`crdb-v1`](#format-crdb-v1) + +- [`crdb-v1-count`](#format-crdb-v1-count) + +- [`crdb-v1-tty`](#format-crdb-v1-tty) + +- [`crdb-v1-tty-count`](#format-crdb-v1-tty-count) + +- [`crdb-v2`](#format-crdb-v2) + +- [`crdb-v2-tty`](#format-crdb-v2-tty) + +- [`json`](#format-json) + +- [`json-compact`](#format-json-compact) + +- [`json-fluent`](#format-json-fluent) + +- [`json-fluent-compact`](#format-json-fluent-compact) + + + +## Format `crdb-v1` + + +This is a legacy file format used from CockroachDB v1.0. + +Each log entry is emitted using a common prefix, described below, followed by: + +- The logging context tags enclosed between `[` and `]`, if any. It is possible + for this to be omitted if there were no context tags. +- Optionally, a counter column, if the option 'show-counter' is enabled. See below for details. +- the text of the log entry. + +Beware that the text of the log entry can span multiple lines. +The following caveats apply: + +- The text of the log entry can start with text enclosed between `[` and `]`. + If there were no logging tags to start with, it is not possible to distinguish between + logging context tag information and a `[...]` string in the main text of the + log entry. This means that this format is ambiguous. + + To remove this ambiguity, you can use the option 'show-counter'. + +- The text of the log entry can embed arbitrary application-level strings, + including strings that represent log entries. In particular, an accident + of implementation can cause the common entry prefix (described below) + to also appear on a line of its own, as part of the payload of a previous + log entry. There is no automated way to recognize when this occurs. + Care must be taken by a human observer to recognize these situations. + +- The log entry parser provided by CockroachDB to read log files is faulty + and is unable to recognize the aforementioned pitfall; nor can it read + entries larger than 64KiB successfully. 
Generally, use of this internal + log entry parser is discouraged for entries written with this format. + +See the newer format `crdb-v2` for an alternative +without these limitations. + +### Header lines + +At the beginning of each file, a header is printed using a similar format as +regular log entries. This header reports when the file was created, +which parameters were used to start the server, the server identifiers +if known, and other metadata about the running process. + +- This header appears to be logged at severity `INFO` (with an `I` prefix + at the start of the line) even though it does not really have a severity. +- The header is printed unconditionally even when a filter is configured to + omit entries at the `INFO` level. + +### Common log entry prefix + +Each line of output starts with the following prefix: + + Lyymmdd hh:mm:ss.uuuuuu goid [chan@]file:line marker tags counter + +Reminder, the tags may be omitted; and the counter is only printed if the option +'show-counter' is specified. + +| Field | Description | +|-----------------|--------------------------------------------------------------------------------------------------------------------------------------| +| L | A single character, representing the [log level](logging.html#logging-levels-severities) (e.g., `I` for `INFO`). | +| yy | The year (zero padded; i.e., 2016 is `16`). | +| mm | The month (zero padded; i.e., May is `05`). | +| dd | The day (zero padded). | +| hh:mm:ss.uuuuuu | Time in hours, minutes and fractional seconds. Timezone is UTC. | +| goid | The goroutine id (omitted if zero for use by tests). | +| chan | The channel number (omitted if zero for backward compatibility). | +| file | The file name where the entry originated. | +| line | The line number where the entry originated. | +| marker | Redactability marker ` + redactableIndicator + ` (see below for details). | +| tags | The logging tags, enclosed between `[` and `]`. May be absent. 
| +| counter | The entry counter. Only included if 'show-counter' is enabled. | + +The redactability marker can be empty; in this case, its position in the common prefix is +a double ASCII space character which can be used to reliably identify this situation. + +If the marker ` + redactableIndicator + ` is present, the remainder of the log entry +contains delimiters (‹...›) around +fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) when log redaction is requested. + + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `show-counter` | Whether to include the counter column in the line header. Without it, the format may be ambiguous due to the optionality of tags. | +| `colors` | The color profile to use. Possible values: none, auto, ansi, 256color. Default is auto. | +| `timezone` | The timezone to use for the timestamp column. The value can be any timezone name recognized by the Go standard library. Default is `UTC` | + + + +## Format `crdb-v1-count` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: true` +- `colors: none` + + +## Format `crdb-v1-tty` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: false` +- `colors: auto` + + +## Format `crdb-v1-tty-count` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: true` +- `colors: auto` + + +## Format `crdb-v2` + +This is the main file format used from CockroachDB v21.1. + +Each log entry is emitted using a common prefix, described below, +followed by the text of the log entry. + +### Entry format + +Each line of output starts with the following prefix: + + Lyymmdd hh:mm:ss.uuuuuu goid [chan@]file:line marker [tags...] 
counter cont + +| Field | Description | +|-----------------|--------------------------------------------------------------------------------------------------------------------------------------| +| L | A single character, representing the [log level](logging.html#logging-levels-severities) (e.g., `I` for `INFO`). | +| yy | The year (zero padded; i.e., 2016 is `16`). | +| mm | The month (zero padded; i.e., May is `05`). | +| dd | The day (zero padded). | +| hh:mm:ss.uuuuuu | Time in hours, minutes and fractional seconds. Timezone is UTC. | +| goid | The goroutine id (zero when cannot be determined). | +| chan | The channel number (omitted if zero for backward compatibility). | +| file | The file name where the entry originated. Also see below. | +| line | The line number where the entry originated. | +| marker | Redactability marker "⋮" (see below for details). | +| tags | The logging tags, enclosed between `[` and `]`. See below. | +| counter | The optional entry counter (see below for details). | +| cont | Continuation mark for structured and multi-line entries. See below. | + +The `chan@` prefix before the file name indicates the logging channel, +and is omitted if the channel is `DEV`. + +The file name may be prefixed by the string `(gostd) ` to indicate +that the log entry was produced inside the Go standard library, instead +of a CockroachDB component. Entry parsers must be configured to ignore this prefix +when present. + +`marker` can be empty; in this case, its position in the common prefix is +a double ASCII space character which can be used to reliably identify this situation. +If the marker "⋮" is present, the remainder of the log entry +contains delimiters (‹...›) +around fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) +when log redaction is requested. 
+ +The logging `tags` are enclosed between square brackets `[...]`, +and the syntax `[-]` is used when there are no logging tags +associated with the log entry. + +`counter` is numeric, and is incremented for every +log entry emitted to this sink. (There is thus one counter sequence per +sink.) For entries that do not have a counter value +associated (e.g., header entries in file sinks), the counter position +in the common prefix is empty: `tags` is then +followed by two ASCII space characters, instead of one space; the `counter`, +and another space. The presence of the two ASCII spaces indicates +reliably that no counter was present. + +`cont` is a format/continuation indicator: + +| Continuation indicator | ASCII | Description | +|------------------------|-------|--| +| space | 0x32 | Start of an unstructured entry. | +| equal sign, "=" | 0x3d | Start of a structured entry. | +| exclamation mark, "!" | 0x21 | Start of an embedded stack trace. | +| plus sign, "+" | 0x2b | Continuation of a multi-line entry. The payload contains a newline character at this position. | +| vertical bar | 0x7c | Continuation of a large entry. | + +### Examples + +Example single-line unstructured entry: + +~~~ +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 started with engine type ‹2› +~~~ + +Example multi-line unstructured entry: + +~~~ +I210116 21:49:17.083093 14 1@cli/start.go:690 ⋮ [-] 40 node startup completed: +I210116 21:49:17.083093 14 1@cli/start.go:690 ⋮ [-] 40 +CockroachDB node starting at 2021-01-16 21:49 (took 0.0s) +~~~ + +Example structured entry: + +~~~ +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 ={"Timestamp":1610833757080706620,"EventType":"node_restart"} +~~~ + +Example long entries broken up into multiple lines: + +~~~ +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.... 
+I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 |aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +~~~ + +~~~ +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 ={"Timestamp":1610833757080706620,"EventTy... +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 |pe":"node_restart"} +~~~ + +### Backward-compatibility notes + +Entries in this format can be read by most `crdb-v1` log parsers, +in particular the one included in the DB console and +also the [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) +facility. + +However, implementers of previous version parsers must +understand that the logging tags field is now always +included, and the lack of logging tags is included +by a tag string set to `[-]`. + +Likewise, the entry counter is now also always included, +and there is a special character after `counter` +to indicate whether the remainder of the line is a +structured entry, or a continuation of a previous entry. + +Finally, in the previous format, structured entries +were prefixed with the string `Structured entry: `. In +the new format, they are prefixed by the `=` continuation +indicator. + + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `colors` | The color profile to use. Possible values: none, auto, ansi, 256color. Default is auto. | +| `timezone` | The timezone to use for the timestamp column. The value can be any timezone name recognized by the Go standard library. Default is `UTC` | + + + +## Format `crdb-v2-tty` + +This format name is an alias for 'crdb-v2' with +the following format option defaults: + +- `colors: auto` + + +## Format `json` + +This format emits log entries as a JSON payload. + +The JSON object is guaranteed to not contain unescaped newlines +or other special characters, and the entry as a whole is followed +by a newline character. This makes the format suitable for +processing over a stream unambiguously. 
+ +Each entry contains at least the following fields: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `tag` | `tag` | (Only if the option `fluent-tag: true` is given.) A Fluent tag for the event, formed by the process name and the logging channel. | +| `d` | `datetime` | The pretty-printed date/time of the event timestamp, if enabled via options. | +| `f` | `file` | The name of the source file where the event was emitted. | +| `g` | `goroutine` | The identifier of the goroutine where the event was emitted. | +| `l` | `line` | The line number where the event was emitted in the source. | +| `r` | `redactable` | Whether the payload is redactable (see below for details). | +| `t` | `timestamp` | The timestamp at which the event was emitted on the logging channel. | +| `v` | `version` | The binary version with which the event was generated. | + + +After a couple of *header* entries written at the beginning of each log sink, +all subsequent log entries also contain the following fields: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `C` | `channel` | The name of the logging channel where the event was sent. | +| `sev` | `severity` | The severity of the event. | +| `c` | `channel_numeric` | The numeric identifier for the logging channel where the event was sent. | +| `n` | `entry_counter` | The entry number on this logging sink, relative to the last process restart. | +| `s` | `severity_numeric` | The numeric value of the severity of the event. | + + +Additionally, the following fields are conditionally present: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `N` | `node_id` | The node ID where the event was generated, once known. 
Only reported for single-tenant or KV servers. | +| `x` | `cluster_id` | The cluster ID where the event was generated, once known. Only reported for single-tenant of KV servers. | +| `q` | `instance_id` | The SQL instance ID where the event was generated, once known. | +| `T` | `tenant_id` | The SQL tenant ID where the event was generated, once known. | +| `V` | `tenant_name` | The SQL virtual cluster where the event was generated, once known. | +| `tags` | `tags` | The logging context tags for the entry, if there were context tags. | +| `message` | `message` | For unstructured events, the flat text payload. | +| `event` | `event` | The logging event, if structured (see below for details). | +| `stacks` | `stacks` | Goroutine stacks, for fatal events. | + +When an entry is structured, the `event` field maps to a dictionary +whose structure is one of the documented structured events. See the [reference documentation](eventlog.html) +for structured events for a list of possible payloads. + +When the entry is marked as `redactable`, the `tags`, `message`, and/or `event` payloads +contain delimiters (‹...›) around +fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) when log redaction is requested. + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `datetime-format` | The format to use for the `datetime` field. The value can be one of `none`, `iso8601`/`rfc3339` (synonyms), or `rfc1123`. Default is `none`. | +| `datetime-timezone` | The timezone to use for the `datetime` field. The value can be any timezone name recognized by the Go standard library. Default is `UTC` | +| `tag-style` | The tags to include in the envelope. The value can be `compact` (one letter tags) or `verbose` (long-form tags). Default is `verbose`. 
| +| `fluent-tag` | Whether to produce an additional field called `tag` for Fluent compatibility. Default is `false`. | + + + +## Format `json-compact` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: false` +- `tag-style: compact` + + +## Format `json-fluent` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: true` +- `tag-style: verbose` + + +## Format `json-fluent-compact` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: true` +- `tag-style: compact` + + diff --git a/src/current/_includes/cockroach-generated/release-24.1/logging.md b/src/current/_includes/cockroach-generated/release-24.1/logging.md new file mode 100644 index 00000000000..8b628dc2dd9 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-24.1/logging.md @@ -0,0 +1,179 @@ +## Logging levels (severities) + +### INFO + +The `INFO` severity is used for informational messages that do not +require action. + +### WARNING + +The `WARNING` severity is used for situations which may require special handling, +where normal operation is expected to resume automatically. + +### ERROR + +The `ERROR` severity is used for situations that require special handling, +where normal operation could not proceed as expected. +Other operations can continue mostly unaffected. + +### FATAL + +The `FATAL` severity is used for situations that require an immedate, hard +server shutdown. A report is also sent to telemetry if telemetry +is enabled. + + +## Logging channels + +### `DEV` + +The `DEV` channel is used during development to collect log +details useful for troubleshooting that fall outside the +scope of other channels. It is also the default logging +channel for events not associated with a channel. + +This channel is special in that there are no constraints as to +what may or may not be logged on it. 
Conversely, users in +production deployments are invited to not collect `DEV` logs in +centralized logging facilities, because they likely contain +sensitive operational data. +See [Configure logs](configure-logs.html#dev-channel). + +### `OPS` + +The `OPS` channel is used to report "point" operational events, +initiated by user operators or automation: + + - Operator or system actions on server processes: process starts, + stops, shutdowns, crashes (if they can be logged), + including each time: command-line parameters, current version being run + - Actions that impact the topology of a cluster: node additions, + removals, decommissions, etc. + - Job-related initiation or termination + - [Cluster setting](cluster-settings.html) changes + - [Zone configuration](configure-replication-zones.html) changes + +### `HEALTH` + +The `HEALTH` channel is used to report "background" operational +events, initiated by CockroachDB or reporting on automatic processes: + + - Current resource usage, including critical resource usage + - Node-node connection events, including connection errors and + gossip details + - Range and table leasing events + - Up- and down-replication, range unavailability + +### `STORAGE` + +The `STORAGE` channel is used to report low-level storage +layer events (RocksDB/Pebble). + +### `SESSIONS` + +The `SESSIONS` channel is used to report client network activity when enabled via +the `server.auth_log.sql_connections.enabled` and/or +`server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): + + - Connections opened/closed + - Authentication events: logins, failed attempts + - Session and query cancellation + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. 
+ +### `SQL_SCHEMA` + +The `SQL_SCHEMA` channel is used to report changes to the +SQL logical schema, excluding privilege and ownership changes +(which are reported separately on the `PRIVILEGES` channel) and +zone configuration changes (which go to the `OPS` channel). + +This includes: + + - Database/schema/table/sequence/view/type creation + - Adding/removing/changing table columns + - Changing sequence parameters + +`SQL_SCHEMA` events generally comprise changes to the schema that affect the +functional behavior of client apps using stored objects. + +### `USER_ADMIN` + +The `USER_ADMIN` channel is used to report changes +in users and roles, including: + + - Users added/dropped + - Changes to authentication credentials (e.g., passwords, validity, etc.) + - Role grants/revocations + - Role option grants/revocations + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. + +### `PRIVILEGES` + +The `PRIVILEGES` channel is used to report data +authorization changes, including: + + - Privilege grants/revocations on database, objects, etc. + - Object ownership changes + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. + +### `SENSITIVE_ACCESS` + +The `SENSITIVE_ACCESS` channel is used to report SQL +data access to sensitive data: + + - Data access audit events (when table audit is enabled via + [ALTER TABLE ... EXPERIMENTAL_AUDIT](alter-table.html#experimental_audit)) + - Data access audit events (when role-based audit is enabled via + [`sql.log.user_audit` cluster setting](role-based-audit-logging.html#syntax-of-audit-settings)) + - SQL statements executed by users with the admin role + - Operations that write to system tables + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. 
+ +### `SQL_EXEC` + +The `SQL_EXEC` channel is used to report SQL execution on +behalf of client connections: + + - Logical SQL statement executions (when enabled via the + `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) + - uncaught Go panic errors during the execution of a SQL statement. + +### `SQL_PERF` + +The `SQL_PERF` channel is used to report SQL executions +that are marked as "out of the ordinary" +to facilitate performance investigations. +This includes the SQL "slow query log". + +Arguably, this channel overlaps with `SQL_EXEC`. +However, we keep both channels separate for backward compatibility +with versions prior to v21.1, where the corresponding events +were redirected to separate files. + +### `SQL_INTERNAL_PERF` + +The `SQL_INTERNAL_PERF` channel is like the `SQL_PERF` channel, but is aimed at +helping developers of CockroachDB itself. It exists as a separate +channel so as to not pollute the `SQL_PERF` logging output with +internal troubleshooting details. + +### `TELEMETRY` + +The `TELEMETRY` channel reports telemetry events. Telemetry events describe +feature usage within CockroachDB and anonymizes any application- +specific data. + +### `KV_DISTRIBUTION` + +The `KV_DISTRIBUTION` channel is used to report data distribution events, such as moving +replicas between stores in the cluster, or adding (removing) replicas to +ranges. 
+ diff --git a/src/current/_includes/cockroach-generated/release-24.1/settings/settings.html b/src/current/_includes/cockroach-generated/release-24.1/settings/settings.html new file mode 100644 index 00000000000..be6ebf12087 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-24.1/settings/settings.html @@ -0,0 +1,310 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
SettingTypeDefaultDescriptionSupported Deployments
admission.disk_bandwidth_tokens.elastic.enabled
booleantruewhen true, and provisioned bandwidth for the disk corresponding to a store is configured, tokens for elastic work will be limited if disk bandwidth becomes a bottleneckDedicated/Self-Hosted
admission.epoch_lifo.enabled
booleanfalsewhen true, epoch-LIFO behavior is enabled when there is significant delay in admissionServerless/Dedicated/Self-Hosted
admission.epoch_lifo.epoch_closing_delta_duration
duration5msthe delta duration before closing an epoch, for epoch-LIFO admission control orderingServerless/Dedicated/Self-Hosted
admission.epoch_lifo.epoch_duration
duration100msthe duration of an epoch, for epoch-LIFO admission control orderingServerless/Dedicated/Self-Hosted
admission.epoch_lifo.queue_delay_threshold_to_switch_to_lifo
duration105msthe queue delay encountered by a (tenant,priority) for switching to epoch-LIFO orderingServerless/Dedicated/Self-Hosted
admission.kv.enabled
booleantruewhen true, work performed by the KV layer is subject to admission controlDedicated/Self-Hosted
admission.sql_kv_response.enabled
booleantruewhen true, work performed by the SQL layer when receiving a KV response is subject to admission controlServerless/Dedicated/Self-Hosted
admission.sql_sql_response.enabled
booleantruewhen true, work performed by the SQL layer when receiving a DistSQL response is subject to admission controlServerless/Dedicated/Self-Hosted
bulkio.backup.deprecated_full_backup_with_subdir.enabled
booleanfalsewhen true, a backup command with a user specified subdirectory will create a full backup at the subdirectory if no backup already exists at that subdirectoryServerless/Dedicated/Self-Hosted
bulkio.backup.file_size
byte size128 MiBtarget size for individual data files produced during BACKUPServerless/Dedicated/Self-Hosted
bulkio.backup.read_timeout
duration5m0samount of time after which a read attempt is considered timed out, which causes the backup to failServerless/Dedicated/Self-Hosted
bulkio.backup.read_with_priority_after
duration1m0samount of time since the read-as-of time above which a BACKUP should use priority when retrying readsServerless/Dedicated/Self-Hosted
physical_replication.consumer.minimum_flush_interval
(alias: bulkio.stream_ingestion.minimum_flush_interval)
duration5sthe minimum timestamp between flushes; flushes may still occur if internal buffers fill upDedicated/Self-Hosted
changefeed.aggregator.flush_jitter
float0.1jitter aggregator flushes as a fraction of min_checkpoint_frequency. This setting has no effect if min_checkpoint_frequency is set to 0.Serverless/Dedicated/Self-Hosted
changefeed.backfill.concurrent_scan_requests
integer0number of concurrent scan requests per node issued during a backfillServerless/Dedicated/Self-Hosted
changefeed.backfill.scan_request_size
integer524288the maximum number of bytes returned by each scan requestServerless/Dedicated/Self-Hosted
changefeed.batch_reduction_retry.enabled
(alias: changefeed.batch_reduction_retry_enabled)
booleanfalseif true, kafka changefeeds upon erroring on an oversized batch will attempt to resend the messages with progressively lower batch sizesServerless/Dedicated/Self-Hosted
changefeed.default_range_distribution_strategy
enumerationdefaultconfigures how work is distributed among nodes for a given changefeed. for the most balanced distribution, use `balanced_simple`. changing this setting will not override locality restrictions [default = 0, balanced_simple = 1]Serverless/Dedicated/Self-Hosted
changefeed.event_consumer_worker_queue_size
integer16if changefeed.event_consumer_workers is enabled, this setting sets the maxmimum number of events which a worker can bufferServerless/Dedicated/Self-Hosted
changefeed.event_consumer_workers
integer0the number of workers to use when processing events: <0 disables, 0 assigns a reasonable default, >0 assigns the setting value. for experimental/core changefeeds and changefeeds using parquet format, this is disabledServerless/Dedicated/Self-Hosted
changefeed.fast_gzip.enabled
booleantrueuse fast gzip implementationServerless/Dedicated/Self-Hosted
changefeed.frontier_highwater_lag_checkpoint_threshold
duration10m0scontrols the maximum the high-water mark is allowed to lag behind the leading spans of the frontier before per-span checkpointing is enabled; if 0, checkpointing due to high-water lag is disabledServerless/Dedicated/Self-Hosted
changefeed.kafka_v2_error_details.enabled
booleanfalseif enabled, Kafka v2 sinks will include the message key, size, and MVCC timestamp in message too large errorsServerless/Dedicated/Self-Hosted
changefeed.memory.per_changefeed_limit
byte size512 MiBcontrols amount of data that can be buffered per changefeedServerless/Dedicated/Self-Hosted
changefeed.min_highwater_advance
duration0sminimum amount of time the changefeed high water mark must advance for it to be eligible for checkpointing; Default of 0 will checkpoint every time frontier advances, as long as the rate of checkpointing keeps up with the rate of frontier changesServerless/Dedicated/Self-Hosted
changefeed.node_throttle_config
stringspecifies node level throttling configuration for all changefeeedsServerless/Dedicated/Self-Hosted
changefeed.protect_timestamp.max_age
duration96h0m0sfail the changefeed if the protected timestamp age exceeds this threshold; 0 disables expirationServerless/Dedicated/Self-Hosted
changefeed.protect_timestamp_interval
duration10m0scontrols how often the changefeed forwards its protected timestamp to the resolved timestampServerless/Dedicated/Self-Hosted
changefeed.schema_feed.read_with_priority_after
duration1m0sretry with high priority if we were not able to read descriptors for too long; 0 disablesServerless/Dedicated/Self-Hosted
changefeed.sink_io_workers
integer0the number of workers used by changefeeds when sending requests to the sink (currently the batching versions of webhook, pubsub, and kafka sinks that are enabled by changefeed.new_<sink type>_sink_enabled only): <0 disables, 0 assigns a reasonable default, >0 assigns the setting valueServerless/Dedicated/Self-Hosted
cloudstorage.azure.concurrent_upload_buffers
integer1controls the number of concurrent buffers that will be used by the Azure client when uploading chunks.Each buffer can buffer up to cloudstorage.write_chunk.size of memory during an uploadServerless/Dedicated/Self-Hosted
cloudstorage.http.custom_ca
stringcustom root CA (appended to system's default CAs) for verifying certificates when interacting with HTTPS storageServerless/Dedicated/Self-Hosted
cloudstorage.timeout
duration10m0sthe timeout for import/export storage operationsServerless/Dedicated/Self-Hosted
cluster.auto_upgrade.enabled
booleantruedisable automatic cluster version upgrade until resetServerless/Dedicated/Self-Hosted
cluster.organization
stringorganization nameServerless/Dedicated/Self-Hosted (read-only)
cluster.preserve_downgrade_option
stringdisable (automatic or manual) cluster version upgrade from the specified version until resetServerless/Dedicated/Self-Hosted
diagnostics.active_query_dumps.enabled
booleantrueexperimental: enable dumping of anonymized active queries to disk when node is under memory pressureServerless/Dedicated/Self-Hosted (read-only)
diagnostics.forced_sql_stat_reset.interval
duration2h0m0sinterval after which the reported SQL Stats are reset even if not collected by telemetry reporter. It has a max value of 24H.Serverless/Dedicated/Self-Hosted
diagnostics.memory_monitoring_dumps.enabled
booleantrueenable dumping of memory monitoring state at the same time as heap profiles are takenServerless/Dedicated/Self-Hosted (read-only)
diagnostics.reporting.enabled
booleantrueenable reporting diagnostic metrics to cockroach labs, but is ignored for Trial or Free licensesServerless/Dedicated/Self-Hosted
diagnostics.reporting.interval
duration1h0m0sinterval at which diagnostics data should be reportedServerless/Dedicated/Self-Hosted
enterprise.license
stringthe encoded cluster licenseServerless/Dedicated/Self-Hosted (read-only)
external.graphite.endpoint
stringif nonempty, push server metrics to the Graphite or Carbon server at the specified host:portServerless/Dedicated/Self-Hosted
external.graphite.interval
duration10sthe interval at which metrics are pushed to Graphite (if enabled)Serverless/Dedicated/Self-Hosted
feature.backup.enabled
booleantrueset to true to enable backups, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.changefeed.enabled
booleantrueset to true to enable changefeeds, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.export.enabled
booleantrueset to true to enable exports, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.import.enabled
booleantrueset to true to enable imports, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.restore.enabled
booleantrueset to true to enable restore, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.schema_change.enabled
booleantrueset to true to enable schema changes, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.stats.enabled
booleantrueset to true to enable CREATE STATISTICS/ANALYZE, false to disable; default is trueServerless/Dedicated/Self-Hosted
jobs.retention_time
duration336h0m0sthe amount of time for which records for completed jobs are retainedServerless/Dedicated/Self-Hosted
kv.allocator.lease_rebalance_threshold
float0.05minimum fraction away from the mean a store's lease count can be before it is considered for lease-transfersDedicated/Self-Hosted
kv.allocator.load_based_lease_rebalancing.enabled
booleantrueset to enable rebalancing of range leases based on load and latencyDedicated/Self-Hosted
kv.allocator.load_based_rebalancing
enumerationleases and replicaswhether to rebalance based on the distribution of load across stores [off = 0, leases = 1, leases and replicas = 2]Dedicated/Self-Hosted
kv.allocator.load_based_rebalancing.objective
enumerationcpuwhat objective does the cluster use to rebalance; if set to `qps` the cluster will attempt to balance qps among stores, if set to `cpu` the cluster will attempt to balance cpu usage among stores [qps = 0, cpu = 1]Dedicated/Self-Hosted
kv.allocator.load_based_rebalancing_interval
duration1m0sthe rough interval at which each store will check for load-based lease / replica rebalancing opportunitiesDedicated/Self-Hosted
kv.allocator.qps_rebalance_threshold
float0.1minimum fraction away from the mean a store's QPS (such as queries per second) can be before it is considered overfull or underfullDedicated/Self-Hosted
kv.allocator.range_rebalance_threshold
float0.05minimum fraction away from the mean a store's range count can be before it is considered overfull or underfullDedicated/Self-Hosted
kv.allocator.store_cpu_rebalance_threshold
float0.1minimum fraction away from the mean a store's cpu usage can be before it is considered overfull or underfullDedicated/Self-Hosted
kv.bulk_io_write.max_rate
byte size1.0 TiBthe rate limit (bytes/sec) to use for writes to disk on behalf of bulk io opsDedicated/Self-Hosted
kv.bulk_sst.max_allowed_overage
byte size64 MiBif positive, allowed size in excess of target size for SSTs from export requests; export requests (i.e. BACKUP) may buffer up to the sum of kv.bulk_sst.target_size and kv.bulk_sst.max_allowed_overage in memoryDedicated/Self-Hosted
kv.bulk_sst.target_size
byte size16 MiBtarget size for SSTs emitted from export requests; export requests (i.e. BACKUP) may buffer up to the sum of kv.bulk_sst.target_size and kv.bulk_sst.max_allowed_overage in memoryServerless/Dedicated/Self-Hosted (read-only)
kv.closed_timestamp.follower_reads.enabled
(alias: kv.closed_timestamp.follower_reads_enabled)
booleantrueallow (all) replicas to serve consistent historical reads based on closed timestamp informationServerless/Dedicated/Self-Hosted (read-only)
kv.closed_timestamp.lead_for_global_reads_override
duration0sif nonzero, overrides the lead time that global_read ranges use to publish closed timestampsServerless/Dedicated/Self-Hosted (read-only)
kv.closed_timestamp.side_transport_interval
duration200msthe interval at which the closed timestamp side-transport attempts to advance each range's closed timestamp; set to 0 to disable the side-transportServerless/Dedicated/Self-Hosted (read-only)
kv.closed_timestamp.target_duration
duration3sif nonzero, attempt to provide closed timestamp notifications for timestamps trailing cluster time by approximately this durationServerless/Dedicated/Self-Hosted (read-only)
kv.dist_sender.circuit_breaker.cancellation.enabled
booleantruewhen enabled, in-flight requests will be cancelled when the circuit breaker tripsServerless/Dedicated/Self-Hosted
kv.dist_sender.circuit_breaker.cancellation.write_grace_period
duration10show long after the circuit breaker trips to cancel write requests (these can't retry internally, so should be long enough to allow quorum/lease recovery)Serverless/Dedicated/Self-Hosted
kv.dist_sender.circuit_breaker.probe.interval
duration3sinterval between replica probesServerless/Dedicated/Self-Hosted
kv.dist_sender.circuit_breaker.probe.threshold
duration3sduration of errors or stalls after which a replica will be probedServerless/Dedicated/Self-Hosted
kv.dist_sender.circuit_breaker.probe.timeout
duration3stimeout for replica probesServerless/Dedicated/Self-Hosted
kv.dist_sender.circuit_breakers.mode
enumerationliveness range onlyset of ranges to trip circuit breakers for failing or stalled replicas [no ranges = 0, liveness range only = 1, all ranges = 2]Serverless/Dedicated/Self-Hosted
kv.lease_transfer_read_summary.global_budget
byte size0 Bcontrols the maximum number of bytes that will be used to summarize the global segment of the timestamp cache during lease transfers and range merges. A smaller budget will result in loss of precision.Dedicated/Self-Hosted
kv.lease_transfer_read_summary.local_budget
byte size4.0 MiBcontrols the maximum number of bytes that will be used to summarize the local segment of the timestamp cache during lease transfers and range merges. A smaller budget will result in loss of precision.Dedicated/Self-Hosted
kv.log_range_and_node_events.enabled
booleantrueset to true to transactionally log range events (e.g., split, merge, add/remove voter/non-voter) into system.rangelog and node join and restart events into system.eventlogDedicated/Self-Hosted
kv.protectedts.reconciliation.interval
duration5m0sthe frequency for reconciling jobs with protected timestamp recordsServerless/Dedicated/Self-Hosted (read-only)
kv.range_split.by_load.enabled
(alias: kv.range_split.by_load_enabled)
booleantrueallow automatic splits of ranges based on where load is concentratedDedicated/Self-Hosted
kv.range_split.load_cpu_threshold
duration500msthe CPU use per second over which the range becomes a candidate for load based splittingDedicated/Self-Hosted
kv.range_split.load_qps_threshold
integer2500the QPS over which the range becomes a candidate for load based splittingDedicated/Self-Hosted
kv.rangefeed.client.stream_startup_rate
integer100controls the rate per second the client will initiate new rangefeed stream for a single range; 0 implies unlimitedServerless/Dedicated/Self-Hosted
kv.rangefeed.closed_timestamp_refresh_interval
duration3sthe interval at which closed-timestamp updates are delivered to rangefeeds; set to 0 to use kv.closed_timestamp.side_transport_intervalServerless/Dedicated/Self-Hosted (read-only)
kv.rangefeed.enabled
booleanfalseif set, rangefeed registration is enabledServerless/Dedicated/Self-Hosted (read-only)
kv.rangefeed.range_stuck_threshold
duration1m0srestart rangefeeds if they don't emit anything for the specified threshold; 0 disables (kv.rangefeed.closed_timestamp_refresh_interval takes precedence)Serverless/Dedicated/Self-Hosted
kv.replica_circuit_breaker.slow_replication_threshold
duration1m0sduration after which slow proposals trip the per-Replica circuit breaker (zero duration disables breakers)Dedicated/Self-Hosted
kv.replica_stats.addsst_request_size_factor
integer50000the divisor that is applied to addsstable request sizes, then recorded in a leaseholders QPS; 0 means all requests are treated as cost 1Dedicated/Self-Hosted
kv.replication_reports.interval
duration1m0sthe frequency for generating the replication_constraint_stats, replication_stats_report and replication_critical_localities reports (set to 0 to disable)Dedicated/Self-Hosted
kv.snapshot_rebalance.max_rate
byte size32 MiBthe rate limit (bytes/sec) to use for rebalance and upreplication snapshotsDedicated/Self-Hosted
kv.snapshot_receiver.excise.enabled
booleantrueset to false to disable excises in place of range deletions for KV snapshotsDedicated/Self-Hosted
kv.transaction.max_intents_bytes
integer4194304maximum number of bytes used to track locks in transactionsServerless/Dedicated/Self-Hosted
kv.transaction.max_refresh_spans_bytes
integer4194304maximum number of bytes used to track refresh spans in serializable transactionsServerless/Dedicated/Self-Hosted
kv.transaction.reject_over_max_intents_budget.enabled
booleanfalseif set, transactions that exceed their lock tracking budget (kv.transaction.max_intents_bytes) are rejected instead of having their lock spans imprecisely compressedServerless/Dedicated/Self-Hosted
kv.transaction.write_pipelining.locking_reads.enabled
booleantrueif enabled, transactional locking reads are pipelined through Raft consensusServerless/Dedicated/Self-Hosted
kv.transaction.write_pipelining.ranged_writes.enabled
booleantrueif enabled, transactional ranged writes are pipelined through Raft consensusServerless/Dedicated/Self-Hosted
kv.transaction.write_pipelining.enabled
(alias: kv.transaction.write_pipelining_enabled)
booleantrueif enabled, transactional writes are pipelined through Raft consensusServerless/Dedicated/Self-Hosted
kv.transaction.write_pipelining.max_batch_size
(alias: kv.transaction.write_pipelining_max_batch_size)
integer128if non-zero, defines the maximum size batch that will be pipelined through Raft consensusServerless/Dedicated/Self-Hosted
kvadmission.store.provisioned_bandwidth
byte size0 Bif set to a non-zero value, this is used as the provisioned bandwidth (in bytes/s), for each store. It can be overridden on a per-store basis using the --store flag. Note that setting the provisioned bandwidth to a positive value may enable disk bandwidth based admission control, since admission.disk_bandwidth_tokens.elastic.enabled defaults to trueDedicated/Self-Hosted
schedules.backup.gc_protection.enabled
booleantrueenable chaining of GC protection across backups run as part of a scheduleServerless/Dedicated/Self-Hosted
security.client_cert.subject_required.enabled
booleanfalsemandates a requirement for subject role to be set for db userServerless/Dedicated/Self-Hosted (read-only)
security.ocsp.mode
enumerationoffuse OCSP to check whether TLS certificates are revoked. If the OCSP server is unreachable, in strict mode all certificates will be rejected and in lax mode all certificates will be accepted. [off = 0, lax = 1, strict = 2]Serverless/Dedicated/Self-Hosted
security.ocsp.timeout
duration3stimeout before considering the OCSP server unreachableServerless/Dedicated/Self-Hosted
server.auth_log.sql_connections.enabled
booleanfalseif set, log SQL client connect and disconnect events to the SESSIONS log channel (note: may hinder performance on loaded nodes)Serverless/Dedicated/Self-Hosted
server.auth_log.sql_sessions.enabled
booleanfalseif set, log verbose SQL session authentication events to the SESSIONS log channel (note: may hinder performance on loaded nodes). Session start and end events are always logged regardless of this setting; disable the SESSIONS log channel to suppress them.Serverless/Dedicated/Self-Hosted
server.authentication_cache.enabled
booleantrueenables a cache used during authentication to avoid lookups to system tables when retrieving per-user authentication-related informationServerless/Dedicated/Self-Hosted
server.child_metrics.enabled
booleanfalseenables the exporting of child metrics, additional prometheus time series with extra labelsServerless/Dedicated/Self-Hosted
server.client_cert_expiration_cache.capacity
integer1000the maximum number of client cert expirations storedServerless/Dedicated/Self-Hosted
server.clock.forward_jump_check.enabled
(alias: server.clock.forward_jump_check_enabled)
booleanfalseif enabled, forward clock jumps > max_offset/2 will cause a panicServerless/Dedicated/Self-Hosted
server.clock.persist_upper_bound_interval
duration0sthe interval between persisting the wall time upper bound of the clock. The clock does not generate a wall time greater than the persisted timestamp and will panic if it sees a wall time greater than this value. When cockroach starts, it waits for the wall time to catch-up till this persisted timestamp. This guarantees monotonic wall time across server restarts. Not setting this or setting a value of 0 disables this feature.Serverless/Dedicated/Self-Hosted
server.consistency_check.max_rate
byte size8.0 MiBthe rate limit (bytes/sec) to use for consistency checks; used in conjunction with server.consistency_check.interval to control the frequency of consistency checks. Note that setting this too high can negatively impact performance.Dedicated/Self-Hosted
server.eventlog.enabled
booleantrueif set, logged notable events are also stored in the table system.eventlogServerless/Dedicated/Self-Hosted
server.eventlog.ttl
duration2160h0m0sif nonzero, entries in system.eventlog older than this duration are periodically purgedServerless/Dedicated/Self-Hosted
server.host_based_authentication.configuration
stringhost-based authentication configuration to use during connection authenticationServerless/Dedicated/Self-Hosted
server.hot_ranges_request.node.timeout
duration5m0sthe duration allowed for a single node to return hot range data before the request is cancelled; if set to 0, there is no timeoutServerless/Dedicated/Self-Hosted
server.hsts.enabled
booleanfalseif true, HSTS headers will be sent along with all HTTP requests. The headers will contain a max-age setting of one year. Browsers honoring the header will always use HTTPS to access the DB Console. Ensure that TLS is correctly configured prior to enabling.Serverless/Dedicated/Self-Hosted
server.http.base_path
string/path to redirect the user to upon successful loginServerless/Dedicated/Self-Hosted
server.identity_map.configuration
stringsystem-identity to database-username mappingsServerless/Dedicated/Self-Hosted
server.log_gc.max_deletions_per_cycle
integer1000the maximum number of entries to delete on each purge of log-like system tablesServerless/Dedicated/Self-Hosted
server.log_gc.period
duration1h0m0sthe period at which log-like system tables are checked for old entriesServerless/Dedicated/Self-Hosted
server.max_connections_per_gateway
integer-1the maximum number of SQL connections per gateway allowed at a given time (note: this will only limit future connection attempts and will not affect already established connections). Negative values result in unlimited number of connections. Superusers are not affected by this limit.Serverless/Dedicated/Self-Hosted
server.max_open_transactions_per_gateway
integer-1the maximum number of open SQL transactions per gateway allowed at a given time. Negative values result in unlimited number of connections. Superusers are not affected by this limit.Serverless/Dedicated/Self-Hosted
server.oidc_authentication.autologin.enabled
(alias: server.oidc_authentication.autologin)
booleanfalseif true, logged-out visitors to the DB Console will be automatically redirected to the OIDC login endpointServerless/Dedicated/Self-Hosted
server.oidc_authentication.button_text
stringLog in with your OIDC providertext to show on button on DB Console login page to login with your OIDC provider (only shown if OIDC is enabled)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.claim_json_key
stringsets JSON key of principal to extract from payload after OIDC authentication completes (usually email or sid)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.client_id
stringsets OIDC client idServerless/Dedicated/Self-Hosted
server.oidc_authentication.client_secret
stringsets OIDC client secretServerless/Dedicated/Self-Hosted
server.oidc_authentication.enabled
booleanfalseenables or disables OIDC login for the DB ConsoleServerless/Dedicated/Self-Hosted
server.oidc_authentication.principal_regex
string(.+)regular expression to apply to extracted principal (see claim_json_key setting) to translate to SQL user (golang regex format, must include 1 grouping to extract)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.provider_url
stringsets OIDC provider URL ({provider_url}/.well-known/openid-configuration must resolve)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.redirect_url
stringhttps://localhost:8080/oidc/v1/callbacksets OIDC redirect URL via a URL string or a JSON string containing a required `redirect_urls` key with an object that maps from region keys to URL strings (URLs should point to your load balancer and must route to the path /oidc/v1/callback)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.scopes
stringopenidsets OIDC scopes to include with authentication request (space delimited list of strings, required to start with `openid`)Serverless/Dedicated/Self-Hosted
server.rangelog.ttl
duration720h0m0sif nonzero, entries in system.rangelog older than this duration are periodically purgedDedicated/Self-Hosted
server.redact_sensitive_settings.enabled
booleanfalseenables or disables the redaction of sensitive settings in the output of SHOW CLUSTER SETTINGS and SHOW ALL CLUSTER SETTINGS for users without the MODIFYCLUSTERSETTING privilegeServerless/Dedicated/Self-Hosted
server.shutdown.connections.timeout
(alias: server.shutdown.connection_wait)
duration0sthe maximum amount of time a server waits for all SQL connections to be closed before proceeding with a drain. (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Serverless/Dedicated/Self-Hosted
server.shutdown.initial_wait
(alias: server.shutdown.drain_wait)
duration0sthe amount of time a server waits in an unready state before proceeding with a drain (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting. --drain-wait is to specify the duration of the whole draining process, while server.shutdown.initial_wait is to set the wait time for health probes to notice that the node is not ready.)Serverless/Dedicated/Self-Hosted
server.shutdown.lease_transfer_iteration.timeout
(alias: server.shutdown.lease_transfer_wait)
duration5sthe timeout for a single iteration of the range lease transfer phase of draining (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Dedicated/Self-Hosted
server.shutdown.transactions.timeout
(alias: server.shutdown.query_wait)
duration10sthe timeout for waiting for active transactions to finish during a drain (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Serverless/Dedicated/Self-Hosted
server.sql_tcp_keep_alive.count
integer3maximum number of probes that will be sent out before a connection is dropped because it's unresponsive (Linux and Darwin only)Serverless/Dedicated/Self-Hosted
server.sql_tcp_keep_alive.interval
duration10stime between keep alive probes and idle time before probes are sent outServerless/Dedicated/Self-Hosted
server.time_until_store_dead
duration5m0sthe time after which if there is no new gossiped information about a store, it is considered deadServerless/Dedicated/Self-Hosted
server.user_login.cert_password_method.auto_scram_promotion.enabled
booleantruewhether to automatically promote cert-password authentication to use SCRAMServerless/Dedicated/Self-Hosted
server.user_login.downgrade_scram_stored_passwords_to_bcrypt.enabled
booleantrueif server.user_login.password_encryption=crdb-bcrypt, this controls whether to automatically re-encode stored passwords using scram-sha-256 to crdb-bcryptServerless/Dedicated/Self-Hosted
server.user_login.min_password_length
integer1the minimum length accepted for passwords set in cleartext via SQL. Note that a value lower than 1 is ignored: passwords cannot be empty in any case.Serverless/Dedicated/Self-Hosted
server.user_login.password_encryption
enumerationscram-sha-256which hash method to use to encode cleartext passwords passed via ALTER/CREATE USER/ROLE WITH PASSWORD [crdb-bcrypt = 2, scram-sha-256 = 3]Serverless/Dedicated/Self-Hosted
server.user_login.password_hashes.default_cost.crdb_bcrypt
integer10the hashing cost to use when storing passwords supplied as cleartext by SQL clients with the hashing method crdb-bcrypt (allowed range: 4-31)Serverless/Dedicated/Self-Hosted
server.user_login.password_hashes.default_cost.scram_sha_256
integer10610the hashing cost to use when storing passwords supplied as cleartext by SQL clients with the hashing method scram-sha-256 (allowed range: 4096-240000000000)Serverless/Dedicated/Self-Hosted
server.user_login.rehash_scram_stored_passwords_on_cost_change.enabled
booleantrueif server.user_login.password_hashes.default_cost.scram_sha_256 differs from the cost in a stored hash, this controls whether to automatically re-encode stored passwords using scram-sha-256 with the new default costServerless/Dedicated/Self-Hosted
server.user_login.timeout
duration10stimeout after which client authentication times out if some system range is unavailable (0 = no timeout)Serverless/Dedicated/Self-Hosted
server.user_login.upgrade_bcrypt_stored_passwords_to_scram.enabled
booleantrueif server.user_login.password_encryption=scram-sha-256, this controls whether to automatically re-encode stored passwords using crdb-bcrypt to scram-sha-256Serverless/Dedicated/Self-Hosted
server.web_session.purge.ttl
duration1h0m0sif nonzero, entries in system.web_sessions older than this duration are periodically purgedServerless/Dedicated/Self-Hosted
server.web_session.timeout
(alias: server.web_session_timeout)
duration168h0m0sthe duration that a newly created web session will be validServerless/Dedicated/Self-Hosted
spanconfig.bounds.enabled
booleantruedictates whether span config bounds are consulted when serving span configs for secondary tenantsDedicated/Self-Hosted
spanconfig.range_coalescing.system.enabled
(alias: spanconfig.storage_coalesce_adjacent.enabled)
booleantruecollapse adjacent ranges with the same span configs, for the ranges specific to the system tenantDedicated/Self-Hosted
spanconfig.range_coalescing.application.enabled
(alias: spanconfig.tenant_coalesce_adjacent.enabled)
booleantruecollapse adjacent ranges with the same span configs across all secondary tenant keyspacesDedicated/Self-Hosted
sql.auth.change_own_password.enabled
booleanfalsecontrols whether a user is allowed to change their own password, even if they have no other privilegesServerless/Dedicated/Self-Hosted
sql.auth.grant_option_for_owner.enabled
booleantruedetermines whether the GRANT OPTION for privileges is implicitly given to the owner of an objectServerless/Dedicated/Self-Hosted
sql.auth.grant_option_inheritance.enabled
booleantruedetermines whether the GRANT OPTION for privileges is inherited through role membershipServerless/Dedicated/Self-Hosted
sql.auth.public_schema_create_privilege.enabled
booleantruedetermines whether to grant all users the CREATE privileges on the public schema when it is createdServerless/Dedicated/Self-Hosted
sql.closed_session_cache.capacity
integer1000the maximum number of sessions in the cacheServerless/Dedicated/Self-Hosted
sql.closed_session_cache.time_to_live
integer3600the maximum time to live, in secondsServerless/Dedicated/Self-Hosted
sql.contention.event_store.capacity
byte size64 MiBthe in-memory storage capacity per-node of contention event storeServerless/Dedicated/Self-Hosted
sql.contention.event_store.duration_threshold
duration0sminimum contention duration to cause the contention events to be collected into crdb_internal.transaction_contention_eventsServerless/Dedicated/Self-Hosted
sql.contention.record_serialization_conflicts.enabled
booleantrueenables recording 40001 errors with conflicting txn meta as SERIALIZATION_CONFLICT contention events into crdb_internal.transaction_contention_eventsServerless/Dedicated/Self-Hosted
sql.contention.txn_id_cache.max_size
byte size64 MiBthe maximum byte size TxnID cache will use (set to 0 to disable)Serverless/Dedicated/Self-Hosted
sql.cross_db_fks.enabled
booleanfalseif true, creating foreign key references across databases is allowedServerless/Dedicated/Self-Hosted
sql.cross_db_sequence_owners.enabled
booleanfalseif true, creating sequences owned by tables from other databases is allowedServerless/Dedicated/Self-Hosted
sql.cross_db_sequence_references.enabled
booleanfalseif true, sequences referenced by tables from other databases are allowedServerless/Dedicated/Self-Hosted
sql.cross_db_views.enabled
booleanfalseif true, creating views that refer to other databases is allowedServerless/Dedicated/Self-Hosted
sql.defaults.cost_scans_with_default_col_size.enabled
booleanfalsesetting to true uses the same size for all columns to compute scan cost
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.datestyle
enumerationiso, mdydefault value for DateStyle session setting [iso, mdy = 0, iso, dmy = 1, iso, ymd = 2]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.default_hash_sharded_index_bucket_count
integer16used as bucket count if bucket count is not specified in hash sharded index definition
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.default_int_size
integer8the size, in bytes, of an INT type
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.disallow_full_table_scans.enabled
booleanfalsesetting to true rejects queries that have planned a full table scan
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.distsql
enumerationautodefault distributed SQL execution mode [off = 0, auto = 1, on = 2, always = 3]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_alter_column_type.enabled
booleanfalsedefault value for experimental_alter_column_type session setting; enables the use of ALTER COLUMN TYPE for general conversions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_distsql_planning
enumerationoffdefault experimental_distsql_planning mode; enables experimental opt-driven DistSQL planning [off = 0, on = 1]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_enable_unique_without_index_constraints.enabled
booleanfalsedefault value for experimental_enable_unique_without_index_constraints session setting; disables unique without index constraints by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_implicit_column_partitioning.enabled
booleanfalsedefault value for experimental_enable_temp_tables; allows for the use of implicit column partitioning
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_temporary_tables.enabled
booleanfalsedefault value for experimental_enable_temp_tables; allows for use of temporary tables by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.foreign_key_cascades_limit
integer10000default value for foreign_key_cascades_limit session setting; limits the number of cascading operations that run as part of a single query
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.idle_in_session_timeout
duration0sdefault value for the idle_in_session_timeout session setting; controls the duration a session is permitted to idle before the session is terminated; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.idle_in_transaction_session_timeout
duration0sdefault value for the idle_in_transaction_session_timeout; controls the duration a session is permitted to idle in a transaction before the session is terminated; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.implicit_select_for_update.enabled
booleantruedefault value for enable_implicit_select_for_update session setting; enables FOR UPDATE locking during the row-fetch phase of mutation statements
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.insert_fast_path.enabled
booleantruedefault value for enable_insert_fast_path session setting; enables a specialized insert path
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.intervalstyle
enumerationpostgresdefault value for IntervalStyle session setting [postgres = 0, iso_8601 = 1, sql_standard = 2]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.large_full_scan_rows
float1000default value for large_full_scan_rows session setting which determines the maximum table size allowed for a full scan when disallow_full_table_scans is set to true
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.locality_optimized_partitioned_index_scan.enabled
booleantruedefault value for locality_optimized_partitioned_index_scan session setting; enables searching for rows in the current region before searching remote regions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.lock_timeout
duration0sdefault value for the lock_timeout session setting; controls the duration a query is permitted to wait while attempting to acquire a lock on a key or while blocking on an existing lock in order to perform a non-locking read on a key; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.on_update_rehome_row.enabled
booleantruedefault value for on_update_rehome_row; enables ON UPDATE rehome_row() expressions to trigger on updates
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.optimizer_use_histograms.enabled
booleantruedefault value for optimizer_use_histograms session setting; enables usage of histograms in the optimizer by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.optimizer_use_multicol_stats.enabled
booleantruedefault value for optimizer_use_multicol_stats session setting; enables usage of multi-column stats in the optimizer by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.override_alter_primary_region_in_super_region.enabled
booleanfalsedefault value for override_alter_primary_region_in_super_region; allows for altering the primary region even if the primary region is a member of a super region
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.override_multi_region_zone_config.enabled
booleanfalsedefault value for override_multi_region_zone_config; allows for overriding the zone configs of a multi-region table or database
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.prefer_lookup_joins_for_fks.enabled
booleanfalsedefault value for prefer_lookup_joins_for_fks session setting; causes foreign key operations to use lookup joins when possible
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.primary_region
stringif not empty, all databases created without a PRIMARY REGION will implicitly have the given PRIMARY REGION
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.reorder_joins_limit
integer8default number of joins to reorder
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.require_explicit_primary_keys.enabled
booleanfalsedefault value for requiring explicit primary keys in CREATE TABLE statements
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.results_buffer.size
byte size512 KiBdefault size of the buffer that accumulates results for a statement or a batch of statements before they are sent to the client. This can be overridden on an individual connection with the 'results_buffer_size' parameter. Note that auto-retries generally only happen while no results have been delivered to the client, so reducing this size can increase the number of retriable errors a client receives. On the other hand, increasing the buffer size can increase the delay until the client receives the first result row. Updating the setting only affects new connections. Setting to 0 disables any buffering.
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.serial_normalization
enumerationrowiddefault handling of SERIAL in table definitions [rowid = 0, virtual_sequence = 1, sql_sequence = 2, sql_sequence_cached = 3, unordered_rowid = 4, sql_sequence_cached_node = 5]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.statement_timeout
duration0sdefault value for the statement_timeout session setting; controls the duration a query is permitted to run before it is canceled; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.stub_catalog_tables.enabled
booleantruedefault value for stub_catalog_tables session setting
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.super_regions.enabled
booleanfalsedefault value for enable_super_regions; allows for the usage of super regions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.transaction_rows_read_err
integer0the limit for the number of rows read by a SQL transaction which - once exceeded - will fail the transaction (or will trigger a logging event to SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.transaction_rows_read_log
integer0the threshold for the number of rows read by a SQL transaction which - once exceeded - will trigger a logging event to SQL_PERF (or SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.transaction_rows_written_err
integer0the limit for the number of rows written by a SQL transaction which - once exceeded - will fail the transaction (or will trigger a logging event to SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.transaction_rows_written_log
integer0the threshold for the number of rows written by a SQL transaction which - once exceeded - will trigger a logging event to SQL_PERF (or SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.use_declarative_schema_changer
enumerationondefault value for use_declarative_schema_changer session setting; disables new schema changer by default [off = 0, on = 1, unsafe = 2, unsafe_always = 3]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.vectorize
enumerationondefault vectorize mode [on = 0, on = 2, experimental_always = 3, off = 4]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.zigzag_join.enabled
booleanfalsedefault value for enable_zigzag_join session setting; disallows use of zig-zag join by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.distsql.temp_storage.workmem
byte size64 MiBmaximum amount of memory in bytes a processor can use before falling back to temp storageServerless/Dedicated/Self-Hosted
sql.guardrails.max_row_size_err
byte size512 MiBmaximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an error is returned; use 0 to disableServerless/Dedicated/Self-Hosted
sql.guardrails.max_row_size_log
byte size64 MiBmaximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an event is logged to SQL_PERF (or SQL_INTERNAL_PERF if the mutating statement was internal); use 0 to disableServerless/Dedicated/Self-Hosted
sql.hash_sharded_range_pre_split.max
integer16max pre-split ranges to have when adding hash sharded index to an existing tableServerless/Dedicated/Self-Hosted
sql.index_recommendation.drop_unused_duration
duration168h0m0sthe index unused duration at which we begin to recommend dropping the indexServerless/Dedicated/Self-Hosted
sql.insights.anomaly_detection.enabled
booleantrueenable per-fingerprint latency recording and anomaly detectionServerless/Dedicated/Self-Hosted
sql.insights.anomaly_detection.latency_threshold
duration50msstatements must surpass this threshold to trigger anomaly detection and identificationServerless/Dedicated/Self-Hosted
sql.insights.anomaly_detection.memory_limit
byte size1.0 MiBthe maximum amount of memory allowed for tracking statement latenciesServerless/Dedicated/Self-Hosted
sql.insights.execution_insights_capacity
integer1000the size of the per-node store of execution insightsServerless/Dedicated/Self-Hosted
sql.insights.high_retry_count.threshold
integer10the number of retries a slow statement must have undergone for its high retry count to be highlighted as a potential problemServerless/Dedicated/Self-Hosted
sql.insights.latency_threshold
duration100msamount of time after which an executing statement is considered slow. Use 0 to disable.Serverless/Dedicated/Self-Hosted
sql.log.slow_query.experimental_full_table_scans.enabled
booleanfalsewhen set to true, statements that perform a full table/index scan will be logged to the slow query log even if they do not meet the latency threshold. Must have the slow query log enabled for this setting to have any effect.Serverless/Dedicated/Self-Hosted
sql.log.slow_query.internal_queries.enabled
booleanfalsewhen set to true, internal queries which exceed the slow query log threshold are logged to a separate log. Must have the slow query log enabled for this setting to have any effect.Serverless/Dedicated/Self-Hosted
sql.log.slow_query.latency_threshold
duration0swhen set to non-zero, log statements whose service latency exceeds the threshold to a secondary logger on each nodeServerless/Dedicated/Self-Hosted
sql.log.user_audit
stringuser/role-based audit logging configuration. An enterprise license is required for this cluster setting to take effect.Serverless/Dedicated/Self-Hosted
sql.log.user_audit.reduced_config.enabled
booleanfalseenables logic to compute a reduced audit configuration, computing the audit configuration only once at session start instead of at each SQL event. The tradeoff with the increase in performance (~5%), is that changes to the audit configuration (user role memberships/cluster setting) are not reflected within session. Users will need to start a new session to see these changes in their auditing behaviour.Serverless/Dedicated/Self-Hosted
sql.metrics.index_usage_stats.enabled
booleantruecollect per index usage statisticsServerless/Dedicated/Self-Hosted
sql.metrics.max_mem_reported_stmt_fingerprints
integer100000the maximum number of reported statement fingerprints stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.max_mem_reported_txn_fingerprints
integer100000the maximum number of reported transaction fingerprints stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.max_mem_stmt_fingerprints
integer7500the maximum number of statement fingerprints stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.max_mem_txn_fingerprints
integer7500the maximum number of transaction fingerprints stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.dump_to_logs.enabled
(alias: sql.metrics.statement_details.dump_to_logs)
booleanfalsedump collected statement statistics to node logs when periodically clearedServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.enabled
booleantruecollect per-statement query statisticsServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.gateway_node.enabled
booleanfalsesave the gateway node for each statement fingerprint. If false, the value will be stored as 0.Serverless/Dedicated/Self-Hosted
sql.metrics.statement_details.index_recommendation_collection.enabled
booleantruegenerate an index recommendation for each fingerprint IDServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.max_mem_reported_idx_recommendations
integer5000the maximum number of reported index recommendation info stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.plan_collection.enabled
booleanfalseperiodically save a logical plan for each fingerprintServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.plan_collection.period
duration5m0sthe time until a new logical plan is collectedServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.threshold
duration0sminimum execution time to cause statement statistics to be collected. If configured, no transaction stats are collected.Serverless/Dedicated/Self-Hosted
sql.metrics.transaction_details.enabled
booleantruecollect per-application transaction statisticsServerless/Dedicated/Self-Hosted
sql.multiple_modifications_of_table.enabled
booleanfalseif true, allow statements containing multiple INSERT ON CONFLICT, UPSERT, UPDATE, or DELETE subqueries modifying the same table, at the risk of data corruption if the same row is modified multiple times by a single statement (multiple INSERT subqueries without ON CONFLICT cannot cause corruption and are always allowed)Serverless/Dedicated/Self-Hosted
sql.multiregion.drop_primary_region.enabled
booleantrueallows dropping the PRIMARY REGION of a database if it is the last regionServerless/Dedicated/Self-Hosted
sql.notices.enabled
booleantrueenable notices in the server/client protocol being sentServerless/Dedicated/Self-Hosted
sql.optimizer.uniqueness_checks_for_gen_random_uuid.enabled
booleanfalseif enabled, uniqueness checks may be planned for mutations of UUID columns updated with gen_random_uuid(); otherwise, uniqueness is assumed due to near-zero collision probabilityServerless/Dedicated/Self-Hosted
sql.schema.telemetry.recurrence
string@weeklycron-tab recurrence for SQL schema telemetry jobServerless/Dedicated/Self-Hosted (read-only)
sql.spatial.experimental_box2d_comparison_operators.enabled
booleanfalseenables the use of certain experimental box2d comparison operatorsServerless/Dedicated/Self-Hosted
sql.stats.activity.persisted_rows.max
integer200000maximum number of rows of statement and transaction activity that will be persisted in the system tablesServerless/Dedicated/Self-Hosted
sql.stats.automatic_collection.enabled
booleantrueautomatic statistics collection modeServerless/Dedicated/Self-Hosted
sql.stats.automatic_collection.fraction_stale_rows
float0.2target fraction of stale rows per table that will trigger a statistics refreshServerless/Dedicated/Self-Hosted
sql.stats.automatic_collection.min_stale_rows
integer500target minimum number of stale rows per table that will trigger a statistics refreshServerless/Dedicated/Self-Hosted
sql.stats.cleanup.recurrence
string@hourlycron-tab recurrence for SQL Stats cleanup jobServerless/Dedicated/Self-Hosted
sql.stats.flush.enabled
booleantrueif set, SQL execution statistics are periodically flushed to diskServerless/Dedicated/Self-Hosted
sql.stats.flush.interval
duration10m0sthe interval at which SQL execution statistics are flushed to disk, this value must be less than or equal to 1 hourServerless/Dedicated/Self-Hosted
sql.stats.forecasts.enabled
booleantruewhen true, enables generation of statistics forecasts by default for all tablesServerless/Dedicated/Self-Hosted
sql.stats.forecasts.max_decrease
float0.3333333333333333the most a prediction is allowed to decrease, expressed as the minimum ratio of the prediction to the lowest prior observationServerless/Dedicated/Self-Hosted
sql.stats.forecasts.min_goodness_of_fit
float0.95the minimum R² (goodness of fit) measurement required from all predictive models to use a forecastServerless/Dedicated/Self-Hosted
sql.stats.forecasts.min_observations
integer3the minimum number of observed statistics required to produce a statistics forecastServerless/Dedicated/Self-Hosted
sql.stats.histogram_buckets.count
integer200maximum number of histogram buckets to build during table statistics collectionServerless/Dedicated/Self-Hosted
sql.stats.histogram_collection.enabled
booleantruehistogram collection modeServerless/Dedicated/Self-Hosted
sql.stats.histogram_samples.count
integer10000number of rows sampled for histogram construction during table statistics collectionServerless/Dedicated/Self-Hosted
sql.stats.multi_column_collection.enabled
booleantruemulti-column statistics collection modeServerless/Dedicated/Self-Hosted
sql.stats.non_default_columns.min_retention_period
duration24h0m0sminimum retention period for table statistics collected on non-default columnsServerless/Dedicated/Self-Hosted
sql.stats.non_indexed_json_histograms.enabled
booleantrueset to true to collect table statistics histograms on non-indexed JSON columnsServerless/Dedicated/Self-Hosted
sql.stats.persisted_rows.max
integer1000000maximum number of rows of statement and transaction statistics that will be persisted in the system tables before compaction beginsServerless/Dedicated/Self-Hosted
sql.stats.post_events.enabled
booleanfalseif set, an event is logged for every CREATE STATISTICS jobServerless/Dedicated/Self-Hosted
sql.stats.response.max
integer20000the maximum number of statements and transaction stats returned in a CombinedStatements requestServerless/Dedicated/Self-Hosted
sql.stats.response.show_internal.enabled
booleanfalsecontrols if statistics for internal executions should be returned by the CombinedStatements and if internal sessions should be returned by the ListSessions endpoints. These endpoints are used to display statistics on the SQL Activity pagesServerless/Dedicated/Self-Hosted
sql.stats.system_tables.enabled
booleantruewhen true, enables use of statistics on system tables by the query optimizerServerless/Dedicated/Self-Hosted
sql.stats.system_tables_autostats.enabled
booleantruewhen true, enables automatic collection of statistics on system tablesServerless/Dedicated/Self-Hosted
sql.stats.virtual_computed_columns.enabled
booleantrueset to true to collect table statistics on virtual computed columnsServerless/Dedicated/Self-Hosted
sql.telemetry.query_sampling.enabled
booleanfalsewhen set to true, executed queries will emit an event on the telemetry logging channelServerless/Dedicated/Self-Hosted
sql.telemetry.query_sampling.internal.enabled
booleanfalsewhen set to true, internal queries will be sampled in telemetry loggingServerless/Dedicated/Self-Hosted
sql.telemetry.query_sampling.max_event_frequency
integer8the max event frequency (events per second) at which we sample executions for telemetry, note that it is recommended that this value shares a log-line limit of 10 logs per second on the telemetry pipeline with all other telemetry events. If sampling mode is set to 'transaction', this value is ignored.Serverless/Dedicated/Self-Hosted
sql.telemetry.query_sampling.mode
enumerationstatementthe execution level used for telemetry sampling. If set to 'statement', events are sampled at the statement execution level. If set to 'transaction', events are sampled at the transaction execution level, i.e. all statements for a transaction will be logged and are counted together as one sampled event (events are still emitted one per statement). [statement = 0, transaction = 1]Serverless/Dedicated/Self-Hosted
sql.telemetry.transaction_sampling.max_event_frequency
integer8the max event frequency (events per second) at which we sample transactions for telemetry. If sampling mode is set to 'statement', this setting is ignored. In practice, this means that we only sample a transaction if 1/max_event_frequency seconds have elapsed since the last transaction was sampled.Serverless/Dedicated/Self-Hosted
sql.telemetry.transaction_sampling.statement_events_per_transaction.max
integer50the maximum number of statement events to log for every sampled transaction. Note that statements that are logged by force do not adhere to this limit.Serverless/Dedicated/Self-Hosted
sql.temp_object_cleaner.cleanup_interval
duration30m0show often to clean up orphaned temporary objectsServerless/Dedicated/Self-Hosted
sql.temp_object_cleaner.wait_interval
duration30m0show long after creation a temporary object will be cleaned upServerless/Dedicated/Self-Hosted
sql.log.all_statements.enabled
(alias: sql.trace.log_statement_execute)
booleanfalseset to true to enable logging of all executed statementsServerless/Dedicated/Self-Hosted
sql.trace.stmt.enable_threshold
duration0senables tracing on all statements; statements executing for longer than this duration will have their trace logged (set to 0 to disable); note that enabling this may have a negative performance impact; this setting applies to individual statements within a transaction and is therefore finer-grained than sql.trace.txn.enable_thresholdServerless/Dedicated/Self-Hosted
sql.trace.txn.enable_threshold
duration0senables tracing on all transactions; transactions open for longer than this duration will have their trace logged (set to 0 to disable); note that enabling this may have a negative performance impact; this setting is coarser-grained than sql.trace.stmt.enable_threshold because it applies to all statements within a transaction as well as client communication (e.g. retries)Serverless/Dedicated/Self-Hosted
sql.ttl.changefeed_replication.disabled
booleanfalseif true, deletes issued by TTL will not be replicated via changefeeds (this setting will be ignored by changefeeds that have the ignore_disable_changefeed_replication option set; such changefeeds will continue to replicate all TTL deletes)Serverless/Dedicated/Self-Hosted
sql.ttl.default_delete_batch_size
integer100default amount of rows to delete in a single query during a TTL jobServerless/Dedicated/Self-Hosted
sql.ttl.default_delete_rate_limit
integer100default delete rate limit (rows per second) per node for each TTL job. Use 0 to signify no rate limit.Serverless/Dedicated/Self-Hosted
sql.ttl.default_select_batch_size
integer500default amount of rows to select in a single query during a TTL jobServerless/Dedicated/Self-Hosted
sql.ttl.default_select_rate_limit
integer0default select rate limit (rows per second) per node for each TTL job. Use 0 to signify no rate limit.Serverless/Dedicated/Self-Hosted
sql.ttl.job.enabled
booleantruewhether the TTL job is enabledServerless/Dedicated/Self-Hosted
sql.txn.read_committed_isolation.enabled
booleantrueset to true to allow transactions to use the READ COMMITTED isolation level if specified by BEGIN/SET commandsServerless/Dedicated/Self-Hosted
sql.txn_fingerprint_id_cache.capacity
integer100the maximum number of txn fingerprint IDs storedServerless/Dedicated/Self-Hosted
storage.experimental.eventually_file_only_snapshots.enabled
booleantrueset to false to disable eventually-file-only-snapshots (kv.snapshot_receiver.excise.enabled must also be false)Dedicated/Self-Hosted
storage.ingest_split.enabled
booleantrueset to false to disable ingest-time splitting that lowers write-amplificationDedicated/Self-Hosted
storage.max_sync_duration
duration20smaximum duration for disk operations; any operations that take longer than this setting trigger a warning log entry or process crashServerless/Dedicated/Self-Hosted (read-only)
storage.max_sync_duration.fatal.enabled
booleantrueif true, fatal the process when a disk operation exceeds storage.max_sync_durationServerless/Dedicated/Self-Hosted
storage.sstable.compression_algorithm
enumerationsnappydetermines the compression algorithm to use when compressing sstable data blocks; supported values: "snappy", "zstd" [snappy = 1, zstd = 2]Serverless/Dedicated/Self-Hosted (read-only)
storage.value_blocks.enabled
booleantrueset to true to enable writing of value blocks in sstablesServerless/Dedicated/Self-Hosted
storage.wal_failover.unhealthy_op_threshold
duration100msthe latency of a WAL write considered unhealthy and triggers a failover to a secondary WAL locationDedicated/Self-Hosted
timeseries.storage.enabled
booleantrueif set, periodic timeseries data is stored within the cluster; disabling is not recommended unless you are storing the data elsewhereDedicated/Self-Hosted
timeseries.storage.resolution_10s.ttl
duration240h0m0sthe maximum age of time series data stored at the 10 second resolution. Data older than this is subject to rollup and deletion.Serverless/Dedicated/Self-Hosted (read-only)
timeseries.storage.resolution_30m.ttl
duration2160h0m0sthe maximum age of time series data stored at the 30 minute resolution. Data older than this is subject to deletion.Serverless/Dedicated/Self-Hosted (read-only)
trace.debug_http_endpoint.enabled
(alias: trace.debug.enable)
booleanfalseif set, traces for recent requests can be seen at https://<ui>/debug/requestsServerless/Dedicated/Self-Hosted
trace.opentelemetry.collector
stringaddress of an OpenTelemetry trace collector to receive traces using the otel gRPC protocol, as <host>:<port>. If no port is specified, 4317 will be used.Serverless/Dedicated/Self-Hosted
trace.snapshot.rate
duration0sif non-zero, interval at which background trace snapshots are capturedServerless/Dedicated/Self-Hosted
trace.span_registry.enabled
booleantrueif set, ongoing traces can be seen at https://<ui>/#/debug/tracezServerless/Dedicated/Self-Hosted
trace.zipkin.collector
stringthe address of a Zipkin instance to receive traces, as <host>:<port>. If no port is specified, 9411 will be used.Serverless/Dedicated/Self-Hosted
ui.database_locality_metadata.enabled
booleantrueif enabled shows extended locality data about databases and tables in DB Console which can be expensive to computeServerless/Dedicated/Self-Hosted
ui.display_timezone
enumerationetc/utcthe timezone used to format timestamps in the ui [etc/utc = 0, america/new_york = 1]Serverless/Dedicated/Self-Hosted
version
version24.1set the active cluster version in the format '<major>.<minor>'Serverless/Dedicated/Self-Hosted
diff --git a/src/current/_includes/cockroach-generated/release-24.1/sql/aggregates.md b/src/current/_includes/cockroach-generated/release-24.1/sql/aggregates.md new file mode 100644 index 00000000000..3213f0f02a8 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-24.1/sql/aggregates.md @@ -0,0 +1,579 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_agg(arg1: bool) → bool[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bool[]) → bool[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bytes) → bytes[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bytes[]) → bytes[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: date) → date[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: date[]) → date[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: decimal) → decimal[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: decimal[]) → decimal[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: float) → float[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: float[]) → float[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: inet) → inet[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: inet[]) → inet[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: int) → int[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: int[]) → int[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: interval) → interval[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: interval[]) → interval[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: string) → string[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: string[]) → string[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: time) → time[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: time[]) → time[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamp) → timestamp[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamp[]) → timestamp[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamptz) → timestamptz[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamptz[]) → timestamptz[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: uuid) → uuid[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: uuid[]) → uuid[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: anyenum) → anyenum[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: anyenum[]) → anyenum[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: box2d) → box2d[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: box2d[]) → box2d[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geography) → geography[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geography[]) → geography[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geometry) → geometry[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geometry[]) → geometry[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: jsonb) → jsonb[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: jsonb[]) → jsonb[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: oid) → oid[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: oid[]) → oid[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: pg_lsn) → pg_lsn[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: pg_lsn[]) → pg_lsn[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: refcursor) → refcursor[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: refcursor[]) → refcursor[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timetz) → timetz[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timetz[]) → timetz[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: tuple) → tuple[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: tuple[]) → tuple[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: varbit) → varbit[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: varbit[]) → varbit[][]

Aggregates the selected values into an array.

+
Immutable
array_cat_agg(arg1: bool[]) → bool[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: bytes[]) → bytes[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: date[]) → date[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: decimal[]) → decimal[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: float[]) → float[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: inet[]) → inet[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: int[]) → int[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: interval[]) → interval[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: string[]) → string[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: time[]) → time[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timestamp[]) → timestamp[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timestamptz[]) → timestamptz[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: uuid[]) → uuid[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: anyenum[]) → anyenum[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: box2d[]) → box2d[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: geography[]) → geography[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: geometry[]) → geometry[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: jsonb[]) → jsonb[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: oid[]) → oid[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: pg_lsn[]) → pg_lsn[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: refcursor[]) → refcursor[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timetz[]) → timetz[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: tuple[]) → tuple[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: varbit[]) → varbit[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
avg(arg1: decimal) → decimal

Calculates the average of the selected values.

+
Immutable
avg(arg1: float) → float

Calculates the average of the selected values.

+
Immutable
avg(arg1: int) → decimal

Calculates the average of the selected values.

+
Immutable
avg(arg1: interval) → interval

Calculates the average of the selected values.

+
Immutable
bit_and(arg1: int) → int

Calculates the bitwise AND of all non-null input values, or null if none.

+
Immutable
bit_and(arg1: varbit) → varbit

Calculates the bitwise AND of all non-null input values, or null if none.

+
Immutable
bit_or(arg1: int) → int

Calculates the bitwise OR of all non-null input values, or null if none.

+
Immutable
bit_or(arg1: varbit) → varbit

Calculates the bitwise OR of all non-null input values, or null if none.

+
Immutable
bool_and(arg1: bool) → bool

Calculates the boolean value of ANDing all selected values.

+
Immutable
bool_or(arg1: bool) → bool

Calculates the boolean value of ORing all selected values.

+
Immutable
concat_agg(arg1: bytes) → bytes

Concatenates all selected values.

+
Immutable
concat_agg(arg1: string) → string

Concatenates all selected values.

+
Immutable
corr(arg1: decimal, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: decimal, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: decimal, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
count(arg1: anyelement) → int

Calculates the number of selected elements.

+
Immutable
count_rows() → int

Calculates the number of rows.

+
Immutable
covar_pop(arg1: decimal, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: decimal, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: decimal, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
every(arg1: bool) → bool

Calculates the boolean value of ANDing all selected values.

+
Immutable
json_agg(arg1: anyelement) → jsonb

Aggregates values as a JSON or JSONB array.

+
Stable
json_object_agg(arg1: string, arg2: anyelement) → jsonb

Aggregates values as a JSON or JSONB object.

+
Stable
jsonb_agg(arg1: anyelement) → jsonb

Aggregates values as a JSON or JSONB array.

+
Stable
jsonb_object_agg(arg1: string, arg2: anyelement) → jsonb

Aggregates values as a JSON or JSONB object.

+
Stable
max(arg1: bool) → bool

Identifies the maximum selected value.

+
Immutable
max(arg1: bytes) → bytes

Identifies the maximum selected value.

+
Immutable
max(arg1: date) → date

Identifies the maximum selected value.

+
Immutable
max(arg1: decimal) → decimal

Identifies the maximum selected value.

+
Immutable
max(arg1: float) → float

Identifies the maximum selected value.

+
Immutable
max(arg1: inet) → inet

Identifies the maximum selected value.

+
Immutable
max(arg1: int) → int

Identifies the maximum selected value.

+
Immutable
max(arg1: interval) → interval

Identifies the maximum selected value.

+
Immutable
max(arg1: string) → string

Identifies the maximum selected value.

+
Immutable
max(arg1: time) → time

Identifies the maximum selected value.

+
Immutable
max(arg1: timestamp) → timestamp

Identifies the maximum selected value.

+
Immutable
max(arg1: timestamptz) → timestamptz

Identifies the maximum selected value.

+
Immutable
max(arg1: uuid) → uuid

Identifies the maximum selected value.

+
Immutable
max(arg1: anyenum) → anyenum

Identifies the maximum selected value.

+
Immutable
max(arg1: box2d) → box2d

Identifies the maximum selected value.

+
Immutable
max(arg1: collatedstring{*}) → collatedstring{*}

Identifies the maximum selected value.

+
Immutable
max(arg1: geography) → geography

Identifies the maximum selected value.

+
Immutable
max(arg1: geometry) → geometry

Identifies the maximum selected value.

+
Immutable
max(arg1: jsonb) → jsonb

Identifies the maximum selected value.

+
Immutable
max(arg1: oid) → oid

Identifies the maximum selected value.

+
Immutable
max(arg1: pg_lsn) → pg_lsn

Identifies the maximum selected value.

+
Immutable
max(arg1: timetz) → timetz

Identifies the maximum selected value.

+
Immutable
max(arg1: varbit) → varbit

Identifies the maximum selected value.

+
Immutable
min(arg1: bool) → bool

Identifies the minimum selected value.

+
Immutable
min(arg1: bytes) → bytes

Identifies the minimum selected value.

+
Immutable
min(arg1: date) → date

Identifies the minimum selected value.

+
Immutable
min(arg1: decimal) → decimal

Identifies the minimum selected value.

+
Immutable
min(arg1: float) → float

Identifies the minimum selected value.

+
Immutable
min(arg1: inet) → inet

Identifies the minimum selected value.

+
Immutable
min(arg1: int) → int

Identifies the minimum selected value.

+
Immutable
min(arg1: interval) → interval

Identifies the minimum selected value.

+
Immutable
min(arg1: string) → string

Identifies the minimum selected value.

+
Immutable
min(arg1: time) → time

Identifies the minimum selected value.

+
Immutable
min(arg1: timestamp) → timestamp

Identifies the minimum selected value.

+
Immutable
min(arg1: timestamptz) → timestamptz

Identifies the minimum selected value.

+
Immutable
min(arg1: uuid) → uuid

Identifies the minimum selected value.

+
Immutable
min(arg1: anyenum) → anyenum

Identifies the minimum selected value.

+
Immutable
min(arg1: box2d) → box2d

Identifies the minimum selected value.

+
Immutable
min(arg1: collatedstring{*}) → collatedstring{*}

Identifies the minimum selected value.

+
Immutable
min(arg1: geography) → geography

Identifies the minimum selected value.

+
Immutable
min(arg1: geometry) → geometry

Identifies the minimum selected value.

+
Immutable
min(arg1: jsonb) → jsonb

Identifies the minimum selected value.

+
Immutable
min(arg1: oid) → oid

Identifies the minimum selected value.

+
Immutable
min(arg1: pg_lsn) → pg_lsn

Identifies the minimum selected value.

+
Immutable
min(arg1: timetz) → timetz

Identifies the minimum selected value.

+
Immutable
min(arg1: varbit) → varbit

Identifies the minimum selected value.

+
Immutable
percentile_cont(arg1: float) → float

Continuous percentile: returns a float corresponding to the specified fraction in the ordering, interpolating between adjacent input floats if needed.

+
Immutable
percentile_cont(arg1: float) → interval

Continuous percentile: returns an interval corresponding to the specified fraction in the ordering, interpolating between adjacent input intervals if needed.

+
Immutable
percentile_cont(arg1: float[]) → float[]

Continuous percentile: returns floats corresponding to the specified fractions in the ordering, interpolating between adjacent input floats if needed.

+
Immutable
percentile_cont(arg1: float[]) → interval[]

Continuous percentile: returns intervals corresponding to the specified fractions in the ordering, interpolating between adjacent input intervals if needed.

+
Immutable
percentile_disc(arg1: float) → anyelement

Discrete percentile: returns the first input value whose position in the ordering equals or exceeds the specified fraction.

+
Immutable
percentile_disc(arg1: float[]) → anyelement

Discrete percentile: returns input values whose position in the ordering equals or exceeds the specified fractions.

+
Immutable
regr_avgx(arg1: decimal, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: decimal, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: decimal, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_count(arg1: decimal, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: decimal, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: decimal, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_intercept(arg1: decimal, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: decimal, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: decimal, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_r2(arg1: decimal, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: decimal, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: decimal, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_slope(arg1: decimal, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: decimal, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: decimal, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_sxx(arg1: decimal, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: decimal, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: decimal, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
sqrdiff(arg1: decimal) → decimal

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
sqrdiff(arg1: float) → float

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
sqrdiff(arg1: int) → decimal

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
st_collect(arg1: geometry) → geometry

Collects geometries into a GeometryCollection or multi-type as appropriate.

+
Immutable
st_extent(arg1: geometry) → box2d

Forms a Box2D that encapsulates all provided geometries.

+
Immutable
st_makeline(arg1: geometry) → geometry

Forms a LineString from Point, MultiPoint or LineStrings. Other shapes will be ignored.

+
Immutable
st_memcollect(arg1: geometry) → geometry

Collects geometries into a GeometryCollection or multi-type as appropriate.

+
Immutable
st_memunion(arg1: geometry) → geometry

Applies a spatial union to the geometries provided.

+
Immutable
st_union(arg1: geometry) → geometry

Applies a spatial union to the geometries provided.

+
Immutable
stddev(arg1: decimal) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev(arg1: float) → float

Calculates the standard deviation of the selected values.

+
Immutable
stddev(arg1: int) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: decimal) → decimal

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: float) → float

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: int) → decimal

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: decimal) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: float) → float

Calculates the standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: int) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
string_agg(arg1: bytes, arg2: bytes) → bytes

Concatenates all selected values using the provided delimiter.

+
Immutable
string_agg(arg1: string, arg2: string) → string

Concatenates all selected values using the provided delimiter.

+
Immutable
sum(arg1: decimal) → decimal

Calculates the sum of the selected values.

+
Immutable
sum(arg1: float) → float

Calculates the sum of the selected values.

+
Immutable
sum(arg1: int) → decimal

Calculates the sum of the selected values.

+
Immutable
sum(arg1: interval) → interval

Calculates the sum of the selected values.

+
Immutable
sum_int(arg1: int) → int

Calculates the sum of the selected values.

+
Immutable
var_pop(arg1: decimal) → decimal

Calculates the population variance of the selected values.

+
Immutable
var_pop(arg1: float) → float

Calculates the population variance of the selected values.

+
Immutable
var_pop(arg1: int) → decimal

Calculates the population variance of the selected values.

+
Immutable
var_samp(arg1: decimal) → decimal

Calculates the variance of the selected values.

+
Immutable
var_samp(arg1: float) → float

Calculates the variance of the selected values.

+
Immutable
var_samp(arg1: int) → decimal

Calculates the variance of the selected values.

+
Immutable
variance(arg1: decimal) → decimal

Calculates the variance of the selected values.

+
Immutable
variance(arg1: float) → float

Calculates the variance of the selected values.

+
Immutable
variance(arg1: int) → decimal

Calculates the variance of the selected values.

+
Immutable
xor_agg(arg1: bytes) → bytes

Calculates the bitwise XOR of the selected values.

+
Immutable
xor_agg(arg1: int) → int

Calculates the bitwise XOR of the selected values.

+
Immutable
+ diff --git a/src/current/_includes/cockroach-generated/release-24.1/sql/functions.md b/src/current/_includes/cockroach-generated/release-24.1/sql/functions.md new file mode 100644 index 00000000000..df9e17317f3 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-24.1/sql/functions.md @@ -0,0 +1,3504 @@ +### Array functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_append(array: bool[], elem: bool) → bool[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: bytes[], elem: bytes) → bytes[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: date[], elem: date) → date[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: decimal[], elem: decimal) → decimal[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: float[], elem: float) → float[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: inet[], elem: inet) → inet[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: int[], elem: int) → int[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: interval[], elem: interval) → interval[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: string[], elem: string) → string[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: time[], elem: time) → time[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timestamp[], elem: timestamp) → timestamp[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timestamptz[], elem: timestamptz) → timestamptz[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: uuid[], elem: uuid) → uuid[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: anyenum[], elem: anyenum) → anyenum[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: box2d[], elem: box2d) → box2d[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: geography[], elem: geography) → geography[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: geometry[], elem: geometry) → geometry[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: jsonb[], elem: jsonb) → jsonb[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: oid[], elem: oid) → oid[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: pg_lsn[], elem: pg_lsn) → pg_lsn[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: refcursor[], elem: refcursor) → refcursor[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timetz[], elem: timetz) → timetz[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: tuple[], elem: tuple) → tuple[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: varbit[], elem: varbit) → varbit[]

Appends elem to array, returning the result.

+
Immutable
array_cat(left: bool[], right: bool[]) → bool[]

Appends two arrays.

+
Immutable
array_cat(left: bytes[], right: bytes[]) → bytes[]

Appends two arrays.

+
Immutable
array_cat(left: date[], right: date[]) → date[]

Appends two arrays.

+
Immutable
array_cat(left: decimal[], right: decimal[]) → decimal[]

Appends two arrays.

+
Immutable
array_cat(left: float[], right: float[]) → float[]

Appends two arrays.

+
Immutable
array_cat(left: inet[], right: inet[]) → inet[]

Appends two arrays.

+
Immutable
array_cat(left: int[], right: int[]) → int[]

Appends two arrays.

+
Immutable
array_cat(left: interval[], right: interval[]) → interval[]

Appends two arrays.

+
Immutable
array_cat(left: string[], right: string[]) → string[]

Appends two arrays.

+
Immutable
array_cat(left: time[], right: time[]) → time[]

Appends two arrays.

+
Immutable
array_cat(left: timestamp[], right: timestamp[]) → timestamp[]

Appends two arrays.

+
Immutable
array_cat(left: timestamptz[], right: timestamptz[]) → timestamptz[]

Appends two arrays.

+
Immutable
array_cat(left: uuid[], right: uuid[]) → uuid[]

Appends two arrays.

+
Immutable
array_cat(left: anyenum[], right: anyenum[]) → anyenum[]

Appends two arrays.

+
Immutable
array_cat(left: box2d[], right: box2d[]) → box2d[]

Appends two arrays.

+
Immutable
array_cat(left: geography[], right: geography[]) → geography[]

Appends two arrays.

+
Immutable
array_cat(left: geometry[], right: geometry[]) → geometry[]

Appends two arrays.

+
Immutable
array_cat(left: jsonb[], right: jsonb[]) → jsonb[]

Appends two arrays.

+
Immutable
array_cat(left: oid[], right: oid[]) → oid[]

Appends two arrays.

+
Immutable
array_cat(left: pg_lsn[], right: pg_lsn[]) → pg_lsn[]

Appends two arrays.

+
Immutable
array_cat(left: refcursor[], right: refcursor[]) → refcursor[]

Appends two arrays.

+
Immutable
array_cat(left: timetz[], right: timetz[]) → timetz[]

Appends two arrays.

+
Immutable
array_cat(left: tuple[], right: tuple[]) → tuple[]

Appends two arrays.

+
Immutable
array_cat(left: varbit[], right: varbit[]) → varbit[]

Appends two arrays.

+
Immutable
array_length(input: anyelement[], array_dimension: int) → int

Calculates the length of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
array_lower(input: anyelement[], array_dimension: int) → int

Calculates the minimum value of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
array_position(array: bool[], elem: bool) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: bool[], elem: bool, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: bytes[], elem: bytes) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: bytes[], elem: bytes, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: date[], elem: date) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: date[], elem: date, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: decimal[], elem: decimal) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: decimal[], elem: decimal, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: float[], elem: float) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: float[], elem: float, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: inet[], elem: inet) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: inet[], elem: inet, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: int[], elem: int) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: int[], elem: int, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: interval[], elem: interval) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: interval[], elem: interval, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: string[], elem: string) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: string[], elem: string, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: time[], elem: time) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: time[], elem: time, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: timestamp[], elem: timestamp) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timestamp[], elem: timestamp, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: timestamptz[], elem: timestamptz) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timestamptz[], elem: timestamptz, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: uuid[], elem: uuid) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: uuid[], elem: uuid, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: anyenum[], elem: anyenum) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: anyenum[], elem: anyenum, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: box2d[], elem: box2d) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: box2d[], elem: box2d, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: geography[], elem: geography) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: geography[], elem: geography, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: geometry[], elem: geometry) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: geometry[], elem: geometry, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: jsonb[], elem: jsonb) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: jsonb[], elem: jsonb, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: oid[], elem: oid) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: oid[], elem: oid, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: pg_lsn[], elem: pg_lsn) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: pg_lsn[], elem: pg_lsn, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: refcursor[], elem: refcursor) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: refcursor[], elem: refcursor, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: timetz[], elem: timetz) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timetz[], elem: timetz, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: tuple[], elem: tuple) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: tuple[], elem: tuple, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: varbit[], elem: varbit) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: varbit[], elem: varbit, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_positions(array: bool[], elem: bool) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: bytes[], elem: bytes) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: date[], elem: date) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: decimal[], elem: decimal) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: float[], elem: float) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: inet[], elem: inet) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: int[], elem: int) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: interval[], elem: interval) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: string[], elem: string) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: time[], elem: time) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timestamp[], elem: timestamp) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timestamptz[], elem: timestamptz) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: uuid[], elem: uuid) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: anyenum[], elem: anyenum) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: box2d[], elem: box2d) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: geography[], elem: geography) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: geometry[], elem: geometry) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: jsonb[], elem: jsonb) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: oid[], elem: oid) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: pg_lsn[], elem: pg_lsn) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: refcursor[], elem: refcursor) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timetz[], elem: timetz) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: tuple[], elem: tuple) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: varbit[], elem: varbit) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_prepend(elem: bool, array: bool[]) → bool[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: bytes, array: bytes[]) → bytes[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: date, array: date[]) → date[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: decimal, array: decimal[]) → decimal[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: float, array: float[]) → float[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: inet, array: inet[]) → inet[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: int, array: int[]) → int[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: interval, array: interval[]) → interval[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: string, array: string[]) → string[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: time, array: time[]) → time[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timestamp, array: timestamp[]) → timestamp[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timestamptz, array: timestamptz[]) → timestamptz[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: uuid, array: uuid[]) → uuid[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: anyenum, array: anyenum[]) → anyenum[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: box2d, array: box2d[]) → box2d[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: geography, array: geography[]) → geography[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: geometry, array: geometry[]) → geometry[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: jsonb, array: jsonb[]) → jsonb[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: oid, array: oid[]) → oid[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: pg_lsn, array: pg_lsn[]) → pg_lsn[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: refcursor, array: refcursor[]) → refcursor[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timetz, array: timetz[]) → timetz[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: tuple, array: tuple[]) → tuple[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: varbit, array: varbit[]) → varbit[]

Prepends elem to array, returning the result.

+
Immutable
array_remove(array: bool[], elem: bool) → bool[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: bytes[], elem: bytes) → bytes[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: date[], elem: date) → date[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: decimal[], elem: decimal) → decimal[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: float[], elem: float) → float[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: inet[], elem: inet) → inet[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: int[], elem: int) → int[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: interval[], elem: interval) → interval[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: string[], elem: string) → string[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: time[], elem: time) → time[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timestamp[], elem: timestamp) → timestamp[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timestamptz[], elem: timestamptz) → timestamptz[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: uuid[], elem: uuid) → uuid[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: anyenum[], elem: anyenum) → anyenum[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: box2d[], elem: box2d) → box2d[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: geography[], elem: geography) → geography[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: geometry[], elem: geometry) → geometry[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: jsonb[], elem: jsonb) → jsonb[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: oid[], elem: oid) → oid[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: pg_lsn[], elem: pg_lsn) → pg_lsn[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: refcursor[], elem: refcursor) → refcursor[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timetz[], elem: timetz) → timetz[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: tuple[], elem: tuple) → tuple[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: varbit[], elem: varbit) → varbit[]

Remove from array all elements equal to elem.

+
Immutable
array_replace(array: bool[], toreplace: bool, replacewith: bool) → bool[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: bytes[], toreplace: bytes, replacewith: bytes) → bytes[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: date[], toreplace: date, replacewith: date) → date[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: decimal[], toreplace: decimal, replacewith: decimal) → decimal[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: float[], toreplace: float, replacewith: float) → float[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: inet[], toreplace: inet, replacewith: inet) → inet[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: int[], toreplace: int, replacewith: int) → int[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: interval[], toreplace: interval, replacewith: interval) → interval[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: string[], toreplace: string, replacewith: string) → string[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: time[], toreplace: time, replacewith: time) → time[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timestamp[], toreplace: timestamp, replacewith: timestamp) → timestamp[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timestamptz[], toreplace: timestamptz, replacewith: timestamptz) → timestamptz[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: uuid[], toreplace: uuid, replacewith: uuid) → uuid[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: anyenum[], toreplace: anyenum, replacewith: anyenum) → anyenum[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: box2d[], toreplace: box2d, replacewith: box2d) → box2d[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: geography[], toreplace: geography, replacewith: geography) → geography[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: geometry[], toreplace: geometry, replacewith: geometry) → geometry[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: jsonb[], toreplace: jsonb, replacewith: jsonb) → jsonb[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: oid[], toreplace: oid, replacewith: oid) → oid[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: pg_lsn[], toreplace: pg_lsn, replacewith: pg_lsn) → pg_lsn[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: refcursor[], toreplace: refcursor, replacewith: refcursor) → refcursor[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timetz[], toreplace: timetz, replacewith: timetz) → timetz[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: tuple[], toreplace: tuple, replacewith: tuple) → tuple[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: varbit[], toreplace: varbit, replacewith: varbit) → varbit[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_to_string(input: anyelement[], delim: string) → string

Join an array into a string with a delimiter.

+
Stable
array_to_string(input: anyelement[], delimiter: string, null: string) → string

Join an array into a string with a delimiter, replacing NULLs with a null string.

+
Stable
array_upper(input: anyelement[], array_dimension: int) → int

Calculates the maximum value of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
cardinality(input: anyelement[]) → int

Calculates the number of elements contained in input

+
Immutable
jsonb_array_to_string_array(input: jsonb) → string[]

Convert a JSONB array into a string array.

+
Immutable
string_to_array(str: string, delimiter: string) → string[]

Split a string into components on a delimiter.

+
Immutable
string_to_array(str: string, delimiter: string, null: string) → string[]

Split a string into components on a delimiter with a specified string to consider NULL.

+
Immutable
+ +### BOOL functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
ilike_escape(unescaped: string, pattern: string, escape: string) → bool

Matches case insensitively unescaped with pattern using escape as an escape token.

+
Immutable
inet_contained_by_or_equals(val: inet, container: inet) → bool

Test for subnet inclusion or equality, using only the network parts of the addresses. The host part of the addresses is ignored.

+
Immutable
inet_contains_or_equals(container: inet, val: inet) → bool

Test for subnet inclusion or equality, using only the network parts of the addresses. The host part of the addresses is ignored.

+
Immutable
inet_same_family(val: inet, val: inet) → bool

Checks if two IP addresses are of the same IP family.

+
Immutable
like_escape(unescaped: string, pattern: string, escape: string) → bool

Matches unescaped with pattern using escape as an escape token.

+
Immutable
not_ilike_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped does not match case insensitively with pattern using escape as an escape token.

+
Immutable
not_like_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped does not match with pattern using escape as an escape token.

+
Immutable
not_similar_to_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped does not match with pattern using escape as an escape token.

+
Immutable
+ +### Comparison functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
greatest(anyelement...) → anyelement

Returns the element with the greatest value.

+
Immutable
least(anyelement...) → anyelement

Returns the element with the lowest value.

+
Immutable
num_nonnulls(anyelement...) → int

Returns the number of nonnull arguments.

+
Immutable
num_nulls(anyelement...) → int

Returns the number of null arguments.

+
Immutable
+ +### Cryptographic functions + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
crypt(password: string, salt: string) → string

Generates a hash based on a password and salt. The hash algorithm and number of rounds if applicable are encoded in the salt.

+
Immutable
decrypt(data: bytes, key: bytes, type: string) → bytes

Decrypt data with key using the cipher method specified by type.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
decrypt_iv(data: bytes, key: bytes, iv: bytes, type: string) → bytes

Decrypt data with key using the cipher method specified by type. If the mode is CBC, the provided iv will be used. Otherwise, it will be ignored.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
digest(data: bytes, type: string) → bytes

Computes a binary hash of the given data. type is the algorithm to use (md5, sha1, sha224, sha256, sha384, or sha512).

+
Immutable
digest(data: string, type: string) → bytes

Computes a binary hash of the given data. type is the algorithm to use (md5, sha1, sha224, sha256, sha384, or sha512).

+
Immutable
encrypt(data: bytes, key: bytes, type: string) → bytes

Encrypt data with key using the cipher method specified by type.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
encrypt_iv(data: bytes, key: bytes, iv: bytes, type: string) → bytes

Encrypt data with key using the cipher method specified by type. If the mode is CBC, the provided iv will be used. Otherwise, it will be ignored.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
gen_random_bytes(count: int) → bytes

Returns count cryptographically strong random bytes. At most 1024 bytes can be extracted at a time.

+
Volatile
gen_salt(type: string) → string

Generates a salt for input into the crypt function using the default number of rounds.

+
Volatile
gen_salt(type: string, iter_count: int) → string

Generates a salt for input into the crypt function using iter_count number of rounds.

+
Volatile
hmac(data: bytes, key: bytes, type: string) → bytes

Calculates hashed MAC for data with key key. type is the same as in digest().

+
Immutable
hmac(data: string, key: string, type: string) → bytes

Calculates hashed MAC for data with key key. type is the same as in digest().

+
Immutable
+ +### DECIMAL functions + + + + + +
Function → ReturnsDescriptionVolatility
hlc_to_timestamp(hlc: decimal) → timestamptz

Returns a TimestampTZ representation of a CockroachDB HLC in decimal form.

+

Note that a TimestampTZ has less precision than a CockroachDB HLC. It is intended as +a convenience function to display HLCs in a print-friendly form. Use the decimal +value if you rely on the HLC for accuracy.

+
Immutable
+ +### Date and time functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
age(end: timestamptz, begin: timestamptz) → interval

Calculates the interval between begin and end, normalized into years, months and days.

+

Note this may not be an accurate time span since years and months are normalized +from days, and years and months are out of context. To avoid normalizing days into +months and years, use the timestamptz subtraction operator.

+
Immutable
age(val: timestamptz) → interval

Calculates the interval between val and the current time, normalized into years, months and days.

+

Note this may not be an accurate time span since years and months are normalized +from days, and years and months are out of context. To avoid normalizing days into +months and years, use now() - timestamptz.

+
Stable
clock_timestamp() → timestamp

Returns the current system time on one of the cluster nodes.

+
Volatile
clock_timestamp() → timestamptz

Returns the current system time on one of the cluster nodes.

+
Volatile
current_date() → date

Returns the date of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
current_timestamp(precision: int) → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp(precision: int) → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp(precision: int) → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
date_part(element: string, input: date) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: interval) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, +month, day, hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: time) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: timestamp) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: timestamptz) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Stable
date_part(element: string, input: timetz) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Immutable
date_trunc(element: string, input: date) → timestamptz

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: interval) → interval

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: time) → interval

Truncates input to precision element. Sets all fields that are less +significant than element to zero.

+

Compatible elements: hour, minute, second, millisecond, microsecond.

+
Immutable
date_trunc(element: string, input: timestamp) → timestamp

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Immutable
date_trunc(element: string, input: timestamptz) → timestamptz

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: timestamptz, timezone: string) → timestamptz

Truncates input to precision element in the specified timezone. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
experimental_follower_read_timestamp() → timestamptz

Same as follower_read_timestamp. This name is deprecated.

+
Volatile
experimental_strftime(input: date, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strftime(input: timestamp, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strftime(input: timestamptz, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strptime(input: string, format: string) → timestamptz

Returns input as a timestamptz using format (which uses standard strptime formatting).

+
Immutable
extract(element: string, input: date) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: interval) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, +month, day, hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: time) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: timestamp) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: timestamptz) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Stable
extract(element: string, input: timetz) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Immutable
extract_duration(element: string, input: interval) → int

Extracts element from input. +Compatible elements: hour, minute, second, millisecond, microsecond. +This is deprecated in favor of extract which supports duration.

+
Immutable
follower_read_timestamp() → timestamptz

Returns a timestamp which is very likely to be safe to perform +against a follower replica.

+

This function is intended to be used with an AS OF SYSTEM TIME clause to perform +historical reads against a time which is recent but sufficiently old for reads +to be performed against the closest replica as opposed to the currently +leaseholder for a given range.

+

Note that this function requires an enterprise license on a CCL distribution to +return a result that is less likely the closest replica. It is otherwise +hardcoded as -4.8s from the statement time, which may not result in reading from the +nearest replica.

+
Volatile
localtimestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtimestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp(precision: int) → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp(precision: int) → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtimestamp(precision: int) → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
make_date(year: int, month: int, day: int) → date

Create date (formatted according to ISO 8601) from year, month, and day fields (negative years signify BC).

+
Immutable
make_timestamp(year: int, month: int, day: int, hour: int, min: int, sec: float) → timestamp

Create timestamp (formatted according to ISO 8601) from year, month, day, hour, minute, and seconds fields (negative years signify BC).

+
Immutable
make_timestamptz(year: int, month: int, day: int, hour: int, min: int, sec: float) → timestamptz

Create timestamp (formatted according to ISO 8601) with time zone from year, month, day, hour, minute and seconds fields (negative years signify BC). If timezone is not specified, the current time zone is used.

+
Stable
make_timestamptz(year: int, month: int, day: int, hour: int, min: int, sec: float, timezone: string) → timestamptz

Create timestamp (formatted according to ISO 8601) with time zone from year, month, day, hour, minute and seconds fields (negative years signify BC). If timezone is not specified, the current time zone is used.

+
Stable
now() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
now() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
now() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
overlaps(s1: date, e1: date, s2: date, e2: date) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: date, e1: interval, s2: date, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: time, e1: interval, s2: time, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: time, e1: time, s2: time, e2: time) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamp, e1: interval, s2: timestamp, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamp, e1: timestamp, s2: timestamp, e2: timestamp) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamptz, e1: interval, s2: timestamptz, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Stable
overlaps(s1: timestamptz, e1: timestamptz, s2: timestamptz, e2: timestamptz) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timetz, e1: interval, s2: timetz, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timetz, e1: timetz, s2: timetz, e2: timetz) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
statement_timestamp() → timestamp

Returns the start time of the current statement.

+
Stable
statement_timestamp() → timestamptz

Returns the start time of the current statement.

+
Stable
strftime(input: date, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strftime(input: timestamp, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strftime(input: timestamptz, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strptime(input: string, format: string) → timestamptz

Returns input as a timestamptz using format (which uses standard strptime formatting).

+
Immutable
timeofday() → string

Returns the current system time on one of the cluster nodes as a string.

+
Stable
timezone(timezone: string, time: time) → timetz

Treat given time without time zone as located in the specified time zone.

+
Stable
timezone(timezone: string, timestamp: timestamp) → timestamptz

Treat given time stamp without time zone as located in the specified time zone.

+
Immutable
timezone(timezone: string, timestamptz: timestamptz) → timestamp

Convert given time stamp with time zone to the new time zone, with no time zone designation.

+
Immutable
timezone(timezone: string, timestamptz_string: string) → timestamp

Convert given time stamp with time zone to the new time zone, with no time zone designation.

+
Stable
timezone(timezone: string, timetz: timetz) → timetz

Convert given time with time zone to the new time zone.

+
Stable
to_char(date: date) → string

Convert a date to a string assuming the ISO, MDY DateStyle.

+
Immutable
to_char(date: date, format: string) → string

Convert a date to a string using the given format.

+
Stable
to_char(interval: interval) → string

Convert an interval to a string assuming the Postgres IntervalStyle.

+
Immutable
to_char(interval: interval, format: string) → string

Convert an interval to a string using the given format.

+
Stable
to_char(timestamp: timestamp) → string

Convert a timestamp to a string assuming the ISO, MDY DateStyle.

+
Immutable
to_char(timestamp: timestamp, format: string) → string

Convert a timestamp to a string using the given format.

+
Stable
to_char(timestamptz: timestamptz, format: string) → string

Convert a timestamp with time zone to a string using the given format.

+
Stable
to_timestamp(timestamp: float) → timestamptz

Convert Unix epoch (seconds since 1970-01-01 00:00:00+00) to timestamp with time zone.

+
Immutable
transaction_timestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
transaction_timestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
transaction_timestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
with_max_staleness(max_staleness: interval) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp within the staleness +bound that allows execution of the reads at the nearest available replica without blocking.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_max_staleness(max_staleness: interval, nearest_only: bool) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp within the staleness +bound that allows execution of the reads at the nearest available replica without blocking.

+

If nearest_only is set to true, reads that cannot be served using the nearest +available replica will error.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_min_timestamp(min_timestamp: timestamptz) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp before the min_timestamp +that allows execution of the reads at the nearest available replica without blocking.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_min_timestamp(min_timestamp: timestamptz, nearest_only: bool) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp before the min_timestamp +that allows execution of the reads at the nearest available replica without blocking.

+

If nearest_only is set to true, reads that cannot be served using the nearest +available replica will error.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
+ +### Enum functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
enum_first(val: anyenum) → anyenum

Returns the first value of the input enum type.

+
Stable
enum_last(val: anyenum) → anyenum

Returns the last value of the input enum type.

+
Stable
enum_range(lower: anyenum, upper: anyenum) → anyenum[]

Returns all values of the input enum in an ordered array between the two arguments (inclusive).

+
Stable
enum_range(val: anyenum) → anyenum[]

Returns all values of the input enum in an ordered array.

+
Stable
+ +### FLOAT functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
abs(val: decimal) → decimal

Calculates the absolute value of val.

+
Immutable
abs(val: float) → float

Calculates the absolute value of val.

+
Immutable
abs(val: int) → int

Calculates the absolute value of val.

+
Immutable
acos(val: float) → float

Calculates the inverse cosine of val.

+
Immutable
acosd(val: float) → float

Calculates the inverse cosine of val with the result in degrees

+
Immutable
acosh(val: float) → float

Calculates the inverse hyperbolic cosine of val.

+
Immutable
asin(val: float) → float

Calculates the inverse sine of val.

+
Immutable
asind(val: float) → float

Calculates the inverse sine of val with the result in degrees.

+
Immutable
asinh(val: float) → float

Calculates the inverse hyperbolic sine of val.

+
Immutable
atan(val: float) → float

Calculates the inverse tangent of val.

+
Immutable
atan2(x: float, y: float) → float

Calculates the inverse tangent of x/y.

+
Immutable
atan2d(x: float, y: float) → float

Calculates the inverse tangent of x/y with the result in degrees

+
Immutable
atand(val: float) → float

Calculates the inverse tangent of val with the result in degrees.

+
Immutable
atanh(val: float) → float

Calculates the inverse hyperbolic tangent of val.

+
Immutable
cbrt(val: decimal) → decimal

Calculates the cube root (∛) of val.

+
Immutable
cbrt(val: float) → float

Calculates the cube root (∛) of val.

+
Immutable
ceil(val: decimal) → decimal

Calculates the smallest integer not smaller than val.

+
Immutable
ceil(val: float) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceil(val: int) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: decimal) → decimal

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: float) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: int) → float

Calculates the smallest integer not smaller than val.

+
Immutable
cos(val: float) → float

Calculates the cosine of val.

+
Immutable
cosd(val: float) → float

Calculates the cosine of val where val is in degrees.

+
Immutable
cosh(val: float) → float

Calculates the hyperbolic cosine of val.

+
Immutable
cot(val: float) → float

Calculates the cotangent of val.

+
Immutable
cotd(val: float) → float

Calculates the cotangent of val where val is in degrees.

+
Immutable
degrees(val: float) → float

Converts val as a radian value to a degree value.

+
Immutable
div(x: decimal, y: decimal) → decimal

Calculates the integer quotient of x/y.

+
Immutable
div(x: float, y: float) → float

Calculates the integer quotient of x/y.

+
Immutable
div(x: int, y: int) → int

Calculates the integer quotient of x/y.

+
Immutable
exp(val: decimal) → decimal

Calculates e ^ val.

+
Immutable
exp(val: float) → float

Calculates e ^ val.

+
Immutable
floor(val: decimal) → decimal

Calculates the largest integer not greater than val.

+
Immutable
floor(val: float) → float

Calculates the largest integer not greater than val.

+
Immutable
floor(val: int) → float

Calculates the largest integer not greater than val.

+
Immutable
isnan(val: decimal) → bool

Returns true if val is NaN, false otherwise.

+
Immutable
isnan(val: float) → bool

Returns true if val is NaN, false otherwise.

+
Immutable
ln(val: decimal) → decimal

Calculates the natural log of val.

+
Immutable
ln(val: float) → float

Calculates the natural log of val.

+
Immutable
log(b: decimal, x: decimal) → decimal

Calculates the base b log of x.

+
Immutable
log(b: float, x: float) → float

Calculates the base b log of x.

+
Immutable
log(val: decimal) → decimal

Calculates the base 10 log of val.

+
Immutable
log(val: float) → float

Calculates the base 10 log of val.

+
Immutable
mod(x: decimal, y: decimal) → decimal

Calculates x%y.

+
Immutable
mod(x: float, y: float) → float

Calculates x%y.

+
Immutable
mod(x: int, y: int) → int

Calculates x%y.

+
Immutable
pi() → float

Returns the value for pi (3.141592653589793).

+
Immutable
pow(x: decimal, y: decimal) → decimal

Calculates x^y.

+
Immutable
pow(x: float, y: float) → float

Calculates x^y.

+
Immutable
pow(x: int, y: int) → int

Calculates x^y.

+
Immutable
power(x: decimal, y: decimal) → decimal

Calculates x^y.

+
Immutable
power(x: float, y: float) → float

Calculates x^y.

+
Immutable
power(x: int, y: int) → int

Calculates x^y.

+
Immutable
radians(val: float) → float

Converts val as a degree value to a radians value.

+
Immutable
random() → float

Returns a random floating-point number between 0 (inclusive) and 1 (exclusive). Note that the value contains at most 53 bits of randomness.

+
Volatile
round(input: decimal, decimal_accuracy: int) → decimal

Keeps decimal_accuracy number of figures to the right of the zero position in input using half away from zero rounding. If decimal_accuracy is not in the range -2^31…(2^31-1), the results are undefined.

+
Immutable
round(input: float, decimal_accuracy: int) → float

Keeps decimal_accuracy number of figures to the right of the zero position in input using half to even (banker’s) rounding.

+
Immutable
round(val: decimal) → decimal

Rounds val to the nearest integer, half away from zero: round(+/-2.4) = +/-2, round(+/-2.5) = +/-3.

+
Immutable
round(val: float) → float

Rounds val to the nearest integer using half to even (banker’s) rounding.

+
Immutable
setseed(seed: float) → void

Sets the seed for subsequent random() calls in this session (value between -1.0 and 1.0, inclusive). There are no guarantees as to how this affects the seed of random() calls that appear in the same query as setseed().

+
Volatile
sign(val: decimal) → decimal

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sign(val: float) → float

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sign(val: int) → int

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sin(val: float) → float

Calculates the sine of val.

+
Immutable
sind(val: float) → float

Calculates the sine of val where val is in degrees.

+
Immutable
sinh(val: float) → float

Calculates the hyperbolic sine of val.

+
Immutable
sqrt(val: decimal) → decimal

Calculates the square root of val.

+
Immutable
sqrt(val: float) → float

Calculates the square root of val.

+
Immutable
tan(val: float) → float

Calculates the tangent of val.

+
Immutable
tand(val: float) → float

Calculates the tangent of val where val is in degrees.

+
Immutable
tanh(val: float) → float

Calculates the hyperbolic tangent of val.

+
Immutable
trunc(val: decimal) → decimal

Truncates the decimal values of val.

+
Immutable
trunc(val: decimal, scale: int) → decimal

Truncate val to scale decimal places

+
Immutable
trunc(val: float) → float

Truncates the decimal values of val.

+
Immutable
+ +### Full Text Search functions + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
phraseto_tsquery(config: string, text: string) → tsquery

Converts text to a tsquery, normalizing words according to the specified configuration. The <-> operator is inserted between each token in the input.

+
Immutable
phraseto_tsquery(text: string) → tsquery

Converts text to a tsquery, normalizing words according to the default configuration. The <-> operator is inserted between each token in the input.

+
Stable
plainto_tsquery(config: string, text: string) → tsquery

Converts text to a tsquery, normalizing words according to the specified configuration. The & operator is inserted between each token in the input.

+
Immutable
plainto_tsquery(text: string) → tsquery

Converts text to a tsquery, normalizing words according to the default configuration. The & operator is inserted between each token in the input.

+
Stable
to_tsquery(config: string, text: string) → tsquery

Converts the input text into a tsquery by normalizing each word in the input according to the specified configuration. The input must already be formatted like a tsquery, in other words, subsequent tokens must be connected by a tsquery operator (&, |, <->, !).

+
Immutable
to_tsquery(text: string) → tsquery

Converts the input text into a tsquery by normalizing each word in the input according to the default configuration. The input must already be formatted like a tsquery, in other words, subsequent tokens must be connected by a tsquery operator (&, |, <->, !).

+
Stable
to_tsvector(config: string, text: string) → tsvector

Converts text to a tsvector, normalizing words according to the specified configuration. Position information is included in the result.

+
Immutable
to_tsvector(text: string) → tsvector

Converts text to a tsvector, normalizing words according to the default configuration. Position information is included in the result.

+
Stable
ts_parse(parser_name: string, document: string) → tuple{int AS tokid, string AS token}

ts_parse parses the given document and returns a series of records, one for each token produced by parsing. Each record includes a tokid showing the assigned token type and a token which is the text of the token.

+
Stable
ts_rank(vector: tsvector, query: tsquery) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(vector: tsvector, query: tsquery, normalization: int) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(weights: float[], vector: tsvector, query: tsquery) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(weights: float[], vector: tsvector, query: tsquery, normalization: int) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
+ +### Fuzzy String Matching functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
levenshtein(source: string, target: string) → int

Calculates the Levenshtein distance between two strings. Maximum input length is 255 characters.

+
Immutable
levenshtein(source: string, target: string, ins_cost: int, del_cost: int, sub_cost: int) → int

Calculates the Levenshtein distance between two strings. The cost parameters specify how much to charge for each edit operation. Maximum input length is 255 characters.

+
Immutable
metaphone(source: string, max_output_length: int) → string

Convert a string to its Metaphone code. Maximum input length is 255 characters

+
Immutable
soundex(source: string) → string

Convert a string to its Soundex code.

+
Immutable
+ +### ID generation functions + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
experimental_uuid_v4() → bytes

Returns a UUID.

+
Volatile
gen_random_ulid() → uuid

Generates a random ULID and returns it as a value of UUID type.

+
Volatile
gen_random_uuid() → uuid

Generates a random version 4 UUID, and returns it as a value of UUID type.

+
Volatile
unique_rowid() → int

Returns a unique ID used by CockroachDB to generate unique row IDs if a Primary Key isn’t defined for the table. The value is a combination of the insert timestamp and the ID of the node executing the statement, which guarantees this combination is globally unique. However, there can be gaps and the order is not completely guaranteed.

+
Volatile
unordered_unique_rowid() → int

Returns a unique ID. The value is a combination of the insert timestamp (bit-reversed) and the ID of the node executing the statement, which guarantees this combination is globally unique. The way it is generated is statistically likely to not have any ordering relative to previously generated values.

+
Volatile
uuid_generate_v1() → uuid

Generates a version 1 UUID, and returns it as a value of UUID type. To avoid exposing the server’s real MAC address, this uses a random MAC address and a timestamp. Essentially, this is an alias for uuid_generate_v1mc.

+
Volatile
uuid_generate_v1mc() → uuid

Generates a version 1 UUID, and returns it as a value of UUID type. This uses a random MAC address and a timestamp.

+
Volatile
uuid_generate_v3(namespace: uuid, name: string) → uuid

Generates a version 3 UUID in the given namespace using the specified input name, with md5 as the hashing method. The namespace should be one of the special constants produced by the uuid_ns_*() functions.

+
Immutable
uuid_generate_v4() → uuid

Generates a random version 4 UUID, and returns it as a value of UUID type.

+
Volatile
uuid_generate_v5(namespace: uuid, name: string) → uuid

Generates a version 5 UUID in the given namespace using the specified input name. This is similar to a version 3 UUID, except it uses SHA-1 for hashing.

+
Immutable
uuid_nil() → uuid

Returns a nil UUID constant.

+
Immutable
uuid_ns_dns() → uuid

Returns a constant designating the DNS namespace for UUIDs.

+
Immutable
uuid_ns_oid() → uuid

Returns a constant designating the ISO object identifier (OID) namespace for UUIDs. These are unrelated to the OID type used internally in the database.

+
Immutable
uuid_ns_url() → uuid

Returns a constant designating the URL namespace for UUIDs.

+
Immutable
uuid_ns_x500() → uuid

Returns a constant designating the X.500 distinguished name (DN) namespace for UUIDs.

+
Immutable
uuid_v4() → bytes

Returns a UUID.

+
Volatile
+ +### INET functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
abbrev(val: inet) → string

Converts the combined IP address and prefix length to an abbreviated display format as text. For INET types, this will omit the prefix length if it’s not the default (32 for IPv4, 128 for IPv6)

+

For example, abbrev('192.168.1.2/24') returns '192.168.1.2/24'

+
Immutable
broadcast(val: inet) → inet

Gets the broadcast address for the network address represented by the value.

+

For example, broadcast('192.168.1.2/24') returns '192.168.1.255/24'

+
Immutable
family(val: inet) → int

Extracts the IP family of the value; 4 for IPv4, 6 for IPv6.

+

For example, family('::1') returns 6

+
Immutable
host(val: inet) → string

Extracts the address part of the combined address/prefixlen value as text.

+

For example, host('192.168.1.2/16') returns '192.168.1.2'

+
Immutable
hostmask(val: inet) → inet

Creates an IP host mask corresponding to the prefix length in the value.

+

For example, hostmask('192.168.1.2/16') returns '0.0.255.255'

+
Immutable
masklen(val: inet) → int

Retrieves the prefix length stored in the value.

+

For example, masklen('192.168.1.2/16') returns 16

+
Immutable
netmask(val: inet) → inet

Creates an IP network mask corresponding to the prefix length in the value.

+

For example, netmask('192.168.1.2/16') returns '255.255.0.0'

+
Immutable
set_masklen(val: inet, prefixlen: int) → inet

Sets the prefix length of val to prefixlen.

+

For example, set_masklen('192.168.1.2', 16) returns '192.168.1.2/16'.

+
Immutable
+ +### INT functions + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
crc32c(bytes...) → int

Calculates the CRC-32 hash using the Castagnoli polynomial.

+
Leakproof
crc32c(string...) → int

Calculates the CRC-32 hash using the Castagnoli polynomial.

+
Leakproof
crc32ieee(bytes...) → int

Calculates the CRC-32 hash using the IEEE polynomial.

+
Leakproof
crc32ieee(string...) → int

Calculates the CRC-32 hash using the IEEE polynomial.

+
Leakproof
fnv32(bytes...) → int

Calculates the 32-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv32(string...) → int

Calculates the 32-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv32a(bytes...) → int

Calculates the 32-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv32a(string...) → int

Calculates the 32-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv64(bytes...) → int

Calculates the 64-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv64(string...) → int

Calculates the 64-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv64a(bytes...) → int

Calculates the 64-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv64a(string...) → int

Calculates the 64-bit FNV-1a hash value of a set of values.

+
Leakproof
width_bucket(operand: decimal, b1: decimal, b2: decimal, count: int) → int

return the bucket number to which operand would be assigned in a histogram having count equal-width buckets spanning the range b1 to b2.

+
Immutable
width_bucket(operand: int, b1: int, b2: int, count: int) → int

return the bucket number to which operand would be assigned in a histogram having count equal-width buckets spanning the range b1 to b2.

+
Immutable
width_bucket(operand: anyelement, thresholds: anyelement[]) → int

return the bucket number to which operand would be assigned given an array listing the lower bounds of the buckets; returns 0 for an input less than the first lower bound; the thresholds array must be sorted, smallest first, or unexpected results will be obtained

+
Immutable
+ +### JSONB functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_to_json(array: anyelement[]) → jsonb

Returns the array as JSON or JSONB.

+
Stable
array_to_json(array: anyelement[], pretty_bool: bool) → jsonb

Returns the array as JSON or JSONB.

+
Stable
json_array_elements(input: jsonb) → jsonb

Expands a JSON array to a set of JSON values.

+
Immutable
json_array_elements_text(input: jsonb) → string

Expands a JSON array to a set of text values.

+
Immutable
json_array_length(json: jsonb) → int

Returns the number of elements in the outermost JSON or JSONB array.

+
Immutable
json_build_array(anyelement...) → jsonb

Builds a possibly-heterogeneously-typed JSON or JSONB array out of a variadic argument list.

+
Stable
json_build_object(anyelement...) → jsonb

Builds a JSON object out of a variadic argument list.

+
Stable
json_each(input: jsonb) → tuple{string AS key, jsonb AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs.

+
Immutable
json_each_text(input: jsonb) → tuple{string AS key, string AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs. The returned values will be of type text.

+
Immutable
json_extract_path(jsonb, string...) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
json_extract_path_text(jsonb, string...) → string

Returns the JSON value as text pointed to by the variadic arguments.

+
Immutable
json_object(keys: string[], values: string[]) → jsonb

This form of json_object takes keys and values pairwise from two separate arrays. In all other respects it is identical to the one-argument form.

+
Immutable
json_object(texts: string[]) → jsonb

Builds a JSON or JSONB object out of a text array. The array must have exactly one dimension with an even number of members, in which case they are taken as alternating key/value pairs.

+
Immutable
json_populate_record(base: anyelement, from_json: jsonb) → anyelement

Expands the object in from_json to a row whose columns match the record type defined by base.

+
Stable
json_populate_recordset(base: anyelement, from_json: jsonb) → anyelement

Expands the outermost array of objects in from_json to a set of rows whose columns match the record type defined by base

+
Stable
json_remove_path(val: jsonb, path: string[]) → jsonb

Remove the specified path from the JSON object.

+
Immutable
json_set(val: jsonb, path: string[], to: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
json_set(val: jsonb, path: string[], to: jsonb, create_missing: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If create_missing is false, new keys will not be inserted to objects and values will not be prepended or appended to arrays.

+
Immutable
json_strip_nulls(from_json: jsonb) → jsonb

Returns from_json with all object fields that have null values omitted. Other null values are untouched.

+
Immutable
json_typeof(val: jsonb) → string

Returns the type of the outermost JSON value as a text string.

+
Immutable
json_valid(string: string) → bool

Returns whether the given string is valid JSON.

+
Immutable
jsonb_array_elements(input: jsonb) → jsonb

Expands a JSON array to a set of JSON values.

+
Immutable
jsonb_array_elements_text(input: jsonb) → string

Expands a JSON array to a set of text values.

+
Immutable
jsonb_array_length(json: jsonb) → int

Returns the number of elements in the outermost JSON or JSONB array.

+
Immutable
jsonb_build_array(anyelement...) → jsonb

Builds a possibly-heterogeneously-typed JSON or JSONB array out of a variadic argument list.

+
Stable
jsonb_build_object(anyelement...) → jsonb

Builds a JSON object out of a variadic argument list.

+
Stable
jsonb_each(input: jsonb) → tuple{string AS key, jsonb AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs.

+
Immutable
jsonb_each_text(input: jsonb) → tuple{string AS key, string AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs. The returned values will be of type text.

+
Immutable
jsonb_exists_any(json: jsonb, array: string[]) → bool

Returns whether any of the strings in the text array exist as top-level keys or array elements

+
Immutable
jsonb_extract_path(jsonb, string...) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
jsonb_extract_path_text(jsonb, string...) → string

Returns the JSON value as text pointed to by the variadic arguments.

+
Immutable
jsonb_insert(target: jsonb, path: string[], new_val: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments. new_val will be inserted before path target.

+
Immutable
jsonb_insert(target: jsonb, path: string[], new_val: jsonb, insert_after: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If insert_after is true (default is false), new_val will be inserted after path target.

+
Immutable
jsonb_object(keys: string[], values: string[]) → jsonb

This form of json_object takes keys and values pairwise from two separate arrays. In all other respects it is identical to the one-argument form.

+
Immutable
jsonb_object(texts: string[]) → jsonb

Builds a JSON or JSONB object out of a text array. The array must have exactly one dimension with an even number of members, in which case they are taken as alternating key/value pairs.

+
Immutable
jsonb_populate_record(base: anyelement, from_json: jsonb) → anyelement

Expands the object in from_json to a row whose columns match the record type defined by base.

+
Stable
jsonb_populate_recordset(base: anyelement, from_json: jsonb) → anyelement

Expands the outermost array of objects in from_json to a set of rows whose columns match the record type defined by base

+
Stable
jsonb_pretty(val: jsonb) → string

Returns the given JSON value as a STRING indented and with newlines.

+
Immutable
jsonb_set(val: jsonb, path: string[], to: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
jsonb_set(val: jsonb, path: string[], to: jsonb, create_missing: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If create_missing is false, new keys will not be inserted to objects and values will not be prepended or appended to arrays.

+
Immutable
jsonb_strip_nulls(from_json: jsonb) → jsonb

Returns from_json with all object fields that have null values omitted. Other null values are untouched.

+
Immutable
jsonb_typeof(val: jsonb) → string

Returns the type of the outermost JSON value as a text string.

+
Immutable
row_to_json(row: tuple) → jsonb

Returns the row as a JSON object.

+
Stable
to_json(val: anyelement) → jsonb

Returns the value as JSON or JSONB.

+
Stable
to_jsonb(val: anyelement) → jsonb

Returns the value as JSON or JSONB.

+
Stable
+ +### Multi-region functions + + + + + + + +
Function → ReturnsDescriptionVolatility
default_to_database_primary_region(val: string) → string

Returns the given region if the region has been added to the current database. Otherwise, this will return the primary region of the current database. This will error if the current database is not a multi-region database.

+
Stable
gateway_region() → string

Returns the region of the connection’s current node as defined by the locality flag on node startup. Returns an error if no region is set.

+
Stable
rehome_row() → string

Returns the region of the connection’s current node as defined by the locality flag on node startup. Returns an error if no region is set.

+
Stable
+ +### STRING[] functions + + + + + + +
Function → ReturnsDescriptionVolatility
regexp_split_to_array(string: string, pattern: string) → string[]

Split string using a POSIX regular expression as the delimiter.

+
Immutable
regexp_split_to_array(string: string, pattern: string, flags: string) → string[]

Split string using a POSIX regular expression as the delimiter with flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
+ +### Sequence functions + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
currval(sequence_name: string) → int

Returns the latest value obtained with nextval for this sequence in this session.

+
Volatile
currval(sequence_name: regclass) → int

Returns the latest value obtained with nextval for this sequence in this session.

+
Volatile
lastval() → int

Return value most recently obtained with nextval in this session.

+
Volatile
nextval(sequence_name: string) → int

Advances the given sequence and returns its new value.

+
Volatile
nextval(sequence_name: regclass) → int

Advances the given sequence and returns its new value.

+
Volatile
setval(sequence_name: string, value: int) → int

Set the given sequence’s current value. The next call to nextval will return value + Increment

+
Volatile
setval(sequence_name: string, value: int, is_called: bool) → int

Set the given sequence’s current value. If is_called is false, the next call to nextval will return value; otherwise value + Increment.

+
Volatile
setval(sequence_name: regclass, value: int) → int

Set the given sequence’s current value. The next call to nextval will return value + Increment

+
Volatile
setval(sequence_name: regclass, value: int, is_called: bool) → int

Set the given sequence’s current value. If is_called is false, the next call to nextval will return value; otherwise value + Increment.

+
Volatile
+ +### Set-returning functions + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
aclexplode(aclitems: string[]) → tuple{oid AS grantor, oid AS grantee, string AS privilege_type, bool AS is_grantable}

Produces a virtual table containing aclitem stuff (returns no rows as this feature is unsupported in CockroachDB)

+
Stable
generate_series(start: int, end: int) → int

Produces a virtual table containing the integer values from start to end, inclusive.

+
Immutable
generate_series(start: int, end: int, step: int) → int

Produces a virtual table containing the integer values from start to end, inclusive, by increment of step.

+
Immutable
generate_series(start: timestamp, end: timestamp, step: interval) → timestamp

Produces a virtual table containing the timestamp values from start to end, inclusive, by increment of step.

+
Immutable
generate_series(start: timestamptz, end: timestamptz, step: interval) → timestamptz

Produces a virtual table containing the timestampTZ values from start to end, inclusive, by increment of step.

+
Immutable
generate_subscripts(array: anyelement[]) → int

Returns a series comprising the given array’s subscripts.

+
Immutable
generate_subscripts(array: anyelement[], dim: int) → int

Returns a series comprising the given array’s subscripts.

+
Immutable
generate_subscripts(array: anyelement[], dim: int, reverse: bool) → int

Returns a series comprising the given array’s subscripts.

+

When reverse is true, the series is returned in reverse order.

+
Immutable
information_schema._pg_expandarray(input: anyelement[]) → tuple{anyelement AS x, int AS n}

Returns the input array as a set of rows with an index

+
Immutable
json_object_keys(input: jsonb) → string

Returns sorted set of keys in the outermost JSON object.

+
Immutable
json_to_record(input: jsonb) → tuple

Builds an arbitrary record from a JSON object.

+
Stable
json_to_recordset(input: jsonb) → tuple

Builds an arbitrary set of records from a JSON array of objects.

+
Stable
jsonb_object_keys(input: jsonb) → string

Returns sorted set of keys in the outermost JSON object.

+
Immutable
jsonb_to_record(input: jsonb) → tuple

Builds an arbitrary record from a JSON object.

+
Stable
jsonb_to_recordset(input: jsonb) → tuple

Builds an arbitrary set of records from a JSON array of objects.

+
Stable
pg_get_keywords() → tuple{string AS word, string AS catcode, string AS catdesc}

Produces a virtual table containing the keywords known to the SQL parser.

+
Immutable
pg_options_to_table(options: string[]) → tuple{string AS option_name, string AS option_value}

Converts the options array format to a table.

+
Stable
regexp_split_to_table(string: string, pattern: string) → string

Split string using a POSIX regular expression as the delimiter.

+
Immutable
regexp_split_to_table(string: string, pattern: string, flags: string) → string

Split string using a POSIX regular expression as the delimiter with flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
unnest(anyelement[], anyelement[], anyelement[]...) → tuple{anyelement AS unnest, anyelement AS unnest, anyelement AS unnest}

Returns the input arrays as a set of rows

+
Immutable
unnest(input: anyelement[]) → anyelement

Returns the input array as a set of rows

+
Immutable
workload_index_recs() → string

Returns set of index recommendations

+
Immutable
workload_index_recs(timestamptz: timestamptz) → string

Returns set of index recommendations

+
Immutable
+ +### Spatial functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
_st_contains(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no points of geometry_b lie in the exterior of geometry_a, and there is at least one point in the interior of geometry_b that lies in the interior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_containsproperly(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_b intersects the interior of geometry_a but not the boundary or exterior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_coveredby(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_a is outside geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_coveredby(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_covers(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_b is outside geography_a.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_covers(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_crosses(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a has some - but not all - interior points in common with geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dfullywithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, inclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than or equal to distance units.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dfullywithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, exclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than distance units.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_equals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is spatially equal to geometry_b, i.e. ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = true.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_intersects(geography_a: geography, geography_b: geography) → bool

Returns true if geography_a shares any portion of space with geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_intersects(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_overlaps(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a intersects but does not completely contain geometry_b, or vice versa. “Does not completely” implies ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = false.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_touches(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if the only points in common between geometry_a and geometry_b are on the boundary. Note points do not touch other points.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_within(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is completely inside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
addgeometrycolumn(catalog_name: string, schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(catalog_name: string, schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
geometrytype(geometry: geometry) → string

Returns the type of geometry as a string.

+

This function utilizes the GEOS module.

+
Immutable
geomfromewkb(val: bytes) → geometry

Returns the Geometry from an EWKB representation.

+
Immutable
geomfromewkt(val: string) → geometry

Returns the Geometry from an EWKT representation.

+
Immutable
postgis_addbbox(geometry: geometry) → geometry

Compatibility placeholder function with PostGIS. This does not perform any operation on the Geometry.

+
Immutable
postgis_dropbbox(geometry: geometry) → geometry

Compatibility placeholder function with PostGIS. This does not perform any operation on the Geometry.

+
Immutable
postgis_extensions_upgrade() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_full_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_geos_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_getbbox(geometry: geometry) → box2d

Returns a box2d encapsulating the given Geometry.

+
Immutable
postgis_hasbbox(geometry: geometry) → bool

Returns whether a given Geometry has a bounding box. False for points and empty geometries; always true otherwise.

+
Immutable
postgis_lib_build_date() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_lib_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_liblwgeom_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_libxml_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_proj_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_build_date() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_installed() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_released() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_wagyu_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
st_addmeasure(geometry: geometry, start: float, end: float) → geometry

Returns a copy of a LineString or MultiLineString with measure coordinates linearly interpolated between the specified start and end values. Any existing M coordinates will be overwritten.

+
Immutable
st_addpoint(line_string: geometry, point: geometry) → geometry

Adds a Point to the end of a LineString.

+
Immutable
st_addpoint(line_string: geometry, point: geometry, index: int) → geometry

Adds a Point to a LineString at the given 0-based index (-1 to append).

+
Immutable
st_affine(geometry: geometry, a: float, b: float, c: float, d: float, e: float, f: float, g: float, h: float, i: float, x_off: float, y_off: float, z_off: float) → geometry

Applies a 3D affine transformation to the given geometry.

+

The matrix transformation will be applied as follows for each coordinate:
/ a b c x_off \   / x \
| d e f y_off |   | y |
| g h i z_off |   | z |
\ 0 0 0 1     /   \ 0 /

+
Immutable
st_affine(geometry: geometry, a: float, b: float, d: float, e: float, x_off: float, y_off: float) → geometry

Applies a 2D affine transformation to the given geometry.

+

The matrix transformation will be applied as follows for each coordinate: +/ a b x_off \ / x
+| d e y_off | | y | +\ 0 0 1 / \ 0 /

+
Immutable
st_angle(line1: geometry, line2: geometry) → float

Returns the clockwise angle between two LINESTRING geometries, treating them as vectors between their start- and endpoints. Returns NULL if any vectors have 0 length.

+
Immutable
st_angle(point1: geometry, point2: geometry, point3: geometry) → float

Returns the clockwise angle between the vectors formed by point2,point1 and point2,point3. The arguments must be POINT geometries. Returns NULL if any vectors have 0 length.

+
Immutable
st_angle(point1: geometry, point2: geometry, point3: geometry, point4: geometry) → float

Returns the clockwise angle between the vectors formed by point1,point2 and point3,point4. The arguments must be POINT geometries. Returns NULL if any vectors have 0 length.

+
Immutable
st_area(geography: geography) → float

Returns the area of the given geography in meters^2. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_area(geography: geography, use_spheroid: bool) → float

Returns the area of the given geography in meters^2.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_area(geometry: geometry) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_area(geometry_str: string) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_area2d(geometry: geometry) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_asbinary(geography: geography) → bytes

Returns the WKB representation of a given Geography.

+
Immutable
st_asbinary(geography: geography, xdr_or_ndr: string) → bytes

Returns the WKB representation of a given Geography. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_asbinary(geometry: geometry) → bytes

Returns the WKB representation of a given Geometry.

+
Immutable
st_asbinary(geometry: geometry, xdr_or_ndr: string) → bytes

Returns the WKB representation of a given Geometry. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_asencodedpolyline(geometry: geometry) → string

Returns the geometry as an Encoded Polyline. +This format is used by Google Maps with precision=5 and by Open Source Routing Machine with precision=5 and 6. +Preserves 5 decimal places.

+
Immutable
st_asencodedpolyline(geometry: geometry, precision: int4) → string

Returns the geometry as an Encoded Polyline. +This format is used by Google Maps with precision=5 and by Open Source Routing Machine with precision=5 and 6. +Precision specifies how many decimal places will be preserved in Encoded Polyline. Value should be the same on encoding and decoding, or coordinates will be incorrect.

+
Immutable
st_asewkb(geography: geography) → bytes

Returns the EWKB representation of a given Geography.

+
Immutable
st_asewkb(geometry: geometry) → bytes

Returns the EWKB representation of a given Geometry.

+
Immutable
st_asewkt(geography: geography) → string

Returns the EWKT representation of a given Geography. A default of 15 decimal digits is used.

+
Immutable
st_asewkt(geography: geography, max_decimal_digits: int) → string

Returns the EWKT representation of a given Geography. The max_decimal_digits parameter controls the maximum decimal digits to print after the decimal point. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_asewkt(geometry: geometry) → string

Returns the EWKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+
Immutable
st_asewkt(geometry: geometry, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the decimal point. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_asewkt(geometry_str: string) → string

Returns the EWKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asewkt(geometry_str: string, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the decimal point. Use -1 to print as many digits as required to rebuild the same number.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geography: geography) → string

Returns the GeoJSON representation of a given Geography. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(geography: geography, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geography with max_decimal_digits output for each coordinate value.

+
Immutable
st_asgeojson(geography: geography, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geography with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option (default for Geography)
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326
  • +
+
Immutable
st_asgeojson(geometry: geometry) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(geometry: geometry, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+
Immutable
st_asgeojson(geometry: geometry, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326 (default for Geometry)
  • +
+
Immutable
st_asgeojson(geometry_str: string) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geometry_str: string, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geometry_str: string, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326 (default for Geometry)
  • +
+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(row: tuple) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(row: tuple, geo_column: string) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. Coordinates have a maximum of 9 decimal digits.

+
Stable
st_asgeojson(row: tuple, geo_column: string, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. max_decimal_digits will be output for each coordinate value.

+
Stable
st_asgeojson(row: tuple, geo_column: string, max_decimal_digits: int, pretty: bool) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. max_decimal_digits will be output for each coordinate value. Output will be pretty printed in JSON if pretty is true.

+
Stable
st_ashexewkb(geography: geography) → string

Returns the EWKB representation in hex of a given Geography.

+
Immutable
st_ashexewkb(geography: geography, xdr_or_ndr: string) → string

Returns the EWKB representation in hex of a given Geography. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_ashexewkb(geometry: geometry) → string

Returns the EWKB representation in hex of a given Geometry.

+
Immutable
st_ashexewkb(geometry: geometry, xdr_or_ndr: string) → string

Returns the EWKB representation in hex of a given Geometry. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_ashexwkb(geography: geography) → string

Returns the WKB representation in hex of a given Geography.

+
Immutable
st_ashexwkb(geometry: geometry) → string

Returns the WKB representation in hex of a given Geometry.

+
Immutable
st_askml(geography: geography) → string

Returns the KML representation of a given Geography.

+
Immutable
st_askml(geometry: geometry) → string

Returns the KML representation of a given Geometry.

+
Immutable
st_askml(geometry_str: string) → string

Returns the KML representation of a given Geometry.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds. +Uses 256 as the buffer size in tile coordinate space for geometry clipping. +Uses 4096 as the tile extent size in tile coordinate space.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be clipped and transformed.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds. +Uses 256 as the buffer size in tile coordinate space for geometry clipping.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be clipped and transformed.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int, buffer: int) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be clipped and transformed.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int, buffer: int, clip: bool) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds if required.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be transformed, and clipped if required.

+
Immutable
st_astext(geography: geography) → string

Returns the WKT representation of a given Geography. A default of 15 decimal digits is used.

+
Immutable
st_astext(geography: geography, max_decimal_digits: int) → string

Returns the WKT representation of a given Geography. The max_decimal_digits parameter controls the maximum decimal digits to print after the decimal point. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_astext(geometry: geometry) → string

Returns the WKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+
Immutable
st_astext(geometry: geometry, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the decimal point. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_astext(geometry_str: string) → string

Returns the WKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_astext(geometry_str: string, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the decimal point. Use -1 to print as many digits as required to rebuild the same number.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int, precision_z: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int, precision_z: int, precision_m: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_azimuth(geography_a: geography, geography_b: geography) → float

Returns the azimuth in radians of the segment defined by the given point geographies, or NULL if the two points are coincident. It is solved using the Inverse geodesic problem.

+

The azimuth angle is referenced from north, and is positive clockwise: North = 0; East = π/2; South = π; West = 3π/2.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_azimuth(geometry_a: geometry, geometry_b: geometry) → float

Returns the azimuth in radians of the segment defined by the given point geometries, or NULL if the two points are coincident.

+

The azimuth angle is referenced from north, and is positive clockwise: North = 0; East = π/2; South = π; West = 3π/2.

+
Immutable
st_bdpolyfromtext(str: string, srid: int) → geometry

Returns a Polygon from multilinestring WKT with a SRID. If the input is not a multilinestring an error will be thrown.

+
Immutable
st_boundary(geometry: geometry) → geometry

Returns the closure of the combinatorial boundary of this Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_box2dfromgeohash(geohash: string) → box2d

Return a Box2D from a GeoHash string with max precision.

+
Immutable
st_box2dfromgeohash(geohash: string, precision: int) → box2d

Return a Box2D from a GeoHash string with supplied precision.

+
Immutable
st_buffer(geography: geography, distance: float) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geography: geography, distance: float, buffer_style_params: string) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geography: geography, distance: float, quad_segs: int) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geometry: geometry, distance: decimal) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float, buffer_style_params: string) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float, quad_segs: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry_str: string, distance: decimal) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float, buffer_style_params: string) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float, quad_segs: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_centroid(geography: geography) → geography

Returns the centroid of given geography. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_centroid(geography: geography, use_spheroid: bool) → geography

Returns the centroid of given geography.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_centroid(geometry: geometry) → geometry

Returns the centroid of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_centroid(geometry_str: string) → geometry

Returns the centroid of the given geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_clipbybox2d(geometry: geometry, box2d: box2d) → geometry

Clips the geometry to conform to the bounding box specified by box2d.

+
Immutable
st_closestpoint(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the 2-dimensional point on geometry_a that is closest to geometry_b. This is the first point of the shortest line.

+
Immutable
st_collectionextract(geometry: geometry, type: int) → geometry

Given a collection, returns a multitype consisting only of elements of the specified type. If there are no elements of the given type, an EMPTY geometry is returned. Types are specified as 1=POINT, 2=LINESTRING, 3=POLYGON - other types are not supported.

+
Immutable
st_collectionhomogenize(geometry: geometry) → geometry

Returns the “simplest” representation of a collection’s contents. Collections of a single type will be returned as an appropriate multitype, or a singleton if it only contains a single geometry.

+
Immutable
st_combinebbox(box2d: box2d, geometry: geometry) → box2d

Combines the current bounding box with the bounding box of the Geometry.

+
Immutable
st_contains(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no points of geometry_b lie in the exterior of geometry_a, and there is at least one point in the interior of geometry_b that lies in the interior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_containsproperly(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_b intersects the interior of geometry_a but not the boundary or exterior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_convexhull(geometry: geometry) → geometry

Returns a geometry that represents the Convex Hull of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_coorddim(geometry: geometry) → int

Returns the number of coordinate dimensions of a given Geometry.

+
Immutable
st_coveredby(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_a is outside geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_coveredby(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_coveredby(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_covers(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_b is outside geography_a.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_covers(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_covers(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_crosses(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a has some - but not all - interior points in common with geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dfullywithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, inclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than or equal to distance units.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dfullywithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, exclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than distance units.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_difference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the difference of two Geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_dimension(geometry: geometry) → int

Returns the number of topological dimensions of a given Geometry.

+
Immutable
st_disjoint(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a does not overlap, touch or is within geometry_b.

+

This function utilizes the GEOS module.

+
Immutable
st_distance(geography_a: geography, geography_b: geography) → float

Returns the distance in meters between geography_a and geography_b. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_distance(geography_a: geography, geography_b: geography, use_spheroid: bool) → float

Returns the distance in meters between geography_a and geography_b.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_distance(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance between the given geometries.

+
Immutable
st_distance(geometry_a_str: string, geometry_b_str: string) → float

Returns the distance between the given geometries.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_distancesphere(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance in meters between geometry_a and geometry_b assuming the coordinates represent lng/lat points on a sphere.

+

This function utilizes the S2 library for spherical calculations.

+
Immutable
st_distancespheroid(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance in meters between geometry_a and geometry_b assuming the coordinates represent lng/lat points on a spheroid.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_dwithin(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geometry_a_str: string, geometry_b_str: string, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geometry_a_str: string, geometry_b_str: string, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_endpoint(geometry: geometry) → geometry

Returns the last point of a geometry which has shape LineString. Returns NULL if the geometry is not a LineString.

+
Immutable
st_envelope(box2d: box2d) → geometry

Returns a bounding geometry for the given box.

+
Immutable
st_envelope(geometry: geometry) → geometry

Returns a bounding envelope for the given geometry.

+

For geometries which have a POINT or LINESTRING bounding box (i.e. is a single point +or a horizontal or vertical line), a POINT or LINESTRING is returned. Otherwise, the +returned POLYGON will be ordered Bottom Left, Top Left, Top Right, Bottom Right, +Bottom Left.

+
Immutable
st_equals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is spatially equal to geometry_b, i.e. ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = true.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_estimatedextent(schema_name: string, table_name: string, geocolumn_name: string) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+
Stable
st_estimatedextent(schema_name: string, table_name: string, geocolumn_name: string, parent_only: bool) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+

The parent_only boolean is always ignored.

+
Stable
st_estimatedextent(table_name: string, geocolumn_name: string) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+
Stable
st_expand(box2d: box2d, delta: float) → box2d

Extends the box2d by delta units across all dimensions.

+
Immutable
st_expand(box2d: box2d, delta_x: float, delta_y: float) → box2d

Extends the box2d by delta_x units in the x dimension and delta_y units in the y dimension.

+
Immutable
st_expand(geometry: geometry, delta: float) → geometry

Extends the bounding box represented by the geometry by delta units across all dimensions, returning a Polygon representing the new bounding box.

+
Immutable
st_expand(geometry: geometry, delta_x: float, delta_y: float) → geometry

Extends the bounding box represented by the geometry by delta_x units in the x dimension and delta_y units in the y dimension, returning a Polygon representing the new bounding box.

+
Immutable
st_exteriorring(geometry: geometry) → geometry

Returns the exterior ring of a Polygon as a LineString. Returns NULL if the shape is not a Polygon.

+
Immutable
st_flipcoordinates(geometry: geometry) → geometry

Returns a new geometry with the X and Y axes flipped.

+
Immutable
st_force2d(geometry: geometry) → geometry

Returns a Geometry that is forced into XY layout with any Z or M dimensions discarded.

+
Immutable
st_force3d(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3d(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3dm(geometry: geometry) → geometry

Returns a Geometry that is forced into XYM layout. If a M coordinate doesn’t exist, it will be set to 0. If a Z coordinate is present, it will be discarded.

+
Immutable
st_force3dm(geometry: geometry, defaultM: float) → geometry

Returns a Geometry that is forced into XYM layout. If a M coordinate doesn’t exist, it will be set to the specified default M value. If a Z coordinate is present, it will be discarded.

+
Immutable
st_force3dz(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3dz(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate is present, it will be discarded.

+
Immutable
st_force4d(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZM layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate doesn’t exist, it will be set to 0.

+
Immutable
st_force4d(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate doesn’t exist, it will be set to 0.

+
Immutable
st_force4d(geometry: geometry, defaultZ: float, defaultM: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified Z value. If a M coordinate doesn’t exist, it will be set to the specified M value.

+
Immutable
st_forcecollection(geometry: geometry) → geometry

Converts the geometry into a GeometryCollection.

+
Immutable
st_forcepolygonccw(geometry: geometry) → geometry

Returns a Geometry where all Polygon objects have exterior rings in the counter-clockwise orientation and interior rings in the clockwise orientation. Non-Polygon objects are unchanged.

+
Immutable
st_forcepolygoncw(geometry: geometry) → geometry

Returns a Geometry where all Polygon objects have exterior rings in the clockwise orientation and interior rings in the counter-clockwise orientation. Non-Polygon objects are unchanged.

+
Immutable
st_frechetdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the Frechet distance between the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_frechetdistance(geometry_a: geometry, geometry_b: geometry, densify_frac: float) → float

Returns the Frechet distance between the given geometries, with the given segment densification (range 0.0-1.0, -1 to disable).

+

Smaller densify_frac gives a more accurate Fréchet distance. However, the computation time and memory usage increases with the square of the number of subsegments.

+

This function utilizes the GEOS module.

+
Immutable
st_generatepoints(geometry: geometry, npoints: int4) → geometry

Generates pseudo-random points until the requested number are found within the input area. Uses system time as a seed. +The requested number of points must not be larger than 65336.

+
Volatile
st_generatepoints(geometry: geometry, npoints: int4, seed: int4) → geometry

Generates pseudo-random points until the requested number are found within the input area. +The requested number of points must not be larger than 65336.

+
Immutable
st_geogfromewkb(val: bytes) → geography

Returns the Geography from an EWKB representation.

+
Immutable
st_geogfromewkt(val: string) → geography

Returns the Geography from an EWKT representation.

+
Immutable
st_geogfromgeojson(val: string) → geography

Returns the Geography from a GeoJSON representation.

+
Immutable
st_geogfromgeojson(val: jsonb) → geography

Returns the Geography from a GeoJSON representation.

+
Immutable
st_geogfromtext(str: string, srid: int) → geography

Returns the Geography from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geogfromtext(val: string) → geography

Returns the Geography from a WKT or EWKT representation.

+
Immutable
st_geogfromwkb(bytes: bytes, srid: int) → geography

Returns the Geography from a WKB (or EWKB) representation with the given SRID set.

+
Immutable
st_geogfromwkb(val: bytes) → geography

Returns the Geography from a WKB (or EWKB) representation.

+
Immutable
st_geographyfromtext(str: string, srid: int) → geography

Returns the Geography from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geographyfromtext(val: string) → geography

Returns the Geography from a WKT or EWKT representation.

+
Immutable
st_geohash(geography: geography) → string

Returns a GeoHash representation of the geography with full precision if a point is provided, or with variable precision based on the size of the feature.

+
Immutable
st_geohash(geography: geography, precision: int) → string

Returns a GeoHash representation of the geography with the supplied precision.

+
Immutable
st_geohash(geometry: geometry) → string

Returns a GeoHash representation of the geometry with full precision if a point is provided, or with variable precision based on the size of the feature. This will error if any coordinates are outside the bounds of longitude/latitude.

+
Immutable
st_geohash(geometry: geometry, precision: int) → string

Returns a GeoHash representation of the geometry with the supplied precision. This will error if any coordinates are outside the bounds of longitude/latitude.

+
Immutable
st_geomcollfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not GeometryCollection, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geomcollfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geomcollfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geomcollfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geometryfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geometryfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_geometryn(geometry: geometry, n: int) → geometry

Returns the n-th Geometry (1-indexed). Returns NULL if out of bounds.

+
Immutable
st_geometrytype(geometry: geometry) → string

Returns the type of geometry as a string prefixed with ST_.

+

This function utilizes the GEOS module.

+
Immutable
st_geomfromewkb(val: bytes) → geometry

Returns the Geometry from an EWKB representation.

+
Immutable
st_geomfromewkt(val: string) → geometry

Returns the Geometry from an EWKT representation.

+
Immutable
st_geomfromgeohash(geohash: string) → geometry

Return a POLYGON Geometry from a GeoHash string with max precision.

+
Immutable
st_geomfromgeohash(geohash: string, precision: int) → geometry

Return a POLYGON Geometry from a GeoHash string with supplied precision.

+
Immutable
st_geomfromgeojson(val: string) → geometry

Returns the Geometry from a GeoJSON representation.

+
Immutable
st_geomfromgeojson(val: jsonb) → geometry

Returns the Geometry from a GeoJSON representation.

+
Immutable
st_geomfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geomfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_geomfromwkb(bytes: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with the given SRID set.

+
Immutable
st_geomfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation.

+
Immutable
st_hasarc(geometry: geometry) → bool

Returns whether there is a CIRCULARSTRING in the geometry.

+
Immutable
st_hausdorffdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the Hausdorff distance between the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_hausdorffdistance(geometry_a: geometry, geometry_b: geometry, densify_frac: float) → float

Returns the Hausdorff distance between the given geometries, with the given segment densification (range 0.0-1.0).

+

This function utilizes the GEOS module.

+
Immutable
st_interiorringn(geometry: geometry, n: int) → geometry

Returns the n-th (1-indexed) interior ring of a Polygon as a LineString. Returns NULL if the shape is not a Polygon, or the ring does not exist.

+
Immutable
st_intersection(geography_a: geography, geography_b: geography) → geography

Returns the point intersections of the given geographies.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+

This function utilizes the GEOS module.

+
Immutable
st_intersection(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the point intersections of the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_intersection(geometry_a_str: string, geometry_b_str: string) → geometry

Returns the point intersections of the given geometries.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_intersects(geography_a: geography, geography_b: geography) → bool

Returns true if geography_a shares any portion of space with geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_intersects(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_intersects(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_isclosed(geometry: geometry) → bool

Returns whether the geometry is closed as defined by whether the start and end points are coincident. Points are considered closed, empty geometries are not. For collections and multi-types, all members must be closed, as must all polygon rings.

+
Immutable
st_iscollection(geometry: geometry) → bool

Returns whether the geometry is of a collection type (including multi-types).

+
Immutable
st_isempty(geometry: geometry) → bool

Returns whether the geometry is empty.

+
Immutable
st_ispolygonccw(geometry: geometry) → bool

Returns whether the Polygon objects inside the Geometry have exterior rings in the counter-clockwise orientation and interior rings in the clockwise orientation. Non-Polygon objects are considered counter-clockwise.

+
Immutable
st_ispolygoncw(geometry: geometry) → bool

Returns whether the Polygon objects inside the Geometry have exterior rings in the clockwise orientation and interior rings in the counter-clockwise orientation. Non-Polygon objects are considered clockwise.

+
Immutable
st_isring(geometry: geometry) → bool

Returns whether the geometry is a single linestring that is closed and simple, as defined by ST_IsClosed and ST_IsSimple.

+

This function utilizes the GEOS module.

+
Immutable
st_issimple(geometry: geometry) → bool

Returns true if the geometry has no anomalous geometric points, e.g. that it intersects with or lies tangent to itself.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalid(geometry: geometry) → bool

Returns whether the geometry is valid as defined by the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalid(geometry: geometry, flags: int) → bool

Returns whether the geometry is valid.

+

For flags=0, validity is defined by the OGC spec.

+

For flags=1, validity considers self-intersecting rings forming holes as valid as per ESRI. This is not valid under OGC and CRDB spatial operations may not operate correctly.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidreason(geometry: geometry) → string

Returns a string containing the reason the geometry is invalid along with the point of interest, or “Valid Geometry” if it is valid. Validity is defined by the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidreason(geometry: geometry, flags: int) → string

Returns the reason the geometry is invalid or “Valid Geometry” if it is valid.

+

For flags=0, validity is defined by the OGC spec.

+

For flags=1, validity considers self-intersecting rings forming holes as valid as per ESRI. This is not valid under OGC and CRDB spatial operations may not operate correctly.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidtrajectory(geometry: geometry) → bool

Returns whether the geometry encodes a valid trajectory.

+

Note the geometry must be a LineString with M coordinates.

+
Immutable
st_length(geography: geography) → float

Returns the length of the given geography in meters. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_length(geography: geography, use_spheroid: bool) → float

Returns the length of the given geography in meters.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_length(geometry: geometry) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+
Immutable
st_length(geometry_str: string) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_length2d(geometry: geometry) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+
Immutable
st_linecrossingdirection(linestring_a: geometry, linestring_b: geometry) → int

Returns an integer value defining behavior of crossing of lines: +0: lines do not cross, +-1: linestring_b crosses linestring_a from right to left, +1: linestring_b crosses linestring_a from left to right, +-2: linestring_b crosses linestring_a multiple times from right to left, +2: linestring_b crosses linestring_a multiple times from left to right, +-3: linestring_b crosses linestring_a multiple times from left to left, +3: linestring_b crosses linestring_a multiple times from right to right.

+

Note that the top vertex of the segment touching another line does not count as a crossing, but the bottom vertex of segment touching another line is considered a crossing.

+
Immutable
st_linefromencodedpolyline(encoded_polyline: string) → geometry

Creates a LineString from an Encoded Polyline string.

+

Returns valid results only if the polyline was encoded with 5 decimal places.

+

See http://developers.google.com/maps/documentation/utilities/polylinealgorithm

+
Immutable
st_linefromencodedpolyline(encoded_polyline: string, precision: int4) → geometry

Creates a LineString from an Encoded Polyline string.

+

Precision specifies how many decimal places will be preserved in Encoded Polyline. Value should be the same on encoding and decoding, or coordinates will be incorrect.

+

See http://developers.google.com/maps/documentation/utilities/polylinealgorithm

+
Immutable
st_linefrommultipoint(geometry: geometry) → geometry

Creates a LineString from a MultiPoint geometry.

+
Immutable
st_linefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not LineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_linefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_lineinterpolatepoint(geometry: geometry, fraction: float) → geometry

Returns a point along the given LineString which is at given fraction of LineString’s total length.

+

This function utilizes the GEOS module.

+
Immutable
st_lineinterpolatepoints(geometry: geometry, fraction: float) → geometry

Returns one or more points along the LineString which is at an integral multiples of given fraction of LineString’s total length.

+

Note If the result has zero or one points, it will be returned as a POINT. If it has two or more points, it will be returned as a MULTIPOINT.

+

This function utilizes the GEOS module.

+
Immutable
st_lineinterpolatepoints(geometry: geometry, fraction: float, repeat: bool) → geometry

Returns one or more points along the LineString which is at an integral multiples of given fraction of LineString’s total length. If repeat is false (default true) then it returns first point.

+

Note If the result has zero or one points, it will be returned as a POINT. If it has two or more points, it will be returned as a MULTIPOINT.

+

This function utilizes the GEOS module.

+
Immutable
st_linelocatepoint(line: geometry, point: geometry) → float

Returns a float between 0 and 1 representing the location of the closest point on LineString to the given Point, as a fraction of total 2d line length.

+
Immutable
st_linemerge(geometry: geometry) → geometry

Returns a LineString or MultiLineString by joining together constituents of a MultiLineString with matching endpoints. If the input is not a MultiLineString or LineString, an empty GeometryCollection is returned.

+

This function utilizes the GEOS module.

+
Immutable
st_linestringfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not LineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_linestringfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linestringfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linestringfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linesubstring(linestring: geometry, start_fraction: decimal, end_fraction: decimal) → geometry

Return a linestring being a substring of the input one starting and ending at the given fractions of total 2D length. Second and third arguments are float8 values between 0 and 1.

+
Immutable
st_linesubstring(linestring: geometry, start_fraction: float, end_fraction: float) → geometry

Return a linestring being a substring of the input one starting and ending at the given fractions of total 2D length. Second and third arguments are float8 values between 0 and 1.

+
Immutable
st_longestline(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the LineString corresponding to the max distance across every pair of points comprising the given geometries.

+

Note if geometries are the same, it will return the LineString with the maximum distance between the geometry’s vertexes. The function will return the longest line that was discovered first when comparing maximum distances if more than one is found.

+
Immutable
st_m(geometry: geometry) → float

Returns the M coordinate of a geometry if it is a Point.

+
Immutable
st_makebox2d(geometry_a: geometry, geometry_b: geometry) → box2d

Creates a box2d from two points. Errors if arguments are not two non-empty points.

+
Immutable
st_makeenvelope(xmin: float, ymin: float, xmax: float, ymax: float) → geometry

Creates a rectangular Polygon from the minimum and maximum values for X and Y with SRID 0.

+
Immutable
st_makeenvelope(xmin: float, ymin: float, xmax: float, ymax: float, srid: int) → geometry

Creates a rectangular Polygon from the minimum and maximum values for X and Y with the given SRID.

+
Immutable
st_makepoint(x: float, y: float) → geometry

Returns a new Point with the given X and Y coordinates.

+
Immutable
st_makepoint(x: float, y: float, z: float) → geometry

Returns a new Point with the given X, Y, and Z coordinates.

+
Immutable
st_makepoint(x: float, y: float, z: float, m: float) → geometry

Returns a new Point with the given X, Y, Z, and M coordinates.

+
Immutable
st_makepointm(x: float, y: float, m: float) → geometry

Returns a new Point with the given X, Y, and M coordinates.

+
Immutable
st_makepolygon(geometry: geometry) → geometry

Returns a new Polygon with the given outer LineString.

+
Immutable
st_makepolygon(outer: geometry, interior: anyelement[]) → geometry

Returns a new Polygon with the given outer LineString and interior (hole) LineString(s).

+
Immutable
st_makevalid(geometry: geometry) → geometry

Returns a valid form of the given geometry according to the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_maxdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the maximum distance across every pair of points comprising the given geometries. Note if the geometries are the same, it will return the maximum distance between the geometry’s vertexes.

+
Immutable
st_memsize(geometry: geometry) → int

Returns the amount of memory space (in bytes) the geometry takes.

+
Immutable
st_minimumboundingcircle(geometry: geometry) → geometry

Returns the smallest circle polygon that can fully contain a geometry.

+
Immutable
st_minimumboundingcircle(geometry: geometry, num_segs: int) → geometry

Returns the smallest circle polygon that can fully contain a geometry.

+
Immutable
st_minimumboundingradius(geometry: geometry) → tuple{geometry AS center, float AS radius}

Returns a record containing the center point and radius of the smallest circle that can fully contain the given geometry.

+
Immutable
st_minimumclearance(geometry: geometry) → float

Returns the minimum distance a vertex can move before producing an invalid geometry. Returns Infinity if no minimum clearance can be found (e.g. for a single point).

+
Immutable
st_minimumclearanceline(geometry: geometry) → geometry

Returns a LINESTRING spanning the minimum distance a vertex can move before producing an invalid geometry. If no minimum clearance can be found (e.g. for a single point), an empty LINESTRING is returned.

+
Immutable
st_mlinefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mlinefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mlinefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mlinefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mpointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mpointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpolyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mpolyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_mpolyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_mpolyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multi(geometry: geometry) → geometry

Returns the geometry as a new multi-geometry, e.g. converts a POINT to a MULTIPOINT. If the input is already a multitype or collection, it is returned as is.

+
Immutable
st_multilinefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multilinefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multilinestringfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multipointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipolyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipolyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipolygonfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_ndims(geometry: geometry) → int

Returns the number of coordinate dimensions of a given Geometry.

+
Immutable
st_node(geometry: geometry) → geometry

Adds a node on a geometry for each intersection. Resulting geometry is always a MultiLineString.

+
Immutable
st_normalize(geometry: geometry) → geometry

Returns the geometry in its normalized form.

+

This function utilizes the GEOS module.

+
Immutable
st_npoints(geometry: geometry) → int

Returns the number of points in a given Geometry. Works for any shape type.

+
Immutable
st_nrings(geometry: geometry) → int

Returns the number of rings in a Polygon Geometry. Returns 0 if the shape is not a Polygon.

+
Immutable
st_numgeometries(geometry: geometry) → int

Returns the number of shapes inside a given Geometry.

+
Immutable
st_numinteriorring(geometry: geometry) → int

Returns the number of interior rings in a Polygon Geometry. Returns NULL if the shape is not a Polygon.

+
Immutable
st_numinteriorrings(geometry: geometry) → int

Returns the number of interior rings in a Polygon Geometry. Returns NULL if the shape is not a Polygon.

+
Immutable
st_numpoints(geometry: geometry) → int

Returns the number of points in a LineString. Returns NULL if the Geometry is not a LineString.

+
Immutable
st_orderingequals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is exactly equal to geometry_b, having all coordinates in the same order, as well as the same type, SRID, bounding box, and so on.

+
Immutable
st_orientedenvelope(geometry: geometry) → geometry

Returns a minimum rotated rectangle enclosing a geometry. +Note that more than one minimum rotated rectangle may exist. +May return a Point or LineString in the case of degenerate inputs.

+
Immutable
st_overlaps(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a intersects but does not completely contain geometry_b, or vice versa. “Does not completely” implies ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = false.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_perimeter(geography: geography) → float

Returns the perimeter of the given geography in meters. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_perimeter(geography: geography, use_spheroid: bool) → float

Returns the perimeter of the given geography in meters.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_perimeter(geometry: geometry) → float

Returns the perimeter of the given geometry.

+

Note ST_Perimeter is only valid for Polygon - use ST_Length for LineString.

+

This function utilizes the GEOS module.

+
Immutable
st_perimeter2d(geometry: geometry) → float

Returns the perimeter of the given geometry.

+

Note ST_Perimeter is only valid for Polygon - use ST_Length for LineString.

+

This function utilizes the GEOS module.

+
Immutable
st_point(x: float, y: float) → geometry

Returns a new Point with the given X and Y coordinates.

+
Immutable
st_pointfromgeohash(geohash: string) → geometry

Return a POINT Geometry from a GeoHash string with max precision.

+
Immutable
st_pointfromgeohash(geohash: string, precision: int) → geometry

Return a POINT Geometry from a GeoHash string with supplied precision.

+
Immutable
st_pointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Point, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_pointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointinsidecircle(geometry: geometry, x_coord: float, y_coord: float, radius: float) → bool

Returns true if the geometry is a point and is inside the circle. Returns false otherwise.

+
Immutable
st_pointn(geometry: geometry, n: int) → geometry

Returns the n-th Point of a LineString (1-indexed). Returns NULL if out of bounds or not a LineString.

+
Immutable
st_pointonsurface(geometry: geometry) → geometry

Returns a point that intersects with the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_points(geometry: geometry) → geometry

Returns all coordinates in the given Geometry as a MultiPoint, including duplicates.

+
Immutable
st_polyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Polygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_polyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygon(geometry: geometry, srid: int) → geometry

Returns a new Polygon from the given LineString and sets its SRID. It is equivalent to ST_MakePolygon with a single argument followed by ST_SetSRID.

+
Immutable
st_polygonfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Polygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_polygonfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygonfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygonfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_project(geography: geography, distance: float, azimuth: float) → geography

Returns a point projected from a start point along a geodesic using a given distance and azimuth (bearing). +This is known as the direct geodesic problem.

+

The distance is given in meters. Negative values are supported.

+

The azimuth (also known as heading or bearing) is given in radians. It is measured clockwise from true north (azimuth zero). +East is azimuth π/2 (90 degrees); south is azimuth π (180 degrees); west is azimuth 3π/2 (270 degrees). +Negative azimuth values and values greater than 2π (360 degrees) are supported.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry) → string

Returns the DE-9IM spatial relation between geometry_a and geometry_b.

+

This function utilizes the GEOS module.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry, bnr: int) → string

Returns the DE-9IM spatial relation between geometry_a and geometry_b using the given boundary node rule (1:OGC/MOD2, 2:Endpoint, 3:MultivalentEndpoint, 4:MonovalentEndpoint).

+

This function utilizes the GEOS module.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry, pattern: string) → bool

Returns whether the DE-9IM spatial relation between geometry_a and geometry_b matches the DE-9IM pattern.

+

This function utilizes the GEOS module.

+
Immutable
st_relatematch(intersection_matrix: string, pattern: string) → bool

Returns whether the given DE-9IM intersection matrix satisfies the given pattern.

+
Immutable
st_removepoint(line_string: geometry, index: int) → geometry

Removes the Point at the given 0-based index and returns the modified LineString geometry.

+
Immutable
st_removerepeatedpoints(geometry: geometry) → geometry

Returns a geometry with repeated points removed.

+
Immutable
st_removerepeatedpoints(geometry: geometry, tolerance: float) → geometry

Returns a geometry with repeated points removed, within the given distance tolerance.

+
Immutable
st_reverse(geometry: geometry) → geometry

Returns a modified geometry by reversing the order of its vertices.

+
Immutable
st_rotate(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated around the origin by a rotation angle.

+
Immutable
st_rotate(g: geometry, angle_radians: float, origin_point: geometry) → geometry

Returns a modified Geometry whose coordinates are rotated around the provided origin by a rotation angle.

+
Immutable
st_rotate(g: geometry, angle_radians: float, origin_x: float, origin_y: float) → geometry

Returns a modified Geometry whose coordinates are rotated around the provided origin by a rotation angle.

+
Immutable
st_rotatex(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the x axis by a rotation angle.

+
Immutable
st_rotatey(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the y axis by a rotation angle.

+
Immutable
st_rotatez(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the z axis by a rotation angle.

+
Immutable
st_s2covering(geography: geography) → geography

Returns a geography which represents the S2 covering used by the index using the default index configuration.

+
Immutable
st_s2covering(geography: geography, settings: string) → geography

Returns a geography which represents the S2 covering used by the index using the index configuration specified by the settings parameter.

+

The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX ... WITH (...), e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using SELECT ST_S2Covering(geography, 's2_max_level=15,s2_level_mod=3').

+
Immutable
st_s2covering(geometry: geometry) → geometry

Returns a geometry which represents the S2 covering used by the index using the default index configuration.

+
Immutable
st_s2covering(geometry: geometry, settings: string) → geometry

Returns a geometry which represents the S2 covering used by the index using the index configuration specified by the settings parameter.

+

The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX ... WITH (...), e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using SELECT ST_S2Covering(geometry, 's2_max_level=15,s2_level_mod=3')

+
Immutable
st_scale(g: geometry, factor: geometry) → geometry

Returns a modified Geometry scaled by taking in a Geometry as the factor.

+
Immutable
st_scale(g: geometry, factor: geometry, origin: geometry) → geometry

Returns a modified Geometry scaled by the Geometry factor relative to a false origin.

+
Immutable
st_scale(geometry: geometry, x_factor: float, y_factor: float) → geometry

Returns a modified Geometry scaled by the given factors.

+
Immutable
st_segmentize(geography: geography, max_segment_length_meters: float) → geography

Returns a modified Geography having no segment longer than the given max_segment_length meters.

+

The calculations are done on a sphere.

+

This function utilizes the S2 library for spherical calculations.

+
Immutable
st_segmentize(geometry: geometry, max_segment_length: float) → geometry

Returns a modified Geometry having no segment longer than the given max_segment_length. Length units are in units of spatial reference.

+
Immutable
st_setpoint(line_string: geometry, index: int, point: geometry) → geometry

Sets the Point at the given 0-based index and returns the modified LineString geometry.

+
Immutable
st_setsrid(geography: geography, srid: int) → geography

Sets a Geography to a new SRID without transforming the coordinates.

+
Immutable
st_setsrid(geometry: geometry, srid: int) → geometry

Sets a Geometry to a new SRID without transforming the coordinates.

+
Immutable
st_sharedpaths(geometry_a: geometry, geometry_b: geometry) → geometry

Returns a collection containing paths shared by the two input geometries.

+

Those going in the same direction are in the first element of the collection, +those going in the opposite direction are in the second element. +The paths themselves are given in the direction of the first geometry.

+
Immutable
st_shiftlongitude(geometry: geometry) → geometry

Returns a modified version of a geometry in which the longitude (X coordinate) of each point is incremented by 360 if it is <0 and decremented by 360 if it is >180. The result is only meaningful if the coordinates are in longitude/latitude.

+
Immutable
st_shortestline(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the LineString that corresponds to the minimum distance across every pair of points comprising the given geometries.

+

Note if geometries are the same, it will return the LineString with the minimum distance between the geometry’s vertexes. The function will return the shortest line that was discovered first when comparing minimum distances if more than one is found.

+
Immutable
st_simplify(geometry: geometry, tolerance: float) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm.

+

This function utilizes the GEOS module.

+
Immutable
st_simplify(geometry: geometry, tolerance: float, preserve_collapsed: bool) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm, retaining objects that would be too small given the tolerance if preserve_collapsed is set to true.

+
Immutable
st_simplifypreservetopology(geometry: geometry, tolerance: float) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm, avoiding the creation of invalid geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_snap(input: geometry, target: geometry, tolerance: float) → geometry

Snaps the vertices and segments of the input geometry to the target geometry’s vertices. +Tolerance is used to control where snapping is performed. The result geometry is the input geometry with the vertices snapped. +If no snapping occurs then the input geometry is returned unchanged.

+
Immutable
st_snaptogrid(geometry: geometry, origin: geometry, size_x: float, size_y: float, size_z: float, size_m: float) → geometry

Snap a geometry to a grid defined by the given origin and X, Y, Z, and M cell sizes. Any dimension with a 0 cell size will not be snapped.

+
Immutable
st_snaptogrid(geometry: geometry, origin_x: float, origin_y: float, size_x: float, size_y: float) → geometry

Snap a geometry to a grid with X coordinates snapped to size_x and Y coordinates snapped to size_y based on an origin of (origin_x, origin_y).

+
Immutable
st_snaptogrid(geometry: geometry, size: float) → geometry

Snap a geometry to a grid of the given size. The specified size is only used to snap X and Y coordinates.

+
Immutable
st_snaptogrid(geometry: geometry, size_x: float, size_y: float) → geometry

Snap a geometry to a grid with X coordinates snapped to size_x and Y coordinates snapped to size_y.

+
Immutable
st_srid(geography: geography) → int

Returns the Spatial Reference Identifier (SRID) for the ST_Geography as defined in spatial_ref_sys table.

+
Immutable
st_srid(geometry: geometry) → int

Returns the Spatial Reference Identifier (SRID) for the ST_Geometry as defined in spatial_ref_sys table.

+
Immutable
st_startpoint(geometry: geometry) → geometry

Returns the first point of a geometry which has shape LineString. Returns NULL if the geometry is not a LineString.

+
Immutable
st_subdivide(geometry: geometry) → geometry

Returns a geometry divided into parts, where each part contains no more than 256 vertices.

+
Immutable
st_subdivide(geometry: geometry, max_vertices: int4) → geometry

Returns a geometry divided into parts, where each part contains no more than the number of vertices provided.

+
Immutable
st_summary(geography: geography) → string

Returns a text summary of the contents of the geography.

+

Flags shown in square brackets after the geometry type have the following meaning:

+
    +
  • M: has M coordinate
  • +
  • Z: has Z coordinate
  • +
  • B: has a cached bounding box
  • +
  • G: is geography
  • +
  • S: has spatial reference system
  • +
+
Immutable
st_summary(geometry: geometry) → string

Returns a text summary of the contents of the geometry.

+

Flags shown in square brackets after the geometry type have the following meaning:

+
    +
  • M: has M coordinate
  • +
  • Z: has Z coordinate
  • +
  • B: has a cached bounding box
  • +
  • G: is geography
  • +
  • S: has spatial reference system
  • +
+
Immutable
st_swapordinates(geometry: geometry, swap_ordinate_string: string) → geometry

Returns a version of the given geometry with given ordinates swapped. +The swap_ordinate_string parameter is a 2-character string naming the ordinates to swap. Valid names are: x, y, z and m.

+
Immutable
st_symdifference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the symmetric difference of both geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_symmetricdifference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the symmetric difference of both geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4, bounds: geometry) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4, bounds: geometry, margin: float) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_touches(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if the only points in common between geometry_a and geometry_b are on the boundary. Note points do not touch other points.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_transform(geometry: geometry, from_proj_text: string, srid: int) → geometry

Transforms a geometry into the coordinate reference system assuming the from_proj_text to the new to_proj_text by projecting its coordinates. The supplied SRID is set on the new geometry.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, from_proj_text: string, to_proj_text: string) → geometry

Transforms a geometry into the coordinate reference system assuming the from_proj_text to the new to_proj_text by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, srid: int) → geometry

Transforms a geometry into the given SRID coordinate reference system by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, to_proj_text: string) → geometry

Transforms a geometry into the coordinate reference system referenced by the projection text by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_translate(g: geometry, delta_x: float, delta_y: float) → geometry

Returns a modified Geometry translated by the given deltas.

+
Immutable
st_translate(g: geometry, delta_x: float, delta_y: float, delta_z: float) → geometry

Returns a modified Geometry translated by the given deltas.

+
Immutable
st_transscale(geometry: geometry, delta_x: float, delta_y: float, x_factor: float, y_factor: float) → geometry

Translates the geometry using the deltaX and deltaY args, then scales it using the XFactor, YFactor args, working in 2D only.

+
Immutable
st_unaryunion(geometry: geometry) → geometry

Returns a union of the components for any geometry or geometry collection provided. Dissolves boundaries of a multipolygon.

+
Immutable
st_voronoilines(geometry: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoilines(geometry: geometry, tolerance: float) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoilines(geometry: geometry, tolerance: float, extend_to: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoipolygons(geometry: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_voronoipolygons(geometry: geometry, tolerance: float) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_voronoipolygons(geometry: geometry, tolerance: float, extend_to: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_within(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is completely inside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_wkbtosql(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation.

+
Immutable
st_wkttosql(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_x(geometry: geometry) → float

Returns the X coordinate of a geometry if it is a Point.

+
Immutable
st_xmax(box2d: box2d) → float

Returns the maximum X ordinate of a box2d.

+
Immutable
st_xmax(geometry: geometry) → float

Returns the maximum X ordinate of a geometry.

+
Immutable
st_xmin(box2d: box2d) → float

Returns the minimum X ordinate of a box2d.

+
Immutable
st_xmin(geometry: geometry) → float

Returns the minimum X ordinate of a geometry.

+
Immutable
st_y(geometry: geometry) → float

Returns the Y coordinate of a geometry if it is a Point.

+
Immutable
st_ymax(box2d: box2d) → float

Returns the maximum Y ordinate of a box2d.

+
Immutable
st_ymax(geometry: geometry) → float

Returns the maximum Y ordinate of a geometry.

+
Immutable
st_ymin(box2d: box2d) → float

Returns the minimum Y ordinate of a box2d.

+
Immutable
st_ymin(geometry: geometry) → float

Returns the minimum Y ordinate of a geometry.

+
Immutable
st_z(geometry: geometry) → float

Returns the Z coordinate of a geometry if it is a Point.

+
Immutable
st_zmflag(geometry: geometry) → int2

Returns a code based on the ZM coordinate dimension of a geometry (XY = 0, XYM = 1, XYZ = 2, XYZM = 3).

+
Immutable
+ +### String and byte functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
ascii(val: string) → int

Returns the character code of the first character in val. Despite the name, the function supports Unicode too.

+
Immutable
bit_count(val: bytes) → int

Calculates the number of bits set used to represent val.

+
Immutable
bit_count(val: varbit) → int

Calculates the number of bits set used to represent val.

+
Immutable
bit_length(val: bytes) → int

Calculates the number of bits used to represent val.

+
Immutable
bit_length(val: string) → int

Calculates the number of bits used to represent val.

+
Immutable
bit_length(val: varbit) → int

Calculates the number of bits used to represent val.

+
Immutable
bitmask_and(a: string, b: string) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: string, b: varbit) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: varbit, b: string) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: varbit, b: varbit) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: string, b: string) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: string, b: varbit) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: varbit, b: string) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: varbit, b: varbit) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: string, b: string) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: string, b: varbit) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: varbit, b: string) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: varbit, b: varbit) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
btrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the beginning or end of input (applies recursively).

+

For example, btrim('doggie', 'eod') returns ggi.

+
Immutable
btrim(val: string) → string

Removes all spaces from the beginning and end of val.

+
Immutable
char_length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
char_length(val: string) → int

Calculates the number of characters in val.

+
Immutable
character_length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
character_length(val: string) → int

Calculates the number of characters in val.

+
Immutable
chr(val: int) → string

Returns the character with the code given in val. Inverse function of ascii().

+
Immutable
compress(data: bytes, codec: string) → bytes

Compress data with the specified codec (‘gzip’, ‘lz4’, ‘snappy’, ‘zstd’).

+
Immutable
concat(string...) → string

Concatenates a comma-separated list of strings.

+
Immutable
concat_ws(string...) → string

Uses the first argument as a separator between the concatenation of the subsequent arguments.

+

For example concat_ws('!','wow','great') returns wow!great.

+
Immutable
convert_from(str: bytes, enc: string) → string

Decode the bytes in str into a string using encoding enc. Supports encodings ‘UTF8’ and ‘LATIN1’.

+
Immutable
convert_to(str: string, enc: string) → bytes

Encode the string str as a byte array using encoding enc. Supports encodings ‘UTF8’ and ‘LATIN1’.

+
Immutable
decode(text: string, format: string) → bytes

Decodes data using format (hex / escape / base64).

+
Immutable
decompress(data: bytes, codec: string) → bytes

Decompress data with the specified codec (‘gzip’, ‘lz4’, ‘snappy’, ‘zstd’).

+
Immutable
difference(source: string, target: string) → int

Convert two strings to their Soundex codes and report the number of matching code positions.

+
Immutable
encode(data: bytes, format: string) → string

Encodes data using format (hex / escape / base64).

+
Immutable
format(string, anyelement...) → string

Interprets the first argument as a format string similar to C sprintf and interpolates the remaining arguments.

+
Stable
from_ip(val: bytes) → string

Converts the byte string representation of an IP to its character string representation.

+
Immutable
from_uuid(val: bytes) → string

Converts the byte string representation of a UUID to its character string representation.

+
Immutable
get_bit(bit_string: varbit, index: int) → int

Extracts a bit at given index in the bit array.

+
Immutable
get_bit(byte_string: bytes, index: int) → int

Extracts a bit at the given index in the byte array.

+
Immutable
get_byte(byte_string: bytes, index: int) → int

Extracts a byte at the given index in the byte array.

+
Immutable
initcap(val: string) → string

Capitalizes the first letter of val.

+
Immutable
left(input: bytes, return_set: int) → bytes

Returns the first return_set bytes from input.

+
Immutable
left(input: string, return_set: int) → string

Returns the first return_set characters from input.

+
Immutable
length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
length(val: string) → int

Calculates the number of characters in val.

+
Immutable
length(val: varbit) → int

Calculates the number of bits in val.

+
Immutable
lower(val: string) → string

Converts all characters in val to their lower-case equivalents.

+
Immutable
lpad(string: string, length: int) → string

Pads string to length by adding ’ ’ to the left of string. If string is longer than length it is truncated.

+
Immutable
lpad(string: string, length: int, fill: string) → string

Pads string by adding fill to the left of string to make it length. If string is longer than length it is truncated.

+
Immutable
ltrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the beginning (left-hand side) of input (applies recursively).

+

For example, ltrim('doggie', 'od') returns ggie.

+
Immutable
ltrim(val: string) → string

Removes all spaces from the beginning (left-hand side) of val.

+
Immutable
md5(bytes...) → string

Calculates the MD5 hash value of a set of values.

+
Leakproof
md5(string...) → string

Calculates the MD5 hash value of a set of values.

+
Leakproof
octet_length(val: bytes) → int

Calculates the number of bytes used to represent val.

+
Immutable
octet_length(val: string) → int

Calculates the number of bytes used to represent val.

+
Immutable
octet_length(val: varbit) → int

Calculates the number of bits used to represent val.

+
Immutable
overlay(input: string, overlay_val: string, start_pos: int) → string

Replaces characters in input with overlay_val starting at start_pos (begins at 1).

+

For example, overlay('doggie', 'CAT', 2) returns dCATie.

+
Immutable
overlay(input: string, overlay_val: string, start_pos: int, end_pos: int) → string

Deletes the characters in input between start_pos and end_pos (count starts at 1), and then insert overlay_val at start_pos.

+
Immutable
parse_date(string: string, datestyle: string) → date

Parses a date assuming it is in format specified by DateStyle.

+
Immutable
parse_date(val: string) → date

Parses a date assuming it is in MDY format.

+
Immutable
parse_ident(qualified_identifier: string) → string[]

Splits qualified_identifier into an array of identifiers, removing any quoting of individual identifiers. Extra characters after the last identifier are considered an error.

+
Immutable
parse_ident(qualified_identifier: string, strict: bool) → string[]

Splits qualified_identifier into an array of identifiers, removing any quoting of individual identifiers. If strict is false, then extra characters after the last identifier are ignored.

+
Immutable
parse_interval(string: string, style: string) → interval

Convert a string to an interval using the given IntervalStyle.

+
Immutable
parse_interval(val: string) → interval

Convert a string to an interval assuming the Postgres IntervalStyle.

+
Immutable
parse_time(string: string, timestyle: string) → time

Parses a time assuming the date (if any) is in format specified by DateStyle.

+
Immutable
parse_time(val: string) → time

Parses a time assuming the date (if any) is in MDY format.

+
Immutable
parse_timestamp(string: string, datestyle: string) → timestamp

Convert a string containing an absolute timestamp to the corresponding timestamp assuming dates formatted using the given DateStyle.

+
Immutable
parse_timestamp(val: string) → timestamp

Convert a string containing an absolute timestamp to the corresponding timestamp assuming dates are in MDY format.

+
Immutable
parse_timetz(string: string, timestyle: string) → timetz

Parses a timetz assuming the date (if any) is in format specified by DateStyle.

+
Immutable
parse_timetz(val: string) → timetz

Parses a timetz assuming the date (if any) is in MDY format.

+
Immutable
prettify_statement(statement: string, line_width: int, align_mode: int, case_mode: int) → string

Prettifies a statement using a user-configured pretty-printing config. +Align mode values range from 0 - 3, representing no, partial, full, and extra alignment respectively. +Case mode values range between 0 - 1, representing lower casing and upper casing respectively.

+
Immutable
prettify_statement(val: string) → string

Prettifies a statement using the default pretty-printing config.

+
Immutable
quote_ident(val: string) → string

Return val suitably quoted to serve as identifier in a SQL statement.

+
Immutable
quote_literal(val: string) → string

Return val suitably quoted to serve as string literal in a SQL statement.

+
Immutable
quote_literal(val: anyelement) → string

Coerce val to a string and then quote it as a literal.

+
Stable
quote_nullable(val: string) → string

Coerce val to a string and then quote it as a literal. If val is NULL, returns ‘NULL’.

+
Immutable
quote_nullable(val: anyelement) → string

Coerce val to a string and then quote it as a literal. If val is NULL, returns ‘NULL’.

+
Stable
regexp_extract(input: string, regex: string) → string

Returns the first match for the Regular Expression regex in input.

+
Immutable
regexp_replace(input: string, regex: string, replace: string) → string

Replaces matches for the Regular Expression regex in input with the Regular Expression replace.

+
Immutable
regexp_replace(input: string, regex: string, replace: string, flags: string) → string

Replaces matches for the regular expression regex in input with the regular expression replace using flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
repeat(input: string, repeat_counter: int) → string

Concatenates input repeat_counter number of times.

+

For example, repeat('dog', 2) returns dogdog.

+
Immutable
replace(input: string, find: string, replace: string) → string

Replaces all occurrences of find with replace in input.

+
Immutable
reverse(val: string) → string

Reverses the order of the string’s characters.

+
Immutable
right(input: bytes, return_set: int) → bytes

Returns the last return_set bytes from input.

+
Immutable
right(input: string, return_set: int) → string

Returns the last return_set characters from input.

+
Immutable
rpad(string: string, length: int) → string

Pads string to length by adding ’ ’ to the right of string. If string is longer than length it is truncated.

+
Immutable
rpad(string: string, length: int, fill: string) → string

Pads string to length by adding fill to the right of string. If string is longer than length it is truncated.

+
Immutable
rtrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the end (right-hand side) of input (applies recursively).

+

For example, rtrim('doggie', 'ei') returns dogg.

+
Immutable
rtrim(val: string) → string

Removes all spaces from the end (right-hand side) of val.

+
Immutable
set_bit(bit_string: varbit, index: int, to_set: int) → varbit

Updates a bit at given index in the bit array.

+
Immutable
set_bit(byte_string: bytes, index: int, to_set: int) → bytes

Updates a bit at the given index in the byte array.

+
Immutable
set_byte(byte_string: bytes, index: int, to_set: int) → bytes

Updates a byte at the given index in the byte array.

+
Immutable
sha1(bytes...) → string

Calculates the SHA1 hash value of a set of values.

+
Leakproof
sha1(string...) → string

Calculates the SHA1 hash value of a set of values.

+
Leakproof
sha224(bytes...) → string

Calculates the SHA224 hash value of a set of values.

+
Leakproof
sha224(string...) → string

Calculates the SHA224 hash value of a set of values.

+
Leakproof
sha256(bytes...) → string

Calculates the SHA256 hash value of a set of values.

+
Leakproof
sha256(string...) → string

Calculates the SHA256 hash value of a set of values.

+
Leakproof
sha384(bytes...) → string

Calculates the SHA384 hash value of a set of values.

+
Leakproof
sha384(string...) → string

Calculates the SHA384 hash value of a set of values.

+
Leakproof
sha512(bytes...) → string

Calculates the SHA512 hash value of a set of values.

+
Leakproof
sha512(string...) → string

Calculates the SHA512 hash value of a set of values.

+
Leakproof
similar_escape(pattern: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern.

+
Immutable
similar_escape(pattern: string, escape: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern using escape as an escape token.

+
Immutable
similar_to_escape(pattern: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern.

+
Immutable
similar_to_escape(pattern: string, escape: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern using escape as an escape token.

+
Immutable
similar_to_escape(unescaped: string, pattern: string, escape: string) → bool

Matches unescaped with pattern using escape as an escape token.

+
Immutable
split_part(input: string, delimiter: string, return_index_pos: int) → string

Splits input on delimiter and return the value in the return_index_pos position (starting at 1).

+

For example, split_part('123.456.789.0','.',3) returns 789.

+
Immutable
strpos(input: bytes, find: bytes) → int

Calculates the position where the byte subarray find begins in input.

+
Immutable
strpos(input: string, find: string) → int

Calculates the position where the string find begins in input.

+

For example, strpos('doggie', 'gie') returns 4.

+
Immutable
strpos(input: varbit, find: varbit) → int

Calculates the position where the bit subarray find begins in input.

+
Immutable
substr(input: bytes, start_pos: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: bytes, start_pos: int, length: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substr(input: string, regex: string) → string

Returns a substring of input that matches the regular expression regex.

+
Immutable
substr(input: string, regex: string, escape_char: string) → string

Returns a substring of input that matches the regular expression regex using escape_char as your escape character instead of \.

+
Immutable
substr(input: string, start_pos: int) → string

Returns a substring of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: string, start_pos: int, length: int) → string

Returns a substring of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substr(input: varbit, start_pos: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: varbit, start_pos: int, length: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: bytes, start_pos: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: bytes, start_pos: int, length: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: string, regex: string) → string

Returns a substring of input that matches the regular expression regex.

+
Immutable
substring(input: string, regex: string, escape_char: string) → string

Returns a substring of input that matches the regular expression regex using escape_char as your escape character instead of \.

+
Immutable
substring(input: string, start_pos: int) → string

Returns a substring of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: string, start_pos: int, length: int) → string

Returns a substring of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: varbit, start_pos: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: varbit, start_pos: int, length: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
to_char_with_style(date: date, datestyle: string) → string

Convert a date to a string assuming the string is formatted using the given DateStyle.

+
Immutable
to_char_with_style(interval: interval, style: string) → string

Convert an interval to a string using the given IntervalStyle.

+
Immutable
to_char_with_style(timestamp: timestamp, datestyle: string) → string

Convert a timestamp to a string assuming the string is formatted using the given DateStyle.

+
Immutable
to_english(val: int) → string

This function enunciates the value of its argument using English cardinals.

+
Immutable
to_hex(val: bytes) → string

Converts val to its hexadecimal representation.

+
Immutable
to_hex(val: int) → string

Converts val to its hexadecimal representation.

+
Immutable
to_hex(val: string) → string

Converts val to its hexadecimal representation.

+
Immutable
to_ip(val: string) → bytes

Converts the character string representation of an IP to its byte string representation.

+
Immutable
to_uuid(val: string) → bytes

Converts the character string representation of a UUID to its byte string representation.

+
Immutable
translate(input: string, find: string, replace: string) → string

In input, replaces the first character from find with the first character in replace; repeat for each character in find.

+

For example, translate('doggie', 'dog', '123'); returns 1233ie.

+
Immutable
ulid_to_uuid(val: string) → uuid

Converts a ULID string to its UUID-encoded representation.

+
Immutable
unaccent(val: string) → string

Removes accents (diacritic signs) from the text provided in val.

+
Immutable
upper(val: string) → string

Converts all characters in val to their upper-case equivalents.

+
Immutable
+ +### System info functions + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cluster_logical_timestamp() → decimal

Returns the logical time of the current transaction as +a CockroachDB HLC in decimal form.

+

Note that uses of this function disable server-side optimizations and +may increase either contention or retry errors, or both.

+

Returns an error if run in a transaction with an isolation level weaker than SERIALIZABLE.

+
Volatile
current_database() → string

Returns the current database.

+
Stable
current_schema() → string

Returns the current schema.

+
Stable
current_schemas(include_pg_catalog: bool) → string[]

Returns the valid schemas in the search path.

+
Stable
current_user() → string

Returns the current user. This function is provided for compatibility with PostgreSQL.

+
Stable
session_user() → string

Returns the session user. This function is provided for compatibility with PostgreSQL.

+
Stable
to_regclass(text: string) → regtype

Translates a textual relation name to its OID

+
Stable
to_regnamespace(text: string) → regtype

Translates a textual schema name to its OID

+
Stable
to_regproc(text: string) → regtype

Translates a textual function or procedure name to its OID

+
Stable
to_regprocedure(text: string) → regtype

Translates a textual function or procedure name (with argument types) to its OID

+
Stable
to_regrole(text: string) → regtype

Translates a textual role name to its OID

+
Stable
to_regtype(text: string) → regtype

Translates a textual type name to its OID

+
Stable
version() → string

Returns the node’s version of CockroachDB.

+
Volatile
+ +### TIMETZ functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
current_time() → time

Returns the current transaction’s time with no time zone.

+
Stable
current_time() → timetz

Returns the current transaction’s time with time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
current_time(precision: int) → time

Returns the current transaction’s time with no time zone.

+
Stable
current_time(precision: int) → timetz

Returns the current transaction’s time with time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime() → time

Returns the current transaction’s time with no time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime() → timetz

Returns the current transaction’s time with time zone.

+
Stable
localtime(precision: int) → time

Returns the current transaction’s time with no time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime(precision: int) → timetz

Returns the current transaction’s time with time zone.

+
Stable
+ +### Trigrams functions + + + + + + +
Function → ReturnsDescriptionVolatility
show_trgm(input: string) → string[]

Returns an array of all the trigrams in the given string.

+
Immutable
similarity(left: string, right: string) → float

Returns a number that indicates how similar the two arguments are. The range of the result is zero (indicating that the two strings are completely dissimilar) to one (indicating that the two strings are identical).

+
Immutable
+ +### UUID functions + + + + + +
Function → ReturnsDescriptionVolatility
uuid_to_ulid(val: uuid) → string

Converts a UUID-encoded ULID to its string representation.

+
Immutable
+ +### Compatibility functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
col_description(table_oid: oid, column_number: int) → string

Returns the comment for a table column, which is specified by the OID of its table and its column number. (obj_description cannot be used for table columns, since columns do not have OIDs of their own.)

+
Stable
current_setting(setting_name: string) → string

System info

+
Stable
current_setting(setting_name: string, missing_ok: bool) → string

System info

+
Stable
format_type(type_oid: oid, typemod: int) → string

Returns the SQL name of a data type that is identified by its type OID and possibly a type modifier. Currently, the type modifier is ignored.

+
Stable
getdatabaseencoding() → string

Returns the current encoding name used by the database.

+
Stable
has_any_column_privilege(table: string, privilege: string) → bool

Returns whether or not the current user has privileges for any column of table.

+
Stable
has_any_column_privilege(table: oid, privilege: string) → bool

Returns whether or not the current user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: string, table: string, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: string, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: oid, table: string, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: oid, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_column_privilege(table: string, column: int, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: string, column: string, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: oid, column: int, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: oid, column: string, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(user: string, table: string, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: string, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: oid, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: oid, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: string, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: string, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: oid, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: oid, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_database_privilege(database: string, privilege: string) → bool

Returns whether or not the current user has privileges for database.

+
Stable
has_database_privilege(database: oid, privilege: string) → bool

Returns whether or not the current user has privileges for database.

+
Stable
has_database_privilege(user: string, database: string, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: string, database: oid, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: oid, database: string, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: oid, database: oid, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_foreign_data_wrapper_privilege(fdw: string, privilege: string) → bool

Returns whether or not the current user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(fdw: oid, privilege: string) → bool

Returns whether or not the current user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: string, fdw: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: string, fdw: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: oid, fdw: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: oid, fdw: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_function_privilege(function: string, privilege: string) → bool

Returns whether or not the current user has privileges for function.

+
Stable
has_function_privilege(function: oid, privilege: string) → bool

Returns whether or not the current user has privileges for function.

+
Stable
has_function_privilege(user: string, function: string, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: string, function: oid, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: oid, function: string, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: oid, function: oid, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_language_privilege(language: string, privilege: string) → bool

Returns whether or not the current user has privileges for language.

+
Stable
has_language_privilege(language: oid, privilege: string) → bool

Returns whether or not the current user has privileges for language.

+
Stable
has_language_privilege(user: string, language: string, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: string, language: oid, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: oid, language: string, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: oid, language: oid, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_schema_privilege(schema: string, privilege: string) → bool

Returns whether or not the current user has privileges for schema.

+
Stable
has_schema_privilege(schema: oid, privilege: string) → bool

Returns whether or not the current user has privileges for schema.

+
Stable
has_schema_privilege(user: string, schema: string, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: string, schema: oid, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: oid, schema: string, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: oid, schema: oid, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_sequence_privilege(sequence: string, privilege: string) → bool

Returns whether or not the current user has privileges for sequence.

+
Stable
has_sequence_privilege(sequence: oid, privilege: string) → bool

Returns whether or not the current user has privileges for sequence.

+
Stable
has_sequence_privilege(user: string, sequence: string, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: string, sequence: oid, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: oid, sequence: string, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: oid, sequence: oid, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_server_privilege(server: string, privilege: string) → bool

Returns whether or not the current user has privileges for foreign server.

+
Stable
has_server_privilege(server: oid, privilege: string) → bool

Returns whether or not the current user has privileges for foreign server.

+
Stable
has_server_privilege(user: string, server: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: string, server: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: oid, server: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: oid, server: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_table_privilege(table: string, privilege: string) → bool

Returns whether or not the current user has privileges for table.

+
Stable
has_table_privilege(table: oid, privilege: string) → bool

Returns whether or not the current user has privileges for table.

+
Stable
has_table_privilege(user: string, table: string, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: string, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: oid, table: string, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: oid, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_tablespace_privilege(tablespace: string, privilege: string) → bool

Returns whether or not the current user has privileges for tablespace.

+
Stable
has_tablespace_privilege(tablespace: oid, privilege: string) → bool

Returns whether or not the current user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: string, tablespace: string, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: string, tablespace: oid, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: oid, tablespace: string, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: oid, tablespace: oid, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_type_privilege(type: string, privilege: string) → bool

Returns whether or not the current user has privileges for type.

+
Stable
has_type_privilege(type: oid, privilege: string) → bool

Returns whether or not the current user has privileges for type.

+
Stable
has_type_privilege(user: string, type: string, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: string, type: oid, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: oid, type: string, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: oid, type: oid, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
information_schema._pg_numeric_precision(typid: oid, typmod: int4) → int

Returns the precision of the given type with type modifier

+
Immutable
information_schema._pg_numeric_precision_radix(typid: oid, typmod: int4) → int

Returns the radix of the given type with type modifier

+
Immutable
information_schema._pg_numeric_scale(typid: oid, typmod: int4) → int

Returns the scale of the given type with type modifier

+
Immutable
nameconcatoid(name: string, oid: oid) → name

Used in the information_schema to produce specific_name columns, which are supposed to be unique per schema. The result is the same as ($1::text || ‘_’ || $2::text)::name except that, if it would not fit in 63 characters, we make it do so by truncating the name input (not the oid).

+
Immutable
obj_description(object_oid: oid) → string

Returns the comment for a database object specified by its OID alone. This is deprecated since there is no guarantee that OIDs are unique across different system catalogs; therefore, the wrong comment might be returned.

+
Stable
obj_description(object_oid: oid, catalog_name: string) → string

Returns the comment for a database object specified by its OID and the name of the containing system catalog. For example, obj_description(123456, ‘pg_class’) would retrieve the comment for the table with OID 123456.

+
Stable
oidvectortypes(vector: oidvector) → string

Generates a comma separated string of type names from an oidvector.

+
Stable
pg_backend_pid() → int

Returns a numerical ID attached to this session. This ID is part of the query cancellation key used by the wire protocol. This function was only added for compatibility, and unlike in Postgres, the returned value does not correspond to a real process ID.

+
Stable
pg_collation_for(str: anyelement) → string

Returns the collation of the argument

+
Stable
pg_column_is_updatable(reloid: oid, attnum: int2, include_triggers: bool) → bool

Returns whether the given column can be updated.

+
Stable
pg_column_size(anyelement...) → int

Return size in bytes of the column provided as an argument

+
Immutable
pg_function_is_visible(oid: oid) → bool

Returns whether the function with the given OID belongs to one of the schemas on the search path.

+
Stable
pg_get_function_arg_default(func_oid: oid, arg_num: int4) → string

Get textual representation of a function argument’s default value. The second argument of this function is the argument number among all arguments (i.e. proallargtypes, not proargtypes), starting with 1, because that’s how information_schema.sql uses it. Currently, this always returns NULL, since CockroachDB does not support default values.

+
Stable
pg_get_function_arguments(func_oid: oid) → string

Returns the argument list (with defaults) necessary to identify a function, in the form it would need to appear in within CREATE FUNCTION.

+
Stable
pg_get_function_identity_arguments(func_oid: oid) → string

Returns the argument list (without defaults) necessary to identify a function, in the form it would need to appear in within ALTER FUNCTION, for instance.

+
Stable
pg_get_function_result(func_oid: oid) → string

Returns the types of the result of the specified function.

+
Stable
pg_get_functiondef(func_oid: oid) → string

For user-defined functions, returns the definition of the specified function. For builtin functions, returns the name of the function.

+
Stable
pg_get_indexdef(index_oid: oid) → string

Gets the CREATE INDEX command for index

+
Stable
pg_get_indexdef(index_oid: oid, column_no: int, pretty_bool: bool) → string

Gets the CREATE INDEX command for index, or definition of just one index column when given a non-zero column number

+
Stable
pg_get_serial_sequence(table_name: string, column_name: string) → string

Returns the name of the sequence used by the given column_name in the table table_name.

+
Stable
pg_get_viewdef(view_oid: oid) → string

Returns the CREATE statement for an existing view.

+
Stable
pg_get_viewdef(view_oid: oid, pretty_bool: bool) → string

Returns the CREATE statement for an existing view.

+
Stable
pg_has_role(role: string, privilege: string) → bool

Returns whether or not the current user has privileges for role.

+
Stable
pg_has_role(role: oid, privilege: string) → bool

Returns whether or not the current user has privileges for role.

+
Stable
pg_has_role(user: string, role: string, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: string, role: oid, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: oid, role: string, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: oid, role: oid, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_is_other_temp_schema(oid: oid) → bool

Returns true if the given OID is the OID of another session’s temporary schema. (This can be useful, for example, to exclude other sessions’ temporary tables from a catalog display.)

+
Stable
pg_my_temp_schema() → oid

Returns the OID of the current session’s temporary schema, or zero if it has none (because it has not created any temporary tables).

+
Stable
pg_relation_is_updatable(reloid: oid, include_triggers: bool) → int4

Returns the update events the relation supports.

+
Stable
pg_sequence_last_value(sequence_oid: oid) → int

Returns the last value generated by a sequence, or NULL if the sequence has not been used yet.

+
Volatile
pg_sleep(seconds: float) → bool

pg_sleep makes the current session’s process sleep until seconds seconds have elapsed. seconds is a value of type double precision, so fractional-second delays can be specified.

+
Volatile
pg_table_is_visible(oid: oid) → bool

Returns whether the table with the given OID belongs to one of the schemas on the search path.

+
Stable
pg_type_is_visible(oid: oid) → bool

Returns whether the type with the given OID belongs to one of the schemas on the search path.

+
Stable
set_config(setting_name: string, new_value: string, is_local: bool) → string

System info

+
Volatile
shobj_description(object_oid: oid, catalog_name: string) → string

Returns the comment for a shared database object specified by its OID and the name of the containing system catalog. This is just like obj_description except that it is used for retrieving comments on shared objects (e.g. databases).

+
Stable
+ diff --git a/src/current/_includes/cockroach-generated/release-24.1/sql/operators.md b/src/current/_includes/cockroach-generated/release-24.1/sql/operators.md new file mode 100644 index 00000000000..dde5d133a6c --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-24.1/sql/operators.md @@ -0,0 +1,635 @@ + + + + + +
#Return
int # intint
varbit # varbitvarbit
+ + + + +
#>Return
jsonb #> string[]jsonb
+ + + + +
#>>Return
jsonb #>> string[]string
+ + + + + + + + + +
%Return
decimal % decimaldecimal
decimal % intdecimal
float % floatfloat
int % decimaldecimal
int % intint
string % stringbool
+ + + + + + +
&Return
inet & inetinet
int & intint
varbit & varbitvarbit
+ + + + + + + + + +
&&Return
anyelement && anyelementbool
box2d && box2dbool
box2d && geometrybool
geometry && box2dbool
geometry && geometrybool
inet && inetbool
+ + + + + + + + + + + + + + +
*Return
decimal * decimaldecimal
decimal * intdecimal
decimal * intervalinterval
float * floatfloat
float * intervalinterval
int * decimaldecimal
int * intint
int * intervalinterval
interval * decimalinterval
interval * floatinterval
interval * intinterval
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Return
+decimaldecimal
+floatfloat
+intint
+intervalinterval
date + intdate
date + intervaltimestamp
date + timetimestamp
date + timetztimestamptz
decimal + decimaldecimal
decimal + intdecimal
decimal + pg_lsnpg_lsn
float + floatfloat
inet + intinet
int + datedate
int + decimaldecimal
int + inetinet
int + intint
interval + datetimestamp
interval + intervalinterval
interval + timetime
interval + timestamptimestamp
interval + timestamptztimestamptz
interval + timetztimetz
pg_lsn + decimalpg_lsn
time + datetimestamp
time + intervaltime
timestamp + intervaltimestamp
timestamptz + intervaltimestamptz
timetz + datetimestamptz
timetz + intervaltimetz
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
-Return
-decimaldecimal
-floatfloat
-intint
-intervalinterval
date - dateint
date - intdate
date - intervaltimestamp
date - timetimestamp
decimal - decimaldecimal
decimal - intdecimal
float - floatfloat
inet - inetint
inet - intinet
int - decimaldecimal
int - intint
interval - intervalinterval
jsonb - intjsonb
jsonb - stringjsonb
jsonb - string[]jsonb
pg_lsn - decimalpg_lsn
pg_lsn - pg_lsndecimal
time - intervaltime
time - timeinterval
timestamp - intervaltimestamp
timestamp - timestampinterval
timestamp - timestamptzinterval
timestamptz - intervaltimestamptz
timestamptz - timestampinterval
timestamptz - timestamptzinterval
timetz - intervaltimetz
+ + + + + +
->Return
jsonb -> intjsonb
jsonb -> stringjsonb
+ + + + + +
->>Return
jsonb ->> intstring
jsonb ->> stringstring
+ + + + + + + + + + +
/Return
decimal / decimaldecimal
decimal / intdecimal
float / floatfloat
int / decimaldecimal
int / intdecimal
interval / floatinterval
interval / intinterval
+ + + + + + + + +
//Return
decimal // decimaldecimal
decimal // intdecimal
float // floatfloat
int // decimaldecimal
int // intint
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
<Return
anyenum < anyenumbool
bool < boolbool
bool[] < bool[]bool
box2d < box2dbool
bpchar < bpcharbool
bytes < bytesbool
bytes[] < bytes[]bool
collatedstring < collatedstringbool
date < datebool
date < timestampbool
date < timestamptzbool
date[] < date[]bool
decimal < decimalbool
decimal < floatbool
decimal < intbool
decimal[] < decimal[]bool
float < decimalbool
float < floatbool
float < intbool
float[] < float[]bool
geography < geographybool
geometry < geometrybool
inet < inetbool
inet[] < inet[]bool
int < decimalbool
int < floatbool
int < intbool
int < oidbool
int[] < int[]bool
interval < intervalbool
interval[] < interval[]bool
jsonb < jsonbbool
oid < intbool
oid < oidbool
pg_lsn < pg_lsnbool
refcursor < refcursorbool
string < stringbool
string[] < string[]bool
time < timebool
time < timetzbool
time[] < time[]bool
timestamp < datebool
timestamp < timestampbool
timestamp < timestamptzbool
timestamp[] < timestamp[]bool
timestamptz < datebool
timestamptz < timestampbool
timestamptz < timestamptzbool
timestamptz < timestamptzbool
timetz < timebool
timetz < timetzbool
tuple < tuplebool
uuid < uuidbool
uuid[] < uuid[]bool
varbit < varbitbool
+ + + + + + +
<<Return
inet << inetbool
int << intint
varbit << intvarbit
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
<=Return
anyenum <= anyenumbool
bool <= boolbool
bool[] <= bool[]bool
box2d <= box2dbool
bpchar <= bpcharbool
bytes <= bytesbool
bytes[] <= bytes[]bool
collatedstring <= collatedstringbool
date <= datebool
date <= timestampbool
date <= timestamptzbool
date[] <= date[]bool
decimal <= decimalbool
decimal <= floatbool
decimal <= intbool
decimal[] <= decimal[]bool
float <= decimalbool
float <= floatbool
float <= intbool
float[] <= float[]bool
geography <= geographybool
geometry <= geometrybool
inet <= inetbool
inet[] <= inet[]bool
int <= decimalbool
int <= floatbool
int <= intbool
int <= oidbool
int[] <= int[]bool
interval <= intervalbool
interval[] <= interval[]bool
jsonb <= jsonbbool
oid <= intbool
oid <= oidbool
pg_lsn <= pg_lsnbool
refcursor <= refcursorbool
string <= stringbool
string[] <= string[]bool
time <= timebool
time <= timetzbool
time[] <= time[]bool
timestamp <= datebool
timestamp <= timestampbool
timestamp <= timestamptzbool
timestamp[] <= timestamp[]bool
timestamptz <= datebool
timestamptz <= timestampbool
timestamptz <= timestamptzbool
timestamptz <= timestamptzbool
timetz <= timebool
timetz <= timetzbool
tuple <= tuplebool
uuid <= uuidbool
uuid[] <= uuid[]bool
varbit <= varbitbool
+ + + + + +
<@Return
anyelement <@ anyelementbool
jsonb <@ jsonbbool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
=Return
anyenum = anyenumbool
bool = boolbool
bool[] = bool[]bool
box2d = box2dbool
bpchar = bpcharbool
bytes = bytesbool
bytes[] = bytes[]bool
collatedstring = collatedstringbool
date = datebool
date = timestampbool
date = timestamptzbool
date[] = date[]bool
decimal = decimalbool
decimal = floatbool
decimal = intbool
decimal[] = decimal[]bool
float = decimalbool
float = floatbool
float = intbool
float[] = float[]bool
geography = geographybool
geometry = geometrybool
inet = inetbool
inet[] = inet[]bool
int = decimalbool
int = floatbool
int = intbool
int = oidbool
int[] = int[]bool
interval = intervalbool
interval[] = interval[]bool
jsonb = jsonbbool
oid = intbool
oid = oidbool
pg_lsn = pg_lsnbool
refcursor = refcursorbool
string = stringbool
string[] = string[]bool
time = timebool
time = timetzbool
time[] = time[]bool
timestamp = datebool
timestamp = timestampbool
timestamp = timestamptzbool
timestamp[] = timestamp[]bool
timestamptz = datebool
timestamptz = timestampbool
timestamptz = timestamptzbool
timestamptz = timestamptzbool
timetz = timebool
timetz = timetzbool
tsquery = tsquerybool
tsvector = tsvectorbool
tuple = tuplebool
uuid = uuidbool
uuid[] = uuid[]bool
varbit = varbitbool
+ + + + + + +
>>Return
inet >> inetbool
int >> intint
varbit >> intvarbit
+ + + + +
?Return
jsonb ? stringbool
+ + + + +
?&Return
jsonb ?& string[]bool
+ + + + +
?|Return
jsonb ?| string[]bool
+ + + + + +
@>Return
anyelement @> anyelementbool
jsonb @> jsonbbool
+ + + + + +
@@Return
tsquery @@ tsvectorbool
tsvector @@ tsquerybool
+ + + + +
ILIKEReturn
string ILIKE stringbool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
INReturn
anyenum IN tuplebool
bool IN tuplebool
box2d IN tuplebool
bpchar IN tuplebool
bytes IN tuplebool
collatedstring IN tuplebool
date IN tuplebool
decimal IN tuplebool
float IN tuplebool
geography IN tuplebool
geometry IN tuplebool
inet IN tuplebool
int IN tuplebool
interval IN tuplebool
jsonb IN tuplebool
oid IN tuplebool
pg_lsn IN tuplebool
refcursor IN tuplebool
string IN tuplebool
time IN tuplebool
timestamp IN tuplebool
timestamptz IN tuplebool
timetz IN tuplebool
tuple IN tuplebool
uuid IN tuplebool
varbit IN tuplebool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
IS NOT DISTINCT FROMReturn
anyelement IS NOT DISTINCT FROM unknownbool
anyenum IS NOT DISTINCT FROM anyenumbool
bool IS NOT DISTINCT FROM boolbool
bool[] IS NOT DISTINCT FROM bool[]bool
box2d IS NOT DISTINCT FROM box2dbool
bpchar IS NOT DISTINCT FROM bpcharbool
bytes IS NOT DISTINCT FROM bytesbool
bytes[] IS NOT DISTINCT FROM bytes[]bool
collatedstring IS NOT DISTINCT FROM collatedstringbool
date IS NOT DISTINCT FROM datebool
date IS NOT DISTINCT FROM timestampbool
date IS NOT DISTINCT FROM timestamptzbool
date[] IS NOT DISTINCT FROM date[]bool
decimal IS NOT DISTINCT FROM decimalbool
decimal IS NOT DISTINCT FROM floatbool
decimal IS NOT DISTINCT FROM intbool
decimal[] IS NOT DISTINCT FROM decimal[]bool
float IS NOT DISTINCT FROM decimalbool
float IS NOT DISTINCT FROM floatbool
float IS NOT DISTINCT FROM intbool
float[] IS NOT DISTINCT FROM float[]bool
geography IS NOT DISTINCT FROM geographybool
geometry IS NOT DISTINCT FROM geometrybool
inet IS NOT DISTINCT FROM inetbool
inet[] IS NOT DISTINCT FROM inet[]bool
int IS NOT DISTINCT FROM decimalbool
int IS NOT DISTINCT FROM floatbool
int IS NOT DISTINCT FROM intbool
int IS NOT DISTINCT FROM oidbool
int[] IS NOT DISTINCT FROM int[]bool
interval IS NOT DISTINCT FROM intervalbool
interval[] IS NOT DISTINCT FROM interval[]bool
jsonb IS NOT DISTINCT FROM jsonbbool
oid IS NOT DISTINCT FROM intbool
oid IS NOT DISTINCT FROM oidbool
pg_lsn IS NOT DISTINCT FROM pg_lsnbool
refcursor IS NOT DISTINCT FROM refcursorbool
string IS NOT DISTINCT FROM stringbool
string[] IS NOT DISTINCT FROM string[]bool
time IS NOT DISTINCT FROM timebool
time IS NOT DISTINCT FROM timetzbool
time[] IS NOT DISTINCT FROM time[]bool
timestamp IS NOT DISTINCT FROM datebool
timestamp IS NOT DISTINCT FROM timestampbool
timestamp IS NOT DISTINCT FROM timestamptzbool
timestamp[] IS NOT DISTINCT FROM timestamp[]bool
timestamptz IS NOT DISTINCT FROM datebool
timestamptz IS NOT DISTINCT FROM timestampbool
timestamptz IS NOT DISTINCT FROM timestamptzbool
timestamptz IS NOT DISTINCT FROM timestamptzbool
timetz IS NOT DISTINCT FROM timebool
timetz IS NOT DISTINCT FROM timetzbool
tsquery IS NOT DISTINCT FROM tsquerybool
tsvector IS NOT DISTINCT FROM tsvectorbool
tuple IS NOT DISTINCT FROM tuplebool
unknown IS NOT DISTINCT FROM unknownbool
unknown IS NOT DISTINCT FROM voidbool
uuid IS NOT DISTINCT FROM uuidbool
uuid[] IS NOT DISTINCT FROM uuid[]bool
varbit IS NOT DISTINCT FROM varbitbool
void IS NOT DISTINCT FROM unknownbool
+ + + + +
LIKEReturn
string LIKE stringbool
+ + + + +
SIMILAR TOReturn
string SIMILAR TO stringbool
+ + + + + + + + +
^Return
decimal ^ decimaldecimal
decimal ^ intdecimal
float ^ floatfloat
int ^ decimaldecimal
int ^ intint
+ + + + + + +
|Return
inet | inetinet
int | intint
varbit | varbitvarbit
+ + + + + +
|/Return
|/decimaldecimal
|/floatfloat
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
||Return
bool || bool[]bool[]
bool || stringstring
bool[] || boolbool[]
bool[] || bool[]bool[]
box2d || box2dbox2d
box2d || stringstring
bytes || bytesbytes
bytes || bytes[]bytes[]
bytes[] || bytesbytes[]
bytes[] || bytes[]bytes[]
date || date[]date[]
date || stringstring
date[] || datedate[]
date[] || date[]date[]
decimal || decimal[]decimal[]
decimal || stringstring
decimal[] || decimaldecimal[]
decimal[] || decimal[]decimal[]
float || float[]float[]
float || stringstring
float[] || floatfloat[]
float[] || float[]float[]
geography || geographygeography
geography || stringstring
geometry || geometrygeometry
geometry || stringstring
inet || inet[]inet[]
inet || stringstring
inet[] || inetinet[]
inet[] || inet[]inet[]
int || int[]int[]
int || stringstring
int[] || intint[]
int[] || int[]int[]
interval || interval[]interval[]
interval || stringstring
interval[] || intervalinterval[]
interval[] || interval[]interval[]
jsonb || jsonbjsonb
jsonb || stringstring
oid || oidoid
oid || stringstring
pg_lsn || pg_lsnpg_lsn
pg_lsn || stringstring
refcursor || refcursorrefcursor
refcursor || stringstring
string || boolstring
string || box2dstring
string || datestring
string || decimalstring
string || floatstring
string || geographystring
string || geometrystring
string || inetstring
string || intstring
string || intervalstring
string || jsonbstring
string || oidstring
string || pg_lsnstring
string || refcursorstring
string || stringstring
string || string[]string[]
string || timestring
string || timestampstring
string || timestamptzstring
string || timetzstring
string || tuplestring
string || uuidstring
string || varbitstring
string[] || stringstring[]
string[] || string[]string[]
time || stringstring
time || time[]time[]
time[] || timetime[]
time[] || time[]time[]
timestamp || stringstring
timestamp || timestamp[]timestamp[]
timestamp[] || timestamptimestamp[]
timestamp[] || timestamp[]timestamp[]
timestamptz || stringstring
timestamptz || timestamptztimestamptz
timestamptz || timestamptztimestamptz
timestamptz || timestamptztimestamptz
timetz || stringstring
timetz || timetztimetz
tuple || stringstring
uuid || stringstring
uuid || uuid[]uuid[]
uuid[] || uuiduuid[]
uuid[] || uuid[]uuid[]
varbit || stringstring
varbit || varbitvarbit
+ + + + + +
||/Return
||/decimaldecimal
||/floatfloat
+ + + + + + + + + + + +
~Return
~inetinet
~intint
~varbitvarbit
box2d ~ box2dbool
box2d ~ geometrybool
geometry ~ box2dbool
geometry ~ geometrybool
string ~ stringbool
+ + + + +
~*Return
string ~* stringbool
diff --git a/src/current/_includes/cockroach-generated/release-24.1/sql/window_functions.md b/src/current/_includes/cockroach-generated/release-24.1/sql/window_functions.md new file mode 100644 index 00000000000..e1032ff82de --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-24.1/sql/window_functions.md @@ -0,0 +1,413 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cume_dist() → float

Calculates the relative rank of the current row: (number of rows preceding or peer with current row) / (total rows).

+
Immutable
dense_rank() → int

Calculates the rank of the current row without gaps; this function counts peer groups.

+
Immutable
first_value(val: bool) → bool

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: bytes) → bytes

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: date) → date

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: decimal) → decimal

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: float) → float

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: inet) → inet

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: int) → int

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: interval) → interval

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: string) → string

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: time) → time

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timestamp) → timestamp

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timestamptz) → timestamptz

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: uuid) → uuid

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: box2d) → box2d

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: geography) → geography

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: geometry) → geometry

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: jsonb) → jsonb

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: oid) → oid

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: pg_lsn) → pg_lsn

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: refcursor) → refcursor

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timetz) → timetz

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: varbit) → varbit

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
lag(val: bool) → bool

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: bool, n: int) → bool

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: bool, n: int, default: bool) → bool

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: bytes) → bytes

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: bytes, n: int) → bytes

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: bytes, n: int, default: bytes) → bytes

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: date) → date

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: date, n: int) → date

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: date, n: int, default: date) → date

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: decimal) → decimal

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: decimal, n: int) → decimal

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: decimal, n: int, default: decimal) → decimal

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: float) → float

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: float, n: int) → float

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: float, n: int, default: float) → float

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: inet) → inet

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: inet, n: int) → inet

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: inet, n: int, default: inet) → inet

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: int) → int

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: int, n: int) → int

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: int, n: int, default: int) → int

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: interval) → interval

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: interval, n: int) → interval

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: interval, n: int, default: interval) → interval

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: string) → string

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: string, n: int) → string

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: string, n: int, default: string) → string

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: time) → time

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: time, n: int) → time

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: time, n: int, default: time) → time

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timestamp) → timestamp

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timestamp, n: int, default: timestamp) → timestamp

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timestamptz) → timestamptz

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timestamptz, n: int, default: timestamptz) → timestamptz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: uuid) → uuid

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: uuid, n: int) → uuid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: uuid, n: int, default: uuid) → uuid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: box2d) → box2d

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: box2d, n: int) → box2d

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: box2d, n: int, default: box2d) → box2d

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: geography) → geography

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: geography, n: int) → geography

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: geography, n: int, default: geography) → geography

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: geometry) → geometry

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: geometry, n: int) → geometry

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: geometry, n: int, default: geometry) → geometry

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: jsonb) → jsonb

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: jsonb, n: int, default: jsonb) → jsonb

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: oid) → oid

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: oid, n: int) → oid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: oid, n: int, default: oid) → oid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: pg_lsn) → pg_lsn

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: pg_lsn, n: int, default: pg_lsn) → pg_lsn

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: refcursor) → refcursor

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: refcursor, n: int, default: refcursor) → refcursor

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timetz) → timetz

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timetz, n: int) → timetz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timetz, n: int, default: timetz) → timetz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: varbit) → varbit

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: varbit, n: int) → varbit

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: varbit, n: int, default: varbit) → varbit

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
last_value(val: bool) → bool

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: bytes) → bytes

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: date) → date

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: decimal) → decimal

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: float) → float

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: inet) → inet

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: int) → int

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: interval) → interval

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: string) → string

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: time) → time

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timestamp) → timestamp

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timestamptz) → timestamptz

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: uuid) → uuid

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: box2d) → box2d

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: geography) → geography

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: geometry) → geometry

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: jsonb) → jsonb

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: oid) → oid

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: pg_lsn) → pg_lsn

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: refcursor) → refcursor

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timetz) → timetz

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: varbit) → varbit

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
lead(val: bool) → bool

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: bool, n: int) → bool

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: bool, n: int, default: bool) → bool

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: bytes) → bytes

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: bytes, n: int) → bytes

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: bytes, n: int, default: bytes) → bytes

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: date) → date

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: date, n: int) → date

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: date, n: int, default: date) → date

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: decimal) → decimal

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: decimal, n: int) → decimal

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: decimal, n: int, default: decimal) → decimal

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: float) → float

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: float, n: int) → float

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: float, n: int, default: float) → float

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: inet) → inet

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: inet, n: int) → inet

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: inet, n: int, default: inet) → inet

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: int) → int

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: int, n: int) → int

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: int, n: int, default: int) → int

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: interval) → interval

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: interval, n: int) → interval

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: interval, n: int, default: interval) → interval

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: string) → string

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: string, n: int) → string

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: string, n: int, default: string) → string

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: time) → time

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: time, n: int) → time

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: time, n: int, default: time) → time

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timestamp) → timestamp

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timestamp, n: int, default: timestamp) → timestamp

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timestamptz) → timestamptz

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timestamptz, n: int, default: timestamptz) → timestamptz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: uuid) → uuid

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: uuid, n: int) → uuid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: uuid, n: int, default: uuid) → uuid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: box2d) → box2d

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: box2d, n: int) → box2d

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: box2d, n: int, default: box2d) → box2d

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: geography) → geography

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: geography, n: int) → geography

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: geography, n: int, default: geography) → geography

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: geometry) → geometry

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: geometry, n: int) → geometry

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: geometry, n: int, default: geometry) → geometry

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: jsonb) → jsonb

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: jsonb, n: int, default: jsonb) → jsonb

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: oid) → oid

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: oid, n: int) → oid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: oid, n: int, default: oid) → oid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: pg_lsn) → pg_lsn

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: pg_lsn, n: int, default: pg_lsn) → pg_lsn

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: refcursor) → refcursor

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: refcursor, n: int, default: refcursor) → refcursor

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timetz) → timetz

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timetz, n: int) → timetz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timetz, n: int, default: timetz) → timetz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: varbit) → varbit

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: varbit, n: int) → varbit

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: varbit, n: int, default: varbit) → varbit

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
nth_value(val: bool, n: int) → bool

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: bytes, n: int) → bytes

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: date, n: int) → date

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: decimal, n: int) → decimal

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: float, n: int) → float

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: inet, n: int) → inet

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: int, n: int) → int

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: interval, n: int) → interval

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: string, n: int) → string

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: time, n: int) → time

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: uuid, n: int) → uuid

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: box2d, n: int) → box2d

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: geography, n: int) → geography

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: geometry, n: int) → geometry

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: oid, n: int) → oid

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timetz, n: int) → timetz

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: varbit, n: int) → varbit

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
ntile(n: int) → int

Calculates an integer ranging from 1 to n, dividing the partition as equally as possible.

+
Immutable
percent_rank() → float

Calculates the relative rank of the current row: (rank - 1) / (total rows - 1).

+
Immutable
rank() → int

Calculates the rank of the current row with gaps; same as row_number of its first peer.

+
Immutable
row_number() → int

Calculates the number of the current row within its partition, counting from 1.

+
Immutable
+ diff --git a/src/current/_includes/cockroach-generated/release-24.2/eventlog.md b/src/current/_includes/cockroach-generated/release-24.2/eventlog.md new file mode 100644 index 00000000000..b315b0d73c9 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-24.2/eventlog.md @@ -0,0 +1,3341 @@ +Certain notable events are reported using a structured format. +Commonly, these notable events are also copied to the table +`system.eventlog`, unless the cluster setting +`server.eventlog.enabled` is unset. + +Additionally, notable events are copied to specific external logging +channels in log messages, where they can be collected for further processing. + +The sections below document the possible notable event types +in this version of CockroachDB. For each event type, a table +documents the possible fields. A field may be omitted from +an event if its value is empty or zero. + +A field is also considered "Sensitive" if it may contain +application-specific information or personally identifiable information (PII). In that case, +the copy of the event sent to the external logging channel +will contain redaction markers in a format that is compatible +with the redaction facilities in [`cockroach debug zip`](cockroach-debug-zip.html) +and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html), +provided the `redactable` functionality is enabled on the logging sink. + +Events not documented on this page will have an unstructured format in log messages. + +## Cluster-level events + +Events in this category pertain to an entire cluster and are +not relative to any particular tenant. + +In a multi-tenant setup, the `system.eventlog` table for individual +tenants cannot contain a copy of cluster-level events; conversely, +the `system.eventlog` table in the system tenant cannot contain the +SQL-level events for individual tenants. + +Events in this category are logged to the `OPS` channel. 
+ + +### `certs_reload` + +An event of type `certs_reload` is recorded when the TLS certificates are +reloaded/rotated from disk. + + +| Field | Description | Sensitive | +|--|--|--| +| `Success` | Whether the operation completed without errors. | no | +| `ErrorMessage` | If an error was encountered, the text of the error. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `node_decommissioned` + +An event of type `node_decommissioned` is recorded when a node is marked as +decommissioned. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_decommissioning` + +An event of type `node_decommissioning` is recorded when a node is marked as +decommissioning. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_join` + +An event of type `node_join` is recorded when a node joins the cluster. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. 
| no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_recommissioned` + +An event of type `node_recommissioned` is recorded when a decommissioning node is +recommissioned. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_restart` + +An event of type `node_restart` is recorded when an existing node rejoins the cluster +after being offline. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_shutdown_connection_timeout` + +An event of type `node_shutdown_connection_timeout` is recorded when SQL connections remain open +during shutdown, after waiting for the server.shutdown.connections.timeout +to transpire. + + +| Field | Description | Sensitive | +|--|--|--| +| `Detail` | The detailed message, meant to be a human-understandable explanation. | no | +| `ConnectionsRemaining` | The number of connections still open after waiting for the client to close them. | no | +| `TimeoutMillis` | The amount of time the server waited for the client to close the connections, defined by server.shutdown.connections.timeout. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_shutdown_transaction_timeout` + +An event of type `node_shutdown_transaction_timeout` is recorded when SQL transactions remain open +during shutdown, after waiting for the server.shutdown.transactions.timeout +to transpire. + + +| Field | Description | Sensitive | +|--|--|--| +| `Detail` | The detailed message, meant to be a human-understandable explanation. | no | +| `ConnectionsRemaining` | The number of connections still running SQL transactions after waiting for the client to end them. | no | +| `TimeoutMillis` | The amount of time the server waited for the client to close the connections, defined by server.shutdown.transactions.timeout. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `tenant_shared_service_start` + +An event of type `tenant_shared_service_start` is recorded when a tenant server +is started inside the same process as the KV layer. + + +| Field | Description | Sensitive | +|--|--|--| +| `OK` | Whether the startup was successful. | no | +| `ErrorText` | If the startup failed, the text of the error. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `NodeID` | The node ID where the event was originated. | no | +| `TenantID` | The ID of the tenant owning the service. | no | +| `InstanceID` | The ID of the server instance. | no | +| `TenantName` | The name of the tenant at the time the event was emitted. | yes | + +### `tenant_shared_service_stop` + +An event of type `tenant_shared_service_stop` is recorded when a tenant server +is shut down inside the same process as the KV layer. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `TenantID` | The ID of the tenant owning the service. | no | +| `InstanceID` | The ID of the server instance. | no | +| `TenantName` | The name of the tenant at the time the event was emitted. | yes | + +## Debugging events + +Events in this category pertain to debugging operations performed by +operators or (more commonly) Cockroach Labs employees. These operations can +e.g. directly access and mutate internal state, breaking system invariants. + +Events in this category are logged to the `OPS` channel. + + +### `debug_recover_replica` + +An event of type `debug_recover_replica` is recorded when unsafe loss of quorum recovery is performed. + + +| Field | Description | Sensitive | +|--|--|--| +| `RangeID` | | no | +| `StoreID` | | no | +| `SurvivorReplicaID` | | no | +| `UpdatedReplicaID` | | no | +| `StartKey` | | yes | +| `EndKey` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event originated. | no | +| `User` | The user which performed the operation. 
| yes | + +### `debug_send_kv_batch` + +An event of type `debug_send_kv_batch` is recorded when an arbitrary KV BatchRequest is submitted +to the cluster via the `debug send-kv-batch` CLI command. + + +| Field | Description | Sensitive | +|--|--|--| +| `BatchRequest` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event originated. | no | +| `User` | The user which performed the operation. | yes | + +## Health events + +Events in this category pertain to the health of one or more servers. + +Events in this category are logged to the `HEALTH` channel. + + +### `runtime_stats` + +An event of type `runtime_stats` is recorded every 10 seconds as server health metrics. + + +| Field | Description | Sensitive | +|--|--|--| +| `MemRSSBytes` | The process resident set size. Expressed as bytes. | no | +| `GoroutineCount` | The number of goroutines. | no | +| `MemStackSysBytes` | The stack system memory used. Expressed as bytes. | no | +| `GoAllocBytes` | The memory allocated by Go. Expressed as bytes. | no | +| `GoTotalBytes` | The total memory allocated by Go but not released. Expressed as bytes. | no | +| `GoStatsStaleness` | The staleness of the Go memory statistics. Expressed in seconds. | no | +| `HeapFragmentBytes` | The amount of heap fragmentation. Expressed as bytes. | no | +| `HeapReservedBytes` | The amount of heap reserved. Expressed as bytes. | no | +| `HeapReleasedBytes` | The amount of heap released. Expressed as bytes. | no | +| `CGoAllocBytes` | The memory allocated outside of Go. Expressed as bytes. | no | +| `CGoTotalBytes` | The total memory allocated outside of Go but not released. Expressed as bytes. | no | +| `CGoCallRate` | The total number of calls outside of Go over time. Expressed as operations per second. 
| no | +| `CPUUserPercent` | The user CPU percentage. | no | +| `CPUSysPercent` | The system CPU percentage. | no | +| `GCPausePercent` | The GC pause percentage. | no | +| `GCRunCount` | The total number of GC runs. | no | +| `NetHostRecvBytes` | The bytes received on all network interfaces since this process started. | no | +| `NetHostSendBytes` | The bytes sent on all network interfaces since this process started. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Job events + +Events in this category pertain to long-running jobs that are orchestrated by +a node's job registry. These system processes can create and/or modify stored +objects during the course of their execution. + +A job might choose to emit multiple events during its execution when +transitioning from one "state" to another. +Egs: IMPORT/RESTORE will emit events on job creation and successful +completion. If the job fails, events will be emitted on job creation, +failure, and successful revert. + +Events in this category are logged to the `OPS` channel. + + +### `import` + +An event of type `import` is recorded when an import job is created and successful completion. +If the job fails, events will be emitted on job creation, failure, and +successful revert. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `JobID` | The ID of the job that triggered the event. | no | +| `JobType` | The type of the job that triggered the event. | no | +| `Description` | A description of the job that triggered the event. Some jobs populate the description with an approximate representation of the SQL statement run to create the job. 
| yes | +| `User` | The user account that triggered the event. | yes | +| `DescriptorIDs` | The object descriptors affected by the job. Set to zero for operations that don't affect descriptors. | yes | +| `Status` | The status of the job that triggered the event. This allows the job to indicate which phase execution it is in when the event is triggered. | no | + +### `restore` + +An event of type `restore` is recorded when a restore job is created and successful completion. +If the job fails, events will be emitted on job creation, failure, and +successful revert. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `JobID` | The ID of the job that triggered the event. | no | +| `JobType` | The type of the job that triggered the event. | no | +| `Description` | A description of the job that triggered the event. Some jobs populate the description with an approximate representation of the SQL statement run to create the job. | yes | +| `User` | The user account that triggered the event. | yes | +| `DescriptorIDs` | The object descriptors affected by the job. Set to zero for operations that don't affect descriptors. | yes | +| `Status` | The status of the job that triggered the event. This allows the job to indicate which phase execution it is in when the event is triggered. | no | + +### `status_change` + +An event of type `status_change` is recorded when a job changes statuses. + + +| Field | Description | Sensitive | +|--|--|--| +| `JobID` | The ID of the job that is changing statuses. | no | +| `JobType` | The type of the job that is changing statuses. 
| no | +| `Description` | A human parsable description of the status change | partially | +| `PreviousStatus` | The status that the job is transitioning out of | no | +| `NewStatus` | The status that the job has transitioned into | no | +| `RunNum` | The run number of the job. | no | +| `Error` | An error that may have occurred while the job was running. | yes | +| `FinalResumeErr` | An error that occurred that requires the job to be reverted. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Miscellaneous SQL events + +Events in this category report miscellaneous SQL events. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these miscellaneous events are +preserved in each tenant's own system.eventlog table. + +Events in this category are logged to the `OPS` channel. + + +### `set_cluster_setting` + +An event of type `set_cluster_setting` is recorded when a cluster setting is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SettingName` | The name of the affected cluster setting. | no | +| `Value` | The new value of the cluster setting. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. 
The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `set_tenant_cluster_setting` + +An event of type `set_tenant_cluster_setting` is recorded when a cluster setting override +is changed, either for another tenant or for all tenants. + + +| Field | Description | Sensitive | +|--|--|--| +| `SettingName` | The name of the affected cluster setting. | no | +| `Value` | The new value of the cluster setting. | yes | +| `TenantId` | The target Tenant ID. Empty if targeting all tenants. | no | +| `AllTenants` | Whether the override applies to all tenants. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +## SQL Access Audit Events + +Events in this category are generated when a table has been +marked as audited via `ALTER TABLE ... EXPERIMENTAL_AUDIT SET`. + +Note: These events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SENSITIVE_ACCESS` channel. + + +### `admin_query` + +An event of type `admin_query` is recorded when a user with admin privileges (the user +is directly or indirectly a member of the admin role) executes a query. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `role_based_audit_event` + +An event of type `role_based_audit_event` is an audit event recorded when an executed query belongs to a user whose role +membership(s) correspond to any role that is enabled to emit an audit log via the sql.log.user_audit +cluster setting. + + +| Field | Description | Sensitive | +|--|--|--| +| `Role` | The configured audit role that emitted this log. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `sensitive_table_access` + +An event of type `sensitive_table_access` is recorded when an access is performed to +a table marked as audited. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table being audited. | yes | +| `AccessMode` | How the table was accessed (r=read / rw=read/write). 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. 
| no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +## SQL Execution Log + +Events in this category report executed queries. + +Note: These events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SQL_EXEC` channel. + + +### `query_execute` + +An event of type `query_execute` is recorded when a query is executed, +and the cluster setting `sql.log.all_statements.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. 
For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +## SQL Logical Schema Changes + +Events in this category pertain to DDL (Data Definition Language) +operations performed by SQL statements that modify the SQL logical +schema. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `SQL_SCHEMA` channel. + + +### `alter_database_add_region` + +An event of type `alter_database_add_region` is recorded when a region is added to a database. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `RegionName` | The region being added. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | + | `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | + | `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | + | `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | + | `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | + | `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_drop_region` + +An event of type `alter_database_drop_region` is recorded when a region is dropped from a database. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `RegionName` | The region being dropped. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_placement` + +An event of type `alter_database_placement` is recorded when the database placement is modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `Placement` | The new placement policy. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_primary_region` + +An event of type `alter_database_primary_region` is recorded when a primary region is added/modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `PrimaryRegionName` | The new primary region. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_set_zone_config_extension` + +An event of type `alter_database_set_zone_config_extension` is recorded when a zone config extension is changed. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. 
The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + +### `alter_database_survival_goal` + +An event of type `alter_database_survival_goal` is recorded when the survival goal is modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `SurvivalGoal` | The new survival goal | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_function_options` + +An event of type `alter_function_options` is recorded when a user-defined function's options are +altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the affected function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_index` + +An event of type `alter_index` is recorded when an index is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_index_visible` + +An event of type `alter_index_visible` is recorded when an index's visibility is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `NotVisible` | Set true if index is not visible. NOTE: THIS FIELD IS DEPRECATED in favor of invisibility. | no | +| `Invisibility` | The new invisibility of the affected index. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_sequence` + +An event of type `alter_sequence` is recorded when a sequence is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the affected sequence. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_table` + +An event of type `alter_table` is recorded when a table is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update, if any. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_type` + +An event of type `alter_type` is recorded when a user-defined type is altered. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_column` + +An event of type `comment_on_column` is recorded when a column is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected column. | no | +| `ColumnName` | The affected column. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_constraint` + +An event of type `comment_on_constraint` is recorded when a constraint is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected constraint. | no | +| `ConstraintName` | The name of the affected constraint. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_database` + +An event of type `comment_on_database` is recorded when a database is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `comment_on_index` + +An event of type `comment_on_index` is recorded when an index is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_schema` + + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | Name of the affected schema. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_table` + +An event of type `comment_on_table` is recorded when a table is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. 
The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_type` + +An event of type `comment_on_type` is recorded when a type is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `create_database` + +An event of type `create_database` is recorded when a database is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the new database. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_function` + +An event of type `create_function` is recorded when a user-defined function is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the created function. | no | +| `IsReplace` | If the new function is a replace of an existing function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_index` + +An event of type `create_index` is recorded when an index is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the new index. | no | +| `IndexName` | The name of the new index. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_schema` + +An event of type `create_schema` is recorded when a schema is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the new schema. | no | +| `Owner` | The name of the owner for the new schema. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_sequence` + +An event of type `create_sequence` is recorded when a sequence is created. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the new sequence. | no | +| `Owner` | The name of the owner for the new sequence. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_statistics` + +An event of type `create_statistics` is recorded when statistics are collected for a +table. + +Events of this type are only collected when the cluster setting +`sql.stats.post_events.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table for which the statistics were created. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_table` + +An event of type `create_table` is recorded when a table is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the new table. | no | +| `Owner` | The name of the owner for the new table. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_type` + +An event of type `create_type` is recorded when a user-defined type is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the new type. | no | +| `Owner` | The name of the owner for the new type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_view` + +An event of type `create_view` is recorded when a view is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `ViewName` | The name of the new view. | no | +| `Owner` | The name of the owner of the new view. 
| no | +| `ViewQuery` | The SQL selection clause used to define the view. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_database` + +An event of type `drop_database` is recorded when a database is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | no | +| `DroppedSchemaObjects` | The names of the schemas dropped by a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_function` + +An event of type `drop_function` is recorded when a user-defined function is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the dropped function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_index` + +An event of type `drop_index` is recorded when an index is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_schema` + +An event of type `drop_schema` is recorded when a schema is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_sequence` + +An event of type `drop_sequence` is recorded when a sequence is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the affected sequence. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_table` + +An event of type `drop_table` is recorded when a table is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_type` + +An event of type `drop_type` is recorded when a user-defined type is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_view` + +An event of type `drop_view` is recorded when a view is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `ViewName` | The name of the affected view. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `finish_schema_change` + +An event of type `finish_schema_change` is recorded when a previously initiated schema +change has completed. + + +| Field | Description | Sensitive | +|--|--|--| +| `LatencyNanos` | The amount of time the schema change job took to complete. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `finish_schema_change_rollback` + +An event of type `finish_schema_change_rollback` is recorded when a previously +initiated schema change rollback has completed. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `LatencyNanos` | The amount of time the schema change job took to roll back. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `force_delete_table_data_entry` + + + +| Field | Description | Sensitive | +|--|--|--| +| `DescriptorID` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `rename_database` + +An event of type `rename_database` is recorded when a database is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The old name of the affected database. | no | +| `NewDatabaseName` | The new name of the affected database. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_function` + +An event of type `rename_function` is recorded when a user-defined function is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | The old name of the affected function. | no | +| `NewFunctionName` | The new name of the affected function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_schema` + +An event of type `rename_schema` is recorded when a schema is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The old name of the affected schema. | no | +| `NewSchemaName` | The new name of the affected schema. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_table` + +An event of type `rename_table` is recorded when a table, sequence or view is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The old name of the affected table. | no | +| `NewTableName` | The new name of the affected table. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_type` + +An event of type `rename_type` is recorded when a user-defined type is renamed. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The old name of the affected type. | no | +| `NewTypeName` | The new name of the affected type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `reverse_schema_change` + +An event of type `reverse_schema_change` is recorded when an in-progress schema change +encounters a problem and is reversed. + + +| Field | Description | Sensitive | +|--|--|--| +| `Error` | The error encountered that caused the schema change to be reversed. The specific format of the error is variable and can change across releases without warning. | partially | +| `SQLSTATE` | The SQLSTATE code for the error. | no | +| `LatencyNanos` | The amount of time the schema change job took before being reverted. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. 
Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `set_schema` + +An event of type `set_schema` is recorded when a table, view, sequence or type's schema is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DescriptorName` | The old name of the affected descriptor. | no | +| `NewDescriptorName` | The new name of the affected descriptor. | no | +| `DescriptorType` | The descriptor type being changed (table, view, sequence, type). | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `truncate_table` + +An event of type `truncate_table` is recorded when a table is truncated. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_delete_descriptor` + +An event of type `unsafe_delete_descriptor` is recorded when a descriptor is written +using crdb_internal.unsafe_delete_descriptor(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. + + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | no | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_delete_namespace_entry` + +An event of type `unsafe_delete_namespace_entry` is recorded when a namespace entry is +written using crdb_internal.unsafe_delete_namespace_entry(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. + + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | no | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. 
The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_upsert_descriptor` + +An event of type `unsafe_upsert_descriptor` is recorded when a descriptor is written +using crdb_internal.unsafe_upsert_descriptor(). + + +| Field | Description | Sensitive | +|--|--|--| +| `PreviousDescriptor` | | yes | +| `NewDescriptor` | | yes | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_upsert_namespace_entry` + +An event of type `unsafe_upsert_namespace_entry` is recorded when a namespace entry is +written using crdb_internal.unsafe_upsert_namespace_entry(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. + + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | no | +| `PreviousID` | | no | +| `Force` | | no | +| `FailedValidation` | | no | +| `ValidationErrors` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +## SQL Privilege changes + +Events in this category pertain to DDL (Data Definition Language) +operations performed by SQL statements that modify the privilege +grants for stored objects. 
+ +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `PRIVILEGES` channel. + + +### `alter_database_owner` + +An event of type `alter_database_owner` is recorded when a database's owner is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database being affected. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_default_privileges` + +An event of type `alter_default_privileges` is recorded when default privileges are changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. 
| yes | +| `RoleName` | Either role_name should be populated or for_all_roles should be true. The role having its default privileges altered. | yes | +| `ForAllRoles` | Identifies if FOR ALL ROLES is used. | no | +| `SchemaName` | The name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `alter_function_owner` + +AlterFunctionOwner is recorded when the owner of a user-defined function is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | The name of the affected user-defined function. | yes | +| `Owner` | The name of the new owner. 
| yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_schema_owner` + +An event of type `alter_schema_owner` is recorded when a schema's owner is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. 
The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_table_owner` + +An event of type `alter_table_owner` is recorded when the owner of a table, view or sequence is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected object. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_type_owner` + +An event of type `alter_type_owner` is recorded when the owner of a user-defined type is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `change_database_privilege` + +An event of type `change_database_privilege` is recorded when privileges are +added to / removed from a user for a database object. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_function_privilege` + + + +| Field | Description | Sensitive | +|--|--|--| +| `FuncName` | The name of the affected function. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_schema_privilege` + +An event of type `change_schema_privilege` is recorded when privileges are added to / +removed from a user for a schema object. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_table_privilege` + +An event of type `change_table_privilege` is recorded when privileges are added to / removed +from a user for a table, sequence or view object. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_type_privilege` + +An event of type `change_type_privilege` is recorded when privileges are added to / +removed from a user for a type object. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +## SQL Session events + +Events in this category report SQL client connections +and sessions. 
+ +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these miscellaneous events are +preserved in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `SESSIONS` channel. + + +### `client_authentication_failed` + +An event of type `client_authentication_failed` is reported when a client session +did not authenticate successfully. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Reason` | The reason for the authentication failure. See below for possible values for type `AuthFailReason`. | no | +| `Detail` | The detailed error for the authentication failure. | partially | +| `Method` | The authentication method used. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. 
| yes | + +### `client_authentication_info` + +An event of type `client_authentication_info` is reported for intermediate +steps during the authentication process. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Method` | The authentication method used, once known. | no | +| `Info` | The authentication progress message. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_authentication_ok` + +An event of type `client_authentication_ok` is reported when a client session +was authenticated successfully. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Method` | The authentication method used. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_connection_end` + +An event of type `client_connection_end` is reported when a client connection +is closed. This is reported even when authentication +fails, and even for simple cancellation messages. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_connections.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Duration` | The duration of the connection in nanoseconds. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. 
| no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | + +### `client_connection_start` + +An event of type `client_connection_start` is reported when a client connection +is established. This is reported even when authentication +fails, and even for simple cancellation messages. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_connections.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | + +### `client_session_end` + +An event of type `client_session_end` is reported when a client session +is completed. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Duration` | The duration of the connection in nanoseconds. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +## SQL Slow Query Log + +Events in this category report slow query execution. + +Note: these events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SQL_PERF` channel. + + +### `large_row` + +An event of type `large_row` is recorded when a statement tries to write a row larger than +cluster setting `sql.guardrails.max_row_size_log` to the database. Multiple +LargeRow events will be recorded for statements writing multiple large rows. +LargeRow events are recorded before the transaction commits, so in the case +of transaction abort there will not be a corresponding row in the database. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `RowSize` | | no | +| `TableID` | | no | +| `FamilyID` | | no | +| `PrimaryKey` | | yes | + +### `slow_query` + +An event of type `slow_query` is recorded when a query triggers the "slow query" condition. + +As of this writing, the condition requires: +- the cluster setting `sql.log.slow_query.latency_threshold` +set to a non-zero value, AND +- EITHER of the following conditions: +- the actual age of the query exceeds the configured threshold; AND/OR +- the query performs a full table/index scan AND the cluster setting +`sql.log.slow_query.experimental_full_table_scans.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. 
| no |
+| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no |
+| `ErrorText` | The text of the error if any. | partially |
+| `Age` | Age of the query in milliseconds. | no |
+| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no |
+| `FullTableScan` | Whether the query contains a full table scan. | no |
+| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no |
+| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no |
+| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no |
+| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no |
+
+### `txn_rows_read_limit`
+
+An event of type `txn_rows_read_limit` is recorded when a transaction tries to read more rows than
+cluster setting `sql.defaults.transaction_rows_read_log`. There will only be
+a single record for a single transaction (unless it is retried) even if there
+are more statements within the transaction that haven't been executed yet.
+
+
+
+
+#### Common fields
+
+| Field | Description | Sensitive |
+|--|--|--|
+| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no |
+| `EventType` | The type of the event. | no |
+| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially |
+| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no |
+| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends |
+| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +### `txn_rows_written_limit` + +An event of type `txn_rows_written_limit` is recorded when a transaction tries to write more rows +than cluster setting `sql.defaults.transaction_rows_written_log`. There will +only be a single record for a single transaction (unless it is retried) even +if there are more mutation statements within the transaction that haven't +been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +## SQL Slow Query Log (Internal) + +Events in this category report slow query execution by +internal executors, i.e., when CockroachDB internally issues +SQL statements. + +Note: these events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SQL_INTERNAL_PERF` channel. + + +### `large_row_internal` + +An event of type `large_row_internal` is recorded when an internal query tries to write a row +larger than cluster settings `sql.guardrails.max_row_size_log` or +`sql.guardrails.max_row_size_err` to the database. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RowSize` | | no | +| `TableID` | | no | +| `FamilyID` | | no | +| `PrimaryKey` | | yes | + +### `slow_query_internal` + +An event of type `slow_query_internal` is recorded when a query triggers the "slow query" condition, +and the cluster setting `sql.log.slow_query.internal_queries.enabled` is +set. +See the documentation for the event type `slow_query` for details about +the "slow query" condition. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. 
| no | + +### `txn_rows_read_limit_internal` + +An event of type `txn_rows_read_limit_internal` is recorded when an internal transaction tries to +read more rows than cluster setting `sql.defaults.transaction_rows_read_log` +or `sql.defaults.transaction_rows_read_err`. There will only be a single +record for a single transaction (unless it is retried) even if there are more +mutation statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. 
| no | + +### `txn_rows_written_limit_internal` + +An event of type `txn_rows_written_limit_internal` is recorded when an internal transaction tries to +write more rows than cluster setting +`sql.defaults.transaction_rows_written_log` or +`sql.defaults.transaction_rows_written_err`. There will only be a single +record for a single transaction (unless it is retried) even if there are more +mutation statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. 
| no | + +## SQL User and Role operations + +Events in this category pertain to SQL statements that modify the +properties of users and roles. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `USER_ADMIN` channel. + + +### `alter_role` + +An event of type `alter_role` is recorded when a role is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the affected user/role. | yes | +| `Options` | The options set on the user/role. | no | +| `SetInfo` | Information corresponding to an ALTER ROLE SET statement. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_role` + +An event of type `create_role` is recorded when a role is created. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the new user/role. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_role` + +An event of type `drop_role` is recorded when a role is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the affected user/role. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `grant_role` + +An event of type `grant_role` is recorded when a role is granted. + + +| Field | Description | Sensitive | +|--|--|--| +| `GranteeRoles` | The roles being granted to. | yes | +| `Members` | The roles being granted. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `password_hash_converted` + +An event of type `password_hash_converted` is recorded when the password credentials +are automatically converted server-side. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the user/role whose credentials have been converted. | yes | +| `OldMethod` | The previous hash method. | no | +| `NewMethod` | The new hash method. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Storage telemetry events + + + +Events in this category are logged to the `TELEMETRY` channel. + + +### `level_stats` + +An event of type `level_stats` contains per-level statistics for an LSM. + + +| Field | Description | Sensitive | +|--|--|--| +| `Level` | level is the level ID in a LSM (e.g. level(L0) == 0, etc.) | no | +| `NumFiles` | num_files is the number of files in the level (gauge). | no | +| `SizeBytes` | size_bytes is the size of the level, in bytes (gauge). | no | +| `Score` | score is the compaction score of the level (gauge). | no | +| `BytesIn` | bytes_in is the number of bytes written to this level (counter). | no | +| `BytesIngested` | bytes_ingested is the number of bytes ingested into this level (counter). | no | +| `BytesMoved` | bytes_moved is the number of bytes moved into this level via a move-compaction (counter). | no | +| `BytesRead` | bytes_read is the number of bytes read from this level, during compactions (counter). | no | +| `BytesCompacted` | bytes_compacted is the number of bytes written to this level during compactions (counter). | no | +| `BytesFlushed` | bytes flushed is the number of bytes flushed to this level. This value is always zero for levels other than L0 (counter). 
| no | +| `TablesCompacted` | tables_compacted is the count of tables compacted into this level (counter). | no | +| `TablesFlushed` | tables_flushed is the count of tables flushed into this level (counter). | no | +| `TablesIngested` | tables_ingested is the count of tables ingested into this level (counter). | no | +| `TablesMoved` | tables_moved is the count of tables moved into this level via move-compactions (counter). | no | +| `NumSublevels` | num_sublevel is the count of sublevels for the level. This value is always zero for levels other than L0 (gauge). | no | + + + +### `store_stats` + +An event of type `store_stats` contains per store stats. + +Note that because stats are scoped to the lifetime of the process, counters +(and certain gauges) will be reset across node restarts. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeId` | node_id is the ID of the node. | no | +| `StoreId` | store_id is the ID of the store. | no | +| `Levels` | levels is a nested message containing per-level statistics. | yes | +| `CacheSize` | cache_size is the size of the cache for the store, in bytes (gauge). | no | +| `CacheCount` | cache_count is the number of items in the cache (gauge). | no | +| `CacheHits` | cache_hits is the number of cache hits (counter). | no | +| `CacheMisses` | cache_misses is the number of cache misses (counter). | no | +| `CompactionCountDefault` | compaction_count_default is the count of default compactions (counter). | no | +| `CompactionCountDeleteOnly` | compaction_count_delete_only is the count of delete-only compactions (counter). | no | +| `CompactionCountElisionOnly` | compaction_count_elision_only is the count of elision-only compactions (counter). | no | +| `CompactionCountMove` | compaction_count_move is the count of move-compactions (counter). | no | +| `CompactionCountRead` | compaction_count_read is the count of read-compactions (counter). 
| no | +| `CompactionCountRewrite` | compaction_count_rewrite is the count of rewrite-compactions (counter). | no | +| `CompactionNumInProgress` | compactions_num_in_progress is the number of compactions in progress (gauge). | no | +| `CompactionMarkedFiles` | compaction_marked_files is the count of files marked for compaction (gauge). | no | +| `FlushCount` | flush_count is the number of flushes (counter). | no | +| `FlushIngestCount` | | no | +| `FlushIngestTableCount` | | no | +| `FlushIngestTableBytes` | | no | +| `IngestCount` | ingest_count is the number of successful ingest operations (counter). | no | +| `MemtableSize` | memtable_size is the total size allocated to all memtables and (large) batches, in bytes (gauge). | no | +| `MemtableCount` | memtable_count is the count of memtables (gauge). | no | +| `MemtableZombieCount` | memtable_zombie_count is the count of memtables no longer referenced by the current DB state, but still in use by an iterator (gauge). | no | +| `MemtableZombieSize` | memtable_zombie_size is the size, in bytes, of all zombie memtables (gauge). | no | +| `WalLiveCount` | wal_live_count is the count of live WAL files (gauge). | no | +| `WalLiveSize` | wal_live_size is the size, in bytes, of live data in WAL files. With WAL recycling, this value is less than the actual on-disk size of the WAL files (gauge). | no | +| `WalObsoleteCount` | wal_obsolete_count is the count of obsolete WAL files (gauge). | no | +| `WalObsoleteSize` | wal_obsolete_size is the size of obsolete WAL files, in bytes (gauge). | no | +| `WalPhysicalSize` | wal_physical_size is the size, in bytes, of the WAL files on disk (gauge). | no | +| `WalBytesIn` | wal_bytes_in is the number of logical bytes written to the WAL (counter). | no | +| `WalBytesWritten` | wal_bytes_written is the number of bytes written to the WAL (counter). 
| no | +| `TableObsoleteCount` | table_obsolete_count is the number of tables which are no longer referenced by the current DB state or any open iterators (gauge). | no | +| `TableObsoleteSize` | table_obsolete_size is the size, in bytes, of obsolete tables (gauge). | no | +| `TableZombieCount` | table_zombie_count is the number of tables no longer referenced by the current DB state, but are still in use by an open iterator (gauge). | no | +| `TableZombieSize` | table_zombie_size is the size, in bytes, of zombie tables (gauge). | no | +| `RangeKeySetsCount` | range_key_sets_count is the approximate count of internal range key sets in the store. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Telemetry events + + + +Events in this category are logged to the `TELEMETRY` channel. + + +### `captured_index_usage_stats` + +An event of type `captured_index_usage_stats` + + +| Field | Description | Sensitive | +|--|--|--| +| `TotalReadCount` | TotalReadCount is the number of times the index has been read. | no | +| `LastRead` | LastRead is the timestamp at which the index was last read. | no | +| `TableID` | TableID is the ID of the table on which the index was created. This is same as descpb.TableID and is unique within the cluster. | no | +| `IndexID` | IndexID is the ID of the index within the scope of the given table. | no | +| `DatabaseName` | DatabaseName is the name of the database in which the index was created. | no | +| `TableName` | TableName is the name of the table on which the index was created. | no | +| `IndexName` | IndexName is the name of the index within the scope of the given table. | no | +| `IndexType` | IndexType is the type of the index. Index types include "primary" and "secondary". | no | +| `IsUnique` | IsUnique indicates if the index has a UNIQUE constraint. 
| no | +| `IsInverted` | IsInverted indicates if the index is an inverted index. | no | +| `CreatedAt` | CreatedAt is the timestamp at which the index was created. | no | +| `SchemaName` | SchemaName is the name of the schema in which the index was created. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `changefeed_emitted_bytes` + +An event of type `changefeed_emitted_bytes` is an event representing the bytes emitted by a changefeed over an interval. + + +| Field | Description | Sensitive | +|--|--|--| +| `JobId` | The job id for enterprise changefeeds. | no | +| `EmittedBytes` | The number of bytes emitted. | no | +| `EmittedMessages` | The number of messages emitted. | no | +| `LoggingInterval` | The time period in nanoseconds between emitting telemetry events of this type (per-aggregator). | no | +| `Closing` | Flag to indicate that the changefeed is closing. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | + +### `changefeed_failed` + +An event of type `changefeed_failed` is an event for any Changefeed failure since the plan hook +was triggered. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `FailureType` | The reason / environment with which the changefeed failed (ex: connection_closed, changefeed_behind) | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | + +### `create_changefeed` + +An event of type `create_changefeed` is an event for any CREATE CHANGEFEED query that +successfully starts running. Failed CREATE statements will show up as +ChangefeedFailed events. + + +| Field | Description | Sensitive | +|--|--|--| +| `Transformation` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | + +### `hot_ranges_stats` + +An event of type `hot_ranges_stats` + + +| Field | Description | Sensitive | +|--|--|--| +| `RangeID` | | no | +| `Qps` | | no | +| `DatabaseName` | DatabaseName is the name of the database in which the index was created. 
| yes | +| `TableName` | TableName is the name of the table on which the index was created. | yes | +| `IndexName` | IndexName is the name of the index within the scope of the given table. | yes | +| `SchemaName` | SchemaName is the name of the schema in which the index was created. | yes | +| `LeaseholderNodeID` | LeaseholderNodeID indicates the Node ID that is the current leaseholder for the given range. | no | +| `WritesPerSecond` | Writes per second is the recent number of keys written per second on this range. | no | +| `ReadsPerSecond` | Reads per second is the recent number of keys read per second on this range. | no | +| `WriteBytesPerSecond` | Write bytes per second is the recent number of bytes written per second on this range. | no | +| `ReadBytesPerSecond` | Read bytes per second is the recent number of bytes read per second on this range. | no | +| `CPUTimePerSecond` | CPU time per second is the recent cpu usage in nanoseconds of this range. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `m_v_c_c_iterator_stats` + +Internal storage iteration statistics for a single execution. + + +| Field | Description | Sensitive | +|--|--|--| +| `StepCount` | StepCount collects the number of times the iterator moved forward or backward over the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `StepCountInternal` | StepCountInternal collects the number of times the iterator moved forward or backward over LSM internal keys. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `SeekCount` | SeekCount collects the number of times the iterator moved to a specific key/value pair in the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. 
| no | +| `SeekCountInternal` | SeekCountInternal collects the number of times the iterator moved to a specific LSM internal key. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `BlockBytes` | BlockBytes collects the bytes in the loaded SSTable data blocks. For details, see pebble.InternalIteratorStats. | no | +| `BlockBytesInCache` | BlockBytesInCache collects the subset of BlockBytes in the block cache. For details, see pebble.InternalIteratorStats. | no | +| `KeyBytes` | KeyBytes collects the bytes in keys that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `ValueBytes` | ValueBytes collects the bytes in values that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `PointCount` | PointCount collects the count of point keys iterated over. For details, see pebble.InternalIteratorStats. | no | +| `PointsCoveredByRangeTombstones` | PointsCoveredByRangeTombstones collects the count of point keys that were iterated over that were covered by range tombstones. For details, see pebble.InternalIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeyCount` | RangeKeyCount collects the count of range keys encountered during iteration. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeyContainedPoints` | RangeKeyContainedPoints collects the count of point keys encountered within the bounds of a range key. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeySkippedPoints` | RangeKeySkippedPoints collects the count of the subset of ContainedPoints point keys that were skipped during iteration due to range-key masking. For details, see pkg/storage/engine.go, pebble.RangeKeyIteratorStats, and docs/tech-notes/mvcc-range-tombstones.md. 
| no | + + + +### `recovery_event` + +An event of type `recovery_event` is an event that is logged on every invocation of BACKUP, +RESTORE, and on every BACKUP schedule creation, with the appropriate subset +of fields populated depending on the type of event. This event is also +logged whenever a BACKUP and RESTORE job completes or fails. + + +| Field | Description | Sensitive | +|--|--|--| +| `RecoveryType` | RecoveryType is the type of recovery described by this event, which is one of - backup - scheduled_backup - create_schedule - restore

It can also be a job event corresponding to the recovery, which is one of - backup_job - scheduled_backup_job - restore_job | no | +| `TargetScope` | TargetScope is the largest scope of the targets that the user is backing up or restoring based on the following order: table < schema < database < full cluster. | no | +| `IsMultiregionTarget` | IsMultiregionTarget is true if any of the targets contain objects with multi-region primitives. | no | +| `TargetCount` | TargetCount is the number of targets in the BACKUP/RESTORE. | no | +| `DestinationSubdirType` | DestinationSubdirType is - latest: if using the latest subdir - standard: if using a date-based subdir - custom: if using a custom subdir that's not date-based | no | +| `DestinationStorageTypes` | DestinationStorageTypes are the types of storage that the user is backing up to or restoring from. | no | +| `DestinationAuthTypes` | DestinationAuthTypes are the types of authentication methods that the user is using to access the destination storage. | no | +| `IsLocalityAware` | IsLocalityAware indicates if the BACKUP or RESTORE is locality aware. | no | +| `AsOfInterval` | AsOfInterval is the time interval in nanoseconds between the statement timestamp and the timestamp resolved by the AS OF SYSTEM TIME expression. The interval is expressed in nanoseconds. | no | +| `WithRevisionHistory` | WithRevisionHistory is true if the BACKUP includes revision history. | no | +| `HasEncryptionPassphrase` | HasEncryptionPassphrase is true if the user provided an encryption passphrase to encrypt/decrypt their backup. | no | +| `KMSType` | KMSType is the type of KMS the user is using to encrypt/decrypt their backup. | no | +| `KMSCount` | KMSCount is the number of KMS the user is using. | no | +| `Options` | Options contain all the names of the options specified by the user in the BACKUP or RESTORE statement. For options that are accompanied by a value, only those with non-empty values will be present.

It's important to note that there are no option values anywhere in the event payload. Future changes to telemetry should refrain from adding values to the payload unless they are properly redacted. | no | +| `DebugPauseOn` | DebugPauseOn is the type of event that the restore should pause on for debugging purposes. Currently only "error" is supported. | no | +| `JobID` | JobID is the ID of the BACKUP/RESTORE job. | no | +| `ResultStatus` | ResultStatus indicates whether the job succeeded or failed. | no | +| `ErrorText` | ErrorText is the text of the error that caused the job to fail. | partially | +| `RecurringCron` | RecurringCron is the crontab for the incremental backup. | no | +| `FullBackupCron` | FullBackupCron is the crontab for the full backup. | no | +| `CustomFirstRunTime` | CustomFirstRunTime is the timestamp for the user configured first run time. Expressed as nanoseconds since the Unix epoch. | no | +| `OnExecutionFailure` | OnExecutionFailure describes the desired behavior if the schedule fails to execute. | no | +| `OnPreviousRunning` | OnPreviousRunning describes the desired behavior if the previously scheduled BACKUP is still running. | no | +| `IgnoreExistingBackup` | IgnoreExistingBackup is true iff the BACKUP schedule should still be created even if a backup is already present in its destination. | no | +| `ApplicationName` | The application name for the session where recovery event was created. | no | +| `NumRows` | NumRows is the number of rows successfully imported, backed up or restored. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `sampled_exec_stats` + +An event of type `sampled_exec_stats` contains execution statistics that apply to both statements +and transactions. These stats as a whole are collected using a sampling approach. 
+These exec stats are meant to contain the same fields as ExecStats in +apps_stats.proto but are for a single execution rather than aggregated executions. +Fields in this struct should be updated in sync with apps_stats.proto. + + +| Field | Description | Sensitive | +|--|--|--| +| `NetworkBytes` | NetworkBytes collects the number of bytes sent over the network. | no | +| `MaxMemUsage` | MaxMemUsage collects the maximum memory usage that occurred on a node. | no | +| `ContentionTime` | ContentionTime collects the time in seconds statements in the transaction spent contending. | no | +| `NetworkMessages` | NetworkMessages collects the number of messages that were sent over the network. | no | +| `MaxDiskUsage` | MaxDiskUsage collects the maximum temporary disk usage that occurred. This is set in cases where a query had to spill to disk, e.g. when performing a large sort where not all of the tuples fit in memory. | no | +| `CPUSQLNanos` | CPUSQLNanos collects the CPU time spent executing SQL operations in nanoseconds. Currently, it is only collected for statements without mutations that have a vectorized plan. | no | +| `MVCCIteratorStats` | Internal storage iteration statistics. | yes | + + + +### `sampled_query` + +An event of type `sampled_query` is the SQL query event logged to the telemetry channel. It +contains common SQL event/execution details. + + +| Field | Description | Sensitive | +|--|--|--| +| `SkippedQueries` | skipped_queries indicate how many SQL statements were not considered for sampling prior to this one. If the field is omitted, or its value is zero, this indicates that no statement was omitted since the last event. | no | +| `CostEstimate` | Cost of the query as estimated by the optimizer. | no | +| `Distribution` | The distribution of the DistSQL query plan (local, full, or partial). | no | +| `PlanGist` | The query's plan gist bytes as a base64 encoded string. | no | +| `SessionID` | SessionID is the ID of the session that initiated the query. 
| no | +| `Database` | Name of the database that initiated the query. | no | +| `StatementID` | Statement ID of the query. | no | +| `TransactionID` | Transaction ID of the query. | no | +| `MaxFullScanRowsEstimate` | Maximum number of rows scanned by a full scan, as estimated by the optimizer. | no | +| `TotalScanRowsEstimate` | Total number of rows read by all scans in the query, as estimated by the optimizer. | no | +| `OutputRowsEstimate` | The number of rows output by the query, as estimated by the optimizer. | no | +| `StatsAvailable` | Whether table statistics were available to the optimizer when planning the query. | no | +| `NanosSinceStatsCollected` | The maximum number of nanoseconds that have passed since stats were collected on any table scanned by this query. | no | +| `BytesRead` | The number of bytes read from disk. | no | +| `RowsRead` | The number of rows read from disk. | no | +| `RowsWritten` | The number of rows written. | no | +| `InnerJoinCount` | The number of inner joins in the query plan. | no | +| `LeftOuterJoinCount` | The number of left (or right) outer joins in the query plan. | no | +| `FullOuterJoinCount` | The number of full outer joins in the query plan. | no | +| `SemiJoinCount` | The number of semi joins in the query plan. | no | +| `AntiJoinCount` | The number of anti joins in the query plan. | no | +| `IntersectAllJoinCount` | The number of intersect all joins in the query plan. | no | +| `ExceptAllJoinCount` | The number of except all joins in the query plan. | no | +| `HashJoinCount` | The number of hash joins in the query plan. | no | +| `CrossJoinCount` | The number of cross joins in the query plan. | no | +| `IndexJoinCount` | The number of index joins in the query plan. | no | +| `LookupJoinCount` | The number of lookup joins in the query plan. | no | +| `MergeJoinCount` | The number of merge joins in the query plan. | no | +| `InvertedJoinCount` | The number of inverted joins in the query plan. 
| no | +| `ApplyJoinCount` | The number of apply joins in the query plan. | no | +| `ZigZagJoinCount` | The number of zig zag joins in the query plan. | no | +| `ContentionNanos` | The duration of time in nanoseconds that the query experienced contention. | no | +| `Regions` | The regions of the nodes where SQL processors ran. | no | +| `NetworkBytesSent` | The number of network bytes sent by nodes for this query. | no | +| `MaxMemUsage` | The maximum amount of memory usage by nodes for this query. | no | +| `MaxDiskUsage` | The maximum amount of disk usage by nodes for this query. | no | +| `KVBytesRead` | The number of bytes read at the KV layer for this query. | no | +| `KVPairsRead` | The number of key-value pairs read at the KV layer for this query. | no | +| `KVRowsRead` | The number of rows read at the KV layer for this query. | no | +| `NetworkMessages` | The number of network messages sent by nodes for this query. | no | +| `IndexRecommendations` | Generated index recommendations for this query. | no | +| `ScanCount` | The number of scans in the query plan. | no | +| `ScanWithStatsCount` | The number of scans using statistics (including forecasted statistics) in the query plan. | no | +| `ScanWithStatsForecastCount` | The number of scans using forecasted statistics in the query plan. | no | +| `TotalScanRowsWithoutForecastsEstimate` | Total number of rows read by all scans in the query, as estimated by the optimizer without using forecasts. | no | +| `NanosSinceStatsForecasted` | The greatest quantity of nanoseconds that have passed since the forecast time (or until the forecast time, if it is in the future, in which case it will be negative) for any table with forecasted stats scanned by this query. | no | +| `Indexes` | The list of indexes used by this query. | no | +| `CpuTimeNanos` | Collects the cumulative CPU time spent executing SQL operations in nanoseconds. 
Currently, it is only collected for statements without mutations that have a vectorized plan. | no | +| `KvGrpcCalls` | The number of grpc calls done to get data from KV nodes | no | +| `KvTimeNanos` | Cumulated time spent waiting for a KV request. This includes disk IO time and potentially network time (if any of the keys are not local). | no | +| `ServiceLatencyNanos` | The time to service the query, from start of parse to end of execute. | no | +| `OverheadLatencyNanos` | The difference between service latency and the sum of parse latency + plan latency + run latency . | no | +| `RunLatencyNanos` | The time to run the query and fetch or compute the result rows. | no | +| `PlanLatencyNanos` | The time to transform the AST into a logical query plan. | no | +| `IdleLatencyNanos` | The time between statement executions in a transaction | no | +| `ParseLatencyNanos` | The time to transform the SQL string into an abstract syntax tree (AST). | no | +| `MvccStepCount` | StepCount collects the number of times the iterator moved forward or backward over the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccStepCountInternal` | StepCountInternal collects the number of times the iterator moved forward or backward over LSM internal keys. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccSeekCount` | SeekCount collects the number of times the iterator moved to a specific key/value pair in the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccSeekCountInternal` | SeekCountInternal collects the number of times the iterator moved to a specific LSM internal key. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccBlockBytes` | BlockBytes collects the bytes in the loaded SSTable data blocks. For details, see pebble.InternalIteratorStats. 
| no | +| `MvccBlockBytesInCache` | BlockBytesInCache collects the subset of BlockBytes in the block cache. For details, see pebble.InternalIteratorStats. | no | +| `MvccKeyBytes` | KeyBytes collects the bytes in keys that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccValueBytes` | ValueBytes collects the bytes in values that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccPointCount` | PointCount collects the count of point keys iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccPointsCoveredByRangeTombstones` | PointsCoveredByRangeTombstones collects the count of point keys that were iterated over that were covered by range tombstones. For details, see pebble.InternalIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeyCount` | RangeKeyCount collects the count of range keys encountered during iteration. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeyContainedPoints` | RangeKeyContainedPoints collects the count of point keys encountered within the bounds of a range key. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeySkippedPoints` | RangeKeySkippedPoints collects the count of the subset of ContainedPoints point keys that were skipped during iteration due to range-key masking. For details, see pkg/storage/engine.go, pebble.RangeKeyIteratorStats, and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `SchemaChangerMode` | SchemaChangerMode is the mode that was used to execute the schema change, if any. | no | +| `SQLInstanceIDs` | SQLInstanceIDs is a list of all the SQL instances used in this statement's execution. | no | +| `KVNodeIDs` | KVNodeIDs is a list of all the KV nodes used in this statement's execution. | no | +| `StatementFingerprintID` | Statement fingerprint ID of the query. 
| no | +| `UsedFollowerRead` | UsedFollowerRead indicates whether at least some reads were served by the follower replicas. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. 
| no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `sampled_transaction` + +An event of type `sampled_transaction` is the event logged to telemetry at the end of transaction execution. + + +| Field | Description | Sensitive | +|--|--|--| +| `User` | User is the user account that triggered the transaction. The special usernames `root` and `node` are not considered sensitive. | depends | +| `ApplicationName` | ApplicationName is the application name for the session where the transaction was executed. This is included in the event to ease filtering of logging output by application. | no | +| `TxnCounter` | TxnCounter is the sequence number of the SQL transaction inside its session. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `TransactionID` | TransactionID is the id of the transaction. | no | +| `Committed` | Committed indicates if the transaction committed successfully. We want to include this value even if it is false. | no | +| `ImplicitTxn` | ImplicitTxn indicates if the transaction was an implicit one. We want to include this value even if it is false. | no | +| `StartTimeUnixNanos` | StartTimeUnixNanos is the time the transaction was started. Expressed as unix time in nanoseconds. | no | +| `EndTimeUnixNanos` | EndTimeUnixNanos the time the transaction finished (either committed or aborted). Expressed as unix time in nanoseconds. | no | +| `ServiceLatNanos` | ServiceLatNanos is the time to service the whole transaction, from start to end of execution. | no | +| `SQLSTATE` | SQLSTATE is the SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. 
| no | +| `ErrorText` | ErrorText is the text of the error if any. | partially | +| `NumRetries` | NumRetries is the number of times the txn was retried automatically by the server. | no | +| `LastAutoRetryReason` | LastAutoRetryReason is a string containing the reason for the last automatic retry. | partially | +| `NumRows` | NumRows is the total number of rows returned across all statements. | no | +| `RetryLatNanos` | RetryLatNanos is the amount of time spent retrying the transaction. | no | +| `CommitLatNanos` | CommitLatNanos is the amount of time spent committing the transaction after all statement operations. | no | +| `IdleLatNanos` | IdleLatNanos is the amount of time spent waiting for the client to send statements while the transaction is open. | no | +| `BytesRead` | BytesRead is the number of bytes read from disk. | no | +| `RowsRead` | RowsRead is the number of rows read from disk. | no | +| `RowsWritten` | RowsWritten is the number of rows written to disk. | no | +| `SampledExecStats` | SampledExecStats is a nested field containing execution statistics. This field will be omitted if the stats were not sampled. | yes | +| `SkippedTransactions` | SkippedTransactions is the number of transactions that were skipped as part of sampling prior to this one. We only count skipped transactions when telemetry logging is enabled and the sampling mode is set to "transaction". | no | +| `TransactionFingerprintID` | TransactionFingerprintID is the fingerprint ID of the transaction. This can be used to find the transaction in the console. | no | +| `StatementFingerprintIDs` | StatementFingerprintIDs is an array of statement fingerprint IDs belonging to this transaction. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | + +### `schema_descriptor` + +An event of type `schema_descriptor` is an event for schema telemetry, whose purpose is +to take periodic snapshots of the cluster's SQL schema and publish them in +the telemetry log channel. For all intents and purposes, the data in such a +snapshot can be thought of the outer join of certain system tables: +namespace, descriptor, and at some point perhaps zones, etc. + +Snapshots are too large to conveniently be published as a single log event, +so instead they're broken down into SchemaDescriptor events which +contain the data in one record of this outer join projection. These events +are prefixed by a header (a SchemaSnapshotMetadata event). + + +| Field | Description | Sensitive | +|--|--|--| +| `SnapshotID` | SnapshotID is the unique identifier of the snapshot that this event is part of. | no | +| `ParentDatabaseID` | ParentDatabaseID matches the same key column in system.namespace. | no | +| `ParentSchemaID` | ParentSchemaID matches the same key column in system.namespace. | no | +| `Name` | Name matches the same key column in system.namespace. | no | +| `DescID` | DescID matches the 'id' column in system.namespace and system.descriptor. | no | +| `Desc` | Desc matches the 'descriptor' column in system.descriptor. Some contents of the descriptor may be redacted to prevent leaking PII. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `schema_snapshot_metadata` + +An event of type `schema_snapshot_metadata` is an event describing a schema snapshot, which +is a set of SchemaDescriptor messages sharing the same SnapshotID. + + +| Field | Description | Sensitive | +|--|--|--| +| `SnapshotID` | SnapshotID is the unique identifier of this snapshot. | no | +| `NumRecords` | NumRecords is how many SchemaDescriptor events are in the snapshot. 
| no | +| `AsOfTimestamp` | AsOfTimestamp is when the snapshot was taken. This is equivalent to the timestamp given in the AS OF SYSTEM TIME clause when querying the namespace and descriptor tables in the system database. Expressed as nanoseconds since the Unix epoch. | no | +| `Errors` | Errors records any errors encountered when post-processing this snapshot, which includes the redaction of any potential PII. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Zone config events + +Events in this category pertain to zone configuration changes on +the SQL schema or system ranges. + +When zone configs apply to individual tables or other objects in a +SQL logical schema, they are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these zone config events are preserved +in each tenant's own `system.eventlog` table. + +When they apply to cluster-level ranges (e.g., the system zone config), +they are stored in the system tenant's own `system.eventlog` table. + +Events in this category are logged to the `OPS` channel. + + +### `remove_zone_config` + +An event of type `remove_zone_config` is recorded when a zone config is removed. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. 
The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + +### `set_zone_config` + +An event of type `set_zone_config` is recorded when a zone config is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `ResolvedOldConfig` | The string representation of the resolved old zone config. This is not necessarily the same as the zone config that was previously set -- as it includes the resolved values of the zone config options. In other words, a zone config that hasn't been properly "set" yet (and inherits from its parent) will have a resolved_old_config that has details of the values it inherits from its parent. This is particularly useful to get a proper diff between the old and new zone config. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + + + + +## Enumeration types + +### `AuthFailReason` + +AuthFailReason is the inventory of possible reasons for an +authentication failure. + + +| Value | Textual alias in code or documentation | Description | +|--|--|--| +| 0 | UNKNOWN | is reported when the reason is unknown. | +| 1 | USER_RETRIEVAL_ERROR | occurs when there was an internal error accessing the principals. | +| 2 | USER_NOT_FOUND | occurs when the principal is unknown. | +| 3 | LOGIN_DISABLED | occurs when the user does not have LOGIN privileges. | +| 4 | METHOD_NOT_FOUND | occurs when no HBA rule matches or the method does not exist. | +| 5 | PRE_HOOK_ERROR | occurs when the authentication handshake encountered a protocol error. | +| 6 | CREDENTIALS_INVALID | occurs when the client-provided credentials were invalid. | +| 7 | CREDENTIALS_EXPIRED | occur when the credentials provided by the client are expired. | +| 8 | NO_REPLICATION_ROLEOPTION | occurs when the connection requires a replication role option, but the user does not have it. 
| + + + diff --git a/src/current/_includes/cockroach-generated/release-24.2/logformats.md b/src/current/_includes/cockroach-generated/release-24.2/logformats.md new file mode 100644 index 00000000000..89580c9fc15 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-24.2/logformats.md @@ -0,0 +1,382 @@ + +The supported log output formats are documented below. + + +- [`crdb-v1`](#format-crdb-v1) + +- [`crdb-v1-count`](#format-crdb-v1-count) + +- [`crdb-v1-tty`](#format-crdb-v1-tty) + +- [`crdb-v1-tty-count`](#format-crdb-v1-tty-count) + +- [`crdb-v2`](#format-crdb-v2) + +- [`crdb-v2-tty`](#format-crdb-v2-tty) + +- [`json`](#format-json) + +- [`json-compact`](#format-json-compact) + +- [`json-fluent`](#format-json-fluent) + +- [`json-fluent-compact`](#format-json-fluent-compact) + + + +## Format `crdb-v1` + + +This is a legacy file format used from CockroachDB v1.0. + +Each log entry is emitted using a common prefix, described below, followed by: + +- The logging context tags enclosed between `[` and `]`, if any. It is possible + for this to be omitted if there were no context tags. +- Optionally, a counter column, if the option 'show-counter' is enabled. See below for details. +- the text of the log entry. + +Beware that the text of the log entry can span multiple lines. +The following caveats apply: + +- The text of the log entry can start with text enclosed between `[` and `]`. + If there were no logging tags to start with, it is not possible to distinguish between + logging context tag information and a `[...]` string in the main text of the + log entry. This means that this format is ambiguous. + + To remove this ambiguity, you can use the option 'show-counter'. + +- The text of the log entry can embed arbitrary application-level strings, + including strings that represent log entries. 
In particular, an accident + of implementation can cause the common entry prefix (described below) + to also appear on a line of its own, as part of the payload of a previous + log entry. There is no automated way to recognize when this occurs. + Care must be taken by a human observer to recognize these situations. + +- The log entry parser provided by CockroachDB to read log files is faulty + and is unable to recognize the aforementioned pitfall; nor can it read + entries larger than 64KiB successfully. Generally, use of this internal + log entry parser is discouraged for entries written with this format. + +See the newer format `crdb-v2` for an alternative +without these limitations. + +### Header lines + +At the beginning of each file, a header is printed using a similar format as +regular log entries. This header reports when the file was created, +which parameters were used to start the server, the server identifiers +if known, and other metadata about the running process. + +- This header appears to be logged at severity `INFO` (with an `I` prefix + at the start of the line) even though it does not really have a severity. +- The header is printed unconditionally even when a filter is configured to + omit entries at the `INFO` level. + +### Common log entry prefix + +Each line of output starts with the following prefix: + + Lyymmdd hh:mm:ss.uuuuuu goid [chan@]file:line marker tags counter + +Reminder, the tags may be omitted; and the counter is only printed if the option +'show-counter' is specified. + +| Field | Description | +|-----------------|--------------------------------------------------------------------------------------------------------------------------------------| +| L | A single character, representing the [log level](logging.html#logging-levels-severities) (e.g., `I` for `INFO`). | +| yy | The year (zero padded; i.e., 2016 is `16`). | +| mm | The month (zero padded; i.e., May is `05`). | +| dd | The day (zero padded). 
| +| hh:mm:ss.uuuuuu | Time in hours, minutes and fractional seconds. Timezone is UTC. | +| goid | The goroutine id (omitted if zero for use by tests). | +| chan | The channel number (omitted if zero for backward compatibility). | +| file | The file name where the entry originated. | +| line | The line number where the entry originated. | +| marker | Redactability marker "⋮" (see below for details). | +| tags | The logging tags, enclosed between `[` and `]`. May be absent. | +| counter | The entry counter. Only included if 'show-counter' is enabled. | + +The redactability marker can be empty; in this case, its position in the common prefix is +a double ASCII space character which can be used to reliably identify this situation. + +If the marker "⋮" is present, the remainder of the log entry +contains delimiters (‹...›) around +fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) when log redaction is requested. + + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `show-counter` | Whether to include the counter column in the line header. Without it, the format may be ambiguous due to the optionality of tags. | +| `colors` | The color profile to use. Possible values: none, auto, ansi, 256color. Default is auto. | +| `timezone` | The timezone to use for the timestamp column. The value can be any timezone name recognized by the Go standard library. 
Default is `UTC` | + + + +## Format `crdb-v1-count` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: true` +- `colors: none` + + +## Format `crdb-v1-tty` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: false` +- `colors: auto` + + +## Format `crdb-v1-tty-count` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: true` +- `colors: auto` + + +## Format `crdb-v2` + +This is the main file format used from CockroachDB v21.1. + +Each log entry is emitted using a common prefix, described below, +followed by the text of the log entry. + +### Entry format + +Each line of output starts with the following prefix: + + Lyymmdd hh:mm:ss.uuuuuu goid [chan@]file:line marker [tags...] counter cont + +| Field | Description | +|-----------------|--------------------------------------------------------------------------------------------------------------------------------------| +| L | A single character, representing the [log level](logging.html#logging-levels-severities) (e.g., `I` for `INFO`). | +| yy | The year (zero padded; i.e., 2016 is `16`). | +| mm | The month (zero padded; i.e., May is `05`). | +| dd | The day (zero padded). | +| hh:mm:ss.uuuuuu | Time in hours, minutes and fractional seconds. Timezone is UTC. | +| goid | The goroutine id (zero when cannot be determined). | +| chan | The channel number (omitted if zero for backward compatibility). | +| file | The file name where the entry originated. Also see below. | +| line | The line number where the entry originated. | +| marker | Redactability marker "⋮" (see below for details). | +| tags | The logging tags, enclosed between `[` and `]`. See below. | +| counter | The optional entry counter (see below for details). | +| cont | Continuation mark for structured and multi-line entries. See below. 
| + +The `chan@` prefix before the file name indicates the logging channel, +and is omitted if the channel is `DEV`. + +The file name may be prefixed by the string `(gostd) ` to indicate +that the log entry was produced inside the Go standard library, instead +of a CockroachDB component. Entry parsers must be configured to ignore this prefix +when present. + +`marker` can be empty; in this case, its position in the common prefix is +a double ASCII space character which can be used to reliably identify this situation. +If the marker "⋮" is present, the remainder of the log entry +contains delimiters (‹...›) +around fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) +when log redaction is requested. + +The logging `tags` are enclosed between square brackets `[...]`, +and the syntax `[-]` is used when there are no logging tags +associated with the log entry. + +`counter` is numeric, and is incremented for every +log entry emitted to this sink. (There is thus one counter sequence per +sink.) For entries that do not have a counter value +associated (e.g., header entries in file sinks), the counter position +in the common prefix is empty: `tags` is then +followed by two ASCII space characters, instead of one space; the `counter`, +and another space. The presence of the two ASCII spaces indicates +reliably that no counter was present. + +`cont` is a format/continuation indicator: + +| Continuation indicator | ASCII | Description | +|------------------------|-------|--| +| space | 0x20 | Start of an unstructured entry. | +| equal sign, "=" | 0x3d | Start of a structured entry. | +| exclamation mark, "!" | 0x21 | Start of an embedded stack trace. | +| plus sign, "+" | 0x2b | Continuation of a multi-line entry. The payload contains a newline character at this position. | +| vertical bar | 0x7c | Continuation of a large entry. 
| + +### Examples + +Example single-line unstructured entry: + +~~~ +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 started with engine type ‹2› +~~~ + +Example multi-line unstructured entry: + +~~~ +I210116 21:49:17.083093 14 1@cli/start.go:690 ⋮ [-] 40 node startup completed: +I210116 21:49:17.083093 14 1@cli/start.go:690 ⋮ [-] 40 +CockroachDB node starting at 2021-01-16 21:49 (took 0.0s) +~~~ + +Example structured entry: + +~~~ +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 ={"Timestamp":1610833757080706620,"EventType":"node_restart"} +~~~ + +Example long entries broken up into multiple lines: + +~~~ +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.... +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 |aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +~~~ + +~~~ +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 ={"Timestamp":1610833757080706620,"EventTy... +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 |pe":"node_restart"} +~~~ + +### Backward-compatibility notes + +Entries in this format can be read by most `crdb-v1` log parsers, +in particular the one included in the DB console and +also the [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) +facility. + +However, implementers of previous version parsers must +understand that the logging tags field is now always +included, and the lack of logging tags is included +by a tag string set to `[-]`. + +Likewise, the entry counter is now also always included, +and there is a special character after `counter` +to indicate whether the remainder of the line is a +structured entry, or a continuation of a previous entry. + +Finally, in the previous format, structured entries +were prefixed with the string `Structured entry: `. In +the new format, they are prefixed by the `=` continuation +indicator. 
+ + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `colors` | The color profile to use. Possible values: none, auto, ansi, 256color. Default is auto. | +| `timezone` | The timezone to use for the timestamp column. The value can be any timezone name recognized by the Go standard library. Default is `UTC` | + + + +## Format `crdb-v2-tty` + +This format name is an alias for 'crdb-v2' with +the following format option defaults: + +- `colors: auto` + + +## Format `json` + +This format emits log entries as a JSON payload. + +The JSON object is guaranteed to not contain unescaped newlines +or other special characters, and the entry as a whole is followed +by a newline character. This makes the format suitable for +processing over a stream unambiguously. + +Each entry contains at least the following fields: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `tag` | `tag` | (Only if the option `fluent-tag: true` is given.) A Fluent tag for the event, formed by the process name and the logging channel. | +| `d` | `datetime` | The pretty-printed date/time of the event timestamp, if enabled via options. | +| `f` | `file` | The name of the source file where the event was emitted. | +| `g` | `goroutine` | The identifier of the goroutine where the event was emitted. | +| `l` | `line` | The line number where the event was emitted in the source. | +| `r` | `redactable` | Whether the payload is redactable (see below for details). | +| `t` | `timestamp` | The timestamp at which the event was emitted on the logging channel. | +| `v` | `version` | The binary version with which the event was generated. 
| + + +After a couple of *header* entries written at the beginning of each log sink, +all subsequent log entries also contain the following fields: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `C` | `channel` | The name of the logging channel where the event was sent. | +| `sev` | `severity` | The severity of the event. | +| `c` | `channel_numeric` | The numeric identifier for the logging channel where the event was sent. | +| `n` | `entry_counter` | The entry number on this logging sink, relative to the last process restart. | +| `s` | `severity_numeric` | The numeric value of the severity of the event. | + + +Additionally, the following fields are conditionally present: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `N` | `node_id` | The node ID where the event was generated, once known. Only reported for single-tenant or KV servers. | +| `x` | `cluster_id` | The cluster ID where the event was generated, once known. Only reported for single-tenant or KV servers. | +| `q` | `instance_id` | The SQL instance ID where the event was generated, once known. | +| `T` | `tenant_id` | The SQL tenant ID where the event was generated, once known. | +| `V` | `tenant_name` | The SQL virtual cluster where the event was generated, once known. | +| `tags` | `tags` | The logging context tags for the entry, if there were context tags. | +| `message` | `message` | For unstructured events, the flat text payload. | +| `event` | `event` | The logging event, if structured (see below for details). | +| `stacks` | `stacks` | Goroutine stacks, for fatal events. | + +When an entry is structured, the `event` field maps to a dictionary +whose structure is one of the documented structured events. 
See the [reference documentation](eventlog.html) +for structured events for a list of possible payloads. + +When the entry is marked as `redactable`, the `tags`, `message`, and/or `event` payloads +contain delimiters (‹...›) around +fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) when log redaction is requested. + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `datetime-format` | The format to use for the `datetime` field. The value can be one of `none`, `iso8601`/`rfc3339` (synonyms), or `rfc1123`. Default is `none`. | +| `datetime-timezone` | The timezone to use for the `datetime` field. The value can be any timezone name recognized by the Go standard library. Default is `UTC` | +| `tag-style` | The tags to include in the envelope. The value can be `compact` (one letter tags) or `verbose` (long-form tags). Default is `verbose`. | +| `fluent-tag` | Whether to produce an additional field called `tag` for Fluent compatibility. Default is `false`. 
| + + + +## Format `json-compact` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: false` +- `tag-style: compact` + + +## Format `json-fluent` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: true` +- `tag-style: verbose` + + +## Format `json-fluent-compact` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: true` +- `tag-style: compact` + + diff --git a/src/current/_includes/cockroach-generated/release-24.2/logging.md b/src/current/_includes/cockroach-generated/release-24.2/logging.md new file mode 100644 index 00000000000..8b628dc2dd9 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-24.2/logging.md @@ -0,0 +1,179 @@ +## Logging levels (severities) + +### INFO + +The `INFO` severity is used for informational messages that do not +require action. + +### WARNING + +The `WARNING` severity is used for situations which may require special handling, +where normal operation is expected to resume automatically. + +### ERROR + +The `ERROR` severity is used for situations that require special handling, +where normal operation could not proceed as expected. +Other operations can continue mostly unaffected. + +### FATAL + +The `FATAL` severity is used for situations that require an immediate, hard +server shutdown. A report is also sent to telemetry if telemetry +is enabled. + + +## Logging channels + +### `DEV` + +The `DEV` channel is used during development to collect log +details useful for troubleshooting that fall outside the +scope of other channels. It is also the default logging +channel for events not associated with a channel. + +This channel is special in that there are no constraints as to +what may or may not be logged on it. 
Conversely, users in +production deployments are invited to not collect `DEV` logs in +centralized logging facilities, because they likely contain +sensitive operational data. +See [Configure logs](configure-logs.html#dev-channel). + +### `OPS` + +The `OPS` channel is used to report "point" operational events, +initiated by user operators or automation: + + - Operator or system actions on server processes: process starts, + stops, shutdowns, crashes (if they can be logged), + including each time: command-line parameters, current version being run + - Actions that impact the topology of a cluster: node additions, + removals, decommissions, etc. + - Job-related initiation or termination + - [Cluster setting](cluster-settings.html) changes + - [Zone configuration](configure-replication-zones.html) changes + +### `HEALTH` + +The `HEALTH` channel is used to report "background" operational +events, initiated by CockroachDB or reporting on automatic processes: + + - Current resource usage, including critical resource usage + - Node-node connection events, including connection errors and + gossip details + - Range and table leasing events + - Up- and down-replication, range unavailability + +### `STORAGE` + +The `STORAGE` channel is used to report low-level storage +layer events (RocksDB/Pebble). + +### `SESSIONS` + +The `SESSIONS` channel is used to report client network activity when enabled via +the `server.auth_log.sql_connections.enabled` and/or +`server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): + + - Connections opened/closed + - Authentication events: logins, failed attempts + - Session and query cancellation + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. 
+ +### `SQL_SCHEMA` + +The `SQL_SCHEMA` channel is used to report changes to the +SQL logical schema, excluding privilege and ownership changes +(which are reported separately on the `PRIVILEGES` channel) and +zone configuration changes (which go to the `OPS` channel). + +This includes: + + - Database/schema/table/sequence/view/type creation + - Adding/removing/changing table columns + - Changing sequence parameters + +`SQL_SCHEMA` events generally comprise changes to the schema that affect the +functional behavior of client apps using stored objects. + +### `USER_ADMIN` + +The `USER_ADMIN` channel is used to report changes +in users and roles, including: + + - Users added/dropped + - Changes to authentication credentials (e.g., passwords, validity, etc.) + - Role grants/revocations + - Role option grants/revocations + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. + +### `PRIVILEGES` + +The `PRIVILEGES` channel is used to report data +authorization changes, including: + + - Privilege grants/revocations on database, objects, etc. + - Object ownership changes + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. + +### `SENSITIVE_ACCESS` + +The `SENSITIVE_ACCESS` channel is used to report SQL +data access to sensitive data: + + - Data access audit events (when table audit is enabled via + [ALTER TABLE ... EXPERIMENTAL_AUDIT](alter-table.html#experimental_audit)) + - Data access audit events (when role-based audit is enabled via + [`sql.log.user_audit` cluster setting](role-based-audit-logging.html#syntax-of-audit-settings)) + - SQL statements executed by users with the admin role + - Operations that write to system tables + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. 
+ +### `SQL_EXEC` + +The `SQL_EXEC` channel is used to report SQL execution on +behalf of client connections: + + - Logical SQL statement executions (when enabled via the + `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) + - uncaught Go panic errors during the execution of a SQL statement. + +### `SQL_PERF` + +The `SQL_PERF` channel is used to report SQL executions +that are marked as "out of the ordinary" +to facilitate performance investigations. +This includes the SQL "slow query log". + +Arguably, this channel overlaps with `SQL_EXEC`. +However, we keep both channels separate for backward compatibility +with versions prior to v21.1, where the corresponding events +were redirected to separate files. + +### `SQL_INTERNAL_PERF` + +The `SQL_INTERNAL_PERF` channel is like the `SQL_PERF` channel, but is aimed at +helping developers of CockroachDB itself. It exists as a separate +channel so as to not pollute the `SQL_PERF` logging output with +internal troubleshooting details. + +### `TELEMETRY` + +The `TELEMETRY` channel reports telemetry events. Telemetry events describe +feature usage within CockroachDB and anonymizes any application- +specific data. + +### `KV_DISTRIBUTION` + +The `KV_DISTRIBUTION` channel is used to report data distribution events, such as moving +replicas between stores in the cluster, or adding (removing) replicas to +ranges. 
+ diff --git a/src/current/_includes/cockroach-generated/release-24.2/settings/settings.html b/src/current/_includes/cockroach-generated/release-24.2/settings/settings.html new file mode 100644 index 00000000000..d848f855e51 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-24.2/settings/settings.html @@ -0,0 +1,315 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
SettingTypeDefaultDescriptionSupported Deployments
admission.disk_bandwidth_tokens.elastic.enabled
booleantruewhen true, and provisioned bandwidth for the disk corresponding to a store is configured, tokens for elastic work will be limited if disk bandwidth becomes a bottleneckDedicated/Self-Hosted
admission.epoch_lifo.enabled
booleanfalsewhen true, epoch-LIFO behavior is enabled when there is significant delay in admissionServerless/Dedicated/Self-Hosted
admission.epoch_lifo.epoch_closing_delta_duration
duration5msthe delta duration before closing an epoch, for epoch-LIFO admission control orderingServerless/Dedicated/Self-Hosted
admission.epoch_lifo.epoch_duration
duration100msthe duration of an epoch, for epoch-LIFO admission control orderingServerless/Dedicated/Self-Hosted
admission.epoch_lifo.queue_delay_threshold_to_switch_to_lifo
duration105msthe queue delay encountered by a (tenant,priority) for switching to epoch-LIFO orderingServerless/Dedicated/Self-Hosted
admission.kv.enabled
booleantruewhen true, work performed by the KV layer is subject to admission controlDedicated/Self-Hosted
admission.sql_kv_response.enabled
booleantruewhen true, work performed by the SQL layer when receiving a KV response is subject to admission controlServerless/Dedicated/Self-Hosted
admission.sql_sql_response.enabled
booleantruewhen true, work performed by the SQL layer when receiving a DistSQL response is subject to admission controlServerless/Dedicated/Self-Hosted
bulkio.backup.deprecated_full_backup_with_subdir.enabled
booleanfalsewhen true, a backup command with a user specified subdirectory will create a full backup at the subdirectory if no backup already exists at that subdirectoryServerless/Dedicated/Self-Hosted
bulkio.backup.file_size
byte size128 MiBtarget size for individual data files produced during BACKUPServerless/Dedicated/Self-Hosted
bulkio.backup.read_timeout
duration5m0samount of time after which a read attempt is considered timed out, which causes the backup to failServerless/Dedicated/Self-Hosted
bulkio.backup.read_with_priority_after
duration1m0samount of time since the read-as-of time above which a BACKUP should use priority when retrying readsServerless/Dedicated/Self-Hosted
physical_replication.consumer.minimum_flush_interval
(alias: bulkio.stream_ingestion.minimum_flush_interval)
duration5sthe minimum timestamp between flushes; flushes may still occur if internal buffers fill upDedicated/Self-Hosted
changefeed.aggregator.flush_jitter
float0.1jitter aggregator flushes as a fraction of min_checkpoint_frequency. This setting has no effect if min_checkpoint_frequency is set to 0.Serverless/Dedicated/Self-Hosted
changefeed.backfill.concurrent_scan_requests
integer0number of concurrent scan requests per node issued during a backfillServerless/Dedicated/Self-Hosted
changefeed.backfill.scan_request_size
integer524288the maximum number of bytes returned by each scan requestServerless/Dedicated/Self-Hosted
changefeed.batch_reduction_retry.enabled
(alias: changefeed.batch_reduction_retry_enabled)
booleanfalseif true, kafka changefeeds upon erroring on an oversized batch will attempt to resend the messages with progressively lower batch sizesServerless/Dedicated/Self-Hosted
changefeed.default_range_distribution_strategy
enumerationdefaultconfigures how work is distributed among nodes for a given changefeed. for the most balanced distribution, use `balanced_simple`. changing this setting will not override locality restrictions [default = 0, balanced_simple = 1]Serverless/Dedicated/Self-Hosted
changefeed.event_consumer_worker_queue_size
integer16if changefeed.event_consumer_workers is enabled, this setting sets the maximum number of events which a worker can bufferServerless/Dedicated/Self-Hosted
changefeed.event_consumer_workers
integer0the number of workers to use when processing events: <0 disables, 0 assigns a reasonable default, >0 assigns the setting value. for experimental/core changefeeds and changefeeds using parquet format, this is disabledServerless/Dedicated/Self-Hosted
changefeed.fast_gzip.enabled
booleantrueuse fast gzip implementationServerless/Dedicated/Self-Hosted
changefeed.frontier_highwater_lag_checkpoint_threshold
duration10m0scontrols the maximum the high-water mark is allowed to lag behind the leading spans of the frontier before per-span checkpointing is enabled; if 0, checkpointing due to high-water lag is disabledServerless/Dedicated/Self-Hosted
changefeed.memory.per_changefeed_limit
byte size512 MiBcontrols amount of data that can be buffered per changefeedServerless/Dedicated/Self-Hosted
changefeed.min_highwater_advance
duration0sminimum amount of time the changefeed high water mark must advance for it to be eligible for checkpointing; Default of 0 will checkpoint every time frontier advances, as long as the rate of checkpointing keeps up with the rate of frontier changesServerless/Dedicated/Self-Hosted
changefeed.node_throttle_config
stringspecifies node level throttling configuration for all changefeeedsServerless/Dedicated/Self-Hosted
changefeed.protect_timestamp.max_age
duration96h0m0sfail the changefeed if the protected timestamp age exceeds this threshold; 0 disables expirationServerless/Dedicated/Self-Hosted
changefeed.protect_timestamp_interval
duration10m0scontrols how often the changefeed forwards its protected timestamp to the resolved timestampServerless/Dedicated/Self-Hosted
changefeed.schema_feed.read_with_priority_after
duration1m0sretry with high priority if we were not able to read descriptors for too long; 0 disablesServerless/Dedicated/Self-Hosted
changefeed.sink_io_workers
integer0the number of workers used by changefeeds when sending requests to the sink (currently the batching versions of webhook, pubsub, and kafka sinks that are enabled by changefeed.new_<sink type>_sink_enabled only): <0 disables, 0 assigns a reasonable default, >0 assigns the setting valueServerless/Dedicated/Self-Hosted
cloudstorage.azure.concurrent_upload_buffers
integer1controls the number of concurrent buffers that will be used by the Azure client when uploading chunks. Each buffer can buffer up to cloudstorage.write_chunk.size of memory during an uploadServerless/Dedicated/Self-Hosted
cloudstorage.http.custom_ca
stringcustom root CA (appended to system's default CAs) for verifying certificates when interacting with HTTPS storageServerless/Dedicated/Self-Hosted
cloudstorage.timeout
duration10m0sthe timeout for import/export storage operationsServerless/Dedicated/Self-Hosted
cluster.auto_upgrade.enabled
booleantruedisable automatic cluster version upgrade until resetServerless/Dedicated/Self-Hosted
cluster.organization
stringorganization nameDedicated/Self-hosted (read-write); Serverless (read-only)
cluster.preserve_downgrade_option
stringdisable (automatic or manual) cluster version upgrade from the specified version until resetServerless/Dedicated/Self-Hosted
debug.zip.redact_addresses.enabled
booleanfalseenables the redaction of hostnames and ip addresses in debug zipServerless/Dedicated/Self-Hosted
diagnostics.active_query_dumps.enabled
booleantrueexperimental: enable dumping of anonymized active queries to disk when node is under memory pressureDedicated/Self-hosted (read-write); Serverless (read-only)
diagnostics.forced_sql_stat_reset.interval
duration2h0m0sinterval after which the reported SQL Stats are reset even if not collected by telemetry reporter. It has a max value of 24H.Serverless/Dedicated/Self-Hosted
diagnostics.memory_monitoring_dumps.enabled
booleantrueenable dumping of memory monitoring state at the same time as heap profiles are takenDedicated/Self-hosted (read-write); Serverless (read-only)
diagnostics.reporting.enabled
booleantrueenable reporting diagnostic metrics to cockroach labs, but is ignored for Trial or Free licensesServerless/Dedicated/Self-Hosted
diagnostics.reporting.interval
duration1h0m0sinterval at which diagnostics data should be reportedServerless/Dedicated/Self-Hosted
enterprise.license
stringthe encoded cluster licenseDedicated/Self-hosted (read-write); Serverless (read-only)
external.graphite.endpoint
stringif nonempty, push server metrics to the Graphite or Carbon server at the specified host:portServerless/Dedicated/Self-Hosted
external.graphite.interval
duration10sthe interval at which metrics are pushed to Graphite (if enabled)Serverless/Dedicated/Self-Hosted
feature.backup.enabled
booleantrueset to true to enable backups, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.changefeed.enabled
booleantrueset to true to enable changefeeds, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.export.enabled
booleantrueset to true to enable exports, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.import.enabled
booleantrueset to true to enable imports, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.restore.enabled
booleantrueset to true to enable restore, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.schema_change.enabled
booleantrueset to true to enable schema changes, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.stats.enabled
booleantrueset to true to enable CREATE STATISTICS/ANALYZE, false to disable; default is trueServerless/Dedicated/Self-Hosted
jobs.retention_time
duration336h0m0sthe amount of time for which records for completed jobs are retainedServerless/Dedicated/Self-Hosted
kv.allocator.lease_rebalance_threshold
float0.05minimum fraction away from the mean a store's lease count can be before it is considered for lease-transfersDedicated/Self-Hosted
kv.allocator.load_based_lease_rebalancing.enabled
booleantrueset to enable rebalancing of range leases based on load and latencyDedicated/Self-Hosted
kv.allocator.load_based_rebalancing
enumerationleases and replicaswhether to rebalance based on the distribution of load across stores [off = 0, leases = 1, leases and replicas = 2]Dedicated/Self-Hosted
kv.allocator.load_based_rebalancing.objective
enumerationcpuwhat objective does the cluster use to rebalance; if set to `qps` the cluster will attempt to balance qps among stores, if set to `cpu` the cluster will attempt to balance cpu usage among stores [qps = 0, cpu = 1]Dedicated/Self-Hosted
kv.allocator.load_based_rebalancing_interval
duration1m0sthe rough interval at which each store will check for load-based lease / replica rebalancing opportunitiesDedicated/Self-Hosted
kv.allocator.qps_rebalance_threshold
float0.1minimum fraction away from the mean a store's QPS (such as queries per second) can be before it is considered overfull or underfullDedicated/Self-Hosted
kv.allocator.range_rebalance_threshold
float0.05minimum fraction away from the mean a store's range count can be before it is considered overfull or underfullDedicated/Self-Hosted
kv.allocator.store_cpu_rebalance_threshold
float0.1minimum fraction away from the mean a store's cpu usage can be before it is considered overfull or underfullDedicated/Self-Hosted
kv.bulk_io_write.max_rate
byte size1.0 TiBthe rate limit (bytes/sec) to use for writes to disk on behalf of bulk io opsDedicated/Self-Hosted
kv.bulk_sst.max_allowed_overage
byte size64 MiBif positive, allowed size in excess of target size for SSTs from export requests; export requests (i.e. BACKUP) may buffer up to the sum of kv.bulk_sst.target_size and kv.bulk_sst.max_allowed_overage in memoryDedicated/Self-Hosted
kv.bulk_sst.target_size
byte size16 MiBtarget size for SSTs emitted from export requests; export requests (i.e. BACKUP) may buffer up to the sum of kv.bulk_sst.target_size and kv.bulk_sst.max_allowed_overage in memoryDedicated/Self-hosted (read-write); Serverless (read-only)
kv.closed_timestamp.follower_reads.enabled
(alias: kv.closed_timestamp.follower_reads_enabled)
booleantrueallow (all) replicas to serve consistent historical reads based on closed timestamp informationDedicated/Self-hosted (read-write); Serverless (read-only)
kv.closed_timestamp.lead_for_global_reads_override
duration0sif nonzero, overrides the lead time that global_read ranges use to publish closed timestampsDedicated/Self-hosted (read-write); Serverless (read-only)
kv.closed_timestamp.side_transport_interval
duration200msthe interval at which the closed timestamp side-transport attempts to advance each range's closed timestamp; set to 0 to disable the side-transportDedicated/Self-hosted (read-write); Serverless (read-only)
kv.closed_timestamp.target_duration
duration3sif nonzero, attempt to provide closed timestamp notifications for timestamps trailing cluster time by approximately this durationDedicated/Self-hosted (read-write); Serverless (read-only)
kv.dist_sender.circuit_breaker.cancellation.enabled
booleantruewhen enabled, in-flight requests will be cancelled when the circuit breaker tripsServerless/Dedicated/Self-Hosted
kv.dist_sender.circuit_breaker.cancellation.write_grace_period
duration10show long after the circuit breaker trips to cancel write requests (these can't retry internally, so should be long enough to allow quorum/lease recovery)Serverless/Dedicated/Self-Hosted
kv.dist_sender.circuit_breaker.probe.interval
duration3sinterval between replica probesServerless/Dedicated/Self-Hosted
kv.dist_sender.circuit_breaker.probe.threshold
duration3sduration of errors or stalls after which a replica will be probedServerless/Dedicated/Self-Hosted
kv.dist_sender.circuit_breaker.probe.timeout
duration3stimeout for replica probesServerless/Dedicated/Self-Hosted
kv.dist_sender.circuit_breakers.mode
enumerationliveness range onlyset of ranges to trip circuit breakers for failing or stalled replicas [no ranges = 0, liveness range only = 1, all ranges = 2]Serverless/Dedicated/Self-Hosted
kv.lease_transfer_read_summary.global_budget
byte size0 Bcontrols the maximum number of bytes that will be used to summarize the global segment of the timestamp cache during lease transfers and range merges. A smaller budget will result in loss of precision.Dedicated/Self-Hosted
kv.lease_transfer_read_summary.local_budget
byte size4.0 MiBcontrols the maximum number of bytes that will be used to summarize the local segment of the timestamp cache during lease transfers and range merges. A smaller budget will result in loss of precision.Dedicated/Self-Hosted
kv.log_range_and_node_events.enabled
booleantrueset to true to transactionally log range events (e.g., split, merge, add/remove voter/non-voter) into system.rangelog and node join and restart events into system.eventlogDedicated/Self-Hosted
kv.protectedts.reconciliation.interval
duration5m0sthe frequency for reconciling jobs with protected timestamp recordsDedicated/Self-hosted (read-write); Serverless (read-only)
kv.range_split.by_load.enabled
(alias: kv.range_split.by_load_enabled)
booleantrueallow automatic splits of ranges based on where load is concentratedDedicated/Self-Hosted
kv.range_split.load_cpu_threshold
duration500msthe CPU use per second over which the range becomes a candidate for load based splittingDedicated/Self-Hosted
kv.range_split.load_qps_threshold
integer2500the QPS over which the range becomes a candidate for load based splittingDedicated/Self-Hosted
kv.rangefeed.client.stream_startup_rate
integer100controls the rate per second the client will initiate new rangefeed stream for a single range; 0 implies unlimitedServerless/Dedicated/Self-Hosted
kv.rangefeed.closed_timestamp_refresh_interval
duration3sthe interval at which closed-timestamp updates are delivered to rangefeeds; set to 0 to use kv.closed_timestamp.side_transport_intervalDedicated/Self-hosted (read-write); Serverless (read-only)
kv.rangefeed.enabled
booleanfalseif set, rangefeed registration is enabledDedicated/Self-hosted (read-write); Serverless (read-only)
kv.replica_circuit_breaker.slow_replication_threshold
duration1m0sduration after which slow proposals trip the per-Replica circuit breaker (zero duration disables breakers)Dedicated/Self-Hosted
kv.replica_stats.addsst_request_size_factor
integer50000the divisor that is applied to addsstable request sizes, then recorded in a leaseholders QPS; 0 means all requests are treated as cost 1Dedicated/Self-Hosted
kv.replication_reports.interval
duration1m0sthe frequency for generating the replication_constraint_stats, replication_stats_report and replication_critical_localities reports (set to 0 to disable)Dedicated/Self-Hosted
kv.snapshot_rebalance.max_rate
byte size32 MiBthe rate limit (bytes/sec) to use for rebalance and upreplication snapshotsDedicated/Self-Hosted
kv.snapshot_receiver.excise.enabled
booleantrueset to false to disable excises in place of range deletions for KV snapshotsDedicated/Self-Hosted
kv.transaction.max_intents_and_locks
integer0maximum count of inserts or durable locks for a single transaction, 0 to disableServerless/Dedicated/Self-Hosted
kv.transaction.max_intents_bytes
integer4194304maximum number of bytes used to track locks in transactionsServerless/Dedicated/Self-Hosted
kv.transaction.max_refresh_spans_bytes
integer4194304maximum number of bytes used to track refresh spans in serializable transactionsServerless/Dedicated/Self-Hosted
kv.transaction.randomized_anchor_key.enabled
booleanfalsedictates whether a transaction's anchor key is randomized or notServerless/Dedicated/Self-Hosted
kv.transaction.reject_over_max_intents_budget.enabled
booleanfalseif set, transactions that exceed their lock tracking budget (kv.transaction.max_intents_bytes) are rejected instead of having their lock spans imprecisely compressedServerless/Dedicated/Self-Hosted
kv.transaction.write_pipelining.locking_reads.enabled
booleantrueif enabled, transactional locking reads are pipelined through Raft consensusServerless/Dedicated/Self-Hosted
kv.transaction.write_pipelining.ranged_writes.enabled
booleantrueif enabled, transactional ranged writes are pipelined through Raft consensusServerless/Dedicated/Self-Hosted
kv.transaction.write_pipelining.enabled
(alias: kv.transaction.write_pipelining_enabled)
booleantrueif enabled, transactional writes are pipelined through Raft consensusServerless/Dedicated/Self-Hosted
kv.transaction.write_pipelining.max_batch_size
(alias: kv.transaction.write_pipelining_max_batch_size)
integer128if non-zero, defines the maximum size batch that will be pipelined through Raft consensusServerless/Dedicated/Self-Hosted
kvadmission.store.provisioned_bandwidth
byte size0 Bif set to a non-zero value, this is used as the provisioned bandwidth (in bytes/s), for each store. It can be overridden on a per-store basis using the --store flag. Note that setting the provisioned bandwidth to a positive value may enable disk bandwidth based admission control, since admission.disk_bandwidth_tokens.elastic.enabled defaults to trueDedicated/Self-Hosted
schedules.backup.gc_protection.enabled
booleantrueenable chaining of GC protection across backups run as part of a scheduleServerless/Dedicated/Self-Hosted
security.client_cert.subject_required.enabled
booleanfalsemandates a requirement for subject role to be set for db userDedicated/Self-hosted (read-write); Serverless (read-only)
security.ocsp.mode
enumerationoffuse OCSP to check whether TLS certificates are revoked. If the OCSP server is unreachable, in strict mode all certificates will be rejected and in lax mode all certificates will be accepted. [off = 0, lax = 1, strict = 2]Serverless/Dedicated/Self-Hosted
security.ocsp.timeout
duration3stimeout before considering the OCSP server unreachableServerless/Dedicated/Self-Hosted
server.auth_log.sql_connections.enabled
booleanfalseif set, log SQL client connect and disconnect events to the SESSIONS log channel (note: may hinder performance on loaded nodes)Serverless/Dedicated/Self-Hosted
server.auth_log.sql_sessions.enabled
booleanfalseif set, log verbose SQL session authentication events to the SESSIONS log channel (note: may hinder performance on loaded nodes). Session start and end events are always logged regardless of this setting; disable the SESSIONS log channel to suppress them.Serverless/Dedicated/Self-Hosted
server.authentication_cache.enabled
booleantrueenables a cache used during authentication to avoid lookups to system tables when retrieving per-user authentication-related informationServerless/Dedicated/Self-Hosted
server.child_metrics.enabled
booleanfalseenables the exporting of child metrics, additional prometheus time series with extra labelsServerless/Dedicated/Self-Hosted
server.client_cert_expiration_cache.capacity
integer1000the maximum number of client cert expirations storedServerless/Dedicated/Self-Hosted
server.clock.forward_jump_check.enabled
(alias: server.clock.forward_jump_check_enabled)
booleanfalseif enabled, forward clock jumps > max_offset/2 will cause a panicServerless/Dedicated/Self-Hosted
server.clock.persist_upper_bound_interval
duration0sthe interval between persisting the wall time upper bound of the clock. The clock does not generate a wall time greater than the persisted timestamp and will panic if it sees a wall time greater than this value. When cockroach starts, it waits for the wall time to catch-up till this persisted timestamp. This guarantees monotonic wall time across server restarts. Not setting this or setting a value of 0 disables this feature.Serverless/Dedicated/Self-Hosted
server.consistency_check.max_rate
byte size8.0 MiBthe rate limit (bytes/sec) to use for consistency checks; used in conjunction with server.consistency_check.interval to control the frequency of consistency checks. Note that setting this too high can negatively impact performance.Dedicated/Self-Hosted
server.eventlog.enabled
booleantrueif set, logged notable events are also stored in the table system.eventlogServerless/Dedicated/Self-Hosted
server.eventlog.ttl
duration2160h0m0sif nonzero, entries in system.eventlog older than this duration are periodically purgedServerless/Dedicated/Self-Hosted
server.host_based_authentication.configuration
stringhost-based authentication configuration to use during connection authenticationServerless/Dedicated/Self-Hosted
server.hot_ranges_request.node.timeout
duration5m0sthe duration allowed for a single node to return hot range data before the request is cancelled; if set to 0, there is no timeoutServerless/Dedicated/Self-Hosted
server.hsts.enabled
booleanfalseif true, HSTS headers will be sent along with all HTTP requests. The headers will contain a max-age setting of one year. Browsers honoring the header will always use HTTPS to access the DB Console. Ensure that TLS is correctly configured prior to enabling.Serverless/Dedicated/Self-Hosted
server.http.base_path
string/path to redirect the user to upon successful loginServerless/Dedicated/Self-Hosted
server.identity_map.configuration
stringsystem-identity to database-username mappingsServerless/Dedicated/Self-Hosted
server.log_gc.max_deletions_per_cycle
integer1000the maximum number of entries to delete on each purge of log-like system tablesServerless/Dedicated/Self-Hosted
server.log_gc.period
duration1h0m0sthe period at which log-like system tables are checked for old entriesServerless/Dedicated/Self-Hosted
server.max_connections_per_gateway
integer-1the maximum number of SQL connections per gateway allowed at a given time (note: this will only limit future connection attempts and will not affect already established connections). Negative values result in unlimited number of connections. Superusers are not affected by this limit.Serverless/Dedicated/Self-Hosted
server.max_open_transactions_per_gateway
integer-1the maximum number of open SQL transactions per gateway allowed at a given time. Negative values result in unlimited number of connections. Superusers are not affected by this limit.Serverless/Dedicated/Self-Hosted
server.oidc_authentication.autologin.enabled
(alias: server.oidc_authentication.autologin)
booleanfalseif true, logged-out visitors to the DB Console will be automatically redirected to the OIDC login endpointServerless/Dedicated/Self-Hosted
server.oidc_authentication.button_text
stringLog in with your OIDC providertext to show on button on DB Console login page to login with your OIDC provider (only shown if OIDC is enabled)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.claim_json_key
stringsets JSON key of principal to extract from payload after OIDC authentication completes (usually email or sid)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.client.timeout
duration30ssets the client timeout for external calls made during OIDC authentication (e.g. authorization code flow, etc.)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.client_id
stringsets OIDC client idServerless/Dedicated/Self-Hosted
server.oidc_authentication.client_secret
stringsets OIDC client secretServerless/Dedicated/Self-Hosted
server.oidc_authentication.enabled
booleanfalseenables or disables OIDC login for the DB ConsoleServerless/Dedicated/Self-Hosted
server.oidc_authentication.principal_regex
string(.+)regular expression to apply to extracted principal (see claim_json_key setting) to translate to SQL user (golang regex format, must include 1 grouping to extract)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.provider_url
stringsets OIDC provider URL ({provider_url}/.well-known/openid-configuration must resolve)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.redirect_url
stringhttps://localhost:8080/oidc/v1/callbacksets OIDC redirect URL via a URL string or a JSON string containing a required `redirect_urls` key with an object that maps from region keys to URL strings (URLs should point to your load balancer and must route to the path /oidc/v1/callback)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.scopes
stringopenidsets OIDC scopes to include with authentication request (space delimited list of strings, required to start with `openid`)Serverless/Dedicated/Self-Hosted
server.rangelog.ttl
duration720h0m0sif nonzero, entries in system.rangelog older than this duration are periodically purgedDedicated/Self-Hosted
server.redact_sensitive_settings.enabled
booleanfalseenables or disables the redaction of sensitive settings in the output of SHOW CLUSTER SETTINGS and SHOW ALL CLUSTER SETTINGS for users without the MODIFYCLUSTERSETTING privilegeServerless/Dedicated/Self-Hosted
server.shutdown.connections.timeout
(alias: server.shutdown.connection_wait)
duration0sthe maximum amount of time a server waits for all SQL connections to be closed before proceeding with a drain. (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Serverless/Dedicated/Self-Hosted
server.shutdown.initial_wait
(alias: server.shutdown.drain_wait)
duration0sthe amount of time a server waits in an unready state before proceeding with a drain (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting. --drain-wait is to specify the duration of the whole draining process, while server.shutdown.initial_wait is to set the wait time for health probes to notice that the node is not ready.)Serverless/Dedicated/Self-Hosted
server.shutdown.lease_transfer_iteration.timeout
(alias: server.shutdown.lease_transfer_wait)
duration5sthe timeout for a single iteration of the range lease transfer phase of draining (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Dedicated/Self-Hosted
server.shutdown.transactions.timeout
(alias: server.shutdown.query_wait)
duration10sthe timeout for waiting for active transactions to finish during a drain (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Serverless/Dedicated/Self-Hosted
server.sql_tcp_keep_alive.count
integer3maximum number of probes that will be sent out before a connection is dropped because it's unresponsive (Linux and Darwin only)Serverless/Dedicated/Self-Hosted
server.sql_tcp_keep_alive.interval
duration10stime between keep alive probes and idle time before probes are sent outServerless/Dedicated/Self-Hosted
server.time_until_store_dead
duration5m0sthe time after which if there is no new gossiped information about a store, it is considered deadServerless/Dedicated/Self-Hosted
server.user_login.cert_password_method.auto_scram_promotion.enabled
booleantruewhether to automatically promote cert-password authentication to use SCRAMServerless/Dedicated/Self-Hosted
server.user_login.downgrade_scram_stored_passwords_to_bcrypt.enabled
booleantrueif server.user_login.password_encryption=crdb-bcrypt, this controls whether to automatically re-encode passwords stored using scram-sha-256 to crdb-bcryptServerless/Dedicated/Self-Hosted
server.user_login.min_password_length
integer1the minimum length accepted for passwords set in cleartext via SQL. Note that a value lower than 1 is ignored: passwords cannot be empty in any case. This setting only applies when adding new users or altering an existing user's password; it will not affect existing logins.Serverless/Dedicated/Self-Hosted
server.user_login.password_encryption
enumerationscram-sha-256which hash method to use to encode cleartext passwords passed via ALTER/CREATE USER/ROLE WITH PASSWORD [crdb-bcrypt = 2, scram-sha-256 = 3]Serverless/Dedicated/Self-Hosted
server.user_login.password_hashes.default_cost.crdb_bcrypt
integer10the hashing cost to use when storing passwords supplied as cleartext by SQL clients with the hashing method crdb-bcrypt (allowed range: 4-31)Serverless/Dedicated/Self-Hosted
server.user_login.password_hashes.default_cost.scram_sha_256
integer10610the hashing cost to use when storing passwords supplied as cleartext by SQL clients with the hashing method scram-sha-256 (allowed range: 4096-240000000000)Serverless/Dedicated/Self-Hosted
server.user_login.rehash_scram_stored_passwords_on_cost_change.enabled
booleantrueif server.user_login.password_hashes.default_cost.scram_sha_256 differs from the cost in a stored hash, this controls whether to automatically re-encode stored passwords using scram-sha-256 with the new default costServerless/Dedicated/Self-Hosted
server.user_login.timeout
duration10stimeout after which client authentication times out if some system range is unavailable (0 = no timeout)Serverless/Dedicated/Self-Hosted
server.user_login.upgrade_bcrypt_stored_passwords_to_scram.enabled
booleantrueif server.user_login.password_encryption=scram-sha-256, this controls whether to automatically re-encode passwords stored using crdb-bcrypt to scram-sha-256Serverless/Dedicated/Self-Hosted
server.web_session.purge.ttl
duration1h0m0sif nonzero, entries in system.web_sessions older than this duration are periodically purgedServerless/Dedicated/Self-Hosted
server.web_session.timeout
(alias: server.web_session_timeout)
duration168h0m0sthe duration that a newly created web session will be validServerless/Dedicated/Self-Hosted
spanconfig.bounds.enabled
booleantruedictates whether span config bounds are consulted when serving span configs for secondary tenantsDedicated/Self-Hosted
spanconfig.range_coalescing.system.enabled
(alias: spanconfig.storage_coalesce_adjacent.enabled)
booleantruecollapse adjacent ranges with the same span configs, for the ranges specific to the system tenantDedicated/Self-Hosted
spanconfig.range_coalescing.application.enabled
(alias: spanconfig.tenant_coalesce_adjacent.enabled)
booleantruecollapse adjacent ranges with the same span configs across all secondary tenant keyspacesDedicated/Self-Hosted
sql.auth.change_own_password.enabled
booleanfalsecontrols whether a user is allowed to change their own password, even if they have no other privilegesServerless/Dedicated/Self-Hosted
sql.auth.grant_option_for_owner.enabled
booleantruedetermines whether the GRANT OPTION for privileges is implicitly given to the owner of an objectServerless/Dedicated/Self-Hosted
sql.auth.grant_option_inheritance.enabled
booleantruedetermines whether the GRANT OPTION for privileges is inherited through role membershipServerless/Dedicated/Self-Hosted
sql.auth.public_schema_create_privilege.enabled
booleantruedetermines whether to grant all users the CREATE privileges on the public schema when it is createdServerless/Dedicated/Self-Hosted
sql.closed_session_cache.capacity
integer1000the maximum number of sessions in the cacheServerless/Dedicated/Self-Hosted
sql.closed_session_cache.time_to_live
integer3600the maximum time to live, in secondsServerless/Dedicated/Self-Hosted
sql.contention.event_store.capacity
byte size64 MiBthe in-memory storage capacity per-node of contention event storeServerless/Dedicated/Self-Hosted
sql.contention.event_store.duration_threshold
duration0sminimum contention duration to cause the contention events to be collected into crdb_internal.transaction_contention_eventsServerless/Dedicated/Self-Hosted
sql.contention.record_serialization_conflicts.enabled
booleantrueenables recording 40001 errors with conflicting txn meta as SERIALIZATION_CONFLICT contention events into crdb_internal.transaction_contention_eventsServerless/Dedicated/Self-Hosted
sql.contention.txn_id_cache.max_size
byte size64 MiBthe maximum byte size TxnID cache will use (set to 0 to disable)Serverless/Dedicated/Self-Hosted
sql.cross_db_fks.enabled
booleanfalseif true, creating foreign key references across databases is allowedServerless/Dedicated/Self-Hosted
sql.cross_db_sequence_owners.enabled
booleanfalseif true, creating sequences owned by tables from other databases is allowedServerless/Dedicated/Self-Hosted
sql.cross_db_sequence_references.enabled
booleanfalseif true, sequences referenced by tables from other databases are allowedServerless/Dedicated/Self-Hosted
sql.cross_db_views.enabled
booleanfalseif true, creating views that refer to other databases is allowedServerless/Dedicated/Self-Hosted
sql.defaults.cost_scans_with_default_col_size.enabled
booleanfalsesetting to true uses the same size for all columns to compute scan cost
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.datestyle
enumerationiso, mdydefault value for DateStyle session setting [iso, mdy = 0, iso, dmy = 1, iso, ymd = 2]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.default_hash_sharded_index_bucket_count
integer16used as bucket count if bucket count is not specified in hash sharded index definition
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.default_int_size
integer8the size, in bytes, of an INT type
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.disallow_full_table_scans.enabled
booleanfalsesetting to true rejects queries that have planned a full table scan
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.distsql
enumerationautodefault distributed SQL execution mode [off = 0, auto = 1, on = 2, always = 3]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_alter_column_type.enabled
booleanfalsedefault value for experimental_alter_column_type session setting; enables the use of ALTER COLUMN TYPE for general conversions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_distsql_planning
enumerationoffdefault experimental_distsql_planning mode; enables experimental opt-driven DistSQL planning [off = 0, on = 1]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_enable_unique_without_index_constraints.enabled
booleanfalsedefault value for experimental_enable_unique_without_index_constraints session setting; disables unique without index constraints by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_implicit_column_partitioning.enabled
booleanfalsedefault value for experimental_enable_implicit_column_partitioning; allows for the use of implicit column partitioning
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_temporary_tables.enabled
booleanfalsedefault value for experimental_enable_temp_tables; allows for use of temporary tables by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.foreign_key_cascades_limit
integer10000default value for foreign_key_cascades_limit session setting; limits the number of cascading operations that run as part of a single query
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.idle_in_session_timeout
duration0sdefault value for the idle_in_session_timeout session setting; controls the duration a session is permitted to idle before the session is terminated; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.idle_in_transaction_session_timeout
duration0sdefault value for the idle_in_transaction_session_timeout; controls the duration a session is permitted to idle in a transaction before the session is terminated; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.implicit_select_for_update.enabled
booleantruedefault value for enable_implicit_select_for_update session setting; enables FOR UPDATE locking during the row-fetch phase of mutation statements
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.insert_fast_path.enabled
booleantruedefault value for enable_insert_fast_path session setting; enables a specialized insert path
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.intervalstyle
enumerationpostgresdefault value for IntervalStyle session setting [postgres = 0, iso_8601 = 1, sql_standard = 2]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.large_full_scan_rows
float1000default value for large_full_scan_rows session setting which determines the maximum table size allowed for a full scan when disallow_full_table_scans is set to true
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.locality_optimized_partitioned_index_scan.enabled
booleantruedefault value for locality_optimized_partitioned_index_scan session setting; enables searching for rows in the current region before searching remote regions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.lock_timeout
duration0sdefault value for the lock_timeout session setting; controls the duration a query is permitted to wait while attempting to acquire a lock on a key or while blocking on an existing lock in order to perform a non-locking read on a key; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.on_update_rehome_row.enabled
booleantruedefault value for on_update_rehome_row; enables ON UPDATE rehome_row() expressions to trigger on updates
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.optimizer_use_histograms.enabled
booleantruedefault value for optimizer_use_histograms session setting; enables usage of histograms in the optimizer by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.optimizer_use_multicol_stats.enabled
booleantruedefault value for optimizer_use_multicol_stats session setting; enables usage of multi-column stats in the optimizer by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.override_alter_primary_region_in_super_region.enabled
booleanfalsedefault value for override_alter_primary_region_in_super_region; allows for altering the primary region even if the primary region is a member of a super region
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.override_multi_region_zone_config.enabled
booleanfalsedefault value for override_multi_region_zone_config; allows for overriding the zone configs of a multi-region table or database
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.prefer_lookup_joins_for_fks.enabled
booleanfalsedefault value for prefer_lookup_joins_for_fks session setting; causes foreign key operations to use lookup joins when possible
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.primary_region
stringif not empty, all databases created without a PRIMARY REGION will implicitly have the given PRIMARY REGION
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.reorder_joins_limit
integer8default number of joins to reorder
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.require_explicit_primary_keys.enabled
booleanfalsedefault value for requiring explicit primary keys in CREATE TABLE statements
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.results_buffer.size
byte size512 KiBdefault size of the buffer that accumulates results for a statement or a batch of statements before they are sent to the client. This can be overridden on an individual connection with the 'results_buffer_size' parameter. Note that auto-retries generally only happen while no results have been delivered to the client, so reducing this size can increase the number of retriable errors a client receives. On the other hand, increasing the buffer size can increase the delay until the client receives the first result row. Updating the setting only affects new connections. Setting to 0 disables any buffering.
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.serial_normalization
enumerationrowiddefault handling of SERIAL in table definitions [rowid = 0, virtual_sequence = 1, sql_sequence = 2, sql_sequence_cached = 3, unordered_rowid = 4, sql_sequence_cached_node = 5]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.statement_timeout
duration0sdefault value for the statement_timeout; default value for the statement_timeout session setting; controls the duration a query is permitted to run before it is canceled; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.stub_catalog_tables.enabled
booleantruedefault value for stub_catalog_tables session setting
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.super_regions.enabled
booleanfalsedefault value for enable_super_regions; allows for the usage of super regions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.transaction_rows_read_err
integer0the limit for the number of rows read by a SQL transaction which - once exceeded - will fail the transaction (or will trigger a logging event to SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.transaction_rows_read_log
integer0the threshold for the number of rows read by a SQL transaction which - once exceeded - will trigger a logging event to SQL_PERF (or SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.transaction_rows_written_err
integer0the limit for the number of rows written by a SQL transaction which - once exceeded - will fail the transaction (or will trigger a logging event to SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.transaction_rows_written_log
integer0the threshold for the number of rows written by a SQL transaction which - once exceeded - will trigger a logging event to SQL_PERF (or SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.use_declarative_schema_changer
enumerationondefault value for use_declarative_schema_changer session setting; disables new schema changer by default [off = 0, on = 1, unsafe = 2, unsafe_always = 3]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.vectorize
enumerationondefault vectorize mode [on = 0, on = 1, on = 2, experimental_always = 3, off = 4]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.zigzag_join.enabled
booleanfalsedefault value for enable_zigzag_join session setting; disallows use of zig-zag join by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.distsql.temp_storage.workmem
byte size64 MiBmaximum amount of memory in bytes a processor can use before falling back to temp storageServerless/Dedicated/Self-Hosted
sql.guardrails.max_row_size_err
byte size512 MiBmaximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an error is returned; use 0 to disableServerless/Dedicated/Self-Hosted
sql.guardrails.max_row_size_log
byte size64 MiBmaximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an event is logged to SQL_PERF (or SQL_INTERNAL_PERF if the mutating statement was internal); use 0 to disableServerless/Dedicated/Self-Hosted
sql.hash_sharded_range_pre_split.max
integer16max pre-split ranges to have when adding hash sharded index to an existing tableServerless/Dedicated/Self-Hosted
sql.index_recommendation.drop_unused_duration
duration168h0m0sthe index unused duration at which we begin to recommend dropping the indexServerless/Dedicated/Self-Hosted
sql.insights.anomaly_detection.enabled
booleantrueenable per-fingerprint latency recording and anomaly detectionServerless/Dedicated/Self-Hosted
sql.insights.anomaly_detection.latency_threshold
duration50msstatements must surpass this threshold to trigger anomaly detection and identificationServerless/Dedicated/Self-Hosted
sql.insights.anomaly_detection.memory_limit
byte size1.0 MiBthe maximum amount of memory allowed for tracking statement latenciesServerless/Dedicated/Self-Hosted
sql.insights.execution_insights_capacity
integer1000the size of the per-node store of execution insightsServerless/Dedicated/Self-Hosted
sql.insights.high_retry_count.threshold
integer10the number of retries a slow statement must have undergone for its high retry count to be highlighted as a potential problemServerless/Dedicated/Self-Hosted
sql.insights.latency_threshold
duration100msamount of time after which an executing statement is considered slow. Use 0 to disable.Serverless/Dedicated/Self-Hosted
sql.log.redact_names.enabled
booleanfalseif set, schema object identifiers are redacted in SQL statements that appear in event logsServerless/Dedicated/Self-Hosted
sql.log.slow_query.experimental_full_table_scans.enabled
booleanfalsewhen set to true, statements that perform a full table/index scan will be logged to the slow query log even if they do not meet the latency threshold. Must have the slow query log enabled for this setting to have any effect.Serverless/Dedicated/Self-Hosted
sql.log.slow_query.internal_queries.enabled
booleanfalsewhen set to true, internal queries which exceed the slow query log threshold are logged to a separate log. Must have the slow query log enabled for this setting to have any effect.Serverless/Dedicated/Self-Hosted
sql.log.slow_query.latency_threshold
duration0swhen set to non-zero, log statements whose service latency exceeds the threshold to a secondary logger on each nodeServerless/Dedicated/Self-Hosted
sql.log.user_audit
stringuser/role-based audit logging configuration. An enterprise license is required for this cluster setting to take effect.Serverless/Dedicated/Self-Hosted
sql.log.user_audit.reduced_config.enabled
booleanfalseenables logic to compute a reduced audit configuration, computing the audit configuration only once at session start instead of at each SQL event. The tradeoff with the increase in performance (~5%), is that changes to the audit configuration (user role memberships/cluster setting) are not reflected within session. Users will need to start a new session to see these changes in their auditing behaviour.Serverless/Dedicated/Self-Hosted
sql.metrics.index_usage_stats.enabled
booleantruecollect per index usage statisticsServerless/Dedicated/Self-Hosted
sql.metrics.max_mem_reported_stmt_fingerprints
integer100000the maximum number of reported statement fingerprints stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.max_mem_reported_txn_fingerprints
integer100000the maximum number of reported transaction fingerprints stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.max_mem_stmt_fingerprints
integer7500the maximum number of statement fingerprints stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.max_mem_txn_fingerprints
integer7500the maximum number of transaction fingerprints stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.dump_to_logs.enabled
(alias: sql.metrics.statement_details.dump_to_logs)
booleanfalsedump collected statement statistics to node logs when periodically clearedServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.enabled
booleantruecollect per-statement query statisticsServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.gateway_node.enabled
booleanfalsesave the gateway node for each statement fingerprint. If false, the value will be stored as 0.Serverless/Dedicated/Self-Hosted
sql.metrics.statement_details.index_recommendation_collection.enabled
booleantruegenerate an index recommendation for each fingerprint IDServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.max_mem_reported_idx_recommendations
integer5000the maximum number of reported index recommendation info stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.plan_collection.enabled
booleanfalseperiodically save a logical plan for each fingerprintServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.plan_collection.period
duration5m0sthe time until a new logical plan is collectedServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.threshold
duration0sminimum execution time to cause statement statistics to be collected. If configured, no transaction stats are collected.Serverless/Dedicated/Self-Hosted
sql.metrics.transaction_details.enabled
booleantruecollect per-application transaction statisticsServerless/Dedicated/Self-Hosted
sql.multiple_modifications_of_table.enabled
booleanfalseif true, allow statements containing multiple INSERT ON CONFLICT, UPSERT, UPDATE, or DELETE subqueries modifying the same table, at the risk of data corruption if the same row is modified multiple times by a single statement (multiple INSERT subqueries without ON CONFLICT cannot cause corruption and are always allowed)Serverless/Dedicated/Self-Hosted
sql.multiregion.drop_primary_region.enabled
booleantrueallows dropping the PRIMARY REGION of a database if it is the last regionServerless/Dedicated/Self-Hosted
sql.notices.enabled
booleantrueenable notices in the server/client protocol being sentServerless/Dedicated/Self-Hosted
sql.optimizer.uniqueness_checks_for_gen_random_uuid.enabled
booleanfalseif enabled, uniqueness checks may be planned for mutations of UUID columns updated with gen_random_uuid(); otherwise, uniqueness is assumed due to near-zero collision probabilityServerless/Dedicated/Self-Hosted
sql.schema.telemetry.recurrence
string@weeklycron-tab recurrence for SQL schema telemetry jobDedicated/Self-hosted (read-write); Serverless (read-only)
sql.spatial.experimental_box2d_comparison_operators.enabled
booleanfalseenables the use of certain experimental box2d comparison operatorsServerless/Dedicated/Self-Hosted
sql.stats.activity.persisted_rows.max
integer200000maximum number of rows of statement and transaction activity that will be persisted in the system tablesServerless/Dedicated/Self-Hosted
sql.stats.automatic_collection.enabled
booleantrueautomatic statistics collection modeServerless/Dedicated/Self-Hosted
sql.stats.automatic_collection.fraction_stale_rows
float0.2target fraction of stale rows per table that will trigger a statistics refreshServerless/Dedicated/Self-Hosted
sql.stats.automatic_collection.min_stale_rows
integer500target minimum number of stale rows per table that will trigger a statistics refreshServerless/Dedicated/Self-Hosted
sql.stats.cleanup.recurrence
string@hourlycron-tab recurrence for SQL Stats cleanup jobServerless/Dedicated/Self-Hosted
sql.stats.flush.enabled
booleantrueif set, SQL execution statistics are periodically flushed to diskServerless/Dedicated/Self-Hosted
sql.stats.flush.interval
duration10m0sthe interval at which SQL execution statistics are flushed to disk, this value must be less than or equal to 1 hourServerless/Dedicated/Self-Hosted
sql.stats.forecasts.enabled
booleantruewhen true, enables generation of statistics forecasts by default for all tablesServerless/Dedicated/Self-Hosted
sql.stats.forecasts.max_decrease
float0.3333333333333333the most a prediction is allowed to decrease, expressed as the minimum ratio of the prediction to the lowest prior observationServerless/Dedicated/Self-Hosted
sql.stats.forecasts.min_goodness_of_fit
float0.95the minimum R² (goodness of fit) measurement required from all predictive models to use a forecastServerless/Dedicated/Self-Hosted
sql.stats.forecasts.min_observations
integer3the minimum number of observed statistics required to produce a statistics forecastServerless/Dedicated/Self-Hosted
sql.stats.histogram_buckets.count
integer200maximum number of histogram buckets to build during table statistics collectionServerless/Dedicated/Self-Hosted
sql.stats.histogram_collection.enabled
booleantruehistogram collection modeServerless/Dedicated/Self-Hosted
sql.stats.histogram_samples.count
integer0number of rows sampled for histogram construction during table statistics collection. Not setting this or setting a value of 0 means that a reasonable sample size will be automatically picked based on the table size.Serverless/Dedicated/Self-Hosted
sql.stats.multi_column_collection.enabled
booleantruemulti-column statistics collection modeServerless/Dedicated/Self-Hosted
sql.stats.non_default_columns.min_retention_period
duration24h0m0sminimum retention period for table statistics collected on non-default columnsServerless/Dedicated/Self-Hosted
sql.stats.non_indexed_json_histograms.enabled
booleantrueset to true to collect table statistics histograms on non-indexed JSON columnsServerless/Dedicated/Self-Hosted
sql.stats.persisted_rows.max
integer1000000maximum number of rows of statement and transaction statistics that will be persisted in the system tables before compaction beginsServerless/Dedicated/Self-Hosted
sql.stats.post_events.enabled
booleanfalseif set, an event is logged for every CREATE STATISTICS jobServerless/Dedicated/Self-Hosted
sql.stats.response.max
integer20000the maximum number of statements and transaction stats returned in a CombinedStatements requestServerless/Dedicated/Self-Hosted
sql.stats.response.show_internal.enabled
booleanfalsecontrols if statistics for internal executions should be returned by the CombinedStatements and if internal sessions should be returned by the ListSessions endpoints. These endpoints are used to display statistics on the SQL Activity pagesServerless/Dedicated/Self-Hosted
sql.stats.system_tables.enabled
booleantruewhen true, enables use of statistics on system tables by the query optimizerServerless/Dedicated/Self-Hosted
sql.stats.system_tables_autostats.enabled
booleantruewhen true, enables automatic collection of statistics on system tablesServerless/Dedicated/Self-Hosted
sql.stats.virtual_computed_columns.enabled
booleantrueset to true to collect table statistics on virtual computed columnsServerless/Dedicated/Self-Hosted
sql.telemetry.query_sampling.enabled
booleanfalsewhen set to true, executed queries will emit an event on the telemetry logging channelServerless/Dedicated/Self-Hosted
sql.telemetry.query_sampling.internal.enabled
booleanfalsewhen set to true, internal queries will be sampled in telemetry loggingServerless/Dedicated/Self-Hosted
sql.telemetry.query_sampling.max_event_frequency
integer8the max event frequency (events per second) at which we sample executions for telemetry, note that it is recommended that this value shares a log-line limit of 10 logs per second on the telemetry pipeline with all other telemetry events. If sampling mode is set to 'transaction', this value is ignored.Serverless/Dedicated/Self-Hosted
sql.telemetry.query_sampling.mode
enumerationstatementthe execution level used for telemetry sampling. If set to 'statement', events are sampled at the statement execution level. If set to 'transaction', events are sampled at the transaction execution level, i.e. all statements for a transaction will be logged and are counted together as one sampled event (events are still emitted one per statement). [statement = 0, transaction = 1]Serverless/Dedicated/Self-Hosted
sql.telemetry.transaction_sampling.max_event_frequency
integer8the max event frequency (events per second) at which we sample transactions for telemetry. If sampling mode is set to 'statement', this setting is ignored. In practice, this means that we only sample a transaction if 1/max_event_frequency seconds have elapsed since the last transaction was sampled.Serverless/Dedicated/Self-Hosted
sql.telemetry.transaction_sampling.statement_events_per_transaction.max
integer50the maximum number of statement events to log for every sampled transaction. Note that statements that are logged by force do not adhere to this limit.Serverless/Dedicated/Self-Hosted
sql.temp_object_cleaner.cleanup_interval
duration30m0show often to clean up orphaned temporary objectsServerless/Dedicated/Self-Hosted
sql.temp_object_cleaner.wait_interval
duration30m0show long after creation a temporary object will be cleaned upServerless/Dedicated/Self-Hosted
sql.log.all_statements.enabled
(alias: sql.trace.log_statement_execute)
booleanfalseset to true to enable logging of all executed statementsServerless/Dedicated/Self-Hosted
sql.trace.stmt.enable_threshold
duration0senables tracing on all statements; statements executing for longer than this duration will have their trace logged (set to 0 to disable); note that enabling this may have a negative performance impact; this setting applies to individual statements within a transaction and is therefore finer-grained than sql.trace.txn.enable_thresholdServerless/Dedicated/Self-Hosted
sql.trace.txn.enable_threshold
duration0senables tracing on all transactions; transactions open for longer than this duration will have their trace logged (set to 0 to disable); note that enabling this may have a negative performance impact; this setting is coarser-grained than sql.trace.stmt.enable_threshold because it applies to all statements within a transaction as well as client communication (e.g. retries)Serverless/Dedicated/Self-Hosted
sql.ttl.changefeed_replication.disabled
booleanfalseif true, deletes issued by TTL will not be replicated via changefeeds (this setting will be ignored by changefeeds that have the ignore_disable_changefeed_replication option set; such changefeeds will continue to replicate all TTL deletes)Serverless/Dedicated/Self-Hosted
sql.ttl.default_delete_batch_size
integer100default amount of rows to delete in a single query during a TTL jobServerless/Dedicated/Self-Hosted
sql.ttl.default_delete_rate_limit
integer100default delete rate limit (rows per second) per node for each TTL job. Use 0 to signify no rate limit.Serverless/Dedicated/Self-Hosted
sql.ttl.default_select_batch_size
integer500default amount of rows to select in a single query during a TTL jobServerless/Dedicated/Self-Hosted
sql.ttl.default_select_rate_limit
integer0default select rate limit (rows per second) per node for each TTL job. Use 0 to signify no rate limit.Serverless/Dedicated/Self-Hosted
sql.ttl.job.enabled
booleantruewhether the TTL job is enabledServerless/Dedicated/Self-Hosted
sql.txn.read_committed_isolation.enabled
booleantrueset to true to allow transactions to use the READ COMMITTED isolation level if specified by BEGIN/SET commandsServerless/Dedicated/Self-Hosted
sql.txn_fingerprint_id_cache.capacity
integer100the maximum number of txn fingerprint IDs storedServerless/Dedicated/Self-Hosted
storage.experimental.eventually_file_only_snapshots.enabled
booleantrueset to false to disable eventually-file-only-snapshots (kv.snapshot_receiver.excise.enabled must also be false)Dedicated/Self-Hosted
storage.ingest_split.enabled
booleantrueset to false to disable ingest-time splitting that lowers write-amplificationDedicated/Self-Hosted
storage.ingestion.value_blocks.enabled
booleantrueset to true to enable writing of value blocks in ingestion sstablesServerless/Dedicated/Self-Hosted
storage.max_sync_duration
duration20smaximum duration for disk operations; any operations that take longer than this setting trigger a warning log entry or process crashDedicated/Self-hosted (read-write); Serverless (read-only)
storage.max_sync_duration.fatal.enabled
booleantrueif true, fatal the process when a disk operation exceeds storage.max_sync_durationServerless/Dedicated/Self-Hosted
storage.sstable.compression_algorithm
enumerationsnappydetermines the compression algorithm to use when compressing sstable data blocks for use in a Pebble store; [snappy = 1, zstd = 2, none = 3]Dedicated/Self-hosted (read-write); Serverless (read-only)
storage.sstable.compression_algorithm_backup_storage
enumerationsnappydetermines the compression algorithm to use when compressing sstable data blocks for backup row data storage; [snappy = 1, zstd = 2, none = 3]Dedicated/Self-hosted (read-write); Serverless (read-only)
storage.sstable.compression_algorithm_backup_transport
enumerationsnappydetermines the compression algorithm to use when compressing sstable data blocks for backup transport; [snappy = 1, zstd = 2, none = 3]Dedicated/Self-hosted (read-write); Serverless (read-only)
storage.wal_failover.unhealthy_op_threshold
duration100msthe latency of a WAL write considered unhealthy and triggers a failover to a secondary WAL locationDedicated/Self-Hosted
timeseries.storage.enabled
booleantrueif set, periodic timeseries data is stored within the cluster; disabling is not recommended unless you are storing the data elsewhereDedicated/Self-Hosted
timeseries.storage.resolution_10s.ttl
duration240h0m0sthe maximum age of time series data stored at the 10 second resolution. Data older than this is subject to rollup and deletion.Dedicated/Self-hosted (read-write); Serverless (read-only)
timeseries.storage.resolution_30m.ttl
duration2160h0m0sthe maximum age of time series data stored at the 30 minute resolution. Data older than this is subject to deletion.Dedicated/Self-hosted (read-write); Serverless (read-only)
trace.debug_http_endpoint.enabled
(alias: trace.debug.enable)
booleanfalseif set, traces for recent requests can be seen at https://<ui>/debug/requestsServerless/Dedicated/Self-Hosted
trace.opentelemetry.collector
stringaddress of an OpenTelemetry trace collector to receive traces using the otel gRPC protocol, as <host>:<port>. If no port is specified, 4317 will be used.Serverless/Dedicated/Self-Hosted
trace.snapshot.rate
duration0sif non-zero, interval at which background trace snapshots are capturedServerless/Dedicated/Self-Hosted
trace.span_registry.enabled
booleantrueif set, ongoing traces can be seen at https://<ui>/#/debug/tracezServerless/Dedicated/Self-Hosted
trace.zipkin.collector
stringthe address of a Zipkin instance to receive traces, as <host>:<port>. If no port is specified, 9411 will be used.Serverless/Dedicated/Self-Hosted
ui.database_locality_metadata.enabled
booleantrueif enabled shows extended locality data about databases and tables in DB Console which can be expensive to computeServerless/Dedicated/Self-Hosted
ui.display_timezone
enumerationetc/utcthe timezone used to format timestamps in the ui [etc/utc = 0, america/new_york = 1]Serverless/Dedicated/Self-Hosted
version
version24.2set the active cluster version in the format '<major>.<minor>'Serverless/Dedicated/Self-Hosted
diff --git a/src/current/_includes/cockroach-generated/release-24.2/sql/aggregates.md b/src/current/_includes/cockroach-generated/release-24.2/sql/aggregates.md new file mode 100644 index 00000000000..3213f0f02a8 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-24.2/sql/aggregates.md @@ -0,0 +1,579 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_agg(arg1: bool) → bool[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bool[]) → bool[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bytes) → bytes[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bytes[]) → bytes[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: date) → date[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: date[]) → date[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: decimal) → decimal[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: decimal[]) → decimal[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: float) → float[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: float[]) → float[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: inet) → inet[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: inet[]) → inet[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: int) → int[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: int[]) → int[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: interval) → interval[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: interval[]) → interval[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: string) → string[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: string[]) → string[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: time) → time[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: time[]) → time[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamp) → timestamp[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamp[]) → timestamp[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamptz) → timestamptz[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamptz[]) → timestamptz[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: uuid) → uuid[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: uuid[]) → uuid[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: anyenum) → anyenum[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: anyenum[]) → anyenum[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: box2d) → box2d[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: box2d[]) → box2d[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geography) → geography[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geography[]) → geography[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geometry) → geometry[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geometry[]) → geometry[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: jsonb) → jsonb[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: jsonb[]) → jsonb[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: oid) → oid[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: oid[]) → oid[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: pg_lsn) → pg_lsn[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: pg_lsn[]) → pg_lsn[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: refcursor) → refcursor[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: refcursor[]) → refcursor[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timetz) → timetz[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timetz[]) → timetz[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: tuple) → tuple[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: tuple[]) → tuple[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: varbit) → varbit[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: varbit[]) → varbit[][]

Aggregates the selected values into an array.

+
Immutable
array_cat_agg(arg1: bool[]) → bool[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: bytes[]) → bytes[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: date[]) → date[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: decimal[]) → decimal[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: float[]) → float[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: inet[]) → inet[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: int[]) → int[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: interval[]) → interval[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: string[]) → string[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: time[]) → time[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timestamp[]) → timestamp[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timestamptz[]) → timestamptz[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: uuid[]) → uuid[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: anyenum[]) → anyenum[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: box2d[]) → box2d[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: geography[]) → geography[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: geometry[]) → geometry[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: jsonb[]) → jsonb[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: oid[]) → oid[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: pg_lsn[]) → pg_lsn[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: refcursor[]) → refcursor[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timetz[]) → timetz[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: tuple[]) → tuple[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: varbit[]) → varbit[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
avg(arg1: decimal) → decimal

Calculates the average of the selected values.

+
Immutable
avg(arg1: float) → float

Calculates the average of the selected values.

+
Immutable
avg(arg1: int) → decimal

Calculates the average of the selected values.

+
Immutable
avg(arg1: interval) → interval

Calculates the average of the selected values.

+
Immutable
bit_and(arg1: int) → int

Calculates the bitwise AND of all non-null input values, or null if none.

+
Immutable
bit_and(arg1: varbit) → varbit

Calculates the bitwise AND of all non-null input values, or null if none.

+
Immutable
bit_or(arg1: int) → int

Calculates the bitwise OR of all non-null input values, or null if none.

+
Immutable
bit_or(arg1: varbit) → varbit

Calculates the bitwise OR of all non-null input values, or null if none.

+
Immutable
bool_and(arg1: bool) → bool

Calculates the boolean value of ANDing all selected values.

+
Immutable
bool_or(arg1: bool) → bool

Calculates the boolean value of ORing all selected values.

+
Immutable
concat_agg(arg1: bytes) → bytes

Concatenates all selected values.

+
Immutable
concat_agg(arg1: string) → string

Concatenates all selected values.

+
Immutable
corr(arg1: decimal, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: decimal, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: decimal, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
count(arg1: anyelement) → int

Calculates the number of selected elements.

+
Immutable
count_rows() → int

Calculates the number of rows.

+
Immutable
covar_pop(arg1: decimal, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: decimal, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: decimal, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
every(arg1: bool) → bool

Calculates the boolean value of ANDing all selected values.

+
Immutable
json_agg(arg1: anyelement) → jsonb

Aggregates values as a JSON or JSONB array.

+
Stable
json_object_agg(arg1: string, arg2: anyelement) → jsonb

Aggregates values as a JSON or JSONB object.

+
Stable
jsonb_agg(arg1: anyelement) → jsonb

Aggregates values as a JSON or JSONB array.

+
Stable
jsonb_object_agg(arg1: string, arg2: anyelement) → jsonb

Aggregates values as a JSON or JSONB object.

+
Stable
max(arg1: bool) → bool

Identifies the maximum selected value.

+
Immutable
max(arg1: bytes) → bytes

Identifies the maximum selected value.

+
Immutable
max(arg1: date) → date

Identifies the maximum selected value.

+
Immutable
max(arg1: decimal) → decimal

Identifies the maximum selected value.

+
Immutable
max(arg1: float) → float

Identifies the maximum selected value.

+
Immutable
max(arg1: inet) → inet

Identifies the maximum selected value.

+
Immutable
max(arg1: int) → int

Identifies the maximum selected value.

+
Immutable
max(arg1: interval) → interval

Identifies the maximum selected value.

+
Immutable
max(arg1: string) → string

Identifies the maximum selected value.

+
Immutable
max(arg1: time) → time

Identifies the maximum selected value.

+
Immutable
max(arg1: timestamp) → timestamp

Identifies the maximum selected value.

+
Immutable
max(arg1: timestamptz) → timestamptz

Identifies the maximum selected value.

+
Immutable
max(arg1: uuid) → uuid

Identifies the maximum selected value.

+
Immutable
max(arg1: anyenum) → anyenum

Identifies the maximum selected value.

+
Immutable
max(arg1: box2d) → box2d

Identifies the maximum selected value.

+
Immutable
max(arg1: collatedstring{*}) → collatedstring{*}

Identifies the maximum selected value.

+
Immutable
max(arg1: geography) → geography

Identifies the maximum selected value.

+
Immutable
max(arg1: geometry) → geometry

Identifies the maximum selected value.

+
Immutable
max(arg1: jsonb) → jsonb

Identifies the maximum selected value.

+
Immutable
max(arg1: oid) → oid

Identifies the maximum selected value.

+
Immutable
max(arg1: pg_lsn) → pg_lsn

Identifies the maximum selected value.

+
Immutable
max(arg1: timetz) → timetz

Identifies the maximum selected value.

+
Immutable
max(arg1: varbit) → varbit

Identifies the maximum selected value.

+
Immutable
min(arg1: bool) → bool

Identifies the minimum selected value.

+
Immutable
min(arg1: bytes) → bytes

Identifies the minimum selected value.

+
Immutable
min(arg1: date) → date

Identifies the minimum selected value.

+
Immutable
min(arg1: decimal) → decimal

Identifies the minimum selected value.

+
Immutable
min(arg1: float) → float

Identifies the minimum selected value.

+
Immutable
min(arg1: inet) → inet

Identifies the minimum selected value.

+
Immutable
min(arg1: int) → int

Identifies the minimum selected value.

+
Immutable
min(arg1: interval) → interval

Identifies the minimum selected value.

+
Immutable
min(arg1: string) → string

Identifies the minimum selected value.

+
Immutable
min(arg1: time) → time

Identifies the minimum selected value.

+
Immutable
min(arg1: timestamp) → timestamp

Identifies the minimum selected value.

+
Immutable
min(arg1: timestamptz) → timestamptz

Identifies the minimum selected value.

+
Immutable
min(arg1: uuid) → uuid

Identifies the minimum selected value.

+
Immutable
min(arg1: anyenum) → anyenum

Identifies the minimum selected value.

+
Immutable
min(arg1: box2d) → box2d

Identifies the minimum selected value.

+
Immutable
min(arg1: collatedstring{*}) → collatedstring{*}

Identifies the minimum selected value.

+
Immutable
min(arg1: geography) → geography

Identifies the minimum selected value.

+
Immutable
min(arg1: geometry) → geometry

Identifies the minimum selected value.

+
Immutable
min(arg1: jsonb) → jsonb

Identifies the minimum selected value.

+
Immutable
min(arg1: oid) → oid

Identifies the minimum selected value.

+
Immutable
min(arg1: pg_lsn) → pg_lsn

Identifies the minimum selected value.

+
Immutable
min(arg1: timetz) → timetz

Identifies the minimum selected value.

+
Immutable
min(arg1: varbit) → varbit

Identifies the minimum selected value.

+
Immutable
percentile_cont(arg1: float) → float

Continuous percentile: returns a float corresponding to the specified fraction in the ordering, interpolating between adjacent input floats if needed.

+
Immutable
percentile_cont(arg1: float) → interval

Continuous percentile: returns an interval corresponding to the specified fraction in the ordering, interpolating between adjacent input intervals if needed.

+
Immutable
percentile_cont(arg1: float[]) → float[]

Continuous percentile: returns floats corresponding to the specified fractions in the ordering, interpolating between adjacent input floats if needed.

+
Immutable
percentile_cont(arg1: float[]) → interval[]

Continuous percentile: returns intervals corresponding to the specified fractions in the ordering, interpolating between adjacent input intervals if needed.

+
Immutable
percentile_disc(arg1: float) → anyelement

Discrete percentile: returns the first input value whose position in the ordering equals or exceeds the specified fraction.

+
Immutable
percentile_disc(arg1: float[]) → anyelement

Discrete percentile: returns input values whose position in the ordering equals or exceeds the specified fractions.

+
Immutable
regr_avgx(arg1: decimal, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: decimal, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: decimal, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_count(arg1: decimal, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: decimal, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: decimal, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_intercept(arg1: decimal, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: decimal, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: decimal, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_r2(arg1: decimal, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: decimal, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: decimal, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_slope(arg1: decimal, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: decimal, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: decimal, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_sxx(arg1: decimal, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: decimal, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: decimal, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
sqrdiff(arg1: decimal) → decimal

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
sqrdiff(arg1: float) → float

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
sqrdiff(arg1: int) → decimal

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
st_collect(arg1: geometry) → geometry

Collects geometries into a GeometryCollection or multi-type as appropriate.

+
Immutable
st_extent(arg1: geometry) → box2d

Forms a Box2D that encapsulates all provided geometries.

+
Immutable
st_makeline(arg1: geometry) → geometry

Forms a LineString from Point, MultiPoint or LineStrings. Other shapes will be ignored.

+
Immutable
st_memcollect(arg1: geometry) → geometry

Collects geometries into a GeometryCollection or multi-type as appropriate.

+
Immutable
st_memunion(arg1: geometry) → geometry

Applies a spatial union to the geometries provided.

+
Immutable
st_union(arg1: geometry) → geometry

Applies a spatial union to the geometries provided.

+
Immutable
stddev(arg1: decimal) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev(arg1: float) → float

Calculates the standard deviation of the selected values.

+
Immutable
stddev(arg1: int) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: decimal) → decimal

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: float) → float

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: int) → decimal

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: decimal) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: float) → float

Calculates the standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: int) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
string_agg(arg1: bytes, arg2: bytes) → bytes

Concatenates all selected values using the provided delimiter.

+
Immutable
string_agg(arg1: string, arg2: string) → string

Concatenates all selected values using the provided delimiter.

+
Immutable
sum(arg1: decimal) → decimal

Calculates the sum of the selected values.

+
Immutable
sum(arg1: float) → float

Calculates the sum of the selected values.

+
Immutable
sum(arg1: int) → decimal

Calculates the sum of the selected values.

+
Immutable
sum(arg1: interval) → interval

Calculates the sum of the selected values.

+
Immutable
sum_int(arg1: int) → int

Calculates the sum of the selected values.

+
Immutable
var_pop(arg1: decimal) → decimal

Calculates the population variance of the selected values.

+
Immutable
var_pop(arg1: float) → float

Calculates the population variance of the selected values.

+
Immutable
var_pop(arg1: int) → decimal

Calculates the population variance of the selected values.

+
Immutable
var_samp(arg1: decimal) → decimal

Calculates the variance of the selected values.

+
Immutable
var_samp(arg1: float) → float

Calculates the variance of the selected values.

+
Immutable
var_samp(arg1: int) → decimal

Calculates the variance of the selected values.

+
Immutable
variance(arg1: decimal) → decimal

Calculates the variance of the selected values.

+
Immutable
variance(arg1: float) → float

Calculates the variance of the selected values.

+
Immutable
variance(arg1: int) → decimal

Calculates the variance of the selected values.

+
Immutable
xor_agg(arg1: bytes) → bytes

Calculates the bitwise XOR of the selected values.

+
Immutable
xor_agg(arg1: int) → int

Calculates the bitwise XOR of the selected values.

+
Immutable
+ diff --git a/src/current/_includes/cockroach-generated/release-24.2/sql/functions.md b/src/current/_includes/cockroach-generated/release-24.2/sql/functions.md new file mode 100644 index 00000000000..93e0c310821 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-24.2/sql/functions.md @@ -0,0 +1,3523 @@ +### Array functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_append(array: bool[], elem: bool) → bool[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: bytes[], elem: bytes) → bytes[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: date[], elem: date) → date[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: decimal[], elem: decimal) → decimal[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: float[], elem: float) → float[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: inet[], elem: inet) → inet[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: int[], elem: int) → int[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: interval[], elem: interval) → interval[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: string[], elem: string) → string[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: time[], elem: time) → time[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timestamp[], elem: timestamp) → timestamp[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timestamptz[], elem: timestamptz) → timestamptz[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: uuid[], elem: uuid) → uuid[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: anyenum[], elem: anyenum) → anyenum[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: box2d[], elem: box2d) → box2d[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: geography[], elem: geography) → geography[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: geometry[], elem: geometry) → geometry[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: jsonb[], elem: jsonb) → jsonb[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: oid[], elem: oid) → oid[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: pg_lsn[], elem: pg_lsn) → pg_lsn[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: refcursor[], elem: refcursor) → refcursor[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timetz[], elem: timetz) → timetz[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: tuple[], elem: tuple) → tuple[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: varbit[], elem: varbit) → varbit[]

Appends elem to array, returning the result.

+
Immutable
array_cat(left: bool[], right: bool[]) → bool[]

Appends two arrays.

+
Immutable
array_cat(left: bytes[], right: bytes[]) → bytes[]

Appends two arrays.

+
Immutable
array_cat(left: date[], right: date[]) → date[]

Appends two arrays.

+
Immutable
array_cat(left: decimal[], right: decimal[]) → decimal[]

Appends two arrays.

+
Immutable
array_cat(left: float[], right: float[]) → float[]

Appends two arrays.

+
Immutable
array_cat(left: inet[], right: inet[]) → inet[]

Appends two arrays.

+
Immutable
array_cat(left: int[], right: int[]) → int[]

Appends two arrays.

+
Immutable
array_cat(left: interval[], right: interval[]) → interval[]

Appends two arrays.

+
Immutable
array_cat(left: string[], right: string[]) → string[]

Appends two arrays.

+
Immutable
array_cat(left: time[], right: time[]) → time[]

Appends two arrays.

+
Immutable
array_cat(left: timestamp[], right: timestamp[]) → timestamp[]

Appends two arrays.

+
Immutable
array_cat(left: timestamptz[], right: timestamptz[]) → timestamptz[]

Appends two arrays.

+
Immutable
array_cat(left: uuid[], right: uuid[]) → uuid[]

Appends two arrays.

+
Immutable
array_cat(left: anyenum[], right: anyenum[]) → anyenum[]

Appends two arrays.

+
Immutable
array_cat(left: box2d[], right: box2d[]) → box2d[]

Appends two arrays.

+
Immutable
array_cat(left: geography[], right: geography[]) → geography[]

Appends two arrays.

+
Immutable
array_cat(left: geometry[], right: geometry[]) → geometry[]

Appends two arrays.

+
Immutable
array_cat(left: jsonb[], right: jsonb[]) → jsonb[]

Appends two arrays.

+
Immutable
array_cat(left: oid[], right: oid[]) → oid[]

Appends two arrays.

+
Immutable
array_cat(left: pg_lsn[], right: pg_lsn[]) → pg_lsn[]

Appends two arrays.

+
Immutable
array_cat(left: refcursor[], right: refcursor[]) → refcursor[]

Appends two arrays.

+
Immutable
array_cat(left: timetz[], right: timetz[]) → timetz[]

Appends two arrays.

+
Immutable
array_cat(left: tuple[], right: tuple[]) → tuple[]

Appends two arrays.

+
Immutable
array_cat(left: varbit[], right: varbit[]) → varbit[]

Appends two arrays.

+
Immutable
array_length(input: anyelement[], array_dimension: int) → int

Calculates the length of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
array_lower(input: anyelement[], array_dimension: int) → int

Calculates the minimum value of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
array_position(array: bool[], elem: bool) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: bool[], elem: bool, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: bytes[], elem: bytes) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: bytes[], elem: bytes, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: date[], elem: date) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: date[], elem: date, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: decimal[], elem: decimal) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: decimal[], elem: decimal, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: float[], elem: float) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: float[], elem: float, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: inet[], elem: inet) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: inet[], elem: inet, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: int[], elem: int) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: int[], elem: int, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: interval[], elem: interval) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: interval[], elem: interval, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: string[], elem: string) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: string[], elem: string, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: time[], elem: time) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: time[], elem: time, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: timestamp[], elem: timestamp) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timestamp[], elem: timestamp, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: timestamptz[], elem: timestamptz) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timestamptz[], elem: timestamptz, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: uuid[], elem: uuid) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: uuid[], elem: uuid, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: anyenum[], elem: anyenum) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: anyenum[], elem: anyenum, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: box2d[], elem: box2d) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: box2d[], elem: box2d, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: geography[], elem: geography) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: geography[], elem: geography, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: geometry[], elem: geometry) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: geometry[], elem: geometry, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: jsonb[], elem: jsonb) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: jsonb[], elem: jsonb, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: oid[], elem: oid) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: oid[], elem: oid, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: pg_lsn[], elem: pg_lsn) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: pg_lsn[], elem: pg_lsn, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: refcursor[], elem: refcursor) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: refcursor[], elem: refcursor, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: timetz[], elem: timetz) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timetz[], elem: timetz, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: tuple[], elem: tuple) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: tuple[], elem: tuple, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: varbit[], elem: varbit) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: varbit[], elem: varbit, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_positions(array: bool[], elem: bool) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: bytes[], elem: bytes) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: date[], elem: date) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: decimal[], elem: decimal) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: float[], elem: float) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: inet[], elem: inet) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: int[], elem: int) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: interval[], elem: interval) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: string[], elem: string) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: time[], elem: time) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timestamp[], elem: timestamp) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timestamptz[], elem: timestamptz) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: uuid[], elem: uuid) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: anyenum[], elem: anyenum) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: box2d[], elem: box2d) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: geography[], elem: geography) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: geometry[], elem: geometry) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: jsonb[], elem: jsonb) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: oid[], elem: oid) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: pg_lsn[], elem: pg_lsn) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: refcursor[], elem: refcursor) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timetz[], elem: timetz) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: tuple[], elem: tuple) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: varbit[], elem: varbit) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_prepend(elem: bool, array: bool[]) → bool[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: bytes, array: bytes[]) → bytes[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: date, array: date[]) → date[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: decimal, array: decimal[]) → decimal[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: float, array: float[]) → float[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: inet, array: inet[]) → inet[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: int, array: int[]) → int[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: interval, array: interval[]) → interval[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: string, array: string[]) → string[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: time, array: time[]) → time[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timestamp, array: timestamp[]) → timestamp[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timestamptz, array: timestamptz[]) → timestamptz[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: uuid, array: uuid[]) → uuid[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: anyenum, array: anyenum[]) → anyenum[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: box2d, array: box2d[]) → box2d[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: geography, array: geography[]) → geography[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: geometry, array: geometry[]) → geometry[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: jsonb, array: jsonb[]) → jsonb[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: oid, array: oid[]) → oid[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: pg_lsn, array: pg_lsn[]) → pg_lsn[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: refcursor, array: refcursor[]) → refcursor[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timetz, array: timetz[]) → timetz[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: tuple, array: tuple[]) → tuple[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: varbit, array: varbit[]) → varbit[]

Prepends elem to array, returning the result.

+
Immutable
array_remove(array: bool[], elem: bool) → bool[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: bytes[], elem: bytes) → bytes[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: date[], elem: date) → date[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: decimal[], elem: decimal) → decimal[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: float[], elem: float) → float[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: inet[], elem: inet) → inet[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: int[], elem: int) → int[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: interval[], elem: interval) → interval[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: string[], elem: string) → string[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: time[], elem: time) → time[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timestamp[], elem: timestamp) → timestamp[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timestamptz[], elem: timestamptz) → timestamptz[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: uuid[], elem: uuid) → uuid[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: anyenum[], elem: anyenum) → anyenum[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: box2d[], elem: box2d) → box2d[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: geography[], elem: geography) → geography[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: geometry[], elem: geometry) → geometry[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: jsonb[], elem: jsonb) → jsonb[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: oid[], elem: oid) → oid[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: pg_lsn[], elem: pg_lsn) → pg_lsn[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: refcursor[], elem: refcursor) → refcursor[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timetz[], elem: timetz) → timetz[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: tuple[], elem: tuple) → tuple[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: varbit[], elem: varbit) → varbit[]

Remove from array all elements equal to elem.

+
Immutable
array_replace(array: bool[], toreplace: bool, replacewith: bool) → bool[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: bytes[], toreplace: bytes, replacewith: bytes) → bytes[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: date[], toreplace: date, replacewith: date) → date[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: decimal[], toreplace: decimal, replacewith: decimal) → decimal[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: float[], toreplace: float, replacewith: float) → float[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: inet[], toreplace: inet, replacewith: inet) → inet[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: int[], toreplace: int, replacewith: int) → int[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: interval[], toreplace: interval, replacewith: interval) → interval[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: string[], toreplace: string, replacewith: string) → string[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: time[], toreplace: time, replacewith: time) → time[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timestamp[], toreplace: timestamp, replacewith: timestamp) → timestamp[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timestamptz[], toreplace: timestamptz, replacewith: timestamptz) → timestamptz[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: uuid[], toreplace: uuid, replacewith: uuid) → uuid[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: anyenum[], toreplace: anyenum, replacewith: anyenum) → anyenum[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: box2d[], toreplace: box2d, replacewith: box2d) → box2d[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: geography[], toreplace: geography, replacewith: geography) → geography[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: geometry[], toreplace: geometry, replacewith: geometry) → geometry[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: jsonb[], toreplace: jsonb, replacewith: jsonb) → jsonb[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: oid[], toreplace: oid, replacewith: oid) → oid[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: pg_lsn[], toreplace: pg_lsn, replacewith: pg_lsn) → pg_lsn[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: refcursor[], toreplace: refcursor, replacewith: refcursor) → refcursor[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timetz[], toreplace: timetz, replacewith: timetz) → timetz[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: tuple[], toreplace: tuple, replacewith: tuple) → tuple[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: varbit[], toreplace: varbit, replacewith: varbit) → varbit[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_to_string(input: anyelement[], delim: string) → string

Join an array into a string with a delimiter.

+
Stable
array_to_string(input: anyelement[], delimiter: string, null: string) → string

Join an array into a string with a delimiter, replacing NULLs with a null string.

+
Stable
array_upper(input: anyelement[], array_dimension: int) → int

Calculates the maximum value of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
cardinality(input: anyelement[]) → int

Calculates the number of elements contained in input.

+
Immutable
jsonb_array_to_string_array(input: jsonb) → string[]

Convert a JSONB array into a string array.

+
Immutable
string_to_array(str: string, delimiter: string) → string[]

Split a string into components on a delimiter.

+
Immutable
string_to_array(str: string, delimiter: string, null: string) → string[]

Split a string into components on a delimiter with a specified string to consider NULL.

+
Immutable
+ +### BOOL functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
ilike_escape(unescaped: string, pattern: string, escape: string) → bool

Matches unescaped case-insensitively with pattern using escape as an escape token.

+
Immutable
inet_contained_by_or_equals(val: inet, container: inet) → bool

Test for subnet inclusion or equality, using only the network parts of the addresses. The host part of the addresses is ignored.

+
Immutable
inet_contains_or_equals(container: inet, val: inet) → bool

Test for subnet inclusion or equality, using only the network parts of the addresses. The host part of the addresses is ignored.

+
Immutable
inet_same_family(val: inet, val: inet) → bool

Checks if two IP addresses are of the same IP family.

+
Immutable
like_escape(unescaped: string, pattern: string, escape: string) → bool

Matches unescaped with pattern using escape as an escape token.

+
Immutable
not_ilike_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped does not match case-insensitively with pattern using escape as an escape token.

+
Immutable
not_like_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped does not match with pattern using escape as an escape token.

+
Immutable
not_similar_to_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped does not match with pattern using escape as an escape token.

+
Immutable
+ +### Comparison functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
greatest(anyelement...) → anyelement

Returns the element with the greatest value.

+
Immutable
least(anyelement...) → anyelement

Returns the element with the lowest value.

+
Immutable
num_nonnulls(anyelement...) → int

Returns the number of nonnull arguments.

+
Immutable
num_nulls(anyelement...) → int

Returns the number of null arguments.

+
Immutable
+ +### Cryptographic functions + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
crypt(password: string, salt: string) → string

Generates a hash based on a password and salt. The hash algorithm and number of rounds if applicable are encoded in the salt.

+
Immutable
decrypt(data: bytes, key: bytes, type: string) → bytes

Decrypt data with key using the cipher method specified by type.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
decrypt_iv(data: bytes, key: bytes, iv: bytes, type: string) → bytes

Decrypt data with key using the cipher method specified by type. If the mode is CBC, the provided iv will be used. Otherwise, it will be ignored.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
digest(data: bytes, type: string) → bytes

Computes a binary hash of the given data. type is the algorithm to use (md5, sha1, sha224, sha256, sha384, or sha512).

+
Immutable
digest(data: string, type: string) → bytes

Computes a binary hash of the given data. type is the algorithm to use (md5, sha1, sha224, sha256, sha384, or sha512).

+
Immutable
encrypt(data: bytes, key: bytes, type: string) → bytes

Encrypt data with key using the cipher method specified by type.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
encrypt_iv(data: bytes, key: bytes, iv: bytes, type: string) → bytes

Encrypt data with key using the cipher method specified by type. If the mode is CBC, the provided iv will be used. Otherwise, it will be ignored.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
gen_random_bytes(count: int) → bytes

Returns count cryptographically strong random bytes. At most 1024 bytes can be extracted at a time.

+
Volatile
gen_salt(type: string) → string

Generates a salt for input into the crypt function using the default number of rounds.

+
Volatile
gen_salt(type: string, iter_count: int) → string

Generates a salt for input into the crypt function using iter_count number of rounds.

+
Volatile
hmac(data: bytes, key: bytes, type: string) → bytes

Calculates hashed MAC for data with key key. type is the same as in digest().

+
Immutable
hmac(data: string, key: string, type: string) → bytes

Calculates hashed MAC for data with key key. type is the same as in digest().

+
Immutable
+ +### DECIMAL functions + + + + + +
Function → ReturnsDescriptionVolatility
hlc_to_timestamp(hlc: decimal) → timestamptz

Returns a TimestampTZ representation of a CockroachDB HLC in decimal form.

+

Note that a TimestampTZ has less precision than a CockroachDB HLC. It is intended as +a convenience function to display HLCs in a print-friendly form. Use the decimal +value if you rely on the HLC for accuracy.

+
Immutable
+ +### Date and time functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
age(end: timestamptz, begin: timestamptz) → interval

Calculates the interval between begin and end, normalized into years, months and days.

+

Note this may not be an accurate time span since years and months are normalized +from days, and years and months are out of context. To avoid normalizing days into +months and years, use the timestamptz subtraction operator.

+
Immutable
age(val: timestamptz) → interval

Calculates the interval between val and the current time, normalized into years, months and days.

+

Note this may not be an accurate time span since years and months are normalized +from days, and years and months are out of context. To avoid normalizing days into +months and years, use now() - timestamptz.

+
Stable
clock_timestamp() → timestamp

Returns the current system time on one of the cluster nodes.

+
Volatile
clock_timestamp() → timestamptz

Returns the current system time on one of the cluster nodes.

+
Volatile
current_date() → date

Returns the date of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
current_timestamp(precision: int) → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp(precision: int) → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp(precision: int) → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
date_part(element: string, input: date) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: interval) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, +month, day, hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: time) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: timestamp) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: timestamptz) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Stable
date_part(element: string, input: timetz) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Immutable
date_trunc(element: string, input: date) → timestamptz

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: interval) → interval

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: time) → interval

Truncates input to precision element. Sets all fields that are less +significant than element to zero.

+

Compatible elements: hour, minute, second, millisecond, microsecond.

+
Immutable
date_trunc(element: string, input: timestamp) → timestamp

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Immutable
date_trunc(element: string, input: timestamptz) → timestamptz

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: timestamptz, timezone: string) → timestamptz

Truncates input to precision element in the specified timezone. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
experimental_follower_read_timestamp() → timestamptz

Same as follower_read_timestamp. This name is deprecated.

+
Volatile
experimental_strftime(input: date, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strftime(input: timestamp, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strftime(input: timestamptz, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strptime(input: string, format: string) → timestamptz

Returns input as a timestamptz using format (which uses standard strptime formatting).

+
Immutable
extract(element: string, input: date) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: interval) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, +month, day, hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: time) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: timestamp) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: timestamptz) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Stable
extract(element: string, input: timetz) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Immutable
extract_duration(element: string, input: interval) → int

Extracts element from input. +Compatible elements: hour, minute, second, millisecond, microsecond. +This is deprecated in favor of extract which supports duration.

+
Immutable
follower_read_timestamp() → timestamptz

Returns a timestamp which is very likely to be safe to perform +against a follower replica.

+

This function is intended to be used with an AS OF SYSTEM TIME clause to perform +historical reads against a time which is recent but sufficiently old for reads +to be performed against the closest replica as opposed to the currently +leaseholder for a given range.

+

Note that this function requires an enterprise license on a CCL distribution to +return a result that is less likely the closest replica. It is otherwise +hardcoded as -4.8s from the statement time, which may not result in reading from the +nearest replica.

+
Volatile
localtimestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtimestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp(precision: int) → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp(precision: int) → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtimestamp(precision: int) → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
make_date(year: int, month: int, day: int) → date

Create date (formatted according to ISO 8601) from year, month, and day fields (negative years signify BC).

+
Immutable
make_timestamp(year: int, month: int, day: int, hour: int, min: int, sec: float) → timestamp

Create timestamp (formatted according to ISO 8601) from year, month, day, hour, minute, and seconds fields (negative years signify BC).

+
Immutable
make_timestamptz(year: int, month: int, day: int, hour: int, min: int, sec: float) → timestamptz

Create timestamp (formatted according to ISO 8601) with time zone from year, month, day, hour, minute and seconds fields (negative years signify BC). If timezone is not specified, the current time zone is used.

+
Stable
make_timestamptz(year: int, month: int, day: int, hour: int, min: int, sec: float, timezone: string) → timestamptz

Create timestamp (formatted according to ISO 8601) with time zone from year, month, day, hour, minute and seconds fields (negative years signify BC). If timezone is not specified, the current time zone is used.

+
Stable
now() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
now() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
now() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
overlaps(s1: date, e1: date, s2: date, e2: date) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: date, e1: interval, s2: date, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: time, e1: interval, s2: time, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: time, e1: time, s2: time, e2: time) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamp, e1: interval, s2: timestamp, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamp, e1: timestamp, s2: timestamp, e2: timestamp) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamptz, e1: interval, s2: timestamptz, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Stable
overlaps(s1: timestamptz, e1: timestamptz, s2: timestamptz, e2: timestamptz) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timetz, e1: interval, s2: timetz, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timetz, e1: timetz, s2: timetz, e2: timetz) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
statement_timestamp() → timestamp

Returns the start time of the current statement.

+
Stable
statement_timestamp() → timestamptz

Returns the start time of the current statement.

+
Stable
strftime(input: date, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strftime(input: timestamp, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strftime(input: timestamptz, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strptime(input: string, format: string) → timestamptz

Returns input as a timestamptz using format (which uses standard strptime formatting).

+
Immutable
timeofday() → string

Returns the current system time on one of the cluster nodes as a string.

+
Stable
timezone(timezone: string, time: time) → timetz

Treat given time without time zone as located in the specified time zone.

+
Stable
timezone(timezone: string, timestamp: timestamp) → timestamptz

Treat given time stamp without time zone as located in the specified time zone.

+
Immutable
timezone(timezone: string, timestamptz: timestamptz) → timestamp

Convert given time stamp with time zone to the new time zone, with no time zone designation.

+
Immutable
timezone(timezone: string, timestamptz_string: string) → timestamp

Convert given time stamp with time zone to the new time zone, with no time zone designation.

+
Stable
timezone(timezone: string, timetz: timetz) → timetz

Convert given time with time zone to the new time zone.

+
Stable
to_char(date: date) → string

Convert a date to a string assuming the ISO, MDY DateStyle.

+
Immutable
to_char(date: date, format: string) → string

Convert a date to a string using the given format.

+
Stable
to_char(interval: interval) → string

Convert an interval to a string assuming the Postgres IntervalStyle.

+
Immutable
to_char(interval: interval, format: string) → string

Convert an interval to a string using the given format.

+
Stable
to_char(timestamp: timestamp) → string

Convert a timestamp to a string assuming the ISO, MDY DateStyle.

+
Immutable
to_char(timestamp: timestamp, format: string) → string

Convert a timestamp to a string using the given format.

+
Stable
to_char(timestamptz: timestamptz, format: string) → string

Convert a timestamp with time zone to a string using the given format.

+
Stable
to_timestamp(timestamp: float) → timestamptz

Convert Unix epoch (seconds since 1970-01-01 00:00:00+00) to timestamp with time zone.

+
Immutable
transaction_timestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
transaction_timestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
transaction_timestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
with_max_staleness(max_staleness: interval) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp within the staleness +bound that allows execution of the reads at the nearest available replica without blocking.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_max_staleness(max_staleness: interval, nearest_only: bool) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp within the staleness +bound that allows execution of the reads at the nearest available replica without blocking.

+

If nearest_only is set to true, reads that cannot be served using the nearest +available replica will error.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_min_timestamp(min_timestamp: timestamptz) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp before the min_timestamp +that allows execution of the reads at the nearest available replica without blocking.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_min_timestamp(min_timestamp: timestamptz, nearest_only: bool) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp before the min_timestamp +that allows execution of the reads at the nearest available replica without blocking.

+

If nearest_only is set to true, reads that cannot be served using the nearest +available replica will error.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
+ +### Enum functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
enum_first(val: anyenum) → anyenum

Returns the first value of the input enum type.

+
Stable
enum_last(val: anyenum) → anyenum

Returns the last value of the input enum type.

+
Stable
enum_range(lower: anyenum, upper: anyenum) → anyenum[]

Returns all values of the input enum in an ordered array between the two arguments (inclusive).

+
Stable
enum_range(val: anyenum) → anyenum[]

Returns all values of the input enum in an ordered array.

+
Stable
+ +### FLOAT functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
abs(val: decimal) → decimal

Calculates the absolute value of val.

+
Immutable
abs(val: float) → float

Calculates the absolute value of val.

+
Immutable
abs(val: int) → int

Calculates the absolute value of val.

+
Immutable
acos(val: float) → float

Calculates the inverse cosine of val.

+
Immutable
acosd(val: float) → float

Calculates the inverse cosine of val with the result in degrees

+
Immutable
acosh(val: float) → float

Calculates the inverse hyperbolic cosine of val.

+
Immutable
asin(val: float) → float

Calculates the inverse sine of val.

+
Immutable
asind(val: float) → float

Calculates the inverse sine of val with the result in degrees.

+
Immutable
asinh(val: float) → float

Calculates the inverse hyperbolic sine of val.

+
Immutable
atan(val: float) → float

Calculates the inverse tangent of val.

+
Immutable
atan2(x: float, y: float) → float

Calculates the inverse tangent of x/y.

+
Immutable
atan2d(x: float, y: float) → float

Calculates the inverse tangent of x/y with the result in degrees

+
Immutable
atand(val: float) → float

Calculates the inverse tangent of val with the result in degrees.

+
Immutable
atanh(val: float) → float

Calculates the inverse hyperbolic tangent of val.

+
Immutable
cbrt(val: decimal) → decimal

Calculates the cube root (∛) of val.

+
Immutable
cbrt(val: float) → float

Calculates the cube root (∛) of val.

+
Immutable
ceil(val: decimal) → decimal

Calculates the smallest integer not smaller than val.

+
Immutable
ceil(val: float) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceil(val: int) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: decimal) → decimal

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: float) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: int) → float

Calculates the smallest integer not smaller than val.

+
Immutable
cos(val: float) → float

Calculates the cosine of val.

+
Immutable
cosd(val: float) → float

Calculates the cosine of val where val is in degrees.

+
Immutable
cosh(val: float) → float

Calculates the hyperbolic cosine of val.

+
Immutable
cot(val: float) → float

Calculates the cotangent of val.

+
Immutable
cotd(val: float) → float

Calculates the cotangent of val where val is in degrees.

+
Immutable
degrees(val: float) → float

Converts val as a radian value to a degree value.

+
Immutable
div(x: decimal, y: decimal) → decimal

Calculates the integer quotient of x/y.

+
Immutable
div(x: float, y: float) → float

Calculates the integer quotient of x/y.

+
Immutable
div(x: int, y: int) → int

Calculates the integer quotient of x/y.

+
Immutable
exp(val: decimal) → decimal

Calculates e ^ val.

+
Immutable
exp(val: float) → float

Calculates e ^ val.

+
Immutable
floor(val: decimal) → decimal

Calculates the largest integer not greater than val.

+
Immutable
floor(val: float) → float

Calculates the largest integer not greater than val.

+
Immutable
floor(val: int) → float

Calculates the largest integer not greater than val.

+
Immutable
isnan(val: decimal) → bool

Returns true if val is NaN, false otherwise.

+
Immutable
isnan(val: float) → bool

Returns true if val is NaN, false otherwise.

+
Immutable
ln(val: decimal) → decimal

Calculates the natural log of val.

+
Immutable
ln(val: float) → float

Calculates the natural log of val.

+
Immutable
log(b: decimal, x: decimal) → decimal

Calculates the base b log of x.

+
Immutable
log(b: float, x: float) → float

Calculates the base b log of x.

+
Immutable
log(val: decimal) → decimal

Calculates the base 10 log of val.

+
Immutable
log(val: float) → float

Calculates the base 10 log of val.

+
Immutable
mod(x: decimal, y: decimal) → decimal

Calculates x%y.

+
Immutable
mod(x: float, y: float) → float

Calculates x%y.

+
Immutable
mod(x: int, y: int) → int

Calculates x%y.

+
Immutable
pi() → float

Returns the value for pi (3.141592653589793).

+
Immutable
pow(x: decimal, y: decimal) → decimal

Calculates x^y.

+
Immutable
pow(x: float, y: float) → float

Calculates x^y.

+
Immutable
pow(x: int, y: int) → int

Calculates x^y.

+
Immutable
power(x: decimal, y: decimal) → decimal

Calculates x^y.

+
Immutable
power(x: float, y: float) → float

Calculates x^y.

+
Immutable
power(x: int, y: int) → int

Calculates x^y.

+
Immutable
radians(val: float) → float

Converts val as a degree value to a radians value.

+
Immutable
random() → float

Returns a random floating-point number between 0 (inclusive) and 1 (exclusive). Note that the value contains at most 53 bits of randomness.

+
Volatile
round(input: decimal, decimal_accuracy: int) → decimal

Keeps decimal_accuracy number of figures to the right of the zero position in input using half away from zero rounding. If decimal_accuracy is not in the range -2^31…(2^31-1), the results are undefined.

+
Immutable
round(input: float, decimal_accuracy: int) → float

Keeps decimal_accuracy number of figures to the right of the zero position in input using half to even (banker’s) rounding.

+
Immutable
round(val: decimal) → decimal

Rounds val to the nearest integer, half away from zero: round(+/-2.4) = +/-2, round(+/-2.5) = +/-3.

+
Immutable
round(val: float) → float

Rounds val to the nearest integer using half to even (banker’s) rounding.

+
Immutable
setseed(seed: float) → void

Sets the seed for subsequent random() calls in this session (value between -1.0 and 1.0, inclusive). There are no guarantees as to how this affects the seed of random() calls that appear in the same query as setseed().

+
Volatile
sign(val: decimal) → decimal

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sign(val: float) → float

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sign(val: int) → int

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sin(val: float) → float

Calculates the sine of val.

+
Immutable
sind(val: float) → float

Calculates the sine of val where val is in degrees.

+
Immutable
sinh(val: float) → float

Calculates the hyperbolic sine of val.

+
Immutable
sqrt(val: decimal) → decimal

Calculates the square root of val.

+
Immutable
sqrt(val: float) → float

Calculates the square root of val.

+
Immutable
tan(val: float) → float

Calculates the tangent of val.

+
Immutable
tand(val: float) → float

Calculates the tangent of val where val is in degrees.

+
Immutable
tanh(val: float) → float

Calculates the hyperbolic tangent of val.

+
Immutable
trunc(val: decimal) → decimal

Truncates the decimal values of val.

+
Immutable
trunc(val: decimal, scale: int) → decimal

Truncates val to scale decimal places.

+
Immutable
trunc(val: float) → float

Truncates the decimal values of val.

+
Immutable
+ +### Full Text Search functions + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
phraseto_tsquery(config: string, text: string) → tsquery

Converts text to a tsquery, normalizing words according to the specified configuration. The <-> operator is inserted between each token in the input.

+
Immutable
phraseto_tsquery(text: string) → tsquery

Converts text to a tsquery, normalizing words according to the default configuration. The <-> operator is inserted between each token in the input.

+
Stable
plainto_tsquery(config: string, text: string) → tsquery

Converts text to a tsquery, normalizing words according to the specified configuration. The & operator is inserted between each token in the input.

+
Immutable
plainto_tsquery(text: string) → tsquery

Converts text to a tsquery, normalizing words according to the default configuration. The & operator is inserted between each token in the input.

+
Stable
to_tsquery(config: string, text: string) → tsquery

Converts the input text into a tsquery by normalizing each word in the input according to the specified configuration. The input must already be formatted like a tsquery, in other words, subsequent tokens must be connected by a tsquery operator (&, |, <->, !).

+
Immutable
to_tsquery(text: string) → tsquery

Converts the input text into a tsquery by normalizing each word in the input according to the default configuration. The input must already be formatted like a tsquery, in other words, subsequent tokens must be connected by a tsquery operator (&, |, <->, !).

+
Stable
to_tsvector(config: string, text: string) → tsvector

Converts text to a tsvector, normalizing words according to the specified configuration. Position information is included in the result.

+
Immutable
to_tsvector(text: string) → tsvector

Converts text to a tsvector, normalizing words according to the default configuration. Position information is included in the result.

+
Stable
ts_parse(parser_name: string, document: string) → tuple{int AS tokid, string AS token}

ts_parse parses the given document and returns a series of records, one for each token produced by parsing. Each record includes a tokid showing the assigned token type and a token which is the text of the token.

+
Stable
ts_rank(vector: tsvector, query: tsquery) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(vector: tsvector, query: tsquery, normalization: int) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(weights: float[], vector: tsvector, query: tsquery) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(weights: float[], vector: tsvector, query: tsquery, normalization: int) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
+ +### Fuzzy String Matching functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
levenshtein(source: string, target: string) → int

Calculates the Levenshtein distance between two strings. Maximum input length is 255 characters.

+
Immutable
levenshtein(source: string, target: string, ins_cost: int, del_cost: int, sub_cost: int) → int

Calculates the Levenshtein distance between two strings. The cost parameters specify how much to charge for each edit operation. Maximum input length is 255 characters.

+
Immutable
metaphone(source: string, max_output_length: int) → string

Convert a string to its Metaphone code. Maximum input length is 255 characters

+
Immutable
soundex(source: string) → string

Convert a string to its Soundex code.

+
Immutable
+ +### ID generation functions + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
experimental_uuid_v4() → bytes

Returns a UUID.

+
Volatile
gen_random_ulid() → uuid

Generates a random ULID and returns it as a value of UUID type.

+
Volatile
gen_random_uuid() → uuid

Generates a random version 4 UUID, and returns it as a value of UUID type.

+
Volatile
unique_rowid() → int

Returns a unique ID used by CockroachDB to generate unique row IDs if a Primary Key isn’t defined for the table. The value is a combination of the insert timestamp and the ID of the node executing the statement, which guarantees this combination is globally unique. However, there can be gaps and the order is not completely guaranteed.

+
Volatile
unordered_unique_rowid() → int

Returns a unique ID. The value is a combination of the insert timestamp (bit-reversed) and the ID of the node executing the statement, which guarantees this combination is globally unique. The way it is generated is statistically likely to not have any ordering relative to previously generated values.

+
Volatile
uuid_generate_v1() → uuid

Generates a version 1 UUID, and returns it as a value of UUID type. To avoid exposing the server’s real MAC address, this uses a random MAC address and a timestamp. Essentially, this is an alias for uuid_generate_v1mc.

+
Volatile
uuid_generate_v1mc() → uuid

Generates a version 1 UUID, and returns it as a value of UUID type. This uses a random MAC address and a timestamp.

+
Volatile
uuid_generate_v3(namespace: uuid, name: string) → uuid

Generates a version 3 UUID in the given namespace using the specified input name, with md5 as the hashing method. The namespace should be one of the special constants produced by the uuid_ns_*() functions.

+
Immutable
uuid_generate_v4() → uuid

Generates a random version 4 UUID, and returns it as a value of UUID type.

+
Volatile
uuid_generate_v5(namespace: uuid, name: string) → uuid

Generates a version 5 UUID in the given namespace using the specified input name. This is similar to a version 3 UUID, except it uses SHA-1 for hashing.

+
Immutable
uuid_nil() → uuid

Returns a nil UUID constant.

+
Immutable
uuid_ns_dns() → uuid

Returns a constant designating the DNS namespace for UUIDs.

+
Immutable
uuid_ns_oid() → uuid

Returns a constant designating the ISO object identifier (OID) namespace for UUIDs. These are unrelated to the OID type used internally in the database.

+
Immutable
uuid_ns_url() → uuid

Returns a constant designating the URL namespace for UUIDs.

+
Immutable
uuid_ns_x500() → uuid

Returns a constant designating the X.500 distinguished name (DN) namespace for UUIDs.

+
Immutable
uuid_v4() → bytes

Returns a UUID.

+
Volatile
+ +### INET functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
abbrev(val: inet) → string

Converts the combined IP address and prefix length to an abbreviated display format as text. For INET types, this will omit the prefix length if it’s not the default (32 for IPv4, 128 for IPv6)

+

For example, abbrev('192.168.1.2/24') returns '192.168.1.2/24'

+
Immutable
broadcast(val: inet) → inet

Gets the broadcast address for the network address represented by the value.

+

For example, broadcast('192.168.1.2/24') returns '192.168.1.255/24'

+
Immutable
family(val: inet) → int

Extracts the IP family of the value; 4 for IPv4, 6 for IPv6.

+

For example, family('::1') returns 6

+
Immutable
host(val: inet) → string

Extracts the address part of the combined address/prefixlen value as text.

+

For example, host('192.168.1.2/16') returns '192.168.1.2'

+
Immutable
hostmask(val: inet) → inet

Creates an IP host mask corresponding to the prefix length in the value.

+

For example, hostmask('192.168.1.2/16') returns '0.0.255.255'

+
Immutable
masklen(val: inet) → int

Retrieves the prefix length stored in the value.

+

For example, masklen('192.168.1.2/16') returns 16

+
Immutable
netmask(val: inet) → inet

Creates an IP network mask corresponding to the prefix length in the value.

+

For example, netmask('192.168.1.2/16') returns '255.255.0.0'

+
Immutable
set_masklen(val: inet, prefixlen: int) → inet

Sets the prefix length of val to prefixlen.

+

For example, set_masklen('192.168.1.2', 16) returns '192.168.1.2/16'.

+
Immutable
+ +### INT functions + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
crc32c(bytes...) → int

Calculates the CRC-32 hash using the Castagnoli polynomial.

+
Leakproof
crc32c(string...) → int

Calculates the CRC-32 hash using the Castagnoli polynomial.

+
Leakproof
crc32ieee(bytes...) → int

Calculates the CRC-32 hash using the IEEE polynomial.

+
Leakproof
crc32ieee(string...) → int

Calculates the CRC-32 hash using the IEEE polynomial.

+
Leakproof
fnv32(bytes...) → int

Calculates the 32-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv32(string...) → int

Calculates the 32-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv32a(bytes...) → int

Calculates the 32-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv32a(string...) → int

Calculates the 32-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv64(bytes...) → int

Calculates the 64-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv64(string...) → int

Calculates the 64-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv64a(bytes...) → int

Calculates the 64-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv64a(string...) → int

Calculates the 64-bit FNV-1a hash value of a set of values.

+
Leakproof
width_bucket(operand: decimal, b1: decimal, b2: decimal, count: int) → int

return the bucket number to which operand would be assigned in a histogram having count equal-width buckets spanning the range b1 to b2.

+
Immutable
width_bucket(operand: int, b1: int, b2: int, count: int) → int

return the bucket number to which operand would be assigned in a histogram having count equal-width buckets spanning the range b1 to b2.

+
Immutable
width_bucket(operand: anyelement, thresholds: anyelement[]) → int

return the bucket number to which operand would be assigned given an array listing the lower bounds of the buckets; returns 0 for an input less than the first lower bound; the thresholds array must be sorted, smallest first, or unexpected results will be obtained

+
Immutable
+ +### JSONB functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_to_json(array: anyelement[]) → jsonb

Returns the array as JSON or JSONB.

+
Stable
array_to_json(array: anyelement[], pretty_bool: bool) → jsonb

Returns the array as JSON or JSONB.

+
Stable
json_array_elements(input: jsonb) → jsonb

Expands a JSON array to a set of JSON values.

+
Immutable
json_array_elements_text(input: jsonb) → string

Expands a JSON array to a set of text values.

+
Immutable
json_array_length(json: jsonb) → int

Returns the number of elements in the outermost JSON or JSONB array.

+
Immutable
json_build_array(anyelement...) → jsonb

Builds a possibly-heterogeneously-typed JSON or JSONB array out of a variadic argument list.

+
Stable
json_build_object(anyelement...) → jsonb

Builds a JSON object out of a variadic argument list.

+
Stable
json_each(input: jsonb) → tuple{string AS key, jsonb AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs.

+
Immutable
json_each_text(input: jsonb) → tuple{string AS key, string AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs. The returned values will be of type text.

+
Immutable
json_extract_path(jsonb, string...) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
json_extract_path_text(jsonb, string...) → string

Returns the JSON value as text pointed to by the variadic arguments.

+
Immutable
json_object(keys: string[], values: string[]) → jsonb

This form of json_object takes keys and values pairwise from two separate arrays. In all other respects it is identical to the one-argument form.

+
Immutable
json_object(texts: string[]) → jsonb

Builds a JSON or JSONB object out of a text array. The array must have exactly one dimension with an even number of members, in which case they are taken as alternating key/value pairs.

+
Immutable
json_populate_record(base: anyelement, from_json: jsonb) → anyelement

Expands the object in from_json to a row whose columns match the record type defined by base.

+
Stable
json_populate_recordset(base: anyelement, from_json: jsonb) → anyelement

Expands the outermost array of objects in from_json to a set of rows whose columns match the record type defined by base

+
Stable
json_remove_path(val: jsonb, path: string[]) → jsonb

Remove the specified path from the JSON object.

+
Immutable
json_set(val: jsonb, path: string[], to: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
json_set(val: jsonb, path: string[], to: jsonb, create_missing: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If create_missing is false, new keys will not be inserted to objects and values will not be prepended or appended to arrays.

+
Immutable
json_strip_nulls(from_json: jsonb) → jsonb

Returns from_json with all object fields that have null values omitted. Other null values are untouched.

+
Immutable
json_typeof(val: jsonb) → string

Returns the type of the outermost JSON value as a text string.

+
Immutable
json_valid(string: string) → bool

Returns whether the given string is valid JSON or not

+
Immutable
jsonb_array_elements(input: jsonb) → jsonb

Expands a JSON array to a set of JSON values.

+
Immutable
jsonb_array_elements_text(input: jsonb) → string

Expands a JSON array to a set of text values.

+
Immutable
jsonb_array_length(json: jsonb) → int

Returns the number of elements in the outermost JSON or JSONB array.

+
Immutable
jsonb_build_array(anyelement...) → jsonb

Builds a possibly-heterogeneously-typed JSON or JSONB array out of a variadic argument list.

+
Stable
jsonb_build_object(anyelement...) → jsonb

Builds a JSON object out of a variadic argument list.

+
Stable
jsonb_each(input: jsonb) → tuple{string AS key, jsonb AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs.

+
Immutable
jsonb_each_text(input: jsonb) → tuple{string AS key, string AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs. The returned values will be of type text.

+
Immutable
jsonb_exists_any(json: jsonb, array: string[]) → bool

Returns whether any of the strings in the text array exist as top-level keys or array elements

+
Immutable
jsonb_extract_path(jsonb, string...) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
jsonb_extract_path_text(jsonb, string...) → string

Returns the JSON value as text pointed to by the variadic arguments.

+
Immutable
jsonb_insert(target: jsonb, path: string[], new_val: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments. new_val will be inserted before path target.

+
Immutable
jsonb_insert(target: jsonb, path: string[], new_val: jsonb, insert_after: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If insert_after is true (default is false), new_val will be inserted after path target.

+
Immutable
jsonb_object(keys: string[], values: string[]) → jsonb

This form of json_object takes keys and values pairwise from two separate arrays. In all other respects it is identical to the one-argument form.

+
Immutable
jsonb_object(texts: string[]) → jsonb

Builds a JSON or JSONB object out of a text array. The array must have exactly one dimension with an even number of members, in which case they are taken as alternating key/value pairs.

+
Immutable
jsonb_populate_record(base: anyelement, from_json: jsonb) → anyelement

Expands the object in from_json to a row whose columns match the record type defined by base.

+
Stable
jsonb_populate_recordset(base: anyelement, from_json: jsonb) → anyelement

Expands the outermost array of objects in from_json to a set of rows whose columns match the record type defined by base

+
Stable
jsonb_pretty(val: jsonb) → string

Returns the given JSON value as a STRING indented and with newlines.

+
Immutable
jsonb_set(val: jsonb, path: string[], to: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
jsonb_set(val: jsonb, path: string[], to: jsonb, create_missing: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If create_missing is false, new keys will not be inserted to objects and values will not be prepended or appended to arrays.

+
Immutable
jsonb_strip_nulls(from_json: jsonb) → jsonb

Returns from_json with all object fields that have null values omitted. Other null values are untouched.

+
Immutable
jsonb_typeof(val: jsonb) → string

Returns the type of the outermost JSON value as a text string.

+
Immutable
row_to_json(row: tuple) → jsonb

Returns the row as a JSON object.

+
Stable
to_json(val: anyelement) → jsonb

Returns the value as JSON or JSONB.

+
Stable
to_jsonb(val: anyelement) → jsonb

Returns the value as JSON or JSONB.

+
Stable
+ +### Multi-region functions + + + + + + + +
Function → ReturnsDescriptionVolatility
default_to_database_primary_region(val: string) → string

Returns the given region if the region has been added to the current database. +Otherwise, this will return the primary region of the current database. +This will error if the current database is not a multi-region database.

+
Stable
gateway_region() → string

Returns the region of the connection’s current node as defined by +the locality flag on node startup. Returns an error if no region is set.

+
Stable
rehome_row() → string

Returns the region of the connection’s current node as defined by +the locality flag on node startup. Returns an error if no region is set.

+
Stable
+ +### PGVector functions + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cosine_distance(v1: vector, v2: vector) → float

Returns the cosine distance between the two vectors.

+
Immutable
inner_product(v1: vector, v2: vector) → float

Returns the inner product between the two vectors.

+
Immutable
l1_distance(v1: vector, v2: vector) → float

Returns the Manhattan distance between the two vectors.

+
Immutable
l2_distance(v1: vector, v2: vector) → float

Returns the Euclidean distance between the two vectors.

+
Immutable
vector_dims(vector: vector) → int

Returns the number of the dimensions in the vector.

+
Immutable
vector_norm(vector: vector) → float

Returns the Euclidean norm of the vector.

+
Immutable
+ +### STRING[] functions + + + + + + +
Function → ReturnsDescriptionVolatility
regexp_split_to_array(string: string, pattern: string) → string[]

Split string using a POSIX regular expression as the delimiter.

+
Immutable
regexp_split_to_array(string: string, pattern: string, flags: string) → string[]

Split string using a POSIX regular expression as the delimiter with flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
+ +### Sequence functions + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
currval(sequence_name: string) → int

Returns the latest value obtained with nextval for this sequence in this session.

+
Volatile
currval(sequence_name: regclass) → int

Returns the latest value obtained with nextval for this sequence in this session.

+
Volatile
lastval() → int

Returns the value most recently obtained with nextval in this session.

+
Volatile
nextval(sequence_name: string) → int

Advances the given sequence and returns its new value.

+
Volatile
nextval(sequence_name: regclass) → int

Advances the given sequence and returns its new value.

+
Volatile
setval(sequence_name: string, value: int) → int

Set the given sequence’s current value. The next call to nextval will return value + Increment

+
Volatile
setval(sequence_name: string, value: int, is_called: bool) → int

Set the given sequence’s current value. If is_called is false, the next call to nextval will return value; otherwise value + Increment.

+
Volatile
setval(sequence_name: regclass, value: int) → int

Set the given sequence’s current value. The next call to nextval will return value + Increment

+
Volatile
setval(sequence_name: regclass, value: int, is_called: bool) → int

Set the given sequence’s current value. If is_called is false, the next call to nextval will return value; otherwise value + Increment.

+
Volatile
+ +### Set-returning functions + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
aclexplode(aclitems: string[]) → tuple{oid AS grantor, oid AS grantee, string AS privilege_type, bool AS is_grantable}

Produces a virtual table containing aclitem stuff (returns no rows as this feature is unsupported in CockroachDB)

+
Stable
generate_series(start: int, end: int) → int

Produces a virtual table containing the integer values from start to end, inclusive.

+
Immutable
generate_series(start: int, end: int, step: int) → int

Produces a virtual table containing the integer values from start to end, inclusive, by increment of step.

+
Immutable
generate_series(start: timestamp, end: timestamp, step: interval) → timestamp

Produces a virtual table containing the timestamp values from start to end, inclusive, by increment of step.

+
Immutable
generate_series(start: timestamptz, end: timestamptz, step: interval) → timestamptz

Produces a virtual table containing the timestampTZ values from start to end, inclusive, by increment of step.

+
Immutable
generate_subscripts(array: anyelement[]) → int

Returns a series comprising the given array’s subscripts.

+
Immutable
generate_subscripts(array: anyelement[], dim: int) → int

Returns a series comprising the given array’s subscripts.

+
Immutable
generate_subscripts(array: anyelement[], dim: int, reverse: bool) → int

Returns a series comprising the given array’s subscripts.

+

When reverse is true, the series is returned in reverse order.

+
Immutable
information_schema._pg_expandarray(input: anyelement[]) → tuple{anyelement AS x, int AS n}

Returns the input array as a set of rows with an index

+
Immutable
json_object_keys(input: jsonb) → string

Returns sorted set of keys in the outermost JSON object.

+
Immutable
json_to_record(input: jsonb) → tuple

Builds an arbitrary record from a JSON object.

+
Stable
json_to_recordset(input: jsonb) → tuple

Builds an arbitrary set of records from a JSON array of objects.

+
Stable
jsonb_object_keys(input: jsonb) → string

Returns sorted set of keys in the outermost JSON object.

+
Immutable
jsonb_to_record(input: jsonb) → tuple

Builds an arbitrary record from a JSON object.

+
Stable
jsonb_to_recordset(input: jsonb) → tuple

Builds an arbitrary set of records from a JSON array of objects.

+
Stable
pg_get_keywords() → tuple{string AS word, string AS catcode, string AS catdesc}

Produces a virtual table containing the keywords known to the SQL parser.

+
Immutable
pg_options_to_table(options: string[]) → tuple{string AS option_name, string AS option_value}

Converts the options array format to a table.

+
Stable
regexp_split_to_table(string: string, pattern: string) → string

Split string using a POSIX regular expression as the delimiter.

+
Immutable
regexp_split_to_table(string: string, pattern: string, flags: string) → string

Split string using a POSIX regular expression as the delimiter with flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
unnest(anyelement[], anyelement[], anyelement[]...) → tuple{anyelement AS unnest, anyelement AS unnest, anyelement AS unnest}

Returns the input arrays as a set of rows

+
Immutable
unnest(input: anyelement[]) → anyelement

Returns the input array as a set of rows

+
Immutable
workload_index_recs() → string

Returns set of index recommendations

+
Immutable
workload_index_recs(timestamptz: timestamptz) → string

Returns set of index recommendations

+
Immutable
+ +### Spatial functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
_st_contains(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no points of geometry_b lie in the exterior of geometry_a, and there is at least one point in the interior of geometry_b that lies in the interior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_containsproperly(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_b intersects the interior of geometry_a but not the boundary or exterior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_coveredby(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_a is outside geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_coveredby(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_covers(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_b is outside geography_a.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_covers(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_crosses(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a has some - but not all - interior points in common with geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dfullywithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, inclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than or equal to distance units.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dfullywithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, exclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than distance units.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_equals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is spatially equal to geometry_b, i.e. ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = true.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_intersects(geography_a: geography, geography_b: geography) → bool

Returns true if geography_a shares any portion of space with geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_intersects(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_overlaps(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a intersects but does not completely contain geometry_b, or vice versa. “Does not completely” implies ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = false.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_touches(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if the only points in common between geometry_a and geometry_b are on the boundary. Note points do not touch other points.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_within(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is completely inside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
addgeometrycolumn(catalog_name: string, schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(catalog_name: string, schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
geometrytype(geometry: geometry) → string

Returns the type of geometry as a string.

+

This function utilizes the GEOS module.

+
Immutable
geomfromewkb(val: bytes) → geometry

Returns the Geometry from an EWKB representation.

+
Immutable
geomfromewkt(val: string) → geometry

Returns the Geometry from an EWKT representation.

+
Immutable
postgis_addbbox(geometry: geometry) → geometry

Compatibility placeholder function with PostGIS. This does not perform any operation on the Geometry.

+
Immutable
postgis_dropbbox(geometry: geometry) → geometry

Compatibility placeholder function with PostGIS. This does not perform any operation on the Geometry.

+
Immutable
postgis_extensions_upgrade() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_full_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_geos_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_getbbox(geometry: geometry) → box2d

Returns a box2d encapsulating the given Geometry.

+
Immutable
postgis_hasbbox(geometry: geometry) → bool

Returns whether a given Geometry has a bounding box. False for points and empty geometries; always true otherwise.

+
Immutable
postgis_lib_build_date() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_lib_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_liblwgeom_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_libxml_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_proj_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_build_date() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_installed() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_released() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_wagyu_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
st_addmeasure(geometry: geometry, start: float, end: float) → geometry

Returns a copy of a LineString or MultiLineString with measure coordinates linearly interpolated between the specified start and end values. Any existing M coordinates will be overwritten.

+
Immutable
st_addpoint(line_string: geometry, point: geometry) → geometry

Adds a Point to the end of a LineString.

+
Immutable
st_addpoint(line_string: geometry, point: geometry, index: int) → geometry

Adds a Point to a LineString at the given 0-based index (-1 to append).

+
Immutable
st_affine(geometry: geometry, a: float, b: float, c: float, d: float, e: float, f: float, g: float, h: float, i: float, x_off: float, y_off: float, z_off: float) → geometry

Applies a 3D affine transformation to the given geometry.

+

The matrix transformation will be applied as follows for each coordinate: +/ a b c x_off \ / x
+| d e f y_off | | y | +| g h i z_off | | z | +\ 0 0 0 1 / \ 0 /

+
Immutable
st_affine(geometry: geometry, a: float, b: float, d: float, e: float, x_off: float, y_off: float) → geometry

Applies a 2D affine transformation to the given geometry.

+

The matrix transformation will be applied as follows for each coordinate: +/ a b x_off \ / x
+| d e y_off | | y | +\ 0 0 1 / \ 0 /

+
Immutable
st_angle(line1: geometry, line2: geometry) → float

Returns the clockwise angle between two LINESTRING geometries, treating them as vectors between their start- and endpoints. Returns NULL if any vectors have 0 length.

+
Immutable
st_angle(point1: geometry, point2: geometry, point3: geometry) → float

Returns the clockwise angle between the vectors formed by point2,point1 and point2,point3. The arguments must be POINT geometries. Returns NULL if any vectors have 0 length.

+
Immutable
st_angle(point1: geometry, point2: geometry, point3: geometry, point4: geometry) → float

Returns the clockwise angle between the vectors formed by point1,point2 and point3,point4. The arguments must be POINT geometries. Returns NULL if any vectors have 0 length.

+
Immutable
st_area(geography: geography) → float

Returns the area of the given geography in meters^2. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_area(geography: geography, use_spheroid: bool) → float

Returns the area of the given geography in meters^2.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_area(geometry: geometry) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_area(geometry_str: string) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_area2d(geometry: geometry) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_asbinary(geography: geography) → bytes

Returns the WKB representation of a given Geography.

+
Immutable
st_asbinary(geography: geography, xdr_or_ndr: string) → bytes

Returns the WKB representation of a given Geography. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_asbinary(geometry: geometry) → bytes

Returns the WKB representation of a given Geometry.

+
Immutable
st_asbinary(geometry: geometry, xdr_or_ndr: string) → bytes

Returns the WKB representation of a given Geometry. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_asencodedpolyline(geometry: geometry) → string

Returns the geometry as an Encoded Polyline. +This format is used by Google Maps with precision=5 and by Open Source Routing Machine with precision=5 and 6. +Preserves 5 decimal places.

+
Immutable
st_asencodedpolyline(geometry: geometry, precision: int4) → string

Returns the geometry as an Encoded Polyline. +This format is used by Google Maps with precision=5 and by Open Source Routing Machine with precision=5 and 6. +Precision specifies how many decimal places will be preserved in Encoded Polyline. Value should be the same on encoding and decoding, or coordinates will be incorrect.

+
Immutable
st_asewkb(geography: geography) → bytes

Returns the EWKB representation of a given Geography.

+
Immutable
st_asewkb(geometry: geometry) → bytes

Returns the EWKB representation of a given Geometry.

+
Immutable
st_asewkt(geography: geography) → string

Returns the EWKT representation of a given Geography. A default of 15 decimal digits is used.

+
Immutable
st_asewkt(geography: geography, max_decimal_digits: int) → string

Returns the EWKT representation of a given Geography. The max_decimal_digits parameter controls the maximum decimal digits to print after the decimal point. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_asewkt(geometry: geometry) → string

Returns the EWKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+
Immutable
st_asewkt(geometry: geometry, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the decimal point. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_asewkt(geometry_str: string) → string

Returns the EWKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asewkt(geometry_str: string, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the decimal point. Use -1 to print as many digits as required to rebuild the same number.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geography: geography) → string

Returns the GeoJSON representation of a given Geography. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(geography: geography, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geography with max_decimal_digits output for each coordinate value.

+
Immutable
st_asgeojson(geography: geography, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geography with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option (default for Geography)
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326
  • +
+
Immutable
st_asgeojson(geometry: geometry) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(geometry: geometry, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+
Immutable
st_asgeojson(geometry: geometry, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326 (default for Geometry)
  • +
+
Immutable
st_asgeojson(geometry_str: string) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geometry_str: string, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geometry_str: string, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326 (default for Geometry)
  • +
+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(row: tuple) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(row: tuple, geo_column: string) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. Coordinates have a maximum of 9 decimal digits.

+
Stable
st_asgeojson(row: tuple, geo_column: string, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. max_decimal_digits will be output for each coordinate value.

+
Stable
st_asgeojson(row: tuple, geo_column: string, max_decimal_digits: int, pretty: bool) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. max_decimal_digits will be output for each coordinate value. Output will be pretty printed in JSON if pretty is true.

+
Stable
st_ashexewkb(geography: geography) → string

Returns the EWKB representation in hex of a given Geography.

+
Immutable
st_ashexewkb(geography: geography, xdr_or_ndr: string) → string

Returns the EWKB representation in hex of a given Geography. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_ashexewkb(geometry: geometry) → string

Returns the EWKB representation in hex of a given Geometry.

+
Immutable
st_ashexewkb(geometry: geometry, xdr_or_ndr: string) → string

Returns the EWKB representation in hex of a given Geometry. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_ashexwkb(geography: geography) → string

Returns the WKB representation in hex of a given Geography.

+
Immutable
st_ashexwkb(geometry: geometry) → string

Returns the WKB representation in hex of a given Geometry.

+
Immutable
st_askml(geography: geography) → string

Returns the KML representation of a given Geography.

+
Immutable
st_askml(geometry: geometry) → string

Returns the KML representation of a given Geometry.

+
Immutable
st_askml(geometry_str: string) → string

Returns the KML representation of a given Geometry.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds. +Uses 256 as the buffer size in tile coordinate space for geometry clipping. +Uses 4096 as the tile extent size in tile coordinate space.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be transformed and clipped.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds. +Uses 256 as the buffer size in tile coordinate space for geometry clipping.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be transformed and clipped.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int, buffer: int) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be transformed and clipped.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int, buffer: int, clip: bool) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds if required.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be transformed, and clipped if required.

+
Immutable
st_astext(geography: geography) → string

Returns the WKT representation of a given Geography. A default of 15 decimal digits is used.

+
Immutable
st_astext(geography: geography, max_decimal_digits: int) → string

Returns the WKT representation of a given Geography. The max_decimal_digits parameter controls the maximum decimal digits to print after the decimal point. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_astext(geometry: geometry) → string

Returns the WKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+
Immutable
st_astext(geometry: geometry, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the decimal point. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_astext(geometry_str: string) → string

Returns the WKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_astext(geometry_str: string, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the decimal point. Use -1 to print as many digits as required to rebuild the same number.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int, precision_z: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int, precision_z: int, precision_m: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_azimuth(geography_a: geography, geography_b: geography) → float

Returns the azimuth in radians of the segment defined by the given point geographies, or NULL if the two points are coincident. It is solved using the Inverse geodesic problem.

+

The azimuth angle is referenced from north, and is positive clockwise: North = 0; East = π/2; South = π; West = 3π/2.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_azimuth(geometry_a: geometry, geometry_b: geometry) → float

Returns the azimuth in radians of the segment defined by the given point geometries, or NULL if the two points are coincident.

+

The azimuth angle is referenced from north, and is positive clockwise: North = 0; East = π/2; South = π; West = 3π/2.

+
Immutable
st_bdpolyfromtext(str: string, srid: int) → geometry

Returns a Polygon from multilinestring WKT with a SRID. If the input is not a multilinestring an error will be thrown.

+
Immutable
st_boundary(geometry: geometry) → geometry

Returns the closure of the combinatorial boundary of this Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_box2dfromgeohash(geohash: string) → box2d

Return a Box2D from a GeoHash string with max precision.

+
Immutable
st_box2dfromgeohash(geohash: string, precision: int) → box2d

Return a Box2D from a GeoHash string with supplied precision.

+
Immutable
st_buffer(geography: geography, distance: float) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geography: geography, distance: float, buffer_style_params: string) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geography: geography, distance: float, quad_segs: int) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geometry: geometry, distance: decimal) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float, buffer_style_params: string) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float, quad_segs: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry_str: string, distance: decimal) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float, buffer_style_params: string) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float, quad_segs: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_centroid(geography: geography) → geography

Returns the centroid of given geography. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_centroid(geography: geography, use_spheroid: bool) → geography

Returns the centroid of given geography.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_centroid(geometry: geometry) → geometry

Returns the centroid of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_centroid(geometry_str: string) → geometry

Returns the centroid of the given geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_clipbybox2d(geometry: geometry, box2d: box2d) → geometry

Clips the geometry to conform to the bounding box specified by box2d.

+
Immutable
st_closestpoint(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the 2-dimensional point on geometry_a that is closest to geometry_b. This is the first point of the shortest line.

+
Immutable
st_collectionextract(geometry: geometry, type: int) → geometry

Given a collection, returns a multitype consisting only of elements of the specified type. If there are no elements of the given type, an EMPTY geometry is returned. Types are specified as 1=POINT, 2=LINESTRING, 3=POLYGON - other types are not supported.

+
Immutable
st_collectionhomogenize(geometry: geometry) → geometry

Returns the “simplest” representation of a collection’s contents. Collections of a single type will be returned as an appropriate multitype, or a singleton if it only contains a single geometry.

+
Immutable
st_combinebbox(box2d: box2d, geometry: geometry) → box2d

Combines the current bounding box with the bounding box of the Geometry.

+
Immutable
st_contains(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no points of geometry_b lie in the exterior of geometry_a, and there is at least one point in the interior of geometry_b that lies in the interior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_containsproperly(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_b intersects the interior of geometry_a but not the boundary or exterior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_convexhull(geometry: geometry) → geometry

Returns a geometry that represents the Convex Hull of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_coorddim(geometry: geometry) → int

Returns the number of coordinate dimensions of a given Geometry.

+
Immutable
st_coveredby(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_a is outside geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_coveredby(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_coveredby(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_covers(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_b is outside geography_a.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_covers(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_covers(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_crosses(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a has some - but not all - interior points in common with geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dfullywithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, inclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than or equal to distance units.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dfullywithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, exclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than distance units.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_difference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the difference of two Geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_dimension(geometry: geometry) → int

Returns the number of topological dimensions of a given Geometry.

+
Immutable
st_disjoint(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a does not overlap, touch or is within geometry_b.

+

This function utilizes the GEOS module.

+
Immutable
st_distance(geography_a: geography, geography_b: geography) → float

Returns the distance in meters between geography_a and geography_b. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_distance(geography_a: geography, geography_b: geography, use_spheroid: bool) → float

Returns the distance in meters between geography_a and geography_b.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_distance(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance between the given geometries.

+
Immutable
st_distance(geometry_a_str: string, geometry_b_str: string) → float

Returns the distance between the given geometries.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_distancesphere(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance in meters between geometry_a and geometry_b assuming the coordinates represent lng/lat points on a sphere.

+

This function utilizes the S2 library for spherical calculations.

+
Immutable
st_distancespheroid(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance in meters between geometry_a and geometry_b assuming the coordinates represent lng/lat points on a spheroid.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_dwithin(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geometry_a_str: string, geometry_b_str: string, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geometry_a_str: string, geometry_b_str: string, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_endpoint(geometry: geometry) → geometry

Returns the last point of a geometry which has shape LineString. Returns NULL if the geometry is not a LineString.

+
Immutable
st_envelope(box2d: box2d) → geometry

Returns a bounding geometry for the given box.

+
Immutable
st_envelope(geometry: geometry) → geometry

Returns a bounding envelope for the given geometry.

+

For geometries which have a POINT or LINESTRING bounding box (i.e. is a single point +or a horizontal or vertical line), a POINT or LINESTRING is returned. Otherwise, the +returned POLYGON will be ordered Bottom Left, Top Left, Top Right, Bottom Right, +Bottom Left.

+
Immutable
st_equals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is spatially equal to geometry_b, i.e. ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = true.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_estimatedextent(schema_name: string, table_name: string, geocolumn_name: string) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+
Stable
st_estimatedextent(schema_name: string, table_name: string, geocolumn_name: string, parent_only: bool) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+

The parent_only boolean is always ignored.

+
Stable
st_estimatedextent(table_name: string, geocolumn_name: string) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+
Stable
st_expand(box2d: box2d, delta: float) → box2d

Extends the box2d by delta units across all dimensions.

+
Immutable
st_expand(box2d: box2d, delta_x: float, delta_y: float) → box2d

Extends the box2d by delta_x units in the x dimension and delta_y units in the y dimension.

+
Immutable
st_expand(geometry: geometry, delta: float) → geometry

Extends the bounding box represented by the geometry by delta units across all dimensions, returning a Polygon representing the new bounding box.

+
Immutable
st_expand(geometry: geometry, delta_x: float, delta_y: float) → geometry

Extends the bounding box represented by the geometry by delta_x units in the x dimension and delta_y units in the y dimension, returning a Polygon representing the new bounding box.

+
Immutable
st_exteriorring(geometry: geometry) → geometry

Returns the exterior ring of a Polygon as a LineString. Returns NULL if the shape is not a Polygon.

+
Immutable
st_flipcoordinates(geometry: geometry) → geometry

Returns a new geometry with the X and Y axes flipped.

+
Immutable
st_force2d(geometry: geometry) → geometry

Returns a Geometry that is forced into XY layout with any Z or M dimensions discarded.

+
Immutable
st_force3d(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3d(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3dm(geometry: geometry) → geometry

Returns a Geometry that is forced into XYM layout. If a M coordinate doesn’t exist, it will be set to 0. If a Z coordinate is present, it will be discarded.

+
Immutable
st_force3dm(geometry: geometry, defaultM: float) → geometry

Returns a Geometry that is forced into XYM layout. If a M coordinate doesn’t exist, it will be set to the specified default M value. If a Z coordinate is present, it will be discarded.

+
Immutable
st_force3dz(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3dz(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate is present, it will be discarded.

+
Immutable
st_force4d(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZM layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate doesn’t exist, it will be set to 0.

+
Immutable
st_force4d(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate doesn’t exist, it will be set to 0.

+
Immutable
st_force4d(geometry: geometry, defaultZ: float, defaultM: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified Z value. If a M coordinate doesn’t exist, it will be set to the specified M value.

+
Immutable
st_forcecollection(geometry: geometry) → geometry

Converts the geometry into a GeometryCollection.

+
Immutable
st_forcepolygonccw(geometry: geometry) → geometry

Returns a Geometry where all Polygon objects have exterior rings in the counter-clockwise orientation and interior rings in the clockwise orientation. Non-Polygon objects are unchanged.

+
Immutable
st_forcepolygoncw(geometry: geometry) → geometry

Returns a Geometry where all Polygon objects have exterior rings in the clockwise orientation and interior rings in the counter-clockwise orientation. Non-Polygon objects are unchanged.

+
Immutable
st_frechetdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the Frechet distance between the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_frechetdistance(geometry_a: geometry, geometry_b: geometry, densify_frac: float) → float

Returns the Frechet distance between the given geometries, with the given segment densification (range 0.0-1.0, -1 to disable).

+

Smaller densify_frac gives a more accurate Fréchet distance. However, the computation time and memory usage increases with the square of the number of subsegments.

+

This function utilizes the GEOS module.

+
Immutable
st_generatepoints(geometry: geometry, npoints: int4) → geometry

Generates pseudo-random points until the requested number are found within the input area. Uses system time as a seed. +The requested number of points must not be larger than 65336.

+
Volatile
st_generatepoints(geometry: geometry, npoints: int4, seed: int4) → geometry

Generates pseudo-random points until the requested number are found within the input area. +The requested number of points must not be larger than 65336.

+
Immutable
st_geogfromewkb(val: bytes) → geography

Returns the Geography from an EWKB representation.

+
Immutable
st_geogfromewkt(val: string) → geography

Returns the Geography from an EWKT representation.

+
Immutable
st_geogfromgeojson(val: string) → geography

Returns the Geography from a GeoJSON representation.

+
Immutable
st_geogfromgeojson(val: jsonb) → geography

Returns the Geography from a GeoJSON representation.

+
Immutable
st_geogfromtext(str: string, srid: int) → geography

Returns the Geography from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geogfromtext(val: string) → geography

Returns the Geography from a WKT or EWKT representation.

+
Immutable
st_geogfromwkb(bytes: bytes, srid: int) → geography

Returns the Geography from a WKB (or EWKB) representation with the given SRID set.

+
Immutable
st_geogfromwkb(val: bytes) → geography

Returns the Geography from a WKB (or EWKB) representation.

+
Immutable
st_geographyfromtext(str: string, srid: int) → geography

Returns the Geography from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geographyfromtext(val: string) → geography

Returns the Geography from a WKT or EWKT representation.

+
Immutable
st_geohash(geography: geography) → string

Returns a GeoHash representation of the geography with full precision if a point is provided, or with variable precision based on the size of the feature.

+
Immutable
st_geohash(geography: geography, precision: int) → string

Returns a GeoHash representation of the geography with the supplied precision.

+
Immutable
st_geohash(geometry: geometry) → string

Returns a GeoHash representation of the geometry with full precision if a point is provided, or with variable precision based on the size of the feature. This will error if any coordinates are outside the bounds of longitude/latitude.

+
Immutable
st_geohash(geometry: geometry, precision: int) → string

Returns a GeoHash representation of the geometry with the supplied precision. This will error if any coordinates are outside the bounds of longitude/latitude.

+
Immutable
st_geomcollfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not GeometryCollection, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geomcollfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geomcollfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geomcollfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geometryfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geometryfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_geometryn(geometry: geometry, n: int) → geometry

Returns the n-th Geometry (1-indexed). Returns NULL if out of bounds.

+
Immutable
st_geometrytype(geometry: geometry) → string

Returns the type of geometry as a string prefixed with ST_.

+

This function utilizes the GEOS module.

+
Immutable
st_geomfromewkb(val: bytes) → geometry

Returns the Geometry from an EWKB representation.

+
Immutable
st_geomfromewkt(val: string) → geometry

Returns the Geometry from an EWKT representation.

+
Immutable
st_geomfromgeohash(geohash: string) → geometry

Return a POLYGON Geometry from a GeoHash string with max precision.

+
Immutable
st_geomfromgeohash(geohash: string, precision: int) → geometry

Return a POLYGON Geometry from a GeoHash string with supplied precision.

+
Immutable
st_geomfromgeojson(val: string) → geometry

Returns the Geometry from a GeoJSON representation.

+
Immutable
st_geomfromgeojson(val: jsonb) → geometry

Returns the Geometry from a GeoJSON representation.

+
Immutable
st_geomfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geomfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_geomfromwkb(bytes: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with the given SRID set.

+
Immutable
st_geomfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation.

+
Immutable
st_hasarc(geometry: geometry) → bool

Returns whether there is a CIRCULARSTRING in the geometry.

+
Immutable
st_hausdorffdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the Hausdorff distance between the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_hausdorffdistance(geometry_a: geometry, geometry_b: geometry, densify_frac: float) → float

Returns the Hausdorff distance between the given geometries, with the given segment densification (range 0.0-1.0).

+

This function utilizes the GEOS module.

+
Immutable
st_interiorringn(geometry: geometry, n: int) → geometry

Returns the n-th (1-indexed) interior ring of a Polygon as a LineString. Returns NULL if the shape is not a Polygon, or the ring does not exist.

+
Immutable
st_intersection(geography_a: geography, geography_b: geography) → geography

Returns the point intersections of the given geographies.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+

This function utilizes the GEOS module.

+
Immutable
st_intersection(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the point intersections of the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_intersection(geometry_a_str: string, geometry_b_str: string) → geometry

Returns the point intersections of the given geometries.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_intersects(geography_a: geography, geography_b: geography) → bool

Returns true if geography_a shares any portion of space with geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_intersects(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_intersects(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_isclosed(geometry: geometry) → bool

Returns whether the geometry is closed as defined by whether the start and end points are coincident. Points are considered closed, empty geometries are not. For collections and multi-types, all members must be closed, as must all polygon rings.

+
Immutable
st_iscollection(geometry: geometry) → bool

Returns whether the geometry is of a collection type (including multi-types).

+
Immutable
st_isempty(geometry: geometry) → bool

Returns whether the geometry is empty.

+
Immutable
st_ispolygonccw(geometry: geometry) → bool

Returns whether the Polygon objects inside the Geometry have exterior rings in the counter-clockwise orientation and interior rings in the clockwise orientation. Non-Polygon objects are considered counter-clockwise.

+
Immutable
st_ispolygoncw(geometry: geometry) → bool

Returns whether the Polygon objects inside the Geometry have exterior rings in the clockwise orientation and interior rings in the counter-clockwise orientation. Non-Polygon objects are considered clockwise.

+
Immutable
st_isring(geometry: geometry) → bool

Returns whether the geometry is a single linestring that is closed and simple, as defined by ST_IsClosed and ST_IsSimple.

+

This function utilizes the GEOS module.

+
Immutable
st_issimple(geometry: geometry) → bool

Returns true if the geometry has no anomalous geometric points, e.g. that it intersects with or lies tangent to itself.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalid(geometry: geometry) → bool

Returns whether the geometry is valid as defined by the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalid(geometry: geometry, flags: int) → bool

Returns whether the geometry is valid.

+

For flags=0, validity is defined by the OGC spec.

+

For flags=1, validity considers self-intersecting rings forming holes as valid as per ESRI. This is not valid under OGC and CRDB spatial operations may not operate correctly.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidreason(geometry: geometry) → string

Returns a string containing the reason the geometry is invalid along with the point of interest, or “Valid Geometry” if it is valid. Validity is defined by the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidreason(geometry: geometry, flags: int) → string

Returns the reason the geometry is invalid or “Valid Geometry” if it is valid.

+

For flags=0, validity is defined by the OGC spec.

+

For flags=1, validity considers self-intersecting rings forming holes as valid as per ESRI. This is not valid under OGC and CRDB spatial operations may not operate correctly.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidtrajectory(geometry: geometry) → bool

Returns whether the geometry encodes a valid trajectory.

+

Note the geometry must be a LineString with M coordinates.

+
Immutable
st_length(geography: geography) → float

Returns the length of the given geography in meters. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_length(geography: geography, use_spheroid: bool) → float

Returns the length of the given geography in meters.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_length(geometry: geometry) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+
Immutable
st_length(geometry_str: string) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_length2d(geometry: geometry) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+
Immutable
st_linecrossingdirection(linestring_a: geometry, linestring_b: geometry) → int

Returns an integer value defining behavior of crossing of lines: +0: lines do not cross, +-1: linestring_b crosses linestring_a from right to left, +1: linestring_b crosses linestring_a from left to right, +-2: linestring_b crosses linestring_a multiple times from right to left, +2: linestring_b crosses linestring_a multiple times from left to right, +-3: linestring_b crosses linestring_a multiple times from left to left, +3: linestring_b crosses linestring_a multiple times from right to right.

+

Note that the top vertex of the segment touching another line does not count as a crossing, but the bottom vertex of segment touching another line is considered a crossing.

+
Immutable
st_linefromencodedpolyline(encoded_polyline: string) → geometry

Creates a LineString from an Encoded Polyline string.

+

Returns valid results only if the polyline was encoded with 5 decimal places.

+

See http://developers.google.com/maps/documentation/utilities/polylinealgorithm

+
Immutable
st_linefromencodedpolyline(encoded_polyline: string, precision: int4) → geometry

Creates a LineString from an Encoded Polyline string.

+

Precision specifies how many decimal places will be preserved in Encoded Polyline. Value should be the same on encoding and decoding, or coordinates will be incorrect.

+

See http://developers.google.com/maps/documentation/utilities/polylinealgorithm

+
Immutable
st_linefrommultipoint(geometry: geometry) → geometry

Creates a LineString from a MultiPoint geometry.

+
Immutable
st_linefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not LineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_linefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_lineinterpolatepoint(geometry: geometry, fraction: float) → geometry

Returns a point along the given LineString which is at given fraction of LineString’s total length.

+

This function utilizes the GEOS module.

+
Immutable
st_lineinterpolatepoints(geometry: geometry, fraction: float) → geometry

Returns one or more points along the LineString which are at integral multiples of the given fraction of the LineString’s total length.

+

Note If the result has zero or one points, it will be returned as a POINT. If it has two or more points, it will be returned as a MULTIPOINT.

+

This function utilizes the GEOS module.

+
Immutable
st_lineinterpolatepoints(geometry: geometry, fraction: float, repeat: bool) → geometry

Returns one or more points along the LineString which are at integral multiples of the given fraction of the LineString’s total length. If repeat is false (default true) then it returns the first point.

+

Note If the result has zero or one points, it will be returned as a POINT. If it has two or more points, it will be returned as a MULTIPOINT.

+

This function utilizes the GEOS module.

+
Immutable
st_linelocatepoint(line: geometry, point: geometry) → float

Returns a float between 0 and 1 representing the location of the closest point on LineString to the given Point, as a fraction of total 2d line length.

+
Immutable
st_linemerge(geometry: geometry) → geometry

Returns a LineString or MultiLineString by joining together constituents of a MultiLineString with matching endpoints. If the input is not a MultiLineString or LineString, an empty GeometryCollection is returned.

+

This function utilizes the GEOS module.

+
Immutable
st_linestringfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not LineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_linestringfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linestringfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linestringfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linesubstring(linestring: geometry, start_fraction: decimal, end_fraction: decimal) → geometry

Return a linestring being a substring of the input one starting and ending at the given fractions of total 2D length. Second and third arguments are float8 values between 0 and 1.

+
Immutable
st_linesubstring(linestring: geometry, start_fraction: float, end_fraction: float) → geometry

Return a linestring being a substring of the input one starting and ending at the given fractions of total 2D length. Second and third arguments are float8 values between 0 and 1.

+
Immutable
st_longestline(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the LineString corresponding to the max distance across every pair of points comprising the given geometries.

+

Note if geometries are the same, it will return the LineString with the maximum distance between the geometry’s vertexes. The function will return the longest line that was discovered first when comparing maximum distances if more than one is found.

+
Immutable
st_m(geometry: geometry) → float

Returns the M coordinate of a geometry if it is a Point.

+
Immutable
st_makebox2d(geometry_a: geometry, geometry_b: geometry) → box2d

Creates a box2d from two points. Errors if arguments are not two non-empty points.

+
Immutable
st_makeenvelope(xmin: float, ymin: float, xmax: float, ymax: float) → geometry

Creates a rectangular Polygon from the minimum and maximum values for X and Y with SRID 0.

+
Immutable
st_makeenvelope(xmin: float, ymin: float, xmax: float, ymax: float, srid: int) → geometry

Creates a rectangular Polygon from the minimum and maximum values for X and Y with the given SRID.

+
Immutable
st_makepoint(x: float, y: float) → geometry

Returns a new Point with the given X and Y coordinates.

+
Immutable
st_makepoint(x: float, y: float, z: float) → geometry

Returns a new Point with the given X, Y, and Z coordinates.

+
Immutable
st_makepoint(x: float, y: float, z: float, m: float) → geometry

Returns a new Point with the given X, Y, Z, and M coordinates.

+
Immutable
st_makepointm(x: float, y: float, m: float) → geometry

Returns a new Point with the given X, Y, and M coordinates.

+
Immutable
st_makepolygon(geometry: geometry) → geometry

Returns a new Polygon with the given outer LineString.

+
Immutable
st_makepolygon(outer: geometry, interior: anyelement[]) → geometry

Returns a new Polygon with the given outer LineString and interior (hole) LineString(s).

+
Immutable
st_makevalid(geometry: geometry) → geometry

Returns a valid form of the given geometry according to the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_maxdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the maximum distance across every pair of points comprising the given geometries. Note if the geometries are the same, it will return the maximum distance between the geometry’s vertexes.

+
Immutable
st_memsize(geometry: geometry) → int

Returns the amount of memory space (in bytes) the geometry takes.

+
Immutable
st_minimumboundingcircle(geometry: geometry) → geometry

Returns the smallest circle polygon that can fully contain a geometry.

+
Immutable
st_minimumboundingcircle(geometry: geometry, num_segs: int) → geometry

Returns the smallest circle polygon that can fully contain a geometry.

+
Immutable
st_minimumboundingradius(geometry: geometry) → tuple{geometry AS center, float AS radius}

Returns a record containing the center point and radius of the smallest circle that can fully contain the given geometry.

+
Immutable
st_minimumclearance(geometry: geometry) → float

Returns the minimum distance a vertex can move before producing an invalid geometry. Returns Infinity if no minimum clearance can be found (e.g. for a single point).

+
Immutable
st_minimumclearanceline(geometry: geometry) → geometry

Returns a LINESTRING spanning the minimum distance a vertex can move before producing an invalid geometry. If no minimum clearance can be found (e.g. for a single point), an empty LINESTRING is returned.

+
Immutable
st_mlinefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mlinefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mlinefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mlinefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mpointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mpointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpolyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mpolyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_mpolyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_mpolyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multi(geometry: geometry) → geometry

Returns the geometry as a new multi-geometry, e.g. converts a POINT to a MULTIPOINT. If the input is already a multitype or collection, it is returned as is.

+
Immutable
st_multilinefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multilinefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multilinestringfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multipointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipolyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipolyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipolygonfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_ndims(geometry: geometry) → int

Returns the number of coordinate dimensions of a given Geometry.

+
Immutable
st_node(geometry: geometry) → geometry

Adds a node on a geometry for each intersection. Resulting geometry is always a MultiLineString.

+
Immutable
st_normalize(geometry: geometry) → geometry

Returns the geometry in its normalized form.

+

This function utilizes the GEOS module.

+
Immutable
st_npoints(geometry: geometry) → int

Returns the number of points in a given Geometry. Works for any shape type.

+
Immutable
st_nrings(geometry: geometry) → int

Returns the number of rings in a Polygon Geometry. Returns 0 if the shape is not a Polygon.

+
Immutable
st_numgeometries(geometry: geometry) → int

Returns the number of shapes inside a given Geometry.

+
Immutable
st_numinteriorring(geometry: geometry) → int

Returns the number of interior rings in a Polygon Geometry. Returns NULL if the shape is not a Polygon.

+
Immutable
st_numinteriorrings(geometry: geometry) → int

Returns the number of interior rings in a Polygon Geometry. Returns NULL if the shape is not a Polygon.

+
Immutable
st_numpoints(geometry: geometry) → int

Returns the number of points in a LineString. Returns NULL if the Geometry is not a LineString.

+
Immutable
st_orderingequals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is exactly equal to geometry_b, having all coordinates in the same order, as well as the same type, SRID, bounding box, and so on.

+
Immutable
st_orientedenvelope(geometry: geometry) → geometry

Returns a minimum rotated rectangle enclosing a geometry. +Note that more than one minimum rotated rectangle may exist. +May return a Point or LineString in the case of degenerate inputs.

+
Immutable
st_overlaps(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a intersects but does not completely contain geometry_b, or vice versa. “Does not completely” implies ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = false.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_perimeter(geography: geography) → float

Returns the perimeter of the given geography in meters. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_perimeter(geography: geography, use_spheroid: bool) → float

Returns the perimeter of the given geography in meters.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_perimeter(geometry: geometry) → float

Returns the perimeter of the given geometry.

+

Note ST_Perimeter is only valid for Polygon - use ST_Length for LineString.

+

This function utilizes the GEOS module.

+
Immutable
st_perimeter2d(geometry: geometry) → float

Returns the perimeter of the given geometry.

+

Note ST_Perimeter is only valid for Polygon - use ST_Length for LineString.

+

This function utilizes the GEOS module.

+
Immutable
st_point(x: float, y: float) → geometry

Returns a new Point with the given X and Y coordinates.

+
Immutable
st_pointfromgeohash(geohash: string) → geometry

Return a POINT Geometry from a GeoHash string with max precision.

+
Immutable
st_pointfromgeohash(geohash: string, precision: int) → geometry

Return a POINT Geometry from a GeoHash string with supplied precision.

+
Immutable
st_pointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Point, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_pointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointinsidecircle(geometry: geometry, x_coord: float, y_coord: float, radius: float) → bool

Returns true if the geometry is a point and is inside the circle. Returns false otherwise.

+
Immutable
st_pointn(geometry: geometry, n: int) → geometry

Returns the n-th Point of a LineString (1-indexed). Returns NULL if out of bounds or not a LineString.

+
Immutable
st_pointonsurface(geometry: geometry) → geometry

Returns a point that intersects with the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_points(geometry: geometry) → geometry

Returns all coordinates in the given Geometry as a MultiPoint, including duplicates.

+
Immutable
st_polyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Polygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_polyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygon(geometry: geometry, srid: int) → geometry

Returns a new Polygon from the given LineString and sets its SRID. It is equivalent to ST_MakePolygon with a single argument followed by ST_SetSRID.

+
Immutable
st_polygonfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Polygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_polygonfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygonfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygonfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_project(geography: geography, distance: float, azimuth: float) → geography

Returns a point projected from a start point along a geodesic using a given distance and azimuth (bearing). +This is known as the direct geodesic problem.

+

The distance is given in meters. Negative values are supported.

+

The azimuth (also known as heading or bearing) is given in radians. It is measured clockwise from true north (azimuth zero). +East is azimuth π/2 (90 degrees); south is azimuth π (180 degrees); west is azimuth 3π/2 (270 degrees). +Negative azimuth values and values greater than 2π (360 degrees) are supported.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry) → string

Returns the DE-9IM spatial relation between geometry_a and geometry_b.

+

This function utilizes the GEOS module.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry, bnr: int) → string

Returns the DE-9IM spatial relation between geometry_a and geometry_b using the given boundary node rule (1:OGC/MOD2, 2:Endpoint, 3:MultivalentEndpoint, 4:MonovalentEndpoint).

+

This function utilizes the GEOS module.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry, pattern: string) → bool

Returns whether the DE-9IM spatial relation between geometry_a and geometry_b matches the DE-9IM pattern.

+

This function utilizes the GEOS module.

+
Immutable
st_relatematch(intersection_matrix: string, pattern: string) → bool

Returns whether the given DE-9IM intersection matrix satisfies the given pattern.

+
Immutable
st_removepoint(line_string: geometry, index: int) → geometry

Removes the Point at the given 0-based index and returns the modified LineString geometry.

+
Immutable
st_removerepeatedpoints(geometry: geometry) → geometry

Returns a geometry with repeated points removed.

+
Immutable
st_removerepeatedpoints(geometry: geometry, tolerance: float) → geometry

Returns a geometry with repeated points removed, within the given distance tolerance.

+
Immutable
st_reverse(geometry: geometry) → geometry

Returns a modified geometry by reversing the order of its vertices.

+
Immutable
st_rotate(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated around the origin by a rotation angle.

+
Immutable
st_rotate(g: geometry, angle_radians: float, origin_point: geometry) → geometry

Returns a modified Geometry whose coordinates are rotated around the provided origin by a rotation angle.

+
Immutable
st_rotate(g: geometry, angle_radians: float, origin_x: float, origin_y: float) → geometry

Returns a modified Geometry whose coordinates are rotated around the provided origin by a rotation angle.

+
Immutable
st_rotatex(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the x axis by a rotation angle.

+
Immutable
st_rotatey(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the y axis by a rotation angle.

+
Immutable
st_rotatez(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the z axis by a rotation angle.

+
Immutable
st_s2covering(geography: geography) → geography

Returns a geography which represents the S2 covering used by the index using the default index configuration.

+
Immutable
st_s2covering(geography: geography, settings: string) → geography

Returns a geography which represents the S2 covering used by the index using the index configuration specified by the settings parameter.

+

The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX ... WITH (...), e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using SELECT ST_S2Covering(geography, 's2_max_level=15,s2_level_mod=3').

+
Immutable
st_s2covering(geometry: geometry) → geometry

Returns a geometry which represents the S2 covering used by the index using the default index configuration.

+
Immutable
st_s2covering(geometry: geometry, settings: string) → geometry

Returns a geometry which represents the S2 covering used by the index using the index configuration specified by the settings parameter.

+

The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX ... WITH (...), e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using SELECT ST_S2Covering(geometry, 's2_max_level=15,s2_level_mod=3')

+
Immutable
st_scale(g: geometry, factor: geometry) → geometry

Returns a modified Geometry scaled by taking in a Geometry as the factor.

+
Immutable
st_scale(g: geometry, factor: geometry, origin: geometry) → geometry

Returns a modified Geometry scaled by the Geometry factor relative to a false origin.

+
Immutable
st_scale(geometry: geometry, x_factor: float, y_factor: float) → geometry

Returns a modified Geometry scaled by the given factors.

+
Immutable
st_segmentize(geography: geography, max_segment_length_meters: float) → geography

Returns a modified Geography having no segment longer than the given max_segment_length meters.

+

The calculations are done on a sphere.

+

This function utilizes the S2 library for spherical calculations.

+
Immutable
st_segmentize(geometry: geometry, max_segment_length: float) → geometry

Returns a modified Geometry having no segment longer than the given max_segment_length. Length units are in units of spatial reference.

+
Immutable
st_setpoint(line_string: geometry, index: int, point: geometry) → geometry

Sets the Point at the given 0-based index and returns the modified LineString geometry.

+
Immutable
st_setsrid(geography: geography, srid: int) → geography

Sets a Geography to a new SRID without transforming the coordinates.

+
Immutable
st_setsrid(geometry: geometry, srid: int) → geometry

Sets a Geometry to a new SRID without transforming the coordinates.

+
Immutable
st_sharedpaths(geometry_a: geometry, geometry_b: geometry) → geometry

Returns a collection containing paths shared by the two input geometries.

+

Those going in the same direction are in the first element of the collection, +those going in the opposite direction are in the second element. +The paths themselves are given in the direction of the first geometry.

+
Immutable
st_shiftlongitude(geometry: geometry) → geometry

Returns a modified version of a geometry in which the longitude (X coordinate) of each point is incremented by 360 if it is <0 and decremented by 360 if it is >180. The result is only meaningful if the coordinates are in longitude/latitude.

+
Immutable
st_shortestline(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the LineString that corresponds to the minimum distance across every pair of points comprising the given geometries.

+

Note if geometries are the same, it will return the LineString with the minimum distance between the geometry’s vertexes. The function will return the shortest line that was discovered first when comparing minimum distances if more than one is found.

+
Immutable
st_simplify(geometry: geometry, tolerance: float) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm.

+

This function utilizes the GEOS module.

+
Immutable
st_simplify(geometry: geometry, tolerance: float, preserve_collapsed: bool) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm, retaining objects that would be too small given the tolerance if preserve_collapsed is set to true.

+
Immutable
st_simplifypreservetopology(geometry: geometry, tolerance: float) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm, avoiding the creation of invalid geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_snap(input: geometry, target: geometry, tolerance: float) → geometry

Snaps the vertices and segments of input geometry to the target geometry’s vertices. +Tolerance is used to control where snapping is performed. The result geometry is the input geometry with the vertices snapped. +If no snapping occurs then the input geometry is returned unchanged.

+
Immutable
st_snaptogrid(geometry: geometry, origin: geometry, size_x: float, size_y: float, size_z: float, size_m: float) → geometry

Snap a geometry to a grid defined by the given origin and X, Y, Z, and M cell sizes. Any dimension with a 0 cell size will not be snapped.

+
Immutable
st_snaptogrid(geometry: geometry, origin_x: float, origin_y: float, size_x: float, size_y: float) → geometry

Snap a geometry to a grid with X coordinates snapped to size_x and Y coordinates snapped to size_y based on an origin of (origin_x, origin_y).

+
Immutable
st_snaptogrid(geometry: geometry, size: float) → geometry

Snap a geometry to a grid of the given size. The specified size is only used to snap X and Y coordinates.

+
Immutable
st_snaptogrid(geometry: geometry, size_x: float, size_y: float) → geometry

Snap a geometry to a grid with X coordinates snapped to size_x and Y coordinates snapped to size_y.

+
Immutable
st_srid(geography: geography) → int

Returns the Spatial Reference Identifier (SRID) for the ST_Geography as defined in spatial_ref_sys table.

+
Immutable
st_srid(geometry: geometry) → int

Returns the Spatial Reference Identifier (SRID) for the ST_Geometry as defined in spatial_ref_sys table.

+
Immutable
st_startpoint(geometry: geometry) → geometry

Returns the first point of a geometry which has shape LineString. Returns NULL if the geometry is not a LineString.

+
Immutable
st_subdivide(geometry: geometry) → geometry

Returns a geometry divided into parts, where each part contains no more than 256 vertices.

+
Immutable
st_subdivide(geometry: geometry, max_vertices: int4) → geometry

Returns a geometry divided into parts, where each part contains no more than the number of vertices provided.

+
Immutable
st_summary(geography: geography) → string

Returns a text summary of the contents of the geography.

+

Flags shown in square brackets after the geometry type have the following meaning:

+
    +
  • M: has M coordinate
  • +
  • Z: has Z coordinate
  • +
  • B: has a cached bounding box
  • +
  • G: is geography
  • +
  • S: has spatial reference system
  • +
+
Immutable
st_summary(geometry: geometry) → string

Returns a text summary of the contents of the geometry.

+

Flags shown in square brackets after the geometry type have the following meaning:

+
    +
  • M: has M coordinate
  • +
  • Z: has Z coordinate
  • +
  • B: has a cached bounding box
  • +
  • G: is geography
  • +
  • S: has spatial reference system
  • +
+
Immutable
st_swapordinates(geometry: geometry, swap_ordinate_string: string) → geometry

Returns a version of the given geometry with given ordinates swapped. +The swap_ordinate_string parameter is a 2-character string naming the ordinates to swap. Valid names are: x, y, z and m.

+
Immutable
st_symdifference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the symmetric difference of both geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_symmetricdifference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the symmetric difference of both geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4, bounds: geometry) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4, bounds: geometry, margin: float) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_touches(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if the only points in common between geometry_a and geometry_b are on the boundary. Note points do not touch other points.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_transform(geometry: geometry, from_proj_text: string, srid: int) → geometry

Transforms a geometry into the coordinate reference system assuming the from_proj_text to the new to_proj_text by projecting its coordinates. The supplied SRID is set on the new geometry.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, from_proj_text: string, to_proj_text: string) → geometry

Transforms a geometry into the coordinate reference system assuming the from_proj_text to the new to_proj_text by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, srid: int) → geometry

Transforms a geometry into the given SRID coordinate reference system by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, to_proj_text: string) → geometry

Transforms a geometry into the coordinate reference system referenced by the projection text by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_translate(g: geometry, delta_x: float, delta_y: float) → geometry

Returns a modified Geometry translated by the given deltas.

+
Immutable
st_translate(g: geometry, delta_x: float, delta_y: float, delta_z: float) → geometry

Returns a modified Geometry translated by the given deltas.

+
Immutable
st_transscale(geometry: geometry, delta_x: float, delta_y: float, x_factor: float, y_factor: float) → geometry

Translates the geometry using the deltaX and deltaY args, then scales it using the XFactor, YFactor args, working in 2D only.

+
Immutable
st_unaryunion(geometry: geometry) → geometry

Returns a union of the components for any geometry or geometry collection provided. Dissolves boundaries of a multipolygon.

+
Immutable
st_voronoilines(geometry: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoilines(geometry: geometry, tolerance: float) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoilines(geometry: geometry, tolerance: float, extend_to: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoipolygons(geometry: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_voronoipolygons(geometry: geometry, tolerance: float) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_voronoipolygons(geometry: geometry, tolerance: float, extend_to: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_within(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is completely inside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_wkbtosql(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation.

+
Immutable
st_wkttosql(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_x(geometry: geometry) → float

Returns the X coordinate of a geometry if it is a Point.

+
Immutable
st_xmax(box2d: box2d) → float

Returns the maximum X ordinate of a box2d.

+
Immutable
st_xmax(geometry: geometry) → float

Returns the maximum X ordinate of a geometry.

+
Immutable
st_xmin(box2d: box2d) → float

Returns the minimum X ordinate of a box2d.

+
Immutable
st_xmin(geometry: geometry) → float

Returns the minimum X ordinate of a geometry.

+
Immutable
st_y(geometry: geometry) → float

Returns the Y coordinate of a geometry if it is a Point.

+
Immutable
st_ymax(box2d: box2d) → float

Returns the maximum Y ordinate of a box2d.

+
Immutable
st_ymax(geometry: geometry) → float

Returns the maximum Y ordinate of a geometry.

+
Immutable
st_ymin(box2d: box2d) → float

Returns the minimum Y ordinate of a box2d.

+
Immutable
st_ymin(geometry: geometry) → float

Returns the minimum Y ordinate of a geometry.

+
Immutable
st_z(geometry: geometry) → float

Returns the Z coordinate of a geometry if it is a Point.

+
Immutable
st_zmflag(geometry: geometry) → int2

Returns a code based on the ZM coordinate dimension of a geometry (XY = 0, XYM = 1, XYZ = 2, XYZM = 3).

+
Immutable
+ +### String and byte functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
ascii(val: string) → int

Returns the character code of the first character in val. Despite the name, the function supports Unicode too.

+
Immutable
bit_count(val: bytes) → int

Calculates the number of bits set used to represent val.

+
Immutable
bit_count(val: varbit) → int

Calculates the number of bits set used to represent val.

+
Immutable
bit_length(val: bytes) → int

Calculates the number of bits used to represent val.

+
Immutable
bit_length(val: string) → int

Calculates the number of bits used to represent val.

+
Immutable
bit_length(val: varbit) → int

Calculates the number of bits used to represent val.

+
Immutable
bitmask_and(a: string, b: string) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: string, b: varbit) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: varbit, b: string) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: varbit, b: varbit) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: string, b: string) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: string, b: varbit) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: varbit, b: string) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: varbit, b: varbit) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: string, b: string) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: string, b: varbit) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: varbit, b: string) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: varbit, b: varbit) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
btrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the beginning or end of input (applies recursively).

+

For example, btrim('doggie', 'eod') returns ggi.

+
Immutable
btrim(val: string) → string

Removes all spaces from the beginning and end of val.

+
Immutable
char_length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
char_length(val: string) → int

Calculates the number of characters in val.

+
Immutable
character_length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
character_length(val: string) → int

Calculates the number of characters in val.

+
Immutable
chr(val: int) → string

Returns the character with the code given in val. Inverse function of ascii().

+
Immutable
compress(data: bytes, codec: string) → bytes

Compress data with the specified codec (‘gzip’, ‘lz4’, ‘snappy’, ‘zstd’).

+
Immutable
concat(string...) → string

Concatenates a comma-separated list of strings.

+
Immutable
concat_ws(string...) → string

Uses the first argument as a separator between the concatenation of the subsequent arguments.

+

For example concat_ws('!','wow','great') returns wow!great.

+
Immutable
convert_from(str: bytes, enc: string) → string

Decode the bytes in str into a string using encoding enc. Supports encodings ‘UTF8’ and ‘LATIN1’.

+
Immutable
convert_to(str: string, enc: string) → bytes

Encode the string str as a byte array using encoding enc. Supports encodings ‘UTF8’ and ‘LATIN1’.

+
Immutable
decode(text: string, format: string) → bytes

Decodes data using format (hex / escape / base64).

+
Immutable
decompress(data: bytes, codec: string) → bytes

Decompress data with the specified codec (‘gzip’, ‘lz4’, ‘snappy’, ‘zstd’).

+
Immutable
difference(source: string, target: string) → int

Convert two strings to their Soundex codes and report the number of matching code positions.

+
Immutable
encode(data: bytes, format: string) → string

Encodes data using format (hex / escape / base64).

+
Immutable
format(string, anyelement...) → string

Interprets the first argument as a format string similar to C sprintf and interpolates the remaining arguments.

+
Stable
from_ip(val: bytes) → string

Converts the byte string representation of an IP to its character string representation.

+
Immutable
from_uuid(val: bytes) → string

Converts the byte string representation of a UUID to its character string representation.

+
Immutable
get_bit(bit_string: varbit, index: int) → int

Extracts a bit at given index in the bit array.

+
Immutable
get_bit(byte_string: bytes, index: int) → int

Extracts a bit at the given index in the byte array.

+
Immutable
get_byte(byte_string: bytes, index: int) → int

Extracts a byte at the given index in the byte array.

+
Immutable
initcap(val: string) → string

Capitalizes the first letter of val.

+
Immutable
left(input: bytes, return_set: int) → bytes

Returns the first return_set bytes from input.

+
Immutable
left(input: string, return_set: int) → string

Returns the first return_set characters from input.

+
Immutable
length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
length(val: string) → int

Calculates the number of characters in val.

+
Immutable
length(val: varbit) → int

Calculates the number of bits in val.

+
Immutable
lower(val: string) → string

Converts all characters in val to their lower-case equivalents.

+
Immutable
lpad(string: string, length: int) → string

Pads string to length by adding ’ ’ to the left of string. If string is longer than length it is truncated.

+
Immutable
lpad(string: string, length: int, fill: string) → string

Pads string by adding fill to the left of string to make it length. If string is longer than length it is truncated.

+
Immutable
ltrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the beginning (left-hand side) of input (applies recursively).

+

For example, ltrim('doggie', 'od') returns ggie.

+
Immutable
ltrim(val: string) → string

Removes all spaces from the beginning (left-hand side) of val.

+
Immutable
md5(bytes...) → string

Calculates the MD5 hash value of a set of values.

+
Leakproof
md5(string...) → string

Calculates the MD5 hash value of a set of values.

+
Leakproof
octet_length(val: bytes) → int

Calculates the number of bytes used to represent val.

+
Immutable
octet_length(val: string) → int

Calculates the number of bytes used to represent val.

+
Immutable
octet_length(val: varbit) → int

Calculates the number of bits used to represent val.

+
Immutable
overlay(input: string, overlay_val: string, start_pos: int) → string

Replaces characters in input with overlay_val starting at start_pos (begins at 1).

+

For example, overlay('doggie', 'CAT', 2) returns dCATie.

+
Immutable
overlay(input: string, overlay_val: string, start_pos: int, end_pos: int) → string

Deletes the characters in input between start_pos and end_pos (count starts at 1), and then insert overlay_val at start_pos.

+
Immutable
parse_date(string: string, datestyle: string) → date

Parses a date assuming it is in format specified by DateStyle.

+
Immutable
parse_date(val: string) → date

Parses a date assuming it is in MDY format.

+
Immutable
parse_ident(qualified_identifier: string) → string[]

Splits qualified_identifier into an array of identifiers, removing any quoting of individual identifiers. Extra characters after the last identifier are considered an error.

+
Immutable
parse_ident(qualified_identifier: string, strict: bool) → string[]

Splits qualified_identifier into an array of identifiers, removing any quoting of individual identifiers. If strict is false, then extra characters after the last identifier are ignored.

+
Immutable
parse_interval(string: string, style: string) → interval

Convert a string to an interval using the given IntervalStyle.

+
Immutable
parse_interval(val: string) → interval

Convert a string to an interval assuming the Postgres IntervalStyle.

+
Immutable
parse_time(string: string, timestyle: string) → time

Parses a time assuming the date (if any) is in format specified by DateStyle.

+
Immutable
parse_time(val: string) → time

Parses a time assuming the date (if any) is in MDY format.

+
Immutable
parse_timestamp(string: string, datestyle: string) → timestamp

Convert a string containing an absolute timestamp to the corresponding timestamp assuming dates formatted using the given DateStyle.

+
Immutable
parse_timestamp(val: string) → timestamp

Convert a string containing an absolute timestamp to the corresponding timestamp assuming dates are in MDY format.

+
Immutable
parse_timetz(string: string, timestyle: string) → timetz

Parses a timetz assuming the date (if any) is in format specified by DateStyle.

+
Immutable
parse_timetz(val: string) → timetz

Parses a timetz assuming the date (if any) is in MDY format.

+
Immutable
prettify_statement(statement: string, line_width: int, align_mode: int, case_mode: int) → string

Prettifies a statement using a user-configured pretty-printing config. +Align mode values range from 0 - 3, representing no, partial, full, and extra alignment respectively. +Case mode values range between 0 - 1, representing lower casing and upper casing respectively.

+
Immutable
prettify_statement(val: string) → string

Prettifies a statement using the default pretty-printing config.

+
Immutable
quote_ident(val: string) → string

Return val suitably quoted to serve as identifier in a SQL statement.

+
Immutable
quote_literal(val: string) → string

Return val suitably quoted to serve as string literal in a SQL statement.

+
Immutable
quote_literal(val: anyelement) → string

Coerce val to a string and then quote it as a literal.

+
Stable
quote_nullable(val: string) → string

Coerce val to a string and then quote it as a literal. If val is NULL, returns ‘NULL’.

+
Immutable
quote_nullable(val: anyelement) → string

Coerce val to a string and then quote it as a literal. If val is NULL, returns ‘NULL’.

+
Stable
regexp_extract(input: string, regex: string) → string

Returns the first match for the Regular Expression regex in input.

+
Immutable
regexp_replace(input: string, regex: string, replace: string) → string

Replaces matches for the Regular Expression regex in input with the Regular Expression replace.

+
Immutable
regexp_replace(input: string, regex: string, replace: string, flags: string) → string

Replaces matches for the regular expression regex in input with the regular expression replace using flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
repeat(input: string, repeat_counter: int) → string

Concatenates input repeat_counter number of times.

+

For example, repeat('dog', 2) returns dogdog.

+
Immutable
replace(input: string, find: string, replace: string) → string

Replaces all occurrences of find with replace in input

+
Immutable
reverse(val: string) → string

Reverses the order of the string’s characters.

+
Immutable
right(input: bytes, return_set: int) → bytes

Returns the last return_set bytes from input.

+
Immutable
right(input: string, return_set: int) → string

Returns the last return_set characters from input.

+
Immutable
rpad(string: string, length: int) → string

Pads string to length by adding ’ ’ to the right of string. If string is longer than length it is truncated.

+
Immutable
rpad(string: string, length: int, fill: string) → string

Pads string to length by adding fill to the right of string. If string is longer than length it is truncated.

+
Immutable
rtrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the end (right-hand side) of input (applies recursively).

+

For example, rtrim('doggie', 'ei') returns dogg.

+
Immutable
rtrim(val: string) → string

Removes all spaces from the end (right-hand side) of val.

+
Immutable
set_bit(bit_string: varbit, index: int, to_set: int) → varbit

Updates a bit at given index in the bit array.

+
Immutable
set_bit(byte_string: bytes, index: int, to_set: int) → bytes

Updates a bit at the given index in the byte array.

+
Immutable
set_byte(byte_string: bytes, index: int, to_set: int) → bytes

Updates a byte at the given index in the byte array.

+
Immutable
sha1(bytes...) → string

Calculates the SHA1 hash value of a set of values.

+
Leakproof
sha1(string...) → string

Calculates the SHA1 hash value of a set of values.

+
Leakproof
sha224(bytes...) → string

Calculates the SHA224 hash value of a set of values.

+
Leakproof
sha224(string...) → string

Calculates the SHA224 hash value of a set of values.

+
Leakproof
sha256(bytes...) → string

Calculates the SHA256 hash value of a set of values.

+
Leakproof
sha256(string...) → string

Calculates the SHA256 hash value of a set of values.

+
Leakproof
sha384(bytes...) → string

Calculates the SHA384 hash value of a set of values.

+
Leakproof
sha384(string...) → string

Calculates the SHA384 hash value of a set of values.

+
Leakproof
sha512(bytes...) → string

Calculates the SHA512 hash value of a set of values.

+
Leakproof
sha512(string...) → string

Calculates the SHA512 hash value of a set of values.

+
Leakproof
similar_escape(pattern: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern.

+
Immutable
similar_escape(pattern: string, escape: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern using escape as an escape token.

+
Immutable
similar_to_escape(pattern: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern.

+
Immutable
similar_to_escape(pattern: string, escape: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern using escape as an escape token.

+
Immutable
similar_to_escape(unescaped: string, pattern: string, escape: string) → bool

Matches unescaped with pattern using escape as an escape token.

+
Immutable
split_part(input: string, delimiter: string, return_index_pos: int) → string

Splits input on delimiter and returns the value in the return_index_pos position (starting at 1).

+

For example, split_part('123.456.789.0','.',3) returns 789.

+
Immutable
strpos(input: bytes, find: bytes) → int

Calculates the position where the byte subarray find begins in input.

+
Immutable
strpos(input: string, find: string) → int

Calculates the position where the string find begins in input.

+

For example, strpos('doggie', 'gie') returns 4.

+
Immutable
strpos(input: varbit, find: varbit) → int

Calculates the position where the bit subarray find begins in input.

+
Immutable
substr(input: bytes, start_pos: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: bytes, start_pos: int, length: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substr(input: string, regex: string) → string

Returns a substring of input that matches the regular expression regex.

+
Immutable
substr(input: string, regex: string, escape_char: string) → string

Returns a substring of input that matches the regular expression regex using escape_char as your escape character instead of \.

+
Immutable
substr(input: string, start_pos: int) → string

Returns a substring of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: string, start_pos: int, length: int) → string

Returns a substring of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substr(input: varbit, start_pos: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: varbit, start_pos: int, length: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: bytes, start_pos: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: bytes, start_pos: int, length: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: string, regex: string) → string

Returns a substring of input that matches the regular expression regex.

+
Immutable
substring(input: string, regex: string, escape_char: string) → string

Returns a substring of input that matches the regular expression regex using escape_char as your escape character instead of \.

+
Immutable
substring(input: string, start_pos: int) → string

Returns a substring of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: string, start_pos: int, length: int) → string

Returns a substring of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: varbit, start_pos: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: varbit, start_pos: int, length: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
to_char_with_style(date: date, datestyle: string) → string

Convert a date to a string assuming the string is formatted using the given DateStyle.

+
Immutable
to_char_with_style(interval: interval, style: string) → string

Convert an interval to a string using the given IntervalStyle.

+
Immutable
to_char_with_style(timestamp: timestamp, datestyle: string) → string

Convert a timestamp to a string assuming the string is formatted using the given DateStyle.

+
Immutable
to_english(val: int) → string

This function enunciates the value of its argument using English cardinals.

+
Immutable
to_hex(val: bytes) → string

Converts val to its hexadecimal representation.

+
Immutable
to_hex(val: int) → string

Converts val to its hexadecimal representation.

+
Immutable
to_hex(val: string) → string

Converts val to its hexadecimal representation.

+
Immutable
to_ip(val: string) → bytes

Converts the character string representation of an IP to its byte string representation.

+
Immutable
to_uuid(val: string) → bytes

Converts the character string representation of a UUID to its byte string representation.

+
Immutable
translate(input: string, find: string, replace: string) → string

In input, replaces the first character from find with the first character in replace; repeat for each character in find.

+

For example, translate('doggie', 'dog', '123') returns 1233ie.

+
Immutable
ulid_to_uuid(val: string) → uuid

Converts a ULID string to its UUID-encoded representation.

+
Immutable
unaccent(val: string) → string

Removes accents (diacritic signs) from the text provided in val.

+
Immutable
upper(val: string) → string

Converts all characters in val to their upper-case equivalents.

+
Immutable
+ +### System info functions + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cluster_logical_timestamp() → decimal

Returns the logical time of the current transaction as +a CockroachDB HLC in decimal form.

+

Note that uses of this function disable server-side optimizations and +may increase either contention or retry errors, or both.

+

Returns an error if run in a transaction with an isolation level weaker than SERIALIZABLE.

+
Volatile
current_database() → string

Returns the current database.

+
Stable
current_schema() → string

Returns the current schema.

+
Stable
current_schemas(include_pg_catalog: bool) → string[]

Returns the valid schemas in the search path.

+
Stable
current_user() → string

Returns the current user. This function is provided for compatibility with PostgreSQL.

+
Stable
session_user() → string

Returns the session user. This function is provided for compatibility with PostgreSQL.

+
Stable
to_regclass(text: string) → regtype

Translates a textual relation name to its OID

+
Stable
to_regnamespace(text: string) → regtype

Translates a textual schema name to its OID

+
Stable
to_regproc(text: string) → regtype

Translates a textual function or procedure name to its OID

+
Stable
to_regprocedure(text: string) → regtype

Translates a textual function or procedure name (with argument types) to its OID

+
Stable
to_regrole(text: string) → regtype

Translates a textual role name to its OID

+
Stable
to_regtype(text: string) → regtype

Translates a textual type name to its OID

+
Stable
version() → string

Returns the node’s version of CockroachDB.

+
Volatile
+ +### TIMETZ functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
current_time() → time

Returns the current transaction’s time with no time zone.

+
Stable
current_time() → timetz

Returns the current transaction’s time with time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
current_time(precision: int) → time

Returns the current transaction’s time with no time zone.

+
Stable
current_time(precision: int) → timetz

Returns the current transaction’s time with time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime() → time

Returns the current transaction’s time with no time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime() → timetz

Returns the current transaction’s time with time zone.

+
Stable
localtime(precision: int) → time

Returns the current transaction’s time with no time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime(precision: int) → timetz

Returns the current transaction’s time with time zone.

+
Stable
+ +### Trigrams functions + + + + + + +
Function → ReturnsDescriptionVolatility
show_trgm(input: string) → string[]

Returns an array of all the trigrams in the given string.

+
Immutable
similarity(left: string, right: string) → float

Returns a number that indicates how similar the two arguments are. The range of the result is zero (indicating that the two strings are completely dissimilar) to one (indicating that the two strings are identical).

+
Immutable
+ +### UUID functions + + + + + +
Function → ReturnsDescriptionVolatility
uuid_to_ulid(val: uuid) → string

Converts a UUID-encoded ULID to its string representation.

+
Immutable
+ +### Compatibility functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
col_description(table_oid: oid, column_number: int) → string

Returns the comment for a table column, which is specified by the OID of its table and its column number. (obj_description cannot be used for table columns, since columns do not have OIDs of their own.)

+
Stable
current_setting(setting_name: string) → string

System info

+
Stable
current_setting(setting_name: string, missing_ok: bool) → string

System info

+
Stable
format_type(type_oid: oid, typemod: int) → string

Returns the SQL name of a data type that is identified by its type OID and possibly a type modifier. Currently, the type modifier is ignored.

+
Stable
getdatabaseencoding() → string

Returns the current encoding name used by the database.

+
Stable
has_any_column_privilege(table: string, privilege: string) → bool

Returns whether or not the current user has privileges for any column of table.

+
Stable
has_any_column_privilege(table: oid, privilege: string) → bool

Returns whether or not the current user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: string, table: string, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: string, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: oid, table: string, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: oid, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_column_privilege(table: string, column: int, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: string, column: string, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: oid, column: int, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: oid, column: string, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(user: string, table: string, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: string, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: oid, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: oid, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: string, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: string, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: oid, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: oid, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_database_privilege(database: string, privilege: string) → bool

Returns whether or not the current user has privileges for database.

+
Stable
has_database_privilege(database: oid, privilege: string) → bool

Returns whether or not the current user has privileges for database.

+
Stable
has_database_privilege(user: string, database: string, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: string, database: oid, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: oid, database: string, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: oid, database: oid, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_foreign_data_wrapper_privilege(fdw: string, privilege: string) → bool

Returns whether or not the current user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(fdw: oid, privilege: string) → bool

Returns whether or not the current user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: string, fdw: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: string, fdw: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: oid, fdw: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: oid, fdw: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_function_privilege(function: string, privilege: string) → bool

Returns whether or not the current user has privileges for function.

+
Stable
has_function_privilege(function: oid, privilege: string) → bool

Returns whether or not the current user has privileges for function.

+
Stable
has_function_privilege(user: string, function: string, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: string, function: oid, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: oid, function: string, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: oid, function: oid, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_language_privilege(language: string, privilege: string) → bool

Returns whether or not the current user has privileges for language.

+
Stable
has_language_privilege(language: oid, privilege: string) → bool

Returns whether or not the current user has privileges for language.

+
Stable
has_language_privilege(user: string, language: string, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: string, language: oid, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: oid, language: string, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: oid, language: oid, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_schema_privilege(schema: string, privilege: string) → bool

Returns whether or not the current user has privileges for schema.

+
Stable
has_schema_privilege(schema: oid, privilege: string) → bool

Returns whether or not the current user has privileges for schema.

+
Stable
has_schema_privilege(user: string, schema: string, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: string, schema: oid, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: oid, schema: string, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: oid, schema: oid, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_sequence_privilege(sequence: string, privilege: string) → bool

Returns whether or not the current user has privileges for sequence.

+
Stable
has_sequence_privilege(sequence: oid, privilege: string) → bool

Returns whether or not the current user has privileges for sequence.

+
Stable
has_sequence_privilege(user: string, sequence: string, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: string, sequence: oid, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: oid, sequence: string, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: oid, sequence: oid, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_server_privilege(server: string, privilege: string) → bool

Returns whether or not the current user has privileges for foreign server.

+
Stable
has_server_privilege(server: oid, privilege: string) → bool

Returns whether or not the current user has privileges for foreign server.

+
Stable
has_server_privilege(user: string, server: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: string, server: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: oid, server: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: oid, server: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_table_privilege(table: string, privilege: string) → bool

Returns whether or not the current user has privileges for table.

+
Stable
has_table_privilege(table: oid, privilege: string) → bool

Returns whether or not the current user has privileges for table.

+
Stable
has_table_privilege(user: string, table: string, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: string, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: oid, table: string, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: oid, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_tablespace_privilege(tablespace: string, privilege: string) → bool

Returns whether or not the current user has privileges for tablespace.

+
Stable
has_tablespace_privilege(tablespace: oid, privilege: string) → bool

Returns whether or not the current user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: string, tablespace: string, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: string, tablespace: oid, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: oid, tablespace: string, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: oid, tablespace: oid, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_type_privilege(type: string, privilege: string) → bool

Returns whether or not the current user has privileges for type.

+
Stable
has_type_privilege(type: oid, privilege: string) → bool

Returns whether or not the current user has privileges for type.

+
Stable
has_type_privilege(user: string, type: string, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: string, type: oid, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: oid, type: string, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: oid, type: oid, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
information_schema._pg_numeric_precision(typid: oid, typmod: int4) → int

Returns the precision of the given type with type modifier

+
Immutable
information_schema._pg_numeric_precision_radix(typid: oid, typmod: int4) → int

Returns the radix of the given type with type modifier

+
Immutable
information_schema._pg_numeric_scale(typid: oid, typmod: int4) → int

Returns the scale of the given type with type modifier

+
Immutable
nameconcatoid(name: string, oid: oid) → name

Used in the information_schema to produce specific_name columns, which are supposed to be unique per schema. The result is the same as ($1::text || ‘_’ || $2::text)::name except that, if it would not fit in 63 characters, we make it do so by truncating the name input (not the oid).

+
Immutable
obj_description(object_oid: oid) → string

Returns the comment for a database object specified by its OID alone. This is deprecated since there is no guarantee that OIDs are unique across different system catalogs; therefore, the wrong comment might be returned.

+
Stable
obj_description(object_oid: oid, catalog_name: string) → string

Returns the comment for a database object specified by its OID and the name of the containing system catalog. For example, obj_description(123456, ‘pg_class’) would retrieve the comment for the table with OID 123456.

+
Stable
oidvectortypes(vector: oidvector) → string

Generates a comma separated string of type names from an oidvector.

+
Stable
pg_backend_pid() → int

Returns a numerical ID attached to this session. This ID is part of the query cancellation key used by the wire protocol. This function was only added for compatibility, and unlike in Postgres, the returned value does not correspond to a real process ID.

+
Stable
pg_collation_for(str: anyelement) → string

Returns the collation of the argument

+
Stable
pg_column_is_updatable(reloid: oid, attnum: int2, include_triggers: bool) → bool

Returns whether the given column can be updated.

+
Stable
pg_column_size(anyelement...) → int

Return size in bytes of the column provided as an argument

+
Immutable
pg_function_is_visible(oid: oid) → bool

Returns whether the function with the given OID belongs to one of the schemas on the search path.

+
Stable
pg_get_function_arg_default(func_oid: oid, arg_num: int4) → string

Get textual representation of a function argument’s default value. The second argument of this function is the argument number among all arguments (i.e. proallargtypes, not proargtypes), starting with 1, because that’s how information_schema.sql uses it. Currently, this always returns NULL, since CockroachDB does not support default values.

+
Stable
pg_get_function_arguments(func_oid: oid) → string

Returns the argument list (with defaults) necessary to identify a function, in the form it would need to appear in within CREATE FUNCTION.

+
Stable
pg_get_function_identity_arguments(func_oid: oid) → string

Returns the argument list (without defaults) necessary to identify a function, in the form it would need to appear in within ALTER FUNCTION, for instance.

+
Stable
pg_get_function_result(func_oid: oid) → string

Returns the types of the result of the specified function.

+
Stable
pg_get_functiondef(func_oid: oid) → string

For user-defined functions, returns the definition of the specified function. For builtin functions, returns the name of the function.

+
Stable
pg_get_indexdef(index_oid: oid) → string

Gets the CREATE INDEX command for index

+
Stable
pg_get_indexdef(index_oid: oid, column_no: int, pretty_bool: bool) → string

Gets the CREATE INDEX command for index, or definition of just one index column when given a non-zero column number

+
Stable
pg_get_serial_sequence(table_name: string, column_name: string) → string

Returns the name of the sequence used by the given column_name in the table table_name.

+
Stable
pg_get_viewdef(view_oid: oid) → string

Returns the CREATE statement for an existing view.

+
Stable
pg_get_viewdef(view_oid: oid, pretty_bool: bool) → string

Returns the CREATE statement for an existing view.

+
Stable
pg_has_role(role: string, privilege: string) → bool

Returns whether or not the current user has privileges for role.

+
Stable
pg_has_role(role: oid, privilege: string) → bool

Returns whether or not the current user has privileges for role.

+
Stable
pg_has_role(user: string, role: string, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: string, role: oid, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: oid, role: string, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: oid, role: oid, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_is_other_temp_schema(oid: oid) → bool

Returns true if the given OID is the OID of another session’s temporary schema. (This can be useful, for example, to exclude other sessions’ temporary tables from a catalog display.)

+
Stable
pg_my_temp_schema() → oid

Returns the OID of the current session’s temporary schema, or zero if it has none (because it has not created any temporary tables).

+
Stable
pg_relation_is_updatable(reloid: oid, include_triggers: bool) → int4

Returns the update events the relation supports.

+
Stable
pg_sequence_last_value(sequence_oid: oid) → int

Returns the last value generated by a sequence, or NULL if the sequence has not been used yet.

+
Volatile
pg_sleep(seconds: float) → bool

pg_sleep makes the current session’s process sleep until seconds seconds have elapsed. seconds is a value of type double precision, so fractional-second delays can be specified.

+
Volatile
pg_table_is_visible(oid: oid) → bool

Returns whether the table with the given OID belongs to one of the schemas on the search path.

+
Stable
pg_type_is_visible(oid: oid) → bool

Returns whether the type with the given OID belongs to one of the schemas on the search path.

+
Stable
set_config(setting_name: string, new_value: string, is_local: bool) → string

System info

+
Volatile
shobj_description(object_oid: oid, catalog_name: string) → string

Returns the comment for a shared database object specified by its OID and the name of the containing system catalog. This is just like obj_description except that it is used for retrieving comments on shared objects (e.g. databases).

+
Stable
+ diff --git a/src/current/_includes/cockroach-generated/release-24.2/sql/operators.md b/src/current/_includes/cockroach-generated/release-24.2/sql/operators.md new file mode 100644 index 00000000000..47027b064fe --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-24.2/sql/operators.md @@ -0,0 +1,658 @@ + + + + + +
#Return
int # intint
varbit # varbitvarbit
+ + + + +
#>Return
jsonb #> string[]jsonb
+ + + + +
#>>Return
jsonb #>> string[]string
+ + + + + + + + + +
%Return
decimal % decimaldecimal
decimal % intdecimal
float % floatfloat
int % decimaldecimal
int % intint
string % stringbool
+ + + + + + +
&Return
inet & inetinet
int & intint
varbit & varbitvarbit
+ + + + + + + + + +
&&Return
anyelement && anyelementbool
box2d && box2dbool
box2d && geometrybool
geometry && box2dbool
geometry && geometrybool
inet && inetbool
+ + + + + + + + + + + + + + + +
*Return
decimal * decimaldecimal
decimal * intdecimal
decimal * intervalinterval
float * floatfloat
float * intervalinterval
int * decimaldecimal
int * intint
int * intervalinterval
interval * decimalinterval
interval * floatinterval
interval * intinterval
vector * vectorvector
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Return
+decimaldecimal
+floatfloat
+intint
+intervalinterval
date + intdate
date + intervaltimestamp
date + timetimestamp
date + timetztimestamptz
decimal + decimaldecimal
decimal + intdecimal
decimal + pg_lsnpg_lsn
float + floatfloat
inet + intinet
int + datedate
int + decimaldecimal
int + inetinet
int + intint
interval + datetimestamp
interval + intervalinterval
interval + timetime
interval + timestamptimestamp
interval + timestamptztimestamptz
interval + timetztimetz
pg_lsn + decimalpg_lsn
time + datetimestamp
time + intervaltime
timestamp + intervaltimestamp
timestamptz + intervaltimestamptz
timetz + datetimestamptz
timetz + intervaltimetz
vector + vectorvector
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
-Return
-decimaldecimal
-floatfloat
-intint
-intervalinterval
date - dateint
date - intdate
date - intervaltimestamp
date - timetimestamp
decimal - decimaldecimal
decimal - intdecimal
float - floatfloat
inet - inetint
inet - intinet
int - decimaldecimal
int - intint
interval - intervalinterval
jsonb - intjsonb
jsonb - stringjsonb
jsonb - string[]jsonb
pg_lsn - decimalpg_lsn
pg_lsn - pg_lsndecimal
time - intervaltime
time - timeinterval
timestamp - intervaltimestamp
timestamp - timestampinterval
timestamp - timestamptzinterval
timestamptz - intervaltimestamptz
timestamptz - timestampinterval
timestamptz - timestamptzinterval
timetz - intervaltimetz
vector - vectorvector
+ + + + + +
->Return
jsonb -> intjsonb
jsonb -> stringjsonb
+ + + + + +
->>Return
jsonb ->> intstring
jsonb ->> stringstring
+ + + + + + + + + + +
/Return
decimal / decimaldecimal
decimal / intdecimal
float / floatfloat
int / decimaldecimal
int / intdecimal
interval / floatinterval
interval / intinterval
+ + + + + + + + +
//Return
decimal // decimaldecimal
decimal // intdecimal
float // floatfloat
int // decimaldecimal
int // intint
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
<Return
anyenum < anyenumbool
bool < boolbool
bool[] < bool[]bool
box2d < box2dbool
bpchar < bpcharbool
bytes < bytesbool
bytes[] < bytes[]bool
collatedstring < collatedstringbool
date < datebool
date < timestampbool
date < timestamptzbool
date[] < date[]bool
decimal < decimalbool
decimal < floatbool
decimal < intbool
decimal[] < decimal[]bool
float < decimalbool
float < floatbool
float < intbool
float[] < float[]bool
geography < geographybool
geometry < geometrybool
inet < inetbool
inet[] < inet[]bool
int < decimalbool
int < floatbool
int < intbool
int < oidbool
int[] < int[]bool
interval < intervalbool
interval[] < interval[]bool
jsonb < jsonbbool
oid < intbool
oid < oidbool
pg_lsn < pg_lsnbool
refcursor < refcursorbool
string < stringbool
string[] < string[]bool
time < timebool
time < timetzbool
time[] < time[]bool
timestamp < datebool
timestamp < timestampbool
timestamp < timestamptzbool
timestamp[] < timestamp[]bool
timestamptz < datebool
timestamptz < timestampbool
timestamptz < timestamptzbool
timestamptz < timestamptzbool
timetz < timebool
timetz < timetzbool
tuple < tuplebool
uuid < uuidbool
uuid[] < uuid[]bool
varbit < varbitbool
vector < vectorbool
+ + + + +
<#>Return
vector <#> vectorfloat
+ + + + +
<->Return
vector <-> vectorfloat
+ + + + + + +
<<Return
inet << inetbool
int << intint
varbit << intvarbit
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
<=Return
anyenum <= anyenumbool
bool <= boolbool
bool[] <= bool[]bool
box2d <= box2dbool
bpchar <= bpcharbool
bytes <= bytesbool
bytes[] <= bytes[]bool
collatedstring <= collatedstringbool
date <= datebool
date <= timestampbool
date <= timestamptzbool
date[] <= date[]bool
decimal <= decimalbool
decimal <= floatbool
decimal <= intbool
decimal[] <= decimal[]bool
float <= decimalbool
float <= floatbool
float <= intbool
float[] <= float[]bool
geography <= geographybool
geometry <= geometrybool
inet <= inetbool
inet[] <= inet[]bool
int <= decimalbool
int <= floatbool
int <= intbool
int <= oidbool
int[] <= int[]bool
interval <= intervalbool
interval[] <= interval[]bool
jsonb <= jsonbbool
oid <= intbool
oid <= oidbool
pg_lsn <= pg_lsnbool
refcursor <= refcursorbool
string <= stringbool
string[] <= string[]bool
time <= timebool
time <= timetzbool
time[] <= time[]bool
timestamp <= datebool
timestamp <= timestampbool
timestamp <= timestamptzbool
timestamp[] <= timestamp[]bool
timestamptz <= datebool
timestamptz <= timestampbool
timestamptz <= timestamptzbool
timestamptz <= timestamptzbool
timetz <= timebool
timetz <= timetzbool
tuple <= tuplebool
uuid <= uuidbool
uuid[] <= uuid[]bool
varbit <= varbitbool
vector <= vectorbool
+ + + + +
<=>Return
vector <=> vectorfloat
+ + + + + +
<@Return
anyelement <@ anyelementbool
jsonb <@ jsonbbool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
=Return
anyenum = anyenumbool
bool = boolbool
bool[] = bool[]bool
box2d = box2dbool
bpchar = bpcharbool
bytes = bytesbool
bytes[] = bytes[]bool
collatedstring = collatedstringbool
date = datebool
date = timestampbool
date = timestamptzbool
date[] = date[]bool
decimal = decimalbool
decimal = floatbool
decimal = intbool
decimal[] = decimal[]bool
float = decimalbool
float = floatbool
float = intbool
float[] = float[]bool
geography = geographybool
geometry = geometrybool
inet = inetbool
inet[] = inet[]bool
int = decimalbool
int = floatbool
int = intbool
int = oidbool
int[] = int[]bool
interval = intervalbool
interval[] = interval[]bool
jsonb = jsonbbool
oid = intbool
oid = oidbool
pg_lsn = pg_lsnbool
refcursor = refcursorbool
string = stringbool
string[] = string[]bool
time = timebool
time = timetzbool
time[] = time[]bool
timestamp = datebool
timestamp = timestampbool
timestamp = timestamptzbool
timestamp[] = timestamp[]bool
timestamptz = datebool
timestamptz = timestampbool
timestamptz = timestamptzbool
timestamptz = timestamptzbool
timetz = timebool
timetz = timetzbool
tsquery = tsquerybool
tsvector = tsvectorbool
tuple = tuplebool
uuid = uuidbool
uuid[] = uuid[]bool
varbit = varbitbool
vector = vectorbool
+ + + + + + +
>>Return
inet >> inetbool
int >> intint
varbit >> intvarbit
+ + + + +
?Return
jsonb ? stringbool
+ + + + +
?&Return
jsonb ?& string[]bool
+ + + + +
?|Return
jsonb ?| string[]bool
+ + + + + +
@>Return
anyelement @> anyelementbool
jsonb @> jsonbbool
+ + + + + +
@@Return
tsquery @@ tsvectorbool
tsvector @@ tsquerybool
+ + + + +
ILIKEReturn
string ILIKE stringbool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
INReturn
anyenum IN tuplebool
bool IN tuplebool
box2d IN tuplebool
bpchar IN tuplebool
bytes IN tuplebool
collatedstring IN tuplebool
date IN tuplebool
decimal IN tuplebool
float IN tuplebool
geography IN tuplebool
geometry IN tuplebool
inet IN tuplebool
int IN tuplebool
interval IN tuplebool
jsonb IN tuplebool
oid IN tuplebool
pg_lsn IN tuplebool
refcursor IN tuplebool
string IN tuplebool
time IN tuplebool
timestamp IN tuplebool
timestamptz IN tuplebool
timetz IN tuplebool
tuple IN tuplebool
uuid IN tuplebool
varbit IN tuplebool
vector IN tuplebool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
IS NOT DISTINCT FROMReturn
anyelement IS NOT DISTINCT FROM unknownbool
anyenum IS NOT DISTINCT FROM anyenumbool
bool IS NOT DISTINCT FROM boolbool
bool[] IS NOT DISTINCT FROM bool[]bool
box2d IS NOT DISTINCT FROM box2dbool
bpchar IS NOT DISTINCT FROM bpcharbool
bytes IS NOT DISTINCT FROM bytesbool
bytes[] IS NOT DISTINCT FROM bytes[]bool
collatedstring IS NOT DISTINCT FROM collatedstringbool
date IS NOT DISTINCT FROM datebool
date IS NOT DISTINCT FROM timestampbool
date IS NOT DISTINCT FROM timestamptzbool
date[] IS NOT DISTINCT FROM date[]bool
decimal IS NOT DISTINCT FROM decimalbool
decimal IS NOT DISTINCT FROM floatbool
decimal IS NOT DISTINCT FROM intbool
decimal[] IS NOT DISTINCT FROM decimal[]bool
float IS NOT DISTINCT FROM decimalbool
float IS NOT DISTINCT FROM floatbool
float IS NOT DISTINCT FROM intbool
float[] IS NOT DISTINCT FROM float[]bool
geography IS NOT DISTINCT FROM geographybool
geometry IS NOT DISTINCT FROM geometrybool
inet IS NOT DISTINCT FROM inetbool
inet[] IS NOT DISTINCT FROM inet[]bool
int IS NOT DISTINCT FROM decimalbool
int IS NOT DISTINCT FROM floatbool
int IS NOT DISTINCT FROM intbool
int IS NOT DISTINCT FROM oidbool
int[] IS NOT DISTINCT FROM int[]bool
interval IS NOT DISTINCT FROM intervalbool
interval[] IS NOT DISTINCT FROM interval[]bool
jsonb IS NOT DISTINCT FROM jsonbbool
oid IS NOT DISTINCT FROM intbool
oid IS NOT DISTINCT FROM oidbool
pg_lsn IS NOT DISTINCT FROM pg_lsnbool
refcursor IS NOT DISTINCT FROM refcursorbool
string IS NOT DISTINCT FROM stringbool
string[] IS NOT DISTINCT FROM string[]bool
time IS NOT DISTINCT FROM timebool
time IS NOT DISTINCT FROM timetzbool
time[] IS NOT DISTINCT FROM time[]bool
timestamp IS NOT DISTINCT FROM datebool
timestamp IS NOT DISTINCT FROM timestampbool
timestamp IS NOT DISTINCT FROM timestamptzbool
timestamp[] IS NOT DISTINCT FROM timestamp[]bool
timestamptz IS NOT DISTINCT FROM datebool
timestamptz IS NOT DISTINCT FROM timestampbool
timestamptz IS NOT DISTINCT FROM timestamptzbool
timestamptz IS NOT DISTINCT FROM timestamptzbool
timetz IS NOT DISTINCT FROM timebool
timetz IS NOT DISTINCT FROM timetzbool
tsquery IS NOT DISTINCT FROM tsquerybool
tsvector IS NOT DISTINCT FROM tsvectorbool
tuple IS NOT DISTINCT FROM tuplebool
unknown IS NOT DISTINCT FROM unknownbool
unknown IS NOT DISTINCT FROM voidbool
uuid IS NOT DISTINCT FROM uuidbool
uuid[] IS NOT DISTINCT FROM uuid[]bool
varbit IS NOT DISTINCT FROM varbitbool
vector IS NOT DISTINCT FROM vectorbool
void IS NOT DISTINCT FROM unknownbool
+ + + + +
LIKEReturn
string LIKE stringbool
+ + + + +
SIMILAR TOReturn
string SIMILAR TO stringbool
+ + + + + + + + +
^Return
decimal ^ decimaldecimal
decimal ^ intdecimal
float ^ floatfloat
int ^ decimaldecimal
int ^ intint
+ + + + + + +
|Return
inet | inetinet
int | intint
varbit | varbitvarbit
+ + + + + +
|/Return
|/decimaldecimal
|/floatfloat
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
||Return
bool || bool[]bool[]
bool || stringstring
bool[] || boolbool[]
bool[] || bool[]bool[]
box2d || box2dbox2d
box2d || stringstring
bytes || bytesbytes
bytes || bytes[]bytes[]
bytes[] || bytesbytes[]
bytes[] || bytes[]bytes[]
date || date[]date[]
date || stringstring
date[] || datedate[]
date[] || date[]date[]
decimal || decimal[]decimal[]
decimal || stringstring
decimal[] || decimaldecimal[]
decimal[] || decimal[]decimal[]
float || float[]float[]
float || stringstring
float[] || floatfloat[]
float[] || float[]float[]
geography || geographygeography
geography || stringstring
geometry || geometrygeometry
geometry || stringstring
inet || inet[]inet[]
inet || stringstring
inet[] || inetinet[]
inet[] || inet[]inet[]
int || int[]int[]
int || stringstring
int[] || intint[]
int[] || int[]int[]
interval || interval[]interval[]
interval || stringstring
interval[] || intervalinterval[]
interval[] || interval[]interval[]
jsonb || jsonbjsonb
jsonb || stringstring
oid || oidoid
oid || stringstring
pg_lsn || pg_lsnpg_lsn
pg_lsn || stringstring
refcursor || refcursorrefcursor
refcursor || stringstring
string || boolstring
string || box2dstring
string || datestring
string || decimalstring
string || floatstring
string || geographystring
string || geometrystring
string || inetstring
string || intstring
string || intervalstring
string || jsonbstring
string || oidstring
string || pg_lsnstring
string || refcursorstring
string || stringstring
string || string[]string[]
string || timestring
string || timestampstring
string || timestamptzstring
string || timetzstring
string || tuplestring
string || uuidstring
string || varbitstring
string[] || stringstring[]
string[] || string[]string[]
time || stringstring
time || time[]time[]
time[] || timetime[]
time[] || time[]time[]
timestamp || stringstring
timestamp || timestamp[]timestamp[]
timestamp[] || timestamptimestamp[]
timestamp[] || timestamp[]timestamp[]
timestamptz || stringstring
timestamptz || timestamptztimestamptz
timestamptz || timestamptztimestamptz
timestamptz || timestamptztimestamptz
timetz || stringstring
timetz || timetztimetz
tuple || stringstring
uuid || stringstring
uuid || uuid[]uuid[]
uuid[] || uuiduuid[]
uuid[] || uuid[]uuid[]
varbit || stringstring
varbit || varbitvarbit
+ + + + + +
||/Return
||/decimaldecimal
||/floatfloat
+ + + + + + + + + + + +
~Return
~inetinet
~intint
~varbitvarbit
box2d ~ box2dbool
box2d ~ geometrybool
geometry ~ box2dbool
geometry ~ geometrybool
string ~ stringbool
+ + + + +
~*Return
string ~* stringbool
diff --git a/src/current/_includes/cockroach-generated/release-24.2/sql/window_functions.md b/src/current/_includes/cockroach-generated/release-24.2/sql/window_functions.md new file mode 100644 index 00000000000..e1032ff82de --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-24.2/sql/window_functions.md @@ -0,0 +1,413 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cume_dist() → float

Calculates the relative rank of the current row: (number of rows preceding or peer with current row) / (total rows).

+
Immutable
dense_rank() → int

Calculates the rank of the current row without gaps; this function counts peer groups.

+
Immutable
first_value(val: bool) → bool

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: bytes) → bytes

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: date) → date

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: decimal) → decimal

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: float) → float

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: inet) → inet

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: int) → int

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: interval) → interval

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: string) → string

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: time) → time

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timestamp) → timestamp

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timestamptz) → timestamptz

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: uuid) → uuid

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: box2d) → box2d

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: geography) → geography

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: geometry) → geometry

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: jsonb) → jsonb

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: oid) → oid

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: pg_lsn) → pg_lsn

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: refcursor) → refcursor

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timetz) → timetz

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: varbit) → varbit

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
lag(val: bool) → bool

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: bool, n: int) → bool

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: bool, n: int, default: bool) → bool

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: bytes) → bytes

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: bytes, n: int) → bytes

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: bytes, n: int, default: bytes) → bytes

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: date) → date

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: date, n: int) → date

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: date, n: int, default: date) → date

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: decimal) → decimal

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: decimal, n: int) → decimal

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: decimal, n: int, default: decimal) → decimal

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: float) → float

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: float, n: int) → float

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: float, n: int, default: float) → float

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: inet) → inet

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: inet, n: int) → inet

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: inet, n: int, default: inet) → inet

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: int) → int

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: int, n: int) → int

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: int, n: int, default: int) → int

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: interval) → interval

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: interval, n: int) → interval

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: interval, n: int, default: interval) → interval

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: string) → string

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: string, n: int) → string

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: string, n: int, default: string) → string

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: time) → time

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: time, n: int) → time

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: time, n: int, default: time) → time

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timestamp) → timestamp

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timestamp, n: int, default: timestamp) → timestamp

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timestamptz) → timestamptz

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timestamptz, n: int, default: timestamptz) → timestamptz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: uuid) → uuid

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: uuid, n: int) → uuid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: uuid, n: int, default: uuid) → uuid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: box2d) → box2d

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: box2d, n: int) → box2d

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: box2d, n: int, default: box2d) → box2d

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: geography) → geography

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: geography, n: int) → geography

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: geography, n: int, default: geography) → geography

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: geometry) → geometry

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: geometry, n: int) → geometry

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: geometry, n: int, default: geometry) → geometry

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: jsonb) → jsonb

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: jsonb, n: int, default: jsonb) → jsonb

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: oid) → oid

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: oid, n: int) → oid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: oid, n: int, default: oid) → oid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: pg_lsn) → pg_lsn

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: pg_lsn, n: int, default: pg_lsn) → pg_lsn

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: refcursor) → refcursor

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: refcursor, n: int, default: refcursor) → refcursor

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timetz) → timetz

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timetz, n: int) → timetz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timetz, n: int, default: timetz) → timetz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: varbit) → varbit

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: varbit, n: int) → varbit

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: varbit, n: int, default: varbit) → varbit

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
last_value(val: bool) → bool

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: bytes) → bytes

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: date) → date

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: decimal) → decimal

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: float) → float

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: inet) → inet

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: int) → int

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: interval) → interval

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: string) → string

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: time) → time

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timestamp) → timestamp

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timestamptz) → timestamptz

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: uuid) → uuid

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: box2d) → box2d

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: geography) → geography

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: geometry) → geometry

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: jsonb) → jsonb

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: oid) → oid

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: pg_lsn) → pg_lsn

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: refcursor) → refcursor

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timetz) → timetz

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: varbit) → varbit

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
lead(val: bool) → bool

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: bool, n: int) → bool

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: bool, n: int, default: bool) → bool

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: bytes) → bytes

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: bytes, n: int) → bytes

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: bytes, n: int, default: bytes) → bytes

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: date) → date

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: date, n: int) → date

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: date, n: int, default: date) → date

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: decimal) → decimal

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: decimal, n: int) → decimal

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: decimal, n: int, default: decimal) → decimal

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: float) → float

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: float, n: int) → float

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: float, n: int, default: float) → float

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: inet) → inet

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: inet, n: int) → inet

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: inet, n: int, default: inet) → inet

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: int) → int

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: int, n: int) → int

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: int, n: int, default: int) → int

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: interval) → interval

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: interval, n: int) → interval

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: interval, n: int, default: interval) → interval

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: string) → string

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: string, n: int) → string

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: string, n: int, default: string) → string

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: time) → time

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: time, n: int) → time

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: time, n: int, default: time) → time

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timestamp) → timestamp

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timestamp, n: int, default: timestamp) → timestamp

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timestamptz) → timestamptz

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timestamptz, n: int, default: timestamptz) → timestamptz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: uuid) → uuid

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: uuid, n: int) → uuid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: uuid, n: int, default: uuid) → uuid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: box2d) → box2d

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: box2d, n: int) → box2d

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: box2d, n: int, default: box2d) → box2d

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: geography) → geography

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: geography, n: int) → geography

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: geography, n: int, default: geography) → geography

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: geometry) → geometry

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: geometry, n: int) → geometry

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: geometry, n: int, default: geometry) → geometry

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: jsonb) → jsonb

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: jsonb, n: int, default: jsonb) → jsonb

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: oid) → oid

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: oid, n: int) → oid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: oid, n: int, default: oid) → oid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: pg_lsn) → pg_lsn

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: pg_lsn, n: int, default: pg_lsn) → pg_lsn

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: refcursor) → refcursor

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: refcursor, n: int, default: refcursor) → refcursor

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timetz) → timetz

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timetz, n: int) → timetz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timetz, n: int, default: timetz) → timetz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: varbit) → varbit

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: varbit, n: int) → varbit

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: varbit, n: int, default: varbit) → varbit

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
nth_value(val: bool, n: int) → bool

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: bytes, n: int) → bytes

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: date, n: int) → date

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: decimal, n: int) → decimal

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: float, n: int) → float

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: inet, n: int) → inet

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: int, n: int) → int

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: interval, n: int) → interval

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: string, n: int) → string

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: time, n: int) → time

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: uuid, n: int) → uuid

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: box2d, n: int) → box2d

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: geography, n: int) → geography

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: geometry, n: int) → geometry

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: oid, n: int) → oid

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timetz, n: int) → timetz

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: varbit, n: int) → varbit

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
ntile(n: int) → int

Calculates an integer ranging from 1 to n, dividing the partition as equally as possible.

+
Immutable
percent_rank() → float

Calculates the relative rank of the current row: (rank - 1) / (total rows - 1).

+
Immutable
rank() → int

Calculates the rank of the current row with gaps; same as row_number of its first peer.

+
Immutable
row_number() → int

Calculates the number of the current row within its partition, counting from 1.

+
Immutable
+ diff --git a/src/current/_includes/cockroach-generated/release-24.3/eventlog.md b/src/current/_includes/cockroach-generated/release-24.3/eventlog.md new file mode 100644 index 00000000000..25a64aec4cf --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-24.3/eventlog.md @@ -0,0 +1,3436 @@ +Certain notable events are reported using a structured format. +Commonly, these notable events are also copied to the table +`system.eventlog`, unless the cluster setting +`server.eventlog.enabled` is unset. + +Additionally, notable events are copied to specific external logging +channels in log messages, where they can be collected for further processing. + +The sections below document the possible notable event types +in this version of CockroachDB. For each event type, a table +documents the possible fields. A field may be omitted from +an event if its value is empty or zero. + +A field is also considered "Sensitive" if it may contain +application-specific information or personally identifiable information (PII). In that case, +the copy of the event sent to the external logging channel +will contain redaction markers in a format that is compatible +with the redaction facilities in [`cockroach debug zip`](cockroach-debug-zip.html) +and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html), +provided the `redactable` functionality is enabled on the logging sink. + +Events not documented on this page will have an unstructured format in log messages. + +## Changefeed telemetry events + +Events in this category pertain to changefeed usage and metrics. + +Events in this category are logged to the `TELEMETRY` channel. + + +### `changefeed_emitted_bytes` + +An event of type `changefeed_emitted_bytes` is an event representing the bytes emitted by a changefeed over an interval. + + +| Field | Description | Sensitive | +|--|--|--| +| `EmittedBytes` | The number of bytes emitted. | no | +| `EmittedMessages` | The number of messages emitted. 
| no | +| `LoggingInterval` | The time period in nanoseconds between emitting telemetry events of this type (per-aggregator). | no | +| `Closing` | Flag to indicate that the changefeed is closing. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. | no | + +### `changefeed_failed` + +An event of type `changefeed_failed` is an event for any changefeed failure since the plan hook +was triggered. + + +| Field | Description | Sensitive | +|--|--|--| +| `FailureType` | The reason / environment with which the changefeed failed (ex: connection_closed, changefeed_behind). | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. 
| no | + +### `create_changefeed` + +An event of type `create_changefeed` is an event for any CREATE CHANGEFEED query that +successfully starts running. Failed CREATE statements will show up as +ChangefeedFailed events. + + +| Field | Description | Sensitive | +|--|--|--| +| `Transformation` | Flag representing whether the changefeed is using CDC queries. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. | no | + +## Cluster-level events + +Events in this category pertain to an entire cluster and are +not relative to any particular tenant. + +In a multi-tenant setup, the `system.eventlog` table for individual +tenants cannot contain a copy of cluster-level events; conversely, +the `system.eventlog` table in the system tenant cannot contain the +SQL-level events for individual tenants. + +Events in this category are logged to the `OPS` channel. + + +### `certs_reload` + +An event of type `certs_reload` is recorded when the TLS certificates are +reloaded/rotated from disk. + + +| Field | Description | Sensitive | +|--|--|--| +| `Success` | Whether the operation completed without errors. | no | +| `ErrorMessage` | If an error was encountered, the text of the error. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | + +### `disk_slowness_cleared` + +An event of type `disk_slowness_cleared` is recorded when disk slowness in a store has cleared. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeID` | The node ID where the event was originated. | no | +| `StoreID` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `disk_slowness_detected` + +An event of type `disk_slowness_detected` is recorded when a store observes disk slowness +events. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeID` | The node ID where the event was originated. | no | +| `StoreID` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `node_decommissioned` + +An event of type `node_decommissioned` is recorded when a node is marked as +decommissioned. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_decommissioning` + +An event of type `node_decommissioning` is recorded when a node is marked as +decommissioning. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. 
| no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_join` + +An event of type `node_join` is recorded when a node joins the cluster. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_recommissioned` + +An event of type `node_recommissioned` is recorded when a decommissioning node is +recommissioned. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_restart` + +An event of type `node_restart` is recorded when an existing node rejoins the cluster +after being offline. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_shutdown_connection_timeout` + +An event of type `node_shutdown_connection_timeout` is recorded when SQL connections remain open +during shutdown, after waiting for the server.shutdown.connections.timeout +to transpire. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `Detail` | The detailed message, meant to be a human-understandable explanation. | no | +| `ConnectionsRemaining` | The number of connections still open after waiting for the client to close them. | no | +| `TimeoutMillis` | The amount of time the server waited for the client to close the connections, defined by server.shutdown.connections.timeout. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_shutdown_transaction_timeout` + +An event of type `node_shutdown_transaction_timeout` is recorded when SQL transactions remain open +during shutdown, after waiting for the server.shutdown.transactions.timeout +to transpire. + + +| Field | Description | Sensitive | +|--|--|--| +| `Detail` | The detailed message, meant to be a human-understandable explanation. | no | +| `ConnectionsRemaining` | The number of connections still running SQL transactions after waiting for the client to end them. | no | +| `TimeoutMillis` | The amount of time the server waited for the client to close the connections, defined by server.shutdown.transactions.timeout. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. 
| no | + +### `tenant_shared_service_start` + +An event of type `tenant_shared_service_start` is recorded when a tenant server +is started inside the same process as the KV layer. + + +| Field | Description | Sensitive | +|--|--|--| +| `OK` | Whether the startup was successful. | no | +| `ErrorText` | If the startup failed, the text of the error. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `TenantID` | The ID of the tenant owning the service. | no | +| `InstanceID` | The ID of the server instance. | no | +| `TenantName` | The name of the tenant at the time the event was emitted. | yes | + +### `tenant_shared_service_stop` + +An event of type `tenant_shared_service_stop` is recorded when a tenant server +is shut down inside the same process as the KV layer. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `TenantID` | The ID of the tenant owning the service. | no | +| `InstanceID` | The ID of the server instance. | no | +| `TenantName` | The name of the tenant at the time the event was emitted. | yes | + +## Debugging events + +Events in this category pertain to debugging operations performed by +operators or (more commonly) Cockroach Labs employees. These operations can +e.g. directly access and mutate internal state, breaking system invariants. + +Events in this category are logged to the `OPS` channel. + + +### `debug_recover_replica` + +An event of type `debug_recover_replica` is recorded when unsafe loss of quorum recovery is performed. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `RangeID` | | no | +| `StoreID` | | no | +| `SurvivorReplicaID` | | no | +| `UpdatedReplicaID` | | no | +| `StartKey` | | yes | +| `EndKey` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event originated. | no | +| `User` | The user which performed the operation. | yes | + +### `debug_send_kv_batch` + +An event of type `debug_send_kv_batch` is recorded when an arbitrary KV BatchRequest is submitted +to the cluster via the `debug send-kv-batch` CLI command. + + +| Field | Description | Sensitive | +|--|--|--| +| `BatchRequest` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event originated. | no | +| `User` | The user which performed the operation. | yes | + +## Health events + +Events in this category pertain to the health of one or more servers. + +Events in this category are logged to the `HEALTH` channel. + + +### `runtime_stats` + +An event of type `runtime_stats` is recorded every 10 seconds as server health metrics. + + +| Field | Description | Sensitive | +|--|--|--| +| `MemRSSBytes` | The process resident set size. Expressed as bytes. | no | +| `GoroutineCount` | The number of goroutines. | no | +| `MemStackSysBytes` | The stack system memory used. Expressed as bytes. | no | +| `GoAllocBytes` | The memory allocated by Go. Expressed as bytes. | no | +| `GoTotalBytes` | The total memory allocated by Go but not released. Expressed as bytes. | no | +| `GoStatsStaleness` | The staleness of the Go memory statistics. Expressed in seconds. 
| no | +| `HeapFragmentBytes` | The amount of heap fragmentation. Expressed as bytes. | no | +| `HeapReservedBytes` | The amount of heap reserved. Expressed as bytes. | no | +| `HeapReleasedBytes` | The amount of heap released. Expressed as bytes. | no | +| `CGoAllocBytes` | The memory allocated outside of Go. Expressed as bytes. | no | +| `CGoTotalBytes` | The total memory allocated outside of Go but not released. Expressed as bytes. | no | +| `CGoCallRate` | The total number of calls outside of Go over time. Expressed as operations per second. | no | +| `CPUUserPercent` | The user CPU percentage. | no | +| `CPUSysPercent` | The system CPU percentage. | no | +| `GCPausePercent` | The GC pause percentage. | no | +| `GCRunCount` | The total number of GC runs. | no | +| `NetHostRecvBytes` | The bytes received on all network interfaces since this process started. | no | +| `NetHostSendBytes` | The bytes sent on all network interfaces since this process started. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Job events + +Events in this category pertain to long-running jobs that are orchestrated by +a node's job registry. These system processes can create and/or modify stored +objects during the course of their execution. + +A job might choose to emit multiple events during its execution when +transitioning from one "state" to another. +Egs: IMPORT/RESTORE will emit events on job creation and successful +completion. If the job fails, events will be emitted on job creation, +failure, and successful revert. + +Events in this category are logged to the `OPS` channel. + + +### `import` + +An event of type `import` is recorded when an import job is created and on successful completion. +If the job fails, events will be emitted on job creation, failure, and +successful revert. 
+ + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `JobID` | The ID of the job that triggered the event. | no | +| `JobType` | The type of the job that triggered the event. | no | +| `Description` | A description of the job that triggered the event. Some jobs populate the description with an approximate representation of the SQL statement run to create the job. | yes | +| `User` | The user account that triggered the event. | yes | +| `DescriptorIDs` | The object descriptors affected by the job. Set to zero for operations that don't affect descriptors. | yes | +| `Status` | The status of the job that triggered the event. This allows the job to indicate which phase execution it is in when the event is triggered. | no | + +### `restore` + +An event of type `restore` is recorded when a restore job is created and on successful completion. +If the job fails, events will be emitted on job creation, failure, and +successful revert. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `JobID` | The ID of the job that triggered the event. | no | +| `JobType` | The type of the job that triggered the event. | no | +| `Description` | A description of the job that triggered the event. Some jobs populate the description with an approximate representation of the SQL statement run to create the job. | yes | +| `User` | The user account that triggered the event. | yes | +| `DescriptorIDs` | The object descriptors affected by the job. Set to zero for operations that don't affect descriptors. | yes | +| `Status` | The status of the job that triggered the event. 
This allows the job to indicate which phase execution it is in when the event is triggered. | no | + +### `status_change` + +An event of type `status_change` is recorded when a job changes statuses. + + +| Field | Description | Sensitive | +|--|--|--| +| `JobID` | The ID of the job that is changing statuses. | no | +| `JobType` | The type of the job that is changing statuses. | no | +| `Description` | A human parsable description of the status change | partially | +| `PreviousStatus` | The status that the job is transitioning out of | no | +| `NewStatus` | The status that the job has transitioned into | no | +| `RunNum` | The run number of the job. | no | +| `Error` | An error that may have occurred while the job was running. | yes | +| `FinalResumeErr` | An error that occurred that requires the job to be reverted. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Miscellaneous SQL events + +Events in this category report miscellaneous SQL events. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these miscellaneous events are +preserved in each tenant's own system.eventlog table. + +Events in this category are logged to the `OPS` channel. + + +### `set_cluster_setting` + +An event of type `set_cluster_setting` is recorded when a cluster setting is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SettingName` | The name of the affected cluster setting. | no | +| `Value` | The new value of the cluster setting. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `set_tenant_cluster_setting` + +An event of type `set_tenant_cluster_setting` is recorded when a cluster setting override +is changed, either for another tenant or for all tenants. + + +| Field | Description | Sensitive | +|--|--|--| +| `SettingName` | The name of the affected cluster setting. | no | +| `Value` | The new value of the cluster setting. | yes | +| `TenantId` | The target Tenant ID. Empty if targeting all tenants. | no | +| `AllTenants` | Whether the override applies to all tenants. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +## SQL Access Audit Events + +Events in this category are generated when a table has been +marked as audited via `ALTER TABLE ... EXPERIMENTAL_AUDIT SET`. + +Note: These events are not written to `system.eventlog`, even +when the cluster setting `server.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SENSITIVE_ACCESS` channel. + + +### `admin_query` + +An event of type `admin_query` is recorded when a user with admin privileges (the user +is directly or indirectly a member of the admin role) executes a query. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. 
Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `role_based_audit_event` + +An event of type `role_based_audit_event` is an audit event recorded when an executed query belongs to a user whose role +membership(s) correspond to any role that is enabled to emit an audit log via the sql.log.user_audit +cluster setting. + + +| Field | Description | Sensitive | +|--|--|--| +| `Role` | The configured audit role that emitted this log. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. 
| no | + +### `sensitive_table_access` + +An event of type `sensitive_table_access` is recorded when an access is performed to +a table marked as audited. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table being audited. | yes | +| `AccessMode` | How the table was accessed (r=read / rw=read/write). | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. 
| no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +## SQL Execution Log + +Events in this category report executed queries. + +Note: These events are not written to `system.eventlog`, even +when the cluster setting `server.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SQL_EXEC` channel. + + +### `query_execute` + +An event of type `query_execute` is recorded when a query is executed, +and the cluster setting `sql.log.all_statements.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +## SQL Logical Schema Changes + +Events in this category pertain to DDL (Data Definition Language) +operations performed by SQL statements that modify the SQL logical +schema. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `SQL_SCHEMA` channel. + + +### `alter_database_add_region` + +An event of type `alter_database_add_region` is recorded when a region is added to a database. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `RegionName` | The region being added. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_drop_region` + +An event of type `alter_database_drop_region` is recorded when a region is dropped from a database. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `RegionName` | The region being dropped. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. 
The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_placement` + +An event of type `alter_database_placement` is recorded when the database placement is modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `Placement` | The new placement policy. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_primary_region` + +An event of type `alter_database_primary_region` is recorded when a primary region is added/modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `PrimaryRegionName` | The new primary region. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_set_zone_config_extension` + +An event of type `alter_database_set_zone_config_extension` is recorded when a zone config extension is changed. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + +### `alter_database_survival_goal` + +An event of type `alter_database_survival_goal` is recorded when the survival goal is modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `SurvivalGoal` | The new survival goal | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. 
The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_function_options` + +An event of type `alter_function_options` is recorded when a user-defined function's options are +altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the affected function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `alter_index` + +An event of type `alter_index` is recorded when an index is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_index_visible` + +An event of type `alter_index_visible` is recorded when an index's visibility is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `NotVisible` | Set true if index is not visible. NOTE: THIS FIELD IS DEPRECATED in favor of invisibility. | no | +| `Invisibility` | The new invisibility of the affected index. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_sequence` + +An event of type `alter_sequence` is recorded when a sequence is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the affected sequence. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_table` + +An event of type `alter_table` is recorded when a table is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update, if any. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_type` + +An event of type `alter_type` is recorded when a user-defined type is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_column` + +An event of type `comment_on_column` is recorded when a column is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected column. | no | +| `ColumnName` | The affected column. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_constraint` + +An event of type `comment_on_constraint` is recorded when a constraint is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected constraint. | no | +| `ConstraintName` | The name of the affected constraint. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_database` + +An event of type `comment_on_database` is recorded when a database is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_index` + +An event of type `comment_on_index` is recorded when an index is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_schema` + + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | Name of the affected schema. 
| no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_table` + +An event of type `comment_on_table` is recorded when a table is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_type` + +An event of type `comment_on_type` is recorded when a type is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_database` + +An event of type `create_database` is recorded when a database is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the new database. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_function` + +An event of type `create_function` is recorded when a user-defined function is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the created function. | no | +| `IsReplace` | If the new function is a replace of an existing function. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_index` + +An event of type `create_index` is recorded when an index is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the new index. | no | +| `IndexName` | The name of the new index. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_schema` + +An event of type `create_schema` is recorded when a schema is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the new schema. | no | +| `Owner` | The name of the owner for the new schema. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_sequence` + +An event of type `create_sequence` is recorded when a sequence is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the new sequence. | no | +| `Owner` | The name of the owner for the new sequence. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_statistics` + +An event of type `create_statistics` is recorded when statistics are collected for a +table. + +Events of this type are only collected when the cluster setting +`sql.stats.post_events.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table for which the statistics were created. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_table` + +An event of type `create_table` is recorded when a table is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the new table. | no | +| `Owner` | The name of the owner for the new table. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_trigger` + +An event of type `create_trigger` is recorded when a trigger is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | Name of the trigger's table. | no | +| `TriggerName` | Name of the created trigger. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `create_type` + +An event of type `create_type` is recorded when a user-defined type is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the new type. | no | +| `Owner` | The name of the owner for the new type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_view` + +An event of type `create_view` is recorded when a view is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `ViewName` | The name of the new view. | no | +| `Owner` | The name of the owner of the new view. | no | +| `ViewQuery` | The SQL selection clause used to define the view. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_database` + +An event of type `drop_database` is recorded when a database is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | no | +| `DroppedSchemaObjects` | The names of the schemas dropped by a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_function` + +An event of type `drop_function` is recorded when a user-defined function is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the dropped function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_index` + +An event of type `drop_index` is recorded when an index is dropped. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_schema` + +An event of type `drop_schema` is recorded when a schema is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_sequence` + +An event of type `drop_sequence` is recorded when a sequence is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the affected sequence. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. 
Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_table` + +An event of type `drop_table` is recorded when a table is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_trigger` + +An event of type `drop_trigger` is recorded when a trigger is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | Name of the trigger's table. 
| no | +| `TriggerName` | Name of the dropped trigger. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_type` + +An event of type `drop_type` is recorded when a user-defined type is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_view` + +An event of type `drop_view` is recorded when a view is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `ViewName` | The name of the affected view. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `finish_schema_change` + +An event of type `finish_schema_change` is recorded when a previously initiated schema +change has completed. + + +| Field | Description | Sensitive | +|--|--|--| +| `LatencyNanos` | The amount of time the schema change job took to complete. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `finish_schema_change_rollback` + +An event of type `finish_schema_change_rollback` is recorded when a previously +initiated schema change rollback has completed. + + +| Field | Description | Sensitive | +|--|--|--| +| `LatencyNanos` | The amount of time the schema change job took to roll back. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. 
| no | + +### `force_delete_table_data_entry` + + + +| Field | Description | Sensitive | +|--|--|--| +| `DescriptorID` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_database` + +An event of type `rename_database` is recorded when a database is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The old name of the affected database. | no | +| `NewDatabaseName` | The new name of the affected database. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_function` + +An event of type `rename_function` is recorded when a user-defined function is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | The old name of the affected function. | no | +| `NewFunctionName` | The new name of the affected function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_schema` + +An event of type `rename_schema` is recorded when a schema is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The old name of the affected schema. | no | +| `NewSchemaName` | The new name of the affected schema. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_table` + +An event of type `rename_table` is recorded when a table, sequence or view is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The old name of the affected table. | no | +| `NewTableName` | The new name of the affected table. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_type` + +An event of type `rename_type` is recorded when a user-defined type is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The old name of the affected type. | no | +| `NewTypeName` | The new name of the affected type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. 
The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `reverse_schema_change` + +An event of type `reverse_schema_change` is recorded when an in-progress schema change +encounters a problem and is reversed. + + +| Field | Description | Sensitive | +|--|--|--| +| `Error` | The error encountered that caused the schema change to be reversed. The specific format of the error is variable and can change across releases without warning. | partially | +| `SQLSTATE` | The SQLSTATE code for the error. | no | +| `LatencyNanos` | The amount of time the schema change job took before being reverted. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `set_schema` + +An event of type `set_schema` is recorded when a table, view, sequence or type's schema is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DescriptorName` | The old name of the affected descriptor. 
| no | +| `NewDescriptorName` | The new name of the affected descriptor. | no | +| `DescriptorType` | The descriptor type being changed (table, view, sequence, type). | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `truncate_table` + +An event of type `truncate_table` is recorded when a table is truncated. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_delete_descriptor` + +An event of type `unsafe_delete_descriptor` is recorded when a descriptor is written +using crdb_internal.unsafe_delete_descriptor(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. + + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | no | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. 
Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_delete_namespace_entry` + +An event of type `unsafe_delete_namespace_entry` is recorded when a namespace entry is +written using crdb_internal.unsafe_delete_namespace_entry(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. + + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | no | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `unsafe_upsert_descriptor` + +An event of type `unsafe_upsert_descriptor` is recorded when a descriptor is written +using crdb_internal.unsafe_upsert_descriptor(). + + +| Field | Description | Sensitive | +|--|--|--| +| `PreviousDescriptor` | | yes | +| `NewDescriptor` | | yes | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_upsert_namespace_entry` + +An event of type `unsafe_upsert_namespace_entry` is recorded when a namespace entry is +written using crdb_internal.unsafe_upsert_namespace_entry(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | no | +| `PreviousID` | | no | +| `Force` | | no | +| `FailedValidation` | | no | +| `ValidationErrors` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +## SQL Privilege changes + +Events in this category pertain to DDL (Data Definition Language) +operations performed by SQL statements that modify the privilege +grants for stored objects. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `PRIVILEGES` channel. + + +### `alter_database_owner` + +An event of type `alter_database_owner` is recorded when a database's owner is changed. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database being affected. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_default_privileges` + +An event of type `alter_default_privileges` is recorded when default privileges are changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | yes | +| `RoleName` | Either role_name should be populated or for_all_roles should be true. The role having its default privileges altered. | yes | +| `ForAllRoles` | Identifies if FOR ALL ROLES is used. | no | +| `SchemaName` | The name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `alter_function_owner` + +An event of type `alter_function_owner` is recorded when the owner of a user-defined function is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | The name of the affected user-defined function. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_schema_owner` + +An event of type `alter_schema_owner` is recorded when a schema's owner is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_table_owner` + +An event of type `alter_table_owner` is recorded when the owner of a table, view or sequence is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected object. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_type_owner` + +An event of type `alter_type_owner` is recorded when the owner of a user-defined type is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | +| `Owner` | The name of the new owner. 
| yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `change_database_privilege` + +An event of type `change_database_privilege` is recorded when privileges are +added to / removed from a user for a database object. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. 
The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_function_privilege` + +An event of type `change_function_privilege` is recorded when privileges are added to / removed from a user for a function object. + +| Field | Description | Sensitive | +|--|--|--| +| `FuncName` | The name of the affected function. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_schema_privilege` + +An event of type `change_schema_privilege` is recorded when privileges are added to / +removed from a user for a schema object. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. 
| yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_table_privilege` + +An event of type `change_table_privilege` is recorded when privileges are added to / removed +from a user for a table, sequence or view object. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_type_privilege` + +An event of type `change_type_privilege` is recorded when privileges are added to / +removed from a user for a type object. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +## SQL Session events + +Events in this category report SQL client connections +and sessions. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these miscellaneous events are +preserved in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `SESSIONS` channel. + + +### `client_authentication_failed` + +An event of type `client_authentication_failed` is reported when a client session +did not authenticate successfully. 
+ +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Reason` | The reason for the authentication failure. See below for possible values for type `AuthFailReason`. | no | +| `Detail` | The detailed error for the authentication failure. | partially | +| `Method` | The authentication method used. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_authentication_info` + +An event of type `client_authentication_info` is reported for intermediate +steps during the authentication process. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Method` | The authentication method used, once known. 
| no | +| `Info` | The authentication progress message. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_authentication_ok` + +An event of type `client_authentication_ok` is reported when a client session +was authenticated successfully. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Method` | The authentication method used. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. 
| no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_connection_end` + +An event of type `client_connection_end` is reported when a client connection +is closed. This is reported even when authentication +fails, and even for simple cancellation messages. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_connections.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Duration` | The duration of the connection in nanoseconds. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. 
| no | + +### `client_connection_start` + +An event of type `client_connection_start` is reported when a client connection +is established. This is reported even when authentication +fails, and even for simple cancellation messages. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_connections.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | + +### `client_session_end` + +An event of type `client_session_end` is reported when a client session +is completed. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Duration` | The duration of the connection in nanoseconds. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. 
| yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +## SQL Slow Query Log + +Events in this category report slow query execution. + +Note: these events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SQL_PERF` channel. + + +### `large_row` + +An event of type `large_row` is recorded when a statement tries to write a row larger than +cluster setting `sql.guardrails.max_row_size_log` to the database. Multiple +LargeRow events will be recorded for statements writing multiple large rows. +LargeRow events are recorded before the transaction commits, so in the case +of transaction abort there will not be a corresponding row in the database. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RowSize` | | no | +| `TableID` | | no | +| `FamilyID` | | no | +| `PrimaryKey` | | yes | + +### `slow_query` + +An event of type `slow_query` is recorded when a query triggers the "slow query" condition. 
+ +As of this writing, the condition requires: +- the cluster setting `sql.log.slow_query.latency_threshold` +set to a non-zero value, AND +- EITHER of the following conditions: +- the actual age of the query exceeds the configured threshold; AND/OR +- the query performs a full table/index scan AND the cluster setting +`sql.log.slow_query.experimental_full_table_scans.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. 
| partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `txn_rows_read_limit` + +An event of type `txn_rows_read_limit` is recorded when a transaction tries to read more rows than +cluster setting `sql.defaults.transaction_rows_read_log`. There will only be +a single record for a single transaction (unless it is retried) even if there +are more statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +### `txn_rows_written_limit` + +An event of type `txn_rows_written_limit` is recorded when a transaction tries to write more rows +than cluster setting `sql.defaults.transaction_rows_written_log`. There will +only be a single record for a single transaction (unless it is retried) even +if there are more mutation statements within the transaction that haven't +been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +## SQL Slow Query Log (Internal) + +Events in this category report slow query execution by +internal executors, i.e., when CockroachDB internally issues +SQL statements. + +Note: these events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SQL_INTERNAL_PERF` channel. + + +### `large_row_internal` + +An event of type `large_row_internal` is recorded when an internal query tries to write a row +larger than cluster settings `sql.guardrails.max_row_size_log` or +`sql.guardrails.max_row_size_err` to the database. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RowSize` | | no | +| `TableID` | | no | +| `FamilyID` | | no | +| `PrimaryKey` | | yes | + +### `slow_query_internal` + +An event of type `slow_query_internal` is recorded when a query triggers the "slow query" condition, +and the cluster setting `sql.log.slow_query.internal_queries.enabled` is +set. +See the documentation for the event type `slow_query` for details about +the "slow query" condition. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. 
| no | + +### `txn_rows_read_limit_internal` + +An event of type `txn_rows_read_limit_internal` is recorded when an internal transaction tries to +read more rows than cluster setting `sql.defaults.transaction_rows_read_log` +or `sql.defaults.transaction_rows_read_err`. There will only be a single +record for a single transaction (unless it is retried) even if there are more +mutation statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. 
| no | + +### `txn_rows_written_limit_internal` + +An event of type `txn_rows_written_limit_internal` is recorded when an internal transaction tries to +write more rows than cluster setting +`sql.defaults.transaction_rows_written_log` or +`sql.defaults.transaction_rows_written_err`. There will only be a single +record for a single transaction (unless it is retried) even if there are more +mutation statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. 
| no | + +## SQL User and Role operations + +Events in this category pertain to SQL statements that modify the +properties of users and roles. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `USER_ADMIN` channel. + + +### `alter_role` + +An event of type `alter_role` is recorded when a role is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the affected user/role. | yes | +| `Options` | The options set on the user/role. | no | +| `SetInfo` | Information corresponding to an ALTER ROLE SET statement. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_role` + +An event of type `create_role` is recorded when a role is created. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the new user/role. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_role` + +An event of type `drop_role` is recorded when a role is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the affected user/role. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `grant_role` + +An event of type `grant_role` is recorded when a role is granted. + + +| Field | Description | Sensitive | +|--|--|--| +| `GranteeRoles` | The roles being granted to. | yes | +| `Members` | The roles being granted. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `password_hash_converted` + +An event of type `password_hash_converted` is recorded when the password credentials +are automatically converted server-side. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the user/role whose credentials have been converted. | yes | +| `OldMethod` | The previous hash method. | no | +| `NewMethod` | The new hash method. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Storage telemetry events + + + +Events in this category are logged to the `TELEMETRY` channel. + + +### `level_stats` + +An event of type `level_stats` contains per-level statistics for an LSM. + + +| Field | Description | Sensitive | +|--|--|--| +| `Level` | level is the level ID in a LSM (e.g. level(L0) == 0, etc.) | no | +| `NumFiles` | num_files is the number of files in the level (gauge). | no | +| `SizeBytes` | size_bytes is the size of the level, in bytes (gauge). | no | +| `Score` | score is the compaction score of the level (gauge). | no | +| `BytesIn` | bytes_in is the number of bytes written to this level (counter). | no | +| `BytesIngested` | bytes_ingested is the number of bytes ingested into this level (counter). | no | +| `BytesMoved` | bytes_moved is the number of bytes moved into this level via a move-compaction (counter). | no | +| `BytesRead` | bytes_read is the number of bytes read from this level, during compactions (counter). | no | +| `BytesCompacted` | bytes_compacted is the number of bytes written to this level during compactions (counter). | no | +| `BytesFlushed` | bytes flushed is the number of bytes flushed to this level. This value is always zero for levels other than L0 (counter). 
| no | +| `TablesCompacted` | tables_compacted is the count of tables compacted into this level (counter). | no | +| `TablesFlushed` | tables_flushed is the count of tables flushed into this level (counter). | no | +| `TablesIngested` | tables_ingested is the count of tables ingested into this level (counter). | no | +| `TablesMoved` | tables_moved is the count of tables moved into this level via move-compactions (counter). | no | +| `NumSublevels` | num_sublevel is the count of sublevels for the level. This value is always zero for levels other than L0 (gauge). | no | + + + +### `store_stats` + +An event of type `store_stats` contains per store stats. + +Note that because stats are scoped to the lifetime of the process, counters +(and certain gauges) will be reset across node restarts. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeId` | node_id is the ID of the node. | no | +| `StoreId` | store_id is the ID of the store. | no | +| `Levels` | levels is a nested message containing per-level statistics. | yes | +| `CacheSize` | cache_size is the size of the cache for the store, in bytes (gauge). | no | +| `CacheCount` | cache_count is the number of items in the cache (gauge). | no | +| `CacheHits` | cache_hits is the number of cache hits (counter). | no | +| `CacheMisses` | cache_misses is the number of cache misses (counter). | no | +| `CompactionCountDefault` | compaction_count_default is the count of default compactions (counter). | no | +| `CompactionCountDeleteOnly` | compaction_count_delete_only is the count of delete-only compactions (counter). | no | +| `CompactionCountElisionOnly` | compaction_count_elision_only is the count of elision-only compactions (counter). | no | +| `CompactionCountMove` | compaction_count_move is the count of move-compactions (counter). | no | +| `CompactionCountRead` | compaction_count_read is the count of read-compactions (counter). 
| no | +| `CompactionCountRewrite` | compaction_count_rewrite is the count of rewrite-compactions (counter). | no | +| `CompactionNumInProgress` | compactions_num_in_progress is the number of compactions in progress (gauge). | no | +| `CompactionMarkedFiles` | compaction_marked_files is the count of files marked for compaction (gauge). | no | +| `FlushCount` | flush_count is the number of flushes (counter). | no | +| `FlushIngestCount` | | no | +| `FlushIngestTableCount` | | no | +| `FlushIngestTableBytes` | | no | +| `IngestCount` | ingest_count is the number of successful ingest operations (counter). | no | +| `MemtableSize` | memtable_size is the total size allocated to all memtables and (large) batches, in bytes (gauge). | no | +| `MemtableCount` | memtable_count is the count of memtables (gauge). | no | +| `MemtableZombieCount` | memtable_zombie_count is the count of memtables no longer referenced by the current DB state, but still in use by an iterator (gauge). | no | +| `MemtableZombieSize` | memtable_zombie_size is the size, in bytes, of all zombie memtables (gauge). | no | +| `WalLiveCount` | wal_live_count is the count of live WAL files (gauge). | no | +| `WalLiveSize` | wal_live_size is the size, in bytes, of live data in WAL files. With WAL recycling, this value is less than the actual on-disk size of the WAL files (gauge). | no | +| `WalObsoleteCount` | wal_obsolete_count is the count of obsolete WAL files (gauge). | no | +| `WalObsoleteSize` | wal_obsolete_size is the size of obsolete WAL files, in bytes (gauge). | no | +| `WalPhysicalSize` | wal_physical_size is the size, in bytes, of the WAL files on disk (gauge). | no | +| `WalBytesIn` | wal_bytes_in is the number of logical bytes written to the WAL (counter). | no | +| `WalBytesWritten` | wal_bytes_written is the number of bytes written to the WAL (counter). 
| no | +| `TableObsoleteCount` | table_obsolete_count is the number of tables which are no longer referenced by the current DB state or any open iterators (gauge). | no | +| `TableObsoleteSize` | table_obsolete_size is the size, in bytes, of obsolete tables (gauge). | no | +| `TableZombieCount` | table_zombie_count is the number of tables no longer referenced by the current DB state, but are still in use by an open iterator (gauge). | no | +| `TableZombieSize` | table_zombie_size is the size, in bytes, of zombie tables (gauge). | no | +| `RangeKeySetsCount` | range_key_sets_count is the approximate count of internal range key sets in the store. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Telemetry events + + + +Events in this category are logged to the `TELEMETRY` channel. + + +### `captured_index_usage_stats` + +An event of type `captured_index_usage_stats` + + +| Field | Description | Sensitive | +|--|--|--| +| `TotalReadCount` | TotalReadCount is the number of times the index has been read. | no | +| `LastRead` | LastRead is the timestamp at which the index was last read. | no | +| `TableID` | TableID is the ID of the table on which the index was created. This is same as descpb.TableID and is unique within the cluster. | no | +| `IndexID` | IndexID is the ID of the index within the scope of the given table. | no | +| `DatabaseName` | DatabaseName is the name of the database in which the index was created. | no | +| `TableName` | TableName is the name of the table on which the index was created. | no | +| `IndexName` | IndexName is the name of the index within the scope of the given table. | no | +| `IndexType` | IndexType is the type of the index. Index types include "primary" and "secondary". | no | +| `IsUnique` | IsUnique indicates if the index has a UNIQUE constraint. 
| no | +| `IsInverted` | IsInverted indicates if the index is an inverted index. | no | +| `CreatedAt` | CreatedAt is the timestamp at which the index was created. | no | +| `SchemaName` | SchemaName is the name of the schema in which the index was created. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `hot_ranges_stats` + +An event of type `hot_ranges_stats` + + +| Field | Description | Sensitive | +|--|--|--| +| `RangeID` | | no | +| `Qps` | | no | +| `DatabaseName` | DatabaseName is the name of the database in which the index was created. | yes | +| `TableName` | TableName is the name of the table on which the index was created. | yes | +| `IndexName` | IndexName is the name of the index within the scope of the given table. | yes | +| `SchemaName` | SchemaName is the name of the schema in which the index was created. | yes | +| `LeaseholderNodeID` | LeaseholderNodeID indicates the Node ID that is the current leaseholder for the given range. | no | +| `WritesPerSecond` | Writes per second is the recent number of keys written per second on this range. | no | +| `ReadsPerSecond` | Reads per second is the recent number of keys read per second on this range. | no | +| `WriteBytesPerSecond` | Write bytes per second is the recent number of bytes written per second on this range. | no | +| `ReadBytesPerSecond` | Read bytes per second is the recent number of bytes read per second on this range. | no | +| `CPUTimePerSecond` | CPU time per second is the recent cpu usage in nanoseconds of this range. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | + +### `m_v_c_c_iterator_stats` + +Internal storage iteration statistics for a single execution. + + +| Field | Description | Sensitive | +|--|--|--| +| `StepCount` | StepCount collects the number of times the iterator moved forward or backward over the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `StepCountInternal` | StepCountInternal collects the number of times the iterator moved forward or backward over LSM internal keys. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `SeekCount` | SeekCount collects the number of times the iterator moved to a specific key/value pair in the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `SeekCountInternal` | SeekCountInternal collects the number of times the iterator moved to a specific LSM internal key. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `BlockBytes` | BlockBytes collects the bytes in the loaded SSTable data blocks. For details, see pebble.InternalIteratorStats. | no | +| `BlockBytesInCache` | BlockBytesInCache collects the subset of BlockBytes in the block cache. For details, see pebble.InternalIteratorStats. | no | +| `KeyBytes` | KeyBytes collects the bytes in keys that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `ValueBytes` | ValueBytes collects the bytes in values that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `PointCount` | PointCount collects the count of point keys iterated over. For details, see pebble.InternalIteratorStats. | no | +| `PointsCoveredByRangeTombstones` | PointsCoveredByRangeTombstones collects the count of point keys that were iterated over that were covered by range tombstones. For details, see pebble.InternalIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. 
| no | +| `RangeKeyCount` | RangeKeyCount collects the count of range keys encountered during iteration. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeyContainedPoints` | RangeKeyContainedPoints collects the count of point keys encountered within the bounds of a range key. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeySkippedPoints` | RangeKeySkippedPoints collects the count of the subset of ContainedPoints point keys that were skipped during iteration due to range-key masking. For details, see pkg/storage/engine.go, pebble.RangeKeyIteratorStats, and docs/tech-notes/mvcc-range-tombstones.md. | no | + + + +### `recovery_event` + +An event of type `recovery_event` is an event that is logged on every invocation of BACKUP, +RESTORE, and on every BACKUP schedule creation, with the appropriate subset +of fields populated depending on the type of event. This event is also +logged whenever a BACKUP or RESTORE job completes or fails. + + +| Field | Description | Sensitive | +|--|--|--| +| `RecoveryType` | RecoveryType is the type of recovery described by this event, which is one of - backup - scheduled_backup - create_schedule - restore

It can also be a job event corresponding to the recovery, which is one of - backup_job - scheduled_backup_job - restore_job | no | +| `TargetScope` | TargetScope is the largest scope of the targets that the user is backing up or restoring based on the following order: table < schema < database < full cluster. | no | +| `IsMultiregionTarget` | IsMultiregionTarget is true if any of the targets contain objects with multi-region primitives. | no | +| `TargetCount` | TargetCount is the number of targets in the BACKUP/RESTORE. | no | +| `DestinationSubdirType` | DestinationSubdirType is - latest: if using the latest subdir - standard: if using a date-based subdir - custom: if using a custom subdir that's not date-based | no | +| `DestinationStorageTypes` | DestinationStorageTypes are the types of storage that the user is backing up to or restoring from. | no | +| `DestinationAuthTypes` | DestinationAuthTypes are the types of authentication methods that the user is using to access the destination storage. | no | +| `IsLocalityAware` | IsLocalityAware indicates if the BACKUP or RESTORE is locality aware. | no | +| `AsOfInterval` | AsOfInterval is the time interval in nanoseconds between the statement timestamp and the timestamp resolved by the AS OF SYSTEM TIME expression. The interval is expressed in nanoseconds. | no | +| `WithRevisionHistory` | WithRevisionHistory is true if the BACKUP includes revision history. | no | +| `HasEncryptionPassphrase` | HasEncryptionPassphrase is true if the user provided an encryption passphrase to encrypt/decrypt their backup. | no | +| `KMSType` | KMSType is the type of KMS the user is using to encrypt/decrypt their backup. | no | +| `KMSCount` | KMSCount is the number of KMS the user is using. | no | +| `Options` | Options contain all the names of the options specified by the user in the BACKUP or RESTORE statement. For options that are accompanied by a value, only those with non-empty values will be present.

It's important to note that there are no option values anywhere in the event payload. Future changes to telemetry should refrain from adding values to the payload unless they are properly redacted. | no | +| `DebugPauseOn` | DebugPauseOn is the type of event that the restore should pause on for debugging purposes. Currently only "error" is supported. | no | +| `JobID` | JobID is the ID of the BACKUP/RESTORE job. | no | +| `ResultStatus` | ResultStatus indicates whether the job succeeded or failed. | no | +| `ErrorText` | ErrorText is the text of the error that caused the job to fail. | partially | +| `RecurringCron` | RecurringCron is the crontab for the incremental backup. | no | +| `FullBackupCron` | FullBackupCron is the crontab for the full backup. | no | +| `CustomFirstRunTime` | CustomFirstRunTime is the timestamp for the user configured first run time. Expressed as nanoseconds since the Unix epoch. | no | +| `OnExecutionFailure` | OnExecutionFailure describes the desired behavior if the schedule fails to execute. | no | +| `OnPreviousRunning` | OnPreviousRunning describes the desired behavior if the previously scheduled BACKUP is still running. | no | +| `IgnoreExistingBackup` | IgnoreExistingBackup is true iff the BACKUP schedule should still be created even if a backup is already present in its destination. | no | +| `ApplicationName` | The application name for the session where recovery event was created. | no | +| `NumRows` | NumRows is the number of rows successfully imported, backed up or restored. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `sampled_exec_stats` + +An event of type `sampled_exec_stats` contains execution statistics that apply to both statements +and transactions. These stats as a whole are collected using a sampling approach. 
+These exec stats are meant to contain the same fields as ExecStats in +apps_stats.proto but are for a single execution rather than aggregated executions. +Fields in this struct should be updated in sync with apps_stats.proto. + + +| Field | Description | Sensitive | +|--|--|--| +| `NetworkBytes` | NetworkBytes collects the number of bytes sent over the network. | no | +| `MaxMemUsage` | MaxMemUsage collects the maximum memory usage that occurred on a node. | no | +| `ContentionTime` | ContentionTime collects the time in seconds statements in the transaction spent contending. | no | +| `NetworkMessages` | NetworkMessages collects the number of messages that were sent over the network. | no | +| `MaxDiskUsage` | MaxDiskUsage collects the maximum temporary disk usage that occurred. This is set in cases where a query had to spill to disk, e.g. when performing a large sort where not all of the tuples fit in memory. | no | +| `CPUSQLNanos` | CPUSQLNanos collects the CPU time spent executing SQL operations in nanoseconds. Currently, it is only collected for statements without mutations that have a vectorized plan. | no | +| `MVCCIteratorStats` | Internal storage iteration statistics. | yes | + + + +### `sampled_query` + +An event of type `sampled_query` is the SQL query event logged to the telemetry channel. It +contains common SQL event/execution details. + + +| Field | Description | Sensitive | +|--|--|--| +| `SkippedQueries` | skipped_queries indicate how many SQL statements were not considered for sampling prior to this one. If the field is omitted, or its value is zero, this indicates that no statement was omitted since the last event. | no | +| `CostEstimate` | Cost of the query as estimated by the optimizer. | no | +| `Distribution` | The distribution of the DistSQL query plan (local, full, or partial). | no | +| `PlanGist` | The query's plan gist bytes as a base64 encoded string. | no | +| `SessionID` | SessionID is the ID of the session that initiated the query. 
| no | +| `Database` | Name of the database that initiated the query. | no | +| `StatementID` | Statement ID of the query. | no | +| `TransactionID` | Transaction ID of the query. | no | +| `MaxFullScanRowsEstimate` | Maximum number of rows scanned by a full scan, as estimated by the optimizer. | no | +| `TotalScanRowsEstimate` | Total number of rows read by all scans in the query, as estimated by the optimizer. | no | +| `OutputRowsEstimate` | The number of rows output by the query, as estimated by the optimizer. | no | +| `StatsAvailable` | Whether table statistics were available to the optimizer when planning the query. | no | +| `NanosSinceStatsCollected` | The maximum number of nanoseconds that have passed since stats were collected on any table scanned by this query. | no | +| `BytesRead` | The number of bytes read from disk. | no | +| `RowsRead` | The number of rows read from disk. | no | +| `RowsWritten` | The number of rows written. | no | +| `InnerJoinCount` | The number of inner joins in the query plan. | no | +| `LeftOuterJoinCount` | The number of left (or right) outer joins in the query plan. | no | +| `FullOuterJoinCount` | The number of full outer joins in the query plan. | no | +| `SemiJoinCount` | The number of semi joins in the query plan. | no | +| `AntiJoinCount` | The number of anti joins in the query plan. | no | +| `IntersectAllJoinCount` | The number of intersect all joins in the query plan. | no | +| `ExceptAllJoinCount` | The number of except all joins in the query plan. | no | +| `HashJoinCount` | The number of hash joins in the query plan. | no | +| `CrossJoinCount` | The number of cross joins in the query plan. | no | +| `IndexJoinCount` | The number of index joins in the query plan. | no | +| `LookupJoinCount` | The number of lookup joins in the query plan. | no | +| `MergeJoinCount` | The number of merge joins in the query plan. | no | +| `InvertedJoinCount` | The number of inverted joins in the query plan. 
| no | +| `ApplyJoinCount` | The number of apply joins in the query plan. | no | +| `ZigZagJoinCount` | The number of zig zag joins in the query plan. | no | +| `ContentionNanos` | The duration of time in nanoseconds that the query experienced contention. | no | +| `Regions` | The regions of the nodes where SQL processors ran. | no | +| `NetworkBytesSent` | The number of network bytes sent by nodes for this query. | no | +| `MaxMemUsage` | The maximum amount of memory usage by nodes for this query. | no | +| `MaxDiskUsage` | The maximum amount of disk usage by nodes for this query. | no | +| `KVBytesRead` | The number of bytes read at the KV layer for this query. | no | +| `KVPairsRead` | The number of key-value pairs read at the KV layer for this query. | no | +| `KVRowsRead` | The number of rows read at the KV layer for this query. | no | +| `NetworkMessages` | The number of network messages sent by nodes for this query. | no | +| `IndexRecommendations` | Generated index recommendations for this query. | no | +| `ScanCount` | The number of scans in the query plan. | no | +| `ScanWithStatsCount` | The number of scans using statistics (including forecasted statistics) in the query plan. | no | +| `ScanWithStatsForecastCount` | The number of scans using forecasted statistics in the query plan. | no | +| `TotalScanRowsWithoutForecastsEstimate` | Total number of rows read by all scans in the query, as estimated by the optimizer without using forecasts. | no | +| `NanosSinceStatsForecasted` | The greatest quantity of nanoseconds that have passed since the forecast time (or until the forecast time, if it is in the future, in which case it will be negative) for any table with forecasted stats scanned by this query. | no | +| `Indexes` | The list of indexes used by this query. | no | +| `CpuTimeNanos` | Collects the cumulative CPU time spent executing SQL operations in nanoseconds. 
Currently, it is only collected for statements without mutations that have a vectorized plan. | no | +| `KvGrpcCalls` | The number of grpc calls done to get data from KV nodes | no | +| `KvTimeNanos` | Cumulated time spent waiting for a KV request. This includes disk IO time and potentially network time (if any of the keys are not local). | no | +| `ServiceLatencyNanos` | The time to service the query, from start of parse to end of execute. | no | +| `OverheadLatencyNanos` | The difference between service latency and the sum of parse latency + plan latency + run latency . | no | +| `RunLatencyNanos` | The time to run the query and fetch or compute the result rows. | no | +| `PlanLatencyNanos` | The time to transform the AST into a logical query plan. | no | +| `IdleLatencyNanos` | The time between statement executions in a transaction | no | +| `ParseLatencyNanos` | The time to transform the SQL string into an abstract syntax tree (AST). | no | +| `MvccStepCount` | StepCount collects the number of times the iterator moved forward or backward over the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccStepCountInternal` | StepCountInternal collects the number of times the iterator moved forward or backward over LSM internal keys. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccSeekCount` | SeekCount collects the number of times the iterator moved to a specific key/value pair in the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccSeekCountInternal` | SeekCountInternal collects the number of times the iterator moved to a specific LSM internal key. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccBlockBytes` | BlockBytes collects the bytes in the loaded SSTable data blocks. For details, see pebble.InternalIteratorStats. 
| no | +| `MvccBlockBytesInCache` | BlockBytesInCache collects the subset of BlockBytes in the block cache. For details, see pebble.InternalIteratorStats. | no | +| `MvccKeyBytes` | KeyBytes collects the bytes in keys that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccValueBytes` | ValueBytes collects the bytes in values that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccPointCount` | PointCount collects the count of point keys iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccPointsCoveredByRangeTombstones` | PointsCoveredByRangeTombstones collects the count of point keys that were iterated over that were covered by range tombstones. For details, see pebble.InternalIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeyCount` | RangeKeyCount collects the count of range keys encountered during iteration. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeyContainedPoints` | RangeKeyContainedPoints collects the count of point keys encountered within the bounds of a range key. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeySkippedPoints` | RangeKeySkippedPoints collects the count of the subset of ContainedPoints point keys that were skipped during iteration due to range-key masking. For details, see pkg/storage/engine.go, pebble.RangeKeyIteratorStats, and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `SchemaChangerMode` | SchemaChangerMode is the mode that was used to execute the schema change, if any. | no | +| `SQLInstanceIDs` | SQLInstanceIDs is a list of all the SQL instances used in this statement's execution. | no | +| `KVNodeIDs` | KVNodeIDs is a list of all the KV nodes used in this statement's execution. | no | +| `StatementFingerprintID` | Statement fingerprint ID of the query. 
| no | +| `UsedFollowerRead` | UsedFollowerRead indicates whether at least some reads were served by the follower replicas. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was retried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. 
| no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `sampled_transaction` + +An event of type `sampled_transaction` is the event logged to telemetry at the end of transaction execution. + + +| Field | Description | Sensitive | +|--|--|--| +| `User` | User is the user account that triggered the transaction. The special usernames `root` and `node` are not considered sensitive. | depends | +| `ApplicationName` | ApplicationName is the application name for the session where the transaction was executed. This is included in the event to ease filtering of logging output by application. | no | +| `TxnCounter` | TxnCounter is the sequence number of the SQL transaction inside its session. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `TransactionID` | TransactionID is the id of the transaction. | no | +| `Committed` | Committed indicates if the transaction committed successfully. We want to include this value even if it is false. | no | +| `ImplicitTxn` | ImplicitTxn indicates if the transaction was an implicit one. We want to include this value even if it is false. | no | +| `StartTimeUnixNanos` | StartTimeUnixNanos is the time the transaction was started. Expressed as unix time in nanoseconds. | no | +| `EndTimeUnixNanos` | EndTimeUnixNanos the time the transaction finished (either committed or aborted). Expressed as unix time in nanoseconds. | no | +| `ServiceLatNanos` | ServiceLatNanos is the time to service the whole transaction, from start to end of execution. | no | +| `SQLSTATE` | SQLSTATE is the SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. 
| no | +| `ErrorText` | ErrorText is the text of the error if any. | partially | +| `NumRetries` | NumRetries is the number of times the txn was retried automatically by the server. | no | +| `LastAutoRetryReason` | LastAutoRetryReason is a string containing the reason for the last automatic retry. | partially | +| `NumRows` | NumRows is the total number of rows returned across all statements. | no | +| `RetryLatNanos` | RetryLatNanos is the amount of time spent retrying the transaction. | no | +| `CommitLatNanos` | CommitLatNanos is the amount of time spent committing the transaction after all statement operations. | no | +| `IdleLatNanos` | IdleLatNanos is the amount of time spent waiting for the client to send statements while the transaction is open. | no | +| `BytesRead` | BytesRead is the number of bytes read from disk. | no | +| `RowsRead` | RowsRead is the number of rows read from disk. | no | +| `RowsWritten` | RowsWritten is the number of rows written to disk. | no | +| `SampledExecStats` | SampledExecStats is a nested field containing execution statistics. This field will be omitted if the stats were not sampled. | yes | +| `SkippedTransactions` | SkippedTransactions is the number of transactions that were skipped as part of sampling prior to this one. We only count skipped transactions when telemetry logging is enabled and the sampling mode is set to "transaction". | no | +| `TransactionFingerprintID` | TransactionFingerprintID is the fingerprint ID of the transaction. This can be used to find the transaction in the console. | no | +| `StatementFingerprintIDs` | StatementFingerprintIDs is an array of statement fingerprint IDs belonging to this transaction. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | + +### `schema_descriptor` + +An event of type `schema_descriptor` is an event for schema telemetry, whose purpose is +to take periodic snapshots of the cluster's SQL schema and publish them in +the telemetry log channel. For all intents and purposes, the data in such a +snapshot can be thought of the outer join of certain system tables: +namespace, descriptor, and at some point perhaps zones, etc. + +Snapshots are too large to conveniently be published as a single log event, +so instead they're broken down into SchemaDescriptor events which +contain the data in one record of this outer join projection. These events +are prefixed by a header (a SchemaSnapshotMetadata event). + + +| Field | Description | Sensitive | +|--|--|--| +| `SnapshotID` | SnapshotID is the unique identifier of the snapshot that this event is part of. | no | +| `ParentDatabaseID` | ParentDatabaseID matches the same key column in system.namespace. | no | +| `ParentSchemaID` | ParentSchemaID matches the same key column in system.namespace. | no | +| `Name` | Name matches the same key column in system.namespace. | no | +| `DescID` | DescID matches the 'id' column in system.namespace and system.descriptor. | no | +| `Desc` | Desc matches the 'descriptor' column in system.descriptor. Some contents of the descriptor may be redacted to prevent leaking PII. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `schema_snapshot_metadata` + +An event of type `schema_snapshot_metadata` is an event describing a schema snapshot, which +is a set of SchemaDescriptor messages sharing the same SnapshotID. + + +| Field | Description | Sensitive | +|--|--|--| +| `SnapshotID` | SnapshotID is the unique identifier of this snapshot. | no | +| `NumRecords` | NumRecords is how many SchemaDescriptor events are in the snapshot. 
| no | +| `AsOfTimestamp` | AsOfTimestamp is when the snapshot was taken. This is equivalent to the timestamp given in the AS OF SYSTEM TIME clause when querying the namespace and descriptor tables in the system database. Expressed as nanoseconds since the Unix epoch. | no | +| `Errors` | Errors records any errors encountered when post-processing this snapshot, which includes the redaction of any potential PII. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Zone config events + +Events in this category pertain to zone configuration changes on +the SQL schema or system ranges. + +When zone configs apply to individual tables or other objects in a +SQL logical schema, they are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these zone config events are preserved +in each tenant's own `system.eventlog` table. + +When they apply to cluster-level ranges (e.g., the system zone config), +they are stored in the system tenant's own `system.eventlog` table. + +Events in this category are logged to the `OPS` channel. + + +### `remove_zone_config` + +An event of type `remove_zone_config` is recorded when a zone config is removed. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. 
The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + +### `set_zone_config` + +An event of type `set_zone_config` is recorded when a zone config is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `ResolvedOldConfig` | The string representation of the resolved old zone config. This is not necessarily the same as the zone config that was previously set -- as it includes the resolved values of the zone config options. In other words, a zone config that hasn't been properly "set" yet (and inherits from its parent) will have a resolved_old_config that has details of the values it inherits from its parent. This is particularly useful to get a proper diff between the old and new zone config. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + + + + +## Enumeration types + +### `AuthFailReason` + +AuthFailReason is the inventory of possible reasons for an +authentication failure. + + +| Value | Textual alias in code or documentation | Description | +|--|--|--| +| 0 | UNKNOWN | is reported when the reason is unknown. | +| 1 | USER_RETRIEVAL_ERROR | occurs when there was an internal error accessing the principals. | +| 2 | USER_NOT_FOUND | occurs when the principal is unknown. | +| 3 | LOGIN_DISABLED | occurs when the user does not have LOGIN privileges. | +| 4 | METHOD_NOT_FOUND | occurs when no HBA rule matches or the method does not exist. | +| 5 | PRE_HOOK_ERROR | occurs when the authentication handshake encountered a protocol error. | +| 6 | CREDENTIALS_INVALID | occurs when the client-provided credentials were invalid. | +| 7 | CREDENTIALS_EXPIRED | occur when the credentials provided by the client are expired. | +| 8 | NO_REPLICATION_ROLEOPTION | occurs when the connection requires a replication role option, but the user does not have it. | +| 9 | AUTHORIZATION_ERROR | is used for errors during the authorization phase. 
For example, this would include issues with mapping LDAP groups to SQL roles and granting those roles to the user. | + + + diff --git a/src/current/_includes/cockroach-generated/release-24.3/logformats.md b/src/current/_includes/cockroach-generated/release-24.3/logformats.md new file mode 100644 index 00000000000..89580c9fc15 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-24.3/logformats.md @@ -0,0 +1,382 @@ + +The supported log output formats are documented below. + + +- [`crdb-v1`](#format-crdb-v1) + +- [`crdb-v1-count`](#format-crdb-v1-count) + +- [`crdb-v1-tty`](#format-crdb-v1-tty) + +- [`crdb-v1-tty-count`](#format-crdb-v1-tty-count) + +- [`crdb-v2`](#format-crdb-v2) + +- [`crdb-v2-tty`](#format-crdb-v2-tty) + +- [`json`](#format-json) + +- [`json-compact`](#format-json-compact) + +- [`json-fluent`](#format-json-fluent) + +- [`json-fluent-compact`](#format-json-fluent-compact) + + + +## Format `crdb-v1` + + +This is a legacy file format used from CockroachDB v1.0. + +Each log entry is emitted using a common prefix, described below, followed by: + +- The logging context tags enclosed between `[` and `]`, if any. It is possible + for this to be omitted if there were no context tags. +- Optionally, a counter column, if the option 'show-counter' is enabled. See below for details. +- the text of the log entry. + +Beware that the text of the log entry can span multiple lines. +The following caveats apply: + +- The text of the log entry can start with text enclosed between `[` and `]`. + If there were no logging tags to start with, it is not possible to distinguish between + logging context tag information and a `[...]` string in the main text of the + log entry. This means that this format is ambiguous. + + To remove this ambiguity, you can use the option 'show-counter'. + +- The text of the log entry can embed arbitrary application-level strings, + including strings that represent log entries. 
In particular, an accident + of implementation can cause the common entry prefix (described below) + to also appear on a line of its own, as part of the payload of a previous + log entry. There is no automated way to recognize when this occurs. + Care must be taken by a human observer to recognize these situations. + +- The log entry parser provided by CockroachDB to read log files is faulty + and is unable to recognize the aforementioned pitfall; nor can it read + entries larger than 64KiB successfully. Generally, use of this internal + log entry parser is discouraged for entries written with this format. + +See the newer format `crdb-v2` for an alternative +without these limitations. + +### Header lines + +At the beginning of each file, a header is printed using a similar format as +regular log entries. This header reports when the file was created, +which parameters were used to start the server, the server identifiers +if known, and other metadata about the running process. + +- This header appears to be logged at severity `INFO` (with an `I` prefix + at the start of the line) even though it does not really have a severity. +- The header is printed unconditionally even when a filter is configured to + omit entries at the `INFO` level. + +### Common log entry prefix + +Each line of output starts with the following prefix: + + Lyymmdd hh:mm:ss.uuuuuu goid [chan@]file:line marker tags counter + +Reminder, the tags may be omitted; and the counter is only printed if the option +'show-counter' is specified. + +| Field | Description | +|-----------------|--------------------------------------------------------------------------------------------------------------------------------------| +| L | A single character, representing the [log level](logging.html#logging-levels-severities) (e.g., `I` for `INFO`). | +| yy | The year (zero padded; i.e., 2016 is `16`). | +| mm | The month (zero padded; i.e., May is `05`). | +| dd | The day (zero padded). 
| +| hh:mm:ss.uuuuuu | Time in hours, minutes and fractional seconds. Timezone is UTC. | +| goid | The goroutine id (omitted if zero for use by tests). | +| chan | The channel number (omitted if zero for backward compatibility). | +| file | The file name where the entry originated. | +| line | The line number where the entry originated. | +| marker | Redactability marker ` + redactableIndicator + ` (see below for details). | +| tags | The logging tags, enclosed between `[` and `]`. May be absent. | +| counter | The entry counter. Only included if 'show-counter' is enabled. | + +The redactability marker can be empty; in this case, its position in the common prefix is +a double ASCII space character which can be used to reliably identify this situation. + +If the marker ` + redactableIndicator + ` is present, the remainder of the log entry +contains delimiters (‹...›) around +fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) when log redaction is requested. + + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `show-counter` | Whether to include the counter column in the line header. Without it, the format may be ambiguous due to the optionality of tags. | +| `colors` | The color profile to use. Possible values: none, auto, ansi, 256color. Default is auto. | +| `timezone` | The timezone to use for the timestamp column. The value can be any timezone name recognized by the Go standard library. 
Default is `UTC` | + + + +## Format `crdb-v1-count` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: true` +- `colors: none` + + +## Format `crdb-v1-tty` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: false` +- `colors: auto` + + +## Format `crdb-v1-tty-count` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: true` +- `colors: auto` + + +## Format `crdb-v2` + +This is the main file format used from CockroachDB v21.1. + +Each log entry is emitted using a common prefix, described below, +followed by the text of the log entry. + +### Entry format + +Each line of output starts with the following prefix: + + Lyymmdd hh:mm:ss.uuuuuu goid [chan@]file:line marker [tags...] counter cont + +| Field | Description | +|-----------------|--------------------------------------------------------------------------------------------------------------------------------------| +| L | A single character, representing the [log level](logging.html#logging-levels-severities) (e.g., `I` for `INFO`). | +| yy | The year (zero padded; i.e., 2016 is `16`). | +| mm | The month (zero padded; i.e., May is `05`). | +| dd | The day (zero padded). | +| hh:mm:ss.uuuuuu | Time in hours, minutes and fractional seconds. Timezone is UTC. | +| goid | The goroutine id (zero when cannot be determined). | +| chan | The channel number (omitted if zero for backward compatibility). | +| file | The file name where the entry originated. Also see below. | +| line | The line number where the entry originated. | +| marker | Redactability marker "⋮" (see below for details). | +| tags | The logging tags, enclosed between `[` and `]`. See below. | +| counter | The optional entry counter (see below for details). | +| cont | Continuation mark for structured and multi-line entries. See below. 
| + +The `chan@` prefix before the file name indicates the logging channel, +and is omitted if the channel is `DEV`. + +The file name may be prefixed by the string `(gostd) ` to indicate +that the log entry was produced inside the Go standard library, instead +of a CockroachDB component. Entry parsers must be configured to ignore this prefix +when present. + +`marker` can be empty; in this case, its position in the common prefix is +a double ASCII space character which can be used to reliably identify this situation. +If the marker "⋮" is present, the remainder of the log entry +contains delimiters (‹...›) +around fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) +when log redaction is requested. + +The logging `tags` are enclosed between square brackets `[...]`, +and the syntax `[-]` is used when there are no logging tags +associated with the log entry. + +`counter` is numeric, and is incremented for every +log entry emitted to this sink. (There is thus one counter sequence per +sink.) For entries that do not have a counter value +associated (e.g., header entries in file sinks), the counter position +in the common prefix is empty: `tags` is then +followed by two ASCII space characters, instead of one space; the `counter`, +and another space. The presence of the two ASCII spaces indicates +reliably that no counter was present. + +`cont` is a format/continuation indicator: + +| Continuation indicator | ASCII | Description | +|------------------------|-------|--| +| space | 0x20 | Start of an unstructured entry. | +| equal sign, "=" | 0x3d | Start of a structured entry. | +| exclamation mark, "!" | 0x21 | Start of an embedded stack trace. | +| plus sign, "+" | 0x2b | Continuation of a multi-line entry. The payload contains a newline character at this position. | +| vertical bar | 0x7c | Continuation of a large entry. 
| + +### Examples + +Example single-line unstructured entry: + +~~~ +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 started with engine type ‹2› +~~~ + +Example multi-line unstructured entry: + +~~~ +I210116 21:49:17.083093 14 1@cli/start.go:690 ⋮ [-] 40 node startup completed: +I210116 21:49:17.083093 14 1@cli/start.go:690 ⋮ [-] 40 +CockroachDB node starting at 2021-01-16 21:49 (took 0.0s) +~~~ + +Example structured entry: + +~~~ +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 ={"Timestamp":1610833757080706620,"EventType":"node_restart"} +~~~ + +Example long entries broken up into multiple lines: + +~~~ +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.... +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 |aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +~~~ + +~~~ +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 ={"Timestamp":1610833757080706620,"EventTy... +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 |pe":"node_restart"} +~~~ + +### Backward-compatibility notes + +Entries in this format can be read by most `crdb-v1` log parsers, +in particular the one included in the DB console and +also the [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) +facility. + +However, implementers of previous version parsers must +understand that the logging tags field is now always +included, and the lack of logging tags is included +by a tag string set to `[-]`. + +Likewise, the entry counter is now also always included, +and there is a special character after `counter` +to indicate whether the remainder of the line is a +structured entry, or a continuation of a previous entry. + +Finally, in the previous format, structured entries +were prefixed with the string `Structured entry: `. In +the new format, they are prefixed by the `=` continuation +indicator. 
+ + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `colors` | The color profile to use. Possible values: none, auto, ansi, 256color. Default is auto. | +| `timezone` | The timezone to use for the timestamp column. The value can be any timezone name recognized by the Go standard library. Default is `UTC` | + + + +## Format `crdb-v2-tty` + +This format name is an alias for 'crdb-v2' with +the following format option defaults: + +- `colors: auto` + + +## Format `json` + +This format emits log entries as a JSON payload. + +The JSON object is guaranteed to not contain unescaped newlines +or other special characters, and the entry as a whole is followed +by a newline character. This makes the format suitable for +processing over a stream unambiguously. + +Each entry contains at least the following fields: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `tag` | `tag` | (Only if the option `fluent-tag: true` is given.) A Fluent tag for the event, formed by the process name and the logging channel. | +| `d` | `datetime` | The pretty-printed date/time of the event timestamp, if enabled via options. | +| `f` | `file` | The name of the source file where the event was emitted. | +| `g` | `goroutine` | The identifier of the goroutine where the event was emitted. | +| `l` | `line` | The line number where the event was emitted in the source. | +| `r` | `redactable` | Whether the payload is redactable (see below for details). | +| `t` | `timestamp` | The timestamp at which the event was emitted on the logging channel. | +| `v` | `version` | The binary version with which the event was generated. 
| + + +After a couple of *header* entries written at the beginning of each log sink, +all subsequent log entries also contain the following fields: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `C` | `channel` | The name of the logging channel where the event was sent. | +| `sev` | `severity` | The severity of the event. | +| `c` | `channel_numeric` | The numeric identifier for the logging channel where the event was sent. | +| `n` | `entry_counter` | The entry number on this logging sink, relative to the last process restart. | +| `s` | `severity_numeric` | The numeric value of the severity of the event. | + + +Additionally, the following fields are conditionally present: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `N` | `node_id` | The node ID where the event was generated, once known. Only reported for single-tenant or KV servers. | +| `x` | `cluster_id` | The cluster ID where the event was generated, once known. Only reported for single-tenant or KV servers. | +| `q` | `instance_id` | The SQL instance ID where the event was generated, once known. | +| `T` | `tenant_id` | The SQL tenant ID where the event was generated, once known. | +| `V` | `tenant_name` | The SQL virtual cluster where the event was generated, once known. | +| `tags` | `tags` | The logging context tags for the entry, if there were context tags. | +| `message` | `message` | For unstructured events, the flat text payload. | +| `event` | `event` | The logging event, if structured (see below for details). | +| `stacks` | `stacks` | Goroutine stacks, for fatal events. | + +When an entry is structured, the `event` field maps to a dictionary +whose structure is one of the documented structured events. 
See the [reference documentation](eventlog.html) +for structured events for a list of possible payloads. + +When the entry is marked as `redactable`, the `tags`, `message`, and/or `event` payloads +contain delimiters (‹...›) around +fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) when log redaction is requested. + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `datetime-format` | The format to use for the `datetime` field. The value can be one of `none`, `iso8601`/`rfc3339` (synonyms), or `rfc1123`. Default is `none`. | +| `datetime-timezone` | The timezone to use for the `datetime` field. The value can be any timezone name recognized by the Go standard library. Default is `UTC` | +| `tag-style` | The tags to include in the envelope. The value can be `compact` (one letter tags) or `verbose` (long-form tags). Default is `verbose`. | +| `fluent-tag` | Whether to produce an additional field called `tag` for Fluent compatibility. Default is `false`. 
| + + + +## Format `json-compact` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: false` +- `tag-style: compact` + + +## Format `json-fluent` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: true` +- `tag-style: verbose` + + +## Format `json-fluent-compact` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: true` +- `tag-style: compact` + + diff --git a/src/current/_includes/cockroach-generated/release-24.3/logging.md b/src/current/_includes/cockroach-generated/release-24.3/logging.md new file mode 100644 index 00000000000..8b628dc2dd9 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-24.3/logging.md @@ -0,0 +1,179 @@ +## Logging levels (severities) + +### INFO + +The `INFO` severity is used for informational messages that do not +require action. + +### WARNING + +The `WARNING` severity is used for situations which may require special handling, +where normal operation is expected to resume automatically. + +### ERROR + +The `ERROR` severity is used for situations that require special handling, +where normal operation could not proceed as expected. +Other operations can continue mostly unaffected. + +### FATAL + +The `FATAL` severity is used for situations that require an immediate, hard +server shutdown. A report is also sent to telemetry if telemetry +is enabled. + + +## Logging channels + +### `DEV` + +The `DEV` channel is used during development to collect log +details useful for troubleshooting that fall outside the +scope of other channels. It is also the default logging +channel for events not associated with a channel. + +This channel is special in that there are no constraints as to +what may or may not be logged on it. 
Conversely, users in +production deployments are invited to not collect `DEV` logs in +centralized logging facilities, because they likely contain +sensitive operational data. +See [Configure logs](configure-logs.html#dev-channel). + +### `OPS` + +The `OPS` channel is used to report "point" operational events, +initiated by user operators or automation: + + - Operator or system actions on server processes: process starts, + stops, shutdowns, crashes (if they can be logged), + including each time: command-line parameters, current version being run + - Actions that impact the topology of a cluster: node additions, + removals, decommissions, etc. + - Job-related initiation or termination + - [Cluster setting](cluster-settings.html) changes + - [Zone configuration](configure-replication-zones.html) changes + +### `HEALTH` + +The `HEALTH` channel is used to report "background" operational +events, initiated by CockroachDB or reporting on automatic processes: + + - Current resource usage, including critical resource usage + - Node-node connection events, including connection errors and + gossip details + - Range and table leasing events + - Up- and down-replication, range unavailability + +### `STORAGE` + +The `STORAGE` channel is used to report low-level storage +layer events (RocksDB/Pebble). + +### `SESSIONS` + +The `SESSIONS` channel is used to report client network activity when enabled via +the `server.auth_log.sql_connections.enabled` and/or +`server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): + + - Connections opened/closed + - Authentication events: logins, failed attempts + - Session and query cancellation + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. 
+ +### `SQL_SCHEMA` + +The `SQL_SCHEMA` channel is used to report changes to the +SQL logical schema, excluding privilege and ownership changes +(which are reported separately on the `PRIVILEGES` channel) and +zone configuration changes (which go to the `OPS` channel). + +This includes: + + - Database/schema/table/sequence/view/type creation + - Adding/removing/changing table columns + - Changing sequence parameters + +`SQL_SCHEMA` events generally comprise changes to the schema that affect the +functional behavior of client apps using stored objects. + +### `USER_ADMIN` + +The `USER_ADMIN` channel is used to report changes +in users and roles, including: + + - Users added/dropped + - Changes to authentication credentials (e.g., passwords, validity, etc.) + - Role grants/revocations + - Role option grants/revocations + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. + +### `PRIVILEGES` + +The `PRIVILEGES` channel is used to report data +authorization changes, including: + + - Privilege grants/revocations on database, objects, etc. + - Object ownership changes + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. + +### `SENSITIVE_ACCESS` + +The `SENSITIVE_ACCESS` channel is used to report SQL +data access to sensitive data: + + - Data access audit events (when table audit is enabled via + [ALTER TABLE ... EXPERIMENTAL_AUDIT](alter-table.html#experimental_audit)) + - Data access audit events (when role-based audit is enabled via + [`sql.log.user_audit` cluster setting](role-based-audit-logging.html#syntax-of-audit-settings)) + - SQL statements executed by users with the admin role + - Operations that write to system tables + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. 
+ +### `SQL_EXEC` + +The `SQL_EXEC` channel is used to report SQL execution on +behalf of client connections: + + - Logical SQL statement executions (when enabled via the + `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) + - uncaught Go panic errors during the execution of a SQL statement. + +### `SQL_PERF` + +The `SQL_PERF` channel is used to report SQL executions +that are marked as "out of the ordinary" +to facilitate performance investigations. +This includes the SQL "slow query log". + +Arguably, this channel overlaps with `SQL_EXEC`. +However, we keep both channels separate for backward compatibility +with versions prior to v21.1, where the corresponding events +were redirected to separate files. + +### `SQL_INTERNAL_PERF` + +The `SQL_INTERNAL_PERF` channel is like the `SQL_PERF` channel, but is aimed at +helping developers of CockroachDB itself. It exists as a separate +channel so as to not pollute the `SQL_PERF` logging output with +internal troubleshooting details. + +### `TELEMETRY` + +The `TELEMETRY` channel reports telemetry events. Telemetry events describe +feature usage within CockroachDB and anonymizes any application- +specific data. + +### `KV_DISTRIBUTION` + +The `KV_DISTRIBUTION` channel is used to report data distribution events, such as moving +replicas between stores in the cluster, or adding (removing) replicas to +ranges. 
+ diff --git a/src/current/_includes/cockroach-generated/release-24.3/settings/settings.html b/src/current/_includes/cockroach-generated/release-24.3/settings/settings.html new file mode 100644 index 00000000000..41cb4991d60 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-24.3/settings/settings.html @@ -0,0 +1,371 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
SettingTypeDefaultDescriptionSupported Deployments
admission.disk_bandwidth_tokens.elastic.enabled
booleantruewhen true, and provisioned bandwidth for the disk corresponding to a store is configured, tokens for elastic work will be limited if disk bandwidth becomes a bottleneckDedicated/Self-Hosted
admission.epoch_lifo.enabled
booleanfalsewhen true, epoch-LIFO behavior is enabled when there is significant delay in admissionServerless/Dedicated/Self-Hosted
admission.epoch_lifo.epoch_closing_delta_duration
duration5msthe delta duration before closing an epoch, for epoch-LIFO admission control orderingServerless/Dedicated/Self-Hosted
admission.epoch_lifo.epoch_duration
duration100msthe duration of an epoch, for epoch-LIFO admission control orderingServerless/Dedicated/Self-Hosted
admission.epoch_lifo.queue_delay_threshold_to_switch_to_lifo
duration105msthe queue delay encountered by a (tenant,priority) for switching to epoch-LIFO orderingServerless/Dedicated/Self-Hosted
admission.kv.enabled
booleantruewhen true, work performed by the KV layer is subject to admission controlDedicated/Self-Hosted
admission.sql_kv_response.enabled
booleantruewhen true, work performed by the SQL layer when receiving a KV response is subject to admission controlServerless/Dedicated/Self-Hosted
admission.sql_sql_response.enabled
booleantruewhen true, work performed by the SQL layer when receiving a DistSQL response is subject to admission controlServerless/Dedicated/Self-Hosted
bulkio.backup.deprecated_full_backup_with_subdir.enabled
booleanfalsewhen true, a backup command with a user specified subdirectory will create a full backup at the subdirectory if no backup already exists at that subdirectoryServerless/Dedicated/Self-Hosted
bulkio.backup.file_size
byte size128 MiBtarget size for individual data files produced during BACKUPServerless/Dedicated/Self-Hosted
bulkio.backup.read_timeout
duration5m0samount of time after which a read attempt is considered timed out, which causes the backup to failServerless/Dedicated/Self-Hosted
bulkio.backup.read_with_priority_after
duration1m0samount of time since the read-as-of time above which a BACKUP should use priority when retrying readsServerless/Dedicated/Self-Hosted
physical_replication.consumer.minimum_flush_interval
(alias: bulkio.stream_ingestion.minimum_flush_interval)
duration5sthe minimum timestamp between flushes; flushes may still occur if internal buffers fill upDedicated/Self-Hosted
changefeed.aggregator.flush_jitter
float0.1jitter aggregator flushes as a fraction of min_checkpoint_frequency. This setting has no effect if min_checkpoint_frequency is set to 0.Serverless/Dedicated/Self-Hosted
changefeed.backfill.concurrent_scan_requests
integer0number of concurrent scan requests per node issued during a backfillServerless/Dedicated/Self-Hosted
changefeed.backfill.scan_request_size
integer524288the maximum number of bytes returned by each scan requestServerless/Dedicated/Self-Hosted
changefeed.batch_reduction_retry.enabled
(alias: changefeed.batch_reduction_retry_enabled)
booleanfalseif true, kafka changefeeds upon erroring on an oversized batch will attempt to resend the messages with progressively lower batch sizesServerless/Dedicated/Self-Hosted
changefeed.default_range_distribution_strategy
enumerationdefaultconfigures how work is distributed among nodes for a given changefeed. for the most balanced distribution, use `balanced_simple`. changing this setting will not override locality restrictions [default = 0, balanced_simple = 1]Serverless/Dedicated/Self-Hosted
changefeed.event_consumer_worker_queue_size
integer16if changefeed.event_consumer_workers is enabled, this setting sets the maxmimum number of events which a worker can bufferServerless/Dedicated/Self-Hosted
changefeed.event_consumer_workers
integer0the number of workers to use when processing events: <0 disables, 0 assigns a reasonable default, >0 assigns the setting value. for experimental/core changefeeds and changefeeds using parquet format, this is disabledServerless/Dedicated/Self-Hosted
changefeed.fast_gzip.enabled
booleantrueuse fast gzip implementationServerless/Dedicated/Self-Hosted
changefeed.frontier_highwater_lag_checkpoint_threshold
duration10m0scontrols the maximum the high-water mark is allowed to lag behind the leading spans of the frontier before per-span checkpointing is enabled; if 0, checkpointing due to high-water lag is disabledServerless/Dedicated/Self-Hosted
changefeed.kafka.max_request_size
byte size256 MiBthe maximum number of uncompressed bytes sent in a single request to a Kafka broker; lowering this value helps avoid spurious "message too large" errors that can occur when multiple messages are combined into a single batch; this setting is overridden by the per-changefeed Flush { MaxBytes: <int> } optionServerless/Dedicated/Self-Hosted
changefeed.kafka_v2_error_details.enabled
booleanfalseif enabled, Kafka v2 sinks will include the message key, size, and MVCC timestamp in message too large errorsServerless/Dedicated/Self-Hosted
changefeed.memory.per_changefeed_limit
byte size512 MiBcontrols amount of data that can be buffered per changefeedServerless/Dedicated/Self-Hosted
changefeed.min_highwater_advance
duration0sminimum amount of time the changefeed high water mark must advance for it to be eligible for checkpointing; Default of 0 will checkpoint every time frontier advances, as long as the rate of checkpointing keeps up with the rate of frontier changesServerless/Dedicated/Self-Hosted
changefeed.node_throttle_config
stringspecifies node level throttling configuration for all changefeeedsServerless/Dedicated/Self-Hosted
changefeed.protect_timestamp.max_age
duration96h0m0sfail the changefeed if the protected timestamp age exceeds this threshold; 0 disables expirationServerless/Dedicated/Self-Hosted
changefeed.protect_timestamp_interval
duration10m0scontrols how often the changefeed forwards its protected timestamp to the resolved timestampServerless/Dedicated/Self-Hosted
changefeed.schema_feed.read_with_priority_after
duration1m0sretry with high priority if we were not able to read descriptors for too long; 0 disablesServerless/Dedicated/Self-Hosted
changefeed.sink_io_workers
integer0the number of workers used by changefeeds when sending requests to the sink (currently the batching versions of webhook, pubsub, and kafka sinks that are enabled by changefeed.new_<sink type>_sink_enabled only): <0 disables, 0 assigns a reasonable default, >0 assigns the setting valueServerless/Dedicated/Self-Hosted
cloudstorage.azure.concurrent_upload_buffers
integer1controls the number of concurrent buffers that will be used by the Azure client when uploading chunks.Each buffer can buffer up to cloudstorage.write_chunk.size of memory during an uploadServerless/Dedicated/Self-Hosted
cloudstorage.azure.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.azure.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.azure.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.azure.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.gs.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.gs.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.gs.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.gs.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.http.custom_ca
stringcustom root CA (appended to system's default CAs) for verifying certificates when interacting with HTTPS storageServerless/Dedicated/Self-Hosted
cloudstorage.http.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.http.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.http.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.http.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.nodelocal.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.nodelocal.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.nodelocal.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.nodelocal.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.nullsink.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.nullsink.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.nullsink.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.nullsink.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.s3.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.s3.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.s3.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.s3.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.timeout
duration10m0sthe timeout for import/export storage operationsServerless/Dedicated/Self-Hosted
cloudstorage.userfile.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.userfile.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.userfile.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.userfile.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cluster.auto_upgrade.enabled
booleantruedisable automatic cluster version upgrade until resetServerless/Dedicated/Self-Hosted
cluster.organization
stringorganization nameDedicated/Self-hosted (read-write); Serverless (read-only)
cluster.preserve_downgrade_option
stringdisable (automatic or manual) cluster version upgrade from the specified version until resetServerless/Dedicated/Self-Hosted
debug.zip.redact_addresses.enabled
booleanfalseenables the redaction of hostnames and ip addresses in debug zipServerless/Dedicated/Self-Hosted
diagnostics.active_query_dumps.enabled
booleantrueexperimental: enable dumping of anonymized active queries to disk when node is under memory pressureDedicated/Self-hosted (read-write); Serverless (read-only)
diagnostics.forced_sql_stat_reset.interval
duration2h0m0sinterval after which the reported SQL Stats are reset even if not collected by telemetry reporter. It has a max value of 24H.Serverless/Dedicated/Self-Hosted
diagnostics.memory_monitoring_dumps.enabled
booleantrueenable dumping of memory monitoring state at the same time as heap profiles are takenDedicated/Self-hosted (read-write); Serverless (read-only)
diagnostics.reporting.enabled
booleantrueenable reporting diagnostic metrics to cockroach labs, but is ignored for Trial or Free licensesServerless/Dedicated/Self-Hosted
diagnostics.reporting.interval
duration1h0m0sinterval at which diagnostics data should be reportedServerless/Dedicated/Self-Hosted
enterprise.license
stringthe encoded cluster licenseDedicated/Self-hosted (read-write); Serverless (read-only)
external.graphite.endpoint
stringif nonempty, push server metrics to the Graphite or Carbon server at the specified host:portServerless/Dedicated/Self-Hosted
external.graphite.interval
duration10sthe interval at which metrics are pushed to Graphite (if enabled)Serverless/Dedicated/Self-Hosted
feature.backup.enabled
booleantrueset to true to enable backups, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.changefeed.enabled
booleantrueset to true to enable changefeeds, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.export.enabled
booleantrueset to true to enable exports, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.import.enabled
booleantrueset to true to enable imports, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.restore.enabled
booleantrueset to true to enable restore, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.schema_change.enabled
booleantrueset to true to enable schema changes, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.stats.enabled
booleantrueset to true to enable CREATE STATISTICS/ANALYZE, false to disable; default is trueServerless/Dedicated/Self-Hosted
jobs.retention_time
duration336h0m0sthe amount of time for which records for completed jobs are retainedServerless/Dedicated/Self-Hosted
kv.allocator.lease_rebalance_threshold
float0.05minimum fraction away from the mean a store's lease count can be before it is considered for lease-transfersDedicated/Self-Hosted
kv.allocator.load_based_lease_rebalancing.enabled
booleantrueset to enable rebalancing of range leases based on load and latencyDedicated/Self-Hosted
kv.allocator.load_based_rebalancing
enumerationleases and replicaswhether to rebalance based on the distribution of load across stores [off = 0, leases = 1, leases and replicas = 2]Dedicated/Self-Hosted
kv.allocator.load_based_rebalancing.objective
enumerationcpuwhat objective does the cluster use to rebalance; if set to `qps` the cluster will attempt to balance qps among stores, if set to `cpu` the cluster will attempt to balance cpu usage among stores [qps = 0, cpu = 1]Dedicated/Self-Hosted
kv.allocator.load_based_rebalancing_interval
duration1m0sthe rough interval at which each store will check for load-based lease / replica rebalancing opportunitiesDedicated/Self-Hosted
kv.allocator.qps_rebalance_threshold
float0.1minimum fraction away from the mean a store's QPS (such as queries per second) can be before it is considered overfull or underfullDedicated/Self-Hosted
kv.allocator.range_rebalance_threshold
float0.05minimum fraction away from the mean a store's range count can be before it is considered overfull or underfullDedicated/Self-Hosted
kv.allocator.store_cpu_rebalance_threshold
float0.1minimum fraction away from the mean a store's cpu usage can be before it is considered overfull or underfullDedicated/Self-Hosted
kv.bulk_io_write.max_rate
byte size1.0 TiBthe rate limit (bytes/sec) to use for writes to disk on behalf of bulk io opsDedicated/Self-Hosted
kv.bulk_sst.max_allowed_overage
byte size64 MiBif positive, allowed size in excess of target size for SSTs from export requests; export requests (i.e. BACKUP) may buffer up to the sum of kv.bulk_sst.target_size and kv.bulk_sst.max_allowed_overage in memoryDedicated/Self-Hosted
kv.bulk_sst.target_size
byte size16 MiBtarget size for SSTs emitted from export requests; export requests (i.e. BACKUP) may buffer up to the sum of kv.bulk_sst.target_size and kv.bulk_sst.max_allowed_overage in memoryDedicated/Self-hosted (read-write); Serverless (read-only)
kv.closed_timestamp.follower_reads.enabled
(alias: kv.closed_timestamp.follower_reads_enabled)
booleantrueallow (all) replicas to serve consistent historical reads based on closed timestamp informationDedicated/Self-hosted (read-write); Serverless (read-only)
kv.closed_timestamp.lead_for_global_reads_override
duration0sif nonzero, overrides the lead time that global_read ranges use to publish closed timestampsDedicated/Self-hosted (read-write); Serverless (read-only)
kv.closed_timestamp.side_transport_interval
duration200msthe interval at which the closed timestamp side-transport attempts to advance each range's closed timestamp; set to 0 to disable the side-transportDedicated/Self-hosted (read-write); Serverless (read-only)
kv.closed_timestamp.target_duration
duration3sif nonzero, attempt to provide closed timestamp notifications for timestamps trailing cluster time by approximately this durationDedicated/Self-hosted (read-write); Serverless (read-only)
kv.dist_sender.circuit_breaker.cancellation.enabled
booleantruewhen enabled, in-flight requests will be cancelled when the circuit breaker tripsServerless/Dedicated/Self-Hosted
kv.dist_sender.circuit_breaker.cancellation.write_grace_period
duration10show long after the circuit breaker trips to cancel write requests (these can't retry internally, so should be long enough to allow quorum/lease recovery)Serverless/Dedicated/Self-Hosted
kv.dist_sender.circuit_breaker.probe.interval
duration3sinterval between replica probesServerless/Dedicated/Self-Hosted
kv.dist_sender.circuit_breaker.probe.threshold
duration3sduration of errors or stalls after which a replica will be probedServerless/Dedicated/Self-Hosted
kv.dist_sender.circuit_breaker.probe.timeout
duration3stimeout for replica probesServerless/Dedicated/Self-Hosted
kv.dist_sender.circuit_breakers.mode
enumerationliveness range onlyset of ranges to trip circuit breakers for failing or stalled replicas [no ranges = 0, liveness range only = 1, all ranges = 2]Serverless/Dedicated/Self-Hosted
kv.lease_transfer_read_summary.global_budget
byte size0 Bcontrols the maximum number of bytes that will be used to summarize the global segment of the timestamp cache during lease transfers and range merges. A smaller budget will result in loss of precision.Dedicated/Self-Hosted
kv.lease_transfer_read_summary.local_budget
byte size4.0 MiBcontrols the maximum number of bytes that will be used to summarize the local segment of the timestamp cache during lease transfers and range merges. A smaller budget will result in loss of precision.Dedicated/Self-Hosted
kv.log_range_and_node_events.enabled
booleantrueset to true to transactionally log range events (e.g., split, merge, add/remove voter/non-voter) into system.rangelog and node join and restart events into system.eventlogDedicated/Self-Hosted
kv.protectedts.reconciliation.interval
duration5m0sthe frequency for reconciling jobs with protected timestamp recordsDedicated/Self-hosted (read-write); Serverless (read-only)
kv.raft.leader_fortification.fraction_enabled
float0controls the fraction of ranges for which the raft leader fortification protocol is enabled. Leader fortification is needed for a range to use a Leader lease. Set to 0.0 to disable leader fortification and, by extension, Leader leases. Set to 1.0 to enable leader fortification for all ranges and, by extension, use Leader leases for all ranges which do not require expiration-based leases. Set to a value between 0.0 and 1.0 to gradually roll out Leader leases across the ranges in a cluster.Dedicated/Self-Hosted
kv.range.range_size_hard_cap
byte size8.0 GiBhard cap on the maximum size a range is allowed to grow to without splitting before writes to the range are blocked. Takes precedence over all other configurationsDedicated/Self-Hosted
kv.range_split.by_load.enabled
(alias: kv.range_split.by_load_enabled)
booleantrueallow automatic splits of ranges based on where load is concentratedDedicated/Self-Hosted
kv.range_split.load_cpu_threshold
duration500msthe CPU use per second over which the range becomes a candidate for load based splittingDedicated/Self-Hosted
kv.range_split.load_qps_threshold
integer2500the QPS over which the range becomes a candidate for load based splittingDedicated/Self-Hosted
kv.rangefeed.client.stream_startup_rate
integer100controls the rate per second the client will initiate new rangefeed stream for a single range; 0 implies unlimitedServerless/Dedicated/Self-Hosted
kv.rangefeed.closed_timestamp_refresh_interval
duration3sthe interval at which closed-timestamp updates are delivered to rangefeeds; set to 0 to use kv.closed_timestamp.side_transport_intervalDedicated/Self-hosted (read-write); Serverless (read-only)
kv.rangefeed.enabled
booleanfalseif set, rangefeed registration is enabledDedicated/Self-hosted (read-write); Serverless (read-only)
kv.replica_circuit_breaker.slow_replication_threshold
duration1m0sduration after which slow proposals trip the per-Replica circuit breaker (zero duration disables breakers)Dedicated/Self-Hosted
kv.replica_stats.addsst_request_size_factor
integer50000the divisor that is applied to addsstable request sizes, then recorded in a leaseholders QPS; 0 means all requests are treated as cost 1Dedicated/Self-Hosted
kv.replication_reports.interval
duration1m0sthe frequency for generating the replication_constraint_stats, replication_stats_report and replication_critical_localities reports (set to 0 to disable)Dedicated/Self-Hosted
kv.snapshot_rebalance.max_rate
byte size32 MiBthe rate limit (bytes/sec) to use for rebalance and upreplication snapshotsDedicated/Self-Hosted
kv.snapshot_receiver.excise.enabled
booleantrueset to false to disable excises in place of range deletions for KV snapshotsDedicated/Self-Hosted
kv.transaction.max_intents_and_locks
integer0maximum count of inserts or durable locks for a single transaction, 0 to disableServerless/Dedicated/Self-Hosted
kv.transaction.max_intents_bytes
integer4194304maximum number of bytes used to track locks in transactionsServerless/Dedicated/Self-Hosted
kv.transaction.max_refresh_spans_bytes
integer4194304maximum number of bytes used to track refresh spans in serializable transactionsServerless/Dedicated/Self-Hosted
kv.transaction.randomized_anchor_key.enabled
booleanfalsedictates whether a transaction's anchor key is randomized or notServerless/Dedicated/Self-Hosted
kv.transaction.reject_over_max_intents_budget.enabled
booleanfalseif set, transactions that exceed their lock tracking budget (kv.transaction.max_intents_bytes) are rejected instead of having their lock spans imprecisely compressedServerless/Dedicated/Self-Hosted
kv.transaction.write_pipelining.locking_reads.enabled
booleantrueif enabled, transactional locking reads are pipelined through Raft consensusServerless/Dedicated/Self-Hosted
kv.transaction.write_pipelining.ranged_writes.enabled
booleantrueif enabled, transactional ranged writes are pipelined through Raft consensusServerless/Dedicated/Self-Hosted
kv.transaction.write_pipelining.enabled
(alias: kv.transaction.write_pipelining_enabled)
booleantrueif enabled, transactional writes are pipelined through Raft consensusServerless/Dedicated/Self-Hosted
kv.transaction.write_pipelining.max_batch_size
(alias: kv.transaction.write_pipelining_max_batch_size)
integer128if non-zero, defines the maximum size batch that will be pipelined through Raft consensusServerless/Dedicated/Self-Hosted
kvadmission.store.provisioned_bandwidth
byte size0 Bif set to a non-zero value, this is used as the provisioned bandwidth (in bytes/s), for each store. It can be overridden on a per-store basis using the --store flag. Note that setting the provisioned bandwidth to a positive value may enable disk bandwidth based admission control, since admission.disk_bandwidth_tokens.elastic.enabled defaults to trueDedicated/Self-Hosted
kvadmission.store.snapshot_ingest_bandwidth_control.enabled
booleanfalseif set to true, snapshot ingests will be subject to disk write control in ACDedicated/Self-Hosted
obs.tablemetadata.automatic_updates.enabled
booleanfalseenables automatic updates of the table metadata cache system.table_metadataServerless/Dedicated/Self-Hosted
obs.tablemetadata.data_valid_duration
duration20m0sthe duration for which the data in system.table_metadata is considered validServerless/Dedicated/Self-Hosted
schedules.backup.gc_protection.enabled
booleantrueenable chaining of GC protection across backups run as part of a scheduleServerless/Dedicated/Self-Hosted
security.client_cert.subject_required.enabled
booleanfalsemandates a requirement for subject role to be set for db userDedicated/Self-hosted (read-write); Serverless (read-only)
security.ocsp.mode
enumerationoffuse OCSP to check whether TLS certificates are revoked. If the OCSP server is unreachable, in strict mode all certificates will be rejected and in lax mode all certificates will be accepted. [off = 0, lax = 1, strict = 2]Serverless/Dedicated/Self-Hosted
security.ocsp.timeout
duration3stimeout before considering the OCSP server unreachableServerless/Dedicated/Self-Hosted
server.auth_log.sql_connections.enabled
booleanfalseif set, log SQL client connect and disconnect events to the SESSIONS log channel (note: may hinder performance on loaded nodes)Serverless/Dedicated/Self-Hosted
server.auth_log.sql_sessions.enabled
booleanfalseif set, log verbose SQL session authentication events to the SESSIONS log channel (note: may hinder performance on loaded nodes). Session start and end events are always logged regardless of this setting; disable the SESSIONS log channel to suppress them.Serverless/Dedicated/Self-Hosted
server.authentication_cache.enabled
booleantrueenables a cache used during authentication to avoid lookups to system tables when retrieving per-user authentication-related informationServerless/Dedicated/Self-Hosted
server.child_metrics.enabled
booleanfalseenables the exporting of child metrics, additional prometheus time series with extra labelsServerless/Dedicated/Self-Hosted
server.child_metrics.include_aggregate.enabled
booleantrueinclude the reporting of the aggregate time series when child metrics are enabled. This cluster setting has no effect if child metrics are disabled.Serverless/Dedicated/Self-Hosted
server.client_cert_expiration_cache.capacity
integer1000the maximum number of client cert expirations storedServerless/Dedicated/Self-Hosted
server.clock.forward_jump_check.enabled
(alias: server.clock.forward_jump_check_enabled)
booleanfalseif enabled, forward clock jumps > max_offset/2 will cause a panicServerless/Dedicated/Self-Hosted
server.clock.persist_upper_bound_interval
duration0sthe interval between persisting the wall time upper bound of the clock. The clock does not generate a wall time greater than the persisted timestamp and will panic if it sees a wall time greater than this value. When cockroach starts, it waits for the wall time to catch-up till this persisted timestamp. This guarantees monotonic wall time across server restarts. Not setting this or setting a value of 0 disables this feature.Serverless/Dedicated/Self-Hosted
server.consistency_check.max_rate
byte size8.0 MiBthe rate limit (bytes/sec) to use for consistency checks; used in conjunction with server.consistency_check.interval to control the frequency of consistency checks. Note that setting this too high can negatively impact performance.Dedicated/Self-Hosted
server.eventlog.enabled
booleantrueif set, logged notable events are also stored in the table system.eventlogServerless/Dedicated/Self-Hosted
server.eventlog.ttl
duration2160h0m0sif nonzero, entries in system.eventlog older than this duration are periodically purgedServerless/Dedicated/Self-Hosted
server.host_based_authentication.configuration
stringhost-based authentication configuration to use during connection authenticationServerless/Dedicated/Self-Hosted
server.hot_ranges_request.node.timeout
duration5m0sthe duration allowed for a single node to return hot range data before the request is cancelled; if set to 0, there is no timeoutServerless/Dedicated/Self-Hosted
server.hsts.enabled
booleanfalseif true, HSTS headers will be sent along with all HTTP requests. The headers will contain a max-age setting of one year. Browsers honoring the header will always use HTTPS to access the DB Console. Ensure that TLS is correctly configured prior to enabling.Serverless/Dedicated/Self-Hosted
server.http.base_path
string/path to redirect the user to upon successful loginServerless/Dedicated/Self-Hosted
server.identity_map.configuration
stringsystem-identity to database-username mappingsServerless/Dedicated/Self-Hosted
server.jwt_authentication.audience
stringsets accepted audience values for JWT logins over the SQL interfaceServerless/Dedicated/Self-Hosted
server.jwt_authentication.claim
stringsets the JWT claim that is parsed to get the usernameServerless/Dedicated/Self-Hosted
server.jwt_authentication.client.timeout
duration15ssets the client timeout for external calls made during JWT authentication (e.g. fetching JWKS, etc.)Serverless/Dedicated/Self-Hosted
server.jwt_authentication.enabled
booleanfalseenables or disables JWT login for the SQL interfaceServerless/Dedicated/Self-Hosted
server.jwt_authentication.issuers.configuration
(alias: server.jwt_authentication.issuers)
stringsets accepted issuer values for JWT logins over the SQL interface which can be a single issuer URL string or a JSON string containing an array of issuer URLs or a JSON object containing map of issuer URLS to JWKS URIsServerless/Dedicated/Self-Hosted
server.jwt_authentication.issuers.custom_ca
stringsets the PEM encoded custom root CA for verifying certificates while fetching JWKSServerless/Dedicated/Self-Hosted
server.jwt_authentication.jwks
string{"keys":[]}sets the public key set for JWT logins over the SQL interface (JWKS format)Serverless/Dedicated/Self-Hosted
server.jwt_authentication.jwks_auto_fetch.enabled
booleanfalseenables or disables automatic fetching of JWKS from the issuer's well-known endpoint or JWKS URI set in JWTAuthIssuersConfig. If this is enabled, the server.jwt_authentication.jwks will be ignored.Serverless/Dedicated/Self-Hosted
server.ldap_authentication.client.tls_certificate
stringsets the client certificate PEM for establishing mTLS connection with LDAP serverServerless/Dedicated/Self-Hosted
server.ldap_authentication.client.tls_key
stringsets the client key PEM for establishing mTLS connection with LDAP serverServerless/Dedicated/Self-Hosted
server.ldap_authentication.domain.custom_ca
stringsets the PEM encoded custom root CA for verifying domain certificates when establishing connection with LDAP serverServerless/Dedicated/Self-Hosted
server.log_gc.max_deletions_per_cycle
integer1000the maximum number of entries to delete on each purge of log-like system tablesServerless/Dedicated/Self-Hosted
server.log_gc.period
duration1h0m0sthe period at which log-like system tables are checked for old entriesServerless/Dedicated/Self-Hosted
server.max_connections_per_gateway
integer-1the maximum number of SQL connections per gateway allowed at a given time (note: this will only limit future connection attempts and will not affect already established connections). Negative values result in unlimited number of connections. Superusers are not affected by this limit.Serverless/Dedicated/Self-Hosted
server.max_open_transactions_per_gateway
integer-1the maximum number of open SQL transactions per gateway allowed at a given time. Negative values result in unlimited number of connections. Superusers are not affected by this limit.Serverless/Dedicated/Self-Hosted
server.oidc_authentication.autologin.enabled
(alias: server.oidc_authentication.autologin)
booleanfalseif true, logged-out visitors to the DB Console will be automatically redirected to the OIDC login endpointServerless/Dedicated/Self-Hosted
server.oidc_authentication.button_text
stringLog in with your OIDC providertext to show on button on DB Console login page to login with your OIDC provider (only shown if OIDC is enabled)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.claim_json_key
stringsets JSON key of principal to extract from payload after OIDC authentication completes (usually email or sid)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.client.timeout
duration15ssets the client timeout for external calls made during OIDC authentication (e.g. authorization code flow, etc.)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.client_id
stringsets OIDC client idServerless/Dedicated/Self-Hosted
server.oidc_authentication.client_secret
stringsets OIDC client secretServerless/Dedicated/Self-Hosted
server.oidc_authentication.enabled
booleanfalseenables or disables OIDC login for the DB ConsoleServerless/Dedicated/Self-Hosted
server.oidc_authentication.principal_regex
string(.+)regular expression to apply to extracted principal (see claim_json_key setting) to translate to SQL user (golang regex format, must include 1 grouping to extract)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.provider_url
stringsets OIDC provider URL ({provider_url}/.well-known/openid-configuration must resolve)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.redirect_url
stringhttps://localhost:8080/oidc/v1/callbacksets OIDC redirect URL via a URL string or a JSON string containing a required `redirect_urls` key with an object that maps from region keys to URL strings (URLs should point to your load balancer and must route to the path /oidc/v1/callback)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.scopes
stringopenidsets OIDC scopes to include with authentication request (space delimited list of strings, required to start with `openid`)Serverless/Dedicated/Self-Hosted
server.rangelog.ttl
duration720h0m0sif nonzero, entries in system.rangelog older than this duration are periodically purgedDedicated/Self-Hosted
server.redact_sensitive_settings.enabled
booleanfalseenables or disables the redaction of sensitive settings in the output of SHOW CLUSTER SETTINGS and SHOW ALL CLUSTER SETTINGS for users without the MODIFYCLUSTERSETTING privilegeServerless/Dedicated/Self-Hosted
server.shutdown.connections.timeout
(alias: server.shutdown.connection_wait)
duration0sthe maximum amount of time a server waits for all SQL connections to be closed before proceeding with a drain. (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Serverless/Dedicated/Self-Hosted
server.shutdown.initial_wait
(alias: server.shutdown.drain_wait)
duration0sthe amount of time a server waits in an unready state before proceeding with a drain (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting. --drain-wait is to specify the duration of the whole draining process, while server.shutdown.initial_wait is to set the wait time for health probes to notice that the node is not ready.)Serverless/Dedicated/Self-Hosted
server.shutdown.lease_transfer_iteration.timeout
(alias: server.shutdown.lease_transfer_wait)
duration5sthe timeout for a single iteration of the range lease transfer phase of draining (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Dedicated/Self-Hosted
server.shutdown.transactions.timeout
(alias: server.shutdown.query_wait)
duration10sthe timeout for waiting for active transactions to finish during a drain (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Serverless/Dedicated/Self-Hosted
server.sql_tcp_keep_alive.count
integer3maximum number of probes that will be sent out before a connection is dropped because it's unresponsive (Linux and Darwin only)Serverless/Dedicated/Self-Hosted
server.sql_tcp_keep_alive.interval
duration10stime between keep alive probes and idle time before probes are sent outServerless/Dedicated/Self-Hosted
server.time_until_store_dead
duration5m0sthe time after which if there is no new gossiped information about a store, it is considered deadServerless/Dedicated/Self-Hosted
server.user_login.cert_password_method.auto_scram_promotion.enabled
booleantruewhether to automatically promote cert-password authentication to use SCRAMServerless/Dedicated/Self-Hosted
server.user_login.downgrade_scram_stored_passwords_to_bcrypt.enabled
booleantrueif server.user_login.password_encryption=crdb-bcrypt, this controls whether to automatically re-encode stored passwords from scram-sha-256 to crdb-bcryptServerless/Dedicated/Self-Hosted
server.user_login.min_password_length
integer1the minimum length accepted for passwords set in cleartext via SQL. Note that a value lower than 1 is ignored: passwords cannot be empty in any case. This setting only applies when adding new users or altering an existing user's password; it will not affect existing logins.Serverless/Dedicated/Self-Hosted
server.user_login.password_encryption
enumerationscram-sha-256which hash method to use to encode cleartext passwords passed via ALTER/CREATE USER/ROLE WITH PASSWORD [crdb-bcrypt = 2, scram-sha-256 = 3]Serverless/Dedicated/Self-Hosted
server.user_login.password_hashes.default_cost.crdb_bcrypt
integer10the hashing cost to use when storing passwords supplied as cleartext by SQL clients with the hashing method crdb-bcrypt (allowed range: 4-31)Serverless/Dedicated/Self-Hosted
server.user_login.password_hashes.default_cost.scram_sha_256
integer10610the hashing cost to use when storing passwords supplied as cleartext by SQL clients with the hashing method scram-sha-256 (allowed range: 4096-240000000000)Serverless/Dedicated/Self-Hosted
server.user_login.rehash_scram_stored_passwords_on_cost_change.enabled
booleantrueif server.user_login.password_hashes.default_cost.scram_sha_256 differs from the cost in a stored hash, this controls whether to automatically re-encode stored passwords using scram-sha-256 with the new default costServerless/Dedicated/Self-Hosted
server.user_login.timeout
duration10stimeout after which client authentication times out if some system range is unavailable (0 = no timeout)Serverless/Dedicated/Self-Hosted
server.user_login.upgrade_bcrypt_stored_passwords_to_scram.enabled
booleantrueif server.user_login.password_encryption=scram-sha-256, this controls whether to automatically re-encode stored passwords from crdb-bcrypt to scram-sha-256Serverless/Dedicated/Self-Hosted
server.web_session.purge.ttl
duration1h0m0sif nonzero, entries in system.web_sessions older than this duration are periodically purgedServerless/Dedicated/Self-Hosted
server.web_session.timeout
(alias: server.web_session_timeout)
duration168h0m0sthe duration that a newly created web session will be validServerless/Dedicated/Self-Hosted
spanconfig.bounds.enabled
booleantruedictates whether span config bounds are consulted when serving span configs for secondary tenantsDedicated/Self-Hosted
spanconfig.range_coalescing.system.enabled
(alias: spanconfig.storage_coalesce_adjacent.enabled)
booleantruecollapse adjacent ranges with the same span configs, for the ranges specific to the system tenantDedicated/Self-Hosted
spanconfig.range_coalescing.application.enabled
(alias: spanconfig.tenant_coalesce_adjacent.enabled)
booleantruecollapse adjacent ranges with the same span configs across all secondary tenant keyspacesDedicated/Self-Hosted
sql.auth.change_own_password.enabled
booleanfalsecontrols whether a user is allowed to change their own password, even if they have no other privilegesServerless/Dedicated/Self-Hosted
sql.auth.grant_option_for_owner.enabled
booleantruedetermines whether the GRANT OPTION for privileges is implicitly given to the owner of an objectServerless/Dedicated/Self-Hosted
sql.auth.grant_option_inheritance.enabled
booleantruedetermines whether the GRANT OPTION for privileges is inherited through role membershipServerless/Dedicated/Self-Hosted
sql.auth.public_schema_create_privilege.enabled
booleantruedetermines whether to grant all users the CREATE privileges on the public schema when it is createdServerless/Dedicated/Self-Hosted
sql.auth.skip_underlying_view_privilege_checks.enabled
booleantruedetermines whether to skip privilege checks on tables underlying views. When enabled, users with SELECT privileges on a view can query it regardless of their privileges on the underlying tables. This restores pre-v26.2 behavior.Serverless/Dedicated/Self-Hosted
sql.closed_session_cache.capacity
integer1000the maximum number of sessions in the cacheServerless/Dedicated/Self-Hosted
sql.closed_session_cache.time_to_live
integer3600the maximum time to live, in secondsServerless/Dedicated/Self-Hosted
sql.contention.event_store.capacity
byte size64 MiBthe in-memory storage capacity per-node of contention event storeServerless/Dedicated/Self-Hosted
sql.contention.event_store.duration_threshold
duration0sminimum contention duration to cause the contention events to be collected into crdb_internal.transaction_contention_eventsServerless/Dedicated/Self-Hosted
sql.contention.record_serialization_conflicts.enabled
booleantrueenables recording 40001 errors with conflicting txn meta as SERIALIZATION_CONFLICT contention events into crdb_internal.transaction_contention_eventsServerless/Dedicated/Self-Hosted
sql.contention.txn_id_cache.max_size
byte size64 MiBthe maximum byte size TxnID cache will use (set to 0 to disable)Serverless/Dedicated/Self-Hosted
sql.cross_db_fks.enabled
booleanfalseif true, creating foreign key references across databases is allowedServerless/Dedicated/Self-Hosted
sql.cross_db_sequence_owners.enabled
booleanfalseif true, creating sequences owned by tables from other databases is allowedServerless/Dedicated/Self-Hosted
sql.cross_db_sequence_references.enabled
booleanfalseif true, sequences referenced by tables from other databases are allowedServerless/Dedicated/Self-Hosted
sql.cross_db_views.enabled
booleanfalseif true, creating views that refer to other databases is allowedServerless/Dedicated/Self-Hosted
sql.defaults.cost_scans_with_default_col_size.enabled
booleanfalsesetting to true uses the same size for all columns to compute scan cost
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.datestyle
enumerationiso, mdydefault value for DateStyle session setting [iso, mdy = 0, iso, dmy = 1, iso, ymd = 2]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.default_hash_sharded_index_bucket_count
integer16used as bucket count if bucket count is not specified in hash sharded index definition
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.default_int_size
integer8the size, in bytes, of an INT type
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.disallow_full_table_scans.enabled
booleanfalsesetting to true rejects queries that have planned a full table scan; set large_full_scan_rows > 0 to allow small full table scans estimated to read fewer than large_full_scan_rows
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.distsql
enumerationautodefault distributed SQL execution mode [off = 0, auto = 1, on = 2, always = 3]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_alter_column_type.enabled
booleanfalsedefault value for experimental_alter_column_type session setting; enables the use of ALTER COLUMN TYPE for general conversions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_distsql_planning
enumerationoffdefault experimental_distsql_planning mode; enables experimental opt-driven DistSQL planning [off = 0, on = 1]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_enable_unique_without_index_constraints.enabled
booleanfalsedefault value for experimental_enable_unique_without_index_constraints session setting; disables unique without index constraints by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_implicit_column_partitioning.enabled
booleanfalsedefault value for experimental_enable_temp_tables; allows for the use of implicit column partitioning
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_temporary_tables.enabled
booleanfalsedefault value for experimental_enable_temp_tables; allows for use of temporary tables by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.foreign_key_cascades_limit
integer10000default value for foreign_key_cascades_limit session setting; limits the number of cascading operations that run as part of a single query
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.idle_in_session_timeout
duration0sdefault value for the idle_in_session_timeout; default value for the idle_in_session_timeout session setting; controls the duration a session is permitted to idle before the session is terminated; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.idle_in_transaction_session_timeout
duration0sdefault value for the idle_in_transaction_session_timeout; controls the duration a session is permitted to idle in a transaction before the session is terminated; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.implicit_select_for_update.enabled
booleantruedefault value for enable_implicit_select_for_update session setting; enables FOR UPDATE locking during the row-fetch phase of mutation statements
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.insert_fast_path.enabled
booleantruedefault value for enable_insert_fast_path session setting; enables a specialized insert path
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.intervalstyle
enumerationpostgresdefault value for IntervalStyle session setting [postgres = 0, iso_8601 = 1, sql_standard = 2]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.large_full_scan_rows
float0default value for large_full_scan_rows session variable which determines the table size at which full scans are considered large and disallowed when disallow_full_table_scans is set to true; set to 0 to reject all full table or full index scans when disallow_full_table_scans is true
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.locality_optimized_partitioned_index_scan.enabled
booleantruedefault value for locality_optimized_partitioned_index_scan session setting; enables searching for rows in the current region before searching remote regions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.lock_timeout
duration0sdefault value for the lock_timeout; default value for the lock_timeout session setting; controls the duration a query is permitted to wait while attempting to acquire a lock on a key or while blocking on an existing lock in order to perform a non-locking read on a key; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.on_update_rehome_row.enabled
booleantruedefault value for on_update_rehome_row; enables ON UPDATE rehome_row() expressions to trigger on updates
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.optimizer_use_histograms.enabled
booleantruedefault value for optimizer_use_histograms session setting; enables usage of histograms in the optimizer by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.optimizer_use_multicol_stats.enabled
booleantruedefault value for optimizer_use_multicol_stats session setting; enables usage of multi-column stats in the optimizer by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.override_alter_primary_region_in_super_region.enabled
booleanfalsedefault value for override_alter_primary_region_in_super_region; allows for altering the primary region even if the primary region is a member of a super region
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.override_multi_region_zone_config.enabled
booleanfalsedefault value for override_multi_region_zone_config; allows for overriding the zone configs of a multi-region table or database
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.prefer_lookup_joins_for_fks.enabled
booleanfalsedefault value for prefer_lookup_joins_for_fks session setting; causes foreign key operations to use lookup joins when possible
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.primary_region
stringif not empty, all databases created without a PRIMARY REGION will implicitly have the given PRIMARY REGION
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.reorder_joins_limit
integer8default number of joins to reorder
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.require_explicit_primary_keys.enabled
booleanfalsedefault value for requiring explicit primary keys in CREATE TABLE statements
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.results_buffer.size
byte size512 KiBdefault size of the buffer that accumulates results for a statement or a batch of statements before they are sent to the client. This can be overridden on an individual connection with the 'results_buffer_size' parameter. Note that auto-retries generally only happen while no results have been delivered to the client, so reducing this size can increase the number of retriable errors a client receives. On the other hand, increasing the buffer size can increase the delay until the client receives the first result row. Updating the setting only affects new connections. Setting to 0 disables any buffering.
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.serial_normalization
enumerationrowiddefault handling of SERIAL in table definitions [rowid = 0, virtual_sequence = 1, sql_sequence = 2, sql_sequence_cached = 3, unordered_rowid = 4, sql_sequence_cached_node = 5]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.statement_timeout
duration0sdefault value for the statement_timeout; default value for the statement_timeout session setting; controls the duration a query is permitted to run before it is canceled; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.stub_catalog_tables.enabled
booleantruedefault value for stub_catalog_tables session setting
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.super_regions.enabled
booleanfalsedefault value for enable_super_regions; allows for the usage of super regions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.transaction_rows_read_err
integer0the limit for the number of rows read by a SQL transaction which - once exceeded - will fail the transaction (or will trigger a logging event to SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.transaction_rows_read_log
integer0the threshold for the number of rows read by a SQL transaction which - once exceeded - will trigger a logging event to SQL_PERF (or SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.transaction_rows_written_err
integer0the limit for the number of rows written by a SQL transaction which - once exceeded - will fail the transaction (or will trigger a logging event to SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.transaction_rows_written_log
integer0the threshold for the number of rows written by a SQL transaction which - once exceeded - will trigger a logging event to SQL_PERF (or SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.use_declarative_schema_changer
enumerationondefault value for use_declarative_schema_changer session setting; disables new schema changer by default [off = 0, on = 1, unsafe = 2, unsafe_always = 3]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.vectorize
enumerationondefault vectorize mode [on = 0, on = 1, on = 2, experimental_always = 3, off = 4]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.zigzag_join.enabled
booleanfalsedefault value for enable_zigzag_join session setting; disallows use of zig-zag join by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.distsql.temp_storage.workmem
byte size64 MiBmaximum amount of memory in bytes a processor can use before falling back to temp storageServerless/Dedicated/Self-Hosted
sql.guardrails.max_row_size_err
byte size512 MiBmaximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an error is returned; use 0 to disableServerless/Dedicated/Self-Hosted
sql.guardrails.max_row_size_log
byte size64 MiBmaximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an event is logged to SQL_PERF (or SQL_INTERNAL_PERF if the mutating statement was internal); use 0 to disableServerless/Dedicated/Self-Hosted
sql.hash_sharded_range_pre_split.max
integer16max pre-split ranges to have when adding hash sharded index to an existing tableServerless/Dedicated/Self-Hosted
sql.index_recommendation.drop_unused_duration
duration168h0m0sthe index unused duration at which we begin to recommend dropping the indexServerless/Dedicated/Self-Hosted
sql.insights.anomaly_detection.enabled
booleantrueenable per-fingerprint latency recording and anomaly detectionServerless/Dedicated/Self-Hosted
sql.insights.anomaly_detection.latency_threshold
duration50msstatements must surpass this threshold to trigger anomaly detection and identificationServerless/Dedicated/Self-Hosted
sql.insights.anomaly_detection.memory_limit
byte size1.0 MiBthe maximum amount of memory allowed for tracking statement latenciesServerless/Dedicated/Self-Hosted
sql.insights.execution_insights_capacity
integer1000the size of the per-node store of execution insightsServerless/Dedicated/Self-Hosted
sql.insights.high_retry_count.threshold
integer10the number of retries a slow statement must have undergone for its high retry count to be highlighted as a potential problemServerless/Dedicated/Self-Hosted
sql.insights.latency_threshold
duration100msamount of time after which an executing statement is considered slow. Use 0 to disable.Serverless/Dedicated/Self-Hosted
sql.log.redact_names.enabled
booleanfalseif set, schema object identifiers are redacted in SQL statements that appear in event logsServerless/Dedicated/Self-Hosted
sql.log.slow_query.experimental_full_table_scans.enabled
booleanfalsewhen set to true, statements that perform a full table/index scan will be logged to the slow query log even if they do not meet the latency threshold. Must have the slow query log enabled for this setting to have any effect.Serverless/Dedicated/Self-Hosted
sql.log.slow_query.internal_queries.enabled
booleanfalsewhen set to true, internal queries which exceed the slow query log threshold are logged to a separate log. Must have the slow query log enabled for this setting to have any effect.Serverless/Dedicated/Self-Hosted
sql.log.slow_query.latency_threshold
duration0swhen set to non-zero, log statements whose service latency exceeds the threshold to a secondary logger on each nodeServerless/Dedicated/Self-Hosted
sql.log.user_audit
stringuser/role-based audit logging configuration. An enterprise license is required for this cluster setting to take effect.Serverless/Dedicated/Self-Hosted
sql.log.user_audit.reduced_config.enabled
booleanfalseenables logic to compute a reduced audit configuration, computing the audit configuration only once at session start instead of at each SQL event. The tradeoff with the increase in performance (~5%), is that changes to the audit configuration (user role memberships/cluster setting) are not reflected within session. Users will need to start a new session to see these changes in their auditing behaviour.Serverless/Dedicated/Self-Hosted
sql.metrics.index_usage_stats.enabled
booleantruecollect per index usage statisticsServerless/Dedicated/Self-Hosted
sql.metrics.max_mem_reported_stmt_fingerprints
integer100000the maximum number of reported statement fingerprints stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.max_mem_reported_txn_fingerprints
integer100000the maximum number of reported transaction fingerprints stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.max_mem_stmt_fingerprints
integer7500the maximum number of statement fingerprints stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.max_mem_txn_fingerprints
integer7500the maximum number of transaction fingerprints stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.dump_to_logs.enabled
(alias: sql.metrics.statement_details.dump_to_logs)
booleanfalsedump collected statement statistics to node logs when periodically clearedServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.enabled
booleantruecollect per-statement query statisticsServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.gateway_node.enabled
booleanfalsesave the gateway node for each statement fingerprint. If false, the value will be stored as 0.Serverless/Dedicated/Self-Hosted
sql.metrics.statement_details.index_recommendation_collection.enabled
booleantruegenerate an index recommendation for each fingerprint IDServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.max_mem_reported_idx_recommendations
integer5000the maximum number of reported index recommendation info stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.plan_collection.enabled
booleanfalseperiodically save a logical plan for each fingerprintServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.plan_collection.period
duration5m0sthe time until a new logical plan is collectedServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.threshold
duration0sminimum execution time to cause statement statistics to be collected. If configured, no transaction stats are collected.Serverless/Dedicated/Self-Hosted
sql.metrics.transaction_details.enabled
booleantruecollect per-application transaction statisticsServerless/Dedicated/Self-Hosted
sql.multiple_modifications_of_table.enabled
booleanfalseif true, allow statements containing multiple INSERT ON CONFLICT, UPSERT, UPDATE, or DELETE subqueries modifying the same table, at the risk of data corruption if the same row is modified multiple times by a single statement (multiple INSERT subqueries without ON CONFLICT cannot cause corruption and are always allowed)Serverless/Dedicated/Self-Hosted
sql.multiregion.drop_primary_region.enabled
booleantrueallows dropping the PRIMARY REGION of a database if it is the last regionServerless/Dedicated/Self-Hosted
sql.notices.enabled
booleantrueenable notices in the server/client protocol being sentServerless/Dedicated/Self-Hosted
sql.optimizer.uniqueness_checks_for_gen_random_uuid.enabled
booleanfalseif enabled, uniqueness checks may be planned for mutations of UUID columns updated with gen_random_uuid(); otherwise, uniqueness is assumed due to near-zero collision probabilityServerless/Dedicated/Self-Hosted
sql.schema.telemetry.recurrence
string@weeklycron-tab recurrence for SQL schema telemetry jobDedicated/Self-hosted (read-write); Serverless (read-only)
sql.spatial.experimental_box2d_comparison_operators.enabled
booleanfalseenables the use of certain experimental box2d comparison operatorsServerless/Dedicated/Self-Hosted
sql.stats.activity.persisted_rows.max
integer200000maximum number of rows of statement and transaction activity that will be persisted in the system tablesServerless/Dedicated/Self-Hosted
sql.stats.automatic_collection.enabled
booleantrueautomatic statistics collection modeServerless/Dedicated/Self-Hosted
sql.stats.automatic_collection.fraction_stale_rows
float0.2target fraction of stale rows per table that will trigger a statistics refreshServerless/Dedicated/Self-Hosted
sql.stats.automatic_collection.min_stale_rows
integer500target minimum number of stale rows per table that will trigger a statistics refreshServerless/Dedicated/Self-Hosted
sql.stats.automatic_partial_collection.enabled
booleanfalseautomatic partial statistics collection modeServerless/Dedicated/Self-Hosted
sql.stats.automatic_partial_collection.fraction_stale_rows
float0.05target fraction of stale rows per table that will trigger a partial statistics refreshServerless/Dedicated/Self-Hosted
sql.stats.automatic_partial_collection.min_stale_rows
integer100target minimum number of stale rows per table that will trigger a partial statistics refreshServerless/Dedicated/Self-Hosted
sql.stats.cleanup.recurrence
string@hourlycron-tab recurrence for SQL Stats cleanup jobServerless/Dedicated/Self-Hosted
sql.stats.error_on_concurrent_create_stats.enabled
booleantrueset to true to error on concurrent CREATE STATISTICS jobs, instead of skipping themServerless/Dedicated/Self-Hosted
sql.stats.flush.enabled
booleantrueif set, SQL execution statistics are periodically flushed to diskServerless/Dedicated/Self-Hosted
sql.stats.flush.interval
duration10m0sthe interval at which SQL execution statistics are flushed to disk, this value must be less than or equal to 1 hourServerless/Dedicated/Self-Hosted
sql.stats.forecasts.enabled
booleantruewhen true, enables generation of statistics forecasts by default for all tablesServerless/Dedicated/Self-Hosted
sql.stats.forecasts.max_decrease
float0.3333333333333333the most a prediction is allowed to decrease, expressed as the minimum ratio of the prediction to the lowest prior observationServerless/Dedicated/Self-Hosted
sql.stats.forecasts.min_goodness_of_fit
float0.95the minimum R² (goodness of fit) measurement required from all predictive models to use a forecastServerless/Dedicated/Self-Hosted
sql.stats.forecasts.min_observations
integer3the minimum number of observed statistics required to produce a statistics forecastServerless/Dedicated/Self-Hosted
sql.stats.histogram_buckets.count
integer200maximum number of histogram buckets to build during table statistics collectionServerless/Dedicated/Self-Hosted
sql.stats.histogram_buckets.include_most_common_values.enabled
booleantruewhether to include most common values as histogram bucketsServerless/Dedicated/Self-Hosted
sql.stats.histogram_buckets.max_fraction_most_common_values
float0.1maximum fraction of histogram buckets to use for most common valuesServerless/Dedicated/Self-Hosted
sql.stats.histogram_collection.enabled
booleantruehistogram collection modeServerless/Dedicated/Self-Hosted
sql.stats.histogram_samples.count
integer0number of rows sampled for histogram construction during table statistics collection. Not setting this or setting a value of 0 means that a reasonable sample size will be automatically picked based on the table size.Serverless/Dedicated/Self-Hosted
sql.stats.multi_column_collection.enabled
booleantruemulti-column statistics collection modeServerless/Dedicated/Self-Hosted
sql.stats.non_default_columns.min_retention_period
duration24h0m0sminimum retention period for table statistics collected on non-default columnsServerless/Dedicated/Self-Hosted
sql.stats.non_indexed_json_histograms.enabled
booleantrueset to true to collect table statistics histograms on non-indexed JSON columnsServerless/Dedicated/Self-Hosted
sql.stats.persisted_rows.max
integer1000000maximum number of rows of statement and transaction statistics that will be persisted in the system tables before compaction beginsServerless/Dedicated/Self-Hosted
sql.stats.post_events.enabled
booleanfalseif set, an event is logged for every CREATE STATISTICS jobServerless/Dedicated/Self-Hosted
sql.stats.response.max
integer20000the maximum number of statements and transaction stats returned in a CombinedStatements requestServerless/Dedicated/Self-Hosted
sql.stats.response.show_internal.enabled
booleanfalsecontrols if statistics for internal executions should be returned by the CombinedStatements and if internal sessions should be returned by the ListSessions endpoints. These endpoints are used to display statistics on the SQL Activity pagesServerless/Dedicated/Self-Hosted
sql.stats.system_tables.enabled
booleantruewhen true, enables use of statistics on system tables by the query optimizerServerless/Dedicated/Self-Hosted
sql.stats.system_tables_autostats.enabled
booleantruewhen true, enables automatic collection of statistics on system tablesServerless/Dedicated/Self-Hosted
sql.stats.virtual_computed_columns.enabled
booleantrueset to true to collect table statistics on virtual computed columnsServerless/Dedicated/Self-Hosted
sql.telemetry.query_sampling.enabled
booleanfalsewhen set to true, executed queries will emit an event on the telemetry logging channelServerless/Dedicated/Self-Hosted
sql.telemetry.query_sampling.internal.enabled
booleanfalsewhen set to true, internal queries will be sampled in telemetry loggingServerless/Dedicated/Self-Hosted
sql.telemetry.query_sampling.max_event_frequency
integer8the max event frequency (events per second) at which we sample executions for telemetry, note that it is recommended that this value shares a log-line limit of 10 logs per second on the telemetry pipeline with all other telemetry events. If sampling mode is set to 'transaction', this value is ignored.Serverless/Dedicated/Self-Hosted
sql.telemetry.query_sampling.mode
enumerationstatementthe execution level used for telemetry sampling. If set to 'statement', events are sampled at the statement execution level. If set to 'transaction', events are sampled at the transaction execution level, i.e. all statements for a transaction will be logged and are counted together as one sampled event (events are still emitted one per statement). [statement = 0, transaction = 1]Serverless/Dedicated/Self-Hosted
sql.telemetry.transaction_sampling.max_event_frequency
integer8the max event frequency (events per second) at which we sample transactions for telemetry. If sampling mode is set to 'statement', this setting is ignored. In practice, this means that we only sample a transaction if 1/max_event_frequency seconds have elapsed since the last transaction was sampled.Serverless/Dedicated/Self-Hosted
sql.telemetry.transaction_sampling.statement_events_per_transaction.max
integer50the maximum number of statement events to log for every sampled transaction. Note that statements that are logged by force do not adhere to this limit.Serverless/Dedicated/Self-Hosted
sql.temp_object_cleaner.cleanup_interval
duration30m0show often to clean up orphaned temporary objectsServerless/Dedicated/Self-Hosted
sql.temp_object_cleaner.wait_interval
duration30m0show long after creation a temporary object will be cleaned upServerless/Dedicated/Self-Hosted
sql.log.all_statements.enabled
(alias: sql.trace.log_statement_execute)
booleanfalseset to true to enable logging of all executed statementsServerless/Dedicated/Self-Hosted
sql.trace.stmt.enable_threshold
duration0senables tracing on all statements; statements executing for longer than this duration will have their trace logged (set to 0 to disable); note that enabling this may have a negative performance impact; this setting applies to individual statements within a transaction and is therefore finer-grained than sql.trace.txn.enable_thresholdServerless/Dedicated/Self-Hosted
sql.trace.txn.enable_threshold
duration0senables tracing on all transactions; transactions open for longer than this duration will have their trace logged (set to 0 to disable); note that enabling this may have a negative performance impact; this setting is coarser-grained than sql.trace.stmt.enable_threshold because it applies to all statements within a transaction as well as client communication (e.g. retries)Serverless/Dedicated/Self-Hosted
sql.ttl.changefeed_replication.disabled
booleanfalseif true, deletes issued by TTL will not be replicated via changefeeds (this setting will be ignored by changefeeds that have the ignore_disable_changefeed_replication option set; such changefeeds will continue to replicate all TTL deletes)Serverless/Dedicated/Self-Hosted
sql.ttl.default_delete_batch_size
integer100default amount of rows to delete in a single query during a TTL jobServerless/Dedicated/Self-Hosted
sql.ttl.default_delete_rate_limit
integer100default delete rate limit (rows per second) per node for each TTL job. Use 0 to signify no rate limit.Serverless/Dedicated/Self-Hosted
sql.ttl.default_select_batch_size
integer500default amount of rows to select in a single query during a TTL jobServerless/Dedicated/Self-Hosted
sql.ttl.default_select_rate_limit
integer0default select rate limit (rows per second) per node for each TTL job. Use 0 to signify no rate limit.Serverless/Dedicated/Self-Hosted
sql.ttl.job.enabled
booleantruewhether the TTL job is enabledServerless/Dedicated/Self-Hosted
sql.txn.read_committed_isolation.enabled
booleantrueset to true to allow transactions to use the READ COMMITTED isolation level if specified by BEGIN/SET commandsServerless/Dedicated/Self-Hosted
sql.txn.repeatable_read_isolation.enabled
(alias: sql.txn.snapshot_isolation.enabled)
booleanfalseset to true to allow transactions to use the REPEATABLE READ isolation level if specified by BEGIN/SET commandsServerless/Dedicated/Self-Hosted
sql.txn_fingerprint_id_cache.capacity
integer100the maximum number of txn fingerprint IDs storedServerless/Dedicated/Self-Hosted
storage.columnar_blocks.enabled
booleanfalseset to true to enable columnar-blocks to store KVs in a columnar format (experimental)Dedicated/Self-hosted (read-write); Serverless (read-only)
storage.delete_compaction_excise.enabled
booleantrueset to false to direct Pebble to not partially excise sstables in delete-only compactionsDedicated/Self-hosted (read-write); Serverless (read-only)
storage.experimental.eventually_file_only_snapshots.enabled
booleantrueset to false to disable eventually-file-only-snapshots (kv.snapshot_receiver.excise.enabled must also be false)Dedicated/Self-Hosted
storage.ingest_split.enabled
booleantrueset to false to disable ingest-time splitting that lowers write-amplificationDedicated/Self-Hosted
storage.ingestion.value_blocks.enabled
booleantrueset to true to enable writing of value blocks in ingestion sstablesServerless/Dedicated/Self-Hosted
storage.max_sync_duration
duration20smaximum duration for disk operations; any operations that take longer than this setting trigger a warning log entry or process crashDedicated/Self-hosted (read-write); Serverless (read-only)
storage.max_sync_duration.fatal.enabled
booleantrueif true, fatal the process when a disk operation exceeds storage.max_sync_durationServerless/Dedicated/Self-Hosted
storage.sstable.compression_algorithm
enumerationsnappydetermines the compression algorithm to use when compressing sstable data blocks for use in a Pebble store; [snappy = 1, zstd = 2, none = 3]Dedicated/Self-hosted (read-write); Serverless (read-only)
storage.sstable.compression_algorithm_backup_storage
enumerationsnappydetermines the compression algorithm to use when compressing sstable data blocks for backup row data storage; [snappy = 1, zstd = 2, none = 3]Dedicated/Self-hosted (read-write); Serverless (read-only)
storage.sstable.compression_algorithm_backup_transport
enumerationsnappydetermines the compression algorithm to use when compressing sstable data blocks for backup transport; [snappy = 1, zstd = 2, none = 3]Dedicated/Self-hosted (read-write); Serverless (read-only)
storage.wal_failover.unhealthy_op_threshold
duration100msthe latency of a WAL write considered unhealthy and triggers a failover to a secondary WAL locationDedicated/Self-Hosted
timeseries.storage.enabled
booleantrueif set, periodic timeseries data is stored within the cluster; disabling is not recommended unless you are storing the data elsewhereDedicated/Self-Hosted
timeseries.storage.resolution_10s.ttl
duration240h0m0sthe maximum age of time series data stored at the 10 second resolution. Data older than this is subject to rollup and deletion.Dedicated/Self-hosted (read-write); Serverless (read-only)
timeseries.storage.resolution_30m.ttl
duration2160h0m0sthe maximum age of time series data stored at the 30 minute resolution. Data older than this is subject to deletion.Dedicated/Self-hosted (read-write); Serverless (read-only)
trace.debug_http_endpoint.enabled
(alias: trace.debug.enable)
booleanfalseif set, traces for recent requests can be seen at https://<ui>/debug/requestsServerless/Dedicated/Self-Hosted
trace.opentelemetry.collector
stringaddress of an OpenTelemetry trace collector to receive traces using the otel gRPC protocol, as <host>:<port>. If no port is specified, 4317 will be used.Serverless/Dedicated/Self-Hosted
trace.snapshot.rate
duration0sif non-zero, interval at which background trace snapshots are capturedServerless/Dedicated/Self-Hosted
trace.span_registry.enabled
booleantrueif set, ongoing traces can be seen at https://<ui>/#/debug/tracezServerless/Dedicated/Self-Hosted
trace.zipkin.collector
stringthe address of a Zipkin instance to receive traces, as <host>:<port>. If no port is specified, 9411 will be used.Serverless/Dedicated/Self-Hosted
ui.display_timezone
enumerationetc/utcthe timezone used to format timestamps in the ui [etc/utc = 0, america/new_york = 1]Serverless/Dedicated/Self-Hosted
version
version24.3set the active cluster version in the format '<major>.<minor>'Serverless/Dedicated/Self-Hosted
diff --git a/src/current/_includes/cockroach-generated/release-24.3/sql/aggregates.md b/src/current/_includes/cockroach-generated/release-24.3/sql/aggregates.md new file mode 100644 index 00000000000..3213f0f02a8 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-24.3/sql/aggregates.md @@ -0,0 +1,579 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_agg(arg1: bool) → bool[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bool[]) → bool[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bytes) → bytes[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bytes[]) → bytes[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: date) → date[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: date[]) → date[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: decimal) → decimal[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: decimal[]) → decimal[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: float) → float[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: float[]) → float[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: inet) → inet[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: inet[]) → inet[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: int) → int[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: int[]) → int[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: interval) → interval[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: interval[]) → interval[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: string) → string[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: string[]) → string[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: time) → time[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: time[]) → time[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamp) → timestamp[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamp[]) → timestamp[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamptz) → timestamptz[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamptz[]) → timestamptz[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: uuid) → uuid[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: uuid[]) → uuid[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: anyenum) → anyenum[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: anyenum[]) → anyenum[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: box2d) → box2d[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: box2d[]) → box2d[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geography) → geography[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geography[]) → geography[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geometry) → geometry[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geometry[]) → geometry[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: jsonb) → jsonb[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: jsonb[]) → jsonb[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: oid) → oid[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: oid[]) → oid[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: pg_lsn) → pg_lsn[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: pg_lsn[]) → pg_lsn[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: refcursor) → refcursor[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: refcursor[]) → refcursor[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timetz) → timetz[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timetz[]) → timetz[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: tuple) → tuple[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: tuple[]) → tuple[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: varbit) → varbit[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: varbit[]) → varbit[][]

Aggregates the selected values into an array.

+
Immutable
array_cat_agg(arg1: bool[]) → bool[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: bytes[]) → bytes[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: date[]) → date[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: decimal[]) → decimal[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: float[]) → float[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: inet[]) → inet[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: int[]) → int[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: interval[]) → interval[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: string[]) → string[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: time[]) → time[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timestamp[]) → timestamp[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timestamptz[]) → timestamptz[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: uuid[]) → uuid[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: anyenum[]) → anyenum[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: box2d[]) → box2d[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: geography[]) → geography[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: geometry[]) → geometry[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: jsonb[]) → jsonb[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: oid[]) → oid[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: pg_lsn[]) → pg_lsn[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: refcursor[]) → refcursor[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timetz[]) → timetz[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: tuple[]) → tuple[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: varbit[]) → varbit[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
avg(arg1: decimal) → decimal

Calculates the average of the selected values.

+
Immutable
avg(arg1: float) → float

Calculates the average of the selected values.

+
Immutable
avg(arg1: int) → decimal

Calculates the average of the selected values.

+
Immutable
avg(arg1: interval) → interval

Calculates the average of the selected values.

+
Immutable
bit_and(arg1: int) → int

Calculates the bitwise AND of all non-null input values, or null if none.

+
Immutable
bit_and(arg1: varbit) → varbit

Calculates the bitwise AND of all non-null input values, or null if none.

+
Immutable
bit_or(arg1: int) → int

Calculates the bitwise OR of all non-null input values, or null if none.

+
Immutable
bit_or(arg1: varbit) → varbit

Calculates the bitwise OR of all non-null input values, or null if none.

+
Immutable
bool_and(arg1: bool) → bool

Calculates the boolean value of ANDing all selected values.

+
Immutable
bool_or(arg1: bool) → bool

Calculates the boolean value of ORing all selected values.

+
Immutable
concat_agg(arg1: bytes) → bytes

Concatenates all selected values.

+
Immutable
concat_agg(arg1: string) → string

Concatenates all selected values.

+
Immutable
corr(arg1: decimal, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: decimal, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: decimal, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
count(arg1: anyelement) → int

Calculates the number of selected elements.

+
Immutable
count_rows() → int

Calculates the number of rows.

+
Immutable
covar_pop(arg1: decimal, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: decimal, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: decimal, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
every(arg1: bool) → bool

Calculates the boolean value of ANDing all selected values.

+
Immutable
json_agg(arg1: anyelement) → jsonb

Aggregates values as a JSON or JSONB array.

+
Stable
json_object_agg(arg1: string, arg2: anyelement) → jsonb

Aggregates values as a JSON or JSONB object.

+
Stable
jsonb_agg(arg1: anyelement) → jsonb

Aggregates values as a JSON or JSONB array.

+
Stable
jsonb_object_agg(arg1: string, arg2: anyelement) → jsonb

Aggregates values as a JSON or JSONB object.

+
Stable
max(arg1: bool) → bool

Identifies the maximum selected value.

+
Immutable
max(arg1: bytes) → bytes

Identifies the maximum selected value.

+
Immutable
max(arg1: date) → date

Identifies the maximum selected value.

+
Immutable
max(arg1: decimal) → decimal

Identifies the maximum selected value.

+
Immutable
max(arg1: float) → float

Identifies the maximum selected value.

+
Immutable
max(arg1: inet) → inet

Identifies the maximum selected value.

+
Immutable
max(arg1: int) → int

Identifies the maximum selected value.

+
Immutable
max(arg1: interval) → interval

Identifies the maximum selected value.

+
Immutable
max(arg1: string) → string

Identifies the maximum selected value.

+
Immutable
max(arg1: time) → time

Identifies the maximum selected value.

+
Immutable
max(arg1: timestamp) → timestamp

Identifies the maximum selected value.

+
Immutable
max(arg1: timestamptz) → timestamptz

Identifies the maximum selected value.

+
Immutable
max(arg1: uuid) → uuid

Identifies the maximum selected value.

+
Immutable
max(arg1: anyenum) → anyenum

Identifies the maximum selected value.

+
Immutable
max(arg1: box2d) → box2d

Identifies the maximum selected value.

+
Immutable
max(arg1: collatedstring{*}) → collatedstring{*}

Identifies the maximum selected value.

+
Immutable
max(arg1: geography) → geography

Identifies the maximum selected value.

+
Immutable
max(arg1: geometry) → geometry

Identifies the maximum selected value.

+
Immutable
max(arg1: jsonb) → jsonb

Identifies the maximum selected value.

+
Immutable
max(arg1: oid) → oid

Identifies the maximum selected value.

+
Immutable
max(arg1: pg_lsn) → pg_lsn

Identifies the maximum selected value.

+
Immutable
max(arg1: timetz) → timetz

Identifies the maximum selected value.

+
Immutable
max(arg1: varbit) → varbit

Identifies the maximum selected value.

+
Immutable
min(arg1: bool) → bool

Identifies the minimum selected value.

+
Immutable
min(arg1: bytes) → bytes

Identifies the minimum selected value.

+
Immutable
min(arg1: date) → date

Identifies the minimum selected value.

+
Immutable
min(arg1: decimal) → decimal

Identifies the minimum selected value.

+
Immutable
min(arg1: float) → float

Identifies the minimum selected value.

+
Immutable
min(arg1: inet) → inet

Identifies the minimum selected value.

+
Immutable
min(arg1: int) → int

Identifies the minimum selected value.

+
Immutable
min(arg1: interval) → interval

Identifies the minimum selected value.

+
Immutable
min(arg1: string) → string

Identifies the minimum selected value.

+
Immutable
min(arg1: time) → time

Identifies the minimum selected value.

+
Immutable
min(arg1: timestamp) → timestamp

Identifies the minimum selected value.

+
Immutable
min(arg1: timestamptz) → timestamptz

Identifies the minimum selected value.

+
Immutable
min(arg1: uuid) → uuid

Identifies the minimum selected value.

+
Immutable
min(arg1: anyenum) → anyenum

Identifies the minimum selected value.

+
Immutable
min(arg1: box2d) → box2d

Identifies the minimum selected value.

+
Immutable
min(arg1: collatedstring{*}) → collatedstring{*}

Identifies the minimum selected value.

+
Immutable
min(arg1: geography) → geography

Identifies the minimum selected value.

+
Immutable
min(arg1: geometry) → geometry

Identifies the minimum selected value.

+
Immutable
min(arg1: jsonb) → jsonb

Identifies the minimum selected value.

+
Immutable
min(arg1: oid) → oid

Identifies the minimum selected value.

+
Immutable
min(arg1: pg_lsn) → pg_lsn

Identifies the minimum selected value.

+
Immutable
min(arg1: timetz) → timetz

Identifies the minimum selected value.

+
Immutable
min(arg1: varbit) → varbit

Identifies the minimum selected value.

+
Immutable
percentile_cont(arg1: float) → float

Continuous percentile: returns a float corresponding to the specified fraction in the ordering, interpolating between adjacent input floats if needed.

+
Immutable
percentile_cont(arg1: float) → interval

Continuous percentile: returns an interval corresponding to the specified fraction in the ordering, interpolating between adjacent input intervals if needed.

+
Immutable
percentile_cont(arg1: float[]) → float[]

Continuous percentile: returns floats corresponding to the specified fractions in the ordering, interpolating between adjacent input floats if needed.

+
Immutable
percentile_cont(arg1: float[]) → interval[]

Continuous percentile: returns intervals corresponding to the specified fractions in the ordering, interpolating between adjacent input intervals if needed.

+
Immutable
percentile_disc(arg1: float) → anyelement

Discrete percentile: returns the first input value whose position in the ordering equals or exceeds the specified fraction.

+
Immutable
percentile_disc(arg1: float[]) → anyelement

Discrete percentile: returns input values whose position in the ordering equals or exceeds the specified fractions.

+
Immutable
regr_avgx(arg1: decimal, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: decimal, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: decimal, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_count(arg1: decimal, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: decimal, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: decimal, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_intercept(arg1: decimal, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: decimal, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: decimal, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_r2(arg1: decimal, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: decimal, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: decimal, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_slope(arg1: decimal, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: decimal, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: decimal, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_sxx(arg1: decimal, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: decimal, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: decimal, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
sqrdiff(arg1: decimal) → decimal

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
sqrdiff(arg1: float) → float

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
sqrdiff(arg1: int) → decimal

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
st_collect(arg1: geometry) → geometry

Collects geometries into a GeometryCollection or multi-type as appropriate.

+
Immutable
st_extent(arg1: geometry) → box2d

Forms a Box2D that encapsulates all provided geometries.

+
Immutable
st_makeline(arg1: geometry) → geometry

Forms a LineString from Point, MultiPoint or LineStrings. Other shapes will be ignored.

+
Immutable
st_memcollect(arg1: geometry) → geometry

Collects geometries into a GeometryCollection or multi-type as appropriate.

+
Immutable
st_memunion(arg1: geometry) → geometry

Applies a spatial union to the geometries provided.

+
Immutable
st_union(arg1: geometry) → geometry

Applies a spatial union to the geometries provided.

+
Immutable
stddev(arg1: decimal) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev(arg1: float) → float

Calculates the standard deviation of the selected values.

+
Immutable
stddev(arg1: int) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: decimal) → decimal

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: float) → float

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: int) → decimal

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: decimal) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: float) → float

Calculates the standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: int) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
string_agg(arg1: bytes, arg2: bytes) → bytes

Concatenates all selected values using the provided delimiter.

+
Immutable
string_agg(arg1: string, arg2: string) → string

Concatenates all selected values using the provided delimiter.

+
Immutable
sum(arg1: decimal) → decimal

Calculates the sum of the selected values.

+
Immutable
sum(arg1: float) → float

Calculates the sum of the selected values.

+
Immutable
sum(arg1: int) → decimal

Calculates the sum of the selected values.

+
Immutable
sum(arg1: interval) → interval

Calculates the sum of the selected values.

+
Immutable
sum_int(arg1: int) → int

Calculates the sum of the selected values.

+
Immutable
var_pop(arg1: decimal) → decimal

Calculates the population variance of the selected values.

+
Immutable
var_pop(arg1: float) → float

Calculates the population variance of the selected values.

+
Immutable
var_pop(arg1: int) → decimal

Calculates the population variance of the selected values.

+
Immutable
var_samp(arg1: decimal) → decimal

Calculates the variance of the selected values.

+
Immutable
var_samp(arg1: float) → float

Calculates the variance of the selected values.

+
Immutable
var_samp(arg1: int) → decimal

Calculates the variance of the selected values.

+
Immutable
variance(arg1: decimal) → decimal

Calculates the variance of the selected values.

+
Immutable
variance(arg1: float) → float

Calculates the variance of the selected values.

+
Immutable
variance(arg1: int) → decimal

Calculates the variance of the selected values.

+
Immutable
xor_agg(arg1: bytes) → bytes

Calculates the bitwise XOR of the selected values.

+
Immutable
xor_agg(arg1: int) → int

Calculates the bitwise XOR of the selected values.

+
Immutable
+ diff --git a/src/current/_includes/cockroach-generated/release-24.3/sql/functions.md b/src/current/_includes/cockroach-generated/release-24.3/sql/functions.md new file mode 100644 index 00000000000..1e734501429 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-24.3/sql/functions.md @@ -0,0 +1,3523 @@ +### Array functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_append(array: bool[], elem: bool) → bool[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: bytes[], elem: bytes) → bytes[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: date[], elem: date) → date[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: decimal[], elem: decimal) → decimal[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: float[], elem: float) → float[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: inet[], elem: inet) → inet[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: int[], elem: int) → int[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: interval[], elem: interval) → interval[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: string[], elem: string) → string[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: time[], elem: time) → time[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timestamp[], elem: timestamp) → timestamp[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timestamptz[], elem: timestamptz) → timestamptz[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: uuid[], elem: uuid) → uuid[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: anyenum[], elem: anyenum) → anyenum[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: box2d[], elem: box2d) → box2d[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: geography[], elem: geography) → geography[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: geometry[], elem: geometry) → geometry[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: jsonb[], elem: jsonb) → jsonb[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: oid[], elem: oid) → oid[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: pg_lsn[], elem: pg_lsn) → pg_lsn[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: refcursor[], elem: refcursor) → refcursor[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timetz[], elem: timetz) → timetz[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: tuple[], elem: tuple) → tuple[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: varbit[], elem: varbit) → varbit[]

Appends elem to array, returning the result.

+
Immutable
array_cat(left: bool[], right: bool[]) → bool[]

Appends two arrays.

+
Immutable
array_cat(left: bytes[], right: bytes[]) → bytes[]

Appends two arrays.

+
Immutable
array_cat(left: date[], right: date[]) → date[]

Appends two arrays.

+
Immutable
array_cat(left: decimal[], right: decimal[]) → decimal[]

Appends two arrays.

+
Immutable
array_cat(left: float[], right: float[]) → float[]

Appends two arrays.

+
Immutable
array_cat(left: inet[], right: inet[]) → inet[]

Appends two arrays.

+
Immutable
array_cat(left: int[], right: int[]) → int[]

Appends two arrays.

+
Immutable
array_cat(left: interval[], right: interval[]) → interval[]

Appends two arrays.

+
Immutable
array_cat(left: string[], right: string[]) → string[]

Appends two arrays.

+
Immutable
array_cat(left: time[], right: time[]) → time[]

Appends two arrays.

+
Immutable
array_cat(left: timestamp[], right: timestamp[]) → timestamp[]

Appends two arrays.

+
Immutable
array_cat(left: timestamptz[], right: timestamptz[]) → timestamptz[]

Appends two arrays.

+
Immutable
array_cat(left: uuid[], right: uuid[]) → uuid[]

Appends two arrays.

+
Immutable
array_cat(left: anyenum[], right: anyenum[]) → anyenum[]

Appends two arrays.

+
Immutable
array_cat(left: box2d[], right: box2d[]) → box2d[]

Appends two arrays.

+
Immutable
array_cat(left: geography[], right: geography[]) → geography[]

Appends two arrays.

+
Immutable
array_cat(left: geometry[], right: geometry[]) → geometry[]

Appends two arrays.

+
Immutable
array_cat(left: jsonb[], right: jsonb[]) → jsonb[]

Appends two arrays.

+
Immutable
array_cat(left: oid[], right: oid[]) → oid[]

Appends two arrays.

+
Immutable
array_cat(left: pg_lsn[], right: pg_lsn[]) → pg_lsn[]

Appends two arrays.

+
Immutable
array_cat(left: refcursor[], right: refcursor[]) → refcursor[]

Appends two arrays.

+
Immutable
array_cat(left: timetz[], right: timetz[]) → timetz[]

Appends two arrays.

+
Immutable
array_cat(left: tuple[], right: tuple[]) → tuple[]

Appends two arrays.

+
Immutable
array_cat(left: varbit[], right: varbit[]) → varbit[]

Appends two arrays.

+
Immutable
array_length(input: anyelement[], array_dimension: int) → int

Calculates the length of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
array_lower(input: anyelement[], array_dimension: int) → int

Calculates the minimum value of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
array_position(array: bool[], elem: bool) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: bool[], elem: bool, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: bytes[], elem: bytes) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: bytes[], elem: bytes, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: date[], elem: date) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: date[], elem: date, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: decimal[], elem: decimal) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: decimal[], elem: decimal, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: float[], elem: float) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: float[], elem: float, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: inet[], elem: inet) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: inet[], elem: inet, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: int[], elem: int) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: int[], elem: int, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: interval[], elem: interval) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: interval[], elem: interval, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: string[], elem: string) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: string[], elem: string, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: time[], elem: time) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: time[], elem: time, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: timestamp[], elem: timestamp) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timestamp[], elem: timestamp, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: timestamptz[], elem: timestamptz) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timestamptz[], elem: timestamptz, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: uuid[], elem: uuid) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: uuid[], elem: uuid, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: anyenum[], elem: anyenum) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: anyenum[], elem: anyenum, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: box2d[], elem: box2d) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: box2d[], elem: box2d, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: geography[], elem: geography) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: geography[], elem: geography, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: geometry[], elem: geometry) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: geometry[], elem: geometry, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: jsonb[], elem: jsonb) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: jsonb[], elem: jsonb, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: oid[], elem: oid) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: oid[], elem: oid, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: pg_lsn[], elem: pg_lsn) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: pg_lsn[], elem: pg_lsn, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: refcursor[], elem: refcursor) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: refcursor[], elem: refcursor, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: timetz[], elem: timetz) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timetz[], elem: timetz, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: tuple[], elem: tuple) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: tuple[], elem: tuple, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: varbit[], elem: varbit) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: varbit[], elem: varbit, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_positions(array: bool[], elem: bool) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: bytes[], elem: bytes) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: date[], elem: date) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: decimal[], elem: decimal) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: float[], elem: float) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: inet[], elem: inet) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: int[], elem: int) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: interval[], elem: interval) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: string[], elem: string) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: time[], elem: time) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timestamp[], elem: timestamp) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timestamptz[], elem: timestamptz) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: uuid[], elem: uuid) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: anyenum[], elem: anyenum) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: box2d[], elem: box2d) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: geography[], elem: geography) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: geometry[], elem: geometry) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: jsonb[], elem: jsonb) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: oid[], elem: oid) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: pg_lsn[], elem: pg_lsn) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: refcursor[], elem: refcursor) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timetz[], elem: timetz) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: tuple[], elem: tuple) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: varbit[], elem: varbit) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_prepend(elem: bool, array: bool[]) → bool[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: bytes, array: bytes[]) → bytes[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: date, array: date[]) → date[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: decimal, array: decimal[]) → decimal[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: float, array: float[]) → float[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: inet, array: inet[]) → inet[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: int, array: int[]) → int[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: interval, array: interval[]) → interval[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: string, array: string[]) → string[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: time, array: time[]) → time[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timestamp, array: timestamp[]) → timestamp[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timestamptz, array: timestamptz[]) → timestamptz[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: uuid, array: uuid[]) → uuid[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: anyenum, array: anyenum[]) → anyenum[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: box2d, array: box2d[]) → box2d[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: geography, array: geography[]) → geography[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: geometry, array: geometry[]) → geometry[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: jsonb, array: jsonb[]) → jsonb[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: oid, array: oid[]) → oid[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: pg_lsn, array: pg_lsn[]) → pg_lsn[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: refcursor, array: refcursor[]) → refcursor[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timetz, array: timetz[]) → timetz[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: tuple, array: tuple[]) → tuple[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: varbit, array: varbit[]) → varbit[]

Prepends elem to array, returning the result.

+
Immutable
array_remove(array: bool[], elem: bool) → bool[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: bytes[], elem: bytes) → bytes[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: date[], elem: date) → date[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: decimal[], elem: decimal) → decimal[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: float[], elem: float) → float[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: inet[], elem: inet) → inet[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: int[], elem: int) → int[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: interval[], elem: interval) → interval[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: string[], elem: string) → string[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: time[], elem: time) → time[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timestamp[], elem: timestamp) → timestamp[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timestamptz[], elem: timestamptz) → timestamptz[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: uuid[], elem: uuid) → uuid[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: anyenum[], elem: anyenum) → anyenum[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: box2d[], elem: box2d) → box2d[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: geography[], elem: geography) → geography[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: geometry[], elem: geometry) → geometry[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: jsonb[], elem: jsonb) → jsonb[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: oid[], elem: oid) → oid[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: pg_lsn[], elem: pg_lsn) → pg_lsn[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: refcursor[], elem: refcursor) → refcursor[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timetz[], elem: timetz) → timetz[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: tuple[], elem: tuple) → tuple[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: varbit[], elem: varbit) → varbit[]

Remove from array all elements equal to elem.

+
Immutable
array_replace(array: bool[], toreplace: bool, replacewith: bool) → bool[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: bytes[], toreplace: bytes, replacewith: bytes) → bytes[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: date[], toreplace: date, replacewith: date) → date[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: decimal[], toreplace: decimal, replacewith: decimal) → decimal[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: float[], toreplace: float, replacewith: float) → float[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: inet[], toreplace: inet, replacewith: inet) → inet[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: int[], toreplace: int, replacewith: int) → int[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: interval[], toreplace: interval, replacewith: interval) → interval[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: string[], toreplace: string, replacewith: string) → string[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: time[], toreplace: time, replacewith: time) → time[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timestamp[], toreplace: timestamp, replacewith: timestamp) → timestamp[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timestamptz[], toreplace: timestamptz, replacewith: timestamptz) → timestamptz[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: uuid[], toreplace: uuid, replacewith: uuid) → uuid[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: anyenum[], toreplace: anyenum, replacewith: anyenum) → anyenum[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: box2d[], toreplace: box2d, replacewith: box2d) → box2d[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: geography[], toreplace: geography, replacewith: geography) → geography[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: geometry[], toreplace: geometry, replacewith: geometry) → geometry[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: jsonb[], toreplace: jsonb, replacewith: jsonb) → jsonb[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: oid[], toreplace: oid, replacewith: oid) → oid[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: pg_lsn[], toreplace: pg_lsn, replacewith: pg_lsn) → pg_lsn[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: refcursor[], toreplace: refcursor, replacewith: refcursor) → refcursor[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timetz[], toreplace: timetz, replacewith: timetz) → timetz[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: tuple[], toreplace: tuple, replacewith: tuple) → tuple[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: varbit[], toreplace: varbit, replacewith: varbit) → varbit[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_to_string(input: anyelement[], delim: string) → string

Join an array into a string with a delimiter.

+
Stable
array_to_string(input: anyelement[], delimiter: string, null: string) → string

Join an array into a string with a delimiter, replacing NULLs with a null string.

+
Stable
array_upper(input: anyelement[], array_dimension: int) → int

Calculates the maximum value of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
cardinality(input: anyelement[]) → int

Calculates the number of elements contained in input

+
Immutable
jsonb_array_to_string_array(input: jsonb) → string[]

Convert a JSONB array into a string array.

+
Immutable
string_to_array(str: string, delimiter: string) → string[]

Split a string into components on a delimiter.

+
Immutable
string_to_array(str: string, delimiter: string, null: string) → string[]

Split a string into components on a delimiter with a specified string to consider NULL.

+
Immutable
+ +### BOOL functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
ilike_escape(unescaped: string, pattern: string, escape: string) → bool

Matches case insensitively unescaped with pattern using escape as an escape token.

+
Immutable
inet_contained_by_or_equals(val: inet, container: inet) → bool

Test for subnet inclusion or equality, using only the network parts of the addresses. The host part of the addresses is ignored.

+
Immutable
inet_contains_or_equals(container: inet, val: inet) → bool

Test for subnet inclusion or equality, using only the network parts of the addresses. The host part of the addresses is ignored.

+
Immutable
inet_same_family(val: inet, val: inet) → bool

Checks if two IP addresses are of the same IP family.

+
Immutable
like_escape(unescaped: string, pattern: string, escape: string) → bool

Matches unescaped with pattern using escape as an escape token.

+
Immutable
not_ilike_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped not matches case insensitively with pattern using escape as an escape token.

+
Immutable
not_like_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped not matches with pattern using escape as an escape token.

+
Immutable
not_similar_to_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped not matches with pattern using escape as an escape token.

+
Immutable
+ +### Comparison functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
greatest(anyelement...) → anyelement

Returns the element with the greatest value.

+
Immutable
least(anyelement...) → anyelement

Returns the element with the lowest value.

+
Immutable
num_nonnulls(anyelement...) → int

Returns the number of nonnull arguments.

+
Immutable
num_nulls(anyelement...) → int

Returns the number of null arguments.

+
Immutable
+ +### Cryptographic functions + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
crypt(password: string, salt: string) → string

Generates a hash based on a password and salt. The hash algorithm and number of rounds if applicable are encoded in the salt.

+
Immutable
decrypt(data: bytes, key: bytes, type: string) → bytes

Decrypt data with key using the cipher method specified by type.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
decrypt_iv(data: bytes, key: bytes, iv: bytes, type: string) → bytes

Decrypt data with key using the cipher method specified by type. If the mode is CBC, the provided iv will be used. Otherwise, it will be ignored.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
digest(data: bytes, type: string) → bytes

Computes a binary hash of the given data. type is the algorithm to use (md5, sha1, sha224, sha256, sha384, or sha512).

+
Immutable
digest(data: string, type: string) → bytes

Computes a binary hash of the given data. type is the algorithm to use (md5, sha1, sha224, sha256, sha384, or sha512).

+
Immutable
encrypt(data: bytes, key: bytes, type: string) → bytes

Encrypt data with key using the cipher method specified by type.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
encrypt_iv(data: bytes, key: bytes, iv: bytes, type: string) → bytes

Encrypt data with key using the cipher method specified by type. If the mode is CBC, the provided iv will be used. Otherwise, it will be ignored.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
gen_random_bytes(count: int) → bytes

Returns count cryptographically strong random bytes. At most 1024 bytes can be extracted at a time.

+
Volatile
gen_salt(type: string) → string

Generates a salt for input into the crypt function using the default number of rounds.

+
Volatile
gen_salt(type: string, iter_count: int) → string

Generates a salt for input into the crypt function using iter_count number of rounds.

+
Volatile
hmac(data: bytes, key: bytes, type: string) → bytes

Calculates hashed MAC for data with key key. type is the same as in digest().

+
Immutable
hmac(data: string, key: string, type: string) → bytes

Calculates hashed MAC for data with key key. type is the same as in digest().

+
Immutable
+ +### DECIMAL functions + + + + + +
Function → ReturnsDescriptionVolatility
hlc_to_timestamp(hlc: decimal) → timestamptz

Returns a TimestampTZ representation of a CockroachDB HLC in decimal form.

+

Note that a TimestampTZ has less precision than a CockroachDB HLC. It is intended as +a convenience function to display HLCs in a print-friendly form. Use the decimal +value if you rely on the HLC for accuracy.

+
Immutable
+ +### Date and time functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
age(end: timestamptz, begin: timestamptz) → interval

Calculates the interval between begin and end, normalized into years, months and days.

+

Note this may not be an accurate time span since years and months are normalized +from days, and years and months are out of context. To avoid normalizing days into +months and years, use the timestamptz subtraction operator.

+
Immutable
age(val: timestamptz) → interval

Calculates the interval between val and the current time, normalized into years, months and days.

+

Note this may not be an accurate time span since years and months are normalized +from days, and years and months are out of context. To avoid normalizing days into +months and years, use now() - timestamptz.

+
Stable
clock_timestamp() → timestamp

Returns the current system time on one of the cluster nodes.

+
Volatile
clock_timestamp() → timestamptz

Returns the current system time on one of the cluster nodes.

+
Volatile
current_date() → date

Returns the date of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
current_timestamp(precision: int) → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp(precision: int) → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp(precision: int) → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
date_part(element: string, input: date) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: interval) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, +month, day, hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: time) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: timestamp) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: timestamptz) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Stable
date_part(element: string, input: timetz) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Immutable
date_trunc(element: string, input: date) → timestamptz

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: interval) → interval

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: time) → interval

Truncates input to precision element. Sets all fields that are less +significant than element to zero.

+

Compatible elements: hour, minute, second, millisecond, microsecond.

+
Immutable
date_trunc(element: string, input: timestamp) → timestamp

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Immutable
date_trunc(element: string, input: timestamptz) → timestamptz

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: timestamptz, timezone: string) → timestamptz

Truncates input to precision element in the specified timezone. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
experimental_follower_read_timestamp() → timestamptz

Same as follower_read_timestamp. This name is deprecated.

+
Volatile
experimental_strftime(input: date, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strftime(input: timestamp, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strftime(input: timestamptz, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strptime(input: string, format: string) → timestamptz

Returns input as a timestamptz using format (which uses standard strptime formatting).

+
Immutable
extract(element: string, input: date) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: interval) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, +month, day, hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: time) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: timestamp) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: timestamptz) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Stable
extract(element: string, input: timetz) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Immutable
extract_duration(element: string, input: interval) → int

Extracts element from input. +Compatible elements: hour, minute, second, millisecond, microsecond. +This is deprecated in favor of extract which supports duration.

+
Immutable
follower_read_timestamp() → timestamptz

Returns a timestamp which is very likely to be safe to perform +against a follower replica.

+

This function is intended to be used with an AS OF SYSTEM TIME clause to perform +historical reads against a time which is recent but sufficiently old for reads +to be performed against the closest replica as opposed to the currently +leaseholder for a given range.

+

Note that this function requires an enterprise license on a CCL distribution to +return a result that is less likely the closest replica. It is otherwise +hardcoded as -4.8s from the statement time, which may not result in reading from the +nearest replica.

+
Volatile
localtimestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtimestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp(precision: int) → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp(precision: int) → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtimestamp(precision: int) → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
make_date(year: int, month: int, day: int) → date

Create date (formatted according to ISO 8601) from year, month, and day fields (negative years signify BC).

+
Immutable
make_timestamp(year: int, month: int, day: int, hour: int, min: int, sec: float) → timestamp

Create timestamp (formatted according to ISO 8601) from year, month, day, hour, minute, and seconds fields (negative years signify BC).

+
Immutable
make_timestamptz(year: int, month: int, day: int, hour: int, min: int, sec: float) → timestamptz

Create timestamp (formatted according to ISO 8601) with time zone from year, month, day, hour, minute and seconds fields (negative years signify BC). If timezone is not specified, the current time zone is used.

+
Stable
make_timestamptz(year: int, month: int, day: int, hour: int, min: int, sec: float, timezone: string) → timestamptz

Create timestamp (formatted according to ISO 8601) with time zone from year, month, day, hour, minute and seconds fields (negative years signify BC). If timezone is not specified, the current time zone is used.

+
Stable
now() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
now() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
now() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
overlaps(s1: date, e1: date, s1: date, e2: date) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: date, e1: interval, s1: date, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: time, e1: interval, s1: time, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: time, e1: time, s1: time, e2: time) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamp, e1: interval, s1: timestamp, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamp, e1: timestamp, s1: timestamp, e2: timestamp) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamptz, e1: interval, s1: timestamptz, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Stable
overlaps(s1: timestamptz, e1: timestamptz, s1: timestamptz, e2: timestamptz) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timetz, e1: interval, s1: timetz, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timetz, e1: timetz, s1: timetz, e2: timetz) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
statement_timestamp() → timestamp

Returns the start time of the current statement.

+
Stable
statement_timestamp() → timestamptz

Returns the start time of the current statement.

+
Stable
strftime(input: date, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strftime(input: timestamp, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strftime(input: timestamptz, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strptime(input: string, format: string) → timestamptz

Returns input as a timestamptz using format (which uses standard strptime formatting).

+
Immutable
timeofday() → string

Returns the current system time on one of the cluster nodes as a string.

+
Stable
timezone(timezone: string, time: time) → timetz

Treat given time without time zone as located in the specified time zone.

+
Stable
timezone(timezone: string, timestamp: timestamp) → timestamptz

Treat given time stamp without time zone as located in the specified time zone.

+
Immutable
timezone(timezone: string, timestamptz: timestamptz) → timestamp

Convert given time stamp with time zone to the new time zone, with no time zone designation.

+
Immutable
timezone(timezone: string, timestamptz_string: string) → timestamp

Convert given time stamp with time zone to the new time zone, with no time zone designation.

+
Stable
timezone(timezone: string, timetz: timetz) → timetz

Convert given time with time zone to the new time zone.

+
Stable
to_char(date: date) → string

Convert a date to a string assuming the ISO, MDY DateStyle.

+
Immutable
to_char(date: date, format: string) → string

Convert a date to a string using the given format.

+
Stable
to_char(interval: interval) → string

Convert an interval to a string assuming the Postgres IntervalStyle.

+
Immutable
to_char(interval: interval, format: string) → string

Convert an interval to a string using the given format.

+
Stable
to_char(timestamp: timestamp) → string

Convert a timestamp to a string assuming the ISO, MDY DateStyle.

+
Immutable
to_char(timestamp: timestamp, format: string) → string

Convert a timestamp to a string using the given format.

+
Stable
to_char(timestamptz: timestamptz, format: string) → string

Convert a timestamp with time zone to a string using the given format.

+
Stable
to_timestamp(timestamp: float) → timestamptz

Convert Unix epoch (seconds since 1970-01-01 00:00:00+00) to timestamp with time zone.

+
Immutable
transaction_timestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
transaction_timestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
transaction_timestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
with_max_staleness(max_staleness: interval) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp within the staleness +bound that allows execution of the reads at the nearest available replica without blocking.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_max_staleness(max_staleness: interval, nearest_only: bool) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp within the staleness +bound that allows execution of the reads at the nearest available replica without blocking.

+

If nearest_only is set to true, reads that cannot be served using the nearest +available replica will error.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_min_timestamp(min_timestamp: timestamptz) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp before the min_timestamp +that allows execution of the reads at the nearest available replica without blocking.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_min_timestamp(min_timestamp: timestamptz, nearest_only: bool) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp before the min_timestamp +that allows execution of the reads at the nearest available replica without blocking.

+

If nearest_only is set to true, reads that cannot be served using the nearest +available replica will error.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
+ +### Enum functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
enum_first(val: anyenum) → anyenum

Returns the first value of the input enum type.

+
Stable
enum_last(val: anyenum) → anyenum

Returns the last value of the input enum type.

+
Stable
enum_range(lower: anyenum, upper: anyenum) → anyenum[]

Returns all values of the input enum in an ordered array between the two arguments (inclusive).

+
Stable
enum_range(val: anyenum) → anyenum[]

Returns all values of the input enum in an ordered array.

+
Stable
+ +### FLOAT functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
abs(val: decimal) → decimal

Calculates the absolute value of val.

+
Immutable
abs(val: float) → float

Calculates the absolute value of val.

+
Immutable
abs(val: int) → int

Calculates the absolute value of val.

+
Immutable
acos(val: float) → float

Calculates the inverse cosine of val.

+
Immutable
acosd(val: float) → float

Calculates the inverse cosine of val with the result in degrees

+
Immutable
acosh(val: float) → float

Calculates the inverse hyperbolic cosine of val.

+
Immutable
asin(val: float) → float

Calculates the inverse sine of val.

+
Immutable
asind(val: float) → float

Calculates the inverse sine of val with the result in degrees.

+
Immutable
asinh(val: float) → float

Calculates the inverse hyperbolic sine of val.

+
Immutable
atan(val: float) → float

Calculates the inverse tangent of val.

+
Immutable
atan2(x: float, y: float) → float

Calculates the inverse tangent of x/y.

+
Immutable
atan2d(x: float, y: float) → float

Calculates the inverse tangent of x/y with the result in degrees

+
Immutable
atand(val: float) → float

Calculates the inverse tangent of val with the result in degrees.

+
Immutable
atanh(val: float) → float

Calculates the inverse hyperbolic tangent of val.

+
Immutable
cbrt(val: decimal) → decimal

Calculates the cube root (∛) of val.

+
Immutable
cbrt(val: float) → float

Calculates the cube root (∛) of val.

+
Immutable
ceil(val: decimal) → decimal

Calculates the smallest integer not smaller than val.

+
Immutable
ceil(val: float) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceil(val: int) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: decimal) → decimal

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: float) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: int) → float

Calculates the smallest integer not smaller than val.

+
Immutable
cos(val: float) → float

Calculates the cosine of val.

+
Immutable
cosd(val: float) → float

Calculates the cosine of val where val is in degrees.

+
Immutable
cosh(val: float) → float

Calculates the hyperbolic cosine of val.

+
Immutable
cot(val: float) → float

Calculates the cotangent of val.

+
Immutable
cotd(val: float) → float

Calculates the cotangent of val where val is in degrees.

+
Immutable
degrees(val: float) → float

Converts val as a radian value to a degree value.

+
Immutable
div(x: decimal, y: decimal) → decimal

Calculates the integer quotient of x/y.

+
Immutable
div(x: float, y: float) → float

Calculates the integer quotient of x/y.

+
Immutable
div(x: int, y: int) → int

Calculates the integer quotient of x/y.

+
Immutable
exp(val: decimal) → decimal

Calculates e ^ val.

+
Immutable
exp(val: float) → float

Calculates e ^ val.

+
Immutable
floor(val: decimal) → decimal

Calculates the largest integer not greater than val.

+
Immutable
floor(val: float) → float

Calculates the largest integer not greater than val.

+
Immutable
floor(val: int) → float

Calculates the largest integer not greater than val.

+
Immutable
isnan(val: decimal) → bool

Returns true if val is NaN, false otherwise.

+
Immutable
isnan(val: float) → bool

Returns true if val is NaN, false otherwise.

+
Immutable
ln(val: decimal) → decimal

Calculates the natural log of val.

+
Immutable
ln(val: float) → float

Calculates the natural log of val.

+
Immutable
log(b: decimal, x: decimal) → decimal

Calculates the base b log of x.

+
Immutable
log(b: float, x: float) → float

Calculates the base b log of x.

+
Immutable
log(val: decimal) → decimal

Calculates the base 10 log of val.

+
Immutable
log(val: float) → float

Calculates the base 10 log of val.

+
Immutable
mod(x: decimal, y: decimal) → decimal

Calculates x%y.

+
Immutable
mod(x: float, y: float) → float

Calculates x%y.

+
Immutable
mod(x: int, y: int) → int

Calculates x%y.

+
Immutable
pi() → float

Returns the value for pi (3.141592653589793).

+
Immutable
pow(x: decimal, y: decimal) → decimal

Calculates x^y.

+
Immutable
pow(x: float, y: float) → float

Calculates x^y.

+
Immutable
pow(x: int, y: int) → int

Calculates x^y.

+
Immutable
power(x: decimal, y: decimal) → decimal

Calculates x^y.

+
Immutable
power(x: float, y: float) → float

Calculates x^y.

+
Immutable
power(x: int, y: int) → int

Calculates x^y.

+
Immutable
radians(val: float) → float

Converts val as a degree value to a radians value.

+
Immutable
random() → float

Returns a random floating-point number between 0 (inclusive) and 1 (exclusive). Note that the value contains at most 53 bits of randomness.

+
Volatile
round(input: decimal, decimal_accuracy: int) → decimal

Keeps decimal_accuracy number of figures to the right of the zero position in input using half away from zero rounding. If decimal_accuracy is not in the range -2^31…(2^31-1), the results are undefined.

+
Immutable
round(input: float, decimal_accuracy: int) → float

Keeps decimal_accuracy number of figures to the right of the zero position in input using half to even (banker’s) rounding.

+
Immutable
round(val: decimal) → decimal

Rounds val to the nearest integer, half away from zero: round(+/-2.4) = +/-2, round(+/-2.5) = +/-3.

+
Immutable
round(val: float) → float

Rounds val to the nearest integer using half to even (banker’s) rounding.

+
Immutable
setseed(seed: float) → void

Sets the seed for subsequent random() calls in this session (value between -1.0 and 1.0, inclusive). There are no guarantees as to how this affects the seed of random() calls that appear in the same query as setseed().

+
Volatile
sign(val: decimal) → decimal

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sign(val: float) → float

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sign(val: int) → int

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sin(val: float) → float

Calculates the sine of val.

+
Immutable
sind(val: float) → float

Calculates the sine of val where val is in degrees.

+
Immutable
sinh(val: float) → float

Calculates the hyperbolic sine of val.

+
Immutable
sqrt(val: decimal) → decimal

Calculates the square root of val.

+
Immutable
sqrt(val: float) → float

Calculates the square root of val.

+
Immutable
tan(val: float) → float

Calculates the tangent of val.

+
Immutable
tand(val: float) → float

Calculates the tangent of val where val is in degrees.

+
Immutable
tanh(val: float) → float

Calculates the hyperbolic tangent of val.

+
Immutable
trunc(val: decimal) → decimal

Truncates the decimal values of val.

+
Immutable
trunc(val: decimal, scale: int) → decimal

Truncates val to scale decimal places.

+
Immutable
trunc(val: float) → float

Truncates the decimal values of val.

+
Immutable
+ +### Full Text Search functions + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
phraseto_tsquery(config: string, text: string) → tsquery

Converts text to a tsquery, normalizing words according to the specified configuration. The <-> operator is inserted between each token in the input.

+
Immutable
phraseto_tsquery(text: string) → tsquery

Converts text to a tsquery, normalizing words according to the default configuration. The <-> operator is inserted between each token in the input.

+
Stable
plainto_tsquery(config: string, text: string) → tsquery

Converts text to a tsquery, normalizing words according to the specified configuration. The & operator is inserted between each token in the input.

+
Immutable
plainto_tsquery(text: string) → tsquery

Converts text to a tsquery, normalizing words according to the default configuration. The & operator is inserted between each token in the input.

+
Stable
to_tsquery(config: string, text: string) → tsquery

Converts the input text into a tsquery by normalizing each word in the input according to the specified configuration. The input must already be formatted like a tsquery, in other words, subsequent tokens must be connected by a tsquery operator (&, |, <->, !).

+
Immutable
to_tsquery(text: string) → tsquery

Converts the input text into a tsquery by normalizing each word in the input according to the default configuration. The input must already be formatted like a tsquery, in other words, subsequent tokens must be connected by a tsquery operator (&, |, <->, !).

+
Stable
to_tsvector(config: string, text: string) → tsvector

Converts text to a tsvector, normalizing words according to the specified configuration. Position information is included in the result.

+
Immutable
to_tsvector(text: string) → tsvector

Converts text to a tsvector, normalizing words according to the default configuration. Position information is included in the result.

+
Stable
ts_parse(parser_name: string, document: string) → tuple{int AS tokid, string AS token}

ts_parse parses the given document and returns a series of records, one for each token produced by parsing. Each record includes a tokid showing the assigned token type and a token which is the text of the token.

+
Stable
ts_rank(vector: tsvector, query: tsquery) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(vector: tsvector, query: tsquery, normalization: int) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(weights: float[], vector: tsvector, query: tsquery) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(weights: float[], vector: tsvector, query: tsquery, normalization: int) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
+ +### Fuzzy String Matching functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
levenshtein(source: string, target: string) → int

Calculates the Levenshtein distance between two strings. Maximum input length is 255 characters.

+
Immutable
levenshtein(source: string, target: string, ins_cost: int, del_cost: int, sub_cost: int) → int

Calculates the Levenshtein distance between two strings. The cost parameters specify how much to charge for each edit operation. Maximum input length is 255 characters.

+
Immutable
metaphone(source: string, max_output_length: int) → string

Convert a string to its Metaphone code. Maximum input length is 255 characters.

+
Immutable
soundex(source: string) → string

Convert a string to its Soundex code.

+
Immutable
+ +### ID generation functions + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
experimental_uuid_v4() → bytes

Returns a UUID.

+
Volatile
gen_random_ulid() → uuid

Generates a random ULID and returns it as a value of UUID type.

+
Volatile
gen_random_uuid() → uuid

Generates a random version 4 UUID, and returns it as a value of UUID type.

+
Volatile
unique_rowid() → int

Returns a unique ID used by CockroachDB to generate unique row IDs if a Primary Key isn’t defined for the table. The value is a combination of the insert timestamp and the ID of the node executing the statement, which guarantees this combination is globally unique. However, there can be gaps and the order is not completely guaranteed.

+
Volatile
unordered_unique_rowid() → int

Returns a unique ID. The value is a combination of the insert timestamp (bit-reversed) and the ID of the node executing the statement, which guarantees this combination is globally unique. The way it is generated is statistically likely to not have any ordering relative to previously generated values.

+
Volatile
uuid_generate_v1() → uuid

Generates a version 1 UUID, and returns it as a value of UUID type. To avoid exposing the server’s real MAC address, this uses a random MAC address and a timestamp. Essentially, this is an alias for uuid_generate_v1mc.

+
Volatile
uuid_generate_v1mc() → uuid

Generates a version 1 UUID, and returns it as a value of UUID type. This uses a random MAC address and a timestamp.

+
Volatile
uuid_generate_v3(namespace: uuid, name: string) → uuid

Generates a version 3 UUID in the given namespace using the specified input name, with md5 as the hashing method. The namespace should be one of the special constants produced by the uuid_ns_*() functions.

+
Immutable
uuid_generate_v4() → uuid

Generates a random version 4 UUID, and returns it as a value of UUID type.

+
Volatile
uuid_generate_v5(namespace: uuid, name: string) → uuid

Generates a version 5 UUID in the given namespace using the specified input name. This is similar to a version 3 UUID, except it uses SHA-1 for hashing.

+
Immutable
uuid_nil() → uuid

Returns a nil UUID constant.

+
Immutable
uuid_ns_dns() → uuid

Returns a constant designating the DNS namespace for UUIDs.

+
Immutable
uuid_ns_oid() → uuid

Returns a constant designating the ISO object identifier (OID) namespace for UUIDs. These are unrelated to the OID type used internally in the database.

+
Immutable
uuid_ns_url() → uuid

Returns a constant designating the URL namespace for UUIDs.

+
Immutable
uuid_ns_x500() → uuid

Returns a constant designating the X.500 distinguished name (DN) namespace for UUIDs.

+
Immutable
uuid_v4() → bytes

Returns a UUID.

+
Volatile
+ +### INET functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
abbrev(val: inet) → string

Converts the combined IP address and prefix length to an abbreviated display format as text. For INET types, this will omit the prefix length if it’s not the default (32 for IPv4, 128 for IPv6).

+

For example, abbrev('192.168.1.2/24') returns '192.168.1.2/24'

+
Immutable
broadcast(val: inet) → inet

Gets the broadcast address for the network address represented by the value.

+

For example, broadcast('192.168.1.2/24') returns '192.168.1.255/24'

+
Immutable
family(val: inet) → int

Extracts the IP family of the value; 4 for IPv4, 6 for IPv6.

+

For example, family('::1') returns 6

+
Immutable
host(val: inet) → string

Extracts the address part of the combined address/prefixlen value as text.

+

For example, host('192.168.1.2/16') returns '192.168.1.2'

+
Immutable
hostmask(val: inet) → inet

Creates an IP host mask corresponding to the prefix length in the value.

+

For example, hostmask('192.168.1.2/16') returns '0.0.255.255'

+
Immutable
masklen(val: inet) → int

Retrieves the prefix length stored in the value.

+

For example, masklen('192.168.1.2/16') returns 16

+
Immutable
netmask(val: inet) → inet

Creates an IP network mask corresponding to the prefix length in the value.

+

For example, netmask('192.168.1.2/16') returns '255.255.0.0'

+
Immutable
set_masklen(val: inet, prefixlen: int) → inet

Sets the prefix length of val to prefixlen.

+

For example, set_masklen('192.168.1.2', 16) returns '192.168.1.2/16'.

+
Immutable
+ +### INT functions + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
crc32c(bytes...) → int

Calculates the CRC-32 hash using the Castagnoli polynomial.

+
Leakproof
crc32c(string...) → int

Calculates the CRC-32 hash using the Castagnoli polynomial.

+
Leakproof
crc32ieee(bytes...) → int

Calculates the CRC-32 hash using the IEEE polynomial.

+
Leakproof
crc32ieee(string...) → int

Calculates the CRC-32 hash using the IEEE polynomial.

+
Leakproof
fnv32(bytes...) → int

Calculates the 32-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv32(string...) → int

Calculates the 32-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv32a(bytes...) → int

Calculates the 32-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv32a(string...) → int

Calculates the 32-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv64(bytes...) → int

Calculates the 64-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv64(string...) → int

Calculates the 64-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv64a(bytes...) → int

Calculates the 64-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv64a(string...) → int

Calculates the 64-bit FNV-1a hash value of a set of values.

+
Leakproof
width_bucket(operand: decimal, b1: decimal, b2: decimal, count: int) → int

return the bucket number to which operand would be assigned in a histogram having count equal-width buckets spanning the range b1 to b2.

+
Immutable
width_bucket(operand: int, b1: int, b2: int, count: int) → int

return the bucket number to which operand would be assigned in a histogram having count equal-width buckets spanning the range b1 to b2.

+
Immutable
width_bucket(operand: anyelement, thresholds: anyelement[]) → int

return the bucket number to which operand would be assigned given an array listing the lower bounds of the buckets; returns 0 for an input less than the first lower bound; the thresholds array must be sorted, smallest first, or unexpected results will be obtained

+
Immutable
+ +### JSONB functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_to_json(array: anyelement[]) → jsonb

Returns the array as JSON or JSONB.

+
Stable
array_to_json(array: anyelement[], pretty_bool: bool) → jsonb

Returns the array as JSON or JSONB.

+
Stable
json_array_elements(input: jsonb) → jsonb

Expands a JSON array to a set of JSON values.

+
Immutable
json_array_elements_text(input: jsonb) → string

Expands a JSON array to a set of text values.

+
Immutable
json_array_length(json: jsonb) → int

Returns the number of elements in the outermost JSON or JSONB array.

+
Immutable
json_build_array(anyelement...) → jsonb

Builds a possibly-heterogeneously-typed JSON or JSONB array out of a variadic argument list.

+
Stable
json_build_object(anyelement...) → jsonb

Builds a JSON object out of a variadic argument list.

+
Stable
json_each(input: jsonb) → tuple{string AS key, jsonb AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs.

+
Immutable
json_each_text(input: jsonb) → tuple{string AS key, string AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs. The returned values will be of type text.

+
Immutable
json_extract_path(jsonb, string...) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
json_extract_path_text(jsonb, string...) → string

Returns the JSON value as text pointed to by the variadic arguments.

+
Immutable
json_object(keys: string[], values: string[]) → jsonb

This form of json_object takes keys and values pairwise from two separate arrays. In all other respects it is identical to the one-argument form.

+
Immutable
json_object(texts: string[]) → jsonb

Builds a JSON or JSONB object out of a text array. The array must have exactly one dimension with an even number of members, in which case they are taken as alternating key/value pairs.

+
Immutable
json_populate_record(base: anyelement, from_json: jsonb) → anyelement

Expands the object in from_json to a row whose columns match the record type defined by base.

+
Stable
json_populate_recordset(base: anyelement, from_json: jsonb) → anyelement

Expands the outermost array of objects in from_json to a set of rows whose columns match the record type defined by base

+
Stable
json_remove_path(val: jsonb, path: string[]) → jsonb

Remove the specified path from the JSON object.

+
Immutable
json_set(val: jsonb, path: string[], to: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
json_set(val: jsonb, path: string[], to: jsonb, create_missing: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If create_missing is false, new keys will not be inserted to objects and values will not be prepended or appended to arrays.

+
Immutable
json_strip_nulls(from_json: jsonb) → jsonb

Returns from_json with all object fields that have null values omitted. Other null values are untouched.

+
Immutable
json_typeof(val: jsonb) → string

Returns the type of the outermost JSON value as a text string.

+
Immutable
json_valid(string: string) → bool

Returns whether the given string is valid JSON or not.

+
Immutable
jsonb_array_elements(input: jsonb) → jsonb

Expands a JSON array to a set of JSON values.

+
Immutable
jsonb_array_elements_text(input: jsonb) → string

Expands a JSON array to a set of text values.

+
Immutable
jsonb_array_length(json: jsonb) → int

Returns the number of elements in the outermost JSON or JSONB array.

+
Immutable
jsonb_build_array(anyelement...) → jsonb

Builds a possibly-heterogeneously-typed JSON or JSONB array out of a variadic argument list.

+
Stable
jsonb_build_object(anyelement...) → jsonb

Builds a JSON object out of a variadic argument list.

+
Stable
jsonb_each(input: jsonb) → tuple{string AS key, jsonb AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs.

+
Immutable
jsonb_each_text(input: jsonb) → tuple{string AS key, string AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs. The returned values will be of type text.

+
Immutable
jsonb_exists_any(json: jsonb, array: string[]) → bool

Returns whether any of the strings in the text array exist as top-level keys or array elements

+
Immutable
jsonb_extract_path(jsonb, string...) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
jsonb_extract_path_text(jsonb, string...) → string

Returns the JSON value as text pointed to by the variadic arguments.

+
Immutable
jsonb_insert(target: jsonb, path: string[], new_val: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments. new_val will be inserted before path target.

+
Immutable
jsonb_insert(target: jsonb, path: string[], new_val: jsonb, insert_after: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If insert_after is true (default is false), new_val will be inserted after path target.

+
Immutable
jsonb_object(keys: string[], values: string[]) → jsonb

This form of json_object takes keys and values pairwise from two separate arrays. In all other respects it is identical to the one-argument form.

+
Immutable
jsonb_object(texts: string[]) → jsonb

Builds a JSON or JSONB object out of a text array. The array must have exactly one dimension with an even number of members, in which case they are taken as alternating key/value pairs.

+
Immutable
jsonb_populate_record(base: anyelement, from_json: jsonb) → anyelement

Expands the object in from_json to a row whose columns match the record type defined by base.

+
Stable
jsonb_populate_recordset(base: anyelement, from_json: jsonb) → anyelement

Expands the outermost array of objects in from_json to a set of rows whose columns match the record type defined by base

+
Stable
jsonb_pretty(val: jsonb) → string

Returns the given JSON value as a STRING indented and with newlines.

+
Immutable
jsonb_set(val: jsonb, path: string[], to: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
jsonb_set(val: jsonb, path: string[], to: jsonb, create_missing: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If create_missing is false, new keys will not be inserted to objects and values will not be prepended or appended to arrays.

+
Immutable
jsonb_strip_nulls(from_json: jsonb) → jsonb

Returns from_json with all object fields that have null values omitted. Other null values are untouched.

+
Immutable
jsonb_typeof(val: jsonb) → string

Returns the type of the outermost JSON value as a text string.

+
Immutable
row_to_json(row: tuple) → jsonb

Returns the row as a JSON object.

+
Stable
to_json(val: anyelement) → jsonb

Returns the value as JSON or JSONB.

+
Stable
to_jsonb(val: anyelement) → jsonb

Returns the value as JSON or JSONB.

+
Stable
+ +### Multi-region functions + + + + + + + +
Function → ReturnsDescriptionVolatility
default_to_database_primary_region(val: string) → string

Returns the given region if the region has been added to the current database. +Otherwise, this will return the primary region of the current database. +This will error if the current database is not a multi-region database.

+
Stable
gateway_region() → string

Returns the region of the connection’s current node as defined by +the locality flag on node startup. Returns an error if no region is set.

+
Stable
rehome_row() → string

Returns the region of the connection’s current node as defined by +the locality flag on node startup. Returns an error if no region is set.

+
Stable
+ +### PGVector functions + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cosine_distance(v1: vector, v2: vector) → float

Returns the cosine distance between the two vectors.

+
Immutable
inner_product(v1: vector, v2: vector) → float

Returns the inner product between the two vectors.

+
Immutable
l1_distance(v1: vector, v2: vector) → float

Returns the Manhattan distance between the two vectors.

+
Immutable
l2_distance(v1: vector, v2: vector) → float

Returns the Euclidean distance between the two vectors.

+
Immutable
vector_dims(vector: vector) → int

Returns the number of the dimensions in the vector.

+
Immutable
vector_norm(vector: vector) → float

Returns the Euclidean norm of the vector.

+
Immutable
+ +### STRING[] functions + + + + + + +
Function → ReturnsDescriptionVolatility
regexp_split_to_array(string: string, pattern: string) → string[]

Split string using a POSIX regular expression as the delimiter.

+
Immutable
regexp_split_to_array(string: string, pattern: string, flags: string) → string[]

Split string using a POSIX regular expression as the delimiter with flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
+ +### Sequence functions + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
currval(sequence_name: string) → int

Returns the latest value obtained with nextval for this sequence in this session.

+
Volatile
currval(sequence_name: regclass) → int

Returns the latest value obtained with nextval for this sequence in this session.

+
Volatile
lastval() → int

Return value most recently obtained with nextval in this session.

+
Volatile
nextval(sequence_name: string) → int

Advances the given sequence and returns its new value.

+
Volatile
nextval(sequence_name: regclass) → int

Advances the given sequence and returns its new value.

+
Volatile
setval(sequence_name: string, value: int) → int

Set the given sequence’s current value. The next call to nextval will return value + Increment

+
Volatile
setval(sequence_name: string, value: int, is_called: bool) → int

Set the given sequence’s current value. If is_called is false, the next call to nextval will return value; otherwise value + Increment.

+
Volatile
setval(sequence_name: regclass, value: int) → int

Set the given sequence’s current value. The next call to nextval will return value + Increment

+
Volatile
setval(sequence_name: regclass, value: int, is_called: bool) → int

Set the given sequence’s current value. If is_called is false, the next call to nextval will return value; otherwise value + Increment.

+
Volatile
+ +### Set-returning functions + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
aclexplode(aclitems: string[]) → tuple{oid AS grantor, oid AS grantee, string AS privilege_type, bool AS is_grantable}

Produces a virtual table containing aclitem stuff (returns no rows as this feature is unsupported in CockroachDB)

+
Stable
generate_series(start: int, end: int) → int

Produces a virtual table containing the integer values from start to end, inclusive.

+
Immutable
generate_series(start: int, end: int, step: int) → int

Produces a virtual table containing the integer values from start to end, inclusive, by increment of step.

+
Immutable
generate_series(start: timestamp, end: timestamp, step: interval) → timestamp

Produces a virtual table containing the timestamp values from start to end, inclusive, by increment of step.

+
Immutable
generate_series(start: timestamptz, end: timestamptz, step: interval) → timestamptz

Produces a virtual table containing the timestampTZ values from start to end, inclusive, by increment of step.

+
Immutable
generate_subscripts(array: anyelement[]) → int

Returns a series comprising the given array’s subscripts.

+
Immutable
generate_subscripts(array: anyelement[], dim: int) → int

Returns a series comprising the given array’s subscripts.

+
Immutable
generate_subscripts(array: anyelement[], dim: int, reverse: bool) → int

Returns a series comprising the given array’s subscripts.

+

When reverse is true, the series is returned in reverse order.

+
Immutable
information_schema._pg_expandarray(input: anyelement[]) → tuple{anyelement AS x, int AS n}

Returns the input array as a set of rows with an index

+
Immutable
json_object_keys(input: jsonb) → string

Returns sorted set of keys in the outermost JSON object.

+
Immutable
json_to_record(input: jsonb) → tuple

Builds an arbitrary record from a JSON object.

+
Stable
json_to_recordset(input: jsonb) → tuple

Builds an arbitrary set of records from a JSON array of objects.

+
Stable
jsonb_object_keys(input: jsonb) → string

Returns sorted set of keys in the outermost JSON object.

+
Immutable
jsonb_to_record(input: jsonb) → tuple

Builds an arbitrary record from a JSON object.

+
Stable
jsonb_to_recordset(input: jsonb) → tuple

Builds an arbitrary set of records from a JSON array of objects.

+
Stable
pg_get_keywords() → tuple{string AS word, string AS catcode, string AS catdesc}

Produces a virtual table containing the keywords known to the SQL parser.

+
Immutable
pg_options_to_table(options: string[]) → tuple{string AS option_name, string AS option_value}

Converts the options array format to a table.

+
Stable
regexp_split_to_table(string: string, pattern: string) → string

Split string using a POSIX regular expression as the delimiter.

+
Immutable
regexp_split_to_table(string: string, pattern: string, flags: string) → string

Split string using a POSIX regular expression as the delimiter with flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
unnest(anyelement[], anyelement[], anyelement[]...) → tuple{anyelement AS unnest, anyelement AS unnest, anyelement AS unnest}

Returns the input arrays as a set of rows

+
Immutable
unnest(input: anyelement[]) → anyelement

Returns the input array as a set of rows

+
Immutable
workload_index_recs() → string

Returns set of index recommendations

+
Immutable
workload_index_recs(timestamptz: timestamptz) → string

Returns set of index recommendations

+
Immutable
+ +### Spatial functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
_st_contains(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no points of geometry_b lie in the exterior of geometry_a, and there is at least one point in the interior of geometry_b that lies in the interior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_containsproperly(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_b intersects the interior of geometry_a but not the boundary or exterior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_coveredby(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_a is outside geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_coveredby(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_covers(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_b is outside geography_a.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_covers(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_crosses(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a has some - but not all - interior points in common with geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dfullywithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, inclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than or equal to distance units.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dfullywithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, exclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than distance units.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_equals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is spatially equal to geometry_b, i.e. ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = true.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_intersects(geography_a: geography, geography_b: geography) → bool

Returns true if geography_a shares any portion of space with geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_intersects(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_overlaps(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a intersects but does not completely contain geometry_b, or vice versa. “Does not completely” implies ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = false.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_touches(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if the only points in common between geometry_a and geometry_b are on the boundary. Note points do not touch other points.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_within(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is completely inside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
addgeometrycolumn(catalog_name: string, schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(catalog_name: string, schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
geometrytype(geometry: geometry) → string

Returns the type of geometry as a string.

+

This function utilizes the GEOS module.

+
Immutable
geomfromewkb(val: bytes) → geometry

Returns the Geometry from an EWKB representation.

+
Immutable
geomfromewkt(val: string) → geometry

Returns the Geometry from an EWKT representation.

+
Immutable
postgis_addbbox(geometry: geometry) → geometry

Compatibility placeholder function with PostGIS. This does not perform any operation on the Geometry.

+
Immutable
postgis_dropbbox(geometry: geometry) → geometry

Compatibility placeholder function with PostGIS. This does not perform any operation on the Geometry.

+
Immutable
postgis_extensions_upgrade() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_full_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_geos_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_getbbox(geometry: geometry) → box2d

Returns a box2d encapsulating the given Geometry.

+
Immutable
postgis_hasbbox(geometry: geometry) → bool

Returns whether a given Geometry has a bounding box. False for points and empty geometries; always true otherwise.

+
Immutable
postgis_lib_build_date() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_lib_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_liblwgeom_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_libxml_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_proj_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_build_date() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_installed() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_released() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_wagyu_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
st_addmeasure(geometry: geometry, start: float, end: float) → geometry

Returns a copy of a LineString or MultiLineString with measure coordinates linearly interpolated between the specified start and end values. Any existing M coordinates will be overwritten.

+
Immutable
st_addpoint(line_string: geometry, point: geometry) → geometry

Adds a Point to the end of a LineString.

+
Immutable
st_addpoint(line_string: geometry, point: geometry, index: int) → geometry

Adds a Point to a LineString at the given 0-based index (-1 to append).

+
Immutable
st_affine(geometry: geometry, a: float, b: float, c: float, d: float, e: float, f: float, g: float, h: float, i: float, x_off: float, y_off: float, z_off: float) → geometry

Applies a 3D affine transformation to the given geometry.

+

The matrix transformation will be applied as follows for each coordinate:
/ a  b  c  x_off \   / x \
| d  e  f  y_off |   | y |
| g  h  i  z_off |   | z |
\ 0  0  0  1     /   \ 0 /

+
Immutable
st_affine(geometry: geometry, a: float, b: float, d: float, e: float, x_off: float, y_off: float) → geometry

Applies a 2D affine transformation to the given geometry.

+

The matrix transformation will be applied as follows for each coordinate:
/ a  b  x_off \   / x \
| d  e  y_off |   | y |
\ 0  0  1     /   \ 0 /

+
Immutable
st_angle(line1: geometry, line2: geometry) → float

Returns the clockwise angle between two LINESTRING geometries, treating them as vectors between their start- and endpoints. Returns NULL if any vectors have 0 length.

+
Immutable
st_angle(point1: geometry, point2: geometry, point3: geometry) → float

Returns the clockwise angle between the vectors formed by point2,point1 and point2,point3. The arguments must be POINT geometries. Returns NULL if any vectors have 0 length.

+
Immutable
st_angle(point1: geometry, point2: geometry, point3: geometry, point4: geometry) → float

Returns the clockwise angle between the vectors formed by point1,point2 and point3,point4. The arguments must be POINT geometries. Returns NULL if any vectors have 0 length.

+
Immutable
st_area(geography: geography) → float

Returns the area of the given geography in meters^2. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_area(geography: geography, use_spheroid: bool) → float

Returns the area of the given geography in meters^2.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_area(geometry: geometry) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_area(geometry_str: string) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_area2d(geometry: geometry) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_asbinary(geography: geography) → bytes

Returns the WKB representation of a given Geography.

+
Immutable
st_asbinary(geography: geography, xdr_or_ndr: string) → bytes

Returns the WKB representation of a given Geography. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_asbinary(geometry: geometry) → bytes

Returns the WKB representation of a given Geometry.

+
Immutable
st_asbinary(geometry: geometry, xdr_or_ndr: string) → bytes

Returns the WKB representation of a given Geometry. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_asencodedpolyline(geometry: geometry) → string

Returns the geometry as an Encoded Polyline. +This format is used by Google Maps with precision=5 and by Open Source Routing Machine with precision=5 and 6. +Preserves 5 decimal places.

+
Immutable
st_asencodedpolyline(geometry: geometry, precision: int4) → string

Returns the geometry as an Encoded Polyline. +This format is used by Google Maps with precision=5 and by Open Source Routing Machine with precision=5 and 6. +Precision specifies how many decimal places will be preserved in Encoded Polyline. Value should be the same on encoding and decoding, or coordinates will be incorrect.

+
Immutable
st_asewkb(geography: geography) → bytes

Returns the EWKB representation of a given Geography.

+
Immutable
st_asewkb(geometry: geometry) → bytes

Returns the EWKB representation of a given Geometry.

+
Immutable
st_asewkt(geography: geography) → string

Returns the EWKT representation of a given Geography. A default of 15 decimal digits is used.

+
Immutable
st_asewkt(geography: geography, max_decimal_digits: int) → string

Returns the EWKT representation of a given Geography. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_asewkt(geometry: geometry) → string

Returns the EWKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+
Immutable
st_asewkt(geometry: geometry, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_asewkt(geometry_str: string) → string

Returns the EWKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asewkt(geometry_str: string, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geography: geography) → string

Returns the GeoJSON representation of a given Geography. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(geography: geography, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geography with max_decimal_digits output for each coordinate value.

+
Immutable
st_asgeojson(geography: geography, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geography with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option (default for Geography)
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326
  • +
+
Immutable
st_asgeojson(geometry: geometry) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(geometry: geometry, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+
Immutable
st_asgeojson(geometry: geometry, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326 (default for Geometry)
  • +
+
Immutable
st_asgeojson(geometry_str: string) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geometry_str: string, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geometry_str: string, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326 (default for Geometry)
  • +
+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(row: tuple) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(row: tuple, geo_column: string) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. Coordinates have a maximum of 9 decimal digits.

+
Stable
st_asgeojson(row: tuple, geo_column: string, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. max_decimal_digits will be output for each coordinate value.

+
Stable
st_asgeojson(row: tuple, geo_column: string, max_decimal_digits: int, pretty: bool) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. max_decimal_digits will be output for each coordinate value. Output will be pretty printed in JSON if pretty is true.

+
Stable
st_ashexewkb(geography: geography) → string

Returns the EWKB representation in hex of a given Geography.

+
Immutable
st_ashexewkb(geography: geography, xdr_or_ndr: string) → string

Returns the EWKB representation in hex of a given Geography. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_ashexewkb(geometry: geometry) → string

Returns the EWKB representation in hex of a given Geometry.

+
Immutable
st_ashexewkb(geometry: geometry, xdr_or_ndr: string) → string

Returns the EWKB representation in hex of a given Geometry. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_ashexwkb(geography: geography) → string

Returns the WKB representation in hex of a given Geography.

+
Immutable
st_ashexwkb(geometry: geometry) → string

Returns the WKB representation in hex of a given Geometry.

+
Immutable
st_askml(geography: geography) → string

Returns the KML representation of a given Geography.

+
Immutable
st_askml(geometry: geometry) → string

Returns the KML representation of a given Geometry.

+
Immutable
st_askml(geometry_str: string) → string

Returns the KML representation of a given Geometry.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds. +Uses 256 as the buffer size in tile coordinate space for geometry clipping. +Uses 4096 as the tile extent size in tile coordinate space.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be clipped and transformed.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds. +Uses 256 as the buffer size in tile coordinate space for geometry clipping.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be clipped and transformed.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int, buffer: int) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be clipped and transformed.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int, buffer: int, clip: bool) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds if required.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be transformed, and clipped if required.

+
Immutable
st_astext(geography: geography) → string

Returns the WKT representation of a given Geography. A default of 15 decimal digits is used.

+
Immutable
st_astext(geography: geography, max_decimal_digits: int) → string

Returns the WKT representation of a given Geography. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_astext(geometry: geometry) → string

Returns the WKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+
Immutable
st_astext(geometry: geometry, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_astext(geometry_str: string) → string

Returns the WKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_astext(geometry_str: string, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int, precision_z: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int, precision_z: int, precision_m: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_azimuth(geography_a: geography, geography_b: geography) → float

Returns the azimuth in radians of the segment defined by the given point geographies, or NULL if the two points are coincident. It is solved using the Inverse geodesic problem.

+

The azimuth angle is referenced from north, and is positive clockwise: North = 0; East = π/2; South = π; West = 3π/2.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_azimuth(geometry_a: geometry, geometry_b: geometry) → float

Returns the azimuth in radians of the segment defined by the given point geometries, or NULL if the two points are coincident.

+

The azimuth angle is referenced from north, and is positive clockwise: North = 0; East = π/2; South = π; West = 3π/2.

+
Immutable
st_bdpolyfromtext(str: string, srid: int) → geometry

Returns a Polygon from multilinestring WKT with a SRID. If the input is not a multilinestring an error will be thrown.

+
Immutable
st_boundary(geometry: geometry) → geometry

Returns the closure of the combinatorial boundary of this Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_box2dfromgeohash(geohash: string) → box2d

Return a Box2D from a GeoHash string with max precision.

+
Immutable
st_box2dfromgeohash(geohash: string, precision: int) → box2d

Return a Box2D from a GeoHash string with supplied precision.

+
Immutable
st_buffer(geography: geography, distance: float) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geography: geography, distance: float, buffer_style_params: string) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geography: geography, distance: float, quad_segs: int) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geometry: geometry, distance: decimal) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float, buffer_style_params: string) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float, quad_segs: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry_str: string, distance: decimal) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float, buffer_style_params: string) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float, quad_segs: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_centroid(geography: geography) → geography

Returns the centroid of given geography. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_centroid(geography: geography, use_spheroid: bool) → geography

Returns the centroid of given geography.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_centroid(geometry: geometry) → geometry

Returns the centroid of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_centroid(geometry_str: string) → geometry

Returns the centroid of the given geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_clipbybox2d(geometry: geometry, box2d: box2d) → geometry

Clips the geometry to conform to the bounding box specified by box2d.

+
Immutable
st_closestpoint(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the 2-dimensional point on geometry_a that is closest to geometry_b. This is the first point of the shortest line.

+
Immutable
st_collectionextract(geometry: geometry, type: int) → geometry

Given a collection, returns a multitype consisting only of elements of the specified type. If there are no elements of the given type, an EMPTY geometry is returned. Types are specified as 1=POINT, 2=LINESTRING, 3=POLYGON - other types are not supported.

+
Immutable
st_collectionhomogenize(geometry: geometry) → geometry

Returns the “simplest” representation of a collection’s contents. Collections of a single type will be returned as an appropriate multitype, or a singleton if it only contains a single geometry.

+
Immutable
st_combinebbox(box2d: box2d, geometry: geometry) → box2d

Combines the current bounding box with the bounding box of the Geometry.

+
Immutable
st_contains(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no points of geometry_b lie in the exterior of geometry_a, and there is at least one point in the interior of geometry_b that lies in the interior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_containsproperly(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_b intersects the interior of geometry_a but not the boundary or exterior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_convexhull(geometry: geometry) → geometry

Returns a geometry that represents the Convex Hull of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_coorddim(geometry: geometry) → int

Returns the number of coordinate dimensions of a given Geometry.

+
Immutable
st_coveredby(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_a is outside geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_coveredby(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_coveredby(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_covers(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_b is outside geography_a.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_covers(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_covers(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_crosses(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a has some - but not all - interior points in common with geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dfullywithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, inclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than or equal to distance units.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dfullywithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, exclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than distance units.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_difference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the difference of two Geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_dimension(geometry: geometry) → int

Returns the number of topological dimensions of a given Geometry.

+
Immutable
st_disjoint(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a does not overlap, touch or is within geometry_b.

+

This function utilizes the GEOS module.

+
Immutable
st_distance(geography_a: geography, geography_b: geography) → float

Returns the distance in meters between geography_a and geography_b. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_distance(geography_a: geography, geography_b: geography, use_spheroid: bool) → float

Returns the distance in meters between geography_a and geography_b.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_distance(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance between the given geometries.

+
Immutable
st_distance(geometry_a_str: string, geometry_b_str: string) → float

Returns the distance between the given geometries.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_distancesphere(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance in meters between geometry_a and geometry_b assuming the coordinates represent lng/lat points on a sphere.

+

This function utilizes the S2 library for spherical calculations.

+
Immutable
st_distancespheroid(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance in meters between geometry_a and geometry_b assuming the coordinates represent lng/lat points on a spheroid.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_dwithin(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geometry_a_str: string, geometry_b_str: string, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geometry_a_str: string, geometry_b_str: string, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_endpoint(geometry: geometry) → geometry

Returns the last point of a geometry which has shape LineString. Returns NULL if the geometry is not a LineString.

+
Immutable
st_envelope(box2d: box2d) → geometry

Returns a bounding geometry for the given box.

+
Immutable
st_envelope(geometry: geometry) → geometry

Returns a bounding envelope for the given geometry.

+

For geometries which have a POINT or LINESTRING bounding box (i.e. is a single point or a horizontal or vertical line), a POINT or LINESTRING is returned. Otherwise, the returned POLYGON will be ordered Bottom Left, Top Left, Top Right, Bottom Right, Bottom Left.

+
Immutable
st_equals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is spatially equal to geometry_b, i.e. ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = true.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_estimatedextent(schema_name: string, table_name: string, geocolumn_name: string) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+
Stable
st_estimatedextent(schema_name: string, table_name: string, geocolumn_name: string, parent_only: bool) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+

The parent_only boolean is always ignored.

+
Stable
st_estimatedextent(table_name: string, geocolumn_name: string) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+
Stable
st_expand(box2d: box2d, delta: float) → box2d

Extends the box2d by delta units across all dimensions.

+
Immutable
st_expand(box2d: box2d, delta_x: float, delta_y: float) → box2d

Extends the box2d by delta_x units in the x dimension and delta_y units in the y dimension.

+
Immutable
st_expand(geometry: geometry, delta: float) → geometry

Extends the bounding box represented by the geometry by delta units across all dimensions, returning a Polygon representing the new bounding box.

+
Immutable
st_expand(geometry: geometry, delta_x: float, delta_y: float) → geometry

Extends the bounding box represented by the geometry by delta_x units in the x dimension and delta_y units in the y dimension, returning a Polygon representing the new bounding box.

+
Immutable
st_exteriorring(geometry: geometry) → geometry

Returns the exterior ring of a Polygon as a LineString. Returns NULL if the shape is not a Polygon.

+
Immutable
st_flipcoordinates(geometry: geometry) → geometry

Returns a new geometry with the X and Y axes flipped.

+
Immutable
st_force2d(geometry: geometry) → geometry

Returns a Geometry that is forced into XY layout with any Z or M dimensions discarded.

+
Immutable
st_force3d(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3d(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3dm(geometry: geometry) → geometry

Returns a Geometry that is forced into XYM layout. If a M coordinate doesn’t exist, it will be set to 0. If a Z coordinate is present, it will be discarded.

+
Immutable
st_force3dm(geometry: geometry, defaultM: float) → geometry

Returns a Geometry that is forced into XYM layout. If a M coordinate doesn’t exist, it will be set to the specified default M value. If a Z coordinate is present, it will be discarded.

+
Immutable
st_force3dz(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3dz(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate is present, it will be discarded.

+
Immutable
st_force4d(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZM layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate doesn’t exist, it will be set to 0.

+
Immutable
st_force4d(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate doesn’t exist, it will be set to 0.

+
Immutable
st_force4d(geometry: geometry, defaultZ: float, defaultM: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified Z value. If a M coordinate doesn’t exist, it will be set to the specified M value.

+
Immutable
st_forcecollection(geometry: geometry) → geometry

Converts the geometry into a GeometryCollection.

+
Immutable
st_forcepolygonccw(geometry: geometry) → geometry

Returns a Geometry where all Polygon objects have exterior rings in the counter-clockwise orientation and interior rings in the clockwise orientation. Non-Polygon objects are unchanged.

+
Immutable
st_forcepolygoncw(geometry: geometry) → geometry

Returns a Geometry where all Polygon objects have exterior rings in the clockwise orientation and interior rings in the counter-clockwise orientation. Non-Polygon objects are unchanged.

+
Immutable
st_frechetdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the Frechet distance between the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_frechetdistance(geometry_a: geometry, geometry_b: geometry, densify_frac: float) → float

Returns the Frechet distance between the given geometries, with the given segment densification (range 0.0-1.0, -1 to disable).

+

Smaller densify_frac gives a more accurate Fréchet distance. However, the computation time and memory usage increases with the square of the number of subsegments.

+

This function utilizes the GEOS module.

+
Immutable
st_generatepoints(geometry: geometry, npoints: int4) → geometry

Generates pseudo-random points until the requested number are found within the input area. Uses system time as a seed. The requested number of points must not be larger than 65336.

+
Volatile
st_generatepoints(geometry: geometry, npoints: int4, seed: int4) → geometry

Generates pseudo-random points until the requested number are found within the input area. The requested number of points must not be larger than 65336.

+
Immutable
st_geogfromewkb(val: bytes) → geography

Returns the Geography from an EWKB representation.

+
Immutable
st_geogfromewkt(val: string) → geography

Returns the Geography from an EWKT representation.

+
Immutable
st_geogfromgeojson(val: string) → geography

Returns the Geography from a GeoJSON representation.

+
Immutable
st_geogfromgeojson(val: jsonb) → geography

Returns the Geography from a GeoJSON representation.

+
Immutable
st_geogfromtext(str: string, srid: int) → geography

Returns the Geography from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geogfromtext(val: string) → geography

Returns the Geography from a WKT or EWKT representation.

+
Immutable
st_geogfromwkb(bytes: bytes, srid: int) → geography

Returns the Geography from a WKB (or EWKB) representation with the given SRID set.

+
Immutable
st_geogfromwkb(val: bytes) → geography

Returns the Geography from a WKB (or EWKB) representation.

+
Immutable
st_geographyfromtext(str: string, srid: int) → geography

Returns the Geography from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geographyfromtext(val: string) → geography

Returns the Geography from a WKT or EWKT representation.

+
Immutable
st_geohash(geography: geography) → string

Returns a GeoHash representation of the geography with full precision if a point is provided, or with variable precision based on the size of the feature.

+
Immutable
st_geohash(geography: geography, precision: int) → string

Returns a GeoHash representation of the geography with the supplied precision.

+
Immutable
st_geohash(geometry: geometry) → string

Returns a GeoHash representation of the geometry with full precision if a point is provided, or with variable precision based on the size of the feature. This will error if any coordinates are outside the bounds of longitude/latitude.

+
Immutable
st_geohash(geometry: geometry, precision: int) → string

Returns a GeoHash representation of the geometry with the supplied precision. This will error if any coordinates are outside the bounds of longitude/latitude.

+
Immutable
st_geomcollfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not GeometryCollection, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geomcollfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geomcollfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geomcollfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geometryfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geometryfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_geometryn(geometry: geometry, n: int) → geometry

Returns the n-th Geometry (1-indexed). Returns NULL if out of bounds.

+
Immutable
st_geometrytype(geometry: geometry) → string

Returns the type of geometry as a string prefixed with ST_.

+

This function utilizes the GEOS module.

+
Immutable
st_geomfromewkb(val: bytes) → geometry

Returns the Geometry from an EWKB representation.

+
Immutable
st_geomfromewkt(val: string) → geometry

Returns the Geometry from an EWKT representation.

+
Immutable
st_geomfromgeohash(geohash: string) → geometry

Return a POLYGON Geometry from a GeoHash string with max precision.

+
Immutable
st_geomfromgeohash(geohash: string, precision: int) → geometry

Return a POLYGON Geometry from a GeoHash string with supplied precision.

+
Immutable
st_geomfromgeojson(val: string) → geometry

Returns the Geometry from a GeoJSON representation.

+
Immutable
st_geomfromgeojson(val: jsonb) → geometry

Returns the Geometry from a GeoJSON representation.

+
Immutable
st_geomfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geomfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_geomfromwkb(bytes: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with the given SRID set.

+
Immutable
st_geomfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation.

+
Immutable
st_hasarc(geometry: geometry) → bool

Returns whether there is a CIRCULARSTRING in the geometry.

+
Immutable
st_hausdorffdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the Hausdorff distance between the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_hausdorffdistance(geometry_a: geometry, geometry_b: geometry, densify_frac: float) → float

Returns the Hausdorff distance between the given geometries, with the given segment densification (range 0.0-1.0).

+

This function utilizes the GEOS module.

+
Immutable
st_interiorringn(geometry: geometry, n: int) → geometry

Returns the n-th (1-indexed) interior ring of a Polygon as a LineString. Returns NULL if the shape is not a Polygon, or the ring does not exist.

+
Immutable
st_intersection(geography_a: geography, geography_b: geography) → geography

Returns the point intersections of the given geographies.

+

This operation is done by transforming the object into a Geometry. This occurs by translating the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator based projection based on the bounding boxes of the given Geography objects. When the result is calculated, the result is transformed back into a Geography with SRID 4326.

+

This function utilizes the GEOS module.

+
Immutable
st_intersection(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the point intersections of the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_intersection(geometry_a_str: string, geometry_b_str: string) → geometry

Returns the point intersections of the given geometries.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_intersects(geography_a: geography, geography_b: geography) → bool

Returns true if geography_a shares any portion of space with geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_intersects(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_intersects(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_isclosed(geometry: geometry) → bool

Returns whether the geometry is closed as defined by whether the start and end points are coincident. Points are considered closed, empty geometries are not. For collections and multi-types, all members must be closed, as must all polygon rings.

+
Immutable
st_iscollection(geometry: geometry) → bool

Returns whether the geometry is of a collection type (including multi-types).

+
Immutable
st_isempty(geometry: geometry) → bool

Returns whether the geometry is empty.

+
Immutable
st_ispolygonccw(geometry: geometry) → bool

Returns whether the Polygon objects inside the Geometry have exterior rings in the counter-clockwise orientation and interior rings in the clockwise orientation. Non-Polygon objects are considered counter-clockwise.

+
Immutable
st_ispolygoncw(geometry: geometry) → bool

Returns whether the Polygon objects inside the Geometry have exterior rings in the clockwise orientation and interior rings in the counter-clockwise orientation. Non-Polygon objects are considered clockwise.

+
Immutable
st_isring(geometry: geometry) → bool

Returns whether the geometry is a single linestring that is closed and simple, as defined by ST_IsClosed and ST_IsSimple.

+

This function utilizes the GEOS module.

+
Immutable
st_issimple(geometry: geometry) → bool

Returns true if the geometry has no anomalous geometric points, e.g. that it intersects with or lies tangent to itself.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalid(geometry: geometry) → bool

Returns whether the geometry is valid as defined by the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalid(geometry: geometry, flags: int) → bool

Returns whether the geometry is valid.

+

For flags=0, validity is defined by the OGC spec.

+

For flags=1, validity considers self-intersecting rings forming holes as valid as per ESRI. This is not valid under OGC and CRDB spatial operations may not operate correctly.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidreason(geometry: geometry) → string

Returns a string containing the reason the geometry is invalid along with the point of interest, or “Valid Geometry” if it is valid. Validity is defined by the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidreason(geometry: geometry, flags: int) → string

Returns the reason the geometry is invalid or “Valid Geometry” if it is valid.

+

For flags=0, validity is defined by the OGC spec.

+

For flags=1, validity considers self-intersecting rings forming holes as valid as per ESRI. This is not valid under OGC and CRDB spatial operations may not operate correctly.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidtrajectory(geometry: geometry) → bool

Returns whether the geometry encodes a valid trajectory.

+

Note the geometry must be a LineString with M coordinates.

+
Immutable
st_length(geography: geography) → float

Returns the length of the given geography in meters. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_length(geography: geography, use_spheroid: bool) → float

Returns the length of the given geography in meters.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_length(geometry: geometry) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+
Immutable
st_length(geometry_str: string) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_length2d(geometry: geometry) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+
Immutable
st_linecrossingdirection(linestring_a: geometry, linestring_b: geometry) → int

Returns an integer value defining behavior of crossing of lines: +0: lines do not cross, +-1: linestring_b crosses linestring_a from right to left, +1: linestring_b crosses linestring_a from left to right, +-2: linestring_b crosses linestring_a multiple times from right to left, +2: linestring_b crosses linestring_a multiple times from left to right, +-3: linestring_b crosses linestring_a multiple times from left to left, +3: linestring_b crosses linestring_a multiple times from right to right.

+

Note that the top vertex of the segment touching another line does not count as a crossing, but the bottom vertex of segment touching another line is considered a crossing.

+
Immutable
st_linefromencodedpolyline(encoded_polyline: string) → geometry

Creates a LineString from an Encoded Polyline string.

+

Returns valid results only if the polyline was encoded with 5 decimal places.

+

See http://developers.google.com/maps/documentation/utilities/polylinealgorithm

+
Immutable
st_linefromencodedpolyline(encoded_polyline: string, precision: int4) → geometry

Creates a LineString from an Encoded Polyline string.

+

Precision specifies how many decimal places will be preserved in Encoded Polyline. Value should be the same on encoding and decoding, or coordinates will be incorrect.

+

See http://developers.google.com/maps/documentation/utilities/polylinealgorithm

+
Immutable
st_linefrommultipoint(geometry: geometry) → geometry

Creates a LineString from a MultiPoint geometry.

+
Immutable
st_linefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not LineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_linefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_lineinterpolatepoint(geometry: geometry, fraction: float) → geometry

Returns a point along the given LineString which is at given fraction of LineString’s total length.

+

This function utilizes the GEOS module.

+
Immutable
st_lineinterpolatepoints(geometry: geometry, fraction: float) → geometry

Returns one or more points along the LineString which are at integral multiples of the given fraction of the LineString’s total length.

+

Note If the result has zero or one points, it will be returned as a POINT. If it has two or more points, it will be returned as a MULTIPOINT.

+

This function utilizes the GEOS module.

+
Immutable
st_lineinterpolatepoints(geometry: geometry, fraction: float, repeat: bool) → geometry

Returns one or more points along the LineString which are at integral multiples of the given fraction of the LineString’s total length. If repeat is false (default true) then it returns the first point.

+

Note If the result has zero or one points, it will be returned as a POINT. If it has two or more points, it will be returned as a MULTIPOINT.

+

This function utilizes the GEOS module.

+
Immutable
st_linelocatepoint(line: geometry, point: geometry) → float

Returns a float between 0 and 1 representing the location of the closest point on LineString to the given Point, as a fraction of total 2d line length.

+
Immutable
st_linemerge(geometry: geometry) → geometry

Returns a LineString or MultiLineString by joining together constituents of a MultiLineString with matching endpoints. If the input is not a MultiLineString or LineString, an empty GeometryCollection is returned.

+

This function utilizes the GEOS module.

+
Immutable
st_linestringfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not LineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_linestringfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linestringfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linestringfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linesubstring(linestring: geometry, start_fraction: decimal, end_fraction: decimal) → geometry

Return a linestring being a substring of the input one starting and ending at the given fractions of total 2D length. Second and third arguments are float8 values between 0 and 1.

+
Immutable
st_linesubstring(linestring: geometry, start_fraction: float, end_fraction: float) → geometry

Return a linestring being a substring of the input one starting and ending at the given fractions of total 2D length. Second and third arguments are float8 values between 0 and 1.

+
Immutable
st_longestline(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the LineString corresponding to the max distance across every pair of points comprising the given geometries.

+

Note if geometries are the same, it will return the LineString with the maximum distance between the geometry’s vertexes. The function will return the longest line that was discovered first when comparing maximum distances if more than one is found.

+
Immutable
st_m(geometry: geometry) → float

Returns the M coordinate of a geometry if it is a Point.

+
Immutable
st_makebox2d(geometry_a: geometry, geometry_b: geometry) → box2d

Creates a box2d from two points. Errors if arguments are not two non-empty points.

+
Immutable
st_makeenvelope(xmin: float, ymin: float, xmax: float, ymax: float) → geometry

Creates a rectangular Polygon from the minimum and maximum values for X and Y with SRID 0.

+
Immutable
st_makeenvelope(xmin: float, ymin: float, xmax: float, ymax: float, srid: int) → geometry

Creates a rectangular Polygon from the minimum and maximum values for X and Y with the given SRID.

+
Immutable
st_makepoint(x: float, y: float) → geometry

Returns a new Point with the given X and Y coordinates.

+
Immutable
st_makepoint(x: float, y: float, z: float) → geometry

Returns a new Point with the given X, Y, and Z coordinates.

+
Immutable
st_makepoint(x: float, y: float, z: float, m: float) → geometry

Returns a new Point with the given X, Y, Z, and M coordinates.

+
Immutable
st_makepointm(x: float, y: float, m: float) → geometry

Returns a new Point with the given X, Y, and M coordinates.

+
Immutable
st_makepolygon(geometry: geometry) → geometry

Returns a new Polygon with the given outer LineString.

+
Immutable
st_makepolygon(outer: geometry, interior: anyelement[]) → geometry

Returns a new Polygon with the given outer LineString and interior (hole) LineString(s).

+
Immutable
st_makevalid(geometry: geometry) → geometry

Returns a valid form of the given geometry according to the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_maxdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the maximum distance across every pair of points comprising the given geometries. Note if the geometries are the same, it will return the maximum distance between the geometry’s vertexes.

+
Immutable
st_memsize(geometry: geometry) → int

Returns the amount of memory space (in bytes) the geometry takes.

+
Immutable
st_minimumboundingcircle(geometry: geometry) → geometry

Returns the smallest circle polygon that can fully contain a geometry.

+
Immutable
st_minimumboundingcircle(geometry: geometry, num_segs: int) → geometry

Returns the smallest circle polygon that can fully contain a geometry.

+
Immutable
st_minimumboundingradius(geometry: geometry) → tuple{geometry AS center, float AS radius}

Returns a record containing the center point and radius of the smallest circle that can fully contain the given geometry.

+
Immutable
st_minimumclearance(geometry: geometry) → float

Returns the minimum distance a vertex can move before producing an invalid geometry. Returns Infinity if no minimum clearance can be found (e.g. for a single point).

+
Immutable
st_minimumclearanceline(geometry: geometry) → geometry

Returns a LINESTRING spanning the minimum distance a vertex can move before producing an invalid geometry. If no minimum clearance can be found (e.g. for a single point), an empty LINESTRING is returned.

+
Immutable
st_mlinefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mlinefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mlinefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mlinefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mpointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mpointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpolyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mpolyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_mpolyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_mpolyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multi(geometry: geometry) → geometry

Returns the geometry as a new multi-geometry, e.g. converts a POINT to a MULTIPOINT. If the input is already a multitype or collection, it is returned as is.

+
Immutable
st_multilinefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multilinefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multilinestringfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multipointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipolyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipolyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipolygonfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_ndims(geometry: geometry) → int

Returns the number of coordinate dimensions of a given Geometry.

+
Immutable
st_node(geometry: geometry) → geometry

Adds a node on a geometry for each intersection. Resulting geometry is always a MultiLineString.

+
Immutable
st_normalize(geometry: geometry) → geometry

Returns the geometry in its normalized form.

+

This function utilizes the GEOS module.

+
Immutable
st_npoints(geometry: geometry) → int

Returns the number of points in a given Geometry. Works for any shape type.

+
Immutable
st_nrings(geometry: geometry) → int

Returns the number of rings in a Polygon Geometry. Returns 0 if the shape is not a Polygon.

+
Immutable
st_numgeometries(geometry: geometry) → int

Returns the number of shapes inside a given Geometry.

+
Immutable
st_numinteriorring(geometry: geometry) → int

Returns the number of interior rings in a Polygon Geometry. Returns NULL if the shape is not a Polygon.

+
Immutable
st_numinteriorrings(geometry: geometry) → int

Returns the number of interior rings in a Polygon Geometry. Returns NULL if the shape is not a Polygon.

+
Immutable
st_numpoints(geometry: geometry) → int

Returns the number of points in a LineString. Returns NULL if the Geometry is not a LineString.

+
Immutable
st_orderingequals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is exactly equal to geometry_b, having all coordinates in the same order, as well as the same type, SRID, bounding box, and so on.

+
Immutable
st_orientedenvelope(geometry: geometry) → geometry

Returns a minimum rotated rectangle enclosing a geometry. +Note that more than one minimum rotated rectangle may exist. +May return a Point or LineString in the case of degenerate inputs.

+
Immutable
st_overlaps(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a intersects but does not completely contain geometry_b, or vice versa. “Does not completely” implies ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = false.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_perimeter(geography: geography) → float

Returns the perimeter of the given geography in meters. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_perimeter(geography: geography, use_spheroid: bool) → float

Returns the perimeter of the given geography in meters.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_perimeter(geometry: geometry) → float

Returns the perimeter of the given geometry.

+

Note ST_Perimeter is only valid for Polygon - use ST_Length for LineString.

+

This function utilizes the GEOS module.

+
Immutable
st_perimeter2d(geometry: geometry) → float

Returns the perimeter of the given geometry.

+

Note ST_Perimeter is only valid for Polygon - use ST_Length for LineString.

+

This function utilizes the GEOS module.

+
Immutable
st_point(x: float, y: float) → geometry

Returns a new Point with the given X and Y coordinates.

+
Immutable
st_pointfromgeohash(geohash: string) → geometry

Return a POINT Geometry from a GeoHash string with max precision.

+
Immutable
st_pointfromgeohash(geohash: string, precision: int) → geometry

Return a POINT Geometry from a GeoHash string with supplied precision.

+
Immutable
st_pointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Point, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_pointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointinsidecircle(geometry: geometry, x_coord: float, y_coord: float, radius: float) → bool

Returns true if the geometry is a point and is inside the circle. Returns false otherwise.

+
Immutable
st_pointn(geometry: geometry, n: int) → geometry

Returns the n-th Point of a LineString (1-indexed). Returns NULL if out of bounds or not a LineString.

+
Immutable
st_pointonsurface(geometry: geometry) → geometry

Returns a point that intersects with the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_points(geometry: geometry) → geometry

Returns all coordinates in the given Geometry as a MultiPoint, including duplicates.

+
Immutable
st_polyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Polygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_polyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygon(geometry: geometry, srid: int) → geometry

Returns a new Polygon from the given LineString and sets its SRID. It is equivalent to ST_MakePolygon with a single argument followed by ST_SetSRID.

+
Immutable
st_polygonfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Polygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_polygonfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygonfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygonfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_project(geography: geography, distance: float, azimuth: float) → geography

Returns a point projected from a start point along a geodesic using a given distance and azimuth (bearing). +This is known as the direct geodesic problem.

+

The distance is given in meters. Negative values are supported.

+

The azimuth (also known as heading or bearing) is given in radians. It is measured clockwise from true north (azimuth zero). +East is azimuth π/2 (90 degrees); south is azimuth π (180 degrees); west is azimuth 3π/2 (270 degrees). +Negative azimuth values and values greater than 2π (360 degrees) are supported.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry) → string

Returns the DE-9IM spatial relation between geometry_a and geometry_b.

+

This function utilizes the GEOS module.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry, bnr: int) → string

Returns the DE-9IM spatial relation between geometry_a and geometry_b using the given boundary node rule (1:OGC/MOD2, 2:Endpoint, 3:MultivalentEndpoint, 4:MonovalentEndpoint).

+

This function utilizes the GEOS module.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry, pattern: string) → bool

Returns whether the DE-9IM spatial relation between geometry_a and geometry_b matches the DE-9IM pattern.

+

This function utilizes the GEOS module.

+
Immutable
st_relatematch(intersection_matrix: string, pattern: string) → bool

Returns whether the given DE-9IM intersection matrix satisfies the given pattern.

+
Immutable
st_removepoint(line_string: geometry, index: int) → geometry

Removes the Point at the given 0-based index and returns the modified LineString geometry.

+
Immutable
st_removerepeatedpoints(geometry: geometry) → geometry

Returns a geometry with repeated points removed.

+
Immutable
st_removerepeatedpoints(geometry: geometry, tolerance: float) → geometry

Returns a geometry with repeated points removed, within the given distance tolerance.

+
Immutable
st_reverse(geometry: geometry) → geometry

Returns a modified geometry by reversing the order of its vertices.

+
Immutable
st_rotate(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated around the origin by a rotation angle.

+
Immutable
st_rotate(g: geometry, angle_radians: float, origin_point: geometry) → geometry

Returns a modified Geometry whose coordinates are rotated around the provided origin by a rotation angle.

+
Immutable
st_rotate(g: geometry, angle_radians: float, origin_x: float, origin_y: float) → geometry

Returns a modified Geometry whose coordinates are rotated around the provided origin by a rotation angle.

+
Immutable
st_rotatex(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the x axis by a rotation angle.

+
Immutable
st_rotatey(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the y axis by a rotation angle.

+
Immutable
st_rotatez(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the z axis by a rotation angle.

+
Immutable
st_s2covering(geography: geography) → geography

Returns a geography which represents the S2 covering used by the index using the default index configuration.

+
Immutable
st_s2covering(geography: geography, settings: string) → geography

Returns a geography which represents the S2 covering used by the index using the index configuration specified by the settings parameter.

+

The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX ... WITH (...), e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using SELECT ST_S2Covering(geography, 's2_max_level=15,s2_level_mod=3').

+
Immutable
st_s2covering(geometry: geometry) → geometry

Returns a geometry which represents the S2 covering used by the index using the default index configuration.

+
Immutable
st_s2covering(geometry: geometry, settings: string) → geometry

Returns a geometry which represents the S2 covering used by the index using the index configuration specified by the settings parameter.

+

The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX ... WITH (...), e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using SELECT ST_S2Covering(geometry, 's2_max_level=15,s2_level_mod=3')

+
Immutable
st_scale(g: geometry, factor: geometry) → geometry

Returns a modified Geometry scaled by taking in a Geometry as the factor.

+
Immutable
st_scale(g: geometry, factor: geometry, origin: geometry) → geometry

Returns a modified Geometry scaled by the Geometry factor relative to a false origin.

+
Immutable
st_scale(geometry: geometry, x_factor: float, y_factor: float) → geometry

Returns a modified Geometry scaled by the given factors.

+
Immutable
st_segmentize(geography: geography, max_segment_length_meters: float) → geography

Returns a modified Geography having no segment longer than the given max_segment_length meters.

+

The calculations are done on a sphere.

+

This function utilizes the S2 library for spherical calculations.

+
Immutable
st_segmentize(geometry: geometry, max_segment_length: float) → geometry

Returns a modified Geometry having no segment longer than the given max_segment_length. Length units are in units of spatial reference.

+
Immutable
st_setpoint(line_string: geometry, index: int, point: geometry) → geometry

Sets the Point at the given 0-based index and returns the modified LineString geometry.

+
Immutable
st_setsrid(geography: geography, srid: int) → geography

Sets a Geography to a new SRID without transforming the coordinates.

+
Immutable
st_setsrid(geometry: geometry, srid: int) → geometry

Sets a Geometry to a new SRID without transforming the coordinates.

+
Immutable
st_sharedpaths(geometry_a: geometry, geometry_b: geometry) → geometry

Returns a collection containing paths shared by the two input geometries.

+

Those going in the same direction are in the first element of the collection, +those going in the opposite direction are in the second element. +The paths themselves are given in the direction of the first geometry.

+
Immutable
st_shiftlongitude(geometry: geometry) → geometry

Returns a modified version of a geometry in which the longitude (X coordinate) of each point is incremented by 360 if it is <0 and decremented by 360 if it is >180. The result is only meaningful if the coordinates are in longitude/latitude.

+
Immutable
st_shortestline(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the LineString corresponding to the minimum distance across every pair of points comprising the given geometries.

+

Note if geometries are the same, it will return the LineString with the minimum distance between the geometry’s vertexes. The function will return the shortest line that was discovered first when comparing minimum distances if more than one is found.

+
Immutable
st_simplify(geometry: geometry, tolerance: float) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm.

+

This function utilizes the GEOS module.

+
Immutable
st_simplify(geometry: geometry, tolerance: float, preserve_collapsed: bool) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm, retaining objects that would be too small given the tolerance if preserve_collapsed is set to true.

+
Immutable
st_simplifypreservetopology(geometry: geometry, tolerance: float) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm, avoiding the creation of invalid geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_snap(input: geometry, target: geometry, tolerance: float) → geometry

Snaps the vertices and segments of the input geometry to the target geometry’s vertices. +Tolerance is used to control where snapping is performed. The result geometry is the input geometry with the vertices snapped. +If no snapping occurs then the input geometry is returned unchanged.

+
Immutable
st_snaptogrid(geometry: geometry, origin: geometry, size_x: float, size_y: float, size_z: float, size_m: float) → geometry

Snap a geometry to a grid defined by the given origin and X, Y, Z, and M cell sizes. Any dimension with a 0 cell size will not be snapped.

+
Immutable
st_snaptogrid(geometry: geometry, origin_x: float, origin_y: float, size_x: float, size_y: float) → geometry

Snap a geometry to a grid with X coordinates snapped to size_x and Y coordinates snapped to size_y based on an origin of (origin_x, origin_y).

+
Immutable
st_snaptogrid(geometry: geometry, size: float) → geometry

Snap a geometry to a grid of the given size. The specified size is only used to snap X and Y coordinates.

+
Immutable
st_snaptogrid(geometry: geometry, size_x: float, size_y: float) → geometry

Snap a geometry to a grid with X coordinates snapped to size_x and Y coordinates snapped to size_y.

+
Immutable
st_srid(geography: geography) → int

Returns the Spatial Reference Identifier (SRID) for the ST_Geography as defined in spatial_ref_sys table.

+
Immutable
st_srid(geometry: geometry) → int

Returns the Spatial Reference Identifier (SRID) for the ST_Geometry as defined in spatial_ref_sys table.

+
Immutable
st_startpoint(geometry: geometry) → geometry

Returns the first point of a geometry which has shape LineString. Returns NULL if the geometry is not a LineString.

+
Immutable
st_subdivide(geometry: geometry) → geometry

Returns a geometry divided into parts, where each part contains no more than 256 vertices.

+
Immutable
st_subdivide(geometry: geometry, max_vertices: int4) → geometry

Returns a geometry divided into parts, where each part contains no more than the number of vertices provided.

+
Immutable
st_summary(geography: geography) → string

Returns a text summary of the contents of the geography.

+

Flags shown in square brackets after the geometry type have the following meaning:

+
    +
  • M: has M coordinate
  • +
  • Z: has Z coordinate
  • +
  • B: has a cached bounding box
  • +
  • G: is geography
  • +
  • S: has spatial reference system
  • +
+
Immutable
st_summary(geometry: geometry) → string

Returns a text summary of the contents of the geometry.

+

Flags shown in square brackets after the geometry type have the following meaning:

+
    +
  • M: has M coordinate
  • +
  • Z: has Z coordinate
  • +
  • B: has a cached bounding box
  • +
  • G: is geography
  • +
  • S: has spatial reference system
  • +
+
Immutable
st_swapordinates(geometry: geometry, swap_ordinate_string: string) → geometry

Returns a version of the given geometry with given ordinates swapped. +The swap_ordinate_string parameter is a 2-character string naming the ordinates to swap. Valid names are: x, y, z and m.

+
Immutable
st_symdifference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the symmetric difference of both geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_symmetricdifference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the symmetric difference of both geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4, bounds: geometry) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4, bounds: geometry, margin: float) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_touches(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if the only points in common between geometry_a and geometry_b are on the boundary. Note points do not touch other points.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_transform(geometry: geometry, from_proj_text: string, srid: int) → geometry

Transforms a geometry into the coordinate reference system assuming the from_proj_text to the new to_proj_text by projecting its coordinates. The supplied SRID is set on the new geometry.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, from_proj_text: string, to_proj_text: string) → geometry

Transforms a geometry into the coordinate reference system assuming the from_proj_text to the new to_proj_text by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, srid: int) → geometry

Transforms a geometry into the given SRID coordinate reference system by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, to_proj_text: string) → geometry

Transforms a geometry into the coordinate reference system referenced by the projection text by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_translate(g: geometry, delta_x: float, delta_y: float) → geometry

Returns a modified Geometry translated by the given deltas.

+
Immutable
st_translate(g: geometry, delta_x: float, delta_y: float, delta_z: float) → geometry

Returns a modified Geometry translated by the given deltas.

+
Immutable
st_transscale(geometry: geometry, delta_x: float, delta_y: float, x_factor: float, y_factor: float) → geometry

Translates the geometry using the deltaX and deltaY args, then scales it using the XFactor, YFactor args, working in 2D only.

+
Immutable
st_unaryunion(geometry: geometry) → geometry

Returns a union of the components for any geometry or geometry collection provided. Dissolves boundaries of a multipolygon.

+
Immutable
st_voronoilines(geometry: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoilines(geometry: geometry, tolerance: float) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoilines(geometry: geometry, tolerance: float, extend_to: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoipolygons(geometry: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_voronoipolygons(geometry: geometry, tolerance: float) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_voronoipolygons(geometry: geometry, tolerance: float, extend_to: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_within(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is completely inside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_wkbtosql(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation.

+
Immutable
st_wkttosql(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_x(geometry: geometry) → float

Returns the X coordinate of a geometry if it is a Point.

+
Immutable
st_xmax(box2d: box2d) → float

Returns the maximum X ordinate of a box2d.

+
Immutable
st_xmax(geometry: geometry) → float

Returns the maximum X ordinate of a geometry.

+
Immutable
st_xmin(box2d: box2d) → float

Returns the minimum X ordinate of a box2d.

+
Immutable
st_xmin(geometry: geometry) → float

Returns the minimum X ordinate of a geometry.

+
Immutable
st_y(geometry: geometry) → float

Returns the Y coordinate of a geometry if it is a Point.

+
Immutable
st_ymax(box2d: box2d) → float

Returns the maximum Y ordinate of a box2d.

+
Immutable
st_ymax(geometry: geometry) → float

Returns the maximum Y ordinate of a geometry.

+
Immutable
st_ymin(box2d: box2d) → float

Returns the minimum Y ordinate of a box2d.

+
Immutable
st_ymin(geometry: geometry) → float

Returns the minimum Y ordinate of a geometry.

+
Immutable
st_z(geometry: geometry) → float

Returns the Z coordinate of a geometry if it is a Point.

+
Immutable
st_zmflag(geometry: geometry) → int2

Returns a code based on the ZM coordinate dimension of a geometry (XY = 0, XYM = 1, XYZ = 2, XYZM = 3).

+
Immutable
+ +### String and byte functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
ascii(val: string) → int

Returns the character code of the first character in val. Despite the name, the function supports Unicode too.

+
Immutable
bit_count(val: bytes) → int

Calculates the number of bits set used to represent val.

+
Immutable
bit_count(val: varbit) → int

Calculates the number of bits set used to represent val.

+
Immutable
bit_length(val: bytes) → int

Calculates the number of bits used to represent val.

+
Immutable
bit_length(val: string) → int

Calculates the number of bits used to represent val.

+
Immutable
bit_length(val: varbit) → int

Calculates the number of bits used to represent val.

+
Immutable
bitmask_and(a: string, b: string) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: string, b: varbit) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: varbit, b: string) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: varbit, b: varbit) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: string, b: string) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: string, b: varbit) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: varbit, b: string) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: varbit, b: varbit) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: string, b: string) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: string, b: varbit) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: varbit, b: string) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: varbit, b: varbit) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
btrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the beginning or end of input (applies recursively).

+

For example, btrim('doggie', 'eod') returns ggi.

+
Immutable
btrim(val: string) → string

Removes all spaces from the beginning and end of val.

+
Immutable
char_length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
char_length(val: string) → int

Calculates the number of characters in val.

+
Immutable
character_length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
character_length(val: string) → int

Calculates the number of characters in val.

+
Immutable
chr(val: int) → string

Returns the character with the code given in val. Inverse function of ascii().

+
Immutable
compress(data: bytes, codec: string) → bytes

Compress data with the specified codec (‘gzip’, ‘lz4’, ‘snappy’, ‘zstd’).

+
Immutable
concat(anyelement...) → string

Concatenates a comma-separated list of strings.

+
Immutable
concat_ws(string...) → string

Uses the first argument as a separator between the concatenation of the subsequent arguments.

+

For example, concat_ws('!','wow','great') returns wow!great.

+
Immutable
convert_from(str: bytes, enc: string) → string

Decode the bytes in str into a string using encoding enc. Supports encodings ‘UTF8’ and ‘LATIN1’.

+
Immutable
convert_to(str: string, enc: string) → bytes

Encode the string str as a byte array using encoding enc. Supports encodings ‘UTF8’ and ‘LATIN1’.

+
Immutable
decode(text: string, format: string) → bytes

Decodes data using format (hex / escape / base64).

+
Immutable
decompress(data: bytes, codec: string) → bytes

Decompress data with the specified codec (‘gzip’, ‘lz4’, ‘snappy’, ‘zstd’).

+
Immutable
difference(source: string, target: string) → int

Convert two strings to their Soundex codes and report the number of matching code positions.

+
Immutable
encode(data: bytes, format: string) → string

Encodes data using format (hex / escape / base64).

+
Immutable
format(string, anyelement...) → string

Interprets the first argument as a format string similar to C sprintf and interpolates the remaining arguments.

+
Stable
from_ip(val: bytes) → string

Converts the byte string representation of an IP to its character string representation.

+
Immutable
from_uuid(val: bytes) → string

Converts the byte string representation of a UUID to its character string representation.

+
Immutable
get_bit(bit_string: varbit, index: int) → int

Extracts a bit at given index in the bit array.

+
Immutable
get_bit(byte_string: bytes, index: int) → int

Extracts a bit at the given index in the byte array.

+
Immutable
get_byte(byte_string: bytes, index: int) → int

Extracts a byte at the given index in the byte array.

+
Immutable
initcap(val: string) → string

Capitalizes the first letter of val.

+
Immutable
left(input: bytes, return_set: int) → bytes

Returns the first return_set bytes from input.

+
Immutable
left(input: string, return_set: int) → string

Returns the first return_set characters from input.

+
Immutable
length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
length(val: string) → int

Calculates the number of characters in val.

+
Immutable
length(val: varbit) → int

Calculates the number of bits in val.

+
Immutable
lower(val: string) → string

Converts all characters in val to their lower-case equivalents.

+
Immutable
lpad(string: string, length: int) → string

Pads string to length by adding ’ ’ to the left of string. If string is longer than length it is truncated.

+
Immutable
lpad(string: string, length: int, fill: string) → string

Pads string by adding fill to the left of string to make it length. If string is longer than length it is truncated.

+
Immutable
ltrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the beginning (left-hand side) of input (applies recursively).

+

For example, ltrim('doggie', 'od') returns ggie.

+
Immutable
ltrim(val: string) → string

Removes all spaces from the beginning (left-hand side) of val.

+
Immutable
md5(bytes...) → string

Calculates the MD5 hash value of a set of values.

+
Leakproof
md5(string...) → string

Calculates the MD5 hash value of a set of values.

+
Leakproof
octet_length(val: bytes) → int

Calculates the number of bytes used to represent val.

+
Immutable
octet_length(val: string) → int

Calculates the number of bytes used to represent val.

+
Immutable
octet_length(val: varbit) → int

Calculates the number of bits used to represent val.

+
Immutable
overlay(input: string, overlay_val: string, start_pos: int) → string

Replaces characters in input with overlay_val starting at start_pos (begins at 1).

+

For example, overlay('doggie', 'CAT', 2) returns dCATie.

+
Immutable
overlay(input: string, overlay_val: string, start_pos: int, end_pos: int) → string

Deletes the characters in input between start_pos and end_pos (count starts at 1), and then insert overlay_val at start_pos.

+
Immutable
parse_date(string: string, datestyle: string) → date

Parses a date assuming it is in format specified by DateStyle.

+
Immutable
parse_date(val: string) → date

Parses a date assuming it is in MDY format.

+
Immutable
parse_ident(qualified_identifier: string) → string[]

Splits qualified_identifier into an array of identifiers, removing any quoting of individual identifiers. Extra characters after the last identifier are considered an error

+
Immutable
parse_ident(qualified_identifier: string, strict: bool) → string[]

Splits qualified_identifier into an array of identifiers, removing any quoting of individual identifiers. If strict is false, then extra characters after the last identifier are ignored.

+
Immutable
parse_interval(string: string, style: string) → interval

Convert a string to an interval using the given IntervalStyle.

+
Immutable
parse_interval(val: string) → interval

Convert a string to an interval assuming the Postgres IntervalStyle.

+
Immutable
parse_time(string: string, timestyle: string) → time

Parses a time assuming the date (if any) is in format specified by DateStyle.

+
Immutable
parse_time(val: string) → time

Parses a time assuming the date (if any) is in MDY format.

+
Immutable
parse_timestamp(string: string, datestyle: string) → timestamp

Convert a string containing an absolute timestamp to the corresponding timestamp assuming dates formatted using the given DateStyle.

+
Immutable
parse_timestamp(val: string) → timestamp

Convert a string containing an absolute timestamp to the corresponding timestamp assuming dates are in MDY format.

+
Immutable
parse_timetz(string: string, timestyle: string) → timetz

Parses a timetz assuming the date (if any) is in format specified by DateStyle.

+
Immutable
parse_timetz(val: string) → timetz

Parses a timetz assuming the date (if any) is in MDY format.

+
Immutable
prettify_statement(statement: string, line_width: int, align_mode: int, case_mode: int) → string

Prettifies a statement using a user-configured pretty-printing config. +Align mode values range from 0 - 3, representing no, partial, full, and extra alignment respectively. +Case mode values range between 0 - 1, representing lower casing and upper casing respectively.

+
Immutable
prettify_statement(val: string) → string

Prettifies a statement using the default pretty-printing config.

+
Immutable
quote_ident(val: string) → string

Return val suitably quoted to serve as identifier in a SQL statement.

+
Immutable
quote_literal(val: string) → string

Return val suitably quoted to serve as string literal in a SQL statement.

+
Immutable
quote_literal(val: anyelement) → string

Coerce val to a string and then quote it as a literal.

+
Stable
quote_nullable(val: string) → string

Coerce val to a string and then quote it as a literal. If val is NULL, returns ‘NULL’.

+
Immutable
quote_nullable(val: anyelement) → string

Coerce val to a string and then quote it as a literal. If val is NULL, returns ‘NULL’.

+
Stable
regexp_extract(input: string, regex: string) → string

Returns the first match for the Regular Expression regex in input.

+
Immutable
regexp_replace(input: string, regex: string, replace: string) → string

Replaces matches for the Regular Expression regex in input with the Regular Expression replace.

+
Immutable
regexp_replace(input: string, regex: string, replace: string, flags: string) → string

Replaces matches for the regular expression regex in input with the regular expression replace using flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
repeat(input: string, repeat_counter: int) → string

Concatenates input repeat_counter number of times.

+

For example, repeat('dog', 2) returns dogdog.

+
Immutable
replace(input: string, find: string, replace: string) → string

Replaces all occurrences of find with replace in input

+
Immutable
reverse(val: string) → string

Reverses the order of the string’s characters.

+
Immutable
right(input: bytes, return_set: int) → bytes

Returns the last return_set bytes from input.

+
Immutable
right(input: string, return_set: int) → string

Returns the last return_set characters from input.

+
Immutable
rpad(string: string, length: int) → string

Pads string to length by adding ’ ’ to the right of string. If string is longer than length it is truncated.

+
Immutable
rpad(string: string, length: int, fill: string) → string

Pads string to length by adding fill to the right of string. If string is longer than length it is truncated.

+
Immutable
rtrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the end (right-hand side) of input (applies recursively).

+

For example, rtrim('doggie', 'ei') returns dogg.

+
Immutable
rtrim(val: string) → string

Removes all spaces from the end (right-hand side) of val.

+
Immutable
set_bit(bit_string: varbit, index: int, to_set: int) → varbit

Updates a bit at given index in the bit array.

+
Immutable
set_bit(byte_string: bytes, index: int, to_set: int) → bytes

Updates a bit at the given index in the byte array.

+
Immutable
set_byte(byte_string: bytes, index: int, to_set: int) → bytes

Updates a byte at the given index in the byte array.

+
Immutable
sha1(bytes...) → string

Calculates the SHA1 hash value of a set of values.

+
Leakproof
sha1(string...) → string

Calculates the SHA1 hash value of a set of values.

+
Leakproof
sha224(bytes...) → string

Calculates the SHA224 hash value of a set of values.

+
Leakproof
sha224(string...) → string

Calculates the SHA224 hash value of a set of values.

+
Leakproof
sha256(bytes...) → string

Calculates the SHA256 hash value of a set of values.

+
Leakproof
sha256(string...) → string

Calculates the SHA256 hash value of a set of values.

+
Leakproof
sha384(bytes...) → string

Calculates the SHA384 hash value of a set of values.

+
Leakproof
sha384(string...) → string

Calculates the SHA384 hash value of a set of values.

+
Leakproof
sha512(bytes...) → string

Calculates the SHA512 hash value of a set of values.

+
Leakproof
sha512(string...) → string

Calculates the SHA512 hash value of a set of values.

+
Leakproof
similar_escape(pattern: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern.

+
Immutable
similar_escape(pattern: string, escape: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern using escape as an escape token.

+
Immutable
similar_to_escape(pattern: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern.

+
Immutable
similar_to_escape(pattern: string, escape: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern using escape as an escape token.

+
Immutable
similar_to_escape(unescaped: string, pattern: string, escape: string) → bool

Matches unescaped with pattern using escape as an escape token.

+
Immutable
split_part(input: string, delimiter: string, return_index_pos: int) → string

Splits input on delimiter and return the value in the return_index_pos position (starting at 1).

+

For example, split_part('123.456.789.0','.',3) returns 789.

+
Immutable
strpos(input: bytes, find: bytes) → int

Calculates the position where the byte subarray find begins in input.

+
Immutable
strpos(input: string, find: string) → int

Calculates the position where the string find begins in input.

+

For example, strpos('doggie', 'gie') returns 4.

+
Immutable
strpos(input: varbit, find: varbit) → int

Calculates the position where the bit subarray find begins in input.

+
Immutable
substr(input: bytes, start_pos: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: bytes, start_pos: int, length: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substr(input: string, regex: string) → string

Returns a substring of input that matches the regular expression regex.

+
Immutable
substr(input: string, regex: string, escape_char: string) → string

Returns a substring of input that matches the regular expression regex using escape_char as your escape character instead of \.

+
Immutable
substr(input: string, start_pos: int) → string

Returns a substring of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: string, start_pos: int, length: int) → string

Returns a substring of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substr(input: varbit, start_pos: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: varbit, start_pos: int, length: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: bytes, start_pos: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: bytes, start_pos: int, length: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: string, regex: string) → string

Returns a substring of input that matches the regular expression regex.

+
Immutable
substring(input: string, regex: string, escape_char: string) → string

Returns a substring of input that matches the regular expression regex using escape_char as your escape character instead of \.

+
Immutable
substring(input: string, start_pos: int) → string

Returns a substring of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: string, start_pos: int, length: int) → string

Returns a substring of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: varbit, start_pos: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: varbit, start_pos: int, length: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
to_char_with_style(date: date, datestyle: string) → string

Convert a date to a string assuming the string is formatted using the given DateStyle.

+
Immutable
to_char_with_style(interval: interval, style: string) → string

Convert an interval to a string using the given IntervalStyle.

+
Immutable
to_char_with_style(timestamp: timestamp, datestyle: string) → string

Convert a timestamp to a string assuming the string is formatted using the given DateStyle.

+
Immutable
to_english(val: int) → string

This function enunciates the value of its argument using English cardinals.

+
Immutable
to_hex(val: bytes) → string

Converts val to its hexadecimal representation.

+
Immutable
to_hex(val: int) → string

Converts val to its hexadecimal representation.

+
Immutable
to_hex(val: string) → string

Converts val to its hexadecimal representation.

+
Immutable
to_ip(val: string) → bytes

Converts the character string representation of an IP to its byte string representation.

+
Immutable
to_uuid(val: string) → bytes

Converts the character string representation of a UUID to its byte string representation.

+
Immutable
translate(input: string, find: string, replace: string) → string

In input, replaces the first character from find with the first character in replace; repeat for each character in find.

+

For example, translate('doggie', 'dog', '123'); returns 1233ie.

+
Immutable
ulid_to_uuid(val: string) → uuid

Converts a ULID string to its UUID-encoded representation.

+
Immutable
unaccent(val: string) → string

Removes accents (diacritic signs) from the text provided in val.

+
Immutable
upper(val: string) → string

Converts all characters in val to their upper-case equivalents.

+
Immutable
+ +### System info functions + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cluster_logical_timestamp() → decimal

Returns the logical time of the current transaction as +a CockroachDB HLC in decimal form.

+

Note that uses of this function disable server-side optimizations and +may increase either contention or retry errors, or both.

+

Returns an error if run in a transaction with an isolation level weaker than SERIALIZABLE.

+
Volatile
current_database() → string

Returns the current database.

+
Stable
current_schema() → string

Returns the current schema.

+
Stable
current_schemas(include_pg_catalog: bool) → string[]

Returns the valid schemas in the search path.

+
Stable
current_user() → string

Returns the current user. This function is provided for compatibility with PostgreSQL.

+
Stable
session_user() → string

Returns the session user. This function is provided for compatibility with PostgreSQL.

+
Stable
to_regclass(text: string) → regtype

Translates a textual relation name to its OID

+
Stable
to_regnamespace(text: string) → regtype

Translates a textual schema name to its OID

+
Stable
to_regproc(text: string) → regtype

Translates a textual function or procedure name to its OID

+
Stable
to_regprocedure(text: string) → regtype

Translates a textual function or procedure name (with argument types) to its OID

+
Stable
to_regrole(text: string) → regtype

Translates a textual role name to its OID

+
Stable
to_regtype(text: string) → regtype

Translates a textual type name to its OID

+
Stable
version() → string

Returns the node’s version of CockroachDB.

+
Volatile
+ +### TIMETZ functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
current_time() → time

Returns the current transaction’s time with no time zone.

+
Stable
current_time() → timetz

Returns the current transaction’s time with time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
current_time(precision: int) → time

Returns the current transaction’s time with no time zone.

+
Stable
current_time(precision: int) → timetz

Returns the current transaction’s time with time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime() → time

Returns the current transaction’s time with no time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime() → timetz

Returns the current transaction’s time with time zone.

+
Stable
localtime(precision: int) → time

Returns the current transaction’s time with no time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime(precision: int) → timetz

Returns the current transaction’s time with time zone.

+
Stable
+ +### Trigrams functions + + + + + + +
Function → ReturnsDescriptionVolatility
show_trgm(input: string) → string[]

Returns an array of all the trigrams in the given string.

+
Immutable
similarity(left: string, right: string) → float

Returns a number that indicates how similar the two arguments are. The range of the result is zero (indicating that the two strings are completely dissimilar) to one (indicating that the two strings are identical).

+
Immutable
+ +### UUID functions + + + + + +
Function → ReturnsDescriptionVolatility
uuid_to_ulid(val: uuid) → string

Converts a UUID-encoded ULID to its string representation.

+
Immutable
+ +### Compatibility functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
col_description(table_oid: oid, column_number: int) → string

Returns the comment for a table column, which is specified by the OID of its table and its column number. (obj_description cannot be used for table columns, since columns do not have OIDs of their own.)

+
Stable
current_setting(setting_name: string) → string

System info

+
Stable
current_setting(setting_name: string, missing_ok: bool) → string

System info

+
Stable
format_type(type_oid: oid, typemod: int) → string

Returns the SQL name of a data type that is identified by its type OID and possibly a type modifier. Currently, the type modifier is ignored.

+
Stable
getdatabaseencoding() → string

Returns the current encoding name used by the database.

+
Stable
has_any_column_privilege(table: string, privilege: string) → bool

Returns whether or not the current user has privileges for any column of table.

+
Stable
has_any_column_privilege(table: oid, privilege: string) → bool

Returns whether or not the current user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: string, table: string, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: string, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: oid, table: string, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: oid, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_column_privilege(table: string, column: int, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: string, column: string, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: oid, column: int, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: oid, column: string, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(user: string, table: string, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: string, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: oid, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: oid, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: string, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: string, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: oid, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: oid, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_database_privilege(database: string, privilege: string) → bool

Returns whether or not the current user has privileges for database.

+
Stable
has_database_privilege(database: oid, privilege: string) → bool

Returns whether or not the current user has privileges for database.

+
Stable
has_database_privilege(user: string, database: string, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: string, database: oid, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: oid, database: string, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: oid, database: oid, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_foreign_data_wrapper_privilege(fdw: string, privilege: string) → bool

Returns whether or not the current user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(fdw: oid, privilege: string) → bool

Returns whether or not the current user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: string, fdw: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: string, fdw: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: oid, fdw: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: oid, fdw: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_function_privilege(function: string, privilege: string) → bool

Returns whether or not the current user has privileges for function.

+
Stable
has_function_privilege(function: oid, privilege: string) → bool

Returns whether or not the current user has privileges for function.

+
Stable
has_function_privilege(user: string, function: string, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: string, function: oid, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: oid, function: string, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: oid, function: oid, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_language_privilege(language: string, privilege: string) → bool

Returns whether or not the current user has privileges for language.

+
Stable
has_language_privilege(language: oid, privilege: string) → bool

Returns whether or not the current user has privileges for language.

+
Stable
has_language_privilege(user: string, language: string, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: string, language: oid, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: oid, language: string, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: oid, language: oid, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_schema_privilege(schema: string, privilege: string) → bool

Returns whether or not the current user has privileges for schema.

+
Stable
has_schema_privilege(schema: oid, privilege: string) → bool

Returns whether or not the current user has privileges for schema.

+
Stable
has_schema_privilege(user: string, schema: string, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: string, schema: oid, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: oid, schema: string, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: oid, schema: oid, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_sequence_privilege(sequence: string, privilege: string) → bool

Returns whether or not the current user has privileges for sequence.

+
Stable
has_sequence_privilege(sequence: oid, privilege: string) → bool

Returns whether or not the current user has privileges for sequence.

+
Stable
has_sequence_privilege(user: string, sequence: string, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: string, sequence: oid, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: oid, sequence: string, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: oid, sequence: oid, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_server_privilege(server: string, privilege: string) → bool

Returns whether or not the current user has privileges for foreign server.

+
Stable
has_server_privilege(server: oid, privilege: string) → bool

Returns whether or not the current user has privileges for foreign server.

+
Stable
has_server_privilege(user: string, server: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: string, server: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: oid, server: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: oid, server: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_table_privilege(table: string, privilege: string) → bool

Returns whether or not the current user has privileges for table.

+
Stable
has_table_privilege(table: oid, privilege: string) → bool

Returns whether or not the current user has privileges for table.

+
Stable
has_table_privilege(user: string, table: string, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: string, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: oid, table: string, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: oid, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_tablespace_privilege(tablespace: string, privilege: string) → bool

Returns whether or not the current user has privileges for tablespace.

+
Stable
has_tablespace_privilege(tablespace: oid, privilege: string) → bool

Returns whether or not the current user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: string, tablespace: string, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: string, tablespace: oid, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: oid, tablespace: string, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: oid, tablespace: oid, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_type_privilege(type: string, privilege: string) → bool

Returns whether or not the current user has privileges for type.

+
Stable
has_type_privilege(type: oid, privilege: string) → bool

Returns whether or not the current user has privileges for type.

+
Stable
has_type_privilege(user: string, type: string, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: string, type: oid, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: oid, type: string, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: oid, type: oid, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
information_schema._pg_numeric_precision(typid: oid, typmod: int4) → int

Returns the precision of the given type with type modifier

+
Immutable
information_schema._pg_numeric_precision_radix(typid: oid, typmod: int4) → int

Returns the radix of the given type with type modifier

+
Immutable
information_schema._pg_numeric_scale(typid: oid, typmod: int4) → int

Returns the scale of the given type with type modifier

+
Immutable
nameconcatoid(name: string, oid: oid) → name

Used in the information_schema to produce specific_name columns, which are supposed to be unique per schema. The result is the same as ($1::text || ‘_’ || $2::text)::name except that, if it would not fit in 63 characters, we make it do so by truncating the name input (not the oid).

+
Immutable
obj_description(object_oid: oid) → string

Returns the comment for a database object specified by its OID alone. This is deprecated since there is no guarantee that OIDs are unique across different system catalogs; therefore, the wrong comment might be returned.

+
Stable
obj_description(object_oid: oid, catalog_name: string) → string

Returns the comment for a database object specified by its OID and the name of the containing system catalog. For example, obj_description(123456, ‘pg_class’) would retrieve the comment for the table with OID 123456.

+
Stable
oidvectortypes(vector: oidvector) → string

Generates a comma-separated string of type names from an oidvector.

+
Stable
pg_backend_pid() → int

Returns a numerical ID attached to this session. This ID is part of the query cancellation key used by the wire protocol. This function was only added for compatibility, and unlike in Postgres, the returned value does not correspond to a real process ID.

+
Stable
pg_collation_for(str: anyelement) → string

Returns the collation of the argument

+
Stable
pg_column_is_updatable(reloid: oid, attnum: int2, include_triggers: bool) → bool

Returns whether the given column can be updated.

+
Stable
pg_column_size(anyelement...) → int

Return size in bytes of the column provided as an argument

+
Immutable
pg_function_is_visible(oid: oid) → bool

Returns whether the function with the given OID belongs to one of the schemas on the search path.

+
Stable
pg_get_function_arg_default(func_oid: oid, arg_num: int4) → string

Get textual representation of a function argument’s default value. The second argument of this function is the argument number among all arguments (i.e. proallargtypes, not proargtypes), starting with 1, because that’s how information_schema.sql uses it. Currently, this always returns NULL, since CockroachDB does not support default values.

+
Stable
pg_get_function_arguments(func_oid: oid) → string

Returns the argument list (with defaults) necessary to identify a function, in the form it would need to appear in within CREATE FUNCTION.

+
Stable
pg_get_function_identity_arguments(func_oid: oid) → string

Returns the argument list (without defaults) necessary to identify a function, in the form it would need to appear in within ALTER FUNCTION, for instance.

+
Stable
pg_get_function_result(func_oid: oid) → string

Returns the types of the result of the specified function.

+
Stable
pg_get_functiondef(func_oid: oid) → string

For user-defined functions, returns the definition of the specified function. For builtin functions, returns the name of the function.

+
Stable
pg_get_indexdef(index_oid: oid) → string

Gets the CREATE INDEX command for index

+
Stable
pg_get_indexdef(index_oid: oid, column_no: int, pretty_bool: bool) → string

Gets the CREATE INDEX command for index, or definition of just one index column when given a non-zero column number

+
Stable
pg_get_serial_sequence(table_name: string, column_name: string) → string

Returns the name of the sequence used by the given column_name in the table table_name.

+
Stable
pg_get_viewdef(view_oid: oid) → string

Returns the CREATE statement for an existing view.

+
Stable
pg_get_viewdef(view_oid: oid, pretty_bool: bool) → string

Returns the CREATE statement for an existing view.

+
Stable
pg_has_role(role: string, privilege: string) → bool

Returns whether or not the current user has privileges for role.

+
Stable
pg_has_role(role: oid, privilege: string) → bool

Returns whether or not the current user has privileges for role.

+
Stable
pg_has_role(user: string, role: string, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: string, role: oid, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: oid, role: string, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: oid, role: oid, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_is_other_temp_schema(oid: oid) → bool

Returns true if the given OID is the OID of another session’s temporary schema. (This can be useful, for example, to exclude other sessions’ temporary tables from a catalog display.)

+
Stable
pg_my_temp_schema() → oid

Returns the OID of the current session’s temporary schema, or zero if it has none (because it has not created any temporary tables).

+
Stable
pg_relation_is_updatable(reloid: oid, include_triggers: bool) → int4

Returns the update events the relation supports.

+
Stable
pg_sequence_last_value(sequence_oid: oid) → int

Returns the last value generated by a sequence, or NULL if the sequence has not been used yet.

+
Volatile
pg_sleep(seconds: float) → bool

pg_sleep makes the current session’s process sleep until seconds seconds have elapsed. seconds is a value of type double precision, so fractional-second delays can be specified.

+
Volatile
pg_table_is_visible(oid: oid) → bool

Returns whether the table with the given OID belongs to one of the schemas on the search path.

+
Stable
pg_type_is_visible(oid: oid) → bool

Returns whether the type with the given OID belongs to one of the schemas on the search path.

+
Stable
set_config(setting_name: string, new_value: string, is_local: bool) → string

System info

+
Volatile
shobj_description(object_oid: oid, catalog_name: string) → string

Returns the comment for a shared database object specified by its OID and the name of the containing system catalog. This is just like obj_description except that it is used for retrieving comments on shared objects (e.g. databases).

+
Stable
+ diff --git a/src/current/_includes/cockroach-generated/release-24.3/sql/operators.md b/src/current/_includes/cockroach-generated/release-24.3/sql/operators.md new file mode 100644 index 00000000000..47027b064fe --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-24.3/sql/operators.md @@ -0,0 +1,658 @@ + + + + + +
#Return
int # intint
varbit # varbitvarbit
+ + + + +
#>Return
jsonb #> string[]jsonb
+ + + + +
#>>Return
jsonb #>> string[]string
+ + + + + + + + + +
%Return
decimal % decimaldecimal
decimal % intdecimal
float % floatfloat
int % decimaldecimal
int % intint
string % stringbool
+ + + + + + +
&Return
inet & inetinet
int & intint
varbit & varbitvarbit
+ + + + + + + + + +
&&Return
anyelement && anyelementbool
box2d && box2dbool
box2d && geometrybool
geometry && box2dbool
geometry && geometrybool
inet && inetbool
+ + + + + + + + + + + + + + + +
*Return
decimal * decimaldecimal
decimal * intdecimal
decimal * intervalinterval
float * floatfloat
float * intervalinterval
int * decimaldecimal
int * intint
int * intervalinterval
interval * decimalinterval
interval * floatinterval
interval * intinterval
vector * vectorvector
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Return
+decimaldecimal
+floatfloat
+intint
+intervalinterval
date + intdate
date + intervaltimestamp
date + timetimestamp
date + timetztimestamptz
decimal + decimaldecimal
decimal + intdecimal
decimal + pg_lsnpg_lsn
float + floatfloat
inet + intinet
int + datedate
int + decimaldecimal
int + inetinet
int + intint
interval + datetimestamp
interval + intervalinterval
interval + timetime
interval + timestamptimestamp
interval + timestamptztimestamptz
interval + timetztimetz
pg_lsn + decimalpg_lsn
time + datetimestamp
time + intervaltime
timestamp + intervaltimestamp
timestamptz + intervaltimestamptz
timetz + datetimestamptz
timetz + intervaltimetz
vector + vectorvector
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
-Return
-decimaldecimal
-floatfloat
-intint
-intervalinterval
date - dateint
date - intdate
date - intervaltimestamp
date - timetimestamp
decimal - decimaldecimal
decimal - intdecimal
float - floatfloat
inet - inetint
inet - intinet
int - decimaldecimal
int - intint
interval - intervalinterval
jsonb - intjsonb
jsonb - stringjsonb
jsonb - string[]jsonb
pg_lsn - decimalpg_lsn
pg_lsn - pg_lsndecimal
time - intervaltime
time - timeinterval
timestamp - intervaltimestamp
timestamp - timestampinterval
timestamp - timestamptzinterval
timestamptz - intervaltimestamptz
timestamptz - timestampinterval
timestamptz - timestamptzinterval
timetz - intervaltimetz
vector - vectorvector
+ + + + + +
->Return
jsonb -> intjsonb
jsonb -> stringjsonb
+ + + + + +
->>Return
jsonb ->> intstring
jsonb ->> stringstring
+ + + + + + + + + + +
/Return
decimal / decimaldecimal
decimal / intdecimal
float / floatfloat
int / decimaldecimal
int / intdecimal
interval / floatinterval
interval / intinterval
+ + + + + + + + +
//Return
decimal // decimaldecimal
decimal // intdecimal
float // floatfloat
int // decimaldecimal
int // intint
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
<Return
anyenum < anyenumbool
bool < boolbool
bool[] < bool[]bool
box2d < box2dbool
bpchar < bpcharbool
bytes < bytesbool
bytes[] < bytes[]bool
collatedstring < collatedstringbool
date < datebool
date < timestampbool
date < timestamptzbool
date[] < date[]bool
decimal < decimalbool
decimal < floatbool
decimal < intbool
decimal[] < decimal[]bool
float < decimalbool
float < floatbool
float < intbool
float[] < float[]bool
geography < geographybool
geometry < geometrybool
inet < inetbool
inet[] < inet[]bool
int < decimalbool
int < floatbool
int < intbool
int < oidbool
int[] < int[]bool
interval < intervalbool
interval[] < interval[]bool
jsonb < jsonbbool
oid < intbool
oid < oidbool
pg_lsn < pg_lsnbool
refcursor < refcursorbool
string < stringbool
string[] < string[]bool
time < timebool
time < timetzbool
time[] < time[]bool
timestamp < datebool
timestamp < timestampbool
timestamp < timestamptzbool
timestamp[] < timestamp[]bool
timestamptz < datebool
timestamptz < timestampbool
timestamptz < timestamptzbool
timestamptz < timestamptzbool
timetz < timebool
timetz < timetzbool
tuple < tuplebool
uuid < uuidbool
uuid[] < uuid[]bool
varbit < varbitbool
vector < vectorbool
+ + + + +
<#>Return
vector <#> vectorfloat
+ + + + +
<->Return
vector <-> vectorfloat
+ + + + + + +
<<Return
inet << inetbool
int << intint
varbit << intvarbit
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
<=Return
anyenum <= anyenumbool
bool <= boolbool
bool[] <= bool[]bool
box2d <= box2dbool
bpchar <= bpcharbool
bytes <= bytesbool
bytes[] <= bytes[]bool
collatedstring <= collatedstringbool
date <= datebool
date <= timestampbool
date <= timestamptzbool
date[] <= date[]bool
decimal <= decimalbool
decimal <= floatbool
decimal <= intbool
decimal[] <= decimal[]bool
float <= decimalbool
float <= floatbool
float <= intbool
float[] <= float[]bool
geography <= geographybool
geometry <= geometrybool
inet <= inetbool
inet[] <= inet[]bool
int <= decimalbool
int <= floatbool
int <= intbool
int <= oidbool
int[] <= int[]bool
interval <= intervalbool
interval[] <= interval[]bool
jsonb <= jsonbbool
oid <= intbool
oid <= oidbool
pg_lsn <= pg_lsnbool
refcursor <= refcursorbool
string <= stringbool
string[] <= string[]bool
time <= timebool
time <= timetzbool
time[] <= time[]bool
timestamp <= datebool
timestamp <= timestampbool
timestamp <= timestamptzbool
timestamp[] <= timestamp[]bool
timestamptz <= datebool
timestamptz <= timestampbool
timestamptz <= timestamptzbool
timestamptz <= timestamptzbool
timetz <= timebool
timetz <= timetzbool
tuple <= tuplebool
uuid <= uuidbool
uuid[] <= uuid[]bool
varbit <= varbitbool
vector <= vectorbool
+ + + + +
<=>Return
vector <=> vectorfloat
+ + + + + +
<@Return
anyelement <@ anyelementbool
jsonb <@ jsonbbool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
=Return
anyenum = anyenumbool
bool = boolbool
bool[] = bool[]bool
box2d = box2dbool
bpchar = bpcharbool
bytes = bytesbool
bytes[] = bytes[]bool
collatedstring = collatedstringbool
date = datebool
date = timestampbool
date = timestamptzbool
date[] = date[]bool
decimal = decimalbool
decimal = floatbool
decimal = intbool
decimal[] = decimal[]bool
float = decimalbool
float = floatbool
float = intbool
float[] = float[]bool
geography = geographybool
geometry = geometrybool
inet = inetbool
inet[] = inet[]bool
int = decimalbool
int = floatbool
int = intbool
int = oidbool
int[] = int[]bool
interval = intervalbool
interval[] = interval[]bool
jsonb = jsonbbool
oid = intbool
oid = oidbool
pg_lsn = pg_lsnbool
refcursor = refcursorbool
string = stringbool
string[] = string[]bool
time = timebool
time = timetzbool
time[] = time[]bool
timestamp = datebool
timestamp = timestampbool
timestamp = timestamptzbool
timestamp[] = timestamp[]bool
timestamptz = datebool
timestamptz = timestampbool
timestamptz = timestamptzbool
timestamptz = timestamptzbool
timetz = timebool
timetz = timetzbool
tsquery = tsquerybool
tsvector = tsvectorbool
tuple = tuplebool
uuid = uuidbool
uuid[] = uuid[]bool
varbit = varbitbool
vector = vectorbool
+ + + + + + +
>>Return
inet >> inetbool
int >> intint
varbit >> intvarbit
+ + + + +
?Return
jsonb ? stringbool
+ + + + +
?&Return
jsonb ?& string[]bool
+ + + + +
?|Return
jsonb ?| string[]bool
+ + + + + +
@>Return
anyelement @> anyelementbool
jsonb @> jsonbbool
+ + + + + +
@@Return
tsquery @@ tsvectorbool
tsvector @@ tsquerybool
+ + + + +
ILIKEReturn
string ILIKE stringbool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
INReturn
anyenum IN tuplebool
bool IN tuplebool
box2d IN tuplebool
bpchar IN tuplebool
bytes IN tuplebool
collatedstring IN tuplebool
date IN tuplebool
decimal IN tuplebool
float IN tuplebool
geography IN tuplebool
geometry IN tuplebool
inet IN tuplebool
int IN tuplebool
interval IN tuplebool
jsonb IN tuplebool
oid IN tuplebool
pg_lsn IN tuplebool
refcursor IN tuplebool
string IN tuplebool
time IN tuplebool
timestamp IN tuplebool
timestamptz IN tuplebool
timetz IN tuplebool
tuple IN tuplebool
uuid IN tuplebool
varbit IN tuplebool
vector IN tuplebool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
IS NOT DISTINCT FROMReturn
anyelement IS NOT DISTINCT FROM unknownbool
anyenum IS NOT DISTINCT FROM anyenumbool
bool IS NOT DISTINCT FROM boolbool
bool[] IS NOT DISTINCT FROM bool[]bool
box2d IS NOT DISTINCT FROM box2dbool
bpchar IS NOT DISTINCT FROM bpcharbool
bytes IS NOT DISTINCT FROM bytesbool
bytes[] IS NOT DISTINCT FROM bytes[]bool
collatedstring IS NOT DISTINCT FROM collatedstringbool
date IS NOT DISTINCT FROM datebool
date IS NOT DISTINCT FROM timestampbool
date IS NOT DISTINCT FROM timestamptzbool
date[] IS NOT DISTINCT FROM date[]bool
decimal IS NOT DISTINCT FROM decimalbool
decimal IS NOT DISTINCT FROM floatbool
decimal IS NOT DISTINCT FROM intbool
decimal[] IS NOT DISTINCT FROM decimal[]bool
float IS NOT DISTINCT FROM decimalbool
float IS NOT DISTINCT FROM floatbool
float IS NOT DISTINCT FROM intbool
float[] IS NOT DISTINCT FROM float[]bool
geography IS NOT DISTINCT FROM geographybool
geometry IS NOT DISTINCT FROM geometrybool
inet IS NOT DISTINCT FROM inetbool
inet[] IS NOT DISTINCT FROM inet[]bool
int IS NOT DISTINCT FROM decimalbool
int IS NOT DISTINCT FROM floatbool
int IS NOT DISTINCT FROM intbool
int IS NOT DISTINCT FROM oidbool
int[] IS NOT DISTINCT FROM int[]bool
interval IS NOT DISTINCT FROM intervalbool
interval[] IS NOT DISTINCT FROM interval[]bool
jsonb IS NOT DISTINCT FROM jsonbbool
oid IS NOT DISTINCT FROM intbool
oid IS NOT DISTINCT FROM oidbool
pg_lsn IS NOT DISTINCT FROM pg_lsnbool
refcursor IS NOT DISTINCT FROM refcursorbool
string IS NOT DISTINCT FROM stringbool
string[] IS NOT DISTINCT FROM string[]bool
time IS NOT DISTINCT FROM timebool
time IS NOT DISTINCT FROM timetzbool
time[] IS NOT DISTINCT FROM time[]bool
timestamp IS NOT DISTINCT FROM datebool
timestamp IS NOT DISTINCT FROM timestampbool
timestamp IS NOT DISTINCT FROM timestamptzbool
timestamp[] IS NOT DISTINCT FROM timestamp[]bool
timestamptz IS NOT DISTINCT FROM datebool
timestamptz IS NOT DISTINCT FROM timestampbool
timestamptz IS NOT DISTINCT FROM timestamptzbool
timestamptz IS NOT DISTINCT FROM timestamptzbool
timetz IS NOT DISTINCT FROM timebool
timetz IS NOT DISTINCT FROM timetzbool
tsquery IS NOT DISTINCT FROM tsquerybool
tsvector IS NOT DISTINCT FROM tsvectorbool
tuple IS NOT DISTINCT FROM tuplebool
unknown IS NOT DISTINCT FROM unknownbool
unknown IS NOT DISTINCT FROM voidbool
uuid IS NOT DISTINCT FROM uuidbool
uuid[] IS NOT DISTINCT FROM uuid[]bool
varbit IS NOT DISTINCT FROM varbitbool
vector IS NOT DISTINCT FROM vectorbool
void IS NOT DISTINCT FROM unknownbool
+ + + + +
LIKEReturn
string LIKE stringbool
+ + + + +
SIMILAR TOReturn
string SIMILAR TO stringbool
+ + + + + + + + +
^Return
decimal ^ decimaldecimal
decimal ^ intdecimal
float ^ floatfloat
int ^ decimaldecimal
int ^ intint
+ + + + + + +
|Return
inet | inetinet
int | intint
varbit | varbitvarbit
+ + + + + +
|/Return
|/decimaldecimal
|/floatfloat
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
||Return
bool || bool[]bool[]
bool || stringstring
bool[] || boolbool[]
bool[] || bool[]bool[]
box2d || box2dbox2d
box2d || stringstring
bytes || bytesbytes
bytes || bytes[]bytes[]
bytes[] || bytesbytes[]
bytes[] || bytes[]bytes[]
date || date[]date[]
date || stringstring
date[] || datedate[]
date[] || date[]date[]
decimal || decimal[]decimal[]
decimal || stringstring
decimal[] || decimaldecimal[]
decimal[] || decimal[]decimal[]
float || float[]float[]
float || stringstring
float[] || floatfloat[]
float[] || float[]float[]
geography || geographygeography
geography || stringstring
geometry || geometrygeometry
geometry || stringstring
inet || inet[]inet[]
inet || stringstring
inet[] || inetinet[]
inet[] || inet[]inet[]
int || int[]int[]
int || stringstring
int[] || intint[]
int[] || int[]int[]
interval || interval[]interval[]
interval || stringstring
interval[] || intervalinterval[]
interval[] || interval[]interval[]
jsonb || jsonbjsonb
jsonb || stringstring
oid || oidoid
oid || stringstring
pg_lsn || pg_lsnpg_lsn
pg_lsn || stringstring
refcursor || refcursorrefcursor
refcursor || stringstring
string || boolstring
string || box2dstring
string || datestring
string || decimalstring
string || floatstring
string || geographystring
string || geometrystring
string || inetstring
string || intstring
string || intervalstring
string || jsonbstring
string || oidstring
string || pg_lsnstring
string || refcursorstring
string || stringstring
string || string[]string[]
string || timestring
string || timestampstring
string || timestamptzstring
string || timetzstring
string || tuplestring
string || uuidstring
string || varbitstring
string[] || stringstring[]
string[] || string[]string[]
time || stringstring
time || time[]time[]
time[] || timetime[]
time[] || time[]time[]
timestamp || stringstring
timestamp || timestamp[]timestamp[]
timestamp[] || timestamptimestamp[]
timestamp[] || timestamp[]timestamp[]
timestamptz || stringstring
timestamptz || timestamptztimestamptz
timestamptz || timestamptztimestamptz
timestamptz || timestamptztimestamptz
timetz || stringstring
timetz || timetztimetz
tuple || stringstring
uuid || stringstring
uuid || uuid[]uuid[]
uuid[] || uuiduuid[]
uuid[] || uuid[]uuid[]
varbit || stringstring
varbit || varbitvarbit
+ + + + + +
||/Return
||/decimaldecimal
||/floatfloat
+ + + + + + + + + + + +
~Return
~inetinet
~intint
~varbitvarbit
box2d ~ box2dbool
box2d ~ geometrybool
geometry ~ box2dbool
geometry ~ geometrybool
string ~ stringbool
+ + + + +
~*Return
string ~* stringbool
diff --git a/src/current/_includes/cockroach-generated/release-24.3/sql/window_functions.md b/src/current/_includes/cockroach-generated/release-24.3/sql/window_functions.md new file mode 100644 index 00000000000..e1032ff82de --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-24.3/sql/window_functions.md @@ -0,0 +1,413 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cume_dist() → float

Calculates the relative rank of the current row: (number of rows preceding or peer with current row) / (total rows).

+
Immutable
dense_rank() → int

Calculates the rank of the current row without gaps; this function counts peer groups.

+
Immutable
first_value(val: bool) → bool

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: bytes) → bytes

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: date) → date

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: decimal) → decimal

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: float) → float

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: inet) → inet

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: int) → int

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: interval) → interval

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: string) → string

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: time) → time

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timestamp) → timestamp

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timestamptz) → timestamptz

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: uuid) → uuid

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: box2d) → box2d

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: geography) → geography

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: geometry) → geometry

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: jsonb) → jsonb

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: oid) → oid

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: pg_lsn) → pg_lsn

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: refcursor) → refcursor

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timetz) → timetz

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: varbit) → varbit

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
lag(val: bool) → bool

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: bool, n: int) → bool

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: bool, n: int, default: bool) → bool

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: bytes) → bytes

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: bytes, n: int) → bytes

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: bytes, n: int, default: bytes) → bytes

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: date) → date

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: date, n: int) → date

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: date, n: int, default: date) → date

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: decimal) → decimal

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: decimal, n: int) → decimal

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: decimal, n: int, default: decimal) → decimal

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: float) → float

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: float, n: int) → float

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: float, n: int, default: float) → float

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: inet) → inet

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: inet, n: int) → inet

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: inet, n: int, default: inet) → inet

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: int) → int

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: int, n: int) → int

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: int, n: int, default: int) → int

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: interval) → interval

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: interval, n: int) → interval

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: interval, n: int, default: interval) → interval

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: string) → string

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: string, n: int) → string

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: string, n: int, default: string) → string

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: time) → time

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: time, n: int) → time

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: time, n: int, default: time) → time

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timestamp) → timestamp

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timestamp, n: int, default: timestamp) → timestamp

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timestamptz) → timestamptz

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timestamptz, n: int, default: timestamptz) → timestamptz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: uuid) → uuid

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: uuid, n: int) → uuid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: uuid, n: int, default: uuid) → uuid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: box2d) → box2d

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: box2d, n: int) → box2d

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: box2d, n: int, default: box2d) → box2d

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: geography) → geography

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: geography, n: int) → geography

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: geography, n: int, default: geography) → geography

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: geometry) → geometry

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: geometry, n: int) → geometry

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: geometry, n: int, default: geometry) → geometry

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: jsonb) → jsonb

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: jsonb, n: int, default: jsonb) → jsonb

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: oid) → oid

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: oid, n: int) → oid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: oid, n: int, default: oid) → oid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: pg_lsn) → pg_lsn

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: pg_lsn, n: int, default: pg_lsn) → pg_lsn

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: refcursor) → refcursor

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: refcursor, n: int, default: refcursor) → refcursor

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timetz) → timetz

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timetz, n: int) → timetz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timetz, n: int, default: timetz) → timetz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: varbit) → varbit

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: varbit, n: int) → varbit

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: varbit, n: int, default: varbit) → varbit

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
last_value(val: bool) → bool

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: bytes) → bytes

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: date) → date

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: decimal) → decimal

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: float) → float

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: inet) → inet

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: int) → int

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: interval) → interval

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: string) → string

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: time) → time

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timestamp) → timestamp

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timestamptz) → timestamptz

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: uuid) → uuid

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: box2d) → box2d

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: geography) → geography

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: geometry) → geometry

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: jsonb) → jsonb

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: oid) → oid

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: pg_lsn) → pg_lsn

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: refcursor) → refcursor

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timetz) → timetz

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: varbit) → varbit

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
lead(val: bool) → bool

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: bool, n: int) → bool

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: bool, n: int, default: bool) → bool

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: bytes) → bytes

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: bytes, n: int) → bytes

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: bytes, n: int, default: bytes) → bytes

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: date) → date

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: date, n: int) → date

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: date, n: int, default: date) → date

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: decimal) → decimal

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: decimal, n: int) → decimal

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: decimal, n: int, default: decimal) → decimal

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: float) → float

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: float, n: int) → float

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: float, n: int, default: float) → float

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: inet) → inet

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: inet, n: int) → inet

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: inet, n: int, default: inet) → inet

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: int) → int

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: int, n: int) → int

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: int, n: int, default: int) → int

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: interval) → interval

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: interval, n: int) → interval

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: interval, n: int, default: interval) → interval

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: string) → string

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: string, n: int) → string

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: string, n: int, default: string) → string

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: time) → time

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: time, n: int) → time

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: time, n: int, default: time) → time

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timestamp) → timestamp

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timestamp, n: int, default: timestamp) → timestamp

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timestamptz) → timestamptz

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timestamptz, n: int, default: timestamptz) → timestamptz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: uuid) → uuid

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: uuid, n: int) → uuid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: uuid, n: int, default: uuid) → uuid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: box2d) → box2d

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: box2d, n: int) → box2d

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: box2d, n: int, default: box2d) → box2d

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: geography) → geography

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: geography, n: int) → geography

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: geography, n: int, default: geography) → geography

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: geometry) → geometry

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: geometry, n: int) → geometry

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: geometry, n: int, default: geometry) → geometry

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: jsonb) → jsonb

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: jsonb, n: int, default: jsonb) → jsonb

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: oid) → oid

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: oid, n: int) → oid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: oid, n: int, default: oid) → oid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: pg_lsn) → pg_lsn

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: pg_lsn, n: int, default: pg_lsn) → pg_lsn

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: refcursor) → refcursor

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: refcursor, n: int, default: refcursor) → refcursor

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timetz) → timetz

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timetz, n: int) → timetz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timetz, n: int, default: timetz) → timetz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: varbit) → varbit

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: varbit, n: int) → varbit

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: varbit, n: int, default: varbit) → varbit

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returreturns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
nth_value(val: bool, n: int) → bool

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: bytes, n: int) → bytes

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: date, n: int) → date

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: decimal, n: int) → decimal

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: float, n: int) → float

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: inet, n: int) → inet

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: int, n: int) → int

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: interval, n: int) → interval

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: string, n: int) → string

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: time, n: int) → time

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: uuid, n: int) → uuid

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: box2d, n: int) → box2d

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: geography, n: int) → geography

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: geometry, n: int) → geometry

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: oid, n: int) → oid

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timetz, n: int) → timetz

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: varbit, n: int) → varbit

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
ntile(n: int) → int

Calculates an integer ranging from 1 to n, dividing the partition as equally as possible.

+
Immutable
percent_rank() → float

Calculates the relative rank of the current row: (rank - 1) / (total rows - 1).

+
Immutable
rank() → int

Calculates the rank of the current row with gaps; same as row_number of its first peer.

+
Immutable
row_number() → int

Calculates the number of the current row within its partition, counting from 1.

+
Immutable
+ diff --git a/src/current/_includes/cockroach-generated/release-25.1/eventlog.md b/src/current/_includes/cockroach-generated/release-25.1/eventlog.md new file mode 100644 index 00000000000..99bf55ac05d --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.1/eventlog.md @@ -0,0 +1,3436 @@ +Certain notable events are reported using a structured format. +Commonly, these notable events are also copied to the table +`system.eventlog`, unless the cluster setting +`server.eventlog.enabled` is unset. + +Additionally, notable events are copied to specific external logging +channels in log messages, where they can be collected for further processing. + +The sections below document the possible notable event types +in this version of CockroachDB. For each event type, a table +documents the possible fields. A field may be omitted from +an event if its value is empty or zero. + +A field is also considered "Sensitive" if it may contain +application-specific information or personally identifiable information (PII). In that case, +the copy of the event sent to the external logging channel +will contain redaction markers in a format that is compatible +with the redaction facilities in [`cockroach debug zip`](cockroach-debug-zip.html) +and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html), +provided the `redactable` functionality is enabled on the logging sink. + +Events not documented on this page will have an unstructured format in log messages. + +## Changefeed telemetry events + +Events in this category pertain to changefeed usage and metrics. + +Events in this category are logged to the `TELEMETRY` channel. + + +### `changefeed_emitted_bytes` + +An event of type `changefeed_emitted_bytes` is an event representing the bytes emitted by a changefeed over an interval. + + +| Field | Description | Sensitive | +|--|--|--| +| `EmittedBytes` | The number of bytes emitted. | no | +| `EmittedMessages` | The number of messages emitted. 
| no | +| `LoggingInterval` | The time period in nanoseconds between emitting telemetry events of this type (per-aggregator). | no | +| `Closing` | Flag to indicate that the changefeed is closing. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. | no | + +### `changefeed_failed` + +An event of type `changefeed_failed` is an event for any changefeed failure since the plan hook +was triggered. + + +| Field | Description | Sensitive | +|--|--|--| +| `FailureType` | The reason / environment with which the changefeed failed (ex: connection_closed, changefeed_behind). | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. 
| no | + +### `create_changefeed` + +An event of type `create_changefeed` is an event for any CREATE CHANGEFEED query that +successfully starts running. Failed CREATE statements will show up as +ChangefeedFailed events. + + +| Field | Description | Sensitive | +|--|--|--| +| `Transformation` | Flag representing whether the changefeed is using CDC queries. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. | no | + +## Cluster-level events + +Events in this category pertain to an entire cluster and are +not relative to any particular tenant. + +In a multi-tenant setup, the `system.eventlog` table for individual +tenants cannot contain a copy of cluster-level events; conversely, +the `system.eventlog` table in the system tenant cannot contain the +SQL-level events for individual tenants. + +Events in this category are logged to the `OPS` channel. + + +### `certs_reload` + +An event of type `certs_reload` is recorded when the TLS certificates are +reloaded/rotated from disk. + + +| Field | Description | Sensitive | +|--|--|--| +| `Success` | Whether the operation completed without errors. | no | +| `ErrorMessage` | If an error was encountered, the text of the error. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | + +### `disk_slowness_cleared` + +An event of type `disk_slowness_cleared` is recorded when disk slowness in a store has cleared. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeID` | The node ID where the event was originated. | no | +| `StoreID` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `disk_slowness_detected` + +An event of type `disk_slowness_detected` is recorded when a store observes disk slowness +events. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeID` | The node ID where the event was originated. | no | +| `StoreID` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `node_decommissioned` + +An event of type `node_decommissioned` is recorded when a node is marked as +decommissioned. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_decommissioning` + +An event of type `node_decommissioning` is recorded when a node is marked as +decommissioning. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. 
| no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_join` + +An event of type `node_join` is recorded when a node joins the cluster. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_recommissioned` + +An event of type `node_recommissioned` is recorded when a decommissioning node is +recommissioned. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_restart` + +An event of type `node_restart` is recorded when an existing node rejoins the cluster +after being offline. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_shutdown_connection_timeout` + +An event of type `node_shutdown_connection_timeout` is recorded when SQL connections remain open +during shutdown, after waiting for the server.shutdown.connections.timeout +to transpire. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `Detail` | The detailed message, meant to be a human-understandable explanation. | no | +| `ConnectionsRemaining` | The number of connections still open after waiting for the client to close them. | no | +| `TimeoutMillis` | The amount of time the server waited for the client to close the connections, defined by server.shutdown.connections.timeout. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_shutdown_transaction_timeout` + +An event of type `node_shutdown_transaction_timeout` is recorded when SQL transactions remain open +during shutdown, after waiting for the server.shutdown.transactions.timeout +to transpire. + + +| Field | Description | Sensitive | +|--|--|--| +| `Detail` | The detailed message, meant to be a human-understandable explanation. | no | +| `ConnectionsRemaining` | The number of connections still running SQL transactions after waiting for the client to end them. | no | +| `TimeoutMillis` | The amount of time the server waited for the client to close the connections, defined by server.shutdown.transactions.timeout. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. 
| no | + +### `tenant_shared_service_start` + +An event of type `tenant_shared_service_start` is recorded when a tenant server +is started inside the same process as the KV layer. + + +| Field | Description | Sensitive | +|--|--|--| +| `OK` | Whether the startup was successful. | no | +| `ErrorText` | If the startup failed, the text of the error. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `TenantID` | The ID of the tenant owning the service. | no | +| `InstanceID` | The ID of the server instance. | no | +| `TenantName` | The name of the tenant at the time the event was emitted. | yes | + +### `tenant_shared_service_stop` + +An event of type `tenant_shared_service_stop` is recorded when a tenant server +is shut down inside the same process as the KV layer. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `TenantID` | The ID of the tenant owning the service. | no | +| `InstanceID` | The ID of the server instance. | no | +| `TenantName` | The name of the tenant at the time the event was emitted. | yes | + +## Debugging events + +Events in this category pertain to debugging operations performed by +operators or (more commonly) Cockroach Labs employees. These operations can +e.g. directly access and mutate internal state, breaking system invariants. + +Events in this category are logged to the `OPS` channel. + + +### `debug_recover_replica` + +An event of type `debug_recover_replica` is recorded when unsafe loss of quorum recovery is performed. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `RangeID` | | no | +| `StoreID` | | no | +| `SurvivorReplicaID` | | no | +| `UpdatedReplicaID` | | no | +| `StartKey` | | yes | +| `EndKey` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event originated. | no | +| `User` | The user which performed the operation. | yes | + +### `debug_send_kv_batch` + +An event of type `debug_send_kv_batch` is recorded when an arbitrary KV BatchRequest is submitted +to the cluster via the `debug send-kv-batch` CLI command. + + +| Field | Description | Sensitive | +|--|--|--| +| `BatchRequest` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event originated. | no | +| `User` | The user which performed the operation. | yes | + +## Health events + +Events in this category pertain to the health of one or more servers. + +Events in this category are logged to the `HEALTH` channel. + + +### `runtime_stats` + +An event of type `runtime_stats` is recorded every 10 seconds as server health metrics. + + +| Field | Description | Sensitive | +|--|--|--| +| `MemRSSBytes` | The process resident set size. Expressed as bytes. | no | +| `GoroutineCount` | The number of goroutines. | no | +| `MemStackSysBytes` | The stack system memory used. Expressed as bytes. | no | +| `GoAllocBytes` | The memory allocated by Go. Expressed as bytes. | no | +| `GoTotalBytes` | The total memory allocated by Go but not released. Expressed as bytes. | no | +| `GoStatsStaleness` | The staleness of the Go memory statistics. Expressed in seconds. 
| no | +| `HeapFragmentBytes` | The amount of heap fragmentation. Expressed as bytes. | no | +| `HeapReservedBytes` | The amount of heap reserved. Expressed as bytes. | no | +| `HeapReleasedBytes` | The amount of heap released. Expressed as bytes. | no | +| `CGoAllocBytes` | The memory allocated outside of Go. Expressed as bytes. | no | +| `CGoTotalBytes` | The total memory allocated outside of Go but not released. Expressed as bytes. | no | +| `CGoCallRate` | The total number of calls outside of Go over time. Expressed as operations per second. | no | +| `CPUUserPercent` | The user CPU percentage. | no | +| `CPUSysPercent` | The system CPU percentage. | no | +| `GCPausePercent` | The GC pause percentage. | no | +| `GCRunCount` | The total number of GC runs. | no | +| `NetHostRecvBytes` | The bytes received on all network interfaces since this process started. | no | +| `NetHostSendBytes` | The bytes sent on all network interfaces since this process started. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Job events + +Events in this category pertain to long-running jobs that are orchestrated by +a node's job registry. These system processes can create and/or modify stored +objects during the course of their execution. + +A job might choose to emit multiple events during its execution when +transitioning from one "state" to another. +Egs: IMPORT/RESTORE will emit events on job creation and successful +completion. If the job fails, events will be emitted on job creation, +failure, and successful revert. + +Events in this category are logged to the `OPS` channel. + + +### `import` + +An event of type `import` is recorded when an import job is created and successful completion. +If the job fails, events will be emitted on job creation, failure, and +successful revert. 
+ + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `JobID` | The ID of the job that triggered the event. | no | +| `JobType` | The type of the job that triggered the event. | no | +| `Description` | A description of the job that triggered the event. Some jobs populate the description with an approximate representation of the SQL statement run to create the job. | yes | +| `User` | The user account that triggered the event. | yes | +| `DescriptorIDs` | The object descriptors affected by the job. Set to zero for operations that don't affect descriptors. | yes | +| `Status` | The status of the job that triggered the event. This allows the job to indicate which phase execution it is in when the event is triggered. | no | + +### `restore` + +An event of type `restore` is recorded when a restore job is created and successful completion. +If the job fails, events will be emitted on job creation, failure, and +successful revert. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `JobID` | The ID of the job that triggered the event. | no | +| `JobType` | The type of the job that triggered the event. | no | +| `Description` | A description of the job that triggered the event. Some jobs populate the description with an approximate representation of the SQL statement run to create the job. | yes | +| `User` | The user account that triggered the event. | yes | +| `DescriptorIDs` | The object descriptors affected by the job. Set to zero for operations that don't affect descriptors. | yes | +| `Status` | The status of the job that triggered the event. 
This allows the job to indicate which phase execution it is in when the event is triggered. | no | + +### `status_change` + +An event of type `status_change` is recorded when a job changes statuses. + + +| Field | Description | Sensitive | +|--|--|--| +| `JobID` | The ID of the job that is changing statuses. | no | +| `JobType` | The type of the job that is changing statuses. | no | +| `Description` | A human parsable description of the status change | partially | +| `PreviousStatus` | The status that the job is transitioning out of | no | +| `NewStatus` | The status that the job has transitioned into | no | +| `RunNum` | The run number of the job. | no | +| `Error` | An error that may have occurred while the job was running. | yes | +| `FinalResumeErr` | An error that occurred that requires the job to be reverted. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Miscellaneous SQL events + +Events in this category report miscellaneous SQL events. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these miscellaneous events are +preserved in each tenant's own system.eventlog table. + +Events in this category are logged to the `OPS` channel. + + +### `set_cluster_setting` + +An event of type `set_cluster_setting` is recorded when a cluster setting is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SettingName` | The name of the affected cluster setting. | no | +| `Value` | The new value of the cluster setting. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `set_tenant_cluster_setting` + +An event of type `set_tenant_cluster_setting` is recorded when a cluster setting override +is changed, either for another tenant or for all tenants. + + +| Field | Description | Sensitive | +|--|--|--| +| `SettingName` | The name of the affected cluster setting. | no | +| `Value` | The new value of the cluster setting. | yes | +| `TenantId` | The target Tenant ID. Empty if targeting all tenants. | no | +| `AllTenants` | Whether the override applies to all tenants. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +## SQL Access Audit Events + +Events in this category are generated when a table has been +marked as audited via `ALTER TABLE ... EXPERIMENTAL_AUDIT SET`. + +Note: These events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SENSITIVE_ACCESS` channel. + + +### `admin_query` + +An event of type `admin_query` is recorded when a user with admin privileges (the user +is directly or indirectly a member of the admin role) executes a query. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. 
Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `role_based_audit_event` + +An event of type `role_based_audit_event` is an audit event recorded when an executed query belongs to a user whose role +membership(s) correspond to any role that is enabled to emit an audit log via the sql.log.user_audit +cluster setting. + + +| Field | Description | Sensitive | +|--|--|--| +| `Role` | The configured audit role that emitted this log. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. 
| no | + +### `sensitive_table_access` + +An event of type `sensitive_table_access` is recorded when an access is performed to +a table marked as audited. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table being audited. | yes | +| `AccessMode` | How the table was accessed (r=read / rw=read/write). | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. 
| no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +## SQL Execution Log + +Events in this category report executed queries. + +Note: These events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SQL_EXEC` channel. + + +### `query_execute` + +An event of type `query_execute` is recorded when a query is executed, +and the cluster setting `sql.log.all_statements.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +## SQL Logical Schema Changes + +Events in this category pertain to DDL (Data Definition Language) +operations performed by SQL statements that modify the SQL logical +schema. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `SQL_SCHEMA` channel. + + +### `alter_database_add_region` + +An event of type `alter_database_add_region` is recorded when a region is added to a database. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `RegionName` | The region being added. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_drop_region` + +An event of type `alter_database_drop_region` is recorded when a region is dropped from a database. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `RegionName` | The region being dropped. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. 
The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_placement` + +An event of type `alter_database_placement` is recorded when the database placement is modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `Placement` | The new placement policy. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_primary_region` + +An event of type `alter_database_primary_region` is recorded when a primary region is added/modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `PrimaryRegionName` | The new primary region. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_set_zone_config_extension` + +An event of type `alter_database_set_zone_config_extension` is recorded when a zone config extension is changed. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + +### `alter_database_survival_goal` + +An event of type `alter_database_survival_goal` is recorded when the survival goal is modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `SurvivalGoal` | The new survival goal | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. 
The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_function_options` + +An event of type `alter_function_options` is recorded when a user-defined function's options are +altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the affected function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `alter_index` + +An event of type `alter_index` is recorded when an index is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_index_visible` + +An event of type `alter_index_visible` is recorded when an index's visibility is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `NotVisible` | Set true if index is not visible. NOTE: THIS FIELD IS DEPRECATED in favor of invisibility. | no | +| `Invisibility` | The new invisibility of the affected index. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_sequence` + +An event of type `alter_sequence` is recorded when a sequence is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the affected sequence. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_table` + +An event of type `alter_table` is recorded when a table is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update, if any. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_type` + +EventAlterType is recorded when a user-defined type is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_column` + +An event of type `comment_on_column` is recorded when a column is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected column. | no | +| `ColumnName` | The affected column. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_constraint` + +An event of type `comment_on_constraint` is recorded when a constraint is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected constraint. | no | +| `ConstraintName` | The name of the affected constraint. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_database` + +An event of type `comment_on_database` is recorded when a database is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_index` + +An event of type `comment_on_index` is recorded when an index is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_schema` + + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | Name of the affected schema. 
| no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_table` + +An event of type `comment_on_table` is recorded when a table is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_type` + +An event of type `comment_on_type` is recorded when a type is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_database` + +An event of type `create_database` is recorded when a database is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the new database. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_function` + +An event of type `create_function` is recorded when a user-defined function is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the created function. | no | +| `IsReplace` | If the new function is a replace of an existing function. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_index` + +An event of type `create_index` is recorded when an index is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the new index. | no | +| `IndexName` | The name of the new index. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_schema` + +An event of type `create_schema` is recorded when a schema is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the new schema. | no | +| `Owner` | The name of the owner for the new schema. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_sequence` + +An event of type `create_sequence` is recorded when a sequence is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the new sequence. | no | +| `Owner` | The name of the owner for the new sequence. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_statistics` + +An event of type `create_statistics` is recorded when statistics are collected for a +table. + +Events of this type are only collected when the cluster setting +`sql.stats.post_events.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table for which the statistics were created. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_table` + +An event of type `create_table` is recorded when a table is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the new table. | no | +| `Owner` | The name of the owner for the new table. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_trigger` + +An event of type `create_trigger` is recorded when a trigger is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | Name of the trigger's table. | no | +| `TriggerName` | Name of the created trigger. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `create_type` + +An event of type `create_type` is recorded when a user-defined type is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the new type. | no | +| `Owner` | The name of the owner for the new type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_view` + +An event of type `create_view` is recorded when a view is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `ViewName` | The name of the new view. | no | +| `Owner` | The name of the owner of the new view. | no | +| `ViewQuery` | The SQL selection clause used to define the view. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_database` + +An event of type `drop_database` is recorded when a database is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | no | +| `DroppedSchemaObjects` | The names of the schemas dropped by a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_function` + +An event of type `drop_function` is recorded when a user-defined function is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the dropped function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_index` + +An event of type `drop_index` is recorded when an index is dropped. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_schema` + +An event of type `drop_schema` is recorded when a schema is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_sequence` + +An event of type `drop_sequence` is recorded when a sequence is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the affected sequence. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. 
Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_table` + +An event of type `drop_table` is recorded when a table is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_trigger` + +An event of type `drop_trigger` is recorded when a trigger is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | Name of the trigger's table. 
| no | +| `TriggerName` | Name of the dropped trigger. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_type` + +An event of type `drop_type` is recorded when a user-defined type is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_view` + +An event of type `drop_view` is recorded when a view is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `ViewName` | The name of the affected view. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `finish_schema_change` + +An event of type `finish_schema_change` is recorded when a previously initiated schema +change has completed. + + +| Field | Description | Sensitive | +|--|--|--| +| `LatencyNanos` | The amount of time the schema change job took to complete. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `finish_schema_change_rollback` + +An event of type `finish_schema_change_rollback` is recorded when a previously +initiated schema change rollback has completed. + + +| Field | Description | Sensitive | +|--|--|--| +| `LatencyNanos` | The amount of time the schema change job took to rollback. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. 
| no | + +### `force_delete_table_data_entry` + + + +| Field | Description | Sensitive | +|--|--|--| +| `DescriptorID` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_database` + +An event of type `rename_database` is recorded when a database is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The old name of the affected database. | no | +| `NewDatabaseName` | The new name of the affected database. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_function` + +An event of type `rename_function` is recorded when a user-defined function is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | The old name of the affected function. | no | +| `NewFunctionName` | The new name of the affected function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_schema` + +An event of type `rename_schema` is recorded when a schema is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The old name of the affected schema. | no | +| `NewSchemaName` | The new name of the affected schema. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_table` + +An event of type `rename_table` is recorded when a table, sequence or view is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The old name of the affected table. | no | +| `NewTableName` | The new name of the affected table. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_type` + +An event of type `rename_type` is recorded when a user-defined type is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The old name of the affected type. | no | +| `NewTypeName` | The new name of the affected type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. 
The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `reverse_schema_change` + +An event of type `reverse_schema_change` is recorded when an in-progress schema change +encounters a problem and is reversed. + + +| Field | Description | Sensitive | +|--|--|--| +| `Error` | The error encountered that caused the schema change to be reversed. The specific format of the error is variable and can change across releases without warning. | partially | +| `SQLSTATE` | The SQLSTATE code for the error. | no | +| `LatencyNanos` | The amount of time the schema change job took before being reverted. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `set_schema` + +An event of type `set_schema` is recorded when a table, view, sequence or type's schema is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DescriptorName` | The old name of the affected descriptor. 
| no | +| `NewDescriptorName` | The new name of the affected descriptor. | no | +| `DescriptorType` | The descriptor type being changed (table, view, sequence, type). | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `truncate_table` + +An event of type `truncate_table` is recorded when a table is truncated. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_delete_descriptor` + +An event of type `unsafe_delete_descriptor` is recorded when a descriptor is written +using crdb_internal.unsafe_delete_descriptor(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. + + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | no | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. 
Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_delete_namespace_entry` + +An event of type `unsafe_delete_namespace_entry` is recorded when a namespace entry is +written using crdb_internal.unsafe_delete_namespace_entry(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. + + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | no | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `unsafe_upsert_descriptor` + +An event of type `unsafe_upsert_descriptor` is recorded when a descriptor is written +using crdb_internal.unsafe_upsert_descriptor(). + + +| Field | Description | Sensitive | +|--|--|--| +| `PreviousDescriptor` | | yes | +| `NewDescriptor` | | yes | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_upsert_namespace_entry` + +An event of type `unsafe_upsert_namespace_entry` is recorded when a namespace entry is +written using crdb_internal.unsafe_upsert_namespace_entry(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | no | +| `PreviousID` | | no | +| `Force` | | no | +| `FailedValidation` | | no | +| `ValidationErrors` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +## SQL Privilege changes + +Events in this category pertain to DDL (Data Definition Language) +operations performed by SQL statements that modify the privilege +grants for stored objects. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `PRIVILEGES` channel. + + +### `alter_database_owner` + +An event of type `alter_database_owner` is recorded when a database's owner is changed. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database being affected. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_default_privileges` + +An event of type `alter_default_privileges` is recorded when default privileges are changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | yes | +| `RoleName` | Either role_name should be populated or for_all_roles should be true. The role having its default privileges altered. | yes | +| `ForAllRoles` | Identifies if FOR ALL ROLES is used. | no | +| `SchemaName` | The name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `alter_function_owner` + +AlterTableOwner is recorded when the owner of a user-defined function is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | The name of the affected user-defined function. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_schema_owner` + +An event of type `alter_schema_owner` is recorded when a schema's owner is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_table_owner` + +An event of type `alter_table_owner` is recorded when the owner of a table, view or sequence is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected object. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_type_owner` + +An event of type `alter_type_owner` is recorded when the owner of a user-defiend type is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | +| `Owner` | The name of the new owner. 
| yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `change_database_privilege` + +An event of type `change_database_privilege` is recorded when privileges are +added to / removed from a user for a database object. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. 
The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_function_privilege` + + + +| Field | Description | Sensitive | +|--|--|--| +| `FuncName` | The name of the affected function. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_schema_privilege` + +An event of type `change_schema_privilege` is recorded when privileges are added to / +removed from a user for a schema object. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. 
| yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_table_privilege` + +An event of type `change_table_privilege` is recorded when privileges are added to / removed +from a user for a table, sequence or view object. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_type_privilege` + +An event of type `change_type_privilege` is recorded when privileges are added to / +removed from a user for a type object. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +## SQL Session events + +Events in this category report SQL client connections +and sessions. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these miscellaneous events are +preserved in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `SESSIONS` channel. + + +### `client_authentication_failed` + +An event of type `client_authentication_failed` is reported when a client session +did not authenticate successfully. 
+ +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Reason` | The reason for the authentication failure. See below for possible values for type `AuthFailReason`. | no | +| `Detail` | The detailed error for the authentication failure. | partially | +| `Method` | The authentication method used. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_authentication_info` + +An event of type `client_authentication_info` is reported for intermediate +steps during the authentication process. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Method` | The authentication method used, once known. 
| no | +| `Info` | The authentication progress message. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_authentication_ok` + +An event of type `client_authentication_ok` is reported when a client session +was authenticated successfully. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Method` | The authentication method used. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. 
| no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_connection_end` + +An event of type `client_connection_end` is reported when a client connection +is closed. This is reported even when authentication +fails, and even for simple cancellation messages. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_connections.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Duration` | The duration of the connection in nanoseconds. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. 
| no | + +### `client_connection_start` + +An event of type `client_connection_start` is reported when a client connection +is established. This is reported even when authentication +fails, and even for simple cancellation messages. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_connections.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | + +### `client_session_end` + +An event of type `client_session_end` is reported when a client session +is completed. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Duration` | The duration of the connection in nanoseconds. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. 
| yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +## SQL Slow Query Log + +Events in this category report slow query execution. + +Note: these events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SQL_PERF` channel. + + +### `large_row` + +An event of type `large_row` is recorded when a statement tries to write a row larger than +cluster setting `sql.guardrails.max_row_size_log` to the database. Multiple +LargeRow events will be recorded for statements writing multiple large rows. +LargeRow events are recorded before the transaction commits, so in the case +of transaction abort there will not be a corresponding row in the database. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RowSize` | | no | +| `TableID` | | no | +| `FamilyID` | | no | +| `PrimaryKey` | | yes | + +### `slow_query` + +An event of type `slow_query` is recorded when a query triggers the "slow query" condition. 
+ +As of this writing, the condition requires: +- the cluster setting `sql.log.slow_query.latency_threshold` +set to a non-zero value, AND +- EITHER of the following conditions: +- the actual age of the query exceeds the configured threshold; AND/OR +- the query performs a full table/index scan AND the cluster setting +`sql.log.slow_query.experimental_full_table_scans.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. 
| partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was retried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `txn_rows_read_limit` + +An event of type `txn_rows_read_limit` is recorded when a transaction tries to read more rows than +cluster setting `sql.defaults.transaction_rows_read_log`. There will only be +a single record for a single transaction (unless it is retried) even if there +are more statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +### `txn_rows_written_limit` + +An event of type `txn_rows_written_limit` is recorded when a transaction tries to write more rows +than cluster setting `sql.defaults.transaction_rows_written_log`. There will +only be a single record for a single transaction (unless it is retried) even +if there are more mutation statements within the transaction that haven't +been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +## SQL Slow Query Log (Internal) + +Events in this category report slow query execution by +internal executors, i.e., when CockroachDB internally issues +SQL statements. + +Note: these events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SQL_INTERNAL_PERF` channel. + + +### `large_row_internal` + +An event of type `large_row_internal` is recorded when an internal query tries to write a row +larger than cluster settings `sql.guardrails.max_row_size_log` or +`sql.guardrails.max_row_size_err` to the database. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RowSize` | | no | +| `TableID` | | no | +| `FamilyID` | | no | +| `PrimaryKey` | | yes | + +### `slow_query_internal` + +An event of type `slow_query_internal` is recorded when a query triggers the "slow query" condition, +and the cluster setting `sql.log.slow_query.internal_queries.enabled` is +set. +See the documentation for the event type `slow_query` for details about +the "slow query" condition. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was retried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. 
| no | + +### `txn_rows_read_limit_internal` + +An event of type `txn_rows_read_limit_internal` is recorded when an internal transaction tries to +read more rows than cluster setting `sql.defaults.transaction_rows_read_log` +or `sql.defaults.transaction_rows_read_err`. There will only be a single +record for a single transaction (unless it is retried) even if there are more +mutation statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. 
| no | + +### `txn_rows_written_limit_internal` + +An event of type `txn_rows_written_limit_internal` is recorded when an internal transaction tries to +write more rows than cluster setting +`sql.defaults.transaction_rows_written_log` or +`sql.defaults.transaction_rows_written_err`. There will only be a single +record for a single transaction (unless it is retried) even if there are more +mutation statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. 
| no | + +## SQL User and Role operations + +Events in this category pertain to SQL statements that modify the +properties of users and roles. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `USER_ADMIN` channel. + + +### `alter_role` + +An event of type `alter_role` is recorded when a role is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the affected user/role. | yes | +| `Options` | The options set on the user/role. | no | +| `SetInfo` | Information corresponding to an ALTER ROLE SET statement. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_role` + +An event of type `create_role` is recorded when a role is created. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the new user/role. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_role` + +An event of type `drop_role` is recorded when a role is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the affected user/role. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `grant_role` + +An event of type `grant_role` is recorded when a role is granted. + + +| Field | Description | Sensitive | +|--|--|--| +| `GranteeRoles` | The roles being granted to. | yes | +| `Members` | The roles being granted. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `password_hash_converted` + +An event of type `password_hash_converted` is recorded when the password credentials +are automatically converted server-side. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the user/role whose credentials have been converted. | yes | +| `OldMethod` | The previous hash method. | no | +| `NewMethod` | The new hash method. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Storage telemetry events + + + +Events in this category are logged to the `TELEMETRY` channel. + + +### `level_stats` + +An event of type `level_stats` contains per-level statistics for an LSM. + + +| Field | Description | Sensitive | +|--|--|--| +| `Level` | level is the level ID in a LSM (e.g. level(L0) == 0, etc.) | no | +| `NumFiles` | num_files is the number of files in the level (gauge). | no | +| `SizeBytes` | size_bytes is the size of the level, in bytes (gauge). | no | +| `Score` | score is the compaction score of the level (gauge). | no | +| `BytesIn` | bytes_in is the number of bytes written to this level (counter). | no | +| `BytesIngested` | bytes_ingested is the number of bytes ingested into this level (counter). | no | +| `BytesMoved` | bytes_moved is the number of bytes moved into this level via a move-compaction (counter). | no | +| `BytesRead` | bytes_read is the number of bytes read from this level, during compactions (counter). | no | +| `BytesCompacted` | bytes_compacted is the number of bytes written to this level during compactions (counter). | no | +| `BytesFlushed` | bytes flushed is the number of bytes flushed to this level. This value is always zero for levels other than L0 (counter). 
| no | +| `TablesCompacted` | tables_compacted is the count of tables compacted into this level (counter). | no | +| `TablesFlushed` | tables_flushed is the count of tables flushed into this level (counter). | no | +| `TablesIngested` | tables_ingested is the count of tables ingested into this level (counter). | no | +| `TablesMoved` | tables_moved is the count of tables moved into this level via move-compactions (counter). | no | +| `NumSublevels` | num_sublevel is the count of sublevels for the level. This value is always zero for levels other than L0 (gauge). | no | + + + +### `store_stats` + +An event of type `store_stats` contains per store stats. + +Note that because stats are scoped to the lifetime of the process, counters +(and certain gauges) will be reset across node restarts. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeId` | node_id is the ID of the node. | no | +| `StoreId` | store_id is the ID of the store. | no | +| `Levels` | levels is a nested message containing per-level statistics. | yes | +| `CacheSize` | cache_size is the size of the cache for the store, in bytes (gauge). | no | +| `CacheCount` | cache_count is the number of items in the cache (gauge). | no | +| `CacheHits` | cache_hits is the number of cache hits (counter). | no | +| `CacheMisses` | cache_misses is the number of cache misses (counter). | no | +| `CompactionCountDefault` | compaction_count_default is the count of default compactions (counter). | no | +| `CompactionCountDeleteOnly` | compaction_count_delete_only is the count of delete-only compactions (counter). | no | +| `CompactionCountElisionOnly` | compaction_count_elision_only is the count of elision-only compactions (counter). | no | +| `CompactionCountMove` | compaction_count_move is the count of move-compactions (counter). | no | +| `CompactionCountRead` | compaction_count_read is the count of read-compactions (counter). 
| no | +| `CompactionCountRewrite` | compaction_count_rewrite is the count of rewrite-compactions (counter). | no | +| `CompactionNumInProgress` | compactions_num_in_progress is the number of compactions in progress (gauge). | no | +| `CompactionMarkedFiles` | compaction_marked_files is the count of files marked for compaction (gauge). | no | +| `FlushCount` | flush_count is the number of flushes (counter). | no | +| `FlushIngestCount` | | no | +| `FlushIngestTableCount` | | no | +| `FlushIngestTableBytes` | | no | +| `IngestCount` | ingest_count is the number of successful ingest operations (counter). | no | +| `MemtableSize` | memtable_size is the total size allocated to all memtables and (large) batches, in bytes (gauge). | no | +| `MemtableCount` | memtable_count is the count of memtables (gauge). | no | +| `MemtableZombieCount` | memtable_zombie_count is the count of memtables no longer referenced by the current DB state, but still in use by an iterator (gauge). | no | +| `MemtableZombieSize` | memtable_zombie_size is the size, in bytes, of all zombie memtables (gauge). | no | +| `WalLiveCount` | wal_live_count is the count of live WAL files (gauge). | no | +| `WalLiveSize` | wal_live_size is the size, in bytes, of live data in WAL files. With WAL recycling, this value is less than the actual on-disk size of the WAL files (gauge). | no | +| `WalObsoleteCount` | wal_obsolete_count is the count of obsolete WAL files (gauge). | no | +| `WalObsoleteSize` | wal_obsolete_size is the size of obsolete WAL files, in bytes (gauge). | no | +| `WalPhysicalSize` | wal_physical_size is the size, in bytes, of the WAL files on disk (gauge). | no | +| `WalBytesIn` | wal_bytes_in is the number of logical bytes written to the WAL (counter). | no | +| `WalBytesWritten` | wal_bytes_written is the number of bytes written to the WAL (counter). 
| no | +| `TableObsoleteCount` | table_obsolete_count is the number of tables which are no longer referenced by the current DB state or any open iterators (gauge). | no | +| `TableObsoleteSize` | table_obsolete_size is the size, in bytes, of obsolete tables (gauge). | no | +| `TableZombieCount` | table_zombie_count is the number of tables no longer referenced by the current DB state, but are still in use by an open iterator (gauge). | no | +| `TableZombieSize` | table_zombie_size is the size, in bytes, of zombie tables (gauge). | no | +| `RangeKeySetsCount` | range_key_sets_count is the approximate count of internal range key sets in the store. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Telemetry events + + + +Events in this category are logged to the `TELEMETRY` channel. + + +### `captured_index_usage_stats` + +An event of type `captured_index_usage_stats` + + +| Field | Description | Sensitive | +|--|--|--| +| `TotalReadCount` | TotalReadCount is the number of times the index has been read. | no | +| `LastRead` | LastRead is the timestamp at which the index was last read. | no | +| `TableID` | TableID is the ID of the table on which the index was created. This is same as descpb.TableID and is unique within the cluster. | no | +| `IndexID` | IndexID is the ID of the index within the scope of the given table. | no | +| `DatabaseName` | DatabaseName is the name of the database in which the index was created. | no | +| `TableName` | TableName is the name of the table on which the index was created. | no | +| `IndexName` | IndexName is the name of the index within the scope of the given table. | no | +| `IndexType` | IndexType is the type of the index. Index types include "primary" and "secondary". | no | +| `IsUnique` | IsUnique indicates if the index has a UNIQUE constraint. 
| no | +| `IsInverted` | IsInverted indicates if the index is an inverted index. | no | +| `CreatedAt` | CreatedAt is the timestamp at which the index was created. | no | +| `SchemaName` | SchemaName is the name of the schema in which the index was created. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `hot_ranges_stats` + +An event of type `hot_ranges_stats` + + +| Field | Description | Sensitive | +|--|--|--| +| `RangeID` | | no | +| `Qps` | | no | +| `SchemaName` | SchemaName is the name of the schema in which the index was created. | yes | +| `LeaseholderNodeID` | LeaseholderNodeID indicates the Node ID that is the current leaseholder for the given range. | no | +| `WritesPerSecond` | Writes per second is the recent number of keys written per second on this range. | no | +| `ReadsPerSecond` | Reads per second is the recent number of keys read per second on this range. | no | +| `WriteBytesPerSecond` | Write bytes per second is the recent number of bytes written per second on this range. | no | +| `ReadBytesPerSecond` | Read bytes per second is the recent number of bytes read per second on this range. | no | +| `CPUTimePerSecond` | CPU time per second is the recent cpu usage in nanoseconds of this range. | no | +| `Databases` | Databases for the range. | yes | +| `Tables` | Tables for the range | yes | +| `Indexes` | Indexes for the range | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `m_v_c_c_iterator_stats` + +Internal storage iteration statistics for a single execution. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `StepCount` | StepCount collects the number of times the iterator moved forward or backward over the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `StepCountInternal` | StepCountInternal collects the number of times the iterator moved forward or backward over LSM internal keys. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `SeekCount` | SeekCount collects the number of times the iterator moved to a specific key/value pair in the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `SeekCountInternal` | SeekCountInternal collects the number of times the iterator moved to a specific LSM internal key. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `BlockBytes` | BlockBytes collects the bytes in the loaded SSTable data blocks. For details, see pebble.InternalIteratorStats. | no | +| `BlockBytesInCache` | BlockBytesInCache collects the subset of BlockBytes in the block cache. For details, see pebble.InternalIteratorStats. | no | +| `KeyBytes` | KeyBytes collects the bytes in keys that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `ValueBytes` | ValueBytes collects the bytes in values that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `PointCount` | PointCount collects the count of point keys iterated over. For details, see pebble.InternalIteratorStats. | no | +| `PointsCoveredByRangeTombstones` | PointsCoveredByRangeTombstones collects the count of point keys that were iterated over that were covered by range tombstones. For details, see pebble.InternalIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeyCount` | RangeKeyCount collects the count of range keys encountered during iteration. 
For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeyContainedPoints` | RangeKeyContainedPoints collects the count of point keys encountered within the bounds of a range key. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeySkippedPoints` | RangeKeySkippedPoints collects the count of the subset of ContainedPoints point keys that were skipped during iteration due to range-key masking. For details, see pkg/storage/engine.go, pebble.RangeKeyIteratorStats, and docs/tech-notes/mvcc-range-tombstones.md. | no | + + + +### `recovery_event` + +An event of type `recovery_event` is an event that is logged on every invocation of BACKUP, +RESTORE, and on every BACKUP schedule creation, with the appropriate subset +of fields populated depending on the type of event. This event is is also +logged whenever a BACKUP and RESTORE job completes or fails. + + +| Field | Description | Sensitive | +|--|--|--| +| `RecoveryType` | RecoveryType is the type of recovery described by this event, which is one of - backup - scheduled_backup - create_schedule - restore

It can also be a job event corresponding to the recovery, which is one of - backup_job - scheduled_backup_job - restore_job | no | +| `TargetScope` | TargetScope is the largest scope of the targets that the user is backing up or restoring based on the following order: table < schema < database < full cluster. | no | +| `IsMultiregionTarget` | IsMultiregionTarget is true if any of the targets contain objects with multi-region primitives. | no | +| `TargetCount` | TargetCount is the number of targets the in the BACKUP/RESTORE. | no | +| `DestinationSubdirType` | DestinationSubdirType is - latest: if using the latest subdir - standard: if using a date-based subdir - custom: if using a custom subdir that's not date-based | no | +| `DestinationStorageTypes` | DestinationStorageTypes are the types of storage that the user is backing up to or restoring from. | no | +| `DestinationAuthTypes` | DestinationAuthTypes are the types of authentication methods that the user is using to access the destination storage. | no | +| `IsLocalityAware` | IsLocalityAware indicates if the BACKUP or RESTORE is locality aware. | no | +| `AsOfInterval` | AsOfInterval is the time interval in nanoseconds between the statement timestamp and the timestamp resolved by the AS OF SYSTEM TIME expression. The interval is expressed in nanoseconds. | no | +| `WithRevisionHistory` | WithRevisionHistory is true if the BACKUP includes revision history. | no | +| `HasEncryptionPassphrase` | HasEncryptionPassphrase is true if the user provided an encryption passphrase to encrypt/decrypt their backup. | no | +| `KMSType` | KMSType is the type of KMS the user is using to encrypt/decrypt their backup. | no | +| `KMSCount` | KMSCount is the number of KMS the user is using. | no | +| `Options` | Options contain all the names of the options specified by the user in the BACKUP or RESTORE statement. For options that are accompanied by a value, only those with non-empty values will be present.

It's important to note that there are no option values anywhere in the event payload. Future changes to telemetry should refrain from adding values to the payload unless they are properly redacted. | no | +| `DebugPauseOn` | DebugPauseOn is the type of event that the restore should pause on for debugging purposes. Currently only "error" is supported. | no | +| `JobID` | JobID is the ID of the BACKUP/RESTORE job. | no | +| `ResultStatus` | ResultStatus indicates whether the job succeeded or failed. | no | +| `ErrorText` | ErrorText is the text of the error that caused the job to fail. | partially | +| `RecurringCron` | RecurringCron is the crontab for the incremental backup. | no | +| `FullBackupCron` | FullBackupCron is the crontab for the full backup. | no | +| `CustomFirstRunTime` | CustomFirstRunTime is the timestamp for the user configured first run time. Expressed as nanoseconds since the Unix epoch. | no | +| `OnExecutionFailure` | OnExecutionFailure describes the desired behavior if the schedule fails to execute. | no | +| `OnPreviousRunning` | OnPreviousRunning describes the desired behavior if the previously scheduled BACKUP is still running. | no | +| `IgnoreExistingBackup` | IgnoreExistingBackup is true iff the BACKUP schedule should still be created even if a backup is already present in its destination. | no | +| `ApplicationName` | The application name for the session where recovery event was created. | no | +| `NumRows` | NumRows is the number of rows successfully imported, backed up or restored. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `sampled_exec_stats` + +An event of type `sampled_exec_stats` contains execution statistics that apply to both statements +and transactions. These stats as a whole are collected using a sampling approach. 
+These exec stats are meant to contain the same fields as ExecStats in +apps_stats.proto but are for a single execution rather than aggregated executions. +Fields in this struct should be updated in sync with apps_stats.proto. + + +| Field | Description | Sensitive | +|--|--|--| +| `NetworkBytes` | NetworkBytes collects the number of bytes sent over the network. | no | +| `MaxMemUsage` | MaxMemUsage collects the maximum memory usage that occurred on a node. | no | +| `ContentionTime` | ContentionTime collects the time in seconds statements in the transaction spent contending. | no | +| `NetworkMessages` | NetworkMessages collects the number of messages that were sent over the network. | no | +| `MaxDiskUsage` | MaxDiskUsage collects the maximum temporary disk usage that occurred. This is set in cases where a query had to spill to disk, e.g. when performing a large sort where not all of the tuples fit in memory. | no | +| `CPUSQLNanos` | CPUSQLNanos collects the CPU time spent executing SQL operations in nanoseconds. Currently, it is only collected for statements without mutations that have a vectorized plan. | no | +| `MVCCIteratorStats` | Internal storage iteration statistics. | yes | + + + +### `sampled_query` + +An event of type `sampled_query` is the SQL query event logged to the telemetry channel. It +contains common SQL event/execution details. + + +| Field | Description | Sensitive | +|--|--|--| +| `SkippedQueries` | skipped_queries indicate how many SQL statements were not considered for sampling prior to this one. If the field is omitted, or its value is zero, this indicates that no statement was omitted since the last event. | no | +| `CostEstimate` | Cost of the query as estimated by the optimizer. | no | +| `Distribution` | The distribution of the DistSQL query plan (local, full, or partial). | no | +| `PlanGist` | The query's plan gist bytes as a base64 encoded string. | no | +| `SessionID` | SessionID is the ID of the session that initiated the query. 
| no | +| `Database` | Name of the database that initiated the query. | no | +| `StatementID` | Statement ID of the query. | no | +| `TransactionID` | Transaction ID of the query. | no | +| `MaxFullScanRowsEstimate` | Maximum number of rows scanned by a full scan, as estimated by the optimizer. | no | +| `TotalScanRowsEstimate` | Total number of rows read by all scans in the query, as estimated by the optimizer. | no | +| `OutputRowsEstimate` | The number of rows output by the query, as estimated by the optimizer. | no | +| `StatsAvailable` | Whether table statistics were available to the optimizer when planning the query. | no | +| `NanosSinceStatsCollected` | The maximum number of nanoseconds that have passed since stats were collected on any table scanned by this query. | no | +| `BytesRead` | The number of bytes read from disk. | no | +| `RowsRead` | The number of rows read from disk. | no | +| `RowsWritten` | The number of rows written. | no | +| `InnerJoinCount` | The number of inner joins in the query plan. | no | +| `LeftOuterJoinCount` | The number of left (or right) outer joins in the query plan. | no | +| `FullOuterJoinCount` | The number of full outer joins in the query plan. | no | +| `SemiJoinCount` | The number of semi joins in the query plan. | no | +| `AntiJoinCount` | The number of anti joins in the query plan. | no | +| `IntersectAllJoinCount` | The number of intersect all joins in the query plan. | no | +| `ExceptAllJoinCount` | The number of except all joins in the query plan. | no | +| `HashJoinCount` | The number of hash joins in the query plan. | no | +| `CrossJoinCount` | The number of cross joins in the query plan. | no | +| `IndexJoinCount` | The number of index joins in the query plan. | no | +| `LookupJoinCount` | The number of lookup joins in the query plan. | no | +| `MergeJoinCount` | The number of merge joins in the query plan. | no | +| `InvertedJoinCount` | The number of inverted joins in the query plan. 
| no | +| `ApplyJoinCount` | The number of apply joins in the query plan. | no | +| `ZigZagJoinCount` | The number of zig zag joins in the query plan. | no | +| `ContentionNanos` | The duration of time in nanoseconds that the query experienced contention. | no | +| `Regions` | The regions of the nodes where SQL processors ran. | no | +| `NetworkBytesSent` | The number of network bytes sent by nodes for this query. | no | +| `MaxMemUsage` | The maximum amount of memory usage by nodes for this query. | no | +| `MaxDiskUsage` | The maximum amount of disk usage by nodes for this query. | no | +| `KVBytesRead` | The number of bytes read at the KV layer for this query. | no | +| `KVPairsRead` | The number of key-value pairs read at the KV layer for this query. | no | +| `KVRowsRead` | The number of rows read at the KV layer for this query. | no | +| `NetworkMessages` | The number of network messages sent by nodes for this query. | no | +| `IndexRecommendations` | Generated index recommendations for this query. | no | +| `ScanCount` | The number of scans in the query plan. | no | +| `ScanWithStatsCount` | The number of scans using statistics (including forecasted statistics) in the query plan. | no | +| `ScanWithStatsForecastCount` | The number of scans using forecasted statistics in the query plan. | no | +| `TotalScanRowsWithoutForecastsEstimate` | Total number of rows read by all scans in the query, as estimated by the optimizer without using forecasts. | no | +| `NanosSinceStatsForecasted` | The greatest quantity of nanoseconds that have passed since the forecast time (or until the forecast time, if it is in the future, in which case it will be negative) for any table with forecasted stats scanned by this query. | no | +| `Indexes` | The list of indexes used by this query. | no | +| `CpuTimeNanos` | Collects the cumulative CPU time spent executing SQL operations in nanoseconds. 
Currently, it is only collected for statements without mutations that have a vectorized plan. | no | +| `KvGrpcCalls` | The number of grpc calls done to get data form KV nodes | no | +| `KvTimeNanos` | Cumulated time spent waiting for a KV request. This includes disk IO time and potentially network time (if any of the keys are not local). | no | +| `ServiceLatencyNanos` | The time to service the query, from start of parse to end of execute. | no | +| `OverheadLatencyNanos` | The difference between service latency and the sum of parse latency + plan latency + run latency . | no | +| `RunLatencyNanos` | The time to run the query and fetch or compute the result rows. | no | +| `PlanLatencyNanos` | The time to transform the AST into a logical query plan. | no | +| `IdleLatencyNanos` | The time between statement executions in a transaction | no | +| `ParseLatencyNanos` | The time to transform the SQL string into an abstract syntax tree (AST). | no | +| `MvccStepCount` | StepCount collects the number of times the iterator moved forward or backward over the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccStepCountInternal` | StepCountInternal collects the number of times the iterator moved forward or backward over LSM internal keys. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccSeekCount` | SeekCount collects the number of times the iterator moved to a specific key/value pair in the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccSeekCountInternal` | SeekCountInternal collects the number of times the iterator moved to a specific LSM internal key. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccBlockBytes` | BlockBytes collects the bytes in the loaded SSTable data blocks. For details, see pebble.InternalIteratorStats. 
| no | +| `MvccBlockBytesInCache` | BlockBytesInCache collects the subset of BlockBytes in the block cache. For details, see pebble.InternalIteratorStats. | no | +| `MvccKeyBytes` | KeyBytes collects the bytes in keys that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccValueBytes` | ValueBytes collects the bytes in values that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccPointCount` | PointCount collects the count of point keys iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccPointsCoveredByRangeTombstones` | PointsCoveredByRangeTombstones collects the count of point keys that were iterated over that were covered by range tombstones. For details, see pebble.InternalIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeyCount` | RangeKeyCount collects the count of range keys encountered during iteration. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeyContainedPoints` | RangeKeyContainedPoints collects the count of point keys encountered within the bounds of a range key. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeySkippedPoints` | RangeKeySkippedPoints collects the count of the subset of ContainedPoints point keys that were skipped during iteration due to range-key masking. For details, see pkg/storage/engine.go, pebble.RangeKeyIteratorStats, and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `SchemaChangerMode` | SchemaChangerMode is the mode that was used to execute the schema change, if any. | no | +| `SQLInstanceIDs` | SQLInstanceIDs is a list of all the SQL instances used in this statement's execution. | no | +| `KVNodeIDs` | KVNodeIDs is a list of all the KV nodes used in this statement's execution. | no | +| `StatementFingerprintID` | Statement fingerprint ID of the query. 
| no | +| `UsedFollowerRead` | UsedFollowerRead indicates whether at least some reads were served by the follower replicas. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. 
| no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `sampled_transaction` + +An event of type `sampled_transaction` is the event logged to telemetry at the end of transaction execution. + + +| Field | Description | Sensitive | +|--|--|--| +| `User` | User is the user account that triggered the transaction. The special usernames `root` and `node` are not considered sensitive. | depends | +| `ApplicationName` | ApplicationName is the application name for the session where the transaction was executed. This is included in the event to ease filtering of logging output by application. | no | +| `TxnCounter` | TxnCounter is the sequence number of the SQL transaction inside its session. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `TransactionID` | TransactionID is the id of the transaction. | no | +| `Committed` | Committed indicates if the transaction committed successfully. We want to include this value even if it is false. | no | +| `ImplicitTxn` | ImplicitTxn indicates if the transaction was an implicit one. We want to include this value even if it is false. | no | +| `StartTimeUnixNanos` | StartTimeUnixNanos is the time the transaction was started. Expressed as unix time in nanoseconds. | no | +| `EndTimeUnixNanos` | EndTimeUnixNanos the time the transaction finished (either committed or aborted). Expressed as unix time in nanoseconds. | no | +| `ServiceLatNanos` | ServiceLatNanos is the time to service the whole transaction, from start to end of execution. | no | +| `SQLSTATE` | SQLSTATE is the SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. 
| no | +| `ErrorText` | ErrorText is the text of the error if any. | partially | +| `NumRetries` | NumRetries is the number of times when the txn was retried automatically by the server. | no | +| `LastAutoRetryReason` | LastAutoRetryReason is a string containing the reason for the last automatic retry. | partially | +| `NumRows` | NumRows is the total number of rows returned across all statements. | no | +| `RetryLatNanos` | RetryLatNanos is the amount of time spent retrying the transaction. | no | +| `CommitLatNanos` | CommitLatNanos is the amount of time spent committing the transaction after all statement operations. | no | +| `IdleLatNanos` | IdleLatNanos is the amount of time spent waiting for the client to send statements while the transaction is open. | no | +| `BytesRead` | BytesRead is the number of bytes read from disk. | no | +| `RowsRead` | RowsRead is the number of rows read from disk. | no | +| `RowsWritten` | RowsWritten is the number of rows written to disk. | no | +| `SampledExecStats` | SampledExecStats is a nested field containing execution statistics. This field will be omitted if the stats were not sampled. | yes | +| `SkippedTransactions` | SkippedTransactions is the number of transactions that were skipped as part of sampling prior to this one. We only count skipped transactions when telemetry logging is enabled and the sampling mode is set to "transaction". | no | +| `TransactionFingerprintID` | TransactionFingerprintID is the fingerprint ID of the transaction. This can be used to find the transaction in the console. | no | +| `StatementFingerprintIDs` | StatementFingerprintIDs is an array of statement fingerprint IDs belonging to this transaction. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | + +### `schema_descriptor` + +An event of type `schema_descriptor` is an event for schema telemetry, whose purpose is +to take periodic snapshots of the cluster's SQL schema and publish them in +the telemetry log channel. For all intents and purposes, the data in such a +snapshot can be thought of the outer join of certain system tables: +namespace, descriptor, and at some point perhaps zones, etc. + +Snapshots are too large to conveniently be published as a single log event, +so instead they're broken down into SchemaDescriptor events which +contain the data in one record of this outer join projection. These events +are prefixed by a header (a SchemaSnapshotMetadata event). + + +| Field | Description | Sensitive | +|--|--|--| +| `SnapshotID` | SnapshotID is the unique identifier of the snapshot that this event is part of. | no | +| `ParentDatabaseID` | ParentDatabaseID matches the same key column in system.namespace. | no | +| `ParentSchemaID` | ParentSchemaID matches the same key column in system.namespace. | no | +| `Name` | Name matches the same key column in system.namespace. | no | +| `DescID` | DescID matches the 'id' column in system.namespace and system.descriptor. | no | +| `Desc` | Desc matches the 'descriptor' column in system.descriptor. Some contents of the descriptor may be redacted to prevent leaking PII. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `schema_snapshot_metadata` + +An event of type `schema_snapshot_metadata` is an event describing a schema snapshot, which +is a set of SchemaDescriptor messages sharing the same SnapshotID. + + +| Field | Description | Sensitive | +|--|--|--| +| `SnapshotID` | SnapshotID is the unique identifier of this snapshot. | no | +| `NumRecords` | NumRecords is how many SchemaDescriptor events are in the snapshot. 
| no | +| `AsOfTimestamp` | AsOfTimestamp is when the snapshot was taken. This is equivalent to the timestamp given in the AS OF SYSTEM TIME clause when querying the namespace and descriptor tables in the system database. Expressed as nanoseconds since the Unix epoch. | no | +| `Errors` | Errors records any errors encountered when post-processing this snapshot, which includes the redaction of any potential PII. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Zone config events + +Events in this category pertain to zone configuration changes on +the SQL schema or system ranges. + +When zone configs apply to individual tables or other objects in a +SQL logical schema, they are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these zone config events are preserved +in each tenant's own `system.eventlog` table. + +When they apply to cluster-level ranges (e.g., the system zone config), +they are stored in the system tenant's own `system.eventlog` table. + +Events in this category are logged to the `OPS` channel. + + +### `remove_zone_config` + +An event of type `remove_zone_config` is recorded when a zone config is removed. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. 
The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + +### `set_zone_config` + +An event of type `set_zone_config` is recorded when a zone config is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `ResolvedOldConfig` | The string representation of the resolved old zone config. This is not necessarily the same as the zone config that was previously set -- as it includes the resolved values of the zone config options. In other words, a zone config that hasn't been properly "set" yet (and inherits from its parent) will have a resolved_old_config that has details of the values it inherits from its parent. This is particularly useful to get a proper diff between the old and new zone config. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + + + + +## Enumeration types + +### `AuthFailReason` + +AuthFailReason is the inventory of possible reasons for an +authentication failure. + + +| Value | Textual alias in code or documentation | Description | +|--|--|--| +| 0 | UNKNOWN | is reported when the reason is unknown. | +| 1 | USER_RETRIEVAL_ERROR | occurs when there was an internal error accessing the principals. | +| 2 | USER_NOT_FOUND | occurs when the principal is unknown. | +| 3 | LOGIN_DISABLED | occurs when the user does not have LOGIN privileges. | +| 4 | METHOD_NOT_FOUND | occurs when no HBA rule matches or the method does not exist. | +| 5 | PRE_HOOK_ERROR | occurs when the authentication handshake encountered a protocol error. | +| 6 | CREDENTIALS_INVALID | occurs when the client-provided credentials were invalid. | +| 7 | CREDENTIALS_EXPIRED | occurs when the credentials provided by the client are expired. | +| 8 | NO_REPLICATION_ROLEOPTION | occurs when the connection requires a replication role option, but the user does not have it. | +| 9 | AUTHORIZATION_ERROR | is used for errors during the authorization phase. 
For example, this would include issues with mapping LDAP groups to SQL roles and granting those roles to the user. | + + + diff --git a/src/current/_includes/cockroach-generated/release-25.1/logformats.md b/src/current/_includes/cockroach-generated/release-25.1/logformats.md new file mode 100644 index 00000000000..89580c9fc15 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.1/logformats.md @@ -0,0 +1,382 @@ + +The supported log output formats are documented below. + + +- [`crdb-v1`](#format-crdb-v1) + +- [`crdb-v1-count`](#format-crdb-v1-count) + +- [`crdb-v1-tty`](#format-crdb-v1-tty) + +- [`crdb-v1-tty-count`](#format-crdb-v1-tty-count) + +- [`crdb-v2`](#format-crdb-v2) + +- [`crdb-v2-tty`](#format-crdb-v2-tty) + +- [`json`](#format-json) + +- [`json-compact`](#format-json-compact) + +- [`json-fluent`](#format-json-fluent) + +- [`json-fluent-compact`](#format-json-fluent-compact) + + + +## Format `crdb-v1` + + +This is a legacy file format used from CockroachDB v1.0. + +Each log entry is emitted using a common prefix, described below, followed by: + +- The logging context tags enclosed between `[` and `]`, if any. It is possible + for this to be omitted if there were no context tags. +- Optionally, a counter column, if the option 'show-counter' is enabled. See below for details. +- the text of the log entry. + +Beware that the text of the log entry can span multiple lines. +The following caveats apply: + +- The text of the log entry can start with text enclosed between `[` and `]`. + If there were no logging tags to start with, it is not possible to distinguish between + logging context tag information and a `[...]` string in the main text of the + log entry. This means that this format is ambiguous. + + To remove this ambiguity, you can use the option 'show-counter'. + +- The text of the log entry can embed arbitrary application-level strings, + including strings that represent log entries. 
In particular, an accident + of implementation can cause the common entry prefix (described below) + to also appear on a line of its own, as part of the payload of a previous + log entry. There is no automated way to recognize when this occurs. + Care must be taken by a human observer to recognize these situations. + +- The log entry parser provided by CockroachDB to read log files is faulty + and is unable to recognize the aforementioned pitfall; nor can it read + entries larger than 64KiB successfully. Generally, use of this internal + log entry parser is discouraged for entries written with this format. + +See the newer format `crdb-v2` for an alternative +without these limitations. + +### Header lines + +At the beginning of each file, a header is printed using a similar format as +regular log entries. This header reports when the file was created, +which parameters were used to start the server, the server identifiers +if known, and other metadata about the running process. + +- This header appears to be logged at severity `INFO` (with an `I` prefix + at the start of the line) even though it does not really have a severity. +- The header is printed unconditionally even when a filter is configured to + omit entries at the `INFO` level. + +### Common log entry prefix + +Each line of output starts with the following prefix: + + Lyymmdd hh:mm:ss.uuuuuu goid [chan@]file:line marker tags counter + +Reminder, the tags may be omitted; and the counter is only printed if the option +'show-counter' is specified. + +| Field | Description | +|-----------------|--------------------------------------------------------------------------------------------------------------------------------------| +| L | A single character, representing the [log level](logging.html#logging-levels-severities) (e.g., `I` for `INFO`). | +| yy | The year (zero padded; i.e., 2016 is `16`). | +| mm | The month (zero padded; i.e., May is `05`). | +| dd | The day (zero padded). 
| +| hh:mm:ss.uuuuuu | Time in hours, minutes and fractional seconds. Timezone is UTC. | +| goid | The goroutine id (omitted if zero for use by tests). | +| chan | The channel number (omitted if zero for backward compatibility). | +| file | The file name where the entry originated. | +| line | The line number where the entry originated. | +| marker | Redactability marker "⋮" (see below for details). | +| tags | The logging tags, enclosed between `[` and `]`. May be absent. | +| counter | The entry counter. Only included if 'show-counter' is enabled. | + +The redactability marker can be empty; in this case, its position in the common prefix is +a double ASCII space character which can be used to reliably identify this situation. + +If the marker "⋮" is present, the remainder of the log entry +contains delimiters (‹...›) around +fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) when log redaction is requested. + + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `show-counter` | Whether to include the counter column in the line header. Without it, the format may be ambiguous due to the optionality of tags. | +| `colors` | The color profile to use. Possible values: none, auto, ansi, 256color. Default is auto. | +| `timezone` | The timezone to use for the timestamp column. The value can be any timezone name recognized by the Go standard library. 
Default is `UTC` | + + + +## Format `crdb-v1-count` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: true` +- `colors: none` + + +## Format `crdb-v1-tty` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: false` +- `colors: auto` + + +## Format `crdb-v1-tty-count` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: true` +- `colors: auto` + + +## Format `crdb-v2` + +This is the main file format used from CockroachDB v21.1. + +Each log entry is emitted using a common prefix, described below, +followed by the text of the log entry. + +### Entry format + +Each line of output starts with the following prefix: + + Lyymmdd hh:mm:ss.uuuuuu goid [chan@]file:line marker [tags...] counter cont + +| Field | Description | +|-----------------|--------------------------------------------------------------------------------------------------------------------------------------| +| L | A single character, representing the [log level](logging.html#logging-levels-severities) (e.g., `I` for `INFO`). | +| yy | The year (zero padded; i.e., 2016 is `16`). | +| mm | The month (zero padded; i.e., May is `05`). | +| dd | The day (zero padded). | +| hh:mm:ss.uuuuuu | Time in hours, minutes and fractional seconds. Timezone is UTC. | +| goid | The goroutine id (zero when cannot be determined). | +| chan | The channel number (omitted if zero for backward compatibility). | +| file | The file name where the entry originated. Also see below. | +| line | The line number where the entry originated. | +| marker | Redactability marker "⋮" (see below for details). | +| tags | The logging tags, enclosed between `[` and `]`. See below. | +| counter | The optional entry counter (see below for details). | +| cont | Continuation mark for structured and multi-line entries. See below. 
| + +The `chan@` prefix before the file name indicates the logging channel, +and is omitted if the channel is `DEV`. + +The file name may be prefixed by the string `(gostd) ` to indicate +that the log entry was produced inside the Go standard library, instead +of a CockroachDB component. Entry parsers must be configured to ignore this prefix +when present. + +`marker` can be empty; in this case, its position in the common prefix is +a double ASCII space character which can be used to reliably identify this situation. +If the marker "⋮" is present, the remainder of the log entry +contains delimiters (‹...›) +around fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) +when log redaction is requested. + +The logging `tags` are enclosed between square brackets `[...]`, +and the syntax `[-]` is used when there are no logging tags +associated with the log entry. + +`counter` is numeric, and is incremented for every +log entry emitted to this sink. (There is thus one counter sequence per +sink.) For entries that do not have a counter value +associated (e.g., header entries in file sinks), the counter position +in the common prefix is empty: `tags` is then +followed by two ASCII space characters, instead of one space; the `counter`, +and another space. The presence of the two ASCII spaces indicates +reliably that no counter was present. + +`cont` is a format/continuation indicator: + +| Continuation indicator | ASCII | Description | +|------------------------|-------|--| +| space | 0x20 | Start of an unstructured entry. | +| equal sign, "=" | 0x3d | Start of a structured entry. | +| exclamation mark, "!" | 0x21 | Start of an embedded stack trace. | +| plus sign, "+" | 0x2b | Continuation of a multi-line entry. The payload contains a newline character at this position. | +| vertical bar | 0x7c | Continuation of a large entry. 
| + +### Examples + +Example single-line unstructured entry: + +~~~ +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 started with engine type ‹2› +~~~ + +Example multi-line unstructured entry: + +~~~ +I210116 21:49:17.083093 14 1@cli/start.go:690 ⋮ [-] 40 node startup completed: +I210116 21:49:17.083093 14 1@cli/start.go:690 ⋮ [-] 40 +CockroachDB node starting at 2021-01-16 21:49 (took 0.0s) +~~~ + +Example structured entry: + +~~~ +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 ={"Timestamp":1610833757080706620,"EventType":"node_restart"} +~~~ + +Example long entries broken up into multiple lines: + +~~~ +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.... +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 |aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +~~~ + +~~~ +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 ={"Timestamp":1610833757080706620,"EventTy... +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 |pe":"node_restart"} +~~~ + +### Backward-compatibility notes + +Entries in this format can be read by most `crdb-v1` log parsers, +in particular the one included in the DB console and +also the [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) +facility. + +However, implementers of previous version parsers must +understand that the logging tags field is now always +included, and the lack of logging tags is included +by a tag string set to `[-]`. + +Likewise, the entry counter is now also always included, +and there is a special character after `counter` +to indicate whether the remainder of the line is a +structured entry, or a continuation of a previous entry. + +Finally, in the previous format, structured entries +were prefixed with the string `Structured entry: `. In +the new format, they are prefixed by the `=` continuation +indicator. 
+ + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `colors` | The color profile to use. Possible values: none, auto, ansi, 256color. Default is auto. | +| `timezone` | The timezone to use for the timestamp column. The value can be any timezone name recognized by the Go standard library. Default is `UTC` | + + + +## Format `crdb-v2-tty` + +This format name is an alias for 'crdb-v2' with +the following format option defaults: + +- `colors: auto` + + +## Format `json` + +This format emits log entries as a JSON payload. + +The JSON object is guaranteed to not contain unescaped newlines +or other special characters, and the entry as a whole is followed +by a newline character. This makes the format suitable for +processing over a stream unambiguously. + +Each entry contains at least the following fields: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `tag` | `tag` | (Only if the option `fluent-tag: true` is given.) A Fluent tag for the event, formed by the process name and the logging channel. | +| `d` | `datetime` | The pretty-printed date/time of the event timestamp, if enabled via options. | +| `f` | `file` | The name of the source file where the event was emitted. | +| `g` | `goroutine` | The identifier of the goroutine where the event was emitted. | +| `l` | `line` | The line number where the event was emitted in the source. | +| `r` | `redactable` | Whether the payload is redactable (see below for details). | +| `t` | `timestamp` | The timestamp at which the event was emitted on the logging channel. | +| `v` | `version` | The binary version with which the event was generated. 
| + + +After a couple of *header* entries written at the beginning of each log sink, +all subsequent log entries also contain the following fields: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `C` | `channel` | The name of the logging channel where the event was sent. | +| `sev` | `severity` | The severity of the event. | +| `c` | `channel_numeric` | The numeric identifier for the logging channel where the event was sent. | +| `n` | `entry_counter` | The entry number on this logging sink, relative to the last process restart. | +| `s` | `severity_numeric` | The numeric value of the severity of the event. | + + +Additionally, the following fields are conditionally present: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `N` | `node_id` | The node ID where the event was generated, once known. Only reported for single-tenant or KV servers. | +| `x` | `cluster_id` | The cluster ID where the event was generated, once known. Only reported for single-tenant or KV servers. | +| `q` | `instance_id` | The SQL instance ID where the event was generated, once known. | +| `T` | `tenant_id` | The SQL tenant ID where the event was generated, once known. | +| `V` | `tenant_name` | The SQL virtual cluster where the event was generated, once known. | +| `tags` | `tags` | The logging context tags for the entry, if there were context tags. | +| `message` | `message` | For unstructured events, the flat text payload. | +| `event` | `event` | The logging event, if structured (see below for details). | +| `stacks` | `stacks` | Goroutine stacks, for fatal events. | + +When an entry is structured, the `event` field maps to a dictionary +whose structure is one of the documented structured events. 
See the [reference documentation](eventlog.html) +for structured events for a list of possible payloads. + +When the entry is marked as `redactable`, the `tags`, `message`, and/or `event` payloads +contain delimiters (‹...›) around +fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) when log redaction is requested. + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `datetime-format` | The format to use for the `datetime` field. The value can be one of `none`, `iso8601`/`rfc3339` (synonyms), or `rfc1123`. Default is `none`. | +| `datetime-timezone` | The timezone to use for the `datetime` field. The value can be any timezone name recognized by the Go standard library. Default is `UTC` | +| `tag-style` | The tags to include in the envelope. The value can be `compact` (one letter tags) or `verbose` (long-form tags). Default is `verbose`. | +| `fluent-tag` | Whether to produce an additional field called `tag` for Fluent compatibility. Default is `false`. 
| + + +## Format `json-compact` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: false` +- `tag-style: compact` + + +## Format `json-fluent` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: true` +- `tag-style: verbose` + + +## Format `json-fluent-compact` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: true` +- `tag-style: compact` + + diff --git a/src/current/_includes/cockroach-generated/release-25.1/logging.md b/src/current/_includes/cockroach-generated/release-25.1/logging.md new file mode 100644 index 00000000000..8b628dc2dd9 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.1/logging.md @@ -0,0 +1,179 @@ +## Logging levels (severities) + +### INFO + +The `INFO` severity is used for informational messages that do not +require action. + +### WARNING + +The `WARNING` severity is used for situations which may require special handling, +where normal operation is expected to resume automatically. + +### ERROR + +The `ERROR` severity is used for situations that require special handling, +where normal operation could not proceed as expected. +Other operations can continue mostly unaffected. + +### FATAL + +The `FATAL` severity is used for situations that require an immediate, hard +server shutdown. A report is also sent to telemetry if telemetry +is enabled. + + +## Logging channels + +### `DEV` + +The `DEV` channel is used during development to collect log +details useful for troubleshooting that fall outside the +scope of other channels. It is also the default logging +channel for events not associated with a channel. + +This channel is special in that there are no constraints as to +what may or may not be logged on it. 
Conversely, users in +production deployments are invited to not collect `DEV` logs in +centralized logging facilities, because they likely contain +sensitive operational data. +See [Configure logs](configure-logs.html#dev-channel). + +### `OPS` + +The `OPS` channel is used to report "point" operational events, +initiated by user operators or automation: + + - Operator or system actions on server processes: process starts, + stops, shutdowns, crashes (if they can be logged), + including each time: command-line parameters, current version being run + - Actions that impact the topology of a cluster: node additions, + removals, decommissions, etc. + - Job-related initiation or termination + - [Cluster setting](cluster-settings.html) changes + - [Zone configuration](configure-replication-zones.html) changes + +### `HEALTH` + +The `HEALTH` channel is used to report "background" operational +events, initiated by CockroachDB or reporting on automatic processes: + + - Current resource usage, including critical resource usage + - Node-node connection events, including connection errors and + gossip details + - Range and table leasing events + - Up- and down-replication, range unavailability + +### `STORAGE` + +The `STORAGE` channel is used to report low-level storage +layer events (RocksDB/Pebble). + +### `SESSIONS` + +The `SESSIONS` channel is used to report client network activity when enabled via +the `server.auth_log.sql_connections.enabled` and/or +`server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): + + - Connections opened/closed + - Authentication events: logins, failed attempts + - Session and query cancellation + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. 
+ +### `SQL_SCHEMA` + +The `SQL_SCHEMA` channel is used to report changes to the +SQL logical schema, excluding privilege and ownership changes +(which are reported separately on the `PRIVILEGES` channel) and +zone configuration changes (which go to the `OPS` channel). + +This includes: + + - Database/schema/table/sequence/view/type creation + - Adding/removing/changing table columns + - Changing sequence parameters + +`SQL_SCHEMA` events generally comprise changes to the schema that affect the +functional behavior of client apps using stored objects. + +### `USER_ADMIN` + +The `USER_ADMIN` channel is used to report changes +in users and roles, including: + + - Users added/dropped + - Changes to authentication credentials (e.g., passwords, validity, etc.) + - Role grants/revocations + - Role option grants/revocations + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. + +### `PRIVILEGES` + +The `PRIVILEGES` channel is used to report data +authorization changes, including: + + - Privilege grants/revocations on database, objects, etc. + - Object ownership changes + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. + +### `SENSITIVE_ACCESS` + +The `SENSITIVE_ACCESS` channel is used to report SQL +data access to sensitive data: + + - Data access audit events (when table audit is enabled via + [ALTER TABLE ... EXPERIMENTAL_AUDIT](alter-table.html#experimental_audit)) + - Data access audit events (when role-based audit is enabled via + [`sql.log.user_audit` cluster setting](role-based-audit-logging.html#syntax-of-audit-settings)) + - SQL statements executed by users with the admin role + - Operations that write to system tables + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. 
+ +### `SQL_EXEC` + +The `SQL_EXEC` channel is used to report SQL execution on +behalf of client connections: + + - Logical SQL statement executions (when enabled via the + `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) + - Uncaught Go panic errors during the execution of a SQL statement. + +### `SQL_PERF` + +The `SQL_PERF` channel is used to report SQL executions +that are marked as "out of the ordinary" +to facilitate performance investigations. +This includes the SQL "slow query log". + +Arguably, this channel overlaps with `SQL_EXEC`. +However, we keep both channels separate for backward compatibility +with versions prior to v21.1, where the corresponding events +were redirected to separate files. + +### `SQL_INTERNAL_PERF` + +The `SQL_INTERNAL_PERF` channel is like the `SQL_PERF` channel, but is aimed at +helping developers of CockroachDB itself. It exists as a separate +channel so as to not pollute the `SQL_PERF` logging output with +internal troubleshooting details. + +### `TELEMETRY` + +The `TELEMETRY` channel reports telemetry events. Telemetry events describe +feature usage within CockroachDB and anonymize any application- +specific data. + +### `KV_DISTRIBUTION` + +The `KV_DISTRIBUTION` channel is used to report data distribution events, such as moving +replicas between stores in the cluster, or adding (removing) replicas to +ranges. 
+ diff --git a/src/current/_includes/cockroach-generated/release-25.1/settings/settings.html b/src/current/_includes/cockroach-generated/release-25.1/settings/settings.html new file mode 100644 index 00000000000..f2c718ec58b --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.1/settings/settings.html @@ -0,0 +1,371 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
SettingTypeDefaultDescriptionSupported Deployments
admission.disk_bandwidth_tokens.elastic.enabled
booleantruewhen true, and provisioned bandwidth for the disk corresponding to a store is configured, tokens for elastic work will be limited if disk bandwidth becomes a bottleneckDedicated/Self-Hosted
admission.epoch_lifo.enabled
booleanfalsewhen true, epoch-LIFO behavior is enabled when there is significant delay in admissionServerless/Dedicated/Self-Hosted
admission.epoch_lifo.epoch_closing_delta_duration
duration5msthe delta duration before closing an epoch, for epoch-LIFO admission control orderingServerless/Dedicated/Self-Hosted
admission.epoch_lifo.epoch_duration
duration100msthe duration of an epoch, for epoch-LIFO admission control orderingServerless/Dedicated/Self-Hosted
admission.epoch_lifo.queue_delay_threshold_to_switch_to_lifo
duration105msthe queue delay encountered by a (tenant,priority) for switching to epoch-LIFO orderingServerless/Dedicated/Self-Hosted
admission.kv.enabled
booleantruewhen true, work performed by the KV layer is subject to admission controlDedicated/Self-Hosted
admission.sql_kv_response.enabled
booleantruewhen true, work performed by the SQL layer when receiving a KV response is subject to admission controlServerless/Dedicated/Self-Hosted
admission.sql_sql_response.enabled
booleantruewhen true, work performed by the SQL layer when receiving a DistSQL response is subject to admission controlServerless/Dedicated/Self-Hosted
bulkio.backup.deprecated_full_backup_with_subdir.enabled
booleanfalsewhen true, a backup command with a user specified subdirectory will create a full backup at the subdirectory if no backup already exists at that subdirectoryServerless/Dedicated/Self-Hosted
bulkio.backup.file_size
byte size128 MiBtarget size for individual data files produced during BACKUPServerless/Dedicated/Self-Hosted
bulkio.backup.read_timeout
duration5m0samount of time after which a read attempt is considered timed out, which causes the backup to failServerless/Dedicated/Self-Hosted
bulkio.backup.read_with_priority_after
duration1m0samount of time since the read-as-of time above which a BACKUP should use priority when retrying readsServerless/Dedicated/Self-Hosted
physical_replication.consumer.minimum_flush_interval
(alias: bulkio.stream_ingestion.minimum_flush_interval)
duration5sthe minimum timestamp between flushes; flushes may still occur if internal buffers fill upDedicated/Self-Hosted
changefeed.aggregator.flush_jitter
float0.1jitter aggregator flushes as a fraction of min_checkpoint_frequency. This setting has no effect if min_checkpoint_frequency is set to 0.Serverless/Dedicated/Self-Hosted
changefeed.backfill.concurrent_scan_requests
integer0number of concurrent scan requests per node issued during a backfillServerless/Dedicated/Self-Hosted
changefeed.backfill.scan_request_size
integer524288the maximum number of bytes returned by each scan requestServerless/Dedicated/Self-Hosted
changefeed.batch_reduction_retry.enabled
(alias: changefeed.batch_reduction_retry_enabled)
booleanfalseif true, kafka changefeeds upon erroring on an oversized batch will attempt to resend the messages with progressively lower batch sizesServerless/Dedicated/Self-Hosted
changefeed.default_range_distribution_strategy
enumerationdefaultconfigures how work is distributed among nodes for a given changefeed. for the most balanced distribution, use `balanced_simple`. changing this setting will not override locality restrictions [default = 0, balanced_simple = 1]Serverless/Dedicated/Self-Hosted
changefeed.event_consumer_worker_queue_size
integer16if changefeed.event_consumer_workers is enabled, this setting sets the maximum number of events which a worker can bufferServerless/Dedicated/Self-Hosted
changefeed.event_consumer_workers
integer0the number of workers to use when processing events: <0 disables, 0 assigns a reasonable default, >0 assigns the setting value. for experimental/core changefeeds and changefeeds using parquet format, this is disabledServerless/Dedicated/Self-Hosted
changefeed.fast_gzip.enabled
booleantrueuse fast gzip implementationServerless/Dedicated/Self-Hosted
changefeed.span_checkpoint.lag_threshold
(alias: changefeed.frontier_highwater_lag_checkpoint_threshold)
duration10m0sthe amount of time a changefeed's lagging (slowest) spans must lag behind its leading (fastest) spans before a span-level checkpoint to save leading span progress is written; if 0, span-level checkpoints due to lagging spans is disabledServerless/Dedicated/Self-Hosted
changefeed.kafka_v2_error_details.enabled
booleanfalseif enabled, Kafka v2 sinks will include the message key, size, and MVCC timestamp in message too large errorsServerless/Dedicated/Self-Hosted
changefeed.memory.per_changefeed_limit
byte size512 MiBcontrols amount of data that can be buffered per changefeedServerless/Dedicated/Self-Hosted
changefeed.resolved_timestamp.min_update_interval
(alias: changefeed.min_highwater_advance)
duration0sminimum amount of time that must have elapsed since the last time a changefeed's resolved timestamp was updated before it is eligible to be updated again; default of 0 means no minimum interval is enforced but updating will still be limited by the average time it takes to checkpoint progressServerless/Dedicated/Self-Hosted
changefeed.node_throttle_config
stringspecifies node level throttling configuration for all changefeedsServerless/Dedicated/Self-Hosted
changefeed.protect_timestamp.max_age
duration96h0m0sfail the changefeed if the protected timestamp age exceeds this threshold; 0 disables expirationServerless/Dedicated/Self-Hosted
changefeed.protect_timestamp_interval
duration10m0scontrols how often the changefeed forwards its protected timestamp to the resolved timestampServerless/Dedicated/Self-Hosted
changefeed.schema_feed.read_with_priority_after
duration1m0sretry with high priority if we were not able to read descriptors for too long; 0 disablesServerless/Dedicated/Self-Hosted
changefeed.sink_io_workers
integer0the number of workers used by changefeeds when sending requests to the sink (currently the batching versions of webhook, pubsub, and kafka sinks that are enabled by changefeed.new_<sink type>_sink_enabled only): <0 disables, 0 assigns a reasonable default, >0 assigns the setting valueServerless/Dedicated/Self-Hosted
cloudstorage.azure.concurrent_upload_buffers
integer1controls the number of concurrent buffers that will be used by the Azure client when uploading chunks. Each buffer can buffer up to cloudstorage.write_chunk.size of memory during an uploadServerless/Dedicated/Self-Hosted
cloudstorage.azure.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.azure.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.azure.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.azure.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.gs.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.gs.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.gs.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.gs.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.http.custom_ca
stringcustom root CA (appended to system's default CAs) for verifying certificates when interacting with HTTPS storageServerless/Dedicated/Self-Hosted
cloudstorage.http.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.http.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.http.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.http.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.nodelocal.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.nodelocal.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.nodelocal.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.nodelocal.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.nullsink.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.nullsink.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.nullsink.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.nullsink.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.s3.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.s3.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.s3.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.s3.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.timeout
duration10m0sthe timeout for import/export storage operationsServerless/Dedicated/Self-Hosted
cloudstorage.userfile.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.userfile.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.userfile.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.userfile.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cluster.auto_upgrade.enabled
booleantruedisable automatic cluster version upgrade until resetServerless/Dedicated/Self-Hosted
cluster.organization
stringorganization nameDedicated/Self-hosted (read-write); Serverless (read-only)
cluster.preserve_downgrade_option
stringdisable (automatic or manual) cluster version upgrade from the specified version until resetServerless/Dedicated/Self-Hosted
debug.zip.redact_addresses.enabled
booleanfalseenables the redaction of hostnames and ip addresses in debug zipServerless/Dedicated/Self-Hosted
diagnostics.active_query_dumps.enabled
booleantrueexperimental: enable dumping of anonymized active queries to disk when node is under memory pressureDedicated/Self-hosted (read-write); Serverless (read-only)
diagnostics.forced_sql_stat_reset.interval
duration2h0m0sinterval after which the reported SQL Stats are reset even if not collected by telemetry reporter. It has a max value of 24H.Serverless/Dedicated/Self-Hosted
diagnostics.memory_monitoring_dumps.enabled
booleantrueenable dumping of memory monitoring state at the same time as heap profiles are takenDedicated/Self-hosted (read-write); Serverless (read-only)
diagnostics.reporting.enabled
booleantrueenable reporting diagnostic metrics to cockroach labs, but is ignored for Trial or Free licensesServerless/Dedicated/Self-Hosted
diagnostics.reporting.interval
duration1h0m0sinterval at which diagnostics data should be reportedServerless/Dedicated/Self-Hosted
enterprise.license
stringthe encoded cluster licenseDedicated/Self-hosted (read-write); Serverless (read-only)
external.graphite.endpoint
stringif nonempty, push server metrics to the Graphite or Carbon server at the specified host:portServerless/Dedicated/Self-Hosted
external.graphite.interval
duration10sthe interval at which metrics are pushed to Graphite (if enabled)Serverless/Dedicated/Self-Hosted
feature.backup.enabled
booleantrueset to true to enable backups, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.changefeed.enabled
booleantrueset to true to enable changefeeds, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.export.enabled
booleantrueset to true to enable exports, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.import.enabled
booleantrueset to true to enable imports, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.restore.enabled
booleantrueset to true to enable restore, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.schema_change.enabled
booleantrueset to true to enable schema changes, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.stats.enabled
booleantrueset to true to enable CREATE STATISTICS/ANALYZE, false to disable; default is trueServerless/Dedicated/Self-Hosted
jobs.retention_time
duration336h0m0sthe amount of time for which records for completed jobs are retainedServerless/Dedicated/Self-Hosted
kv.allocator.lease_rebalance_threshold
float0.05minimum fraction away from the mean a store's lease count can be before it is considered for lease-transfersDedicated/Self-Hosted
kv.allocator.load_based_lease_rebalancing.enabled
booleantrueset to enable rebalancing of range leases based on load and latencyDedicated/Self-Hosted
kv.allocator.load_based_rebalancing
enumerationleases and replicaswhether to rebalance based on the distribution of load across stores [off = 0, leases = 1, leases and replicas = 2]Dedicated/Self-Hosted
kv.allocator.load_based_rebalancing.objective
enumerationcpuwhat objective does the cluster use to rebalance; if set to `qps` the cluster will attempt to balance qps among stores, if set to `cpu` the cluster will attempt to balance cpu usage among stores [qps = 0, cpu = 1]Dedicated/Self-Hosted
kv.allocator.load_based_rebalancing_interval
duration1m0sthe rough interval at which each store will check for load-based lease / replica rebalancing opportunitiesDedicated/Self-Hosted
kv.allocator.qps_rebalance_threshold
float0.1minimum fraction away from the mean a store's QPS (such as queries per second) can be before it is considered overfull or underfullDedicated/Self-Hosted
kv.allocator.range_rebalance_threshold
float0.05minimum fraction away from the mean a store's range count can be before it is considered overfull or underfullDedicated/Self-Hosted
kv.allocator.store_cpu_rebalance_threshold
float0.1minimum fraction away from the mean a store's cpu usage can be before it is considered overfull or underfullDedicated/Self-Hosted
kv.bulk_io_write.max_rate
byte size1.0 TiBthe rate limit (bytes/sec) to use for writes to disk on behalf of bulk io opsDedicated/Self-Hosted
kv.bulk_io_write.min_capacity_remaining_fraction
float0.05remaining store capacity fraction below which bulk ingestion requests are rejectedDedicated/Self-Hosted
kv.bulk_sst.max_allowed_overage
byte size64 MiBif positive, allowed size in excess of target size for SSTs from export requests; export requests (i.e. BACKUP) may buffer up to the sum of kv.bulk_sst.target_size and kv.bulk_sst.max_allowed_overage in memoryDedicated/Self-Hosted
kv.bulk_sst.target_size
byte size16 MiBtarget size for SSTs emitted from export requests; export requests (i.e. BACKUP) may buffer up to the sum of kv.bulk_sst.target_size and kv.bulk_sst.max_allowed_overage in memoryDedicated/Self-hosted (read-write); Serverless (read-only)
kv.closed_timestamp.follower_reads.enabled
(alias: kv.closed_timestamp.follower_reads_enabled)
booleantrueallow (all) replicas to serve consistent historical reads based on closed timestamp informationDedicated/Self-hosted (read-write); Serverless (read-only)
kv.closed_timestamp.lead_for_global_reads_override
duration0sif nonzero, overrides the lead time that global_read ranges use to publish closed timestampsDedicated/Self-hosted (read-write); Serverless (read-only)
kv.closed_timestamp.side_transport_interval
duration200msthe interval at which the closed timestamp side-transport attempts to advance each range's closed timestamp; set to 0 to disable the side-transportDedicated/Self-hosted (read-write); Serverless (read-only)
kv.closed_timestamp.target_duration
duration3sif nonzero, attempt to provide closed timestamp notifications for timestamps trailing cluster time by approximately this durationDedicated/Self-hosted (read-write); Serverless (read-only)
kv.dist_sender.circuit_breaker.cancellation.enabled
booleantruewhen enabled, in-flight requests will be cancelled when the circuit breaker tripsServerless/Dedicated/Self-Hosted
kv.dist_sender.circuit_breaker.cancellation.write_grace_period
duration10show long after the circuit breaker trips to cancel write requests (these can't retry internally, so should be long enough to allow quorum/lease recovery)Serverless/Dedicated/Self-Hosted
kv.dist_sender.circuit_breaker.probe.interval
duration3sinterval between replica probesServerless/Dedicated/Self-Hosted
kv.dist_sender.circuit_breaker.probe.threshold
duration3sduration of errors or stalls after which a replica will be probedServerless/Dedicated/Self-Hosted
kv.dist_sender.circuit_breaker.probe.timeout
duration3stimeout for replica probesServerless/Dedicated/Self-Hosted
kv.dist_sender.circuit_breakers.mode
enumerationliveness range onlyset of ranges to trip circuit breakers for failing or stalled replicas [no ranges = 0, liveness range only = 1, all ranges = 2]Serverless/Dedicated/Self-Hosted
kv.lease_transfer_read_summary.global_budget
byte size0 Bcontrols the maximum number of bytes that will be used to summarize the global segment of the timestamp cache during lease transfers and range merges. A smaller budget will result in loss of precision.Dedicated/Self-Hosted
kv.lease_transfer_read_summary.local_budget
byte size4.0 MiBcontrols the maximum number of bytes that will be used to summarize the local segment of the timestamp cache during lease transfers and range merges. A smaller budget will result in loss of precision.Dedicated/Self-Hosted
kv.log_range_and_node_events.enabled
booleantrueset to true to transactionally log range events (e.g., split, merge, add/remove voter/non-voter) into system.rangelog and node join and restart events into system.eventlogDedicated/Self-Hosted
kv.protectedts.reconciliation.interval
duration5m0sthe frequency for reconciling jobs with protected timestamp recordsDedicated/Self-hosted (read-write); Serverless (read-only)
kv.raft.leader_fortification.fraction_enabled
float0controls the fraction of ranges for which the raft leader fortification protocol is enabled. Leader fortification is needed for a range to use a Leader lease. Set to 0.0 to disable leader fortification and, by extension, Leader leases. Set to 1.0 to enable leader fortification for all ranges and, by extension, use Leader leases for all ranges which do not require expiration-based leases. Set to a value between 0.0 and 1.0 to gradually roll out Leader leases across the ranges in a cluster.Dedicated/Self-Hosted
kv.range.range_size_hard_cap
byte size8.0 GiBhard cap on the maximum size a range is allowed to grow to without splitting before writes to the range are blocked. Takes precedence over all other configurationsDedicated/Self-Hosted
kv.range_split.by_load.enabled
(alias: kv.range_split.by_load_enabled)
booleantrueallow automatic splits of ranges based on where load is concentratedDedicated/Self-Hosted
kv.range_split.load_cpu_threshold
duration500msthe CPU use per second over which, the range becomes a candidate for load based splittingDedicated/Self-Hosted
kv.range_split.load_qps_threshold
integer2500the QPS over which, the range becomes a candidate for load based splittingDedicated/Self-Hosted
kv.rangefeed.client.stream_startup_rate
integer100controls the rate per second the client will initiate new rangefeed stream for a single range; 0 implies unlimitedServerless/Dedicated/Self-Hosted
kv.rangefeed.closed_timestamp_refresh_interval
duration3sthe interval at which closed-timestamp updatesare delivered to rangefeeds; set to 0 to use kv.closed_timestamp.side_transport_intervalDedicated/Self-hosted (read-write); Serverless (read-only)
kv.rangefeed.enabled
booleanfalseif set, rangefeed registration is enabledDedicated/Self-hosted (read-write); Serverless (read-only)
kv.replica_circuit_breaker.slow_replication_threshold
duration1m0sduration after which slow proposals trip the per-Replica circuit breaker (zero duration disables breakers)Dedicated/Self-Hosted
kv.replica_stats.addsst_request_size_factor
integer50000the divisor that is applied to addsstable request sizes, then recorded in a leaseholders QPS; 0 means all requests are treated as cost 1Dedicated/Self-Hosted
kv.replication_reports.interval
duration1m0sthe frequency for generating the replication_constraint_stats, replication_stats_report and replication_critical_localities reports (set to 0 to disable)Dedicated/Self-Hosted
kv.snapshot_rebalance.max_rate
byte size32 MiBthe rate limit (bytes/sec) to use for rebalance and upreplication snapshotsDedicated/Self-Hosted
kv.snapshot_receiver.excise.enabled
booleantrueset to false to disable excises in place of range deletions for KV snapshotsDedicated/Self-Hosted
kv.transaction.max_intents_and_locks
integer0maximum count of inserts or durable locks for a single transaction, 0 to disableServerless/Dedicated/Self-Hosted
kv.transaction.max_intents_bytes
integer4194304maximum number of bytes used to track locks in transactionsServerless/Dedicated/Self-Hosted
kv.transaction.max_refresh_spans_bytes
integer4194304maximum number of bytes used to track refresh spans in serializable transactionsServerless/Dedicated/Self-Hosted
kv.transaction.randomized_anchor_key.enabled
booleanfalsedictates whether a transaction's anchor key is randomized or notServerless/Dedicated/Self-Hosted
kv.transaction.reject_over_max_intents_budget.enabled
booleanfalseif set, transactions that exceed their lock tracking budget (kv.transaction.max_intents_bytes) are rejected instead of having their lock spans imprecisely compressedServerless/Dedicated/Self-Hosted
kv.transaction.write_pipelining.locking_reads.enabled
booleantrueif enabled, transactional locking reads are pipelined through Raft consensusServerless/Dedicated/Self-Hosted
kv.transaction.write_pipelining.ranged_writes.enabled
booleantrueif enabled, transactional ranged writes are pipelined through Raft consensusServerless/Dedicated/Self-Hosted
kv.transaction.write_pipelining.enabled
(alias: kv.transaction.write_pipelining_enabled)
booleantrueif enabled, transactional writes are pipelined through Raft consensusServerless/Dedicated/Self-Hosted
kv.transaction.write_pipelining.max_batch_size
(alias: kv.transaction.write_pipelining_max_batch_size)
integer128if non-zero, defines that maximum size batch that will be pipelined through Raft consensusServerless/Dedicated/Self-Hosted
kvadmission.store.provisioned_bandwidth
byte size0 Bif set to a non-zero value, this is used as the provisioned bandwidth (in bytes/s), for each store. It can be overridden on a per-store basis using the --store flag. Note that setting the provisioned bandwidth to a positive value may enable disk bandwidth based admission control, since admission.disk_bandwidth_tokens.elastic.enabled defaults to trueDedicated/Self-Hosted
kvadmission.store.snapshot_ingest_bandwidth_control.enabled
booleantrueif set to true, snapshot ingests will be subject to disk write control in ACDedicated/Self-Hosted
obs.tablemetadata.automatic_updates.enabled
booleanfalseenables automatic updates of the table metadata cache system.table_metadataServerless/Dedicated/Self-Hosted
obs.tablemetadata.data_valid_duration
duration20m0sthe duration for which the data in system.table_metadata is considered validServerless/Dedicated/Self-Hosted
schedules.backup.gc_protection.enabled
booleantrueenable chaining of GC protection across backups run as part of a scheduleServerless/Dedicated/Self-Hosted
security.client_cert.subject_required.enabled
booleanfalsemandates a requirement for subject role to be set for db userDedicated/Self-hosted (read-write); Serverless (read-only)
security.ocsp.mode
enumerationoffuse OCSP to check whether TLS certificates are revoked. If the OCSP server is unreachable, in strict mode all certificates will be rejected and in lax mode all certificates will be accepted. [off = 0, lax = 1, strict = 2]Serverless/Dedicated/Self-Hosted
security.ocsp.timeout
duration3stimeout before considering the OCSP server unreachableServerless/Dedicated/Self-Hosted
server.auth_log.sql_connections.enabled
booleanfalseif set, log SQL client connect and disconnect events to the SESSIONS log channel (note: may hinder performance on loaded nodes)Serverless/Dedicated/Self-Hosted
server.auth_log.sql_sessions.enabled
booleanfalseif set, log verbose SQL session authentication events to the SESSIONS log channel (note: may hinder performance on loaded nodes). Session start and end events are always logged regardless of this setting; disable the SESSIONS log channel to suppress them.Serverless/Dedicated/Self-Hosted
server.authentication_cache.enabled
booleantrueenables a cache used during authentication to avoid lookups to system tables when retrieving per-user authentication-related informationServerless/Dedicated/Self-Hosted
server.child_metrics.enabled
booleanfalseenables the exporting of child metrics, additional prometheus time series with extra labelsServerless/Dedicated/Self-Hosted
server.child_metrics.include_aggregate.enabled
booleantrueinclude the reporting of the aggregate time series when child metrics are enabled. This cluster setting has no effect if child metrics are disabled.Serverless/Dedicated/Self-Hosted
server.client_cert_expiration_cache.capacity
integer1000the maximum number of client cert expirations storedServerless/Dedicated/Self-Hosted
server.clock.forward_jump_check.enabled
(alias: server.clock.forward_jump_check_enabled)
booleanfalseif enabled, forward clock jumps > max_offset/2 will cause a panicServerless/Dedicated/Self-Hosted
server.clock.persist_upper_bound_interval
duration0sthe interval between persisting the wall time upper bound of the clock. The clock does not generate a wall time greater than the persisted timestamp and will panic if it sees a wall time greater than this value. When cockroach starts, it waits for the wall time to catch-up till this persisted timestamp. This guarantees monotonic wall time across server restarts. Not setting this or setting a value of 0 disables this feature.Serverless/Dedicated/Self-Hosted
server.consistency_check.max_rate
byte size8.0 MiBthe rate limit (bytes/sec) to use for consistency checks; used in conjunction with server.consistency_check.interval to control the frequency of consistency checks. Note that setting this too high can negatively impact performance.Dedicated/Self-Hosted
server.eventlog.enabled
booleantrueif set, logged notable events are also stored in the table system.eventlogServerless/Dedicated/Self-Hosted
server.eventlog.ttl
duration2160h0m0sif nonzero, entries in system.eventlog older than this duration are periodically purgedServerless/Dedicated/Self-Hosted
server.host_based_authentication.configuration
stringhost-based authentication configuration to use during connection authenticationServerless/Dedicated/Self-Hosted
server.hot_ranges_request.node.timeout
duration5m0sthe duration allowed for a single node to return hot range data before the request is cancelled; if set to 0, there is no timeoutServerless/Dedicated/Self-Hosted
server.hsts.enabled
booleanfalseif true, HSTS headers will be sent along with all HTTP requests. The headers will contain a max-age setting of one year. Browsers honoring the header will always use HTTPS to access the DB Console. Ensure that TLS is correctly configured prior to enabling.Serverless/Dedicated/Self-Hosted
server.http.base_path
string/path to redirect the user to upon successful loginServerless/Dedicated/Self-Hosted
server.identity_map.configuration
stringsystem-identity to database-username mappingsServerless/Dedicated/Self-Hosted
server.jwt_authentication.audience
stringsets accepted audience values for JWT logins over the SQL interfaceServerless/Dedicated/Self-Hosted
server.jwt_authentication.claim
stringsets the JWT claim that is parsed to get the usernameServerless/Dedicated/Self-Hosted
server.jwt_authentication.client.timeout
duration15ssets the client timeout for external calls made during JWT authentication (e.g. fetching JWKS, etc.)Serverless/Dedicated/Self-Hosted
server.jwt_authentication.enabled
booleanfalseenables or disables JWT login for the SQL interfaceServerless/Dedicated/Self-Hosted
server.jwt_authentication.issuers.configuration
(alias: server.jwt_authentication.issuers)
stringsets accepted issuer values for JWT logins over the SQL interface which can be a single issuer URL string or a JSON string containing an array of issuer URLs or a JSON object containing map of issuer URLS to JWKS URIsServerless/Dedicated/Self-Hosted
server.jwt_authentication.issuers.custom_ca
stringsets the PEM encoded custom root CA for verifying certificates while fetching JWKSServerless/Dedicated/Self-Hosted
server.jwt_authentication.jwks
string{"keys":[]}sets the public key set for JWT logins over the SQL interface (JWKS format)Serverless/Dedicated/Self-Hosted
server.jwt_authentication.jwks_auto_fetch.enabled
booleanfalseenables or disables automatic fetching of JWKS from the issuer's well-known endpoint or JWKS URI set in JWTAuthIssuersConfig. If this is enabled, the server.jwt_authentication.jwks will be ignored.Serverless/Dedicated/Self-Hosted
server.ldap_authentication.client.tls_certificate
stringsets the client certificate PEM for establishing mTLS connection with LDAP serverServerless/Dedicated/Self-Hosted
server.ldap_authentication.client.tls_key
stringsets the client key PEM for establishing mTLS connection with LDAP serverServerless/Dedicated/Self-Hosted
server.ldap_authentication.domain.custom_ca
stringsets the PEM encoded custom root CA for verifying domain certificates when establishing connection with LDAP serverServerless/Dedicated/Self-Hosted
server.log_gc.max_deletions_per_cycle
integer1000the maximum number of entries to delete on each purge of log-like system tablesServerless/Dedicated/Self-Hosted
server.log_gc.period
duration1h0m0sthe period at which log-like system tables are checked for old entriesServerless/Dedicated/Self-Hosted
server.max_connections_per_gateway
integer-1the maximum number of SQL connections per gateway allowed at a given time (note: this will only limit future connection attempts and will not affect already established connections). Negative values result in unlimited number of connections. Superusers are not affected by this limit.Serverless/Dedicated/Self-Hosted
server.max_open_transactions_per_gateway
integer-1the maximum number of open SQL transactions per gateway allowed at a given time. Negative values result in unlimited number of connections. Superusers are not affected by this limit.Serverless/Dedicated/Self-Hosted
server.oidc_authentication.autologin.enabled
(alias: server.oidc_authentication.autologin)
booleanfalseif true, logged-out visitors to the DB Console will be automatically redirected to the OIDC login endpointServerless/Dedicated/Self-Hosted
server.oidc_authentication.button_text
stringLog in with your OIDC providertext to show on button on DB Console login page to login with your OIDC provider (only shown if OIDC is enabled)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.claim_json_key
stringsets JSON key of principal to extract from payload after OIDC authentication completes (usually email or sid)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.client.timeout
duration15ssets the client timeout for external calls made during OIDC authentication (e.g. authorization code flow, etc.)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.client_id
stringsets OIDC client idServerless/Dedicated/Self-Hosted
server.oidc_authentication.client_secret
stringsets OIDC client secretServerless/Dedicated/Self-Hosted
server.oidc_authentication.enabled
booleanfalseenables or disables OIDC login for the DB ConsoleServerless/Dedicated/Self-Hosted
server.oidc_authentication.principal_regex
string(.+)regular expression to apply to extracted principal (see claim_json_key setting) to translate to SQL user (golang regex format, must include 1 grouping to extract)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.provider_url
stringsets OIDC provider URL ({provider_url}/.well-known/openid-configuration must resolve)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.redirect_url
stringhttps://localhost:8080/oidc/v1/callbacksets OIDC redirect URL via a URL string or a JSON string containing a required `redirect_urls` key with an object that maps from region keys to URL strings (URLs should point to your load balancer and must route to the path /oidc/v1/callback)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.scopes
stringopenidsets OIDC scopes to include with authentication request (space delimited list of strings, required to start with `openid`)Serverless/Dedicated/Self-Hosted
server.rangelog.ttl
duration720h0m0sif nonzero, entries in system.rangelog older than this duration are periodically purgedDedicated/Self-Hosted
server.redact_sensitive_settings.enabled
booleanfalseenables or disables the redaction of sensitive settings in the output of SHOW CLUSTER SETTINGS and SHOW ALL CLUSTER SETTINGS for users without the MODIFYCLUSTERSETTING privilegeServerless/Dedicated/Self-Hosted
server.shutdown.connections.timeout
(alias: server.shutdown.connection_wait)
duration0sthe maximum amount of time a server waits for all SQL connections to be closed before proceeding with a drain. (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Serverless/Dedicated/Self-Hosted
server.shutdown.initial_wait
(alias: server.shutdown.drain_wait)
duration0sthe amount of time a server waits in an unready state before proceeding with a drain (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting. --drain-wait is to specify the duration of the whole draining process, while server.shutdown.initial_wait is to set the wait time for health probes to notice that the node is not ready.)Serverless/Dedicated/Self-Hosted
server.shutdown.lease_transfer_iteration.timeout
(alias: server.shutdown.lease_transfer_wait)
duration5sthe timeout for a single iteration of the range lease transfer phase of draining (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Dedicated/Self-Hosted
server.shutdown.transactions.timeout
(alias: server.shutdown.query_wait)
duration10sthe timeout for waiting for active transactions to finish during a drain (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Serverless/Dedicated/Self-Hosted
server.sql_tcp_keep_alive.count
integer3maximum number of probes that will be sent out before a connection is dropped because it's unresponsive (Linux and Darwin only)Serverless/Dedicated/Self-Hosted
server.sql_tcp_keep_alive.interval
duration10stime between keep alive probes and idle time before probes are sent outServerless/Dedicated/Self-Hosted
server.time_until_store_dead
duration5m0sthe time after which if there is no new gossiped information about a store, it is considered deadServerless/Dedicated/Self-Hosted
server.user_login.cert_password_method.auto_scram_promotion.enabled
booleantruewhether to automatically promote cert-password authentication to use SCRAMServerless/Dedicated/Self-Hosted
server.user_login.downgrade_scram_stored_passwords_to_bcrypt.enabled
booleantrueif server.user_login.password_encryption=crdb-bcrypt, this controls whether to automatically re-encode stored passwords using scram-sha-256 to crdb-bcryptServerless/Dedicated/Self-Hosted
server.user_login.min_password_length
integer1the minimum length accepted for passwords set in cleartext via SQL. Note that a value lower than 1 is ignored: passwords cannot be empty in any case. This setting only applies when adding new users or altering an existing user's password; it will not affect existing logins.Serverless/Dedicated/Self-Hosted
server.user_login.password_encryption
enumerationscram-sha-256which hash method to use to encode cleartext passwords passed via ALTER/CREATE USER/ROLE WITH PASSWORD [crdb-bcrypt = 2, scram-sha-256 = 3]Serverless/Dedicated/Self-Hosted
server.user_login.password_hashes.default_cost.crdb_bcrypt
integer10the hashing cost to use when storing passwords supplied as cleartext by SQL clients with the hashing method crdb-bcrypt (allowed range: 4-31)Serverless/Dedicated/Self-Hosted
server.user_login.password_hashes.default_cost.scram_sha_256
integer10610the hashing cost to use when storing passwords supplied as cleartext by SQL clients with the hashing method scram-sha-256 (allowed range: 4096-240000000000)Serverless/Dedicated/Self-Hosted
server.user_login.rehash_scram_stored_passwords_on_cost_change.enabled
booleantrueif server.user_login.password_hashes.default_cost.scram_sha_256 differs from the cost in a stored hash, this controls whether to automatically re-encode stored passwords using scram-sha-256 with the new default costServerless/Dedicated/Self-Hosted
server.user_login.timeout
duration10stimeout after which client authentication times out if some system range is unavailable (0 = no timeout)Serverless/Dedicated/Self-Hosted
server.user_login.upgrade_bcrypt_stored_passwords_to_scram.enabled
booleantrueif server.user_login.password_encryption=scram-sha-256, this controls whether to automatically re-encode stored passwords using crdb-bcrypt to scram-sha-256Serverless/Dedicated/Self-Hosted
server.web_session.purge.ttl
duration1h0m0sif nonzero, entries in system.web_sessions older than this duration are periodically purgedServerless/Dedicated/Self-Hosted
server.web_session.timeout
(alias: server.web_session_timeout)
duration168h0m0sthe duration that a newly created web session will be validServerless/Dedicated/Self-Hosted
spanconfig.bounds.enabled
booleantruedictates whether span config bounds are consulted when serving span configs for secondary tenantsDedicated/Self-Hosted
spanconfig.range_coalescing.system.enabled
(alias: spanconfig.storage_coalesce_adjacent.enabled)
booleantruecollapse adjacent ranges with the same span configs, for the ranges specific to the system tenantDedicated/Self-Hosted
spanconfig.range_coalescing.application.enabled
(alias: spanconfig.tenant_coalesce_adjacent.enabled)
booleantruecollapse adjacent ranges with the same span configs across all secondary tenant keyspacesDedicated/Self-Hosted
sql.auth.change_own_password.enabled
booleanfalsecontrols whether a user is allowed to change their own password, even if they have no other privilegesServerless/Dedicated/Self-Hosted
sql.auth.grant_option_for_owner.enabled
booleantruedetermines whether the GRANT OPTION for privileges is implicitly given to the owner of an objectServerless/Dedicated/Self-Hosted
sql.auth.grant_option_inheritance.enabled
booleantruedetermines whether the GRANT OPTION for privileges is inherited through role membershipServerless/Dedicated/Self-Hosted
sql.auth.public_schema_create_privilege.enabled
booleantruedetermines whether to grant all users the CREATE privileges on the public schema when it is createdServerless/Dedicated/Self-Hosted
sql.closed_session_cache.capacity
integer1000the maximum number of sessions in the cacheServerless/Dedicated/Self-Hosted
sql.closed_session_cache.time_to_live
integer3600the maximum time to live, in secondsServerless/Dedicated/Self-Hosted
sql.contention.event_store.capacity
byte size64 MiBthe in-memory storage capacity per-node of contention event storeServerless/Dedicated/Self-Hosted
sql.contention.event_store.duration_threshold
duration0sminimum contention duration to cause the contention events to be collected into crdb_internal.transaction_contention_eventsServerless/Dedicated/Self-Hosted
sql.contention.record_serialization_conflicts.enabled
booleantrueenables recording 40001 errors with conflicting txn meta as SERIALIZATION_CONFLICT contention events into crdb_internal.transaction_contention_eventsServerless/Dedicated/Self-Hosted
sql.contention.txn_id_cache.max_size
byte size64 MiBthe maximum byte size TxnID cache will use (set to 0 to disable)Serverless/Dedicated/Self-Hosted
sql.cross_db_fks.enabled
booleanfalseif true, creating foreign key references across databases is allowedServerless/Dedicated/Self-Hosted
sql.cross_db_sequence_owners.enabled
booleanfalseif true, creating sequences owned by tables from other databases is allowedServerless/Dedicated/Self-Hosted
sql.cross_db_sequence_references.enabled
booleanfalseif true, sequences referenced by tables from other databases are allowedServerless/Dedicated/Self-Hosted
sql.cross_db_views.enabled
booleanfalseif true, creating views that refer to other databases is allowedServerless/Dedicated/Self-Hosted
sql.defaults.cost_scans_with_default_col_size.enabled
booleanfalsesetting to true uses the same size for all columns to compute scan cost
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.datestyle
enumerationiso, mdydefault value for DateStyle session setting [iso, mdy = 0, iso, dmy = 1, iso, ymd = 2]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.default_hash_sharded_index_bucket_count
integer16used as bucket count if bucket count is not specified in hash sharded index definition
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.default_int_size
integer8the size, in bytes, of an INT type
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.disallow_full_table_scans.enabled
booleanfalsesetting to true rejects queries that have planned a full table scan; set large_full_scan_rows > 0 to allow small full table scans estimated to read fewer than large_full_scan_rows
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.distsql
enumerationautodefault distributed SQL execution mode [off = 0, auto = 1, on = 2, always = 3]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_alter_column_type.enabled
booleanfalsedefault value for experimental_alter_column_type session setting; enables the use of ALTER COLUMN TYPE for general conversions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_distsql_planning
enumerationoffdefault experimental_distsql_planning mode; enables experimental opt-driven DistSQL planning [off = 0, on = 1]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_enable_unique_without_index_constraints.enabled
booleanfalsedefault value for experimental_enable_unique_without_index_constraints session setting; disables unique without index constraints by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_implicit_column_partitioning.enabled
booleanfalsedefault value for experimental_enable_temp_tables; allows for the use of implicit column partitioning
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_temporary_tables.enabled
booleanfalsedefault value for experimental_enable_temp_tables; allows for use of temporary tables by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.foreign_key_cascades_limit
integer10000default value for foreign_key_cascades_limit session setting; limits the number of cascading operations that run as part of a single query
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.idle_in_session_timeout
duration0sdefault value for the idle_in_session_timeout; default value for the idle_in_session_timeout session setting; controls the duration a session is permitted to idle before the session is terminated; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.idle_in_transaction_session_timeout
duration0sdefault value for the idle_in_transaction_session_timeout; controls the duration a session is permitted to idle in a transaction before the session is terminated; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.implicit_select_for_update.enabled
booleantruedefault value for enable_implicit_select_for_update session setting; enables FOR UPDATE locking during the row-fetch phase of mutation statements
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.insert_fast_path.enabled
booleantruedefault value for enable_insert_fast_path session setting; enables a specialized insert path
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.intervalstyle
enumerationpostgresdefault value for IntervalStyle session setting [postgres = 0, iso_8601 = 1, sql_standard = 2]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.large_full_scan_rows
float0default value for large_full_scan_rows session variable which determines the table size at which full scans are considered large and disallowed when disallow_full_table_scans is set to true; set to 0 to reject all full table or full index scans when disallow_full_table_scans is true
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.locality_optimized_partitioned_index_scan.enabled
booleantruedefault value for locality_optimized_partitioned_index_scan session setting; enables searching for rows in the current region before searching remote regions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.lock_timeout
duration0sdefault value for the lock_timeout; default value for the lock_timeout session setting; controls the duration a query is permitted to wait while attempting to acquire a lock on a key or while blocking on an existing lock in order to perform a non-locking read on a key; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.on_update_rehome_row.enabled
booleantruedefault value for on_update_rehome_row; enables ON UPDATE rehome_row() expressions to trigger on updates
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.optimizer_use_histograms.enabled
booleantruedefault value for optimizer_use_histograms session setting; enables usage of histograms in the optimizer by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.optimizer_use_multicol_stats.enabled
booleantruedefault value for optimizer_use_multicol_stats session setting; enables usage of multi-column stats in the optimizer by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.override_alter_primary_region_in_super_region.enabled
booleanfalsedefault value for override_alter_primary_region_in_super_region; allows for altering the primary region even if the primary region is a member of a super region
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.override_multi_region_zone_config.enabled
booleanfalsedefault value for override_multi_region_zone_config; allows for overriding the zone configs of a multi-region table or database
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.prefer_lookup_joins_for_fks.enabled
booleanfalsedefault value for prefer_lookup_joins_for_fks session setting; causes foreign key operations to use lookup joins when possible
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.primary_region
stringif not empty, all databases created without a PRIMARY REGION will implicitly have the given PRIMARY REGION
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.reorder_joins_limit
integer8default number of joins to reorder
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.require_explicit_primary_keys.enabled
booleanfalsedefault value for requiring explicit primary keys in CREATE TABLE statements
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.results_buffer.size
byte size512 KiBdefault size of the buffer that accumulates results for a statement or a batch of statements before they are sent to the client. This can be overridden on an individual connection with the 'results_buffer_size' parameter. Note that auto-retries generally only happen while no results have been delivered to the client, so reducing this size can increase the number of retriable errors a client receives. On the other hand, increasing the buffer size can increase the delay until the client receives the first result row. Updating the setting only affects new connections. Setting to 0 disables any buffering.
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.serial_normalization
enumerationrowiddefault handling of SERIAL in table definitions [rowid = 0, virtual_sequence = 1, sql_sequence = 2, sql_sequence_cached = 3, unordered_rowid = 4, sql_sequence_cached_node = 5]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.statement_timeout
duration0sdefault value for the statement_timeout; default value for the statement_timeout session setting; controls the duration a query is permitted to run before it is canceled; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.stub_catalog_tables.enabled
booleantruedefault value for stub_catalog_tables session setting
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.super_regions.enabled
booleanfalsedefault value for enable_super_regions; allows for the usage of super regions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.transaction_rows_read_err
integer0the limit for the number of rows read by a SQL transaction which - once exceeded - will fail the transaction (or will trigger a logging event to SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.transaction_rows_read_log
integer0the threshold for the number of rows read by a SQL transaction which - once exceeded - will trigger a logging event to SQL_PERF (or SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.transaction_rows_written_err
integer0the limit for the number of rows written by a SQL transaction which - once exceeded - will fail the transaction (or will trigger a logging event to SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.transaction_rows_written_log
integer0the threshold for the number of rows written by a SQL transaction which - once exceeded - will trigger a logging event to SQL_PERF (or SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.use_declarative_schema_changer
enumerationondefault value for use_declarative_schema_changer session setting; disables new schema changer by default [off = 0, on = 1, unsafe = 2, unsafe_always = 3]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.vectorize
enumerationondefault vectorize mode [on = 0, on = 1, on = 2, experimental_always = 3, off = 4]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.zigzag_join.enabled
booleanfalsedefault value for enable_zigzag_join session setting; disallows use of zig-zag join by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.distsql.temp_storage.workmem
byte size64 MiBmaximum amount of memory in bytes a processor can use before falling back to temp storageServerless/Dedicated/Self-Hosted
sql.guardrails.max_row_size_err
byte size512 MiBmaximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an error is returned; use 0 to disableServerless/Dedicated/Self-Hosted
sql.guardrails.max_row_size_log
byte size64 MiBmaximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an event is logged to SQL_PERF (or SQL_INTERNAL_PERF if the mutating statement was internal); use 0 to disableServerless/Dedicated/Self-Hosted
sql.hash_sharded_range_pre_split.max
integer16max pre-split ranges to have when adding hash sharded index to an existing tableServerless/Dedicated/Self-Hosted
sql.index_recommendation.drop_unused_duration
duration168h0m0sthe index unused duration at which we begin to recommend dropping the indexServerless/Dedicated/Self-Hosted
sql.insights.anomaly_detection.enabled
booleantrueenable per-fingerprint latency recording and anomaly detectionServerless/Dedicated/Self-Hosted
sql.insights.anomaly_detection.latency_threshold
duration50msstatements must surpass this threshold to trigger anomaly detection and identificationServerless/Dedicated/Self-Hosted
sql.insights.anomaly_detection.memory_limit
byte size1.0 MiBthe maximum amount of memory allowed for tracking statement latenciesServerless/Dedicated/Self-Hosted
sql.insights.execution_insights_capacity
integer1000the size of the per-node store of execution insightsServerless/Dedicated/Self-Hosted
sql.insights.high_retry_count.threshold
integer10the number of retries a slow statement must have undergone for its high retry count to be highlighted as a potential problemServerless/Dedicated/Self-Hosted
sql.insights.latency_threshold
duration100msamount of time after which an executing statement is considered slow. Use 0 to disable.Serverless/Dedicated/Self-Hosted
sql.log.redact_names.enabled
booleanfalseif set, schema object identifiers are redacted in SQL statements that appear in event logsServerless/Dedicated/Self-Hosted
sql.log.slow_query.experimental_full_table_scans.enabled
booleanfalsewhen set to true, statements that perform a full table/index scan will be logged to the slow query log even if they do not meet the latency threshold. Must have the slow query log enabled for this setting to have any effect.Serverless/Dedicated/Self-Hosted
sql.log.slow_query.internal_queries.enabled
booleanfalsewhen set to true, internal queries which exceed the slow query log threshold are logged to a separate log. Must have the slow query log enabled for this setting to have any effect.Serverless/Dedicated/Self-Hosted
sql.log.slow_query.latency_threshold
duration0swhen set to non-zero, log statements whose service latency exceeds the threshold to a secondary logger on each nodeServerless/Dedicated/Self-Hosted
sql.log.user_audit
stringuser/role-based audit logging configuration. An enterprise license is required for this cluster setting to take effect.Serverless/Dedicated/Self-Hosted
sql.log.user_audit.reduced_config.enabled
booleanfalseenables logic to compute a reduced audit configuration, computing the audit configuration only once at session start instead of at each SQL event. The tradeoff with the increase in performance (~5%), is that changes to the audit configuration (user role memberships/cluster setting) are not reflected within session. Users will need to start a new session to see these changes in their auditing behaviour.Serverless/Dedicated/Self-Hosted
sql.metrics.index_usage_stats.enabled
booleantruecollect per index usage statisticsServerless/Dedicated/Self-Hosted
sql.metrics.max_mem_reported_stmt_fingerprints
integer100000the maximum number of reported statement fingerprints stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.max_mem_reported_txn_fingerprints
integer100000the maximum number of reported transaction fingerprints stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.max_mem_stmt_fingerprints
integer7500the maximum number of statement fingerprints stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.max_mem_txn_fingerprints
integer7500the maximum number of transaction fingerprints stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.dump_to_logs.enabled
(alias: sql.metrics.statement_details.dump_to_logs)
booleanfalsedump collected statement statistics to node logs when periodically clearedServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.enabled
booleantruecollect per-statement query statisticsServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.gateway_node.enabled
booleanfalsesave the gateway node for each statement fingerprint. If false, the value will be stored as 0.Serverless/Dedicated/Self-Hosted
sql.metrics.statement_details.index_recommendation_collection.enabled
booleantruegenerate an index recommendation for each fingerprint IDServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.max_mem_reported_idx_recommendations
integer5000the maximum number of reported index recommendation info stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.plan_collection.enabled
booleanfalseperiodically save a logical plan for each fingerprintServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.plan_collection.period
duration5m0sthe time until a new logical plan is collectedServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.threshold
duration0sminimum execution time to cause statement statistics to be collected. If configured, no transaction stats are collected.Serverless/Dedicated/Self-Hosted
sql.metrics.transaction_details.enabled
booleantruecollect per-application transaction statisticsServerless/Dedicated/Self-Hosted
sql.multiple_modifications_of_table.enabled
booleanfalseif true, allow statements containing multiple INSERT ON CONFLICT, UPSERT, UPDATE, or DELETE subqueries modifying the same table, at the risk of data corruption if the same row is modified multiple times by a single statement (multiple INSERT subqueries without ON CONFLICT cannot cause corruption and are always allowed)Serverless/Dedicated/Self-Hosted
sql.multiregion.drop_primary_region.enabled
booleantrueallows dropping the PRIMARY REGION of a database if it is the last regionServerless/Dedicated/Self-Hosted
sql.notices.enabled
booleantrueenable notices in the server/client protocol being sentServerless/Dedicated/Self-Hosted
sql.optimizer.uniqueness_checks_for_gen_random_uuid.enabled
booleanfalseif enabled, uniqueness checks may be planned for mutations of UUID columns updated with gen_random_uuid(); otherwise, uniqueness is assumed due to near-zero collision probabilityServerless/Dedicated/Self-Hosted
sql.schema.telemetry.recurrence
string@weeklycron-tab recurrence for SQL schema telemetry jobDedicated/Self-hosted (read-write); Serverless (read-only)
sql.spatial.experimental_box2d_comparison_operators.enabled
booleanfalseenables the use of certain experimental box2d comparison operatorsServerless/Dedicated/Self-Hosted
sql.stats.activity.persisted_rows.max
integer200000maximum number of rows of statement and transaction activity that will be persisted in the system tablesServerless/Dedicated/Self-Hosted
sql.stats.automatic_collection.enabled
booleantrueautomatic statistics collection modeServerless/Dedicated/Self-Hosted
sql.stats.automatic_collection.fraction_stale_rows
float0.2target fraction of stale rows per table that will trigger a statistics refreshServerless/Dedicated/Self-Hosted
sql.stats.automatic_collection.min_stale_rows
integer500target minimum number of stale rows per table that will trigger a statistics refreshServerless/Dedicated/Self-Hosted
sql.stats.automatic_partial_collection.enabled
booleantrueautomatic partial statistics collection modeServerless/Dedicated/Self-Hosted
sql.stats.automatic_partial_collection.fraction_stale_rows
float0.05target fraction of stale rows per table that will trigger a partial statistics refreshServerless/Dedicated/Self-Hosted
sql.stats.automatic_partial_collection.min_stale_rows
integer100target minimum number of stale rows per table that will trigger a partial statistics refreshServerless/Dedicated/Self-Hosted
sql.stats.cleanup.recurrence
string@hourlycron-tab recurrence for SQL Stats cleanup jobServerless/Dedicated/Self-Hosted
sql.stats.error_on_concurrent_create_stats.enabled
booleantrueset to true to error on concurrent CREATE STATISTICS jobs, instead of skipping themServerless/Dedicated/Self-Hosted
sql.stats.flush.enabled
booleantrueif set, SQL execution statistics are periodically flushed to diskServerless/Dedicated/Self-Hosted
sql.stats.flush.interval
duration10m0sthe interval at which SQL execution statistics are flushed to disk, this value must be less than or equal to 1 hourServerless/Dedicated/Self-Hosted
sql.stats.forecasts.enabled
booleantruewhen true, enables generation of statistics forecasts by default for all tablesServerless/Dedicated/Self-Hosted
sql.stats.forecasts.max_decrease
float0.3333333333333333the most a prediction is allowed to decrease, expressed as the minimum ratio of the prediction to the lowest prior observationServerless/Dedicated/Self-Hosted
sql.stats.forecasts.min_goodness_of_fit
float0.95the minimum R² (goodness of fit) measurement required from all predictive models to use a forecastServerless/Dedicated/Self-Hosted
sql.stats.forecasts.min_observations
integer3the minimum number of observed statistics required to produce a statistics forecastServerless/Dedicated/Self-Hosted
sql.stats.histogram_buckets.count
integer200maximum number of histogram buckets to build during table statistics collectionServerless/Dedicated/Self-Hosted
sql.stats.histogram_buckets.include_most_common_values.enabled
booleantruewhether to include most common values as histogram bucketsServerless/Dedicated/Self-Hosted
sql.stats.histogram_buckets.max_fraction_most_common_values
float0.1maximum fraction of histogram buckets to use for most common valuesServerless/Dedicated/Self-Hosted
sql.stats.histogram_collection.enabled
booleantruehistogram collection modeServerless/Dedicated/Self-Hosted
sql.stats.histogram_samples.count
integer0number of rows sampled for histogram construction during table statistics collection. Not setting this or setting a value of 0 means that a reasonable sample size will be automatically picked based on the table size.Serverless/Dedicated/Self-Hosted
sql.stats.multi_column_collection.enabled
booleantruemulti-column statistics collection modeServerless/Dedicated/Self-Hosted
sql.stats.non_default_columns.min_retention_period
duration24h0m0sminimum retention period for table statistics collected on non-default columnsServerless/Dedicated/Self-Hosted
sql.stats.non_indexed_json_histograms.enabled
booleanfalseset to true to collect table statistics histograms on non-indexed JSON columnsServerless/Dedicated/Self-Hosted
sql.stats.persisted_rows.max
integer1000000maximum number of rows of statement and transaction statistics that will be persisted in the system tables before compaction beginsServerless/Dedicated/Self-Hosted
sql.stats.post_events.enabled
booleanfalseif set, an event is logged for every successful CREATE STATISTICS jobServerless/Dedicated/Self-Hosted
sql.stats.response.max
integer20000the maximum number of statements and transaction stats returned in a CombinedStatements requestServerless/Dedicated/Self-Hosted
sql.stats.response.show_internal.enabled
booleanfalsecontrols if statistics for internal executions should be returned by the CombinedStatements and if internal sessions should be returned by the ListSessions endpoints. These endpoints are used to display statistics on the SQL Activity pagesServerless/Dedicated/Self-Hosted
sql.stats.system_tables.enabled
booleantruewhen true, enables use of statistics on system tables by the query optimizerServerless/Dedicated/Self-Hosted
sql.stats.system_tables_autostats.enabled
booleantruewhen true, enables automatic collection of statistics on system tablesServerless/Dedicated/Self-Hosted
sql.stats.virtual_computed_columns.enabled
booleantrueset to true to collect table statistics on virtual computed columnsServerless/Dedicated/Self-Hosted
sql.telemetry.query_sampling.enabled
booleanfalsewhen set to true, executed queries will emit an event on the telemetry logging channelServerless/Dedicated/Self-Hosted
sql.telemetry.query_sampling.internal.enabled
booleanfalsewhen set to true, internal queries will be sampled in telemetry loggingServerless/Dedicated/Self-Hosted
sql.telemetry.query_sampling.max_event_frequency
integer8the max event frequency (events per second) at which we sample executions for telemetry, note that it is recommended that this value shares a log-line limit of 10 logs per second on the telemetry pipeline with all other telemetry events. If sampling mode is set to 'transaction', this value is ignored.Serverless/Dedicated/Self-Hosted
sql.telemetry.query_sampling.mode
enumerationstatementthe execution level used for telemetry sampling. If set to 'statement', events are sampled at the statement execution level. If set to 'transaction', events are sampled at the transaction execution level, i.e. all statements for a transaction will be logged and are counted together as one sampled event (events are still emitted one per statement). [statement = 0, transaction = 1]Serverless/Dedicated/Self-Hosted
sql.telemetry.transaction_sampling.max_event_frequency
integer8the max event frequency (events per second) at which we sample transactions for telemetry. If sampling mode is set to 'statement', this setting is ignored. In practice, this means that we only sample a transaction if 1/max_event_frequency seconds have elapsed since the last transaction was sampled.Serverless/Dedicated/Self-Hosted
sql.telemetry.transaction_sampling.statement_events_per_transaction.max
integer50the maximum number of statement events to log for every sampled transaction. Note that statements that are logged by force do not adhere to this limit.Serverless/Dedicated/Self-Hosted
sql.temp_object_cleaner.cleanup_interval
duration30m0show often to clean up orphaned temporary objectsServerless/Dedicated/Self-Hosted
sql.temp_object_cleaner.wait_interval
duration30m0show long after creation a temporary object will be cleaned upServerless/Dedicated/Self-Hosted
sql.log.all_statements.enabled
(alias: sql.trace.log_statement_execute)
booleanfalseset to true to enable logging of all executed statementsServerless/Dedicated/Self-Hosted
sql.trace.stmt.enable_threshold
duration0senables tracing on all statements; statements executing for longer than this duration will have their trace logged (set to 0 to disable); note that enabling this may have a negative performance impact; this setting applies to individual statements within a transaction and is therefore finer-grained than sql.trace.txn.enable_thresholdServerless/Dedicated/Self-Hosted
sql.trace.txn.enable_threshold
duration0senables tracing on all transactions; transactions open for longer than this duration will have their trace logged (set to 0 to disable); note that enabling this may have a negative performance impact; this setting is coarser-grained than sql.trace.stmt.enable_threshold because it applies to all statements within a transaction as well as client communication (e.g. retries)Serverless/Dedicated/Self-Hosted
sql.ttl.changefeed_replication.disabled
booleanfalseif true, deletes issued by TTL will not be replicated via changefeeds (this setting will be ignored by changefeeds that have the ignore_disable_changefeed_replication option set; such changefeeds will continue to replicate all TTL deletes)Serverless/Dedicated/Self-Hosted
sql.ttl.default_delete_batch_size
integer100default amount of rows to delete in a single query during a TTL jobServerless/Dedicated/Self-Hosted
sql.ttl.default_delete_rate_limit
integer100default delete rate limit (rows per second) per node for each TTL job. Use 0 to signify no rate limit.Serverless/Dedicated/Self-Hosted
sql.ttl.default_select_batch_size
integer500default amount of rows to select in a single query during a TTL jobServerless/Dedicated/Self-Hosted
sql.ttl.default_select_rate_limit
integer0default select rate limit (rows per second) per node for each TTL job. Use 0 to signify no rate limit.Serverless/Dedicated/Self-Hosted
sql.ttl.job.enabled
booleantruewhether the TTL job is enabledServerless/Dedicated/Self-Hosted
sql.txn.read_committed_isolation.enabled
booleantrueset to true to allow transactions to use the READ COMMITTED isolation level if specified by BEGIN/SET commandsServerless/Dedicated/Self-Hosted
sql.txn.repeatable_read_isolation.enabled
(alias: sql.txn.snapshot_isolation.enabled)
booleanfalseset to true to allow transactions to use the REPEATABLE READ isolation level if specified by BEGIN/SET commandsServerless/Dedicated/Self-Hosted
sql.txn_fingerprint_id_cache.capacity
integer100the maximum number of txn fingerprint IDs storedServerless/Dedicated/Self-Hosted
storage.columnar_blocks.enabled
booleantrueset to true to enable columnar-blocks to store KVs in a columnar formatDedicated/Self-hosted (read-write); Serverless (read-only)
storage.delete_compaction_excise.enabled
booleantrueset to false to direct Pebble to not partially excise sstables in delete-only compactionsDedicated/Self-hosted (read-write); Serverless (read-only)
storage.experimental.eventually_file_only_snapshots.enabled
booleantrueset to false to disable eventually-file-only-snapshots (kv.snapshot_receiver.excise.enabled must also be false)Dedicated/Self-Hosted
storage.ingest_split.enabled
booleantrueset to false to disable ingest-time splitting that lowers write-amplificationDedicated/Self-Hosted
storage.ingestion.value_blocks.enabled
booleantrueset to true to enable writing of value blocks in ingestion sstablesServerless/Dedicated/Self-Hosted
storage.max_sync_duration
duration20smaximum duration for disk operations; any operations that take longer than this setting trigger a warning log entry or process crashDedicated/Self-hosted (read-write); Serverless (read-only)
storage.max_sync_duration.fatal.enabled
booleantrueif true, fatal the process when a disk operation exceeds storage.max_sync_durationServerless/Dedicated/Self-Hosted
storage.sstable.compression_algorithm
enumerationsnappydetermines the compression algorithm to use when compressing sstable data blocks for use in a Pebble store; [snappy = 1, zstd = 2, none = 3]Dedicated/Self-hosted (read-write); Serverless (read-only)
storage.sstable.compression_algorithm_backup_storage
enumerationsnappydetermines the compression algorithm to use when compressing sstable data blocks for backup row data storage; [snappy = 1, zstd = 2, none = 3]Dedicated/Self-hosted (read-write); Serverless (read-only)
storage.sstable.compression_algorithm_backup_transport
enumerationsnappydetermines the compression algorithm to use when compressing sstable data blocks for backup transport; [snappy = 1, zstd = 2, none = 3]Dedicated/Self-hosted (read-write); Serverless (read-only)
storage.wal_failover.unhealthy_op_threshold
duration100msthe latency of a WAL write considered unhealthy and triggers a failover to a secondary WAL locationDedicated/Self-Hosted
timeseries.storage.enabled
booleantrueif set, periodic timeseries data is stored within the cluster; disabling is not recommended unless you are storing the data elsewhereDedicated/Self-Hosted
timeseries.storage.resolution_10s.ttl
duration240h0m0sthe maximum age of time series data stored at the 10 second resolution. Data older than this is subject to rollup and deletion.Dedicated/Self-hosted (read-write); Serverless (read-only)
timeseries.storage.resolution_30m.ttl
duration2160h0m0sthe maximum age of time series data stored at the 30 minute resolution. Data older than this is subject to deletion.Dedicated/Self-hosted (read-write); Serverless (read-only)
trace.debug_http_endpoint.enabled
(alias: trace.debug.enable)
booleanfalseif set, traces for recent requests can be seen at https://<ui>/debug/requestsServerless/Dedicated/Self-Hosted
trace.opentelemetry.collector
stringaddress of an OpenTelemetry trace collector to receive traces using the otel gRPC protocol, as <host>:<port>. If no port is specified, 4317 will be used.Serverless/Dedicated/Self-Hosted
trace.snapshot.rate
duration0sif non-zero, interval at which background trace snapshots are capturedServerless/Dedicated/Self-Hosted
trace.span_registry.enabled
booleanfalseif set, ongoing traces can be seen at https://<ui>/#/debug/tracezServerless/Dedicated/Self-Hosted
trace.zipkin.collector
stringthe address of a Zipkin instance to receive traces, as <host>:<port>. If no port is specified, 9411 will be used.Serverless/Dedicated/Self-Hosted
ui.database_locality_metadata.enabled
booleantrueif enabled shows extended locality data about databases and tables in DB Console which can be expensive to computeServerless/Dedicated/Self-Hosted
ui.display_timezone
enumerationetc/utcthe timezone used to format timestamps in the ui [etc/utc = 0, america/new_york = 1]Serverless/Dedicated/Self-Hosted
version
version25.1set the active cluster version in the format '<major>.<minor>'Serverless/Dedicated/Self-Hosted
diff --git a/src/current/_includes/cockroach-generated/release-25.1/sql/aggregates.md b/src/current/_includes/cockroach-generated/release-25.1/sql/aggregates.md new file mode 100644 index 00000000000..3213f0f02a8 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.1/sql/aggregates.md @@ -0,0 +1,579 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_agg(arg1: bool) → bool[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bool[]) → bool[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bytes) → bytes[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bytes[]) → bytes[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: date) → date[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: date[]) → date[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: decimal) → decimal[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: decimal[]) → decimal[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: float) → float[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: float[]) → float[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: inet) → inet[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: inet[]) → inet[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: int) → int[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: int[]) → int[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: interval) → interval[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: interval[]) → interval[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: string) → string[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: string[]) → string[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: time) → time[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: time[]) → time[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamp) → timestamp[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamp[]) → timestamp[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamptz) → timestamptz[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamptz[]) → timestamptz[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: uuid) → uuid[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: uuid[]) → uuid[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: anyenum) → anyenum[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: anyenum[]) → anyenum[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: box2d) → box2d[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: box2d[]) → box2d[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geography) → geography[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geography[]) → geography[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geometry) → geometry[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geometry[]) → geometry[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: jsonb) → jsonb[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: jsonb[]) → jsonb[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: oid) → oid[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: oid[]) → oid[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: pg_lsn) → pg_lsn[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: pg_lsn[]) → pg_lsn[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: refcursor) → refcursor[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: refcursor[]) → refcursor[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timetz) → timetz[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timetz[]) → timetz[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: tuple) → tuple[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: tuple[]) → tuple[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: varbit) → varbit[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: varbit[]) → varbit[][]

Aggregates the selected values into an array.

+
Immutable
array_cat_agg(arg1: bool[]) → bool[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: bytes[]) → bytes[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: date[]) → date[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: decimal[]) → decimal[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: float[]) → float[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: inet[]) → inet[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: int[]) → int[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: interval[]) → interval[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: string[]) → string[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: time[]) → time[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timestamp[]) → timestamp[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timestamptz[]) → timestamptz[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: uuid[]) → uuid[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: anyenum[]) → anyenum[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: box2d[]) → box2d[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: geography[]) → geography[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: geometry[]) → geometry[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: jsonb[]) → jsonb[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: oid[]) → oid[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: pg_lsn[]) → pg_lsn[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: refcursor[]) → refcursor[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timetz[]) → timetz[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: tuple[]) → tuple[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: varbit[]) → varbit[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
avg(arg1: decimal) → decimal

Calculates the average of the selected values.

+
Immutable
avg(arg1: float) → float

Calculates the average of the selected values.

+
Immutable
avg(arg1: int) → decimal

Calculates the average of the selected values.

+
Immutable
avg(arg1: interval) → interval

Calculates the average of the selected values.

+
Immutable
bit_and(arg1: int) → int

Calculates the bitwise AND of all non-null input values, or null if none.

+
Immutable
bit_and(arg1: varbit) → varbit

Calculates the bitwise AND of all non-null input values, or null if none.

+
Immutable
bit_or(arg1: int) → int

Calculates the bitwise OR of all non-null input values, or null if none.

+
Immutable
bit_or(arg1: varbit) → varbit

Calculates the bitwise OR of all non-null input values, or null if none.

+
Immutable
bool_and(arg1: bool) → bool

Calculates the boolean value of ANDing all selected values.

+
Immutable
bool_or(arg1: bool) → bool

Calculates the boolean value of ORing all selected values.

+
Immutable
concat_agg(arg1: bytes) → bytes

Concatenates all selected values.

+
Immutable
concat_agg(arg1: string) → string

Concatenates all selected values.

+
Immutable
corr(arg1: decimal, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: decimal, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: decimal, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
count(arg1: anyelement) → int

Calculates the number of selected elements.

+
Immutable
count_rows() → int

Calculates the number of rows.

+
Immutable
covar_pop(arg1: decimal, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: decimal, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: decimal, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
every(arg1: bool) → bool

Calculates the boolean value of ANDing all selected values.

+
Immutable
json_agg(arg1: anyelement) → jsonb

Aggregates values as a JSON or JSONB array.

+
Stable
json_object_agg(arg1: string, arg2: anyelement) → jsonb

Aggregates values as a JSON or JSONB object.

+
Stable
jsonb_agg(arg1: anyelement) → jsonb

Aggregates values as a JSON or JSONB array.

+
Stable
jsonb_object_agg(arg1: string, arg2: anyelement) → jsonb

Aggregates values as a JSON or JSONB object.

+
Stable
max(arg1: bool) → bool

Identifies the maximum selected value.

+
Immutable
max(arg1: bytes) → bytes

Identifies the maximum selected value.

+
Immutable
max(arg1: date) → date

Identifies the maximum selected value.

+
Immutable
max(arg1: decimal) → decimal

Identifies the maximum selected value.

+
Immutable
max(arg1: float) → float

Identifies the maximum selected value.

+
Immutable
max(arg1: inet) → inet

Identifies the maximum selected value.

+
Immutable
max(arg1: int) → int

Identifies the maximum selected value.

+
Immutable
max(arg1: interval) → interval

Identifies the maximum selected value.

+
Immutable
max(arg1: string) → string

Identifies the maximum selected value.

+
Immutable
max(arg1: time) → time

Identifies the maximum selected value.

+
Immutable
max(arg1: timestamp) → timestamp

Identifies the maximum selected value.

+
Immutable
max(arg1: timestamptz) → timestamptz

Identifies the maximum selected value.

+
Immutable
max(arg1: uuid) → uuid

Identifies the maximum selected value.

+
Immutable
max(arg1: anyenum) → anyenum

Identifies the maximum selected value.

+
Immutable
max(arg1: box2d) → box2d

Identifies the maximum selected value.

+
Immutable
max(arg1: collatedstring{*}) → collatedstring{*}

Identifies the maximum selected value.

+
Immutable
max(arg1: geography) → geography

Identifies the maximum selected value.

+
Immutable
max(arg1: geometry) → geometry

Identifies the maximum selected value.

+
Immutable
max(arg1: jsonb) → jsonb

Identifies the maximum selected value.

+
Immutable
max(arg1: oid) → oid

Identifies the maximum selected value.

+
Immutable
max(arg1: pg_lsn) → pg_lsn

Identifies the maximum selected value.

+
Immutable
max(arg1: timetz) → timetz

Identifies the maximum selected value.

+
Immutable
max(arg1: varbit) → varbit

Identifies the maximum selected value.

+
Immutable
min(arg1: bool) → bool

Identifies the minimum selected value.

+
Immutable
min(arg1: bytes) → bytes

Identifies the minimum selected value.

+
Immutable
min(arg1: date) → date

Identifies the minimum selected value.

+
Immutable
min(arg1: decimal) → decimal

Identifies the minimum selected value.

+
Immutable
min(arg1: float) → float

Identifies the minimum selected value.

+
Immutable
min(arg1: inet) → inet

Identifies the minimum selected value.

+
Immutable
min(arg1: int) → int

Identifies the minimum selected value.

+
Immutable
min(arg1: interval) → interval

Identifies the minimum selected value.

+
Immutable
min(arg1: string) → string

Identifies the minimum selected value.

+
Immutable
min(arg1: time) → time

Identifies the minimum selected value.

+
Immutable
min(arg1: timestamp) → timestamp

Identifies the minimum selected value.

+
Immutable
min(arg1: timestamptz) → timestamptz

Identifies the minimum selected value.

+
Immutable
min(arg1: uuid) → uuid

Identifies the minimum selected value.

+
Immutable
min(arg1: anyenum) → anyenum

Identifies the minimum selected value.

+
Immutable
min(arg1: box2d) → box2d

Identifies the minimum selected value.

+
Immutable
min(arg1: collatedstring{*}) → collatedstring{*}

Identifies the minimum selected value.

+
Immutable
min(arg1: geography) → geography

Identifies the minimum selected value.

+
Immutable
min(arg1: geometry) → geometry

Identifies the minimum selected value.

+
Immutable
min(arg1: jsonb) → jsonb

Identifies the minimum selected value.

+
Immutable
min(arg1: oid) → oid

Identifies the minimum selected value.

+
Immutable
min(arg1: pg_lsn) → pg_lsn

Identifies the minimum selected value.

+
Immutable
min(arg1: timetz) → timetz

Identifies the minimum selected value.

+
Immutable
min(arg1: varbit) → varbit

Identifies the minimum selected value.

+
Immutable
percentile_cont(arg1: float) → float

Continuous percentile: returns a float corresponding to the specified fraction in the ordering, interpolating between adjacent input floats if needed.

+
Immutable
percentile_cont(arg1: float) → interval

Continuous percentile: returns an interval corresponding to the specified fraction in the ordering, interpolating between adjacent input intervals if needed.

+
Immutable
percentile_cont(arg1: float[]) → float[]

Continuous percentile: returns floats corresponding to the specified fractions in the ordering, interpolating between adjacent input floats if needed.

+
Immutable
percentile_cont(arg1: float[]) → interval[]

Continuous percentile: returns intervals corresponding to the specified fractions in the ordering, interpolating between adjacent input intervals if needed.

+
Immutable
percentile_disc(arg1: float) → anyelement

Discrete percentile: returns the first input value whose position in the ordering equals or exceeds the specified fraction.

+
Immutable
percentile_disc(arg1: float[]) → anyelement

Discrete percentile: returns input values whose position in the ordering equals or exceeds the specified fractions.

+
Immutable
regr_avgx(arg1: decimal, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: decimal, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: decimal, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_count(arg1: decimal, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: decimal, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: decimal, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_intercept(arg1: decimal, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: decimal, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: decimal, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_r2(arg1: decimal, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: decimal, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: decimal, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_slope(arg1: decimal, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: decimal, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: decimal, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_sxx(arg1: decimal, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: decimal, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: decimal, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
sqrdiff(arg1: decimal) → decimal

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
sqrdiff(arg1: float) → float

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
sqrdiff(arg1: int) → decimal

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
st_collect(arg1: geometry) → geometry

Collects geometries into a GeometryCollection or multi-type as appropriate.

+
Immutable
st_extent(arg1: geometry) → box2d

Forms a Box2D that encapsulates all provided geometries.

+
Immutable
st_makeline(arg1: geometry) → geometry

Forms a LineString from Point, MultiPoint or LineStrings. Other shapes will be ignored.

+
Immutable
st_memcollect(arg1: geometry) → geometry

Collects geometries into a GeometryCollection or multi-type as appropriate.

+
Immutable
st_memunion(arg1: geometry) → geometry

Applies a spatial union to the geometries provided.

+
Immutable
st_union(arg1: geometry) → geometry

Applies a spatial union to the geometries provided.

+
Immutable
stddev(arg1: decimal) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev(arg1: float) → float

Calculates the standard deviation of the selected values.

+
Immutable
stddev(arg1: int) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: decimal) → decimal

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: float) → float

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: int) → decimal

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: decimal) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: float) → float

Calculates the standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: int) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
string_agg(arg1: bytes, arg2: bytes) → bytes

Concatenates all selected values using the provided delimiter.

+
Immutable
string_agg(arg1: string, arg2: string) → string

Concatenates all selected values using the provided delimiter.

+
Immutable
sum(arg1: decimal) → decimal

Calculates the sum of the selected values.

+
Immutable
sum(arg1: float) → float

Calculates the sum of the selected values.

+
Immutable
sum(arg1: int) → decimal

Calculates the sum of the selected values.

+
Immutable
sum(arg1: interval) → interval

Calculates the sum of the selected values.

+
Immutable
sum_int(arg1: int) → int

Calculates the sum of the selected values.

+
Immutable
var_pop(arg1: decimal) → decimal

Calculates the population variance of the selected values.

+
Immutable
var_pop(arg1: float) → float

Calculates the population variance of the selected values.

+
Immutable
var_pop(arg1: int) → decimal

Calculates the population variance of the selected values.

+
Immutable
var_samp(arg1: decimal) → decimal

Calculates the variance of the selected values.

+
Immutable
var_samp(arg1: float) → float

Calculates the variance of the selected values.

+
Immutable
var_samp(arg1: int) → decimal

Calculates the variance of the selected values.

+
Immutable
variance(arg1: decimal) → decimal

Calculates the variance of the selected values.

+
Immutable
variance(arg1: float) → float

Calculates the variance of the selected values.

+
Immutable
variance(arg1: int) → decimal

Calculates the variance of the selected values.

+
Immutable
xor_agg(arg1: bytes) → bytes

Calculates the bitwise XOR of the selected values.

+
Immutable
xor_agg(arg1: int) → int

Calculates the bitwise XOR of the selected values.

+
Immutable
+ diff --git a/src/current/_includes/cockroach-generated/release-25.1/sql/functions.md b/src/current/_includes/cockroach-generated/release-25.1/sql/functions.md new file mode 100644 index 00000000000..1e734501429 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.1/sql/functions.md @@ -0,0 +1,3523 @@ +### Array functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_append(array: bool[], elem: bool) → bool[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: bytes[], elem: bytes) → bytes[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: date[], elem: date) → date[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: decimal[], elem: decimal) → decimal[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: float[], elem: float) → float[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: inet[], elem: inet) → inet[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: int[], elem: int) → int[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: interval[], elem: interval) → interval[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: string[], elem: string) → string[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: time[], elem: time) → time[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timestamp[], elem: timestamp) → timestamp[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timestamptz[], elem: timestamptz) → timestamptz[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: uuid[], elem: uuid) → uuid[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: anyenum[], elem: anyenum) → anyenum[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: box2d[], elem: box2d) → box2d[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: geography[], elem: geography) → geography[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: geometry[], elem: geometry) → geometry[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: jsonb[], elem: jsonb) → jsonb[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: oid[], elem: oid) → oid[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: pg_lsn[], elem: pg_lsn) → pg_lsn[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: refcursor[], elem: refcursor) → refcursor[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timetz[], elem: timetz) → timetz[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: tuple[], elem: tuple) → tuple[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: varbit[], elem: varbit) → varbit[]

Appends elem to array, returning the result.

+
Immutable
array_cat(left: bool[], right: bool[]) → bool[]

Appends two arrays.

+
Immutable
array_cat(left: bytes[], right: bytes[]) → bytes[]

Appends two arrays.

+
Immutable
array_cat(left: date[], right: date[]) → date[]

Appends two arrays.

+
Immutable
array_cat(left: decimal[], right: decimal[]) → decimal[]

Appends two arrays.

+
Immutable
array_cat(left: float[], right: float[]) → float[]

Appends two arrays.

+
Immutable
array_cat(left: inet[], right: inet[]) → inet[]

Appends two arrays.

+
Immutable
array_cat(left: int[], right: int[]) → int[]

Appends two arrays.

+
Immutable
array_cat(left: interval[], right: interval[]) → interval[]

Appends two arrays.

+
Immutable
array_cat(left: string[], right: string[]) → string[]

Appends two arrays.

+
Immutable
array_cat(left: time[], right: time[]) → time[]

Appends two arrays.

+
Immutable
array_cat(left: timestamp[], right: timestamp[]) → timestamp[]

Appends two arrays.

+
Immutable
array_cat(left: timestamptz[], right: timestamptz[]) → timestamptz[]

Appends two arrays.

+
Immutable
array_cat(left: uuid[], right: uuid[]) → uuid[]

Appends two arrays.

+
Immutable
array_cat(left: anyenum[], right: anyenum[]) → anyenum[]

Appends two arrays.

+
Immutable
array_cat(left: box2d[], right: box2d[]) → box2d[]

Appends two arrays.

+
Immutable
array_cat(left: geography[], right: geography[]) → geography[]

Appends two arrays.

+
Immutable
array_cat(left: geometry[], right: geometry[]) → geometry[]

Appends two arrays.

+
Immutable
array_cat(left: jsonb[], right: jsonb[]) → jsonb[]

Appends two arrays.

+
Immutable
array_cat(left: oid[], right: oid[]) → oid[]

Appends two arrays.

+
Immutable
array_cat(left: pg_lsn[], right: pg_lsn[]) → pg_lsn[]

Appends two arrays.

+
Immutable
array_cat(left: refcursor[], right: refcursor[]) → refcursor[]

Appends two arrays.

+
Immutable
array_cat(left: timetz[], right: timetz[]) → timetz[]

Appends two arrays.

+
Immutable
array_cat(left: tuple[], right: tuple[]) → tuple[]

Appends two arrays.

+
Immutable
array_cat(left: varbit[], right: varbit[]) → varbit[]

Appends two arrays.

+
Immutable
array_length(input: anyelement[], array_dimension: int) → int

Calculates the length of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
array_lower(input: anyelement[], array_dimension: int) → int

Calculates the minimum value of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
array_position(array: bool[], elem: bool) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: bool[], elem: bool, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: bytes[], elem: bytes) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: bytes[], elem: bytes, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: date[], elem: date) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: date[], elem: date, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: decimal[], elem: decimal) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: decimal[], elem: decimal, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: float[], elem: float) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: float[], elem: float, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: inet[], elem: inet) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: inet[], elem: inet, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: int[], elem: int) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: int[], elem: int, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: interval[], elem: interval) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: interval[], elem: interval, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: string[], elem: string) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: string[], elem: string, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: time[], elem: time) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: time[], elem: time, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: timestamp[], elem: timestamp) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timestamp[], elem: timestamp, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: timestamptz[], elem: timestamptz) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timestamptz[], elem: timestamptz, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: uuid[], elem: uuid) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: uuid[], elem: uuid, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: anyenum[], elem: anyenum) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: anyenum[], elem: anyenum, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: box2d[], elem: box2d) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: box2d[], elem: box2d, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: geography[], elem: geography) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: geography[], elem: geography, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: geometry[], elem: geometry) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: geometry[], elem: geometry, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: jsonb[], elem: jsonb) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: jsonb[], elem: jsonb, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: oid[], elem: oid) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: oid[], elem: oid, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: pg_lsn[], elem: pg_lsn) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: pg_lsn[], elem: pg_lsn, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: refcursor[], elem: refcursor) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: refcursor[], elem: refcursor, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: timetz[], elem: timetz) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timetz[], elem: timetz, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: tuple[], elem: tuple) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: tuple[], elem: tuple, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: varbit[], elem: varbit) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: varbit[], elem: varbit, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_positions(array: bool[], elem: bool) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: bytes[], elem: bytes) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: date[], elem: date) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: decimal[], elem: decimal) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: float[], elem: float) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: inet[], elem: inet) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: int[], elem: int) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: interval[], elem: interval) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: string[], elem: string) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: time[], elem: time) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timestamp[], elem: timestamp) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timestamptz[], elem: timestamptz) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: uuid[], elem: uuid) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: anyenum[], elem: anyenum) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: box2d[], elem: box2d) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: geography[], elem: geography) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: geometry[], elem: geometry) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: jsonb[], elem: jsonb) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: oid[], elem: oid) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: pg_lsn[], elem: pg_lsn) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: refcursor[], elem: refcursor) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timetz[], elem: timetz) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: tuple[], elem: tuple) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: varbit[], elem: varbit) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_prepend(elem: bool, array: bool[]) → bool[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: bytes, array: bytes[]) → bytes[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: date, array: date[]) → date[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: decimal, array: decimal[]) → decimal[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: float, array: float[]) → float[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: inet, array: inet[]) → inet[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: int, array: int[]) → int[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: interval, array: interval[]) → interval[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: string, array: string[]) → string[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: time, array: time[]) → time[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timestamp, array: timestamp[]) → timestamp[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timestamptz, array: timestamptz[]) → timestamptz[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: uuid, array: uuid[]) → uuid[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: anyenum, array: anyenum[]) → anyenum[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: box2d, array: box2d[]) → box2d[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: geography, array: geography[]) → geography[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: geometry, array: geometry[]) → geometry[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: jsonb, array: jsonb[]) → jsonb[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: oid, array: oid[]) → oid[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: pg_lsn, array: pg_lsn[]) → pg_lsn[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: refcursor, array: refcursor[]) → refcursor[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timetz, array: timetz[]) → timetz[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: tuple, array: tuple[]) → tuple[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: varbit, array: varbit[]) → varbit[]

Prepends elem to array, returning the result.

+
Immutable
array_remove(array: bool[], elem: bool) → bool[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: bytes[], elem: bytes) → bytes[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: date[], elem: date) → date[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: decimal[], elem: decimal) → decimal[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: float[], elem: float) → float[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: inet[], elem: inet) → inet[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: int[], elem: int) → int[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: interval[], elem: interval) → interval[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: string[], elem: string) → string[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: time[], elem: time) → time[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timestamp[], elem: timestamp) → timestamp[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timestamptz[], elem: timestamptz) → timestamptz[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: uuid[], elem: uuid) → uuid[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: anyenum[], elem: anyenum) → anyenum[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: box2d[], elem: box2d) → box2d[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: geography[], elem: geography) → geography[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: geometry[], elem: geometry) → geometry[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: jsonb[], elem: jsonb) → jsonb[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: oid[], elem: oid) → oid[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: pg_lsn[], elem: pg_lsn) → pg_lsn[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: refcursor[], elem: refcursor) → refcursor[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timetz[], elem: timetz) → timetz[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: tuple[], elem: tuple) → tuple[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: varbit[], elem: varbit) → varbit[]

Remove from array all elements equal to elem.

+
Immutable
array_replace(array: bool[], toreplace: bool, replacewith: bool) → bool[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: bytes[], toreplace: bytes, replacewith: bytes) → bytes[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: date[], toreplace: date, replacewith: date) → date[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: decimal[], toreplace: decimal, replacewith: decimal) → decimal[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: float[], toreplace: float, replacewith: float) → float[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: inet[], toreplace: inet, replacewith: inet) → inet[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: int[], toreplace: int, replacewith: int) → int[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: interval[], toreplace: interval, replacewith: interval) → interval[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: string[], toreplace: string, replacewith: string) → string[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: time[], toreplace: time, replacewith: time) → time[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timestamp[], toreplace: timestamp, replacewith: timestamp) → timestamp[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timestamptz[], toreplace: timestamptz, replacewith: timestamptz) → timestamptz[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: uuid[], toreplace: uuid, replacewith: uuid) → uuid[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: anyenum[], toreplace: anyenum, replacewith: anyenum) → anyenum[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: box2d[], toreplace: box2d, replacewith: box2d) → box2d[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: geography[], toreplace: geography, replacewith: geography) → geography[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: geometry[], toreplace: geometry, replacewith: geometry) → geometry[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: jsonb[], toreplace: jsonb, replacewith: jsonb) → jsonb[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: oid[], toreplace: oid, replacewith: oid) → oid[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: pg_lsn[], toreplace: pg_lsn, replacewith: pg_lsn) → pg_lsn[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: refcursor[], toreplace: refcursor, replacewith: refcursor) → refcursor[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timetz[], toreplace: timetz, replacewith: timetz) → timetz[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: tuple[], toreplace: tuple, replacewith: tuple) → tuple[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: varbit[], toreplace: varbit, replacewith: varbit) → varbit[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_to_string(input: anyelement[], delim: string) → string

Join an array into a string with a delimiter.

+
Stable
array_to_string(input: anyelement[], delimiter: string, null: string) → string

Join an array into a string with a delimiter, replacing NULLs with a null string.

+
Stable
array_upper(input: anyelement[], array_dimension: int) → int

Calculates the maximum value of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
cardinality(input: anyelement[]) → int

Calculates the number of elements contained in input.

+
Immutable
jsonb_array_to_string_array(input: jsonb) → string[]

Convert a JSONB array into a string array.

+
Immutable
string_to_array(str: string, delimiter: string) → string[]

Split a string into components on a delimiter.

+
Immutable
string_to_array(str: string, delimiter: string, null: string) → string[]

Split a string into components on a delimiter with a specified string to consider NULL.

+
Immutable
+ +### BOOL functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
ilike_escape(unescaped: string, pattern: string, escape: string) → bool

Matches unescaped with pattern case-insensitively, using escape as an escape token.

+
Immutable
inet_contained_by_or_equals(val: inet, container: inet) → bool

Test for subnet inclusion or equality, using only the network parts of the addresses. The host part of the addresses is ignored.

+
Immutable
inet_contains_or_equals(container: inet, val: inet) → bool

Test for subnet inclusion or equality, using only the network parts of the addresses. The host part of the addresses is ignored.

+
Immutable
inet_same_family(val: inet, val: inet) → bool

Checks if two IP addresses are of the same IP family.

+
Immutable
like_escape(unescaped: string, pattern: string, escape: string) → bool

Matches unescaped with pattern using escape as an escape token.

+
Immutable
not_ilike_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped does not match pattern case-insensitively, using escape as an escape token.

+
Immutable
not_like_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped does not match pattern, using escape as an escape token.

+
Immutable
not_similar_to_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped does not match pattern, using escape as an escape token.

+
Immutable
+ +### Comparison functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
greatest(anyelement...) → anyelement

Returns the element with the greatest value.

+
Immutable
least(anyelement...) → anyelement

Returns the element with the lowest value.

+
Immutable
num_nonnulls(anyelement...) → int

Returns the number of nonnull arguments.

+
Immutable
num_nulls(anyelement...) → int

Returns the number of null arguments.

+
Immutable
+ +### Cryptographic functions + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
crypt(password: string, salt: string) → string

Generates a hash based on a password and salt. The hash algorithm and number of rounds if applicable are encoded in the salt.

+
Immutable
decrypt(data: bytes, key: bytes, type: string) → bytes

Decrypt data with key using the cipher method specified by type.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
decrypt_iv(data: bytes, key: bytes, iv: bytes, type: string) → bytes

Decrypt data with key using the cipher method specified by type. If the mode is CBC, the provided iv will be used. Otherwise, it will be ignored.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
digest(data: bytes, type: string) → bytes

Computes a binary hash of the given data. type is the algorithm to use (md5, sha1, sha224, sha256, sha384, or sha512).

+
Immutable
digest(data: string, type: string) → bytes

Computes a binary hash of the given data. type is the algorithm to use (md5, sha1, sha224, sha256, sha384, or sha512).

+
Immutable
encrypt(data: bytes, key: bytes, type: string) → bytes

Encrypt data with key using the cipher method specified by type.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
encrypt_iv(data: bytes, key: bytes, iv: bytes, type: string) → bytes

Encrypt data with key using the cipher method specified by type. If the mode is CBC, the provided iv will be used. Otherwise, it will be ignored.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
gen_random_bytes(count: int) → bytes

Returns count cryptographically strong random bytes. At most 1024 bytes can be extracted at a time.

+
Volatile
gen_salt(type: string) → string

Generates a salt for input into the crypt function using the default number of rounds.

+
Volatile
gen_salt(type: string, iter_count: int) → string

Generates a salt for input into the crypt function using iter_count number of rounds.

+
Volatile
hmac(data: bytes, key: bytes, type: string) → bytes

Calculates hashed MAC for data with key key. type is the same as in digest().

+
Immutable
hmac(data: string, key: string, type: string) → bytes

Calculates hashed MAC for data with key key. type is the same as in digest().

+
Immutable
+ +### DECIMAL functions + + + + + +
Function → ReturnsDescriptionVolatility
hlc_to_timestamp(hlc: decimal) → timestamptz

Returns a TimestampTZ representation of a CockroachDB HLC in decimal form.

+

Note that a TimestampTZ has less precision than a CockroachDB HLC. It is intended as +a convenience function to display HLCs in a print-friendly form. Use the decimal +value if you rely on the HLC for accuracy.

+
Immutable
+ +### Date and time functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
age(end: timestamptz, begin: timestamptz) → interval

Calculates the interval between begin and end, normalized into years, months and days.

+

Note this may not be an accurate time span since years and months are normalized +from days, and years and months are out of context. To avoid normalizing days into +months and years, use the timestamptz subtraction operator.

+
Immutable
age(val: timestamptz) → interval

Calculates the interval between val and the current time, normalized into years, months and days.

+

Note this may not be an accurate time span since years and months are normalized +from days, and years and months are out of context. To avoid normalizing days into +months and years, use now() - timestamptz.

+
Stable
clock_timestamp() → timestamp

Returns the current system time on one of the cluster nodes.

+
Volatile
clock_timestamp() → timestamptz

Returns the current system time on one of the cluster nodes.

+
Volatile
current_date() → date

Returns the date of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
current_timestamp(precision: int) → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp(precision: int) → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp(precision: int) → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
date_part(element: string, input: date) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: interval) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, +month, day, hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: time) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: timestamp) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: timestamptz) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Stable
date_part(element: string, input: timetz) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Immutable
date_trunc(element: string, input: date) → timestamptz

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: interval) → interval

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: time) → interval

Truncates input to precision element. Sets all fields that are less +significant than element to zero.

+

Compatible elements: hour, minute, second, millisecond, microsecond.

+
Immutable
date_trunc(element: string, input: timestamp) → timestamp

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Immutable
date_trunc(element: string, input: timestamptz) → timestamptz

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: timestamptz, timezone: string) → timestamptz

Truncates input to precision element in the specified timezone. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
experimental_follower_read_timestamp() → timestamptz

Same as follower_read_timestamp. This name is deprecated.

+
Volatile
experimental_strftime(input: date, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strftime(input: timestamp, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strftime(input: timestamptz, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strptime(input: string, format: string) → timestamptz

Returns input as a timestamptz using format (which uses standard strptime formatting).

+
Immutable
extract(element: string, input: date) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: interval) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, +month, day, hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: time) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: timestamp) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: timestamptz) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Stable
extract(element: string, input: timetz) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Immutable
extract_duration(element: string, input: interval) → int

Extracts element from input. +Compatible elements: hour, minute, second, millisecond, microsecond. +This is deprecated in favor of extract which supports duration.

+
Immutable
follower_read_timestamp() → timestamptz

Returns a timestamp which is very likely to be safe to perform +against a follower replica.

+

This function is intended to be used with an AS OF SYSTEM TIME clause to perform +historical reads against a time which is recent but sufficiently old for reads +to be performed against the closest replica as opposed to the current leaseholder for a given range.

+

Note that this function requires an enterprise license on a CCL distribution to +return a result that is less likely the closest replica. It is otherwise +hardcoded as -4.8s from the statement time, which may not result in reading from the +nearest replica.

+
Volatile
localtimestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtimestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp(precision: int) → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp(precision: int) → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtimestamp(precision: int) → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
make_date(year: int, month: int, day: int) → date

Create date (formatted according to ISO 8601) from year, month, and day fields (negative years signify BC).

+
Immutable
make_timestamp(year: int, month: int, day: int, hour: int, min: int, sec: float) → timestamp

Create timestamp (formatted according to ISO 8601) from year, month, day, hour, minute, and seconds fields (negative years signify BC).

+
Immutable
make_timestamptz(year: int, month: int, day: int, hour: int, min: int, sec: float) → timestamptz

Create timestamp (formatted according to ISO 8601) with time zone from year, month, day, hour, minute and seconds fields (negative years signify BC). If timezone is not specified, the current time zone is used.

+
Stable
make_timestamptz(year: int, month: int, day: int, hour: int, min: int, sec: float, timezone: string) → timestamptz

Create timestamp (formatted according to ISO 8601) with time zone from year, month, day, hour, minute and seconds fields (negative years signify BC). If timezone is not specified, the current time zone is used.

+
Stable
now() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
now() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
now() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
overlaps(s1: date, e1: date, s2: date, e2: date) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: date, e1: interval, s2: date, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: time, e1: interval, s2: time, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: time, e1: time, s2: time, e2: time) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamp, e1: interval, s2: timestamp, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamp, e1: timestamp, s2: timestamp, e2: timestamp) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamptz, e1: interval, s2: timestamptz, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Stable
overlaps(s1: timestamptz, e1: timestamptz, s2: timestamptz, e2: timestamptz) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timetz, e1: interval, s2: timetz, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timetz, e1: timetz, s2: timetz, e2: timetz) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
statement_timestamp() → timestamp

Returns the start time of the current statement.

+
Stable
statement_timestamp() → timestamptz

Returns the start time of the current statement.

+
Stable
strftime(input: date, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strftime(input: timestamp, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strftime(input: timestamptz, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strptime(input: string, format: string) → timestamptz

Returns input as a timestamptz using format (which uses standard strptime formatting).

+
Immutable
timeofday() → string

Returns the current system time on one of the cluster nodes as a string.

+
Stable
timezone(timezone: string, time: time) → timetz

Treat given time without time zone as located in the specified time zone.

+
Stable
timezone(timezone: string, timestamp: timestamp) → timestamptz

Treat given time stamp without time zone as located in the specified time zone.

+
Immutable
timezone(timezone: string, timestamptz: timestamptz) → timestamp

Convert given time stamp with time zone to the new time zone, with no time zone designation.

+
Immutable
timezone(timezone: string, timestamptz_string: string) → timestamp

Convert given time stamp with time zone to the new time zone, with no time zone designation.

+
Stable
timezone(timezone: string, timetz: timetz) → timetz

Convert given time with time zone to the new time zone.

+
Stable
to_char(date: date) → string

Convert a date to a string assuming the ISO, MDY DateStyle.

+
Immutable
to_char(date: date, format: string) → string

Convert a date to a string using the given format.

+
Stable
to_char(interval: interval) → string

Convert an interval to a string assuming the Postgres IntervalStyle.

+
Immutable
to_char(interval: interval, format: string) → string

Convert an interval to a string using the given format.

+
Stable
to_char(timestamp: timestamp) → string

Convert a timestamp to a string assuming the ISO, MDY DateStyle.

+
Immutable
to_char(timestamp: timestamp, format: string) → string

Convert a timestamp to a string using the given format.

+
Stable
to_char(timestamptz: timestamptz, format: string) → string

Convert a timestamp with time zone to a string using the given format.

+
Stable
to_timestamp(timestamp: float) → timestamptz

Convert Unix epoch (seconds since 1970-01-01 00:00:00+00) to timestamp with time zone.

+
Immutable
transaction_timestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
transaction_timestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
transaction_timestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
with_max_staleness(max_staleness: interval) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp within the staleness +bound that allows execution of the reads at the nearest available replica without blocking.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_max_staleness(max_staleness: interval, nearest_only: bool) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp within the staleness +bound that allows execution of the reads at the nearest available replica without blocking.

+

If nearest_only is set to true, reads that cannot be served using the nearest +available replica will error.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_min_timestamp(min_timestamp: timestamptz) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp before the min_timestamp +that allows execution of the reads at the nearest available replica without blocking.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_min_timestamp(min_timestamp: timestamptz, nearest_only: bool) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp before the min_timestamp +that allows execution of the reads at the nearest available replica without blocking.

+

If nearest_only is set to true, reads that cannot be served using the nearest +available replica will error.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
+ +### Enum functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
enum_first(val: anyenum) → anyenum

Returns the first value of the input enum type.

+
Stable
enum_last(val: anyenum) → anyenum

Returns the last value of the input enum type.

+
Stable
enum_range(lower: anyenum, upper: anyenum) → anyenum[]

Returns all values of the input enum in an ordered array between the two arguments (inclusive).

+
Stable
enum_range(val: anyenum) → anyenum[]

Returns all values of the input enum in an ordered array.

+
Stable
+ +### FLOAT functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
abs(val: decimal) → decimal

Calculates the absolute value of val.

+
Immutable
abs(val: float) → float

Calculates the absolute value of val.

+
Immutable
abs(val: int) → int

Calculates the absolute value of val.

+
Immutable
acos(val: float) → float

Calculates the inverse cosine of val.

+
Immutable
acosd(val: float) → float

Calculates the inverse cosine of val with the result in degrees

+
Immutable
acosh(val: float) → float

Calculates the inverse hyperbolic cosine of val.

+
Immutable
asin(val: float) → float

Calculates the inverse sine of val.

+
Immutable
asind(val: float) → float

Calculates the inverse sine of val with the result in degrees.

+
Immutable
asinh(val: float) → float

Calculates the inverse hyperbolic sine of val.

+
Immutable
atan(val: float) → float

Calculates the inverse tangent of val.

+
Immutable
atan2(x: float, y: float) → float

Calculates the inverse tangent of x/y.

+
Immutable
atan2d(x: float, y: float) → float

Calculates the inverse tangent of x/y with the result in degrees

+
Immutable
atand(val: float) → float

Calculates the inverse tangent of val with the result in degrees.

+
Immutable
atanh(val: float) → float

Calculates the inverse hyperbolic tangent of val.

+
Immutable
cbrt(val: decimal) → decimal

Calculates the cube root (∛) of val.

+
Immutable
cbrt(val: float) → float

Calculates the cube root (∛) of val.

+
Immutable
ceil(val: decimal) → decimal

Calculates the smallest integer not smaller than val.

+
Immutable
ceil(val: float) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceil(val: int) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: decimal) → decimal

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: float) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: int) → float

Calculates the smallest integer not smaller than val.

+
Immutable
cos(val: float) → float

Calculates the cosine of val.

+
Immutable
cosd(val: float) → float

Calculates the cosine of val where val is in degrees.

+
Immutable
cosh(val: float) → float

Calculates the hyperbolic cosine of val.

+
Immutable
cot(val: float) → float

Calculates the cotangent of val.

+
Immutable
cotd(val: float) → float

Calculates the cotangent of val where val is in degrees.

+
Immutable
degrees(val: float) → float

Converts val as a radian value to a degree value.

+
Immutable
div(x: decimal, y: decimal) → decimal

Calculates the integer quotient of x/y.

+
Immutable
div(x: float, y: float) → float

Calculates the integer quotient of x/y.

+
Immutable
div(x: int, y: int) → int

Calculates the integer quotient of x/y.

+
Immutable
exp(val: decimal) → decimal

Calculates e ^ val.

+
Immutable
exp(val: float) → float

Calculates e ^ val.

+
Immutable
floor(val: decimal) → decimal

Calculates the largest integer not greater than val.

+
Immutable
floor(val: float) → float

Calculates the largest integer not greater than val.

+
Immutable
floor(val: int) → float

Calculates the largest integer not greater than val.

+
Immutable
isnan(val: decimal) → bool

Returns true if val is NaN, false otherwise.

+
Immutable
isnan(val: float) → bool

Returns true if val is NaN, false otherwise.

+
Immutable
ln(val: decimal) → decimal

Calculates the natural log of val.

+
Immutable
ln(val: float) → float

Calculates the natural log of val.

+
Immutable
log(b: decimal, x: decimal) → decimal

Calculates the base b log of x.

+
Immutable
log(b: float, x: float) → float

Calculates the base b log of x.

+
Immutable
log(val: decimal) → decimal

Calculates the base 10 log of val.

+
Immutable
log(val: float) → float

Calculates the base 10 log of val.

+
Immutable
mod(x: decimal, y: decimal) → decimal

Calculates x%y.

+
Immutable
mod(x: float, y: float) → float

Calculates x%y.

+
Immutable
mod(x: int, y: int) → int

Calculates x%y.

+
Immutable
pi() → float

Returns the value for pi (3.141592653589793).

+
Immutable
pow(x: decimal, y: decimal) → decimal

Calculates x^y.

+
Immutable
pow(x: float, y: float) → float

Calculates x^y.

+
Immutable
pow(x: int, y: int) → int

Calculates x^y.

+
Immutable
power(x: decimal, y: decimal) → decimal

Calculates x^y.

+
Immutable
power(x: float, y: float) → float

Calculates x^y.

+
Immutable
power(x: int, y: int) → int

Calculates x^y.

+
Immutable
radians(val: float) → float

Converts val as a degree value to a radians value.

+
Immutable
random() → float

Returns a random floating-point number between 0 (inclusive) and 1 (exclusive). Note that the value contains at most 53 bits of randomness.

+
Volatile
round(input: decimal, decimal_accuracy: int) → decimal

Keeps decimal_accuracy number of figures to the right of the zero position in input using half away from zero rounding. If decimal_accuracy is not in the range -2^31…(2^31-1), the results are undefined.

+
Immutable
round(input: float, decimal_accuracy: int) → float

Keeps decimal_accuracy number of figures to the right of the zero position in input using half to even (banker’s) rounding.

+
Immutable
round(val: decimal) → decimal

Rounds val to the nearest integer, half away from zero: round(+/-2.4) = +/-2, round(+/-2.5) = +/-3.

+
Immutable
round(val: float) → float

Rounds val to the nearest integer using half to even (banker’s) rounding.

+
Immutable
setseed(seed: float) → void

Sets the seed for subsequent random() calls in this session (value between -1.0 and 1.0, inclusive). There are no guarantees as to how this affects the seed of random() calls that appear in the same query as setseed().

+
Volatile
sign(val: decimal) → decimal

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sign(val: float) → float

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sign(val: int) → int

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sin(val: float) → float

Calculates the sine of val.

+
Immutable
sind(val: float) → float

Calculates the sine of val where val is in degrees.

+
Immutable
sinh(val: float) → float

Calculates the hyperbolic sine of val.

+
Immutable
sqrt(val: decimal) → decimal

Calculates the square root of val.

+
Immutable
sqrt(val: float) → float

Calculates the square root of val.

+
Immutable
tan(val: float) → float

Calculates the tangent of val.

+
Immutable
tand(val: float) → float

Calculates the tangent of val where val is in degrees.

+
Immutable
tanh(val: float) → float

Calculates the hyperbolic tangent of val.

+
Immutable
trunc(val: decimal) → decimal

Truncates the decimal values of val.

+
Immutable
trunc(val: decimal, scale: int) → decimal

Truncate val to scale decimal places

+
Immutable
trunc(val: float) → float

Truncates the decimal values of val.

+
Immutable
+ +### Full Text Search functions + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
phraseto_tsquery(config: string, text: string) → tsquery

Converts text to a tsquery, normalizing words according to the specified configuration. The <-> operator is inserted between each token in the input.

+
Immutable
phraseto_tsquery(text: string) → tsquery

Converts text to a tsquery, normalizing words according to the default configuration. The <-> operator is inserted between each token in the input.

+
Stable
plainto_tsquery(config: string, text: string) → tsquery

Converts text to a tsquery, normalizing words according to the specified configuration. The & operator is inserted between each token in the input.

+
Immutable
plainto_tsquery(text: string) → tsquery

Converts text to a tsquery, normalizing words according to the default configuration. The & operator is inserted between each token in the input.

+
Stable
to_tsquery(config: string, text: string) → tsquery

Converts the input text into a tsquery by normalizing each word in the input according to the specified configuration. The input must already be formatted like a tsquery, in other words, subsequent tokens must be connected by a tsquery operator (&, |, <->, !).

+
Immutable
to_tsquery(text: string) → tsquery

Converts the input text into a tsquery by normalizing each word in the input according to the default configuration. The input must already be formatted like a tsquery, in other words, subsequent tokens must be connected by a tsquery operator (&, |, <->, !).

+
Stable
to_tsvector(config: string, text: string) → tsvector

Converts text to a tsvector, normalizing words according to the specified configuration. Position information is included in the result.

+
Immutable
to_tsvector(text: string) → tsvector

Converts text to a tsvector, normalizing words according to the default configuration. Position information is included in the result.

+
Stable
ts_parse(parser_name: string, document: string) → tuple{int AS tokid, string AS token}

ts_parse parses the given document and returns a series of records, one for each token produced by parsing. Each record includes a tokid showing the assigned token type and a token which is the text of the token.

+
Stable
ts_rank(vector: tsvector, query: tsquery) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(vector: tsvector, query: tsquery, normalization: int) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(weights: float[], vector: tsvector, query: tsquery) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(weights: float[], vector: tsvector, query: tsquery, normalization: int) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
+ +### Fuzzy String Matching functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
levenshtein(source: string, target: string) → int

Calculates the Levenshtein distance between two strings. Maximum input length is 255 characters.

+
Immutable
levenshtein(source: string, target: string, ins_cost: int, del_cost: int, sub_cost: int) → int

Calculates the Levenshtein distance between two strings. The cost parameters specify how much to charge for each edit operation. Maximum input length is 255 characters.

+
Immutable
metaphone(source: string, max_output_length: int) → string

Convert a string to its Metaphone code. Maximum input length is 255 characters

+
Immutable
soundex(source: string) → string

Convert a string to its Soundex code.

+
Immutable
+ +### ID generation functions + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
experimental_uuid_v4() → bytes

Returns a UUID.

+
Volatile
gen_random_ulid() → uuid

Generates a random ULID and returns it as a value of UUID type.

+
Volatile
gen_random_uuid() → uuid

Generates a random version 4 UUID, and returns it as a value of UUID type.

+
Volatile
unique_rowid() → int

Returns a unique ID used by CockroachDB to generate unique row IDs if a Primary Key isn’t defined for the table. The value is a combination of the insert timestamp and the ID of the node executing the statement, which guarantees this combination is globally unique. However, there can be gaps and the order is not completely guaranteed.

+
Volatile
unordered_unique_rowid() → int

Returns a unique ID. The value is a combination of the insert timestamp (bit-reversed) and the ID of the node executing the statement, which guarantees this combination is globally unique. The way it is generated is statistically likely to not have any ordering relative to previously generated values.

+
Volatile
uuid_generate_v1() → uuid

Generates a version 1 UUID, and returns it as a value of UUID type. To avoid exposing the server’s real MAC address, this uses a random MAC address and a timestamp. Essentially, this is an alias for uuid_generate_v1mc.

+
Volatile
uuid_generate_v1mc() → uuid

Generates a version 1 UUID, and returns it as a value of UUID type. This uses a random MAC address and a timestamp.

+
Volatile
uuid_generate_v3(namespace: uuid, name: string) → uuid

Generates a version 3 UUID in the given namespace using the specified input name, with md5 as the hashing method. The namespace should be one of the special constants produced by the uuid_ns_*() functions.

+
Immutable
uuid_generate_v4() → uuid

Generates a random version 4 UUID, and returns it as a value of UUID type.

+
Volatile
uuid_generate_v5(namespace: uuid, name: string) → uuid

Generates a version 5 UUID in the given namespace using the specified input name. This is similar to a version 3 UUID, except it uses SHA-1 for hashing.

+
Immutable
uuid_nil() → uuid

Returns a nil UUID constant.

+
Immutable
uuid_ns_dns() → uuid

Returns a constant designating the DNS namespace for UUIDs.

+
Immutable
uuid_ns_oid() → uuid

Returns a constant designating the ISO object identifier (OID) namespace for UUIDs. These are unrelated to the OID type used internally in the database.

+
Immutable
uuid_ns_url() → uuid

Returns a constant designating the URL namespace for UUIDs.

+
Immutable
uuid_ns_x500() → uuid

Returns a constant designating the X.500 distinguished name (DN) namespace for UUIDs.

+
Immutable
uuid_v4() → bytes

Returns a UUID.

+
Volatile
+ +### INET functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
abbrev(val: inet) → string

Converts the combined IP address and prefix length to an abbreviated display format as text. For INET types, this will omit the prefix length if it’s not the default (32 for IPv4, 128 for IPv6)

+

For example, abbrev('192.168.1.2/24') returns '192.168.1.2/24'

+
Immutable
broadcast(val: inet) → inet

Gets the broadcast address for the network address represented by the value.

+

For example, broadcast('192.168.1.2/24') returns '192.168.1.255/24'

+
Immutable
family(val: inet) → int

Extracts the IP family of the value; 4 for IPv4, 6 for IPv6.

+

For example, family('::1') returns 6

+
Immutable
host(val: inet) → string

Extracts the address part of the combined address/prefixlen value as text.

+

For example, host('192.168.1.2/16') returns '192.168.1.2'

+
Immutable
hostmask(val: inet) → inet

Creates an IP host mask corresponding to the prefix length in the value.

+

For example, hostmask('192.168.1.2/16') returns '0.0.255.255'

+
Immutable
masklen(val: inet) → int

Retrieves the prefix length stored in the value.

+

For example, masklen('192.168.1.2/16') returns 16

+
Immutable
netmask(val: inet) → inet

Creates an IP network mask corresponding to the prefix length in the value.

+

For example, netmask('192.168.1.2/16') returns '255.255.0.0'

+
Immutable
set_masklen(val: inet, prefixlen: int) → inet

Sets the prefix length of val to prefixlen.

+

For example, set_masklen('192.168.1.2', 16) returns '192.168.1.2/16'.

+
Immutable
+ +### INT functions + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
crc32c(bytes...) → int

Calculates the CRC-32 hash using the Castagnoli polynomial.

+
Leakproof
crc32c(string...) → int

Calculates the CRC-32 hash using the Castagnoli polynomial.

+
Leakproof
crc32ieee(bytes...) → int

Calculates the CRC-32 hash using the IEEE polynomial.

+
Leakproof
crc32ieee(string...) → int

Calculates the CRC-32 hash using the IEEE polynomial.

+
Leakproof
fnv32(bytes...) → int

Calculates the 32-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv32(string...) → int

Calculates the 32-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv32a(bytes...) → int

Calculates the 32-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv32a(string...) → int

Calculates the 32-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv64(bytes...) → int

Calculates the 64-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv64(string...) → int

Calculates the 64-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv64a(bytes...) → int

Calculates the 64-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv64a(string...) → int

Calculates the 64-bit FNV-1a hash value of a set of values.

+
Leakproof
width_bucket(operand: decimal, b1: decimal, b2: decimal, count: int) → int

return the bucket number to which operand would be assigned in a histogram having count equal-width buckets spanning the range b1 to b2.

+
Immutable
width_bucket(operand: int, b1: int, b2: int, count: int) → int

return the bucket number to which operand would be assigned in a histogram having count equal-width buckets spanning the range b1 to b2.

+
Immutable
width_bucket(operand: anyelement, thresholds: anyelement[]) → int

return the bucket number to which operand would be assigned given an array listing the lower bounds of the buckets; returns 0 for an input less than the first lower bound; the thresholds array must be sorted, smallest first, or unexpected results will be obtained

+
Immutable
+ +### JSONB functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_to_json(array: anyelement[]) → jsonb

Returns the array as JSON or JSONB.

+
Stable
array_to_json(array: anyelement[], pretty_bool: bool) → jsonb

Returns the array as JSON or JSONB.

+
Stable
json_array_elements(input: jsonb) → jsonb

Expands a JSON array to a set of JSON values.

+
Immutable
json_array_elements_text(input: jsonb) → string

Expands a JSON array to a set of text values.

+
Immutable
json_array_length(json: jsonb) → int

Returns the number of elements in the outermost JSON or JSONB array.

+
Immutable
json_build_array(anyelement...) → jsonb

Builds a possibly-heterogeneously-typed JSON or JSONB array out of a variadic argument list.

+
Stable
json_build_object(anyelement...) → jsonb

Builds a JSON object out of a variadic argument list.

+
Stable
json_each(input: jsonb) → tuple{string AS key, jsonb AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs.

+
Immutable
json_each_text(input: jsonb) → tuple{string AS key, string AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs. The returned values will be of type text.

+
Immutable
json_extract_path(jsonb, string...) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
json_extract_path_text(jsonb, string...) → string

Returns the JSON value as text pointed to by the variadic arguments.

+
Immutable
json_object(keys: string[], values: string[]) → jsonb

This form of json_object takes keys and values pairwise from two separate arrays. In all other respects it is identical to the one-argument form.

+
Immutable
json_object(texts: string[]) → jsonb

Builds a JSON or JSONB object out of a text array. The array must have exactly one dimension with an even number of members, in which case they are taken as alternating key/value pairs.

+
Immutable
json_populate_record(base: anyelement, from_json: jsonb) → anyelement

Expands the object in from_json to a row whose columns match the record type defined by base.

+
Stable
json_populate_recordset(base: anyelement, from_json: jsonb) → anyelement

Expands the outermost array of objects in from_json to a set of rows whose columns match the record type defined by base

+
Stable
json_remove_path(val: jsonb, path: string[]) → jsonb

Remove the specified path from the JSON object.

+
Immutable
json_set(val: jsonb, path: string[], to: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
json_set(val: jsonb, path: string[], to: jsonb, create_missing: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If create_missing is false, new keys will not be inserted to objects and values will not be prepended or appended to arrays.

+
Immutable
json_strip_nulls(from_json: jsonb) → jsonb

Returns from_json with all object fields that have null values omitted. Other null values are untouched.

+
Immutable
json_typeof(val: jsonb) → string

Returns the type of the outermost JSON value as a text string.

+
Immutable
json_valid(string: string) → bool

Returns whether the given string is a valid JSON or not

+
Immutable
jsonb_array_elements(input: jsonb) → jsonb

Expands a JSON array to a set of JSON values.

+
Immutable
jsonb_array_elements_text(input: jsonb) → string

Expands a JSON array to a set of text values.

+
Immutable
jsonb_array_length(json: jsonb) → int

Returns the number of elements in the outermost JSON or JSONB array.

+
Immutable
jsonb_build_array(anyelement...) → jsonb

Builds a possibly-heterogeneously-typed JSON or JSONB array out of a variadic argument list.

+
Stable
jsonb_build_object(anyelement...) → jsonb

Builds a JSON object out of a variadic argument list.

+
Stable
jsonb_each(input: jsonb) → tuple{string AS key, jsonb AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs.

+
Immutable
jsonb_each_text(input: jsonb) → tuple{string AS key, string AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs. The returned values will be of type text.

+
Immutable
jsonb_exists_any(json: jsonb, array: string[]) → bool

Returns whether any of the strings in the text array exist as top-level keys or array elements

+
Immutable
jsonb_extract_path(jsonb, string...) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
jsonb_extract_path_text(jsonb, string...) → string

Returns the JSON value as text pointed to by the variadic arguments.

+
Immutable
jsonb_insert(target: jsonb, path: string[], new_val: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments. new_val will be inserted before path target.

+
Immutable
jsonb_insert(target: jsonb, path: string[], new_val: jsonb, insert_after: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If insert_after is true (default is false), new_val will be inserted after path target.

+
Immutable
jsonb_object(keys: string[], values: string[]) → jsonb

This form of json_object takes keys and values pairwise from two separate arrays. In all other respects it is identical to the one-argument form.

+
Immutable
jsonb_object(texts: string[]) → jsonb

Builds a JSON or JSONB object out of a text array. The array must have exactly one dimension with an even number of members, in which case they are taken as alternating key/value pairs.

+
Immutable
jsonb_populate_record(base: anyelement, from_json: jsonb) → anyelement

Expands the object in from_json to a row whose columns match the record type defined by base.

+
Stable
jsonb_populate_recordset(base: anyelement, from_json: jsonb) → anyelement

Expands the outermost array of objects in from_json to a set of rows whose columns match the record type defined by base

+
Stable
jsonb_pretty(val: jsonb) → string

Returns the given JSON value as a STRING indented and with newlines.

+
Immutable
jsonb_set(val: jsonb, path: string[], to: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
jsonb_set(val: jsonb, path: string[], to: jsonb, create_missing: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If create_missing is false, new keys will not be inserted to objects and values will not be prepended or appended to arrays.

+
Immutable
jsonb_strip_nulls(from_json: jsonb) → jsonb

Returns from_json with all object fields that have null values omitted. Other null values are untouched.

+
Immutable
jsonb_typeof(val: jsonb) → string

Returns the type of the outermost JSON value as a text string.

+
Immutable
row_to_json(row: tuple) → jsonb

Returns the row as a JSON object.

+
Stable
to_json(val: anyelement) → jsonb

Returns the value as JSON or JSONB.

+
Stable
to_jsonb(val: anyelement) → jsonb

Returns the value as JSON or JSONB.

+
Stable
+ +### Multi-region functions + + + + + + + +
Function → ReturnsDescriptionVolatility
default_to_database_primary_region(val: string) → string

Returns the given region if the region has been added to the current database. +Otherwise, this will return the primary region of the current database. +This will error if the current database is not a multi-region database.

+
Stable
gateway_region() → string

Returns the region of the connection’s current node as defined by +the locality flag on node startup. Returns an error if no region is set.

+
Stable
rehome_row() → string

Returns the region of the connection’s current node as defined by +the locality flag on node startup. Returns an error if no region is set.

+
Stable
+ +### PGVector functions + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cosine_distance(v1: vector, v2: vector) → float

Returns the cosine distance between the two vectors.

+
Immutable
inner_product(v1: vector, v2: vector) → float

Returns the inner product between the two vectors.

+
Immutable
l1_distance(v1: vector, v2: vector) → float

Returns the Manhattan distance between the two vectors.

+
Immutable
l2_distance(v1: vector, v2: vector) → float

Returns the Euclidean distance between the two vectors.

+
Immutable
vector_dims(vector: vector) → int

Returns the number of the dimensions in the vector.

+
Immutable
vector_norm(vector: vector) → float

Returns the Euclidean norm of the vector.

+
Immutable
+ +### STRING[] functions + + + + + + +
Function → ReturnsDescriptionVolatility
regexp_split_to_array(string: string, pattern: string) → string[]

Split string using a POSIX regular expression as the delimiter.

+
Immutable
regexp_split_to_array(string: string, pattern: string, flags: string) → string[]

Split string using a POSIX regular expression as the delimiter with flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
+ +### Sequence functions + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
currval(sequence_name: string) → int

Returns the latest value obtained with nextval for this sequence in this session.

+
Volatile
currval(sequence_name: regclass) → int

Returns the latest value obtained with nextval for this sequence in this session.

+
Volatile
lastval() → int

Return value most recently obtained with nextval in this session.

+
Volatile
nextval(sequence_name: string) → int

Advances the given sequence and returns its new value.

+
Volatile
nextval(sequence_name: regclass) → int

Advances the given sequence and returns its new value.

+
Volatile
setval(sequence_name: string, value: int) → int

Set the given sequence’s current value. The next call to nextval will return value + Increment

+
Volatile
setval(sequence_name: string, value: int, is_called: bool) → int

Set the given sequence’s current value. If is_called is false, the next call to nextval will return value; otherwise value + Increment.

+
Volatile
setval(sequence_name: regclass, value: int) → int

Set the given sequence’s current value. The next call to nextval will return value + Increment

+
Volatile
setval(sequence_name: regclass, value: int, is_called: bool) → int

Set the given sequence’s current value. If is_called is false, the next call to nextval will return value; otherwise value + Increment.

+
Volatile
+ +### Set-returning functions + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
aclexplode(aclitems: string[]) → tuple{oid AS grantor, oid AS grantee, string AS privilege_type, bool AS is_grantable}

Produces a virtual table containing aclitem stuff (returns no rows as this feature is unsupported in CockroachDB)

+
Stable
generate_series(start: int, end: int) → int

Produces a virtual table containing the integer values from start to end, inclusive.

+
Immutable
generate_series(start: int, end: int, step: int) → int

Produces a virtual table containing the integer values from start to end, inclusive, by increment of step.

+
Immutable
generate_series(start: timestamp, end: timestamp, step: interval) → timestamp

Produces a virtual table containing the timestamp values from start to end, inclusive, by increment of step.

+
Immutable
generate_series(start: timestamptz, end: timestamptz, step: interval) → timestamptz

Produces a virtual table containing the timestampTZ values from start to end, inclusive, by increment of step.

+
Immutable
generate_subscripts(array: anyelement[]) → int

Returns a series comprising the given array’s subscripts.

+
Immutable
generate_subscripts(array: anyelement[], dim: int) → int

Returns a series comprising the given array’s subscripts.

+
Immutable
generate_subscripts(array: anyelement[], dim: int, reverse: bool) → int

Returns a series comprising the given array’s subscripts.

+

When reverse is true, the series is returned in reverse order.

+
Immutable
information_schema._pg_expandarray(input: anyelement[]) → tuple{anyelement AS x, int AS n}

Returns the input array as a set of rows with an index

+
Immutable
json_object_keys(input: jsonb) → string

Returns sorted set of keys in the outermost JSON object.

+
Immutable
json_to_record(input: jsonb) → tuple

Builds an arbitrary record from a JSON object.

+
Stable
json_to_recordset(input: jsonb) → tuple

Builds an arbitrary set of records from a JSON array of objects.

+
Stable
jsonb_object_keys(input: jsonb) → string

Returns sorted set of keys in the outermost JSON object.

+
Immutable
jsonb_to_record(input: jsonb) → tuple

Builds an arbitrary record from a JSON object.

+
Stable
jsonb_to_recordset(input: jsonb) → tuple

Builds an arbitrary set of records from a JSON array of objects.

+
Stable
pg_get_keywords() → tuple{string AS word, string AS catcode, string AS catdesc}

Produces a virtual table containing the keywords known to the SQL parser.

+
Immutable
pg_options_to_table(options: string[]) → tuple{string AS option_name, string AS option_value}

Converts the options array format to a table.

+
Stable
regexp_split_to_table(string: string, pattern: string) → string

Split string using a POSIX regular expression as the delimiter.

+
Immutable
regexp_split_to_table(string: string, pattern: string, flags: string) → string

Split string using a POSIX regular expression as the delimiter with flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
unnest(anyelement[], anyelement[], anyelement[]...) → tuple{anyelement AS unnest, anyelement AS unnest, anyelement AS unnest}

Returns the input arrays as a set of rows

+
Immutable
unnest(input: anyelement[]) → anyelement

Returns the input array as a set of rows

+
Immutable
workload_index_recs() → string

Returns set of index recommendations

+
Immutable
workload_index_recs(timestamptz: timestamptz) → string

Returns set of index recommendations

+
Immutable
+ +### Spatial functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
_st_contains(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no points of geometry_b lie in the exterior of geometry_a, and there is at least one point in the interior of geometry_b that lies in the interior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_containsproperly(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_b intersects the interior of geometry_a but not the boundary or exterior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_coveredby(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_a is outside geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_coveredby(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_covers(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_b is outside geography_a.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_covers(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_crosses(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a has some - but not all - interior points in common with geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dfullywithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, inclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than or equal to distance units.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dfullywithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, exclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than distance units.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_equals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is spatially equal to geometry_b, i.e. ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = true.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_intersects(geography_a: geography, geography_b: geography) → bool

Returns true if geography_a shares any portion of space with geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_intersects(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_overlaps(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a intersects but does not completely contain geometry_b, or vice versa. “Does not completely” implies ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = false.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_touches(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if the only points in common between geometry_a and geometry_b are on the boundary. Note points do not touch other points.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_within(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is completely inside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
addgeometrycolumn(catalog_name: string, schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(catalog_name: string, schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
geometrytype(geometry: geometry) → string

Returns the type of geometry as a string.

+

This function utilizes the GEOS module.

+
Immutable
geomfromewkb(val: bytes) → geometry

Returns the Geometry from an EWKB representation.

+
Immutable
geomfromewkt(val: string) → geometry

Returns the Geometry from an EWKT representation.

+
Immutable
postgis_addbbox(geometry: geometry) → geometry

Compatibility placeholder function with PostGIS. This does not perform any operation on the Geometry.

+
Immutable
postgis_dropbbox(geometry: geometry) → geometry

Compatibility placeholder function with PostGIS. This does not perform any operation on the Geometry.

+
Immutable
postgis_extensions_upgrade() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_full_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_geos_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_getbbox(geometry: geometry) → box2d

Returns a box2d encapsulating the given Geometry.

+
Immutable
postgis_hasbbox(geometry: geometry) → bool

Returns whether a given Geometry has a bounding box. False for points and empty geometries; always true otherwise.

+
Immutable
postgis_lib_build_date() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_lib_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_liblwgeom_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_libxml_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_proj_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_build_date() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_installed() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_released() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_wagyu_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
st_addmeasure(geometry: geometry, start: float, end: float) → geometry

Returns a copy of a LineString or MultiLineString with measure coordinates linearly interpolated between the specified start and end values. Any existing M coordinates will be overwritten.

+
Immutable
st_addpoint(line_string: geometry, point: geometry) → geometry

Adds a Point to the end of a LineString.

+
Immutable
st_addpoint(line_string: geometry, point: geometry, index: int) → geometry

Adds a Point to a LineString at the given 0-based index (-1 to append).

+
Immutable
st_affine(geometry: geometry, a: float, b: float, c: float, d: float, e: float, f: float, g: float, h: float, i: float, x_off: float, y_off: float, z_off: float) → geometry

Applies a 3D affine transformation to the given geometry.

+

The matrix transformation will be applied as follows for each coordinate: +/ a b c x_off \ / x
+| d e f y_off | | y | +| g h i z_off | | z | +\ 0 0 0 1 / \ 0 /

+
Immutable
st_affine(geometry: geometry, a: float, b: float, d: float, e: float, x_off: float, y_off: float) → geometry

Applies a 2D affine transformation to the given geometry.

+

The matrix transformation will be applied as follows for each coordinate: +/ a b x_off \ / x
+| d e y_off | | y | +\ 0 0 1 / \ 0 /

+
Immutable
st_angle(line1: geometry, line2: geometry) → float

Returns the clockwise angle between two LINESTRING geometries, treating them as vectors between their start- and endpoints. Returns NULL if any vectors have 0 length.

+
Immutable
st_angle(point1: geometry, point2: geometry, point3: geometry) → float

Returns the clockwise angle between the vectors formed by point2,point1 and point2,point3. The arguments must be POINT geometries. Returns NULL if any vectors have 0 length.

+
Immutable
st_angle(point1: geometry, point2: geometry, point3: geometry, point4: geometry) → float

Returns the clockwise angle between the vectors formed by point1,point2 and point3,point4. The arguments must be POINT geometries. Returns NULL if any vectors have 0 length.

+
Immutable
st_area(geography: geography) → float

Returns the area of the given geography in meters^2. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_area(geography: geography, use_spheroid: bool) → float

Returns the area of the given geography in meters^2.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_area(geometry: geometry) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_area(geometry_str: string) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_area2d(geometry: geometry) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_asbinary(geography: geography) → bytes

Returns the WKB representation of a given Geography.

+
Immutable
st_asbinary(geography: geography, xdr_or_ndr: string) → bytes

Returns the WKB representation of a given Geography. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_asbinary(geometry: geometry) → bytes

Returns the WKB representation of a given Geometry.

+
Immutable
st_asbinary(geometry: geometry, xdr_or_ndr: string) → bytes

Returns the WKB representation of a given Geometry. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_asencodedpolyline(geometry: geometry) → string

Returns the geometry as an Encoded Polyline. +This format is used by Google Maps with precision=5 and by Open Source Routing Machine with precision=5 and 6. +Preserves 5 decimal places.

+
Immutable
st_asencodedpolyline(geometry: geometry, precision: int4) → string

Returns the geometry as an Encoded Polyline. +This format is used by Google Maps with precision=5 and by Open Source Routing Machine with precision=5 and 6. +Precision specifies how many decimal places will be preserved in Encoded Polyline. Value should be the same on encoding and decoding, or coordinates will be incorrect.

+
Immutable
st_asewkb(geography: geography) → bytes

Returns the EWKB representation of a given Geography.

+
Immutable
st_asewkb(geometry: geometry) → bytes

Returns the EWKB representation of a given Geometry.

+
Immutable
st_asewkt(geography: geography) → string

Returns the EWKT representation of a given Geography. A default of 15 decimal digits is used.

+
Immutable
st_asewkt(geography: geography, max_decimal_digits: int) → string

Returns the EWKT representation of a given Geography. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_asewkt(geometry: geometry) → string

Returns the EWKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+
Immutable
st_asewkt(geometry: geometry, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_asewkt(geometry_str: string) → string

Returns the EWKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asewkt(geometry_str: string, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geography: geography) → string

Returns the GeoJSON representation of a given Geography. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(geography: geography, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geography with max_decimal_digits output for each coordinate value.

+
Immutable
st_asgeojson(geography: geography, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geography with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option (default for Geography)
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326
  • +
+
Immutable
st_asgeojson(geometry: geometry) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(geometry: geometry, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+
Immutable
st_asgeojson(geometry: geometry, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326 (default for Geometry)
  • +
+
Immutable
st_asgeojson(geometry_str: string) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geometry_str: string, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geometry_str: string, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326 (default for Geometry)
  • +
+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(row: tuple) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(row: tuple, geo_column: string) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. Coordinates have a maximum of 9 decimal digits.

+
Stable
st_asgeojson(row: tuple, geo_column: string, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. max_decimal_digits will be output for each coordinate value.

+
Stable
st_asgeojson(row: tuple, geo_column: string, max_decimal_digits: int, pretty: bool) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. max_decimal_digits will be output for each coordinate value. Output will be pretty printed in JSON if pretty is true.

+
Stable
st_ashexewkb(geography: geography) → string

Returns the EWKB representation in hex of a given Geography.

+
Immutable
st_ashexewkb(geography: geography, xdr_or_ndr: string) → string

Returns the EWKB representation in hex of a given Geography. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_ashexewkb(geometry: geometry) → string

Returns the EWKB representation in hex of a given Geometry.

+
Immutable
st_ashexewkb(geometry: geometry, xdr_or_ndr: string) → string

Returns the EWKB representation in hex of a given Geometry. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_ashexwkb(geography: geography) → string

Returns the WKB representation in hex of a given Geography.

+
Immutable
st_ashexwkb(geometry: geometry) → string

Returns the WKB representation in hex of a given Geometry.

+
Immutable
st_askml(geography: geography) → string

Returns the KML representation of a given Geography.

+
Immutable
st_askml(geometry: geometry) → string

Returns the KML representation of a given Geometry.

+
Immutable
st_askml(geometry_str: string) → string

Returns the KML representation of a given Geometry.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds. +Uses 256 as the buffer size in tile coordinate space for geometry clipping. +Uses 4096 as the tile extent size in tile coordinate space.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be clipped and transformed.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds. +Uses 256 as the buffer size in tile coordinate space for geometry clipping.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be clipped and transformed.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int, buffer: int) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be clipped and transformed.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int, buffer: int, clip: bool) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds if required.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be transformed, and clipped if required.

+
Immutable
st_astext(geography: geography) → string

Returns the WKT representation of a given Geography. A default of 15 decimal digits is used.

+
Immutable
st_astext(geography: geography, max_decimal_digits: int) → string

Returns the WKT representation of a given Geography. The max_decimal_digits parameter controls the maximum decimal digits to print after the decimal point. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_astext(geometry: geometry) → string

Returns the WKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+
Immutable
st_astext(geometry: geometry, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the decimal point. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_astext(geometry_str: string) → string

Returns the WKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_astext(geometry_str: string, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the decimal point. Use -1 to print as many digits as required to rebuild the same number.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int, precision_z: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int, precision_z: int, precision_m: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_azimuth(geography_a: geography, geography_b: geography) → float

Returns the azimuth in radians of the segment defined by the given point geographies, or NULL if the two points are coincident. It is solved using the Inverse geodesic problem.

+

The azimuth angle is referenced from north, and is positive clockwise: North = 0; East = π/2; South = π; West = 3π/2.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_azimuth(geometry_a: geometry, geometry_b: geometry) → float

Returns the azimuth in radians of the segment defined by the given point geometries, or NULL if the two points are coincident.

+

The azimuth angle is referenced from north, and is positive clockwise: North = 0; East = π/2; South = π; West = 3π/2.

+
Immutable
st_bdpolyfromtext(str: string, srid: int) → geometry

Returns a Polygon from multilinestring WKT with a SRID. If the input is not a multilinestring an error will be thrown.

+
Immutable
st_boundary(geometry: geometry) → geometry

Returns the closure of the combinatorial boundary of this Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_box2dfromgeohash(geohash: string) → box2d

Return a Box2D from a GeoHash string with max precision.

+
Immutable
st_box2dfromgeohash(geohash: string, precision: int) → box2d

Return a Box2D from a GeoHash string with supplied precision.

+
Immutable
st_buffer(geography: geography, distance: float) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geography: geography, distance: float, buffer_style_params: string) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geography: geography, distance: float, quad_segs: int) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geometry: geometry, distance: decimal) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float, buffer_style_params: string) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float, quad_segs: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry_str: string, distance: decimal) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float, buffer_style_params: string) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float, quad_segs: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_centroid(geography: geography) → geography

Returns the centroid of given geography. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_centroid(geography: geography, use_spheroid: bool) → geography

Returns the centroid of given geography.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_centroid(geometry: geometry) → geometry

Returns the centroid of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_centroid(geometry_str: string) → geometry

Returns the centroid of the given geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_clipbybox2d(geometry: geometry, box2d: box2d) → geometry

Clips the geometry to conform to the bounding box specified by box2d.

+
Immutable
st_closestpoint(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the 2-dimensional point on geometry_a that is closest to geometry_b. This is the first point of the shortest line.

+
Immutable
st_collectionextract(geometry: geometry, type: int) → geometry

Given a collection, returns a multitype consisting only of elements of the specified type. If there are no elements of the given type, an EMPTY geometry is returned. Types are specified as 1=POINT, 2=LINESTRING, 3=POLYGON - other types are not supported.

+
Immutable
st_collectionhomogenize(geometry: geometry) → geometry

Returns the “simplest” representation of a collection’s contents. Collections of a single type will be returned as an appropriate multitype, or a singleton if it only contains a single geometry.

+
Immutable
st_combinebbox(box2d: box2d, geometry: geometry) → box2d

Combines the current bounding box with the bounding box of the Geometry.

+
Immutable
st_contains(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no points of geometry_b lie in the exterior of geometry_a, and there is at least one point in the interior of geometry_b that lies in the interior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_containsproperly(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_b intersects the interior of geometry_a but not the boundary or exterior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_convexhull(geometry: geometry) → geometry

Returns a geometry that represents the Convex Hull of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_coorddim(geometry: geometry) → int

Returns the number of coordinate dimensions of a given Geometry.

+
Immutable
st_coveredby(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_a is outside geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_coveredby(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_coveredby(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_covers(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_b is outside geography_a.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_covers(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_covers(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_crosses(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a has some - but not all - interior points in common with geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dfullywithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, inclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than or equal to distance units.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dfullywithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, exclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than distance units.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_difference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the difference of two Geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_dimension(geometry: geometry) → int

Returns the number of topological dimensions of a given Geometry.

+
Immutable
st_disjoint(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a does not overlap, touch or is within geometry_b.

+

This function utilizes the GEOS module.

+
Immutable
st_distance(geography_a: geography, geography_b: geography) → float

Returns the distance in meters between geography_a and geography_b. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_distance(geography_a: geography, geography_b: geography, use_spheroid: bool) → float

Returns the distance in meters between geography_a and geography_b.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_distance(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance between the given geometries.

+
Immutable
st_distance(geometry_a_str: string, geometry_b_str: string) → float

Returns the distance between the given geometries.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_distancesphere(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance in meters between geometry_a and geometry_b assuming the coordinates represent lng/lat points on a sphere.

+

This function utilizes the S2 library for spherical calculations.

+
Immutable
st_distancespheroid(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance in meters between geometry_a and geometry_b assuming the coordinates represent lng/lat points on a spheroid.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_dwithin(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geometry_a_str: string, geometry_b_str: string, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geometry_a_str: string, geometry_b_str: string, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_endpoint(geometry: geometry) → geometry

Returns the last point of a geometry which has shape LineString. Returns NULL if the geometry is not a LineString.

+
Immutable
st_envelope(box2d: box2d) → geometry

Returns a bounding geometry for the given box.

+
Immutable
st_envelope(geometry: geometry) → geometry

Returns a bounding envelope for the given geometry.

+

For geometries which have a POINT or LINESTRING bounding box (i.e. is a single point +or a horizontal or vertical line), a POINT or LINESTRING is returned. Otherwise, the +returned POLYGON will be ordered Bottom Left, Top Left, Top Right, Bottom Right, +Bottom Left.

+
Immutable
st_equals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is spatially equal to geometry_b, i.e. ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = true.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_estimatedextent(schema_name: string, table_name: string, geocolumn_name: string) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+
Stable
st_estimatedextent(schema_name: string, table_name: string, geocolumn_name: string, parent_only: bool) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+

The parent_only boolean is always ignored.

+
Stable
st_estimatedextent(table_name: string, geocolumn_name: string) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+
Stable
st_expand(box2d: box2d, delta: float) → box2d

Extends the box2d by delta units across all dimensions.

+
Immutable
st_expand(box2d: box2d, delta_x: float, delta_y: float) → box2d

Extends the box2d by delta_x units in the x dimension and delta_y units in the y dimension.

+
Immutable
st_expand(geometry: geometry, delta: float) → geometry

Extends the bounding box represented by the geometry by delta units across all dimensions, returning a Polygon representing the new bounding box.

+
Immutable
st_expand(geometry: geometry, delta_x: float, delta_y: float) → geometry

Extends the bounding box represented by the geometry by delta_x units in the x dimension and delta_y units in the y dimension, returning a Polygon representing the new bounding box.

+
Immutable
st_exteriorring(geometry: geometry) → geometry

Returns the exterior ring of a Polygon as a LineString. Returns NULL if the shape is not a Polygon.

+
Immutable
st_flipcoordinates(geometry: geometry) → geometry

Returns a new geometry with the X and Y axes flipped.

+
Immutable
st_force2d(geometry: geometry) → geometry

Returns a Geometry that is forced into XY layout with any Z or M dimensions discarded.

+
Immutable
st_force3d(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3d(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3dm(geometry: geometry) → geometry

Returns a Geometry that is forced into XYM layout. If a M coordinate doesn’t exist, it will be set to 0. If a Z coordinate is present, it will be discarded.

+
Immutable
st_force3dm(geometry: geometry, defaultM: float) → geometry

Returns a Geometry that is forced into XYM layout. If a M coordinate doesn’t exist, it will be set to the specified default M value. If a Z coordinate is present, it will be discarded.

+
Immutable
st_force3dz(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3dz(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate is present, it will be discarded.

+
Immutable
st_force4d(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZM layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate doesn’t exist, it will be set to 0.

+
Immutable
st_force4d(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZM layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate doesn’t exist, it will be set to 0.

+
Immutable
st_force4d(geometry: geometry, defaultZ: float, defaultM: float) → geometry

Returns a Geometry that is forced into XYZM layout. If a Z coordinate doesn’t exist, it will be set to the specified Z value. If a M coordinate doesn’t exist, it will be set to the specified M value.

+
Immutable
st_forcecollection(geometry: geometry) → geometry

Converts the geometry into a GeometryCollection.

+
Immutable
st_forcepolygonccw(geometry: geometry) → geometry

Returns a Geometry where all Polygon objects have exterior rings in the counter-clockwise orientation and interior rings in the clockwise orientation. Non-Polygon objects are unchanged.

+
Immutable
st_forcepolygoncw(geometry: geometry) → geometry

Returns a Geometry where all Polygon objects have exterior rings in the clockwise orientation and interior rings in the counter-clockwise orientation. Non-Polygon objects are unchanged.

+
Immutable
st_frechetdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the Frechet distance between the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_frechetdistance(geometry_a: geometry, geometry_b: geometry, densify_frac: float) → float

Returns the Frechet distance between the given geometries, with the given segment densification (range 0.0-1.0, -1 to disable).

+

Smaller densify_frac gives a more accurate Fréchet distance. However, the computation time and memory usage increases with the square of the number of subsegments.

+

This function utilizes the GEOS module.

+
Immutable
st_generatepoints(geometry: geometry, npoints: int4) → geometry

Generates pseudo-random points until the requested number are found within the input area. Uses system time as a seed. +The requested number of points must be not larger than 65336.

+
Volatile
st_generatepoints(geometry: geometry, npoints: int4, seed: int4) → geometry

Generates pseudo-random points until the requested number are found within the input area. +The requested number of points must be not larger than 65336.

+
Immutable
st_geogfromewkb(val: bytes) → geography

Returns the Geography from an EWKB representation.

+
Immutable
st_geogfromewkt(val: string) → geography

Returns the Geography from an EWKT representation.

+
Immutable
st_geogfromgeojson(val: string) → geography

Returns the Geography from an GeoJSON representation.

+
Immutable
st_geogfromgeojson(val: jsonb) → geography

Returns the Geography from an GeoJSON representation.

+
Immutable
st_geogfromtext(str: string, srid: int) → geography

Returns the Geography from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geogfromtext(val: string) → geography

Returns the Geography from a WKT or EWKT representation.

+
Immutable
st_geogfromwkb(bytes: bytes, srid: int) → geography

Returns the Geography from a WKB (or EWKB) representation with the given SRID set.

+
Immutable
st_geogfromwkb(val: bytes) → geography

Returns the Geography from a WKB (or EWKB) representation.

+
Immutable
st_geographyfromtext(str: string, srid: int) → geography

Returns the Geography from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geographyfromtext(val: string) → geography

Returns the Geography from a WKT or EWKT representation.

+
Immutable
st_geohash(geography: geography) → string

Returns a GeoHash representation of the geography with full precision if a point is provided, or with variable precision based on the size of the feature.

+
Immutable
st_geohash(geography: geography, precision: int) → string

Returns a GeoHash representation of the geography with the supplied precision.

+
Immutable
st_geohash(geometry: geometry) → string

Returns a GeoHash representation of the geometry with full precision if a point is provided, or with variable precision based on the size of the feature. This will error if any coordinates are outside the bounds of longitude/latitude.

+
Immutable
st_geohash(geometry: geometry, precision: int) → string

Returns a GeoHash representation of the geometry with the supplied precision. This will error if any coordinates are outside the bounds of longitude/latitude.

+
Immutable
st_geomcollfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not GeometryCollection, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geomcollfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geomcollfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geomcollfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geometryfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geometryfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_geometryn(geometry: geometry, n: int) → geometry

Returns the n-th Geometry (1-indexed). Returns NULL if out of bounds.

+
Immutable
st_geometrytype(geometry: geometry) → string

Returns the type of geometry as a string prefixed with ST_.

+

This function utilizes the GEOS module.

+
Immutable
st_geomfromewkb(val: bytes) → geometry

Returns the Geometry from an EWKB representation.

+
Immutable
st_geomfromewkt(val: string) → geometry

Returns the Geometry from an EWKT representation.

+
Immutable
st_geomfromgeohash(geohash: string) → geometry

Return a POLYGON Geometry from a GeoHash string with max precision.

+
Immutable
st_geomfromgeohash(geohash: string, precision: int) → geometry

Return a POLYGON Geometry from a GeoHash string with supplied precision.

+
Immutable
st_geomfromgeojson(val: string) → geometry

Returns the Geometry from an GeoJSON representation.

+
Immutable
st_geomfromgeojson(val: jsonb) → geometry

Returns the Geometry from an GeoJSON representation.

+
Immutable
st_geomfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geomfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_geomfromwkb(bytes: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with the given SRID set.

+
Immutable
st_geomfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation.

+
Immutable
st_hasarc(geometry: geometry) → bool

Returns whether there is a CIRCULARSTRING in the geometry.

+
Immutable
st_hausdorffdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the Hausdorff distance between the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_hausdorffdistance(geometry_a: geometry, geometry_b: geometry, densify_frac: float) → float

Returns the Hausdorff distance between the given geometries, with the given segment densification (range 0.0-1.0).

+

This function utilizes the GEOS module.

+
Immutable
st_interiorringn(geometry: geometry, n: int) → geometry

Returns the n-th (1-indexed) interior ring of a Polygon as a LineString. Returns NULL if the shape is not a Polygon, or the ring does not exist.

+
Immutable
st_intersection(geography_a: geography, geography_b: geography) → geography

Returns the point intersections of the given geographies.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+

This function utilizes the GEOS module.

+
Immutable
st_intersection(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the point intersections of the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_intersection(geometry_a_str: string, geometry_b_str: string) → geometry

Returns the point intersections of the given geometries.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_intersects(geography_a: geography, geography_b: geography) → bool

Returns true if geography_a shares any portion of space with geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_intersects(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_intersects(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_isclosed(geometry: geometry) → bool

Returns whether the geometry is closed as defined by whether the start and end points are coincident. Points are considered closed, empty geometries are not. For collections and multi-types, all members must be closed, as must all polygon rings.

+
Immutable
st_iscollection(geometry: geometry) → bool

Returns whether the geometry is of a collection type (including multi-types).

+
Immutable
st_isempty(geometry: geometry) → bool

Returns whether the geometry is empty.

+
Immutable
st_ispolygonccw(geometry: geometry) → bool

Returns whether the Polygon objects inside the Geometry have exterior rings in the counter-clockwise orientation and interior rings in the clockwise orientation. Non-Polygon objects are considered counter-clockwise.

+
Immutable
st_ispolygoncw(geometry: geometry) → bool

Returns whether the Polygon objects inside the Geometry have exterior rings in the clockwise orientation and interior rings in the counter-clockwise orientation. Non-Polygon objects are considered clockwise.

+
Immutable
st_isring(geometry: geometry) → bool

Returns whether the geometry is a single linestring that is closed and simple, as defined by ST_IsClosed and ST_IsSimple.

+

This function utilizes the GEOS module.

+
Immutable
st_issimple(geometry: geometry) → bool

Returns true if the geometry has no anomalous geometric points, e.g. that it intersects with or lies tangent to itself.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalid(geometry: geometry) → bool

Returns whether the geometry is valid as defined by the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalid(geometry: geometry, flags: int) → bool

Returns whether the geometry is valid.

+

For flags=0, validity is defined by the OGC spec.

+

For flags=1, validity considers self-intersecting rings forming holes as valid as per ESRI. This is not valid under OGC and CRDB spatial operations may not operate correctly.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidreason(geometry: geometry) → string

Returns a string containing the reason the geometry is invalid along with the point of interest, or “Valid Geometry” if it is valid. Validity is defined by the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidreason(geometry: geometry, flags: int) → string

Returns the reason the geometry is invalid or “Valid Geometry” if it is valid.

+

For flags=0, validity is defined by the OGC spec.

+

For flags=1, validity considers self-intersecting rings forming holes as valid as per ESRI. This is not valid under OGC and CRDB spatial operations may not operate correctly.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidtrajectory(geometry: geometry) → bool

Returns whether the geometry encodes a valid trajectory.

+

Note the geometry must be a LineString with M coordinates.

+
Immutable
st_length(geography: geography) → float

Returns the length of the given geography in meters. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_length(geography: geography, use_spheroid: bool) → float

Returns the length of the given geography in meters.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_length(geometry: geometry) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+
Immutable
st_length(geometry_str: string) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_length2d(geometry: geometry) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+
Immutable
st_linecrossingdirection(linestring_a: geometry, linestring_b: geometry) → int

Returns an integer value defining behavior of crossing of lines: +0: lines do not cross, +-1: linestring_b crosses linestring_a from right to left, +1: linestring_b crosses linestring_a from left to right, +-2: linestring_b crosses linestring_a multiple times from right to left, +2: linestring_b crosses linestring_a multiple times from left to right, +-3: linestring_b crosses linestring_a multiple times from left to left, +3: linestring_b crosses linestring_a multiple times from right to right.

+

Note that the top vertex of the segment touching another line does not count as a crossing, but the bottom vertex of segment touching another line is considered a crossing.

+
Immutable
st_linefromencodedpolyline(encoded_polyline: string) → geometry

Creates a LineString from an Encoded Polyline string.

+

Returns valid results only if the polyline was encoded with 5 decimal places.

+

See http://developers.google.com/maps/documentation/utilities/polylinealgorithm

+
Immutable
st_linefromencodedpolyline(encoded_polyline: string, precision: int4) → geometry

Creates a LineString from an Encoded Polyline string.

+

Precision specifies how many decimal places will be preserved in Encoded Polyline. Value should be the same on encoding and decoding, or coordinates will be incorrect.

+

See http://developers.google.com/maps/documentation/utilities/polylinealgorithm

+
Immutable
st_linefrommultipoint(geometry: geometry) → geometry

Creates a LineString from a MultiPoint geometry.

+
Immutable
st_linefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not LineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_linefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_lineinterpolatepoint(geometry: geometry, fraction: float) → geometry

Returns a point along the given LineString which is at given fraction of LineString’s total length.

+

This function utilizes the GEOS module.

+
Immutable
st_lineinterpolatepoints(geometry: geometry, fraction: float) → geometry

Returns one or more points along the LineString which is at an integral multiples of given fraction of LineString’s total length.

+

Note If the result has zero or one points, it will be returned as a POINT. If it has two or more points, it will be returned as a MULTIPOINT.

+

This function utilizes the GEOS module.

+
Immutable
st_lineinterpolatepoints(geometry: geometry, fraction: float, repeat: bool) → geometry

Returns one or more points along the LineString which is at an integral multiples of given fraction of LineString’s total length. If repeat is false (default true) then it returns first point.

+

Note If the result has zero or one points, it will be returned as a POINT. If it has two or more points, it will be returned as a MULTIPOINT.

+

This function utilizes the GEOS module.

+
Immutable
st_linelocatepoint(line: geometry, point: geometry) → float

Returns a float between 0 and 1 representing the location of the closest point on LineString to the given Point, as a fraction of total 2d line length.

+
Immutable
st_linemerge(geometry: geometry) → geometry

Returns a LineString or MultiLineString by joining together constituents of a MultiLineString with matching endpoints. If the input is not a MultiLineString or LineString, an empty GeometryCollection is returned.

+

This function utilizes the GEOS module.

+
Immutable
st_linestringfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not LineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_linestringfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linestringfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linestringfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linesubstring(linestring: geometry, start_fraction: decimal, end_fraction: decimal) → geometry

Return a linestring being a substring of the input one starting and ending at the given fractions of total 2D length. Second and third arguments are float8 values between 0 and 1.

+
Immutable
st_linesubstring(linestring: geometry, start_fraction: float, end_fraction: float) → geometry

Return a linestring being a substring of the input one starting and ending at the given fractions of total 2D length. Second and third arguments are float8 values between 0 and 1.

+
Immutable
st_longestline(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the LineString corresponding to the max distance across every pair of points comprising the given geometries.

+

Note if geometries are the same, it will return the LineString with the maximum distance between the geometry’s vertexes. The function will return the longest line that was discovered first when comparing maximum distances if more than one is found.

+
Immutable
st_m(geometry: geometry) → float

Returns the M coordinate of a geometry if it is a Point.

+
Immutable
st_makebox2d(geometry_a: geometry, geometry_b: geometry) → box2d

Creates a box2d from two points. Errors if arguments are not two non-empty points.

+
Immutable
st_makeenvelope(xmin: float, ymin: float, xmax: float, ymax: float) → geometry

Creates a rectangular Polygon from the minimum and maximum values for X and Y with SRID 0.

+
Immutable
st_makeenvelope(xmin: float, ymin: float, xmax: float, ymax: float, srid: int) → geometry

Creates a rectangular Polygon from the minimum and maximum values for X and Y with the given SRID.

+
Immutable
st_makepoint(x: float, y: float) → geometry

Returns a new Point with the given X and Y coordinates.

+
Immutable
st_makepoint(x: float, y: float, z: float) → geometry

Returns a new Point with the given X, Y, and Z coordinates.

+
Immutable
st_makepoint(x: float, y: float, z: float, m: float) → geometry

Returns a new Point with the given X, Y, Z, and M coordinates.

+
Immutable
st_makepointm(x: float, y: float, m: float) → geometry

Returns a new Point with the given X, Y, and M coordinates.

+
Immutable
st_makepolygon(geometry: geometry) → geometry

Returns a new Polygon with the given outer LineString.

+
Immutable
st_makepolygon(outer: geometry, interior: anyelement[]) → geometry

Returns a new Polygon with the given outer LineString and interior (hole) LineString(s).

+
Immutable
st_makevalid(geometry: geometry) → geometry

Returns a valid form of the given geometry according to the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_maxdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the maximum distance across every pair of points comprising the given geometries. Note if the geometries are the same, it will return the maximum distance between the geometry’s vertexes.

+
Immutable
st_memsize(geometry: geometry) → int

Returns the amount of memory space (in bytes) the geometry takes.

+
Immutable
st_minimumboundingcircle(geometry: geometry) → geometry

Returns the smallest circle polygon that can fully contain a geometry.

+
Immutable
st_minimumboundingcircle(geometry: geometry, num_segs: int) → geometry

Returns the smallest circle polygon that can fully contain a geometry.

+
Immutable
st_minimumboundingradius(geometry: geometry) → tuple{geometry AS center, float AS radius}

Returns a record containing the center point and radius of the smallest circle that can fully contain the given geometry.

+
Immutable
st_minimumclearance(geometry: geometry) → float

Returns the minimum distance a vertex can move before producing an invalid geometry. Returns Infinity if no minimum clearance can be found (e.g. for a single point).

+
Immutable
st_minimumclearanceline(geometry: geometry) → geometry

Returns a LINESTRING spanning the minimum distance a vertex can move before producing an invalid geometry. If no minimum clearance can be found (e.g. for a single point), an empty LINESTRING is returned.

+
Immutable
st_mlinefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mlinefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mlinefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mlinefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mpointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mpointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpolyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mpolyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_mpolyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_mpolyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multi(geometry: geometry) → geometry

Returns the geometry as a new multi-geometry, e.g converts a POINT to a MULTIPOINT. If the input is already a multitype or collection, it is returned as is.

+
Immutable
st_multilinefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multilinefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multilinestringfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multipointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipolyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipolyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipolygonfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_ndims(geometry: geometry) → int

Returns the number of coordinate dimensions of a given Geometry.

+
Immutable
st_node(geometry: geometry) → geometry

Adds a node on a geometry for each intersection. Resulting geometry is always a MultiLineString.

+
Immutable
st_normalize(geometry: geometry) → geometry

Returns the geometry in its normalized form.

+

This function utilizes the GEOS module.

+
Immutable
st_npoints(geometry: geometry) → int

Returns the number of points in a given Geometry. Works for any shape type.

+
Immutable
st_nrings(geometry: geometry) → int

Returns the number of rings in a Polygon Geometry. Returns 0 if the shape is not a Polygon.

+
Immutable
st_numgeometries(geometry: geometry) → int

Returns the number of shapes inside a given Geometry.

+
Immutable
st_numinteriorring(geometry: geometry) → int

Returns the number of interior rings in a Polygon Geometry. Returns NULL if the shape is not a Polygon.

+
Immutable
st_numinteriorrings(geometry: geometry) → int

Returns the number of interior rings in a Polygon Geometry. Returns NULL if the shape is not a Polygon.

+
Immutable
st_numpoints(geometry: geometry) → int

Returns the number of points in a LineString. Returns NULL if the Geometry is not a LineString.

+
Immutable
st_orderingequals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is exactly equal to geometry_b, having all coordinates in the same order, as well as the same type, SRID, bounding box, and so on.

+
Immutable
st_orientedenvelope(geometry: geometry) → geometry

Returns a minimum rotated rectangle enclosing a geometry. +Note that more than one minimum rotated rectangle may exist. +May return a Point or LineString in the case of degenerate inputs.

+
Immutable
st_overlaps(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a intersects but does not completely contain geometry_b, or vice versa. “Does not completely” implies ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = false.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_perimeter(geography: geography) → float

Returns the perimeter of the given geography in meters. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_perimeter(geography: geography, use_spheroid: bool) → float

Returns the perimeter of the given geography in meters.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_perimeter(geometry: geometry) → float

Returns the perimeter of the given geometry.

+

Note ST_Perimeter is only valid for Polygon - use ST_Length for LineString.

+

This function utilizes the GEOS module.

+
Immutable
st_perimeter2d(geometry: geometry) → float

Returns the perimeter of the given geometry.

+

Note ST_Perimeter is only valid for Polygon - use ST_Length for LineString.

+

This function utilizes the GEOS module.

+
Immutable
st_point(x: float, y: float) → geometry

Returns a new Point with the given X and Y coordinates.

+
Immutable
st_pointfromgeohash(geohash: string) → geometry

Return a POINT Geometry from a GeoHash string with max precision.

+
Immutable
st_pointfromgeohash(geohash: string, precision: int) → geometry

Return a POINT Geometry from a GeoHash string with supplied precision.

+
Immutable
st_pointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Point, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_pointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointinsidecircle(geometry: geometry, x_coord: float, y_coord: float, radius: float) → bool

Returns true if the geometry is a point and is inside the circle. Returns false otherwise.

+
Immutable
st_pointn(geometry: geometry, n: int) → geometry

Returns the n-th Point of a LineString (1-indexed). Returns NULL if out of bounds or not a LineString.

+
Immutable
st_pointonsurface(geometry: geometry) → geometry

Returns a point that intersects with the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_points(geometry: geometry) → geometry

Returns all coordinates in the given Geometry as a MultiPoint, including duplicates.

+
Immutable
st_polyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Polygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_polyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygon(geometry: geometry, srid: int) → geometry

Returns a new Polygon from the given LineString and sets its SRID. It is equivalent to ST_MakePolygon with a single argument followed by ST_SetSRID.

+
Immutable
st_polygonfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Polygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_polygonfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygonfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygonfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_project(geography: geography, distance: float, azimuth: float) → geography

Returns a point projected from a start point along a geodesic using a given distance and azimuth (bearing). +This is known as the direct geodesic problem.

+

The distance is given in meters. Negative values are supported.

+

The azimuth (also known as heading or bearing) is given in radians. It is measured clockwise from true north (azimuth zero). +East is azimuth π/2 (90 degrees); south is azimuth π (180 degrees); west is azimuth 3π/2 (270 degrees). +Negative azimuth values and values greater than 2π (360 degrees) are supported.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry) → string

Returns the DE-9IM spatial relation between geometry_a and geometry_b.

+

This function utilizes the GEOS module.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry, bnr: int) → string

Returns the DE-9IM spatial relation between geometry_a and geometry_b using the given boundary node rule (1:OGC/MOD2, 2:Endpoint, 3:MultivalentEndpoint, 4:MonovalentEndpoint).

+

This function utilizes the GEOS module.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry, pattern: string) → bool

Returns whether the DE-9IM spatial relation between geometry_a and geometry_b matches the DE-9IM pattern.

+

This function utilizes the GEOS module.

+
Immutable
st_relatematch(intersection_matrix: string, pattern: string) → bool

Returns whether the given DE-9IM intersection matrix satisfies the given pattern.

+
Immutable
st_removepoint(line_string: geometry, index: int) → geometry

Removes the Point at the given 0-based index and returns the modified LineString geometry.

+
Immutable
st_removerepeatedpoints(geometry: geometry) → geometry

Returns a geometry with repeated points removed.

+
Immutable
st_removerepeatedpoints(geometry: geometry, tolerance: float) → geometry

Returns a geometry with repeated points removed, within the given distance tolerance.

+
Immutable
st_reverse(geometry: geometry) → geometry

Returns a modified geometry by reversing the order of its vertices.

+
Immutable
st_rotate(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated around the origin by a rotation angle.

+
Immutable
st_rotate(g: geometry, angle_radians: float, origin_point: geometry) → geometry

Returns a modified Geometry whose coordinates are rotated around the provided origin by a rotation angle.

+
Immutable
st_rotate(g: geometry, angle_radians: float, origin_x: float, origin_y: float) → geometry

Returns a modified Geometry whose coordinates are rotated around the provided origin by a rotation angle.

+
Immutable
st_rotatex(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the x axis by a rotation angle.

+
Immutable
st_rotatey(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the y axis by a rotation angle.

+
Immutable
st_rotatez(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the z axis by a rotation angle.

+
Immutable
st_s2covering(geography: geography) → geography

Returns a geography which represents the S2 covering used by the index using the default index configuration.

+
Immutable
st_s2covering(geography: geography, settings: string) → geography

Returns a geography which represents the S2 covering used by the index using the index configuration specified by the settings parameter.

+

The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX ... WITH (...), e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using SELECT ST_S2Covering(geography, 's2_max_level=15,s2_level_mod=3').

+
Immutable
st_s2covering(geometry: geometry) → geometry

Returns a geometry which represents the S2 covering used by the index using the default index configuration.

+
Immutable
st_s2covering(geometry: geometry, settings: string) → geometry

Returns a geometry which represents the S2 covering used by the index using the index configuration specified by the settings parameter.

+

The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX ... WITH (...), e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using SELECT ST_S2Covering(geometry, 's2_max_level=15,s2_level_mod=3')

+
Immutable
st_scale(g: geometry, factor: geometry) → geometry

Returns a modified Geometry scaled by taking in a Geometry as the factor.

+
Immutable
st_scale(g: geometry, factor: geometry, origin: geometry) → geometry

Returns a modified Geometry scaled by the Geometry factor relative to a false origin.

+
Immutable
st_scale(geometry: geometry, x_factor: float, y_factor: float) → geometry

Returns a modified Geometry scaled by the given factors.

+
Immutable
st_segmentize(geography: geography, max_segment_length_meters: float) → geography

Returns a modified Geography having no segment longer than the given max_segment_length meters.

+

The calculations are done on a sphere.

+

This function utilizes the S2 library for spherical calculations.

+
Immutable
st_segmentize(geometry: geometry, max_segment_length: float) → geometry

Returns a modified Geometry having no segment longer than the given max_segment_length. Length units are in units of spatial reference.

+
Immutable
st_setpoint(line_string: geometry, index: int, point: geometry) → geometry

Sets the Point at the given 0-based index and returns the modified LineString geometry.

+
Immutable
st_setsrid(geography: geography, srid: int) → geography

Sets a Geography to a new SRID without transforming the coordinates.

+
Immutable
st_setsrid(geometry: geometry, srid: int) → geometry

Sets a Geometry to a new SRID without transforming the coordinates.

+
Immutable
st_sharedpaths(geometry_a: geometry, geometry_b: geometry) → geometry

Returns a collection containing paths shared by the two input geometries.

+

Those going in the same direction are in the first element of the collection, +those going in the opposite direction are in the second element. +The paths themselves are given in the direction of the first geometry.

+
Immutable
st_shiftlongitude(geometry: geometry) → geometry

Returns a modified version of a geometry in which the longitude (X coordinate) of each point is incremented by 360 if it is <0 and decremented by 360 if it is >180. The result is only meaningful if the coordinates are in longitude/latitude.

+
Immutable
st_shortestline(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the LineString that corresponds to the minimum distance across every pair of points comprising the given geometries.

+

Note if geometries are the same, it will return the LineString with the minimum distance between the geometry’s vertexes. The function will return the shortest line that was discovered first when comparing minimum distances if more than one is found.

+
Immutable
st_simplify(geometry: geometry, tolerance: float) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm.

+

This function utilizes the GEOS module.

+
Immutable
st_simplify(geometry: geometry, tolerance: float, preserve_collapsed: bool) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm, retaining objects that would be too small given the tolerance if preserve_collapsed is set to true.

+
Immutable
st_simplifypreservetopology(geometry: geometry, tolerance: float) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm, avoiding the creation of invalid geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_snap(input: geometry, target: geometry, tolerance: float) → geometry

Snaps the vertices and segments of input geometry the target geometry’s vertices. +Tolerance is used to control where snapping is performed. The result geometry is the input geometry with the vertices snapped. +If no snapping occurs then the input geometry is returned unchanged.

+
Immutable
st_snaptogrid(geometry: geometry, origin: geometry, size_x: float, size_y: float, size_z: float, size_m: float) → geometry

Snap a geometry to a grid defined by the given origin and X, Y, Z, and M cell sizes. Any dimension with a 0 cell size will not be snapped.

+
Immutable
st_snaptogrid(geometry: geometry, origin_x: float, origin_y: float, size_x: float, size_y: float) → geometry

Snap a geometry to a grid with X coordinates snapped to size_x and Y coordinates snapped to size_y based on an origin of (origin_x, origin_y).

+
Immutable
st_snaptogrid(geometry: geometry, size: float) → geometry

Snap a geometry to a grid of the given size. The specified size is only used to snap X and Y coordinates.

+
Immutable
st_snaptogrid(geometry: geometry, size_x: float, size_y: float) → geometry

Snap a geometry to a grid with X coordinates snapped to size_x and Y coordinates snapped to size_y.

+
Immutable
st_srid(geography: geography) → int

Returns the Spatial Reference Identifier (SRID) for the ST_Geography as defined in spatial_ref_sys table.

+
Immutable
st_srid(geometry: geometry) → int

Returns the Spatial Reference Identifier (SRID) for the ST_Geometry as defined in spatial_ref_sys table.

+
Immutable
st_startpoint(geometry: geometry) → geometry

Returns the first point of a geometry which has shape LineString. Returns NULL if the geometry is not a LineString.

+
Immutable
st_subdivide(geometry: geometry) → geometry

Returns a geometry divided into parts, where each part contains no more than 256 vertices.

+
Immutable
st_subdivide(geometry: geometry, max_vertices: int4) → geometry

Returns a geometry divided into parts, where each part contains no more than the number of vertices provided.

+
Immutable
st_summary(geography: geography) → string

Returns a text summary of the contents of the geography.

+

Flags shown in square brackets after the geometry type have the following meaning:

+
    +
  • M: has M coordinate
  • +
  • Z: has Z coordinate
  • +
  • B: has a cached bounding box
  • +
  • G: is geography
  • +
  • S: has spatial reference system
  • +
+
Immutable
st_summary(geometry: geometry) → string

Returns a text summary of the contents of the geometry.

+

Flags shown in square brackets after the geometry type have the following meaning:

+
    +
  • M: has M coordinate
  • +
  • Z: has Z coordinate
  • +
  • B: has a cached bounding box
  • +
  • G: is geography
  • +
  • S: has spatial reference system
  • +
+
Immutable
st_swapordinates(geometry: geometry, swap_ordinate_string: string) → geometry

Returns a version of the given geometry with given ordinates swapped. +The swap_ordinate_string parameter is a 2-character string naming the ordinates to swap. Valid names are: x, y, z and m.

+
Immutable
st_symdifference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the symmetric difference of both geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_symmetricdifference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the symmetric difference of both geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4, bounds: geometry) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4, bounds: geometry, margin: float) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_touches(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if the only points in common between geometry_a and geometry_b are on the boundary. Note points do not touch other points.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_transform(geometry: geometry, from_proj_text: string, srid: int) → geometry

Transforms a geometry into the coordinate reference system assuming the from_proj_text to the new to_proj_text by projecting its coordinates. The supplied SRID is set on the new geometry.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, from_proj_text: string, to_proj_text: string) → geometry

Transforms a geometry into the coordinate reference system assuming the from_proj_text to the new to_proj_text by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, srid: int) → geometry

Transforms a geometry into the given SRID coordinate reference system by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, to_proj_text: string) → geometry

Transforms a geometry into the coordinate reference system referenced by the projection text by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_translate(g: geometry, delta_x: float, delta_y: float) → geometry

Returns a modified Geometry translated by the given deltas.

+
Immutable
st_translate(g: geometry, delta_x: float, delta_y: float, delta_z: float) → geometry

Returns a modified Geometry translated by the given deltas.

+
Immutable
st_transscale(geometry: geometry, delta_x: float, delta_y: float, x_factor: float, y_factor: float) → geometry

Translates the geometry using the deltaX and deltaY args, then scales it using the XFactor, YFactor args, working in 2D only.

+
Immutable
st_unaryunion(geometry: geometry) → geometry

Returns a union of the components for any geometry or geometry collection provided. Dissolves boundaries of a multipolygon.

+
Immutable
st_voronoilines(geometry: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoilines(geometry: geometry, tolerance: float) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoilines(geometry: geometry, tolerance: float, extend_to: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoipolygons(geometry: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_voronoipolygons(geometry: geometry, tolerance: float) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_voronoipolygons(geometry: geometry, tolerance: float, extend_to: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_within(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is completely inside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_wkbtosql(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation.

+
Immutable
st_wkttosql(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_x(geometry: geometry) → float

Returns the X coordinate of a geometry if it is a Point.

+
Immutable
st_xmax(box2d: box2d) → float

Returns the maximum X ordinate of a box2d.

+
Immutable
st_xmax(geometry: geometry) → float

Returns the maximum X ordinate of a geometry.

+
Immutable
st_xmin(box2d: box2d) → float

Returns the minimum X ordinate of a box2d.

+
Immutable
st_xmin(geometry: geometry) → float

Returns the minimum X ordinate of a geometry.

+
Immutable
st_y(geometry: geometry) → float

Returns the Y coordinate of a geometry if it is a Point.

+
Immutable
st_ymax(box2d: box2d) → float

Returns the maximum Y ordinate of a box2d.

+
Immutable
st_ymax(geometry: geometry) → float

Returns the maximum Y ordinate of a geometry.

+
Immutable
st_ymin(box2d: box2d) → float

Returns the minimum Y ordinate of a box2d.

+
Immutable
st_ymin(geometry: geometry) → float

Returns the minimum Y ordinate of a geometry.

+
Immutable
st_z(geometry: geometry) → float

Returns the Z coordinate of a geometry if it is a Point.

+
Immutable
st_zmflag(geometry: geometry) → int2

Returns a code based on the ZM coordinate dimension of a geometry (XY = 0, XYM = 1, XYZ = 2, XYZM = 3).

+
Immutable
+ +### String and byte functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
ascii(val: string) → int

Returns the character code of the first character in val. Despite the name, the function supports Unicode too.

+
Immutable
bit_count(val: bytes) → int

Calculates the number of bits set used to represent val.

+
Immutable
bit_count(val: varbit) → int

Calculates the number of bits set used to represent val.

+
Immutable
bit_length(val: bytes) → int

Calculates the number of bits used to represent val.

+
Immutable
bit_length(val: string) → int

Calculates the number of bits used to represent val.

+
Immutable
bit_length(val: varbit) → int

Calculates the number of bits used to represent val.

+
Immutable
bitmask_and(a: string, b: string) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: string, b: varbit) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: varbit, b: string) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: varbit, b: varbit) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: string, b: string) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: string, b: varbit) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: varbit, b: string) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: varbit, b: varbit) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: string, b: string) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: string, b: varbit) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: varbit, b: string) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: varbit, b: varbit) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
btrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the beginning or end of input (applies recursively).

+

For example, btrim('doggie', 'eod') returns ggi.

+
Immutable
btrim(val: string) → string

Removes all spaces from the beginning and end of val.

+
Immutable
char_length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
char_length(val: string) → int

Calculates the number of characters in val.

+
Immutable
character_length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
character_length(val: string) → int

Calculates the number of characters in val.

+
Immutable
chr(val: int) → string

Returns the character with the code given in val. Inverse function of ascii().

+
Immutable
compress(data: bytes, codec: string) → bytes

Compress data with the specified codec (‘gzip’, ‘lz4’, ‘snappy’, ‘zstd’).

+
Immutable
concat(anyelement...) → string

Concatenates a comma-separated list of strings.

+
Immutable
concat_ws(string...) → string

Uses the first argument as a separator between the concatenation of the subsequent arguments.

+

For example concat_ws('!','wow','great') returns wow!great.

+
Immutable
convert_from(str: bytes, enc: string) → string

Decode the bytes in str into a string using encoding enc. Supports encodings ‘UTF8’ and ‘LATIN1’.

+
Immutable
convert_to(str: string, enc: string) → bytes

Encode the string str as a byte array using encoding enc. Supports encodings ‘UTF8’ and ‘LATIN1’.

+
Immutable
decode(text: string, format: string) → bytes

Decodes data using format (hex / escape / base64).

+
Immutable
decompress(data: bytes, codec: string) → bytes

Decompress data with the specified codec (‘gzip’, ‘lz4’, ‘snappy’, ‘zstd’).

+
Immutable
difference(source: string, target: string) → int

Convert two strings to their Soundex codes and report the number of matching code positions.

+
Immutable
encode(data: bytes, format: string) → string

Encodes data using format (hex / escape / base64).

+
Immutable
format(string, anyelement...) → string

Interprets the first argument as a format string similar to C sprintf and interpolates the remaining arguments.

+
Stable
from_ip(val: bytes) → string

Converts the byte string representation of an IP to its character string representation.

+
Immutable
from_uuid(val: bytes) → string

Converts the byte string representation of a UUID to its character string representation.

+
Immutable
get_bit(bit_string: varbit, index: int) → int

Extracts a bit at given index in the bit array.

+
Immutable
get_bit(byte_string: bytes, index: int) → int

Extracts a bit at the given index in the byte array.

+
Immutable
get_byte(byte_string: bytes, index: int) → int

Extracts a byte at the given index in the byte array.

+
Immutable
initcap(val: string) → string

Capitalizes the first letter of val.

+
Immutable
left(input: bytes, return_set: int) → bytes

Returns the first return_set bytes from input.

+
Immutable
left(input: string, return_set: int) → string

Returns the first return_set characters from input.

+
Immutable
length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
length(val: string) → int

Calculates the number of characters in val.

+
Immutable
length(val: varbit) → int

Calculates the number of bits in val.

+
Immutable
lower(val: string) → string

Converts all characters in val to their lower-case equivalents.

+
Immutable
lpad(string: string, length: int) → string

Pads string to length by adding ’ ’ to the left of string. If string is longer than length it is truncated.

+
Immutable
lpad(string: string, length: int, fill: string) → string

Pads string by adding fill to the left of string to make it length. If string is longer than length it is truncated.

+
Immutable
ltrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the beginning (left-hand side) of input (applies recursively).

+

For example, ltrim('doggie', 'od') returns ggie.

+
Immutable
ltrim(val: string) → string

Removes all spaces from the beginning (left-hand side) of val.

+
Immutable
md5(bytes...) → string

Calculates the MD5 hash value of a set of values.

+
Leakproof
md5(string...) → string

Calculates the MD5 hash value of a set of values.

+
Leakproof
octet_length(val: bytes) → int

Calculates the number of bytes used to represent val.

+
Immutable
octet_length(val: string) → int

Calculates the number of bytes used to represent val.

+
Immutable
octet_length(val: varbit) → int

Calculates the number of bits used to represent val.

+
Immutable
overlay(input: string, overlay_val: string, start_pos: int) → string

Replaces characters in input with overlay_val starting at start_pos (begins at 1).

+

For example, overlay('doggie', 'CAT', 2) returns dCATie.

+
Immutable
overlay(input: string, overlay_val: string, start_pos: int, end_pos: int) → string

Deletes the characters in input between start_pos and end_pos (count starts at 1), and then insert overlay_val at start_pos.

+
Immutable
parse_date(string: string, datestyle: string) → date

Parses a date assuming it is in format specified by DateStyle.

+
Immutable
parse_date(val: string) → date

Parses a date assuming it is in MDY format.

+
Immutable
parse_ident(qualified_identifier: string) → string[]

Splits qualified_identifier into an array of identifiers, removing any quoting of individual identifiers. Extra characters after the last identifier are considered an error.

+
Immutable
parse_ident(qualified_identifier: string, strict: bool) → string[]

Splits qualified_identifier into an array of identifiers, removing any quoting of individual identifiers. If strict is false, then extra characters after the last identifier are ignored.

+
Immutable
parse_interval(string: string, style: string) → interval

Convert a string to an interval using the given IntervalStyle.

+
Immutable
parse_interval(val: string) → interval

Convert a string to an interval assuming the Postgres IntervalStyle.

+
Immutable
parse_time(string: string, timestyle: string) → time

Parses a time assuming the date (if any) is in format specified by DateStyle.

+
Immutable
parse_time(val: string) → time

Parses a time assuming the date (if any) is in MDY format.

+
Immutable
parse_timestamp(string: string, datestyle: string) → timestamp

Convert a string containing an absolute timestamp to the corresponding timestamp assuming dates formatted using the given DateStyle.

+
Immutable
parse_timestamp(val: string) → timestamp

Convert a string containing an absolute timestamp to the corresponding timestamp assuming dates are in MDY format.

+
Immutable
parse_timetz(string: string, timestyle: string) → timetz

Parses a timetz assuming the date (if any) is in format specified by DateStyle.

+
Immutable
parse_timetz(val: string) → timetz

Parses a timetz assuming the date (if any) is in MDY format.

+
Immutable
prettify_statement(statement: string, line_width: int, align_mode: int, case_mode: int) → string

Prettifies a statement using a user-configured pretty-printing config. +Align mode values range from 0 - 3, representing no, partial, full, and extra alignment respectively. +Case mode values range between 0 - 1, representing lower casing and upper casing respectively.

+
Immutable
prettify_statement(val: string) → string

Prettifies a statement using the default pretty-printing config.

+
Immutable
quote_ident(val: string) → string

Return val suitably quoted to serve as identifier in a SQL statement.

+
Immutable
quote_literal(val: string) → string

Return val suitably quoted to serve as string literal in a SQL statement.

+
Immutable
quote_literal(val: anyelement) → string

Coerce val to a string and then quote it as a literal.

+
Stable
quote_nullable(val: string) → string

Coerce val to a string and then quote it as a literal. If val is NULL, returns ‘NULL’.

+
Immutable
quote_nullable(val: anyelement) → string

Coerce val to a string and then quote it as a literal. If val is NULL, returns ‘NULL’.

+
Stable
regexp_extract(input: string, regex: string) → string

Returns the first match for the Regular Expression regex in input.

+
Immutable
regexp_replace(input: string, regex: string, replace: string) → string

Replaces matches for the Regular Expression regex in input with the Regular Expression replace.

+
Immutable
regexp_replace(input: string, regex: string, replace: string, flags: string) → string

Replaces matches for the regular expression regex in input with the regular expression replace using flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
repeat(input: string, repeat_counter: int) → string

Concatenates input repeat_counter number of times.

+

For example, repeat('dog', 2) returns dogdog.

+
Immutable
replace(input: string, find: string, replace: string) → string

Replaces all occurrences of find with replace in input

+
Immutable
reverse(val: string) → string

Reverses the order of the string’s characters.

+
Immutable
right(input: bytes, return_set: int) → bytes

Returns the last return_set bytes from input.

+
Immutable
right(input: string, return_set: int) → string

Returns the last return_set characters from input.

+
Immutable
rpad(string: string, length: int) → string

Pads string to length by adding ’ ’ to the right of string. If string is longer than length it is truncated.

+
Immutable
rpad(string: string, length: int, fill: string) → string

Pads string to length by adding fill to the right of string. If string is longer than length it is truncated.

+
Immutable
rtrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the end (right-hand side) of input (applies recursively).

+

For example, rtrim('doggie', 'ei') returns dogg.

+
Immutable
rtrim(val: string) → string

Removes all spaces from the end (right-hand side) of val.

+
Immutable
set_bit(bit_string: varbit, index: int, to_set: int) → varbit

Updates a bit at given index in the bit array.

+
Immutable
set_bit(byte_string: bytes, index: int, to_set: int) → bytes

Updates a bit at the given index in the byte array.

+
Immutable
set_byte(byte_string: bytes, index: int, to_set: int) → bytes

Updates a byte at the given index in the byte array.

+
Immutable
sha1(bytes...) → string

Calculates the SHA1 hash value of a set of values.

+
Leakproof
sha1(string...) → string

Calculates the SHA1 hash value of a set of values.

+
Leakproof
sha224(bytes...) → string

Calculates the SHA224 hash value of a set of values.

+
Leakproof
sha224(string...) → string

Calculates the SHA224 hash value of a set of values.

+
Leakproof
sha256(bytes...) → string

Calculates the SHA256 hash value of a set of values.

+
Leakproof
sha256(string...) → string

Calculates the SHA256 hash value of a set of values.

+
Leakproof
sha384(bytes...) → string

Calculates the SHA384 hash value of a set of values.

+
Leakproof
sha384(string...) → string

Calculates the SHA384 hash value of a set of values.

+
Leakproof
sha512(bytes...) → string

Calculates the SHA512 hash value of a set of values.

+
Leakproof
sha512(string...) → string

Calculates the SHA512 hash value of a set of values.

+
Leakproof
similar_escape(pattern: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern.

+
Immutable
similar_escape(pattern: string, escape: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern using escape as an escape token.

+
Immutable
similar_to_escape(pattern: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern.

+
Immutable
similar_to_escape(pattern: string, escape: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern using escape as an escape token.

+
Immutable
similar_to_escape(unescaped: string, pattern: string, escape: string) → bool

Matches unescaped with pattern using escape as an escape token.

+
Immutable
split_part(input: string, delimiter: string, return_index_pos: int) → string

Splits input on delimiter and returns the value in the return_index_pos position (starting at 1).

+

For example, split_part('123.456.789.0','.',3) returns 789.

+
Immutable
strpos(input: bytes, find: bytes) → int

Calculates the position where the byte subarray find begins in input.

+
Immutable
strpos(input: string, find: string) → int

Calculates the position where the string find begins in input.

+

For example, strpos('doggie', 'gie') returns 4.

+
Immutable
strpos(input: varbit, find: varbit) → int

Calculates the position where the bit subarray find begins in input.

+
Immutable
substr(input: bytes, start_pos: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: bytes, start_pos: int, length: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substr(input: string, regex: string) → string

Returns a substring of input that matches the regular expression regex.

+
Immutable
substr(input: string, regex: string, escape_char: string) → string

Returns a substring of input that matches the regular expression regex using escape_char as your escape character instead of \.

+
Immutable
substr(input: string, start_pos: int) → string

Returns a substring of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: string, start_pos: int, length: int) → string

Returns a substring of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substr(input: varbit, start_pos: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: varbit, start_pos: int, length: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: bytes, start_pos: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: bytes, start_pos: int, length: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: string, regex: string) → string

Returns a substring of input that matches the regular expression regex.

+
Immutable
substring(input: string, regex: string, escape_char: string) → string

Returns a substring of input that matches the regular expression regex using escape_char as your escape character instead of \.

+
Immutable
substring(input: string, start_pos: int) → string

Returns a substring of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: string, start_pos: int, length: int) → string

Returns a substring of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: varbit, start_pos: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: varbit, start_pos: int, length: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
to_char_with_style(date: date, datestyle: string) → string

Convert a date to a string assuming the string is formatted using the given DateStyle.

+
Immutable
to_char_with_style(interval: interval, style: string) → string

Convert an interval to a string using the given IntervalStyle.

+
Immutable
to_char_with_style(timestamp: timestamp, datestyle: string) → string

Convert a timestamp to a string assuming the string is formatted using the given DateStyle.

+
Immutable
to_english(val: int) → string

This function enunciates the value of its argument using English cardinals.

+
Immutable
to_hex(val: bytes) → string

Converts val to its hexadecimal representation.

+
Immutable
to_hex(val: int) → string

Converts val to its hexadecimal representation.

+
Immutable
to_hex(val: string) → string

Converts val to its hexadecimal representation.

+
Immutable
to_ip(val: string) → bytes

Converts the character string representation of an IP to its byte string representation.

+
Immutable
to_uuid(val: string) → bytes

Converts the character string representation of a UUID to its byte string representation.

+
Immutable
translate(input: string, find: string, replace: string) → string

In input, replaces the first character from find with the first character in replace; repeat for each character in find.

+

For example, translate('doggie', 'dog', '123'); returns 1233ie.

+
Immutable
ulid_to_uuid(val: string) → uuid

Converts a ULID string to its UUID-encoded representation.

+
Immutable
unaccent(val: string) → string

Removes accents (diacritic signs) from the text provided in val.

+
Immutable
upper(val: string) → string

Converts all characters in val to their upper-case equivalents.

+
Immutable
+ +### System info functions + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cluster_logical_timestamp() → decimal

Returns the logical time of the current transaction as +a CockroachDB HLC in decimal form.

+

Note that uses of this function disable server-side optimizations and +may increase either contention or retry errors, or both.

+

Returns an error if run in a transaction with an isolation level weaker than SERIALIZABLE.

+
Volatile
current_database() → string

Returns the current database.

+
Stable
current_schema() → string

Returns the current schema.

+
Stable
current_schemas(include_pg_catalog: bool) → string[]

Returns the valid schemas in the search path.

+
Stable
current_user() → string

Returns the current user. This function is provided for compatibility with PostgreSQL.

+
Stable
session_user() → string

Returns the session user. This function is provided for compatibility with PostgreSQL.

+
Stable
to_regclass(text: string) → regtype

Translates a textual relation name to its OID

+
Stable
to_regnamespace(text: string) → regtype

Translates a textual schema name to its OID

+
Stable
to_regproc(text: string) → regtype

Translates a textual function or procedure name to its OID

+
Stable
to_regprocedure(text: string) → regtype

Translates a textual function or procedure name(with argument types) to its OID

+
Stable
to_regrole(text: string) → regtype

Translates a textual role name to its OID

+
Stable
to_regtype(text: string) → regtype

Translates a textual type name to its OID

+
Stable
version() → string

Returns the node’s version of CockroachDB.

+
Volatile
+ +### TIMETZ functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
current_time() → time

Returns the current transaction’s time with no time zone.

+
Stable
current_time() → timetz

Returns the current transaction’s time with time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
current_time(precision: int) → time

Returns the current transaction’s time with no time zone.

+
Stable
current_time(precision: int) → timetz

Returns the current transaction’s time with time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime() → time

Returns the current transaction’s time with no time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime() → timetz

Returns the current transaction’s time with time zone.

+
Stable
localtime(precision: int) → time

Returns the current transaction’s time with no time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime(precision: int) → timetz

Returns the current transaction’s time with time zone.

+
Stable
+ +### Trigrams functions + + + + + + +
Function → ReturnsDescriptionVolatility
show_trgm(input: string) → string[]

Returns an array of all the trigrams in the given string.

+
Immutable
similarity(left: string, right: string) → float

Returns a number that indicates how similar the two arguments are. The range of the result is zero (indicating that the two strings are completely dissimilar) to one (indicating that the two strings are identical).

+
Immutable
+ +### UUID functions + + + + + +
Function → ReturnsDescriptionVolatility
uuid_to_ulid(val: uuid) → string

Converts a UUID-encoded ULID to its string representation.

+
Immutable
+ +### Compatibility functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
col_description(table_oid: oid, column_number: int) → string

Returns the comment for a table column, which is specified by the OID of its table and its column number. (obj_description cannot be used for table columns, since columns do not have OIDs of their own.)

+
Stable
current_setting(setting_name: string) → string

System info

+
Stable
current_setting(setting_name: string, missing_ok: bool) → string

System info

+
Stable
format_type(type_oid: oid, typemod: int) → string

Returns the SQL name of a data type that is identified by its type OID and possibly a type modifier. Currently, the type modifier is ignored.

+
Stable
getdatabaseencoding() → string

Returns the current encoding name used by the database.

+
Stable
has_any_column_privilege(table: string, privilege: string) → bool

Returns whether or not the current user has privileges for any column of table.

+
Stable
has_any_column_privilege(table: oid, privilege: string) → bool

Returns whether or not the current user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: string, table: string, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: string, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: oid, table: string, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: oid, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_column_privilege(table: string, column: int, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: string, column: string, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: oid, column: int, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: oid, column: string, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(user: string, table: string, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: string, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: oid, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: oid, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: string, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: string, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: oid, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: oid, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_database_privilege(database: string, privilege: string) → bool

Returns whether or not the current user has privileges for database.

+
Stable
has_database_privilege(database: oid, privilege: string) → bool

Returns whether or not the current user has privileges for database.

+
Stable
has_database_privilege(user: string, database: string, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: string, database: oid, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: oid, database: string, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: oid, database: oid, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_foreign_data_wrapper_privilege(fdw: string, privilege: string) → bool

Returns whether or not the current user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(fdw: oid, privilege: string) → bool

Returns whether or not the current user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: string, fdw: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: string, fdw: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: oid, fdw: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: oid, fdw: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_function_privilege(function: string, privilege: string) → bool

Returns whether or not the current user has privileges for function.

+
Stable
has_function_privilege(function: oid, privilege: string) → bool

Returns whether or not the current user has privileges for function.

+
Stable
has_function_privilege(user: string, function: string, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: string, function: oid, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: oid, function: string, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: oid, function: oid, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_language_privilege(language: string, privilege: string) → bool

Returns whether or not the current user has privileges for language.

+
Stable
has_language_privilege(language: oid, privilege: string) → bool

Returns whether or not the current user has privileges for language.

+
Stable
has_language_privilege(user: string, language: string, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: string, language: oid, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: oid, language: string, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: oid, language: oid, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_schema_privilege(schema: string, privilege: string) → bool

Returns whether or not the current user has privileges for schema.

+
Stable
has_schema_privilege(schema: oid, privilege: string) → bool

Returns whether or not the current user has privileges for schema.

+
Stable
has_schema_privilege(user: string, schema: string, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: string, schema: oid, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: oid, schema: string, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: oid, schema: oid, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_sequence_privilege(sequence: string, privilege: string) → bool

Returns whether or not the current user has privileges for sequence.

+
Stable
has_sequence_privilege(sequence: oid, privilege: string) → bool

Returns whether or not the current user has privileges for sequence.

+
Stable
has_sequence_privilege(user: string, sequence: string, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: string, sequence: oid, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: oid, sequence: string, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: oid, sequence: oid, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_server_privilege(server: string, privilege: string) → bool

Returns whether or not the current user has privileges for foreign server.

+
Stable
has_server_privilege(server: oid, privilege: string) → bool

Returns whether or not the current user has privileges for foreign server.

+
Stable
has_server_privilege(user: string, server: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: string, server: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: oid, server: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: oid, server: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_table_privilege(table: string, privilege: string) → bool

Returns whether or not the current user has privileges for table.

+
Stable
has_table_privilege(table: oid, privilege: string) → bool

Returns whether or not the current user has privileges for table.

+
Stable
has_table_privilege(user: string, table: string, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: string, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: oid, table: string, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: oid, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_tablespace_privilege(tablespace: string, privilege: string) → bool

Returns whether or not the current user has privileges for tablespace.

+
Stable
has_tablespace_privilege(tablespace: oid, privilege: string) → bool

Returns whether or not the current user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: string, tablespace: string, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: string, tablespace: oid, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: oid, tablespace: string, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: oid, tablespace: oid, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_type_privilege(type: string, privilege: string) → bool

Returns whether or not the current user has privileges for type.

+
Stable
has_type_privilege(type: oid, privilege: string) → bool

Returns whether or not the current user has privileges for type.

+
Stable
has_type_privilege(user: string, type: string, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: string, type: oid, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: oid, type: string, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: oid, type: oid, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
information_schema._pg_numeric_precision(typid: oid, typmod: int4) → int

Returns the precision of the given type with type modifier

+
Immutable
information_schema._pg_numeric_precision_radix(typid: oid, typmod: int4) → int

Returns the radix of the given type with type modifier

+
Immutable
information_schema._pg_numeric_scale(typid: oid, typmod: int4) → int

Returns the scale of the given type with type modifier

+
Immutable
nameconcatoid(name: string, oid: oid) → name

Used in the information_schema to produce specific_name columns, which are supposed to be unique per schema. The result is the same as ($1::text || ‘_’ || $2::text)::name except that, if it would not fit in 63 characters, we make it do so by truncating the name input (not the oid).

+
Immutable
obj_description(object_oid: oid) → string

Returns the comment for a database object specified by its OID alone. This is deprecated since there is no guarantee that OIDs are unique across different system catalogs; therefore, the wrong comment might be returned.

+
Stable
obj_description(object_oid: oid, catalog_name: string) → string

Returns the comment for a database object specified by its OID and the name of the containing system catalog. For example, obj_description(123456, ‘pg_class’) would retrieve the comment for the table with OID 123456.

+
Stable
oidvectortypes(vector: oidvector) → string

Generates a comma-separated string of type names from an oidvector.

+
Stable
pg_backend_pid() → int

Returns a numerical ID attached to this session. This ID is part of the query cancellation key used by the wire protocol. This function was only added for compatibility, and unlike in Postgres, the returned value does not correspond to a real process ID.

+
Stable
pg_collation_for(str: anyelement) → string

Returns the collation of the argument

+
Stable
pg_column_is_updatable(reloid: oid, attnum: int2, include_triggers: bool) → bool

Returns whether the given column can be updated.

+
Stable
pg_column_size(anyelement...) → int

Return size in bytes of the column provided as an argument

+
Immutable
pg_function_is_visible(oid: oid) → bool

Returns whether the function with the given OID belongs to one of the schemas on the search path.

+
Stable
pg_get_function_arg_default(func_oid: oid, arg_num: int4) → string

Get textual representation of a function argument’s default value. The second argument of this function is the argument number among all arguments (i.e. proallargtypes, not proargtypes), starting with 1, because that’s how information_schema.sql uses it. Currently, this always returns NULL, since CockroachDB does not support default values.

+
Stable
pg_get_function_arguments(func_oid: oid) → string

Returns the argument list (with defaults) necessary to identify a function, in the form it would need to appear in within CREATE FUNCTION.

+
Stable
pg_get_function_identity_arguments(func_oid: oid) → string

Returns the argument list (without defaults) necessary to identify a function, in the form it would need to appear in within ALTER FUNCTION, for instance.

+
Stable
pg_get_function_result(func_oid: oid) → string

Returns the types of the result of the specified function.

+
Stable
pg_get_functiondef(func_oid: oid) → string

For user-defined functions, returns the definition of the specified function. For builtin functions, returns the name of the function.

+
Stable
pg_get_indexdef(index_oid: oid) → string

Gets the CREATE INDEX command for index

+
Stable
pg_get_indexdef(index_oid: oid, column_no: int, pretty_bool: bool) → string

Gets the CREATE INDEX command for index, or definition of just one index column when given a non-zero column number

+
Stable
pg_get_serial_sequence(table_name: string, column_name: string) → string

Returns the name of the sequence used by the given column_name in the table table_name.

+
Stable
pg_get_viewdef(view_oid: oid) → string

Returns the CREATE statement for an existing view.

+
Stable
pg_get_viewdef(view_oid: oid, pretty_bool: bool) → string

Returns the CREATE statement for an existing view.

+
Stable
pg_has_role(role: string, privilege: string) → bool

Returns whether or not the current user has privileges for role.

+
Stable
pg_has_role(role: oid, privilege: string) → bool

Returns whether or not the current user has privileges for role.

+
Stable
pg_has_role(user: string, role: string, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: string, role: oid, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: oid, role: string, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: oid, role: oid, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_is_other_temp_schema(oid: oid) → bool

Returns true if the given OID is the OID of another session’s temporary schema. (This can be useful, for example, to exclude other sessions’ temporary tables from a catalog display.)

+
Stable
pg_my_temp_schema() → oid

Returns the OID of the current session’s temporary schema, or zero if it has none (because it has not created any temporary tables).

+
Stable
pg_relation_is_updatable(reloid: oid, include_triggers: bool) → int4

Returns the update events the relation supports.

+
Stable
pg_sequence_last_value(sequence_oid: oid) → int

Returns the last value generated by a sequence, or NULL if the sequence has not been used yet.

+
Volatile
pg_sleep(seconds: float) → bool

pg_sleep makes the current session’s process sleep until seconds seconds have elapsed. seconds is a value of type double precision, so fractional-second delays can be specified.

+
Volatile
pg_table_is_visible(oid: oid) → bool

Returns whether the table with the given OID belongs to one of the schemas on the search path.

+
Stable
pg_type_is_visible(oid: oid) → bool

Returns whether the type with the given OID belongs to one of the schemas on the search path.

+
Stable
set_config(setting_name: string, new_value: string, is_local: bool) → string

System info

+
Volatile
shobj_description(object_oid: oid, catalog_name: string) → string

Returns the comment for a shared database object specified by its OID and the name of the containing system catalog. This is just like obj_description except that it is used for retrieving comments on shared objects (e.g. databases).

+
Stable
+ diff --git a/src/current/_includes/cockroach-generated/release-25.1/sql/operators.md b/src/current/_includes/cockroach-generated/release-25.1/sql/operators.md new file mode 100644 index 00000000000..73e12287102 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.1/sql/operators.md @@ -0,0 +1,662 @@ + + + + + +
#Return
int # intint
varbit # varbitvarbit
+ + + + +
#>Return
jsonb #> string[]jsonb
+ + + + +
#>>Return
jsonb #>> string[]string
+ + + + + + + + + +
%Return
decimal % decimaldecimal
decimal % intdecimal
float % floatfloat
int % decimaldecimal
int % intint
string % stringbool
+ + + + + + +
&Return
inet & inetinet
int & intint
varbit & varbitvarbit
+ + + + + + + + + +
&&Return
anyelement && anyelementbool
box2d && box2dbool
box2d && geometrybool
geometry && box2dbool
geometry && geometrybool
inet && inetbool
+ + + + + + + + + + + + + + + +
*Return
decimal * decimaldecimal
decimal * intdecimal
decimal * intervalinterval
float * floatfloat
float * intervalinterval
int * decimaldecimal
int * intint
int * intervalinterval
interval * decimalinterval
interval * floatinterval
interval * intinterval
vector * vectorvector
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Return
+decimaldecimal
+floatfloat
+intint
+intervalinterval
date + intdate
date + intervaltimestamp
date + timetimestamp
date + timetztimestamptz
decimal + decimaldecimal
decimal + intdecimal
decimal + pg_lsnpg_lsn
float + floatfloat
inet + intinet
int + datedate
int + decimaldecimal
int + inetinet
int + intint
interval + datetimestamp
interval + intervalinterval
interval + timetime
interval + timestamptimestamp
interval + timestamptztimestamptz
interval + timetztimetz
pg_lsn + decimalpg_lsn
time + datetimestamp
time + intervaltime
timestamp + intervaltimestamp
timestamptz + intervaltimestamptz
timetz + datetimestamptz
timetz + intervaltimetz
vector + vectorvector
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
-Return
-decimaldecimal
-floatfloat
-intint
-intervalinterval
date - dateint
date - intdate
date - intervaltimestamp
date - timetimestamp
decimal - decimaldecimal
decimal - intdecimal
float - floatfloat
inet - inetint
inet - intinet
int - decimaldecimal
int - intint
interval - intervalinterval
jsonb - intjsonb
jsonb - stringjsonb
jsonb - string[]jsonb
pg_lsn - decimalpg_lsn
pg_lsn - pg_lsndecimal
time - intervaltime
time - timeinterval
timestamp - intervaltimestamp
timestamp - timestampinterval
timestamp - timestamptzinterval
timestamptz - intervaltimestamptz
timestamptz - timestampinterval
timestamptz - timestamptzinterval
timetz - intervaltimetz
vector - vectorvector
+ + + + + +
->Return
jsonb -> intjsonb
jsonb -> stringjsonb
+ + + + + +
->>Return
jsonb ->> intstring
jsonb ->> stringstring
+ + + + + + + + + + +
/Return
decimal / decimaldecimal
decimal / intdecimal
float / floatfloat
int / decimaldecimal
int / intdecimal
interval / floatinterval
interval / intinterval
+ + + + + + + + +
//Return
decimal // decimaldecimal
decimal // intdecimal
float // floatfloat
int // decimaldecimal
int // intint
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
<Return
anyenum < anyenumbool
bool < boolbool
bool[] < bool[]bool
box2d < box2dbool
bpchar < bpcharbool
bytes < bytesbool
bytes[] < bytes[]bool
collatedstring < collatedstringbool
collatedstring{*} < collatedstring{*}bool
date < datebool
date < timestampbool
date < timestamptzbool
date[] < date[]bool
decimal < decimalbool
decimal < floatbool
decimal < intbool
decimal[] < decimal[]bool
float < decimalbool
float < floatbool
float < intbool
float[] < float[]bool
geography < geographybool
geometry < geometrybool
inet < inetbool
inet[] < inet[]bool
int < decimalbool
int < floatbool
int < intbool
int < oidbool
int[] < int[]bool
interval < intervalbool
interval[] < interval[]bool
jsonb < jsonbbool
oid < intbool
oid < oidbool
pg_lsn < pg_lsnbool
refcursor < refcursorbool
string < stringbool
string[] < string[]bool
time < timebool
time < timetzbool
time[] < time[]bool
timestamp < datebool
timestamp < timestampbool
timestamp < timestamptzbool
timestamp[] < timestamp[]bool
timestamptz < datebool
timestamptz < timestampbool
timestamptz < timestamptzbool
timestamptz < timestamptzbool
timetz < timebool
timetz < timetzbool
tuple < tuplebool
uuid < uuidbool
uuid[] < uuid[]bool
varbit < varbitbool
vector < vectorbool
+ + + + +
<#>Return
vector <#> vectorfloat
+ + + + +
<->Return
vector <-> vectorfloat
+ + + + + + +
<<Return
inet << inetbool
int << intint
varbit << intvarbit
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
<=Return
anyenum <= anyenumbool
bool <= boolbool
bool[] <= bool[]bool
box2d <= box2dbool
bpchar <= bpcharbool
bytes <= bytesbool
bytes[] <= bytes[]bool
collatedstring <= collatedstringbool
collatedstring{*} <= collatedstring{*}bool
date <= datebool
date <= timestampbool
date <= timestamptzbool
date[] <= date[]bool
decimal <= decimalbool
decimal <= floatbool
decimal <= intbool
decimal[] <= decimal[]bool
float <= decimalbool
float <= floatbool
float <= intbool
float[] <= float[]bool
geography <= geographybool
geometry <= geometrybool
inet <= inetbool
inet[] <= inet[]bool
int <= decimalbool
int <= floatbool
int <= intbool
int <= oidbool
int[] <= int[]bool
interval <= intervalbool
interval[] <= interval[]bool
jsonb <= jsonbbool
oid <= intbool
oid <= oidbool
pg_lsn <= pg_lsnbool
refcursor <= refcursorbool
string <= stringbool
string[] <= string[]bool
time <= timebool
time <= timetzbool
time[] <= time[]bool
timestamp <= datebool
timestamp <= timestampbool
timestamp <= timestamptzbool
timestamp[] <= timestamp[]bool
timestamptz <= datebool
timestamptz <= timestampbool
timestamptz <= timestamptzbool
timestamptz <= timestamptzbool
timetz <= timebool
timetz <= timetzbool
tuple <= tuplebool
uuid <= uuidbool
uuid[] <= uuid[]bool
varbit <= varbitbool
vector <= vectorbool
+ + + + +
<=>Return
vector <=> vectorfloat
+ + + + + +
<@Return
anyelement <@ anyelementbool
jsonb <@ jsonbbool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
=Return
anyenum = anyenumbool
bool = boolbool
bool[] = bool[]bool
box2d = box2dbool
bpchar = bpcharbool
bytes = bytesbool
bytes[] = bytes[]bool
collatedstring = collatedstringbool
collatedstring{*} = collatedstring{*}bool
date = datebool
date = timestampbool
date = timestamptzbool
date[] = date[]bool
decimal = decimalbool
decimal = floatbool
decimal = intbool
decimal[] = decimal[]bool
float = decimalbool
float = floatbool
float = intbool
float[] = float[]bool
geography = geographybool
geometry = geometrybool
inet = inetbool
inet[] = inet[]bool
int = decimalbool
int = floatbool
int = intbool
int = oidbool
int[] = int[]bool
interval = intervalbool
interval[] = interval[]bool
jsonb = jsonbbool
oid = intbool
oid = oidbool
pg_lsn = pg_lsnbool
refcursor = refcursorbool
string = stringbool
string[] = string[]bool
time = timebool
time = timetzbool
time[] = time[]bool
timestamp = datebool
timestamp = timestampbool
timestamp = timestamptzbool
timestamp[] = timestamp[]bool
timestamptz = datebool
timestamptz = timestampbool
timestamptz = timestamptzbool
timestamptz = timestamptzbool
timetz = timebool
timetz = timetzbool
tsquery = tsquerybool
tsvector = tsvectorbool
tuple = tuplebool
uuid = uuidbool
uuid[] = uuid[]bool
varbit = varbitbool
vector = vectorbool
+ + + + + + +
>>Return
inet >> inetbool
int >> intint
varbit >> intvarbit
+ + + + +
?Return
jsonb ? stringbool
+ + + + +
?&Return
jsonb ?& string[]bool
+ + + + +
?|Return
jsonb ?| string[]bool
+ + + + + +
@>Return
anyelement @> anyelementbool
jsonb @> jsonbbool
+ + + + + +
@@Return
tsquery @@ tsvectorbool
tsvector @@ tsquerybool
+ + + + +
ILIKEReturn
string ILIKE stringbool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
INReturn
anyenum IN tuplebool
bool IN tuplebool
box2d IN tuplebool
bpchar IN tuplebool
bytes IN tuplebool
collatedstring IN tuplebool
date IN tuplebool
decimal IN tuplebool
float IN tuplebool
geography IN tuplebool
geometry IN tuplebool
inet IN tuplebool
int IN tuplebool
interval IN tuplebool
jsonb IN tuplebool
oid IN tuplebool
pg_lsn IN tuplebool
refcursor IN tuplebool
string IN tuplebool
time IN tuplebool
timestamp IN tuplebool
timestamptz IN tuplebool
timetz IN tuplebool
tuple IN tuplebool
uuid IN tuplebool
varbit IN tuplebool
vector IN tuplebool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
IS NOT DISTINCT FROMReturn
anyelement IS NOT DISTINCT FROM unknownbool
anyenum IS NOT DISTINCT FROM anyenumbool
bool IS NOT DISTINCT FROM boolbool
bool[] IS NOT DISTINCT FROM bool[]bool
box2d IS NOT DISTINCT FROM box2dbool
bpchar IS NOT DISTINCT FROM bpcharbool
bytes IS NOT DISTINCT FROM bytesbool
bytes[] IS NOT DISTINCT FROM bytes[]bool
collatedstring IS NOT DISTINCT FROM collatedstringbool
collatedstring{*} IS NOT DISTINCT FROM collatedstring{*}bool
date IS NOT DISTINCT FROM datebool
date IS NOT DISTINCT FROM timestampbool
date IS NOT DISTINCT FROM timestamptzbool
date[] IS NOT DISTINCT FROM date[]bool
decimal IS NOT DISTINCT FROM decimalbool
decimal IS NOT DISTINCT FROM floatbool
decimal IS NOT DISTINCT FROM intbool
decimal[] IS NOT DISTINCT FROM decimal[]bool
float IS NOT DISTINCT FROM decimalbool
float IS NOT DISTINCT FROM floatbool
float IS NOT DISTINCT FROM intbool
float[] IS NOT DISTINCT FROM float[]bool
geography IS NOT DISTINCT FROM geographybool
geometry IS NOT DISTINCT FROM geometrybool
inet IS NOT DISTINCT FROM inetbool
inet[] IS NOT DISTINCT FROM inet[]bool
int IS NOT DISTINCT FROM decimalbool
int IS NOT DISTINCT FROM floatbool
int IS NOT DISTINCT FROM intbool
int IS NOT DISTINCT FROM oidbool
int[] IS NOT DISTINCT FROM int[]bool
interval IS NOT DISTINCT FROM intervalbool
interval[] IS NOT DISTINCT FROM interval[]bool
jsonb IS NOT DISTINCT FROM jsonbbool
oid IS NOT DISTINCT FROM intbool
oid IS NOT DISTINCT FROM oidbool
pg_lsn IS NOT DISTINCT FROM pg_lsnbool
refcursor IS NOT DISTINCT FROM refcursorbool
string IS NOT DISTINCT FROM stringbool
string[] IS NOT DISTINCT FROM string[]bool
time IS NOT DISTINCT FROM timebool
time IS NOT DISTINCT FROM timetzbool
time[] IS NOT DISTINCT FROM time[]bool
timestamp IS NOT DISTINCT FROM datebool
timestamp IS NOT DISTINCT FROM timestampbool
timestamp IS NOT DISTINCT FROM timestamptzbool
timestamp[] IS NOT DISTINCT FROM timestamp[]bool
timestamptz IS NOT DISTINCT FROM datebool
timestamptz IS NOT DISTINCT FROM timestampbool
timestamptz IS NOT DISTINCT FROM timestamptzbool
timestamptz IS NOT DISTINCT FROM timestamptzbool
timetz IS NOT DISTINCT FROM timebool
timetz IS NOT DISTINCT FROM timetzbool
tsquery IS NOT DISTINCT FROM tsquerybool
tsvector IS NOT DISTINCT FROM tsvectorbool
tuple IS NOT DISTINCT FROM tuplebool
unknown IS NOT DISTINCT FROM unknownbool
unknown IS NOT DISTINCT FROM voidbool
uuid IS NOT DISTINCT FROM uuidbool
uuid[] IS NOT DISTINCT FROM uuid[]bool
varbit IS NOT DISTINCT FROM varbitbool
vector IS NOT DISTINCT FROM vectorbool
void IS NOT DISTINCT FROM unknownbool
+ + + + +
LIKEReturn
string LIKE stringbool
+ + + + +
SIMILAR TOReturn
string SIMILAR TO stringbool
+ + + + + + + + +
^Return
decimal ^ decimaldecimal
decimal ^ intdecimal
float ^ floatfloat
int ^ decimaldecimal
int ^ intint
+ + + + + + +
|Return
inet | inetinet
int | intint
varbit | varbitvarbit
+ + + + + +
|/Return
|/decimaldecimal
|/floatfloat
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
||Return
bool || bool[]bool[]
bool || stringstring
bool[] || boolbool[]
bool[] || bool[]bool[]
box2d || box2dbox2d
box2d || stringstring
bytes || bytesbytes
bytes || bytes[]bytes[]
bytes[] || bytesbytes[]
bytes[] || bytes[]bytes[]
date || date[]date[]
date || stringstring
date[] || datedate[]
date[] || date[]date[]
decimal || decimal[]decimal[]
decimal || stringstring
decimal[] || decimaldecimal[]
decimal[] || decimal[]decimal[]
float || float[]float[]
float || stringstring
float[] || floatfloat[]
float[] || float[]float[]
geography || geographygeography
geography || stringstring
geometry || geometrygeometry
geometry || stringstring
inet || inet[]inet[]
inet || stringstring
inet[] || inetinet[]
inet[] || inet[]inet[]
int || int[]int[]
int || stringstring
int[] || intint[]
int[] || int[]int[]
interval || interval[]interval[]
interval || stringstring
interval[] || intervalinterval[]
interval[] || interval[]interval[]
jsonb || jsonbjsonb
jsonb || stringstring
oid || oidoid
oid || stringstring
pg_lsn || pg_lsnpg_lsn
pg_lsn || stringstring
refcursor || refcursorrefcursor
refcursor || stringstring
string || boolstring
string || box2dstring
string || datestring
string || decimalstring
string || floatstring
string || geographystring
string || geometrystring
string || inetstring
string || intstring
string || intervalstring
string || jsonbstring
string || oidstring
string || pg_lsnstring
string || refcursorstring
string || stringstring
string || string[]string[]
string || timestring
string || timestampstring
string || timestamptzstring
string || timetzstring
string || tuplestring
string || uuidstring
string || varbitstring
string[] || stringstring[]
string[] || string[]string[]
time || stringstring
time || time[]time[]
time[] || timetime[]
time[] || time[]time[]
timestamp || stringstring
timestamp || timestamp[]timestamp[]
timestamp[] || timestamptimestamp[]
timestamp[] || timestamp[]timestamp[]
timestamptz || stringstring
timestamptz || timestamptztimestamptz
timestamptz || timestamptztimestamptz
timestamptz || timestamptztimestamptz
timetz || stringstring
timetz || timetztimetz
tuple || stringstring
uuid || stringstring
uuid || uuid[]uuid[]
uuid[] || uuiduuid[]
uuid[] || uuid[]uuid[]
varbit || stringstring
varbit || varbitvarbit
+ + + + + +
||/Return
||/decimaldecimal
||/floatfloat
+ + + + + + + + + + + +
~Return
~inetinet
~intint
~varbitvarbit
box2d ~ box2dbool
box2d ~ geometrybool
geometry ~ box2dbool
geometry ~ geometrybool
string ~ stringbool
+ + + + +
~*Return
string ~* stringbool
diff --git a/src/current/_includes/cockroach-generated/release-25.1/sql/window_functions.md b/src/current/_includes/cockroach-generated/release-25.1/sql/window_functions.md new file mode 100644 index 00000000000..e1032ff82de --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.1/sql/window_functions.md @@ -0,0 +1,413 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cume_dist() → float

Calculates the relative rank of the current row: (number of rows preceding or peer with current row) / (total rows).

+
Immutable
dense_rank() → int

Calculates the rank of the current row without gaps; this function counts peer groups.

+
Immutable
first_value(val: bool) → bool

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: bytes) → bytes

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: date) → date

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: decimal) → decimal

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: float) → float

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: inet) → inet

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: int) → int

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: interval) → interval

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: string) → string

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: time) → time

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timestamp) → timestamp

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timestamptz) → timestamptz

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: uuid) → uuid

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: box2d) → box2d

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: geography) → geography

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: geometry) → geometry

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: jsonb) → jsonb

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: oid) → oid

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: pg_lsn) → pg_lsn

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: refcursor) → refcursor

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timetz) → timetz

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: varbit) → varbit

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
lag(val: bool) → bool

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: bool, n: int) → bool

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: bool, n: int, default: bool) → bool

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: bytes) → bytes

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: bytes, n: int) → bytes

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: bytes, n: int, default: bytes) → bytes

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: date) → date

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: date, n: int) → date

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: date, n: int, default: date) → date

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: decimal) → decimal

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: decimal, n: int) → decimal

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: decimal, n: int, default: decimal) → decimal

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: float) → float

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: float, n: int) → float

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: float, n: int, default: float) → float

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: inet) → inet

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: inet, n: int) → inet

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: inet, n: int, default: inet) → inet

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: int) → int

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: int, n: int) → int

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: int, n: int, default: int) → int

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: interval) → interval

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: interval, n: int) → interval

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: interval, n: int, default: interval) → interval

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: string) → string

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: string, n: int) → string

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: string, n: int, default: string) → string

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: time) → time

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: time, n: int) → time

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: time, n: int, default: time) → time

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timestamp) → timestamp

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timestamp, n: int, default: timestamp) → timestamp

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timestamptz) → timestamptz

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timestamptz, n: int, default: timestamptz) → timestamptz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: uuid) → uuid

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: uuid, n: int) → uuid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: uuid, n: int, default: uuid) → uuid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: box2d) → box2d

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: box2d, n: int) → box2d

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: box2d, n: int, default: box2d) → box2d

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: geography) → geography

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: geography, n: int) → geography

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: geography, n: int, default: geography) → geography

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: geometry) → geometry

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: geometry, n: int) → geometry

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: geometry, n: int, default: geometry) → geometry

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: jsonb) → jsonb

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: jsonb, n: int, default: jsonb) → jsonb

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: oid) → oid

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: oid, n: int) → oid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: oid, n: int, default: oid) → oid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: pg_lsn) → pg_lsn

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: pg_lsn, n: int, default: pg_lsn) → pg_lsn

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: refcursor) → refcursor

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: refcursor, n: int, default: refcursor) → refcursor

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timetz) → timetz

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timetz, n: int) → timetz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timetz, n: int, default: timetz) → timetz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: varbit) → varbit

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: varbit, n: int) → varbit

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: varbit, n: int, default: varbit) → varbit

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
last_value(val: bool) → bool

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: bytes) → bytes

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: date) → date

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: decimal) → decimal

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: float) → float

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: inet) → inet

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: int) → int

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: interval) → interval

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: string) → string

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: time) → time

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timestamp) → timestamp

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timestamptz) → timestamptz

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: uuid) → uuid

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: box2d) → box2d

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: geography) → geography

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: geometry) → geometry

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: jsonb) → jsonb

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: oid) → oid

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: pg_lsn) → pg_lsn

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: refcursor) → refcursor

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timetz) → timetz

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: varbit) → varbit

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
lead(val: bool) → bool

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: bool, n: int) → bool

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: bool, n: int, default: bool) → bool

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: bytes) → bytes

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: bytes, n: int) → bytes

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: bytes, n: int, default: bytes) → bytes

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: date) → date

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: date, n: int) → date

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: date, n: int, default: date) → date

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: decimal) → decimal

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: decimal, n: int) → decimal

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: decimal, n: int, default: decimal) → decimal

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: float) → float

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: float, n: int) → float

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: float, n: int, default: float) → float

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: inet) → inet

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: inet, n: int) → inet

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: inet, n: int, default: inet) → inet

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: int) → int

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: int, n: int) → int

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: int, n: int, default: int) → int

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: interval) → interval

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: interval, n: int) → interval

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: interval, n: int, default: interval) → interval

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: string) → string

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: string, n: int) → string

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: string, n: int, default: string) → string

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: time) → time

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: time, n: int) → time

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: time, n: int, default: time) → time

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timestamp) → timestamp

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timestamp, n: int, default: timestamp) → timestamp

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timestamptz) → timestamptz

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timestamptz, n: int, default: timestamptz) → timestamptz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: uuid) → uuid

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: uuid, n: int) → uuid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: uuid, n: int, default: uuid) → uuid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: box2d) → box2d

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: box2d, n: int) → box2d

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: box2d, n: int, default: box2d) → box2d

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: geography) → geography

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: geography, n: int) → geography

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: geography, n: int, default: geography) → geography

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: geometry) → geometry

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: geometry, n: int) → geometry

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: geometry, n: int, default: geometry) → geometry

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: jsonb) → jsonb

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: jsonb, n: int, default: jsonb) → jsonb

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: oid) → oid

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: oid, n: int) → oid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: oid, n: int, default: oid) → oid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: pg_lsn) → pg_lsn

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: pg_lsn, n: int, default: pg_lsn) → pg_lsn

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: refcursor) → refcursor

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: refcursor, n: int, default: refcursor) → refcursor

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timetz) → timetz

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timetz, n: int) → timetz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timetz, n: int, default: timetz) → timetz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: varbit) → varbit

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: varbit, n: int) → varbit

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: varbit, n: int, default: varbit) → varbit

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
nth_value(val: bool, n: int) → bool

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: bytes, n: int) → bytes

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: date, n: int) → date

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: decimal, n: int) → decimal

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: float, n: int) → float

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: inet, n: int) → inet

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: int, n: int) → int

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: interval, n: int) → interval

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: string, n: int) → string

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: time, n: int) → time

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: uuid, n: int) → uuid

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: box2d, n: int) → box2d

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: geography, n: int) → geography

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: geometry, n: int) → geometry

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: oid, n: int) → oid

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timetz, n: int) → timetz

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: varbit, n: int) → varbit

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
ntile(n: int) → int

Calculates an integer ranging from 1 to n, dividing the partition as equally as possible.

+
Immutable
percent_rank() → float

Calculates the relative rank of the current row: (rank - 1) / (total rows - 1).

+
Immutable
rank() → int

Calculates the rank of the current row with gaps; same as row_number of its first peer.

+
Immutable
row_number() → int

Calculates the number of the current row within its partition, counting from 1.

+
Immutable
+ diff --git a/src/current/_includes/cockroach-generated/release-25.2/eventlog.md b/src/current/_includes/cockroach-generated/release-25.2/eventlog.md new file mode 100644 index 00000000000..8960a62b59b --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.2/eventlog.md @@ -0,0 +1,3526 @@ +Certain notable events are reported using a structured format. +Commonly, these notable events are also copied to the table +`system.eventlog`, unless the cluster setting +`server.eventlog.enabled` is unset. + +Additionally, notable events are copied to specific external logging +channels in log messages, where they can be collected for further processing. + +The sections below document the possible notable event types +in this version of CockroachDB. For each event type, a table +documents the possible fields. A field may be omitted from +an event if its value is empty or zero. + +A field is also considered "Sensitive" if it may contain +application-specific information or personally identifiable information (PII). In that case, +the copy of the event sent to the external logging channel +will contain redaction markers in a format that is compatible +with the redaction facilities in [`cockroach debug zip`](cockroach-debug-zip.html) +and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html), +provided the `redactable` functionality is enabled on the logging sink. + +Events not documented on this page will have an unstructured format in log messages. + +## Changefeed telemetry events + +Events in this category pertain to changefeed usage and metrics. + +Events in this category are logged to the `TELEMETRY` channel. + + +### `changefeed_canceled` + +An event of type `changefeed_canceled` is an event for any changefeed cancellations. 
+ + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. | no | + +### `changefeed_emitted_bytes` + +An event of type `changefeed_emitted_bytes` is an event representing the bytes emitted by a changefeed over an interval. + + +| Field | Description | Sensitive | +|--|--|--| +| `EmittedBytes` | The number of bytes emitted. | no | +| `EmittedMessages` | The number of messages emitted. | no | +| `LoggingInterval` | The time period in nanoseconds between emitting telemetry events of this type (per-aggregator). | no | +| `Closing` | Flag to indicate that the changefeed is closing. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. 
| no | + +### `changefeed_failed` + +An event of type `changefeed_failed` is an event for any changefeed failure since the plan hook +was triggered. + + +| Field | Description | Sensitive | +|--|--|--| +| `FailureType` | The reason / environment with which the changefeed failed (ex: connection_closed, changefeed_behind). | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. | no | + +### `create_changefeed` + +An event of type `create_changefeed` is an event for any CREATE CHANGEFEED query that +successfully starts running. Failed CREATE statements will show up as +ChangefeedFailed events. + + +| Field | Description | Sensitive | +|--|--|--| +| `Transformation` | Flag representing whether the changefeed is using CDC queries. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). 
| no | +| `JobId` | The job id for enterprise changefeeds. | no | + +## Cluster-level events + +Events in this category pertain to an entire cluster and are +not relative to any particular tenant. + +In a multi-tenant setup, the `system.eventlog` table for individual +tenants cannot contain a copy of cluster-level events; conversely, +the `system.eventlog` table in the system tenant cannot contain the +SQL-level events for individual tenants. + +Events in this category are logged to the `OPS` channel. + + +### `certs_reload` + +An event of type `certs_reload` is recorded when the TLS certificates are +reloaded/rotated from disk. + + +| Field | Description | Sensitive | +|--|--|--| +| `Success` | Whether the operation completed without errors. | no | +| `ErrorMessage` | If an error was encountered, the text of the error. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `disk_slowness_cleared` + +An event of type `disk_slowness_cleared` is recorded when disk slowness in a store has cleared. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeID` | The node ID where the event was originated. | no | +| `StoreID` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `disk_slowness_detected` + +An event of type `disk_slowness_detected` is recorded when a store observes disk slowness +events. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeID` | The node ID where the event was originated. | no | +| `StoreID` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | + +### `low_disk_space` + +An event of type `low_disk_space` is emitted when a store is reaching capacity, as we reach +certain thresholds. It is emitted periodically while we are in a low disk +state. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeID` | The node ID where the event was originated. | no | +| `StoreID` | | no | +| `PercentThreshold` | The free space percent threshold that we went under. | no | +| `AvailableBytes` | | no | +| `TotalBytes` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `node_decommissioned` + +An event of type `node_decommissioned` is recorded when a node is marked as +decommissioned. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_decommissioning` + +An event of type `node_decommissioning` is recorded when a node is marked as +decommissioning. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_join` + +An event of type `node_join` is recorded when a node joins the cluster. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. 
Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_recommissioned` + +An event of type `node_recommissioned` is recorded when a decommissioning node is +recommissioned. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_restart` + +An event of type `node_restart` is recorded when an existing node rejoins the cluster +after being offline. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_shutdown_connection_timeout` + +An event of type `node_shutdown_connection_timeout` is recorded when SQL connections remain open +during shutdown, after waiting for the server.shutdown.connections.timeout +to transpire. + + +| Field | Description | Sensitive | +|--|--|--| +| `Detail` | The detailed message, meant to be a human-understandable explanation. | no | +| `ConnectionsRemaining` | The number of connections still open after waiting for the client to close them. 
| no | +| `TimeoutMillis` | The amount of time the server waited for the client to close the connections, defined by server.shutdown.connections.timeout. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_shutdown_transaction_timeout` + +An event of type `node_shutdown_transaction_timeout` is recorded when SQL transactions remain open +during shutdown, after waiting for the server.shutdown.transactions.timeout +to transpire. + + +| Field | Description | Sensitive | +|--|--|--| +| `Detail` | The detailed message, meant to be a human-understandable explanation. | no | +| `ConnectionsRemaining` | The number of connections still running SQL transactions after waiting for the client to end them. | no | +| `TimeoutMillis` | The amount of time the server waited for the client to close the connections, defined by server.shutdown.transactions.timeout. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `tenant_shared_service_start` + +An event of type `tenant_shared_service_start` is recorded when a tenant server +is started inside the same process as the KV layer. + + +| Field | Description | Sensitive | +|--|--|--| +| `OK` | Whether the startup was successful. 
| no | +| `ErrorText` | If the startup failed, the text of the error. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `TenantID` | The ID of the tenant owning the service. | no | +| `InstanceID` | The ID of the server instance. | no | +| `TenantName` | The name of the tenant at the time the event was emitted. | yes | + +### `tenant_shared_service_stop` + +An event of type `tenant_shared_service_stop` is recorded when a tenant server +is shut down inside the same process as the KV layer. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `TenantID` | The ID of the tenant owning the service. | no | +| `InstanceID` | The ID of the server instance. | no | +| `TenantName` | The name of the tenant at the time the event was emitted. | yes | + +## Debugging events + +Events in this category pertain to debugging operations performed by +operators or (more commonly) Cockroach Labs employees. These operations can +e.g. directly access and mutate internal state, breaking system invariants. + +Events in this category are logged to the `OPS` channel. + + +### `debug_recover_replica` + +An event of type `debug_recover_replica` is recorded when unsafe loss of quorum recovery is performed. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `RangeID` | | no | +| `StoreID` | | no | +| `SurvivorReplicaID` | | no | +| `UpdatedReplicaID` | | no | +| `StartKey` | | yes | +| `EndKey` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event originated. | no | +| `User` | The user which performed the operation. | yes | + +### `debug_send_kv_batch` + +An event of type `debug_send_kv_batch` is recorded when an arbitrary KV BatchRequest is submitted +to the cluster via the `debug send-kv-batch` CLI command. + + +| Field | Description | Sensitive | +|--|--|--| +| `BatchRequest` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event originated. | no | +| `User` | The user which performed the operation. | yes | + +## Health events + +Events in this category pertain to the health of one or more servers. + +Events in this category are logged to the `HEALTH` channel. + + +### `hot_ranges_stats` + +An event of type `hot_ranges_stats` + + +| Field | Description | Sensitive | +|--|--|--| +| `RangeID` | | no | +| `Qps` | | no | +| `SchemaName` | SchemaName is the name of the schema in which the index was created. | yes | +| `LeaseholderNodeID` | LeaseholderNodeID indicates the Node ID that is the current leaseholder for the given range. | no | +| `WritesPerSecond` | Writes per second is the recent number of keys written per second on this range. | no | +| `ReadsPerSecond` | Reads per second is the recent number of keys read per second on this range. 
| no | +| `WriteBytesPerSecond` | Write bytes per second is the recent number of bytes written per second on this range. | no | +| `ReadBytesPerSecond` | Read bytes per second is the recent number of bytes read per second on this range. | no | +| `CPUTimePerSecond` | CPU time per second is the recent cpu usage in nanoseconds of this range. | no | +| `Databases` | Databases for the range. | no | +| `Tables` | Tables for the range | no | +| `Indexes` | Indexes for the range | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `runtime_stats` + +An event of type `runtime_stats` is recorded every 10 seconds as server health metrics. + + +| Field | Description | Sensitive | +|--|--|--| +| `MemRSSBytes` | The process resident set size. Expressed as bytes. | no | +| `GoroutineCount` | The number of goroutines. | no | +| `MemStackSysBytes` | The stack system memory used. Expressed as bytes. | no | +| `GoAllocBytes` | The memory allocated by Go. Expressed as bytes. | no | +| `GoTotalBytes` | The total memory allocated by Go but not released. Expressed as bytes. | no | +| `GoStatsStaleness` | The staleness of the Go memory statistics. Expressed in seconds. | no | +| `HeapFragmentBytes` | The amount of heap fragmentation. Expressed as bytes. | no | +| `HeapReservedBytes` | The amount of heap reserved. Expressed as bytes. | no | +| `HeapReleasedBytes` | The amount of heap released. Expressed as bytes. | no | +| `CGoAllocBytes` | The memory allocated outside of Go. Expressed as bytes. | no | +| `CGoTotalBytes` | The total memory allocated outside of Go but not released. Expressed as bytes. | no | +| `CGoCallRate` | The total number of calls outside of Go over time. Expressed as operations per second. | no | +| `CPUUserPercent` | The user CPU percentage. 
| no | +| `CPUSysPercent` | The system CPU percentage. | no | +| `GCPausePercent` | The GC pause percentage. | no | +| `GCRunCount` | The total number of GC runs. | no | +| `NetHostRecvBytes` | The bytes received on all network interfaces since this process started. | no | +| `NetHostSendBytes` | The bytes sent on all network interfaces since this process started. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Job events + +Events in this category pertain to long-running jobs that are orchestrated by +a node's job registry. These system processes can create and/or modify stored +objects during the course of their execution. + +A job might choose to emit multiple events during its execution when +transitioning from one "state" to another. +E.g., IMPORT/RESTORE will emit events on job creation and successful +completion. If the job fails, events will be emitted on job creation, +failure, and successful revert. + +Events in this category are logged to the `OPS` channel. + + +### `import` + +An event of type `import` is recorded when an import job is created and upon its successful completion. +If the job fails, events will be emitted on job creation, failure, and +successful revert. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `JobID` | The ID of the job that triggered the event. | no | +| `JobType` | The type of the job that triggered the event. | no | +| `Description` | A description of the job that triggered the event. Some jobs populate the description with an approximate representation of the SQL statement run to create the job. | yes | +| `User` | The user account that triggered the event. 
| yes | +| `DescriptorIDs` | The object descriptors affected by the job. Set to zero for operations that don't affect descriptors. | yes | +| `Status` | The status of the job that triggered the event. This allows the job to indicate which phase of execution it is in when the event is triggered. | no | + +### `restore` + +An event of type `restore` is recorded when a restore job is created and upon its successful completion. +If the job fails, events will be emitted on job creation, failure, and +successful revert. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `JobID` | The ID of the job that triggered the event. | no | +| `JobType` | The type of the job that triggered the event. | no | +| `Description` | A description of the job that triggered the event. Some jobs populate the description with an approximate representation of the SQL statement run to create the job. | yes | +| `User` | The user account that triggered the event. | yes | +| `DescriptorIDs` | The object descriptors affected by the job. Set to zero for operations that don't affect descriptors. | yes | +| `Status` | The status of the job that triggered the event. This allows the job to indicate which phase of execution it is in when the event is triggered. | no | + +### `status_change` + +An event of type `status_change` is recorded when a job changes statuses. + + +| Field | Description | Sensitive | +|--|--|--| +| `JobID` | The ID of the job that is changing statuses. | no | +| `JobType` | The type of the job that is changing statuses. | no | +| `Description` | A human-parsable description of the status change | partially | +| `PreviousStatus` | The status that the job is transitioning out of | no | +| `NewStatus` | The status that the job has transitioned into | no | +| `RunNum` | The run number of the job. 
| no | +| `Error` | An error that may have occurred while the job was running. | yes | +| `FinalResumeErr` | An error that occurred that requires the job to be reverted. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Miscellaneous SQL events + +Events in this category report miscellaneous SQL events. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these miscellaneous events are +preserved in each tenant's own system.eventlog table. + +Events in this category are logged to the `OPS` channel. + + +### `set_cluster_setting` + +An event of type `set_cluster_setting` is recorded when a cluster setting is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SettingName` | The name of the affected cluster setting. | no | +| `Value` | The new value of the cluster setting. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `set_tenant_cluster_setting` + +An event of type `set_tenant_cluster_setting` is recorded when a cluster setting override +is changed, either for another tenant or for all tenants. + + +| Field | Description | Sensitive | +|--|--|--| +| `SettingName` | The name of the affected cluster setting. | no | +| `Value` | The new value of the cluster setting. | yes | +| `TenantId` | The target Tenant ID. Empty if targeting all tenants. | no | +| `AllTenants` | Whether the override applies to all tenants. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +## SQL Access Audit Events + +Events in this category are generated when a table has been +marked as audited via `ALTER TABLE ... 
EXPERIMENTAL_AUDIT SET`. + +Note: These events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SENSITIVE_ACCESS` channel. + + +### `admin_query` + +An event of type `admin_query` is recorded when a user with admin privileges (the user +is directly or indirectly a member of the admin role) executes a query. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. 
| no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `role_based_audit_event` + +An event of type `role_based_audit_event` is an audit event recorded when an executed query belongs to a user whose role +membership(s) correspond to any role that is enabled to emit an audit log via the sql.log.user_audit +cluster setting. + + +| Field | Description | Sensitive | +|--|--|--| +| `Role` | The configured audit role that emitted this log. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `sensitive_table_access` + +An event of type `sensitive_table_access` is recorded when an access is performed to +a table marked as audited. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table being audited. | yes | +| `AccessMode` | How the table was accessed (r=read / rw=read/write). | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +## SQL Execution Log + +Events in this category report executed queries. + +Note: These events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. 
+ +Events in this category are logged to the `SQL_EXEC` channel. + + +### `query_execute` + +An event of type `query_execute` is recorded when a query is executed, +and the cluster setting `sql.log.all_statements.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. 
| no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +## SQL Logical Schema Changes + +Events in this category pertain to DDL (Data Definition Language) +operations performed by SQL statements that modify the SQL logical +schema. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `SQL_SCHEMA` channel. + + +### `alter_database_add_region` + +An event of type `alter_database_add_region` is recorded when a region is added to a database. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `RegionName` | The region being added. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_drop_region` + +AlterDatabaseAddRegion is recorded when a region is added to a database. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `RegionName` | The region being dropped. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_placement` + +An event of type `alter_database_placement` is recorded when the database placement is modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `Placement` | The new placement policy. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_primary_region` + +An event of type `alter_database_primary_region` is recorded when a primary region is added/modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `PrimaryRegionName` | The new primary region. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_set_zone_config_extension` + +An event of type `alter_database_set_zone_config_extension` is recorded when a zone config extension is changed. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + +### `alter_database_survival_goal` + +An event of type `alter_database_survival_goal` is recorded when the survival goal is modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `SurvivalGoal` | The new survival goal | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_function_options` + +An event of type `alter_function_options` is recorded when a user-defined function's options are +altered. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the affected function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_index` + +An event of type `alter_index` is recorded when an index is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_index_visible` + +AlterIndex is recorded when an index visibility is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `NotVisible` | Set true if index is not visible. NOTE: THIS FIELD IS DEPRECATED in favor of invisibility. | no | +| `Invisibility` | The new invisibility of the affected index. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. 
The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_sequence` + +An event of type `alter_sequence` is recorded when a sequence is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the affected sequence. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_table` + +An event of type `alter_table` is recorded when a table is altered. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update, if any. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_type` + +EventAlterType is recorded when a user-defined type is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_column` + +An event of type `comment_on_column` is recorded when a column is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected column. | no | +| `ColumnName` | The affected column. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_constraint` + +An event of type `comment_on_constraint` is recorded when an constraint is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected constraint. | no | +| `ConstraintName` | The name of the affected constraint. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_database` + +CommentOnTable is recorded when a database is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_index` + +An event of type `comment_on_index` is recorded when an index is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_schema` + + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | Name of the affected schema. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_table` + +An event of type `comment_on_table` is recorded when a table is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_type` + +An event of type `comment_on_type` is recorded when a type is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_database` + +An event of type `create_database` is recorded when a database is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the new database. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_function` + +An event of type `create_function` is recorded when a user-defined function is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the created function. | no | +| `IsReplace` | If the new function is a replace of an existing function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_index` + +An event of type `create_index` is recorded when an index is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the new index. | no | +| `IndexName` | The name of the new index. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `create_policy` + +An event of type `create_policy` is recorded when a policy is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | Name of the policy's table. | no | +| `PolicyName` | Name of the created policy. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_schema` + +An event of type `create_schema` is recorded when a schema is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the new schema. | no | +| `Owner` | The name of the owner for the new schema. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_sequence` + +An event of type `create_sequence` is recorded when a sequence is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the new sequence. | no | +| `Owner` | The name of the owner for the new sequence. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_statistics` + +An event of type `create_statistics` is recorded when statistics are collected for a +table. + +Events of this type are only collected when the cluster setting +`sql.stats.post_events.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table for which the statistics were created. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_table` + +An event of type `create_table` is recorded when a table is created. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the new table. | no | +| `Owner` | The name of the owner for the new table. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_trigger` + +An event of type `create_trigger` is recorded when a trigger is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | Name of the trigger's table. | no | +| `TriggerName` | Name of the created trigger. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_type` + +An event of type `create_type` is recorded when a user-defined type is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the new type. | no | +| `Owner` | The name of the owner for the new type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_view` + +An event of type `create_view` is recorded when a view is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `ViewName` | The name of the new view. | no | +| `Owner` | The name of the owner of the new view. | no | +| `ViewQuery` | The SQL selection clause used to define the view. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_database` + +An event of type `drop_database` is recorded when a database is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | no | +| `DroppedSchemaObjects` | The names of the schemas dropped by a cascade operation. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_function` + +An event of type `drop_function` is recorded when a user-defined function is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the dropped function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_index` + +An event of type `drop_index` is recorded when an index is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_policy` + +An event of type `drop_policy` is recorded when a policy is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | Name of the policy's table. | no | +| `PolicyName` | Name of the dropped policy. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_schema` + +An event of type `drop_schema` is recorded when a schema is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_sequence` + +An event of type `drop_sequence` is recorded when a sequence is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the affected sequence. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_table` + +An event of type `drop_table` is recorded when a table is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_trigger` + +An event of type `drop_trigger` is recorded when a trigger is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | Name of the trigger's table. | no | +| `TriggerName` | Name of the dropped trigger. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_type` + +An event of type `drop_type` is recorded when a user-defined type is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_view` + +An event of type `drop_view` is recorded when a view is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `ViewName` | The name of the affected view. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `finish_schema_change` + +An event of type `finish_schema_change` is recorded when a previously initiated schema +change has completed. + + +| Field | Description | Sensitive | +|--|--|--| +| `LatencyNanos` | The amount of time the schema change job took to complete. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `finish_schema_change_rollback` + +An event of type `finish_schema_change_rollback` is recorded when a previously +initiated schema change rollback has completed. + + +| Field | Description | Sensitive | +|--|--|--| +| `LatencyNanos` | The amount of time the schema change job took to roll back. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. 
Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `force_delete_table_data_entry` + + + +| Field | Description | Sensitive | +|--|--|--| +| `DescriptorID` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_database` + +An event of type `rename_database` is recorded when a database is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The old name of the affected database. | no | +| `NewDatabaseName` | The new name of the affected database. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_function` + +An event of type `rename_function` is recorded when a user-defined function is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | The old name of the affected function. | no | +| `NewFunctionName` | The new name of the affected function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_schema` + +An event of type `rename_schema` is recorded when a schema is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The old name of the affected schema. | no | +| `NewSchemaName` | The new name of the affected schema. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_table` + +An event of type `rename_table` is recorded when a table, sequence or view is renamed. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The old name of the affected table. | no | +| `NewTableName` | The new name of the affected table. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_type` + +An event of type `rename_type` is recorded when a user-defined type is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The old name of the affected type. | no | +| `NewTypeName` | The new name of the affected type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `reverse_schema_change` + +An event of type `reverse_schema_change` is recorded when an in-progress schema change +encounters a problem and is reversed. + + +| Field | Description | Sensitive | +|--|--|--| +| `Error` | The error encountered that caused the schema change to be reversed. The specific format of the error is variable and can change across releases without warning. | partially | +| `SQLSTATE` | The SQLSTATE code for the error. | no | +| `LatencyNanos` | The amount of time the schema change job took before being reverted. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `set_schema` + +An event of type `set_schema` is recorded when a table, view, sequence or type's schema is changed. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `DescriptorName` | The old name of the affected descriptor. | no | +| `NewDescriptorName` | The new name of the affected descriptor. | no | +| `DescriptorType` | The descriptor type being changed (table, view, sequence, type). | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `truncate_table` + +An event of type `truncate_table` is recorded when a table is truncated. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_delete_descriptor` + +An event of type `unsafe_delete_descriptor` is recorded when a descriptor is written +using crdb_internal.unsafe_delete_descriptor(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. + + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | no | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. 
The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_delete_namespace_entry` + +An event of type `unsafe_delete_namespace_entry` is recorded when a namespace entry is +written using crdb_internal.unsafe_delete_namespace_entry(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. + + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | no | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_upsert_descriptor` + +An event of type `unsafe_upsert_descriptor` is recorded when a descriptor is written +using crdb_internal.unsafe_upsert_descriptor(). + + +| Field | Description | Sensitive | +|--|--|--| +| `PreviousDescriptor` | | yes | +| `NewDescriptor` | | yes | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_upsert_namespace_entry` + +An event of type `unsafe_upsert_namespace_entry` is recorded when a namespace entry is +written using crdb_internal.unsafe_upsert_namespace_entry(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | no | +| `PreviousID` | | no | +| `Force` | | no | +| `FailedValidation` | | no | +| `ValidationErrors` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +## SQL Privilege changes + +Events in this category pertain to DDL (Data Definition Language) +operations performed by SQL statements that modify the privilege +grants for stored objects. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `PRIVILEGES` channel. + + +### `alter_database_owner` + +An event of type `alter_database_owner` is recorded when a database's owner is changed. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database being affected. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_default_privileges` + +An event of type `alter_default_privileges` is recorded when default privileges are changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | yes | +| `RoleName` | Either role_name should be populated or for_all_roles should be true. The role having its default privileges altered. | yes | +| `ForAllRoles` | Identifies if FOR ALL ROLES is used. | no | +| `SchemaName` | The name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `alter_function_owner` + +An event of type `alter_function_owner` is recorded when the owner of a user-defined function is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | The name of the affected user-defined function. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_schema_owner` + +An event of type `alter_schema_owner` is recorded when a schema's owner is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_table_owner` + +An event of type `alter_table_owner` is recorded when the owner of a table, view or sequence is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected object. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_type_owner` + +An event of type `alter_type_owner` is recorded when the owner of a user-defined type is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | +| `Owner` | The name of the new owner. 
| yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `change_database_privilege` + +An event of type `change_database_privilege` is recorded when privileges are +added to / removed from a user for a database object. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. 
The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_function_privilege` + + + +| Field | Description | Sensitive | +|--|--|--| +| `FuncName` | The name of the affected function. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_schema_privilege` + +An event of type `change_schema_privilege` is recorded when privileges are added to / +removed from a user for a schema object. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. 
| yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_table_privilege` + +An event of type `change_table_privilege` is recorded when privileges are added to / removed +from a user for a table, sequence or view object. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_type_privilege` + +An event of type `change_type_privilege` is recorded when privileges are added to / +removed from a user for a type object. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +## SQL Session events + +Events in this category report SQL client connections +and sessions. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these miscellaneous events are +preserved in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `SESSIONS` channel. + + +### `client_authentication_failed` + +An event of type `client_authentication_failed` is reported when a client session +did not authenticate successfully. 
+ +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Reason` | The reason for the authentication failure. See below for possible values for type `AuthFailReason`. | no | +| `Detail` | The detailed error for the authentication failure. | partially | +| `Method` | The authentication method used. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_authentication_info` + +An event of type `client_authentication_info` is reported for intermediate +steps during the authentication process. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Method` | The authentication method used, once known. 
| no | +| `Info` | The authentication progress message. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_authentication_ok` + +An event of type `client_authentication_ok` is reported when a client session +was authenticated successfully. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Method` | The authentication method used. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. 
| no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_connection_end` + +An event of type `client_connection_end` is reported when a client connection +is closed. This is reported even when authentication +fails, and even for simple cancellation messages. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_connections.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Duration` | The duration of the connection in nanoseconds. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. 
| no | + +### `client_connection_start` + +An event of type `client_connection_start` is reported when a client connection +is established. This is reported even when authentication +fails, and even for simple cancellation messages. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_connections.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | + +### `client_session_end` + +An event of type `client_session_end` is reported when a client session +is completed. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Duration` | The duration of the connection in nanoseconds. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. 
| yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +## SQL Slow Query Log + +Events in this category report slow query execution. + +Note: these events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SQL_PERF` channel. + + +### `large_row` + +An event of type `large_row` is recorded when a statement tries to write a row larger than +cluster setting `sql.guardrails.max_row_size_log` to the database. Multiple +LargeRow events will be recorded for statements writing multiple large rows. +LargeRow events are recorded before the transaction commits, so in the case +of transaction abort there will not be a corresponding row in the database. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RowSize` | | no | +| `TableID` | | no | +| `FamilyID` | | no | +| `PrimaryKey` | | yes | + +### `slow_query` + +An event of type `slow_query` is recorded when a query triggers the "slow query" condition. 
+ +As of this writing, the condition requires: +- the cluster setting `sql.log.slow_query.latency_threshold` +set to a non-zero value, AND +- EITHER of the following conditions: +- the actual age of the query exceeds the configured threshold; AND/OR +- the query performs a full table/index scan AND the cluster setting +`sql.log.slow_query.experimental_full_table_scans.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. 
| partially |
+| `Age` | Age of the query in milliseconds. | no |
+| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no |
+| `FullTableScan` | Whether the query contains a full table scan. | no |
+| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no |
+| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no |
+| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no |
+| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no |
+
+### `txn_rows_read_limit`
+
+An event of type `txn_rows_read_limit` is recorded when a transaction tries to read more rows than
+cluster setting `sql.defaults.transaction_rows_read_log`. There will only be
+a single record for a single transaction (unless it is retried) even if there
+are more statements within the transaction that haven't been executed yet.
+
+
+
+
+#### Common fields
+
+| Field | Description | Sensitive |
+|--|--|--|
+| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no |
+| `EventType` | The type of the event. | no |
+| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially |
+| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no |
+| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends |
+| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no |
+| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +### `txn_rows_written_limit` + +An event of type `txn_rows_written_limit` is recorded when a transaction tries to write more rows +than cluster setting `sql.defaults.transaction_rows_written_log`. There will +only be a single record for a single transaction (unless it is retried) even +if there are more mutation statements within the transaction that haven't +been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +## SQL Slow Query Log (Internal) + +Events in this category report slow query execution by +internal executors, i.e., when CockroachDB internally issues +SQL statements. + +Note: these events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SQL_INTERNAL_PERF` channel. + + +### `large_row_internal` + +An event of type `large_row_internal` is recorded when an internal query tries to write a row +larger than cluster settings `sql.guardrails.max_row_size_log` or +`sql.guardrails.max_row_size_err` to the database. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RowSize` | | no | +| `TableID` | | no | +| `FamilyID` | | no | +| `PrimaryKey` | | yes | + +### `slow_query_internal` + +An event of type `slow_query_internal` is recorded when a query triggers the "slow query" condition, +and the cluster setting `sql.log.slow_query.internal_queries.enabled` is +set. +See the documentation for the event type `slow_query` for details about +the "slow query" condition. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. 
| no | + +### `txn_rows_read_limit_internal` + +An event of type `txn_rows_read_limit_internal` is recorded when an internal transaction tries to +read more rows than cluster setting `sql.defaults.transaction_rows_read_log` +or `sql.defaults.transaction_rows_read_err`. There will only be a single +record for a single transaction (unless it is retried) even if there are more +mutation statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. 
| no | + +### `txn_rows_written_limit_internal` + +An event of type `txn_rows_written_limit_internal` is recorded when an internal transaction tries to +write more rows than cluster setting +`sql.defaults.transaction_rows_written_log` or +`sql.defaults.transaction_rows_written_err`. There will only be a single +record for a single transaction (unless it is retried) even if there are more +mutation statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. 
| no | + +## SQL User and Role operations + +Events in this category pertain to SQL statements that modify the +properties of users and roles. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `USER_ADMIN` channel. + + +### `alter_role` + +An event of type `alter_role` is recorded when a role is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the affected user/role. | yes | +| `Options` | The options set on the user/role. | no | +| `SetInfo` | Information corresponding to an ALTER ROLE SET statement. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_role` + +An event of type `create_role` is recorded when a role is created. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the new user/role. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_role` + +An event of type `drop_role` is recorded when a role is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the affected user/role. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `grant_role` + +An event of type `grant_role` is recorded when a role is granted. + + +| Field | Description | Sensitive | +|--|--|--| +| `GranteeRoles` | The roles being granted to. | yes | +| `Members` | The roles being granted. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `password_hash_converted` + +An event of type `password_hash_converted` is recorded when the password credentials +are automatically converted server-side. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the user/role whose credentials have been converted. | yes | +| `OldMethod` | The previous hash method. | no | +| `NewMethod` | The new hash method. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Storage telemetry events + + + +Events in this category are logged to the `TELEMETRY` channel. + + +### `level_stats` + +An event of type `level_stats` contains per-level statistics for an LSM. + + +| Field | Description | Sensitive | +|--|--|--| +| `Level` | level is the level ID in a LSM (e.g. level(L0) == 0, etc.) | no | +| `NumFiles` | num_files is the number of files in the level (gauge). | no | +| `SizeBytes` | size_bytes is the size of the level, in bytes (gauge). | no | +| `Score` | score is the compaction score of the level (gauge). | no | +| `BytesIn` | bytes_in is the number of bytes written to this level (counter). | no | +| `BytesIngested` | bytes_ingested is the number of bytes ingested into this level (counter). | no | +| `BytesMoved` | bytes_moved is the number of bytes moved into this level via a move-compaction (counter). | no | +| `BytesRead` | bytes_read is the number of bytes read from this level, during compactions (counter). | no | +| `BytesCompacted` | bytes_compacted is the number of bytes written to this level during compactions (counter). | no | +| `BytesFlushed` | bytes flushed is the number of bytes flushed to this level. This value is always zero for levels other than L0 (counter). 
| no | +| `TablesCompacted` | tables_compacted is the count of tables compacted into this level (counter). | no | +| `TablesFlushed` | tables_flushed is the count of tables flushed into this level (counter). | no | +| `TablesIngested` | tables_ingested is the count of tables ingested into this level (counter). | no | +| `TablesMoved` | tables_moved is the count of tables moved into this level via move-compactions (counter). | no | +| `NumSublevels` | num_sublevel is the count of sublevels for the level. This value is always zero for levels other than L0 (gauge). | no | + + + +### `store_stats` + +An event of type `store_stats` contains per store stats. + +Note that because stats are scoped to the lifetime of the process, counters +(and certain gauges) will be reset across node restarts. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeId` | node_id is the ID of the node. | no | +| `StoreId` | store_id is the ID of the store. | no | +| `Levels` | levels is a nested message containing per-level statistics. | yes | +| `CacheSize` | cache_size is the size of the cache for the store, in bytes (gauge). | no | +| `CacheCount` | cache_count is the number of items in the cache (gauge). | no | +| `CacheHits` | cache_hits is the number of cache hits (counter). | no | +| `CacheMisses` | cache_misses is the number of cache misses (counter). | no | +| `CompactionCountDefault` | compaction_count_default is the count of default compactions (counter). | no | +| `CompactionCountDeleteOnly` | compaction_count_delete_only is the count of delete-only compactions (counter). | no | +| `CompactionCountElisionOnly` | compaction_count_elision_only is the count of elision-only compactions (counter). | no | +| `CompactionCountMove` | compaction_count_move is the count of move-compactions (counter). | no | +| `CompactionCountRead` | compaction_count_read is the count of read-compactions (counter). 
| no | +| `CompactionCountRewrite` | compaction_count_rewrite is the count of rewrite-compactions (counter). | no | +| `CompactionNumInProgress` | compactions_num_in_progress is the number of compactions in progress (gauge). | no | +| `CompactionMarkedFiles` | compaction_marked_files is the count of files marked for compaction (gauge). | no | +| `FlushCount` | flush_count is the number of flushes (counter). | no | +| `FlushIngestCount` | | no | +| `FlushIngestTableCount` | | no | +| `FlushIngestTableBytes` | | no | +| `IngestCount` | ingest_count is the number of successful ingest operations (counter). | no | +| `MemtableSize` | memtable_size is the total size allocated to all memtables and (large) batches, in bytes (gauge). | no | +| `MemtableCount` | memtable_count is the count of memtables (gauge). | no | +| `MemtableZombieCount` | memtable_zombie_count is the count of memtables no longer referenced by the current DB state, but still in use by an iterator (gauge). | no | +| `MemtableZombieSize` | memtable_zombie_size is the size, in bytes, of all zombie memtables (gauge). | no | +| `WalLiveCount` | wal_live_count is the count of live WAL files (gauge). | no | +| `WalLiveSize` | wal_live_size is the size, in bytes, of live data in WAL files. With WAL recycling, this value is less than the actual on-disk size of the WAL files (gauge). | no | +| `WalObsoleteCount` | wal_obsolete_count is the count of obsolete WAL files (gauge). | no | +| `WalObsoleteSize` | wal_obsolete_size is the size of obsolete WAL files, in bytes (gauge). | no | +| `WalPhysicalSize` | wal_physical_size is the size, in bytes, of the WAL files on disk (gauge). | no | +| `WalBytesIn` | wal_bytes_in is the number of logical bytes written to the WAL (counter). | no | +| `WalBytesWritten` | wal_bytes_written is the number of bytes written to the WAL (counter). 
| no | +| `TableObsoleteCount` | table_obsolete_count is the number of tables which are no longer referenced by the current DB state or any open iterators (gauge). | no | +| `TableObsoleteSize` | table_obsolete_size is the size, in bytes, of obsolete tables (gauge). | no | +| `TableZombieCount` | table_zombie_count is the number of tables no longer referenced by the current DB state, but are still in use by an open iterator (gauge). | no | +| `TableZombieSize` | table_zombie_size is the size, in bytes, of zombie tables (gauge). | no | +| `RangeKeySetsCount` | range_key_sets_count is the approximate count of internal range key sets in the store. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Telemetry events + + + +Events in this category are logged to the `TELEMETRY` channel. + + +### `captured_index_usage_stats` + +An event of type `captured_index_usage_stats` + + +| Field | Description | Sensitive | +|--|--|--| +| `TotalReadCount` | TotalReadCount is the number of times the index has been read. | no | +| `LastRead` | LastRead is the timestamp at which the index was last read. | no | +| `TableID` | TableID is the ID of the table on which the index was created. This is same as descpb.TableID and is unique within the cluster. | no | +| `IndexID` | IndexID is the ID of the index within the scope of the given table. | no | +| `DatabaseName` | DatabaseName is the name of the database in which the index was created. | no | +| `TableName` | TableName is the name of the table on which the index was created. | no | +| `IndexName` | IndexName is the name of the index within the scope of the given table. | no | +| `IndexType` | IndexType is the type of the index. Index types include "primary" and "secondary". | no | +| `IsUnique` | IsUnique indicates if the index has a UNIQUE constraint. 
| no | +| `IsInverted` | IsInverted indicates if the index is an inverted index. | no | +| `CreatedAt` | CreatedAt is the timestamp at which the index was created. | no | +| `SchemaName` | SchemaName is the name of the schema in which the index was created. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `m_v_c_c_iterator_stats` + +Internal storage iteration statistics for a single execution. + + +| Field | Description | Sensitive | +|--|--|--| +| `StepCount` | StepCount collects the number of times the iterator moved forward or backward over the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `StepCountInternal` | StepCountInternal collects the number of times the iterator moved forward or backward over LSM internal keys. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `SeekCount` | SeekCount collects the number of times the iterator moved to a specific key/value pair in the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `SeekCountInternal` | SeekCountInternal collects the number of times the iterator moved to a specific LSM internal key. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `BlockBytes` | BlockBytes collects the bytes in the loaded SSTable data blocks. For details, see pebble.InternalIteratorStats. | no | +| `BlockBytesInCache` | BlockBytesInCache collects the subset of BlockBytes in the block cache. For details, see pebble.InternalIteratorStats. | no | +| `KeyBytes` | KeyBytes collects the bytes in keys that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `ValueBytes` | ValueBytes collects the bytes in values that were iterated over. 
For details, see pebble.InternalIteratorStats. | no | +| `PointCount` | PointCount collects the count of point keys iterated over. For details, see pebble.InternalIteratorStats. | no | +| `PointsCoveredByRangeTombstones` | PointsCoveredByRangeTombstones collects the count of point keys that were iterated over that were covered by range tombstones. For details, see pebble.InternalIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeyCount` | RangeKeyCount collects the count of range keys encountered during iteration. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeyContainedPoints` | RangeKeyContainedPoints collects the count of point keys encountered within the bounds of a range key. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeySkippedPoints` | RangeKeySkippedPoints collects the count of the subset of ContainedPoints point keys that were skipped during iteration due to range-key masking. For details, see pkg/storage/engine.go, pebble.RangeKeyIteratorStats, and docs/tech-notes/mvcc-range-tombstones.md. | no | + + + +### `recovery_event` + +An event of type `recovery_event` is an event that is logged on every invocation of BACKUP, +RESTORE, and on every BACKUP schedule creation, with the appropriate subset +of fields populated depending on the type of event. This event is also +logged whenever a BACKUP and RESTORE job completes or fails. + + +| Field | Description | Sensitive | +|--|--|--| +| `RecoveryType` | RecoveryType is the type of recovery described by this event, which is one of - backup - scheduled_backup - create_schedule - restore

It can also be a job event corresponding to the recovery, which is one of - backup_job - scheduled_backup_job - restore_job | no | +| `TargetScope` | TargetScope is the largest scope of the targets that the user is backing up or restoring based on the following order: table < schema < database < full cluster. | no | +| `IsMultiregionTarget` | IsMultiregionTarget is true if any of the targets contain objects with multi-region primitives. | no | +| `TargetCount` | TargetCount is the number of targets in the BACKUP/RESTORE. | no | +| `DestinationSubdirType` | DestinationSubdirType is - latest: if using the latest subdir - standard: if using a date-based subdir - custom: if using a custom subdir that's not date-based | no | +| `DestinationStorageTypes` | DestinationStorageTypes are the types of storage that the user is backing up to or restoring from. | no | +| `DestinationAuthTypes` | DestinationAuthTypes are the types of authentication methods that the user is using to access the destination storage. | no | +| `IsLocalityAware` | IsLocalityAware indicates if the BACKUP or RESTORE is locality aware. | no | +| `AsOfInterval` | AsOfInterval is the time interval in nanoseconds between the statement timestamp and the timestamp resolved by the AS OF SYSTEM TIME expression. The interval is expressed in nanoseconds. | no | +| `WithRevisionHistory` | WithRevisionHistory is true if the BACKUP includes revision history. | no | +| `HasEncryptionPassphrase` | HasEncryptionPassphrase is true if the user provided an encryption passphrase to encrypt/decrypt their backup. | no | +| `KMSType` | KMSType is the type of KMS the user is using to encrypt/decrypt their backup. | no | +| `KMSCount` | KMSCount is the number of KMS the user is using. | no | +| `Options` | Options contain all the names of the options specified by the user in the BACKUP or RESTORE statement. For options that are accompanied by a value, only those with non-empty values will be present.

It's important to note that there are no option values anywhere in the event payload. Future changes to telemetry should refrain from adding values to the payload unless they are properly redacted. | no | +| `DebugPauseOn` | DebugPauseOn is the type of event that the restore should pause on for debugging purposes. Currently only "error" is supported. | no | +| `JobID` | JobID is the ID of the BACKUP/RESTORE job. | no | +| `ResultStatus` | ResultStatus indicates whether the job succeeded or failed. | no | +| `ErrorText` | ErrorText is the text of the error that caused the job to fail. | partially | +| `RecurringCron` | RecurringCron is the crontab for the incremental backup. | no | +| `FullBackupCron` | FullBackupCron is the crontab for the full backup. | no | +| `CustomFirstRunTime` | CustomFirstRunTime is the timestamp for the user configured first run time. Expressed as nanoseconds since the Unix epoch. | no | +| `OnExecutionFailure` | OnExecutionFailure describes the desired behavior if the schedule fails to execute. | no | +| `OnPreviousRunning` | OnPreviousRunning describes the desired behavior if the previously scheduled BACKUP is still running. | no | +| `IgnoreExistingBackup` | IgnoreExistingBackup is true iff the BACKUP schedule should still be created even if a backup is already present in its destination. | no | +| `ApplicationName` | The application name for the session where recovery event was created. | no | +| `NumRows` | NumRows is the number of rows successfully imported, backed up or restored. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `sampled_exec_stats` + +An event of type `sampled_exec_stats` contains execution statistics that apply to both statements +and transactions. These stats as a whole are collected using a sampling approach. 
+These exec stats are meant to contain the same fields as ExecStats in +apps_stats.proto but are for a single execution rather than aggregated executions. +Fields in this struct should be updated in sync with apps_stats.proto. + + +| Field | Description | Sensitive | +|--|--|--| +| `NetworkBytes` | NetworkBytes collects the number of bytes sent over the network by DistSQL components. | no | +| `MaxMemUsage` | MaxMemUsage collects the maximum memory usage that occurred on a node. | no | +| `ContentionTime` | ContentionTime collects the time in seconds statements in the transaction spent contending. | no | +| `NetworkMessages` | NetworkMessages collects the number of messages that were sent over the network by DistSQL components. | no | +| `MaxDiskUsage` | MaxDiskUsage collects the maximum temporary disk usage that occurred. This is set in cases where a query had to spill to disk, e.g. when performing a large sort where not all of the tuples fit in memory. | no | +| `CPUSQLNanos` | CPUSQLNanos collects the CPU time spent executing SQL operations in nanoseconds. Currently, it is only collected for statements without mutations that have a vectorized plan. | no | +| `MVCCIteratorStats` | Internal storage iteration statistics. | yes | + + + +### `sampled_query` + +An event of type `sampled_query` is the SQL query event logged to the telemetry channel. It +contains common SQL event/execution details. + + +| Field | Description | Sensitive | +|--|--|--| +| `SkippedQueries` | skipped_queries indicate how many SQL statements were not considered for sampling prior to this one. If the field is omitted, or its value is zero, this indicates that no statement was omitted since the last event. | no | +| `CostEstimate` | Cost of the query as estimated by the optimizer. | no | +| `Distribution` | The distribution of the DistSQL query plan (local, full, or partial). | no | +| `PlanGist` | The query's plan gist bytes as a base64 encoded string. 
| no | +| `SessionID` | SessionID is the ID of the session that initiated the query. | no | +| `Database` | Name of the database that initiated the query. | no | +| `StatementID` | Statement ID of the query. | no | +| `TransactionID` | Transaction ID of the query. | no | +| `MaxFullScanRowsEstimate` | Maximum number of rows scanned by a full scan, as estimated by the optimizer. | no | +| `TotalScanRowsEstimate` | Total number of rows read by all scans in the query, as estimated by the optimizer. | no | +| `OutputRowsEstimate` | The number of rows output by the query, as estimated by the optimizer. | no | +| `StatsAvailable` | Whether table statistics were available to the optimizer when planning the query. | no | +| `NanosSinceStatsCollected` | The maximum number of nanoseconds that have passed since stats were collected on any table scanned by this query. | no | +| `BytesRead` | The number of bytes read from disk. | no | +| `RowsRead` | The number of rows read from disk. | no | +| `RowsWritten` | The number of rows written. | no | +| `InnerJoinCount` | The number of inner joins in the query plan. | no | +| `LeftOuterJoinCount` | The number of left (or right) outer joins in the query plan. | no | +| `FullOuterJoinCount` | The number of full outer joins in the query plan. | no | +| `SemiJoinCount` | The number of semi joins in the query plan. | no | +| `AntiJoinCount` | The number of anti joins in the query plan. | no | +| `IntersectAllJoinCount` | The number of intersect all joins in the query plan. | no | +| `ExceptAllJoinCount` | The number of except all joins in the query plan. | no | +| `HashJoinCount` | The number of hash joins in the query plan. | no | +| `CrossJoinCount` | The number of cross joins in the query plan. | no | +| `IndexJoinCount` | The number of index joins in the query plan. | no | +| `LookupJoinCount` | The number of lookup joins in the query plan. | no | +| `MergeJoinCount` | The number of merge joins in the query plan. 
| no | +| `InvertedJoinCount` | The number of inverted joins in the query plan. | no | +| `ApplyJoinCount` | The number of apply joins in the query plan. | no | +| `ZigZagJoinCount` | The number of zig zag joins in the query plan. | no | +| `ContentionNanos` | The duration of time in nanoseconds that the query experienced contention. | no | +| `Regions` | The regions of the nodes where SQL processors ran. | no | +| `NetworkBytesSent` | The number of network bytes by DistSQL components. | no | +| `MaxMemUsage` | The maximum amount of memory usage by nodes for this query. | no | +| `MaxDiskUsage` | The maximum amount of disk usage by nodes for this query. | no | +| `KVBytesRead` | The number of bytes read at the KV layer for this query. | no | +| `KVPairsRead` | The number of key-value pairs read at the KV layer for this query. | no | +| `KVRowsRead` | The number of rows read at the KV layer for this query. | no | +| `NetworkMessages` | The number of network messages sent by nodes for this query by DistSQL components. | no | +| `IndexRecommendations` | Generated index recommendations for this query. | no | +| `ScanCount` | The number of scans in the query plan. | no | +| `ScanWithStatsCount` | The number of scans using statistics (including forecasted statistics) in the query plan. | no | +| `ScanWithStatsForecastCount` | The number of scans using forecasted statistics in the query plan. | no | +| `TotalScanRowsWithoutForecastsEstimate` | Total number of rows read by all scans in the query, as estimated by the optimizer without using forecasts. | no | +| `NanosSinceStatsForecasted` | The greatest quantity of nanoseconds that have passed since the forecast time (or until the forecast time, if it is in the future, in which case it will be negative) for any table with forecasted stats scanned by this query. | no | +| `Indexes` | The list of indexes used by this query. 
| no | +| `CpuTimeNanos` | Collects the cumulative CPU time spent executing SQL operations in nanoseconds. Currently, it is only collected for statements without mutations that have a vectorized plan. | no | +| `KvGrpcCalls` | The number of grpc calls done to get data from KV nodes | no | +| `KvTimeNanos` | Cumulated time spent waiting for a KV request. This includes disk IO time and potentially network time (if any of the keys are not local). | no | +| `ServiceLatencyNanos` | The time to service the query, from start of parse to end of execute. | no | +| `OverheadLatencyNanos` | The difference between service latency and the sum of parse latency + plan latency + run latency. | no | +| `RunLatencyNanos` | The time to run the query and fetch or compute the result rows. | no | +| `PlanLatencyNanos` | The time to transform the AST into a logical query plan. | no | +| `IdleLatencyNanos` | The time between statement executions in a transaction | no | +| `ParseLatencyNanos` | The time to transform the SQL string into an abstract syntax tree (AST). | no | +| `MvccStepCount` | StepCount collects the number of times the iterator moved forward or backward over the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccStepCountInternal` | StepCountInternal collects the number of times the iterator moved forward or backward over LSM internal keys. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccSeekCount` | SeekCount collects the number of times the iterator moved to a specific key/value pair in the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccSeekCountInternal` | SeekCountInternal collects the number of times the iterator moved to a specific LSM internal key. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. 
| no | +| `MvccBlockBytes` | BlockBytes collects the bytes in the loaded SSTable data blocks. For details, see pebble.InternalIteratorStats. | no | +| `MvccBlockBytesInCache` | BlockBytesInCache collects the subset of BlockBytes in the block cache. For details, see pebble.InternalIteratorStats. | no | +| `MvccKeyBytes` | KeyBytes collects the bytes in keys that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccValueBytes` | ValueBytes collects the bytes in values that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccPointCount` | PointCount collects the count of point keys iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccPointsCoveredByRangeTombstones` | PointsCoveredByRangeTombstones collects the count of point keys that were iterated over that were covered by range tombstones. For details, see pebble.InternalIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeyCount` | RangeKeyCount collects the count of range keys encountered during iteration. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeyContainedPoints` | RangeKeyContainedPoints collects the count of point keys encountered within the bounds of a range key. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeySkippedPoints` | RangeKeySkippedPoints collects the count of the subset of ContainedPoints point keys that were skipped during iteration due to range-key masking. For details, see pkg/storage/engine.go, pebble.RangeKeyIteratorStats, and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `SchemaChangerMode` | SchemaChangerMode is the mode that was used to execute the schema change, if any. | no | +| `SQLInstanceIDs` | SQLInstanceIDs is a list of all the SQL instances used in this statement's execution. 
| no | +| `KVNodeIDs` | KVNodeIDs is a list of all the KV nodes used in this statement's execution. | no | +| `StatementFingerprintID` | Statement fingerprint ID of the query. | no | +| `UsedFollowerRead` | UsedFollowerRead indicates whether at least some reads were served by the follower replicas. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. 
| no | +| `NumRetries` | Number of retries, when the txn was retried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `sampled_transaction` + +An event of type `sampled_transaction` is the event logged to telemetry at the end of transaction execution. + + +| Field | Description | Sensitive | +|--|--|--| +| `User` | User is the user account that triggered the transaction. The special usernames `root` and `node` are not considered sensitive. | depends | +| `ApplicationName` | ApplicationName is the application name for the session where the transaction was executed. This is included in the event to ease filtering of logging output by application. | no | +| `TxnCounter` | TxnCounter is the sequence number of the SQL transaction inside its session. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `TransactionID` | TransactionID is the id of the transaction. | no | +| `Committed` | Committed indicates if the transaction committed successfully. We want to include this value even if it is false. | no | +| `ImplicitTxn` | ImplicitTxn indicates if the transaction was an implicit one. We want to include this value even if it is false. | no | +| `StartTimeUnixNanos` | StartTimeUnixNanos is the time the transaction was started. Expressed as unix time in nanoseconds. | no | +| `EndTimeUnixNanos` | EndTimeUnixNanos is the time the transaction finished (either committed or aborted). Expressed as unix time in nanoseconds. 
| no | +| `ServiceLatNanos` | ServiceLatNanos is the time to service the whole transaction, from start to end of execution. | no | +| `SQLSTATE` | SQLSTATE is the SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | ErrorText is the text of the error if any. | partially | +| `NumRetries` | NumRetries is the number of times when the txn was retried automatically by the server. | no | +| `LastAutoRetryReason` | LastAutoRetryReason is a string containing the reason for the last automatic retry. | partially | +| `NumRows` | NumRows is the total number of rows returned across all statements. | no | +| `RetryLatNanos` | RetryLatNanos is the amount of time spent retrying the transaction. | no | +| `CommitLatNanos` | CommitLatNanos is the amount of time spent committing the transaction after all statement operations. | no | +| `IdleLatNanos` | IdleLatNanos is the amount of time spent waiting for the client to send statements while the transaction is open. | no | +| `BytesRead` | BytesRead is the number of bytes read from disk. | no | +| `RowsRead` | RowsRead is the number of rows read from disk. | no | +| `RowsWritten` | RowsWritten is the number of rows written to disk. | no | +| `SampledExecStats` | SampledExecStats is a nested field containing execution statistics. This field will be omitted if the stats were not sampled. | yes | +| `SkippedTransactions` | SkippedTransactions is the number of transactions that were skipped as part of sampling prior to this one. We only count skipped transactions when telemetry logging is enabled and the sampling mode is set to "transaction". | no | +| `TransactionFingerprintID` | TransactionFingerprintID is the fingerprint ID of the transaction. This can be used to find the transaction in the console. | no | +| `StatementFingerprintIDs` | StatementFingerprintIDs is an array of statement fingerprint IDs belonging to this transaction. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `schema_descriptor` + +An event of type `schema_descriptor` is an event for schema telemetry, whose purpose is +to take periodic snapshots of the cluster's SQL schema and publish them in +the telemetry log channel. For all intents and purposes, the data in such a +snapshot can be thought of the outer join of certain system tables: +namespace, descriptor, and at some point perhaps zones, etc. + +Snapshots are too large to conveniently be published as a single log event, +so instead they're broken down into SchemaDescriptor events which +contain the data in one record of this outer join projection. These events +are prefixed by a header (a SchemaSnapshotMetadata event). + + +| Field | Description | Sensitive | +|--|--|--| +| `SnapshotID` | SnapshotID is the unique identifier of the snapshot that this event is part of. | no | +| `ParentDatabaseID` | ParentDatabaseID matches the same key column in system.namespace. | no | +| `ParentSchemaID` | ParentSchemaID matches the same key column in system.namespace. | no | +| `Name` | Name matches the same key column in system.namespace. | no | +| `DescID` | DescID matches the 'id' column in system.namespace and system.descriptor. | no | +| `Desc` | Desc matches the 'descriptor' column in system.descriptor. Some contents of the descriptor may be redacted to prevent leaking PII. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `schema_snapshot_metadata` + +An event of type `schema_snapshot_metadata` is an event describing a schema snapshot, which +is a set of SchemaDescriptor messages sharing the same SnapshotID. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `SnapshotID` | SnapshotID is the unique identifier of this snapshot. | no | +| `NumRecords` | NumRecords is how many SchemaDescriptor events are in the snapshot. | no | +| `AsOfTimestamp` | AsOfTimestamp is when the snapshot was taken. This is equivalent to the timestamp given in the AS OF SYSTEM TIME clause when querying the namespace and descriptor tables in the system database. Expressed as nanoseconds since the Unix epoch. | no | +| `Errors` | Errors records any errors encountered when post-processing this snapshot, which includes the redaction of any potential PII. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Zone config events + +Events in this category pertain to zone configuration changes on +the SQL schema or system ranges. + +When zone configs apply to individual tables or other objects in a +SQL logical schema, they are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these zone config events are preserved +in each tenant's own `system.eventlog` table. + +When they apply to cluster-level ranges (e.g., the system zone config), +they are stored in the system tenant's own `system.eventlog` table. + +Events in this category are logged to the `OPS` channel. + + +### `remove_zone_config` + +An event of type `remove_zone_config` is recorded when a zone config is removed. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + +### `set_zone_config` + +An event of type `set_zone_config` is recorded when a zone config is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `ResolvedOldConfig` | The string representation of the resolved old zone config. This is not necessarily the same as the zone config that was previously set -- as it includes the resolved values of the zone config options. In other words, a zone config that hasn't been properly "set" yet (and inherits from its parent) will have a resolved_old_config that has details of the values it inherits from its parent. This is particularly useful to get a proper diff between the old and new zone config. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + + + + +## Enumeration types + +### `AuthFailReason` + +AuthFailReason is the inventory of possible reasons for an +authentication failure. + + +| Value | Textual alias in code or documentation | Description | +|--|--|--| +| 0 | UNKNOWN | is reported when the reason is unknown. | +| 1 | USER_RETRIEVAL_ERROR | occurs when there was an internal error accessing the principals. | +| 2 | USER_NOT_FOUND | occurs when the principal is unknown. | +| 3 | LOGIN_DISABLED | occurs when the user does not have LOGIN privileges. | +| 4 | METHOD_NOT_FOUND | occurs when no HBA rule matches or the method does not exist. | +| 5 | PRE_HOOK_ERROR | occurs when the authentication handshake encountered a protocol error. | +| 6 | CREDENTIALS_INVALID | occurs when the client-provided credentials were invalid. | +| 7 | CREDENTIALS_EXPIRED | occurs when the credentials provided by the client are expired. 
| +| 8 | NO_REPLICATION_ROLEOPTION | occurs when the connection requires a replication role option, but the user does not have it. | +| 9 | AUTHORIZATION_ERROR | is used for errors during the authorization phase. For example, this would include issues with mapping LDAP groups to SQL roles and granting those roles to the user. | + + + diff --git a/src/current/_includes/cockroach-generated/release-25.2/logformats.md b/src/current/_includes/cockroach-generated/release-25.2/logformats.md new file mode 100644 index 00000000000..89580c9fc15 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.2/logformats.md @@ -0,0 +1,382 @@ + +The supported log output formats are documented below. + + +- [`crdb-v1`](#format-crdb-v1) + +- [`crdb-v1-count`](#format-crdb-v1-count) + +- [`crdb-v1-tty`](#format-crdb-v1-tty) + +- [`crdb-v1-tty-count`](#format-crdb-v1-tty-count) + +- [`crdb-v2`](#format-crdb-v2) + +- [`crdb-v2-tty`](#format-crdb-v2-tty) + +- [`json`](#format-json) + +- [`json-compact`](#format-json-compact) + +- [`json-fluent`](#format-json-fluent) + +- [`json-fluent-compact`](#format-json-fluent-compact) + + + +## Format `crdb-v1` + + +This is a legacy file format used from CockroachDB v1.0. + +Each log entry is emitted using a common prefix, described below, followed by: + +- The logging context tags enclosed between `[` and `]`, if any. It is possible + for this to be omitted if there were no context tags. +- Optionally, a counter column, if the option 'show-counter' is enabled. See below for details. +- the text of the log entry. + +Beware that the text of the log entry can span multiple lines. +The following caveats apply: + +- The text of the log entry can start with text enclosed between `[` and `]`. + If there were no logging tags to start with, it is not possible to distinguish between + logging context tag information and a `[...]` string in the main text of the + log entry. This means that this format is ambiguous. 
+ + To remove this ambiguity, you can use the option 'show-counter'. + +- The text of the log entry can embed arbitrary application-level strings, + including strings that represent log entries. In particular, an accident + of implementation can cause the common entry prefix (described below) + to also appear on a line of its own, as part of the payload of a previous + log entry. There is no automated way to recognize when this occurs. + Care must be taken by a human observer to recognize these situations. + +- The log entry parser provided by CockroachDB to read log files is faulty + and is unable to recognize the aforementioned pitfall; nor can it read + entries larger than 64KiB successfully. Generally, use of this internal + log entry parser is discouraged for entries written with this format. + +See the newer format `crdb-v2` for an alternative +without these limitations. + +### Header lines + +At the beginning of each file, a header is printed using a similar format as +regular log entries. This header reports when the file was created, +which parameters were used to start the server, the server identifiers +if known, and other metadata about the running process. + +- This header appears to be logged at severity `INFO` (with an `I` prefix + at the start of the line) even though it does not really have a severity. +- The header is printed unconditionally even when a filter is configured to + omit entries at the `INFO` level. + +### Common log entry prefix + +Each line of output starts with the following prefix: + + Lyymmdd hh:mm:ss.uuuuuu goid [chan@]file:line marker tags counter + +Reminder, the tags may be omitted; and the counter is only printed if the option +'show-counter' is specified. 
+ +| Field | Description | +|-----------------|--------------------------------------------------------------------------------------------------------------------------------------| +| L | A single character, representing the [log level](logging.html#logging-levels-severities) (e.g., `I` for `INFO`). | +| yy | The year (zero padded; i.e., 2016 is `16`). | +| mm | The month (zero padded; i.e., May is `05`). | +| dd | The day (zero padded). | +| hh:mm:ss.uuuuuu | Time in hours, minutes and fractional seconds. Timezone is UTC. | +| goid | The goroutine id (omitted if zero for use by tests). | +| chan | The channel number (omitted if zero for backward compatibility). | +| file | The file name where the entry originated. | +| line | The line number where the entry originated. | +| marker | Redactability marker "⋮" (see below for details). | +| tags | The logging tags, enclosed between `[` and `]`. May be absent. | +| counter | The entry counter. Only included if 'show-counter' is enabled. | + +The redactability marker can be empty; in this case, its position in the common prefix is +a double ASCII space character which can be used to reliably identify this situation. + +If the marker "⋮" is present, the remainder of the log entry +contains delimiters (‹...›) around +fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) when log redaction is requested. + + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `show-counter` | Whether to include the counter column in the line header. Without it, the format may be ambiguous due to the optionality of tags. | +| `colors` | The color profile to use. Possible values: none, auto, ansi, 256color. Default is auto. | +| `timezone` | The timezone to use for the timestamp column. 
The value can be any timezone name recognized by the Go standard library. Default is `UTC` | + + + +## Format `crdb-v1-count` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: true` +- `colors: none` + + +## Format `crdb-v1-tty` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: false` +- `colors: auto` + + +## Format `crdb-v1-tty-count` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: true` +- `colors: auto` + + +## Format `crdb-v2` + +This is the main file format used from CockroachDB v21.1. + +Each log entry is emitted using a common prefix, described below, +followed by the text of the log entry. + +### Entry format + +Each line of output starts with the following prefix: + + Lyymmdd hh:mm:ss.uuuuuu goid [chan@]file:line marker [tags...] counter cont + +| Field | Description | +|-----------------|--------------------------------------------------------------------------------------------------------------------------------------| +| L | A single character, representing the [log level](logging.html#logging-levels-severities) (e.g., `I` for `INFO`). | +| yy | The year (zero padded; i.e., 2016 is `16`). | +| mm | The month (zero padded; i.e., May is `05`). | +| dd | The day (zero padded). | +| hh:mm:ss.uuuuuu | Time in hours, minutes and fractional seconds. Timezone is UTC. | +| goid | The goroutine id (zero when cannot be determined). | +| chan | The channel number (omitted if zero for backward compatibility). | +| file | The file name where the entry originated. Also see below. | +| line | The line number where the entry originated. | +| marker | Redactability marker "⋮" (see below for details). | +| tags | The logging tags, enclosed between `[` and `]`. See below. | +| counter | The optional entry counter (see below for details). 
| +| cont | Continuation mark for structured and multi-line entries. See below. | + +The `chan@` prefix before the file name indicates the logging channel, +and is omitted if the channel is `DEV`. + +The file name may be prefixed by the string `(gostd) ` to indicate +that the log entry was produced inside the Go standard library, instead +of a CockroachDB component. Entry parsers must be configured to ignore this prefix +when present. + +`marker` can be empty; in this case, its position in the common prefix is +a double ASCII space character which can be used to reliably identify this situation. +If the marker "⋮" is present, the remainder of the log entry +contains delimiters (‹...›) +around fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) +when log redaction is requested. + +The logging `tags` are enclosed between square brackets `[...]`, +and the syntax `[-]` is used when there are no logging tags +associated with the log entry. + +`counter` is numeric, and is incremented for every +log entry emitted to this sink. (There is thus one counter sequence per +sink.) For entries that do not have a counter value +associated (e.g., header entries in file sinks), the counter position +in the common prefix is empty: `tags` is then +followed by two ASCII space characters, instead of one space; the `counter`, +and another space. The presence of the two ASCII spaces indicates +reliably that no counter was present. + +`cont` is a format/continuation indicator: + +| Continuation indicator | ASCII | Description | +|------------------------|-------|--| +| space | 0x20 | Start of an unstructured entry. | +| equal sign, "=" | 0x3d | Start of a structured entry. | +| exclamation mark, "!" | 0x21 | Start of an embedded stack trace. | +| plus sign, "+" | 0x2b | Continuation of a multi-line entry. 
The payload contains a newline character at this position. | +| vertical bar | 0x7c | Continuation of a large entry. | + +### Examples + +Example single-line unstructured entry: + +~~~ +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 started with engine type ‹2› +~~~ + +Example multi-line unstructured entry: + +~~~ +I210116 21:49:17.083093 14 1@cli/start.go:690 ⋮ [-] 40 node startup completed: +I210116 21:49:17.083093 14 1@cli/start.go:690 ⋮ [-] 40 +CockroachDB node starting at 2021-01-16 21:49 (took 0.0s) +~~~ + +Example structured entry: + +~~~ +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 ={"Timestamp":1610833757080706620,"EventType":"node_restart"} +~~~ + +Example long entries broken up into multiple lines: + +~~~ +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.... +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 |aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +~~~ + +~~~ +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 ={"Timestamp":1610833757080706620,"EventTy... +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 |pe":"node_restart"} +~~~ + +### Backward-compatibility notes + +Entries in this format can be read by most `crdb-v1` log parsers, +in particular the one included in the DB console and +also the [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) +facility. + +However, implementers of previous version parsers must +understand that the logging tags field is now always +included, and the lack of logging tags is included +by a tag string set to `[-]`. + +Likewise, the entry counter is now also always included, +and there is a special character after `counter` +to indicate whether the remainder of the line is a +structured entry, or a continuation of a previous entry. + +Finally, in the previous format, structured entries +were prefixed with the string `Structured entry: `. In +the new format, they are prefixed by the `=` continuation +indicator. 
+ + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `colors` | The color profile to use. Possible values: none, auto, ansi, 256color. Default is auto. | +| `timezone` | The timezone to use for the timestamp column. The value can be any timezone name recognized by the Go standard library. Default is `UTC` | + + + +## Format `crdb-v2-tty` + +This format name is an alias for 'crdb-v2' with +the following format option defaults: + +- `colors: auto` + + +## Format `json` + +This format emits log entries as a JSON payload. + +The JSON object is guaranteed to not contain unescaped newlines +or other special characters, and the entry as a whole is followed +by a newline character. This makes the format suitable for +processing over a stream unambiguously. + +Each entry contains at least the following fields: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `tag` | `tag` | (Only if the option `fluent-tag: true` is given.) A Fluent tag for the event, formed by the process name and the logging channel. | +| `d` | `datetime` | The pretty-printed date/time of the event timestamp, if enabled via options. | +| `f` | `file` | The name of the source file where the event was emitted. | +| `g` | `goroutine` | The identifier of the goroutine where the event was emitted. | +| `l` | `line` | The line number where the event was emitted in the source. | +| `r` | `redactable` | Whether the payload is redactable (see below for details). | +| `t` | `timestamp` | The timestamp at which the event was emitted on the logging channel. | +| `v` | `version` | The binary version with which the event was generated. 
| + +After a couple of *header* entries written at the beginning of each log sink, +all subsequent log entries also contain the following fields: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `C` | `channel` | The name of the logging channel where the event was sent. | +| `sev` | `severity` | The severity of the event. | +| `c` | `channel_numeric` | The numeric identifier for the logging channel where the event was sent. | +| `n` | `entry_counter` | The entry number on this logging sink, relative to the last process restart. | +| `s` | `severity_numeric` | The numeric value of the severity of the event. | + + +Additionally, the following fields are conditionally present: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `N` | `node_id` | The node ID where the event was generated, once known. Only reported for single-tenant or KV servers. | +| `x` | `cluster_id` | The cluster ID where the event was generated, once known. Only reported for single-tenant or KV servers. | +| `q` | `instance_id` | The SQL instance ID where the event was generated, once known. | +| `T` | `tenant_id` | The SQL tenant ID where the event was generated, once known. | +| `V` | `tenant_name` | The SQL virtual cluster where the event was generated, once known. | +| `tags` | `tags` | The logging context tags for the entry, if there were context tags. | +| `message` | `message` | For unstructured events, the flat text payload. | +| `event` | `event` | The logging event, if structured (see below for details). | +| `stacks` | `stacks` | Goroutine stacks, for fatal events. | + +When an entry is structured, the `event` field maps to a dictionary +whose structure is one of the documented structured events. 
See the [reference documentation](eventlog.html) +for structured events for a list of possible payloads. + +When the entry is marked as `redactable`, the `tags`, `message`, and/or `event` payloads +contain delimiters (‹...›) around +fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) when log redaction is requested. + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `datetime-format` | The format to use for the `datetime` field. The value can be one of `none`, `iso8601`/`rfc3339` (synonyms), or `rfc1123`. Default is `none`. | +| `datetime-timezone` | The timezone to use for the `datetime` field. The value can be any timezone name recognized by the Go standard library. Default is `UTC` | +| `tag-style` | The tags to include in the envelope. The value can be `compact` (one letter tags) or `verbose` (long-form tags). Default is `verbose`. | +| `fluent-tag` | Whether to produce an additional field called `tag` for Fluent compatibility. Default is `false`. 
| + + +## Format `json-compact` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: false` +- `tag-style: compact` + + +## Format `json-fluent` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: true` +- `tag-style: verbose` + + +## Format `json-fluent-compact` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: true` +- `tag-style: compact` + + diff --git a/src/current/_includes/cockroach-generated/release-25.2/logging.md b/src/current/_includes/cockroach-generated/release-25.2/logging.md new file mode 100644 index 00000000000..8b628dc2dd9 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.2/logging.md @@ -0,0 +1,179 @@ +## Logging levels (severities) + +### INFO + +The `INFO` severity is used for informational messages that do not +require action. + +### WARNING + +The `WARNING` severity is used for situations which may require special handling, +where normal operation is expected to resume automatically. + +### ERROR + +The `ERROR` severity is used for situations that require special handling, +where normal operation could not proceed as expected. +Other operations can continue mostly unaffected. + +### FATAL + +The `FATAL` severity is used for situations that require an immediate, hard +server shutdown. A report is also sent to telemetry if telemetry +is enabled. + + +## Logging channels + +### `DEV` + +The `DEV` channel is used during development to collect log +details useful for troubleshooting that fall outside the +scope of other channels. It is also the default logging +channel for events not associated with a channel. + +This channel is special in that there are no constraints as to +what may or may not be logged on it. 
Conversely, users in +production deployments are invited to not collect `DEV` logs in +centralized logging facilities, because they likely contain +sensitive operational data. +See [Configure logs](configure-logs.html#dev-channel). + +### `OPS` + +The `OPS` channel is used to report "point" operational events, +initiated by user operators or automation: + + - Operator or system actions on server processes: process starts, + stops, shutdowns, crashes (if they can be logged), + including each time: command-line parameters, current version being run + - Actions that impact the topology of a cluster: node additions, + removals, decommissions, etc. + - Job-related initiation or termination + - [Cluster setting](cluster-settings.html) changes + - [Zone configuration](configure-replication-zones.html) changes + +### `HEALTH` + +The `HEALTH` channel is used to report "background" operational +events, initiated by CockroachDB or reporting on automatic processes: + + - Current resource usage, including critical resource usage + - Node-node connection events, including connection errors and + gossip details + - Range and table leasing events + - Up- and down-replication, range unavailability + +### `STORAGE` + +The `STORAGE` channel is used to report low-level storage +layer events (RocksDB/Pebble). + +### `SESSIONS` + +The `SESSIONS` channel is used to report client network activity when enabled via +the `server.auth_log.sql_connections.enabled` and/or +`server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): + + - Connections opened/closed + - Authentication events: logins, failed attempts + - Session and query cancellation + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. 
+ +### `SQL_SCHEMA` + +The `SQL_SCHEMA` channel is used to report changes to the +SQL logical schema, excluding privilege and ownership changes +(which are reported separately on the `PRIVILEGES` channel) and +zone configuration changes (which go to the `OPS` channel). + +This includes: + + - Database/schema/table/sequence/view/type creation + - Adding/removing/changing table columns + - Changing sequence parameters + +`SQL_SCHEMA` events generally comprise changes to the schema that affect the +functional behavior of client apps using stored objects. + +### `USER_ADMIN` + +The `USER_ADMIN` channel is used to report changes +in users and roles, including: + + - Users added/dropped + - Changes to authentication credentials (e.g., passwords, validity, etc.) + - Role grants/revocations + - Role option grants/revocations + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. + +### `PRIVILEGES` + +The `PRIVILEGES` channel is used to report data +authorization changes, including: + + - Privilege grants/revocations on database, objects, etc. + - Object ownership changes + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. + +### `SENSITIVE_ACCESS` + +The `SENSITIVE_ACCESS` channel is used to report SQL +data access to sensitive data: + + - Data access audit events (when table audit is enabled via + [ALTER TABLE ... EXPERIMENTAL_AUDIT](alter-table.html#experimental_audit)) + - Data access audit events (when role-based audit is enabled via + [`sql.log.user_audit` cluster setting](role-based-audit-logging.html#syntax-of-audit-settings)) + - SQL statements executed by users with the admin role + - Operations that write to system tables + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. 
+ +### `SQL_EXEC` + +The `SQL_EXEC` channel is used to report SQL execution on +behalf of client connections: + + - Logical SQL statement executions (when enabled via the + `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) + - uncaught Go panic errors during the execution of a SQL statement. + +### `SQL_PERF` + +The `SQL_PERF` channel is used to report SQL executions +that are marked as "out of the ordinary" +to facilitate performance investigations. +This includes the SQL "slow query log". + +Arguably, this channel overlaps with `SQL_EXEC`. +However, we keep both channels separate for backward compatibility +with versions prior to v21.1, where the corresponding events +were redirected to separate files. + +### `SQL_INTERNAL_PERF` + +The `SQL_INTERNAL_PERF` channel is like the `SQL_PERF` channel, but is aimed at +helping developers of CockroachDB itself. It exists as a separate +channel so as to not pollute the `SQL_PERF` logging output with +internal troubleshooting details. + +### `TELEMETRY` + +The `TELEMETRY` channel reports telemetry events. Telemetry events describe +feature usage within CockroachDB and anonymizes any application- +specific data. + +### `KV_DISTRIBUTION` + +The `KV_DISTRIBUTION` channel is used to report data distribution events, such as moving +replicas between stores in the cluster, or adding (removing) replicas to +ranges. 
+ diff --git a/src/current/_includes/cockroach-generated/release-25.2/settings/settings.html b/src/current/_includes/cockroach-generated/release-25.2/settings/settings.html new file mode 100644 index 00000000000..92e53189ec5 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.2/settings/settings.html @@ -0,0 +1,383 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
SettingTypeDefaultDescriptionSupported Deployments
admission.disk_bandwidth_tokens.elastic.enabled
booleantruewhen true, and provisioned bandwidth for the disk corresponding to a store is configured, tokens for elastic work will be limited if disk bandwidth becomes a bottleneckDedicated/Self-Hosted
admission.epoch_lifo.enabled
booleanfalsewhen true, epoch-LIFO behavior is enabled when there is significant delay in admissionServerless/Dedicated/Self-Hosted
admission.epoch_lifo.epoch_closing_delta_duration
duration5msthe delta duration before closing an epoch, for epoch-LIFO admission control orderingServerless/Dedicated/Self-Hosted
admission.epoch_lifo.epoch_duration
duration100msthe duration of an epoch, for epoch-LIFO admission control orderingServerless/Dedicated/Self-Hosted
admission.epoch_lifo.queue_delay_threshold_to_switch_to_lifo
duration105msthe queue delay encountered by a (tenant,priority) for switching to epoch-LIFO orderingServerless/Dedicated/Self-Hosted
admission.kv.enabled
booleantruewhen true, work performed by the KV layer is subject to admission controlDedicated/Self-Hosted
admission.sql_kv_response.enabled
booleantruewhen true, work performed by the SQL layer when receiving a KV response is subject to admission controlServerless/Dedicated/Self-Hosted
admission.sql_sql_response.enabled
booleantruewhen true, work performed by the SQL layer when receiving a DistSQL response is subject to admission controlServerless/Dedicated/Self-Hosted
bulkio.backup.deprecated_full_backup_with_subdir.enabled
booleanfalsewhen true, a backup command with a user specified subdirectory will create a full backup at the subdirectory if no backup already exists at that subdirectoryServerless/Dedicated/Self-Hosted
bulkio.backup.file_size
byte size128 MiBtarget size for individual data files produced during BACKUPServerless/Dedicated/Self-Hosted
bulkio.backup.read_timeout
duration5m0samount of time after which a read attempt is considered timed out, which causes the backup to failServerless/Dedicated/Self-Hosted
bulkio.backup.read_with_priority_after
duration1m0samount of time since the read-as-of time above which a BACKUP should use priority when retrying readsServerless/Dedicated/Self-Hosted
physical_replication.consumer.minimum_flush_interval
(alias: bulkio.stream_ingestion.minimum_flush_interval)
duration5sthe minimum timestamp between flushes; flushes may still occur if internal buffers fill upDedicated/Self-Hosted
changefeed.aggregator.flush_jitter
float0.1jitter aggregator flushes as a fraction of min_checkpoint_frequency. This setting has no effect if min_checkpoint_frequency is set to 0.Serverless/Dedicated/Self-Hosted
changefeed.backfill.concurrent_scan_requests
integer0number of concurrent scan requests per node issued during a backfillServerless/Dedicated/Self-Hosted
changefeed.backfill.scan_request_size
integer524288the maximum number of bytes returned by each scan requestServerless/Dedicated/Self-Hosted
changefeed.batch_reduction_retry.enabled
(alias: changefeed.batch_reduction_retry_enabled)
booleanfalseif true, kafka changefeeds upon erroring on an oversized batch will attempt to resend the messages with progressively lower batch sizesServerless/Dedicated/Self-Hosted
changefeed.default_range_distribution_strategy
enumerationdefaultconfigures how work is distributed among nodes for a given changefeed. for the most balanced distribution, use `balanced_simple`. changing this setting will not override locality restrictions [default = 0, balanced_simple = 1]Serverless/Dedicated/Self-Hosted
changefeed.event_consumer_worker_queue_size
integer16if changefeed.event_consumer_workers is enabled, this setting sets the maximum number of events which a worker can bufferServerless/Dedicated/Self-Hosted
changefeed.event_consumer_workers
integer0the number of workers to use when processing events: <0 disables, 0 assigns a reasonable default, >0 assigns the setting value. for experimental/core changefeeds and changefeeds using parquet format, this is disabledServerless/Dedicated/Self-Hosted
changefeed.fast_gzip.enabled
booleantrueuse fast gzip implementationServerless/Dedicated/Self-Hosted
changefeed.span_checkpoint.lag_threshold
(alias: changefeed.frontier_highwater_lag_checkpoint_threshold)
duration10m0sthe amount of time a changefeed's lagging (slowest) spans must lag behind its leading (fastest) spans before a span-level checkpoint to save leading span progress is written; if 0, span-level checkpoints due to lagging spans is disabledServerless/Dedicated/Self-Hosted
changefeed.kafka.max_request_size
byte size256 MiBthe maximum number of uncompressed bytes sent in a single request to a Kafka broker; lowering this value helps avoid spurious "message too large" errors that can occur when multiple messages are combined into a single batch; this setting is overridden by the per-changefeed Flush { MaxBytes: <int> } optionServerless/Dedicated/Self-Hosted
changefeed.kafka_v2_error_details.enabled
booleantrueif enabled, Kafka v2 sinks will include the message key, size, and MVCC timestamp in message too large errorsServerless/Dedicated/Self-Hosted
changefeed.memory.per_changefeed_limit
byte size512 MiBcontrols amount of data that can be buffered per changefeedServerless/Dedicated/Self-Hosted
changefeed.resolved_timestamp.min_update_interval
(alias: changefeed.min_highwater_advance)
duration0sminimum amount of time that must have elapsed since the last time a changefeed's resolved timestamp was updated before it is eligible to be updated again; default of 0 means no minimum interval is enforced but updating will still be limited by the average time it takes to checkpoint progressServerless/Dedicated/Self-Hosted
changefeed.node_throttle_config
stringspecifies node level throttling configuration for all changefeeedsServerless/Dedicated/Self-Hosted
changefeed.partition_alg.enabled
booleanfalseif enabled, allows specifying the partition_alg changefeed option to choose between fnv-1a (default) and murmur2 hash functions for Kafka partitioning. Only affects changefeeds using a kafka sink with changefeed.new_kafka_sink_enabled set to true.Serverless/Dedicated/Self-Hosted
changefeed.protect_timestamp.max_age
duration96h0m0sfail the changefeed if the protected timestamp age exceeds this threshold; 0 disables expirationServerless/Dedicated/Self-Hosted
changefeed.protect_timestamp_interval
duration10m0scontrols how often the changefeed forwards its protected timestamp to the resolved timestampServerless/Dedicated/Self-Hosted
changefeed.schema_feed.read_with_priority_after
duration1m0sretry with high priority if we were not able to read descriptors for too long; 0 disablesServerless/Dedicated/Self-Hosted
changefeed.sink_io_workers
integer0the number of workers used by changefeeds when sending requests to the sink (currently the batching versions of webhook, pubsub, and kafka sinks that are enabled by changefeed.new_<sink type>_sink_enabled only): <0 disables, 0 assigns a reasonable default, >0 assigns the setting valueServerless/Dedicated/Self-Hosted
cloudstorage.azure.concurrent_upload_buffers
integer1controls the number of concurrent buffers that will be used by the Azure client when uploading chunks. Each buffer can buffer up to cloudstorage.write_chunk.size of memory during an uploadServerless/Dedicated/Self-Hosted
cloudstorage.azure.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.azure.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.azure.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.azure.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.gs.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.gs.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.gs.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.gs.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.http.custom_ca
stringcustom root CA (appended to system's default CAs) for verifying certificates when interacting with HTTPS storageServerless/Dedicated/Self-Hosted
cloudstorage.http.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.http.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.http.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.http.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.nodelocal.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.nodelocal.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.nodelocal.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.nodelocal.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.nullsink.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.nullsink.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.nullsink.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.nullsink.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.s3.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.s3.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.s3.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.s3.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.timeout
duration10m0sthe timeout for import/export storage operationsServerless/Dedicated/Self-Hosted
cloudstorage.userfile.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.userfile.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.userfile.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cloudstorage.userfile.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroServerless/Dedicated/Self-Hosted
cluster.auto_upgrade.enabled
booleantruedisable automatic cluster version upgrade until resetServerless/Dedicated/Self-Hosted
cluster.organization
stringorganization nameDedicated/Self-hosted (read-write); Serverless (read-only)
cluster.preserve_downgrade_option
stringdisable (automatic or manual) cluster version upgrade from the specified version until resetServerless/Dedicated/Self-Hosted
debug.zip.redact_addresses.enabled
booleanfalseenables the redaction of hostnames and ip addresses in debug zipServerless/Dedicated/Self-Hosted
diagnostics.active_query_dumps.enabled
booleantrueexperimental: enable dumping of anonymized active queries to disk when node is under memory pressureDedicated/Self-hosted (read-write); Serverless (read-only)
diagnostics.forced_sql_stat_reset.interval
duration2h0m0sinterval after which the reported SQL Stats are reset even if not collected by telemetry reporter. It has a max value of 24H.Serverless/Dedicated/Self-Hosted
diagnostics.memory_monitoring_dumps.enabled
booleantrueenable dumping of memory monitoring state at the same time as heap profiles are takenDedicated/Self-hosted (read-write); Serverless (read-only)
diagnostics.reporting.enabled
booleantrueenable reporting diagnostic metrics to cockroach labs, but is ignored for Trial or Free licensesServerless/Dedicated/Self-Hosted
diagnostics.reporting.interval
duration1h0m0sinterval at which diagnostics data should be reportedServerless/Dedicated/Self-Hosted
enterprise.license
stringthe encoded cluster licenseDedicated/Self-hosted (read-write); Serverless (read-only)
external.graphite.endpoint
stringif nonempty, push server metrics to the Graphite or Carbon server at the specified host:portServerless/Dedicated/Self-Hosted
external.graphite.interval
duration10sthe interval at which metrics are pushed to Graphite (if enabled)Serverless/Dedicated/Self-Hosted
feature.backup.enabled
booleantrueset to true to enable backups, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.changefeed.enabled
booleantrueset to true to enable changefeeds, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.export.enabled
booleantrueset to true to enable exports, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.import.enabled
booleantrueset to true to enable imports, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.restore.enabled
booleantrueset to true to enable restore, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.schema_change.enabled
booleantrueset to true to enable schema changes, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.stats.enabled
booleantrueset to true to enable CREATE STATISTICS/ANALYZE, false to disable; default is trueServerless/Dedicated/Self-Hosted
feature.vector_index.enabled
booleanfalseset to true to enable vector indexes, false to disable; default is falseServerless/Dedicated/Self-Hosted
jobs.retention_time
duration336h0m0sthe amount of time for which records for completed jobs are retainedServerless/Dedicated/Self-Hosted
kv.allocator.lease_rebalance_threshold
float0.05minimum fraction away from the mean a store's lease count can be before it is considered for lease-transfersDedicated/Self-Hosted
kv.allocator.load_based_lease_rebalancing.enabled
booleantrueset to enable rebalancing of range leases based on load and latencyDedicated/Self-Hosted
kv.allocator.load_based_rebalancing
enumerationleases and replicaswhether to rebalance based on the distribution of load across stores [off = 0, leases = 1, leases and replicas = 2]Dedicated/Self-Hosted
kv.allocator.load_based_rebalancing.objective
enumerationcpuwhat objective does the cluster use to rebalance; if set to `qps` the cluster will attempt to balance qps among stores, if set to `cpu` the cluster will attempt to balance cpu usage among stores [qps = 0, cpu = 1]Dedicated/Self-Hosted
kv.allocator.load_based_rebalancing_interval
duration1m0sthe rough interval at which each store will check for load-based lease / replica rebalancing opportunitiesDedicated/Self-Hosted
kv.allocator.qps_rebalance_threshold
float0.1minimum fraction away from the mean a store's QPS (queries per second) can be before it is considered overfull or underfullDedicated/Self-Hosted
kv.allocator.range_rebalance_threshold
float0.05minimum fraction away from the mean a store's range count can be before it is considered overfull or underfullDedicated/Self-Hosted
kv.allocator.store_cpu_rebalance_threshold
float0.1minimum fraction away from the mean a store's cpu usage can be before it is considered overfull or underfullDedicated/Self-Hosted
kv.bulk_io_write.max_rate
byte size1.0 TiBthe rate limit (bytes/sec) to use for writes to disk on behalf of bulk io opsDedicated/Self-Hosted
kv.bulk_io_write.min_capacity_remaining_fraction
float0.05remaining store capacity fraction below which bulk ingestion requests are rejectedDedicated/Self-Hosted
kv.bulk_sst.max_allowed_overage
byte size64 MiBif positive, allowed size in excess of target size for SSTs from export requests; export requests (i.e. BACKUP) may buffer up to the sum of kv.bulk_sst.target_size and kv.bulk_sst.max_allowed_overage in memoryDedicated/Self-Hosted
kv.bulk_sst.target_size
byte size16 MiBtarget size for SSTs emitted from export requests; export requests (i.e. BACKUP) may buffer up to the sum of kv.bulk_sst.target_size and kv.bulk_sst.max_allowed_overage in memoryDedicated/Self-hosted (read-write); Serverless (read-only)
kv.closed_timestamp.follower_reads.enabled
(alias: kv.closed_timestamp.follower_reads_enabled)
booleantrueallow (all) replicas to serve consistent historical reads based on closed timestamp informationDedicated/Self-hosted (read-write); Serverless (read-only)
kv.closed_timestamp.lead_for_global_reads_auto_tune.enabled
booleanfalseif enabled, observed network latency between leaseholders and their furthest follower will be used to adjust closed timestamp policies for ranges configured to serve global reads. kv.closed_timestamp.lead_for_global_reads_override takes precedence if set.Dedicated/Self-hosted (read-write); Serverless (read-only)
kv.closed_timestamp.lead_for_global_reads_override
duration0sif nonzero, overrides the lead time that global_read ranges use to publish closed timestampsDedicated/Self-hosted (read-write); Serverless (read-only)
kv.closed_timestamp.side_transport_interval
duration200msthe interval at which the closed timestamp side-transport attempts to advance each range's closed timestamp; set to 0 to disable the side-transportDedicated/Self-hosted (read-write); Serverless (read-only)
kv.closed_timestamp.target_duration
duration3sif nonzero, attempt to provide closed timestamp notifications for timestamps trailing cluster time by approximately this durationDedicated/Self-hosted (read-write); Serverless (read-only)
kv.dist_sender.circuit_breaker.cancellation.enabled
booleantruewhen enabled, in-flight requests will be cancelled when the circuit breaker tripsServerless/Dedicated/Self-Hosted
kv.dist_sender.circuit_breaker.cancellation.write_grace_period
duration10show long after the circuit breaker trips to cancel write requests (these can't retry internally, so should be long enough to allow quorum/lease recovery)Serverless/Dedicated/Self-Hosted
kv.dist_sender.circuit_breaker.probe.interval
duration3sinterval between replica probesServerless/Dedicated/Self-Hosted
kv.dist_sender.circuit_breaker.probe.threshold
duration3sduration of errors or stalls after which a replica will be probedServerless/Dedicated/Self-Hosted
kv.dist_sender.circuit_breaker.probe.timeout
duration3stimeout for replica probesServerless/Dedicated/Self-Hosted
kv.dist_sender.circuit_breakers.mode
enumerationliveness range onlyset of ranges to trip circuit breakers for failing or stalled replicas [no ranges = 0, liveness range only = 1, all ranges = 2]Serverless/Dedicated/Self-Hosted
kv.lease_transfer_read_summary.global_budget
byte size0 Bcontrols the maximum number of bytes that will be used to summarize the global segment of the timestamp cache during lease transfers and range merges. A smaller budget will result in loss of precision.Dedicated/Self-Hosted
kv.lease_transfer_read_summary.local_budget
byte size4.0 MiBcontrols the maximum number of bytes that will be used to summarize the local segment of the timestamp cache during lease transfers and range merges. A smaller budget will result in loss of precision.Dedicated/Self-Hosted
kv.log_range_and_node_events.enabled
booleantrueset to true to transactionally log range events (e.g., split, merge, add/remove voter/non-voter) into system.rangelog and node join and restart events into system.eventlogDedicated/Self-Hosted
kv.protectedts.reconciliation.interval
duration5m0sthe frequency for reconciling jobs with protected timestamp recordsDedicated/Self-hosted (read-write); Serverless (read-only)
kv.raft.leader_fortification.fraction_enabled
float1controls the fraction of ranges for which the raft leader fortification protocol is enabled. Leader fortification is needed for a range to use a Leader lease. Set to 0.0 to disable leader fortification and, by extension, Leader leases. Set to 1.0 to enable leader fortification for all ranges and, by extension, use Leader leases for all ranges which do not require expiration-based leases. Set to a value between 0.0 and 1.0 to gradually roll out Leader leases across the ranges in a cluster.Dedicated/Self-Hosted
kv.range.range_size_hard_cap
byte size8.0 GiBhard cap on the maximum size a range is allowed to grow to without splitting before writes to the range are blocked. Takes precedence over all other configurationsDedicated/Self-Hosted
kv.range_split.by_load.enabled
(alias: kv.range_split.by_load_enabled)
booleantrueallow automatic splits of ranges based on where load is concentratedDedicated/Self-Hosted
kv.range_split.load_cpu_threshold
duration500msthe CPU use per second over which the range becomes a candidate for load based splittingDedicated/Self-Hosted
kv.range_split.load_qps_threshold
integer2500the QPS over which the range becomes a candidate for load based splittingDedicated/Self-Hosted
kv.rangefeed.client.stream_startup_rate
integer100controls the rate per second the client will initiate new rangefeed stream for a single range; 0 implies unlimitedServerless/Dedicated/Self-Hosted
kv.rangefeed.closed_timestamp_refresh_interval
duration3sthe interval at which closed-timestamp updates are delivered to rangefeeds; set to 0 to use kv.closed_timestamp.side_transport_intervalDedicated/Self-hosted (read-write); Serverless (read-only)
kv.rangefeed.enabled
booleanfalseif set, rangefeed registration is enabledDedicated/Self-hosted (read-write); Serverless (read-only)
kv.replica_circuit_breaker.slow_replication_threshold
duration1m0sduration after which slow proposals trip the per-Replica circuit breaker (zero duration disables breakers)Dedicated/Self-Hosted
kv.replica_raft.leaderless_unavailable_threshold
duration1m0sduration after which leaderless replicas are considered unavailable. Set to 0 to disable leaderless replica availability checksDedicated/Self-Hosted
kv.replica_stats.addsst_request_size_factor
integer50000the divisor that is applied to addsstable request sizes, then recorded in a leaseholder's QPS; 0 means all requests are treated as cost 1Dedicated/Self-Hosted
kv.replication_reports.interval
duration1m0sthe frequency for generating the replication_constraint_stats, replication_stats_report and replication_critical_localities reports (set to 0 to disable)Dedicated/Self-Hosted
kv.snapshot_rebalance.max_rate
byte size32 MiBthe rate limit (bytes/sec) to use for rebalance and upreplication snapshotsDedicated/Self-Hosted
kv.transaction.max_intents_and_locks
integer0maximum count of inserts or durable locks for a single transaction, 0 to disableServerless/Dedicated/Self-Hosted
kv.transaction.max_intents_bytes
integer4194304maximum number of bytes used to track locks in transactionsServerless/Dedicated/Self-Hosted
kv.transaction.max_refresh_spans_bytes
integer4194304maximum number of bytes used to track refresh spans in serializable transactionsServerless/Dedicated/Self-Hosted
kv.transaction.randomized_anchor_key.enabled
booleanfalsedictates whether a transaction's anchor key is randomized or notServerless/Dedicated/Self-Hosted
kv.transaction.reject_over_max_intents_budget.enabled
booleanfalseif set, transactions that exceed their lock tracking budget (kv.transaction.max_intents_bytes) are rejected instead of having their lock spans imprecisely compressedServerless/Dedicated/Self-Hosted
kv.transaction.write_pipelining.locking_reads.enabled
booleantrueif enabled, transactional locking reads are pipelined through Raft consensusServerless/Dedicated/Self-Hosted
kv.transaction.write_pipelining.ranged_writes.enabled
booleantrueif enabled, transactional ranged writes are pipelined through Raft consensusServerless/Dedicated/Self-Hosted
kv.transaction.write_pipelining.enabled
(alias: kv.transaction.write_pipelining_enabled)
booleantrueif enabled, transactional writes are pipelined through Raft consensusServerless/Dedicated/Self-Hosted
kv.transaction.write_pipelining.max_batch_size
(alias: kv.transaction.write_pipelining_max_batch_size)
integer128if non-zero, defines the maximum size batch that will be pipelined through Raft consensusServerless/Dedicated/Self-Hosted
kvadmission.store.provisioned_bandwidth
byte size0 Bif set to a non-zero value, this is used as the provisioned bandwidth (in bytes/s), for each store. It can be overridden on a per-store basis using the --store flag. Note that setting the provisioned bandwidth to a positive value may enable disk bandwidth based admission control, since admission.disk_bandwidth_tokens.elastic.enabled defaults to trueDedicated/Self-Hosted
kvadmission.store.snapshot_ingest_bandwidth_control.enabled
booleantrueif set to true, snapshot ingests will be subject to disk write control in ACDedicated/Self-Hosted
obs.tablemetadata.automatic_updates.enabled
booleanfalseenables automatic updates of the table metadata cache system.table_metadataServerless/Dedicated/Self-Hosted
obs.tablemetadata.data_valid_duration
duration20m0sthe duration for which the data in system.table_metadata is considered validServerless/Dedicated/Self-Hosted
schedules.backup.gc_protection.enabled
booleantrueenable chaining of GC protection across backups run as part of a scheduleServerless/Dedicated/Self-Hosted
security.client_cert.subject_required.enabled
booleanfalsemandates a requirement for subject role to be set for db userDedicated/Self-hosted (read-write); Serverless (read-only)
security.ocsp.mode
enumerationoffuse OCSP to check whether TLS certificates are revoked. If the OCSP server is unreachable, in strict mode all certificates will be rejected and in lax mode all certificates will be accepted. [off = 0, lax = 1, strict = 2]Serverless/Dedicated/Self-Hosted
security.ocsp.timeout
duration3stimeout before considering the OCSP server unreachableServerless/Dedicated/Self-Hosted
server.auth_log.sql_connections.enabled
booleanfalseif set, log SQL client connect and disconnect events to the SESSIONS log channel (note: may hinder performance on loaded nodes)Serverless/Dedicated/Self-Hosted
server.auth_log.sql_sessions.enabled
booleanfalseif set, log verbose SQL session authentication events to the SESSIONS log channel (note: may hinder performance on loaded nodes). Session start and end events are always logged regardless of this setting; disable the SESSIONS log channel to suppress them.Serverless/Dedicated/Self-Hosted
server.authentication_cache.enabled
booleantrueenables a cache used during authentication to avoid lookups to system tables when retrieving per-user authentication-related informationServerless/Dedicated/Self-Hosted
server.child_metrics.enabled
booleanfalseenables the exporting of child metrics, additional prometheus time series with extra labelsServerless/Dedicated/Self-Hosted
server.child_metrics.include_aggregate.enabled
booleantrueinclude the reporting of the aggregate time series when child metrics are enabled. This cluster setting has no effect if child metrics are disabled.Serverless/Dedicated/Self-Hosted
server.client_cert_expiration_cache.capacity
integer1000the maximum number of client cert expirations storedServerless/Dedicated/Self-Hosted
server.clock.forward_jump_check.enabled
(alias: server.clock.forward_jump_check_enabled)
booleanfalseif enabled, forward clock jumps > max_offset/2 will cause a panicServerless/Dedicated/Self-Hosted
server.clock.persist_upper_bound_interval
duration0sthe interval between persisting the wall time upper bound of the clock. The clock does not generate a wall time greater than the persisted timestamp and will panic if it sees a wall time greater than this value. When cockroach starts, it waits for the wall time to catch-up till this persisted timestamp. This guarantees monotonic wall time across server restarts. Not setting this or setting a value of 0 disables this feature.Serverless/Dedicated/Self-Hosted
server.consistency_check.max_rate
byte size8.0 MiBthe rate limit (bytes/sec) to use for consistency checks; used in conjunction with server.consistency_check.interval to control the frequency of consistency checks. Note that setting this too high can negatively impact performance.Dedicated/Self-Hosted
server.eventlog.enabled
booleantrueif set, logged notable events are also stored in the table system.eventlogServerless/Dedicated/Self-Hosted
server.eventlog.ttl
duration2160h0m0sif nonzero, entries in system.eventlog older than this duration are periodically purgedServerless/Dedicated/Self-Hosted
server.host_based_authentication.configuration
stringhost-based authentication configuration to use during connection authenticationServerless/Dedicated/Self-Hosted
server.hot_ranges_request.node.timeout
duration5m0sthe duration allowed for a single node to return hot range data before the request is cancelled; if set to 0, there is no timeoutServerless/Dedicated/Self-Hosted
server.hsts.enabled
booleanfalseif true, HSTS headers will be sent along with all HTTP requests. The headers will contain a max-age setting of one year. Browsers honoring the header will always use HTTPS to access the DB Console. Ensure that TLS is correctly configured prior to enabling.Serverless/Dedicated/Self-Hosted
server.http.base_path
string/path to redirect the user to upon successful loginServerless/Dedicated/Self-Hosted
server.identity_map.configuration
stringsystem-identity to database-username mappingsServerless/Dedicated/Self-Hosted
server.jwt_authentication.audience
stringsets accepted audience values for JWT logins over the SQL interfaceServerless/Dedicated/Self-Hosted
server.jwt_authentication.claim
stringsets the JWT claim that is parsed to get the usernameServerless/Dedicated/Self-Hosted
server.jwt_authentication.client.timeout
duration15ssets the client timeout for external calls made during JWT authentication (e.g. fetching JWKS, etc.)Serverless/Dedicated/Self-Hosted
server.jwt_authentication.enabled
booleanfalseenables or disables JWT login for the SQL interfaceServerless/Dedicated/Self-Hosted
server.jwt_authentication.issuers.configuration
(alias: server.jwt_authentication.issuers)
stringsets accepted issuer values for JWT logins over the SQL interface which can be a single issuer URL string or a JSON string containing an array of issuer URLs or a JSON object containing map of issuer URLS to JWKS URIsServerless/Dedicated/Self-Hosted
server.jwt_authentication.issuers.custom_ca
stringsets the PEM encoded custom root CA for verifying certificates while fetching JWKSServerless/Dedicated/Self-Hosted
server.jwt_authentication.jwks
string{"keys":[]}sets the public key set for JWT logins over the SQL interface (JWKS format)Serverless/Dedicated/Self-Hosted
server.jwt_authentication.jwks_auto_fetch.enabled
booleanfalseenables or disables automatic fetching of JWKS from the issuer's well-known endpoint or JWKS URI set in JWTAuthIssuersConfig. If this is enabled, the server.jwt_authentication.jwks will be ignored.Serverless/Dedicated/Self-Hosted
server.ldap_authentication.client.tls_certificate
stringsets the client certificate PEM for establishing mTLS connection with LDAP serverServerless/Dedicated/Self-Hosted
server.ldap_authentication.client.tls_key
stringsets the client key PEM for establishing mTLS connection with LDAP serverServerless/Dedicated/Self-Hosted
server.ldap_authentication.domain.custom_ca
stringsets the PEM encoded custom root CA for verifying domain certificates when establishing connection with LDAP serverServerless/Dedicated/Self-Hosted
server.log_gc.max_deletions_per_cycle
integer1000the maximum number of entries to delete on each purge of log-like system tablesServerless/Dedicated/Self-Hosted
server.log_gc.period
duration1h0m0sthe period at which log-like system tables are checked for old entriesServerless/Dedicated/Self-Hosted
server.max_connections_per_gateway
integer-1the maximum number of SQL connections per gateway allowed at a given time (note: this will only limit future connection attempts and will not affect already established connections). Negative values result in unlimited number of connections. Superusers are not affected by this limit.Serverless/Dedicated/Self-Hosted
server.max_open_transactions_per_gateway
integer-1the maximum number of open SQL transactions per gateway allowed at a given time. Negative values result in unlimited number of connections. Superusers are not affected by this limit.Serverless/Dedicated/Self-Hosted
server.oidc_authentication.autologin.enabled
(alias: server.oidc_authentication.autologin)
booleanfalseif true, logged-out visitors to the DB Console will be automatically redirected to the OIDC login endpointServerless/Dedicated/Self-Hosted
server.oidc_authentication.button_text
stringLog in with your OIDC providertext to show on button on DB Console login page to login with your OIDC provider (only shown if OIDC is enabled)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.claim_json_key
stringsets JSON key of principal to extract from payload after OIDC authentication completes (usually email or sid)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.client.timeout
duration15ssets the client timeout for external calls made during OIDC authentication (e.g. authorization code flow, etc.)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.client_id
stringsets OIDC client idServerless/Dedicated/Self-Hosted
server.oidc_authentication.client_secret
stringsets OIDC client secretServerless/Dedicated/Self-Hosted
server.oidc_authentication.enabled
booleanfalseenables or disables OIDC login for the DB ConsoleServerless/Dedicated/Self-Hosted
server.oidc_authentication.principal_regex
string(.+)regular expression to apply to extracted principal (see claim_json_key setting) to translate to SQL user (golang regex format, must include 1 grouping to extract)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.provider.custom_ca
stringsets the PEM encoded custom root CA for verifying certificates while authenticating through the OIDC providerServerless/Dedicated/Self-Hosted
server.oidc_authentication.provider_url
stringsets OIDC provider URL ({provider_url}/.well-known/openid-configuration must resolve)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.redirect_url
stringhttps://localhost:8080/oidc/v1/callbacksets OIDC redirect URL via a URL string or a JSON string containing a required `redirect_urls` key with an object that maps from region keys to URL strings (URLs should point to your load balancer and must route to the path /oidc/v1/callback)Serverless/Dedicated/Self-Hosted
server.oidc_authentication.scopes
stringopenidsets OIDC scopes to include with authentication request (space delimited list of strings, required to start with `openid`)Serverless/Dedicated/Self-Hosted
server.rangelog.ttl
duration720h0m0sif nonzero, entries in system.rangelog older than this duration are periodically purgedDedicated/Self-Hosted
server.redact_sensitive_settings.enabled
booleanfalseenables or disables the redaction of sensitive settings in the output of SHOW CLUSTER SETTINGS and SHOW ALL CLUSTER SETTINGS for users without the MODIFYCLUSTERSETTING privilegeServerless/Dedicated/Self-Hosted
server.shutdown.connections.timeout
(alias: server.shutdown.connection_wait)
duration0sthe maximum amount of time a server waits for all SQL connections to be closed before proceeding with a drain. (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Serverless/Dedicated/Self-Hosted
server.shutdown.initial_wait
(alias: server.shutdown.drain_wait)
duration0sthe amount of time a server waits in an unready state before proceeding with a drain (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting. --drain-wait is to specify the duration of the whole draining process, while server.shutdown.initial_wait is to set the wait time for health probes to notice that the node is not ready.)Serverless/Dedicated/Self-Hosted
server.shutdown.lease_transfer_iteration.timeout
(alias: server.shutdown.lease_transfer_wait)
duration5sthe timeout for a single iteration of the range lease transfer phase of draining (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Dedicated/Self-Hosted
server.shutdown.transactions.timeout
(alias: server.shutdown.query_wait)
duration10sthe timeout for waiting for active transactions to finish during a drain (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Serverless/Dedicated/Self-Hosted
server.sql_tcp_keep_alive.count
integer3maximum number of probes that will be sent out before a connection is dropped because it's unresponsive (Linux and Darwin only)Serverless/Dedicated/Self-Hosted
server.sql_tcp_keep_alive.interval
duration10stime between keep alive probes and idle time before probes are sent outServerless/Dedicated/Self-Hosted
server.time_until_store_dead
duration5m0sthe time after which if there is no new gossiped information about a store, it is considered deadServerless/Dedicated/Self-Hosted
server.user_login.cert_password_method.auto_scram_promotion.enabled
booleantruewhether to automatically promote cert-password authentication to use SCRAMServerless/Dedicated/Self-Hosted
server.user_login.downgrade_scram_stored_passwords_to_bcrypt.enabled
booleantrueif server.user_login.password_encryption=crdb-bcrypt, this controls whether to automatically re-encode stored passwords from scram-sha-256 to crdb-bcryptServerless/Dedicated/Self-Hosted
server.user_login.min_password_length
integer1the minimum length accepted for passwords set in cleartext via SQL. Note that a value lower than 1 is ignored: passwords cannot be empty in any case. This setting only applies when adding new users or altering an existing user's password; it will not affect existing logins.Serverless/Dedicated/Self-Hosted
server.user_login.password_encryption
enumerationscram-sha-256which hash method to use to encode cleartext passwords passed via ALTER/CREATE USER/ROLE WITH PASSWORD [crdb-bcrypt = 2, scram-sha-256 = 3]Serverless/Dedicated/Self-Hosted
server.user_login.password_hashes.default_cost.crdb_bcrypt
integer10the hashing cost to use when storing passwords supplied as cleartext by SQL clients with the hashing method crdb-bcrypt (allowed range: 4-31)Serverless/Dedicated/Self-Hosted
server.user_login.password_hashes.default_cost.scram_sha_256
integer10610the hashing cost to use when storing passwords supplied as cleartext by SQL clients with the hashing method scram-sha-256 (allowed range: 4096-240000000000)Serverless/Dedicated/Self-Hosted
server.user_login.rehash_scram_stored_passwords_on_cost_change.enabled
booleantrueif server.user_login.password_hashes.default_cost.scram_sha_256 differs from the cost in a stored hash, this controls whether to automatically re-encode stored passwords using scram-sha-256 with the new default costServerless/Dedicated/Self-Hosted
server.user_login.timeout
duration10stimeout after which client authentication times out if some system range is unavailable (0 = no timeout)Serverless/Dedicated/Self-Hosted
server.user_login.upgrade_bcrypt_stored_passwords_to_scram.enabled
booleantrueif server.user_login.password_encryption=scram-sha-256, this controls whether to automatically re-encode stored passwords using crdb-bcrypt to scram-sha-256Serverless/Dedicated/Self-Hosted
server.web_session.purge.ttl
duration1h0m0sif nonzero, entries in system.web_sessions older than this duration are periodically purgedServerless/Dedicated/Self-Hosted
server.web_session.timeout
(alias: server.web_session_timeout)
duration168h0m0sthe duration that a newly created web session will be validServerless/Dedicated/Self-Hosted
spanconfig.bounds.enabled
booleantruedictates whether span config bounds are consulted when serving span configs for secondary tenantsDedicated/Self-Hosted
spanconfig.range_coalescing.system.enabled
(alias: spanconfig.storage_coalesce_adjacent.enabled)
booleantruecollapse adjacent ranges with the same span configs, for the ranges specific to the system tenantDedicated/Self-Hosted
spanconfig.range_coalescing.application.enabled
(alias: spanconfig.tenant_coalesce_adjacent.enabled)
booleantruecollapse adjacent ranges with the same span configs across all secondary tenant keyspacesDedicated/Self-Hosted
sql.auth.change_own_password.enabled
booleanfalsecontrols whether a user is allowed to change their own password, even if they have no other privilegesServerless/Dedicated/Self-Hosted
sql.auth.grant_option_for_owner.enabled
booleantruedetermines whether the GRANT OPTION for privileges is implicitly given to the owner of an objectServerless/Dedicated/Self-Hosted
sql.auth.grant_option_inheritance.enabled
booleantruedetermines whether the GRANT OPTION for privileges is inherited through role membershipServerless/Dedicated/Self-Hosted
sql.auth.public_schema_create_privilege.enabled
booleantruedetermines whether to grant all users the CREATE privileges on the public schema when it is createdServerless/Dedicated/Self-Hosted
sql.auth.skip_underlying_view_privilege_checks.enabled
booleantruedetermines whether to skip privilege checks on tables underlying views. When enabled, users with SELECT privileges on a view can query it regardless of their privileges on the underlying tables, and row-level security policies are evaluated as the invoking user rather than the view owner. This restores pre-v26.2 behavior.Serverless/Dedicated/Self-Hosted
sql.closed_session_cache.capacity
integer1000the maximum number of sessions in the cacheServerless/Dedicated/Self-Hosted
sql.closed_session_cache.time_to_live
integer3600the maximum time to live, in secondsServerless/Dedicated/Self-Hosted
sql.contention.event_store.capacity
byte size64 MiBthe in-memory storage capacity per-node of contention event storeServerless/Dedicated/Self-Hosted
sql.contention.event_store.duration_threshold
duration0sminimum contention duration to cause the contention events to be collected into crdb_internal.transaction_contention_eventsServerless/Dedicated/Self-Hosted
sql.contention.record_serialization_conflicts.enabled
booleantrueenables recording 40001 errors with conflicting txn meta as SERIALIZATION_CONFLICT contention events into crdb_internal.transaction_contention_eventsServerless/Dedicated/Self-Hosted
sql.contention.txn_id_cache.max_size
byte size64 MiBthe maximum byte size TxnID cache will use (set to 0 to disable)Serverless/Dedicated/Self-Hosted
sql.cross_db_fks.enabled
booleanfalseif true, creating foreign key references across databases is allowedServerless/Dedicated/Self-Hosted
sql.cross_db_sequence_owners.enabled
booleanfalseif true, creating sequences owned by tables from other databases is allowedServerless/Dedicated/Self-Hosted
sql.cross_db_sequence_references.enabled
booleanfalseif true, sequences referenced by tables from other databases are allowedServerless/Dedicated/Self-Hosted
sql.cross_db_views.enabled
booleanfalseif true, creating views that refer to other databases is allowedServerless/Dedicated/Self-Hosted
sql.defaults.cost_scans_with_default_col_size.enabled
booleanfalsesetting to true uses the same size for all columns to compute scan cost
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.datestyle
enumerationiso, mdydefault value for DateStyle session setting [iso, mdy = 0, iso, dmy = 1, iso, ymd = 2]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.default_hash_sharded_index_bucket_count
integer16used as bucket count if bucket count is not specified in hash sharded index definition
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.default_int_size
integer8the size, in bytes, of an INT type
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.disallow_full_table_scans.enabled
booleanfalsesetting to true rejects queries that have planned a full table scan; set large_full_scan_rows > 0 to allow small full table scans estimated to read fewer than large_full_scan_rows
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.distsql
enumerationautodefault distributed SQL execution mode [off = 0, auto = 1, on = 2, always = 3]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_alter_column_type.enabled
booleanfalsedefault value for experimental_alter_column_type session setting; enables the use of ALTER COLUMN TYPE for general conversions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_distsql_planning
enumerationoffdefault experimental_distsql_planning mode; enables experimental opt-driven DistSQL planning [off = 0, on = 1]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_enable_unique_without_index_constraints.enabled
booleanfalsedefault value for experimental_enable_unique_without_index_constraints session setting; disables unique without index constraints by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_implicit_column_partitioning.enabled
booleanfalsedefault value for experimental_enable_temp_tables; allows for the use of implicit column partitioning
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.experimental_temporary_tables.enabled
booleanfalsedefault value for experimental_enable_temp_tables; allows for use of temporary tables by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.foreign_key_cascades_limit
integer10000default value for foreign_key_cascades_limit session setting; limits the number of cascading operations that run as part of a single query
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.idle_in_session_timeout
duration0sdefault value for the idle_in_session_timeout session setting; controls the duration a session is permitted to idle before the session is terminated; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.idle_in_transaction_session_timeout
duration0sdefault value for the idle_in_transaction_session_timeout; controls the duration a session is permitted to idle in a transaction before the session is terminated; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.implicit_select_for_update.enabled
booleantruedefault value for enable_implicit_select_for_update session setting; enables FOR UPDATE locking during the row-fetch phase of mutation statements
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.insert_fast_path.enabled
booleantruedefault value for enable_insert_fast_path session setting; enables a specialized insert path
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.intervalstyle
enumerationpostgresdefault value for IntervalStyle session setting [postgres = 0, iso_8601 = 1, sql_standard = 2]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.large_full_scan_rows
float0default value for large_full_scan_rows session variable which determines the table size at which full scans are considered large and disallowed when disallow_full_table_scans is set to true; set to 0 to reject all full table or full index scans when disallow_full_table_scans is true
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.locality_optimized_partitioned_index_scan.enabled
booleantruedefault value for locality_optimized_partitioned_index_scan session setting; enables searching for rows in the current region before searching remote regions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.lock_timeout
duration0sdefault value for the lock_timeout session setting; controls the duration a query is permitted to wait while attempting to acquire a lock on a key or while blocking on an existing lock in order to perform a non-locking read on a key; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.on_update_rehome_row.enabled
booleantruedefault value for on_update_rehome_row; enables ON UPDATE rehome_row() expressions to trigger on updates
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.optimizer_use_histograms.enabled
booleantruedefault value for optimizer_use_histograms session setting; enables usage of histograms in the optimizer by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.optimizer_use_multicol_stats.enabled
booleantruedefault value for optimizer_use_multicol_stats session setting; enables usage of multi-column stats in the optimizer by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.override_alter_primary_region_in_super_region.enabled
booleanfalsedefault value for override_alter_primary_region_in_super_region; allows for altering the primary region even if the primary region is a member of a super region
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.override_multi_region_zone_config.enabled
booleanfalsedefault value for override_multi_region_zone_config; allows for overriding the zone configs of a multi-region table or database
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.prefer_lookup_joins_for_fks.enabled
booleanfalsedefault value for prefer_lookup_joins_for_fks session setting; causes foreign key operations to use lookup joins when possible
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.primary_region
stringif not empty, all databases created without a PRIMARY REGION will implicitly have the given PRIMARY REGION
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.reorder_joins_limit
integer8default number of joins to reorder
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.require_explicit_primary_keys.enabled
booleanfalsedefault value for requiring explicit primary keys in CREATE TABLE statements
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.results_buffer.size
byte size512 KiBdefault size of the buffer that accumulates results for a statement or a batch of statements before they are sent to the client. This can be overridden on an individual connection with the 'results_buffer_size' parameter. Note that auto-retries generally only happen while no results have been delivered to the client, so reducing this size can increase the number of retriable errors a client receives. On the other hand, increasing the buffer size can increase the delay until the client receives the first result row. Updating the setting only affects new connections. Setting to 0 disables any buffering.
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.serial_normalization
enumerationrowiddefault handling of SERIAL in table definitions [rowid = 0, virtual_sequence = 1, sql_sequence = 2, sql_sequence_cached = 3, unordered_rowid = 4, sql_sequence_cached_node = 5]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.statement_timeout
duration0sdefault value for the statement_timeout session setting; controls the duration a query is permitted to run before it is canceled; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.stub_catalog_tables.enabled
booleantruedefault value for stub_catalog_tables session setting
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.super_regions.enabled
booleanfalsedefault value for enable_super_regions; allows for the usage of super regions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.transaction_rows_read_err
integer0the limit for the number of rows read by a SQL transaction which - once exceeded - will fail the transaction (or will trigger a logging event to SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.transaction_rows_read_log
integer0the threshold for the number of rows read by a SQL transaction which - once exceeded - will trigger a logging event to SQL_PERF (or SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.transaction_rows_written_err
integer0the limit for the number of rows written by a SQL transaction which - once exceeded - will fail the transaction (or will trigger a logging event to SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.transaction_rows_written_log
integer0the threshold for the number of rows written by a SQL transaction which - once exceeded - will trigger a logging event to SQL_PERF (or SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.use_declarative_schema_changer
enumerationondefault value for use_declarative_schema_changer session setting; disables new schema changer by default [off = 0, on = 1, unsafe = 2, unsafe_always = 3]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.vectorize
enumerationondefault vectorize mode [on = 0, on = 1, on = 2, experimental_always = 3, off = 4]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.defaults.zigzag_join.enabled
booleanfalsedefault value for enable_zigzag_join session setting; disallows use of zig-zag join by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Serverless/Dedicated/Self-Hosted
sql.distsql.temp_storage.workmem
byte size64 MiBmaximum amount of memory in bytes a processor can use before falling back to temp storageServerless/Dedicated/Self-Hosted
sql.guardrails.max_row_size_err
byte size512 MiBmaximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an error is returned; use 0 to disableServerless/Dedicated/Self-Hosted
sql.guardrails.max_row_size_log
byte size64 MiBmaximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an event is logged to SQL_PERF (or SQL_INTERNAL_PERF if the mutating statement was internal); use 0 to disableServerless/Dedicated/Self-Hosted
sql.hash_sharded_range_pre_split.max
integer16max pre-split ranges to have when adding hash sharded index to an existing tableServerless/Dedicated/Self-Hosted
sql.index_recommendation.drop_unused_duration
duration168h0m0sthe index unused duration at which we begin to recommend dropping the indexServerless/Dedicated/Self-Hosted
sql.insights.anomaly_detection.enabled
booleantrueenable per-fingerprint latency recording and anomaly detectionServerless/Dedicated/Self-Hosted
sql.insights.anomaly_detection.latency_threshold
duration50msstatements must surpass this threshold to trigger anomaly detection and identificationServerless/Dedicated/Self-Hosted
sql.insights.anomaly_detection.memory_limit
byte size1.0 MiBthe maximum amount of memory allowed for tracking statement latenciesServerless/Dedicated/Self-Hosted
sql.insights.execution_insights_capacity
integer1000the size of the per-node store of execution insightsServerless/Dedicated/Self-Hosted
sql.insights.high_retry_count.threshold
integer10the number of retries a slow statement must have undergone for its high retry count to be highlighted as a potential problemServerless/Dedicated/Self-Hosted
sql.insights.latency_threshold
duration100msamount of time after which an executing statement is considered slow. Use 0 to disable.Serverless/Dedicated/Self-Hosted
sql.log.redact_names.enabled
booleanfalseif set, schema object identifiers are redacted in SQL statements that appear in event logsServerless/Dedicated/Self-Hosted
sql.log.slow_query.experimental_full_table_scans.enabled
booleanfalsewhen set to true, statements that perform a full table/index scan will be logged to the slow query log even if they do not meet the latency threshold. Must have the slow query log enabled for this setting to have any effect.Serverless/Dedicated/Self-Hosted
sql.log.slow_query.internal_queries.enabled
booleanfalsewhen set to true, internal queries which exceed the slow query log threshold are logged to a separate log. Must have the slow query log enabled for this setting to have any effect.Serverless/Dedicated/Self-Hosted
sql.log.slow_query.latency_threshold
duration0swhen set to non-zero, log statements whose service latency exceeds the threshold to a secondary logger on each nodeServerless/Dedicated/Self-Hosted
sql.log.user_audit
stringuser/role-based audit logging configuration. An enterprise license is required for this cluster setting to take effect.Serverless/Dedicated/Self-Hosted
sql.log.user_audit.reduced_config.enabled
booleanfalseenables logic to compute a reduced audit configuration, computing the audit configuration only once at session start instead of at each SQL event. The tradeoff with the increase in performance (~5%), is that changes to the audit configuration (user role memberships/cluster setting) are not reflected within session. Users will need to start a new session to see these changes in their auditing behaviour.Serverless/Dedicated/Self-Hosted
sql.metrics.application_name.enabled
booleanfalsewhen enabled, SQL metrics would export application name as an additional label as part of child metrics. The number of unique label combinations is limited to 5000 by default.Serverless/Dedicated/Self-Hosted
sql.metrics.database_name.enabled
booleanfalsewhen enabled, SQL metrics would export database name as an additional label as part of child metrics. The number of unique label combinations is limited to 5000 by default.Serverless/Dedicated/Self-Hosted
sql.metrics.index_usage_stats.enabled
booleantruecollect per index usage statisticsServerless/Dedicated/Self-Hosted
sql.metrics.max_mem_reported_stmt_fingerprints
integer100000the maximum number of reported statement fingerprints stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.max_mem_reported_txn_fingerprints
integer100000the maximum number of reported transaction fingerprints stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.max_mem_stmt_fingerprints
integer7500the maximum number of statement fingerprints stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.max_mem_txn_fingerprints
integer7500the maximum number of transaction fingerprints stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.dump_to_logs.enabled
(alias: sql.metrics.statement_details.dump_to_logs)
booleanfalsedump collected statement statistics to node logs when periodically clearedServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.enabled
booleantruecollect per-statement query statisticsServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.gateway_node.enabled
booleanfalsesave the gateway node for each statement fingerprint. If false, the value will be stored as 0.Serverless/Dedicated/Self-Hosted
sql.metrics.statement_details.index_recommendation_collection.enabled
booleantruegenerate an index recommendation for each fingerprint IDServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.max_mem_reported_idx_recommendations
integer5000the maximum number of reported index recommendation info stored in memoryServerless/Dedicated/Self-Hosted
sql.metrics.statement_details.threshold
duration0sminimum execution time to cause statement statistics to be collected. If configured, no transaction stats are collected.Serverless/Dedicated/Self-Hosted
sql.metrics.transaction_details.enabled
booleantruecollect per-application transaction statisticsServerless/Dedicated/Self-Hosted
sql.multiple_modifications_of_table.enabled
booleanfalseif true, allow statements containing multiple INSERT ON CONFLICT, UPSERT, UPDATE, or DELETE subqueries modifying the same table, at the risk of data corruption if the same row is modified multiple times by a single statement (multiple INSERT subqueries without ON CONFLICT cannot cause corruption and are always allowed)Serverless/Dedicated/Self-Hosted
sql.multiregion.drop_primary_region.enabled
booleantrueallows dropping the PRIMARY REGION of a database if it is the last regionServerless/Dedicated/Self-Hosted
sql.notices.enabled
booleantrueenable notices in the server/client protocol being sentServerless/Dedicated/Self-Hosted
sql.optimizer.uniqueness_checks_for_gen_random_uuid.enabled
booleanfalseif enabled, uniqueness checks may be planned for mutations of UUID columns updated with gen_random_uuid(); otherwise, uniqueness is assumed due to near-zero collision probabilityServerless/Dedicated/Self-Hosted
sql.schema.auto_unlock.enabled
booleantruecontrols whether DDL operations will attempt to automatically unlock and re-lock schema_locked tables. When this setting is false, DDL on schema_locked tables is blocked unless the user manually unlocks the table first. The schema_locked storage parameter improves changefeed performance by locking the table's schema from the perspective of the changefeed.Serverless/Dedicated/Self-Hosted
sql.schema.telemetry.recurrence
string@weeklycron-tab recurrence for SQL schema telemetry jobDedicated/Self-hosted (read-write); Serverless (read-only)
sql.spatial.experimental_box2d_comparison_operators.enabled
booleanfalseenables the use of certain experimental box2d comparison operatorsServerless/Dedicated/Self-Hosted
sql.stats.activity.persisted_rows.max
integer200000maximum number of rows of statement and transaction activity that will be persisted in the system tablesServerless/Dedicated/Self-Hosted
sql.stats.automatic_collection.enabled
booleantrueautomatic statistics collection modeServerless/Dedicated/Self-Hosted
sql.stats.automatic_collection.fraction_stale_rows
float0.2target fraction of stale rows per table that will trigger a statistics refreshServerless/Dedicated/Self-Hosted
sql.stats.automatic_collection.min_stale_rows
integer500target minimum number of stale rows per table that will trigger a statistics refreshServerless/Dedicated/Self-Hosted
sql.stats.automatic_full_collection.enabled
booleantrueautomatic full statistics collection modeServerless/Dedicated/Self-Hosted
sql.stats.automatic_partial_collection.enabled
booleantrueautomatic partial statistics collection modeServerless/Dedicated/Self-Hosted
sql.stats.automatic_partial_collection.fraction_stale_rows
float0.05target fraction of stale rows per table that will trigger a partial statistics refreshServerless/Dedicated/Self-Hosted
sql.stats.automatic_partial_collection.min_stale_rows
integer100target minimum number of stale rows per table that will trigger a partial statistics refreshServerless/Dedicated/Self-Hosted
sql.stats.cleanup.recurrence
string@hourlycron-tab recurrence for SQL Stats cleanup jobServerless/Dedicated/Self-Hosted
sql.stats.detailed_latency_metrics.enabled
booleanfalselabel latency metrics with the statement fingerprint. Workloads with tens of thousands of distinct query fingerprints should leave this setting false. (experimental, affects performance for workloads with high fingerprint cardinality)Serverless/Dedicated/Self-Hosted
sql.stats.error_on_concurrent_create_stats.enabled
booleantrueset to true to error on concurrent CREATE STATISTICS jobs, instead of skipping themServerless/Dedicated/Self-Hosted
sql.stats.flush.enabled
booleantrueif set, SQL execution statistics are periodically flushed to diskServerless/Dedicated/Self-Hosted
sql.stats.flush.interval
duration10m0sthe interval at which SQL execution statistics are flushed to disk, this value must be less than or equal to 1 hourServerless/Dedicated/Self-Hosted
sql.stats.forecasts.enabled
booleantruewhen true, enables generation of statistics forecasts by default for all tablesServerless/Dedicated/Self-Hosted
sql.stats.forecasts.max_decrease
float0.3333333333333333the most a prediction is allowed to decrease, expressed as the minimum ratio of the prediction to the lowest prior observationServerless/Dedicated/Self-Hosted
sql.stats.forecasts.min_goodness_of_fit
float0.95the minimum R² (goodness of fit) measurement required from all predictive models to use a forecastServerless/Dedicated/Self-Hosted
sql.stats.forecasts.min_observations
integer3the minimum number of observed statistics required to produce a statistics forecastServerless/Dedicated/Self-Hosted
sql.stats.histogram_buckets.count
integer200maximum number of histogram buckets to build during table statistics collectionServerless/Dedicated/Self-Hosted
sql.stats.histogram_buckets.include_most_common_values.enabled
booleantruewhether to include most common values as histogram bucketsServerless/Dedicated/Self-Hosted
sql.stats.histogram_buckets.max_fraction_most_common_values
float0.1maximum fraction of histogram buckets to use for most common valuesServerless/Dedicated/Self-Hosted
sql.stats.histogram_collection.enabled
booleantruehistogram collection modeServerless/Dedicated/Self-Hosted
sql.stats.histogram_samples.count
integer0number of rows sampled for histogram construction during table statistics collection. Not setting this or setting a value of 0 means that a reasonable sample size will be automatically picked based on the table size.Serverless/Dedicated/Self-Hosted
sql.stats.multi_column_collection.enabled
booleantruemulti-column statistics collection modeServerless/Dedicated/Self-Hosted
sql.stats.non_default_columns.min_retention_period
duration24h0m0sminimum retention period for table statistics collected on non-default columnsServerless/Dedicated/Self-Hosted
sql.stats.non_indexed_json_histograms.enabled
booleanfalseset to true to collect table statistics histograms on non-indexed JSON columnsServerless/Dedicated/Self-Hosted
sql.stats.persisted_rows.max
integer1000000maximum number of rows of statement and transaction statistics that will be persisted in the system tables before compaction beginsServerless/Dedicated/Self-Hosted
sql.stats.post_events.enabled
booleanfalseif set, an event is logged for every successful CREATE STATISTICS jobServerless/Dedicated/Self-Hosted
sql.stats.response.max
integer20000the maximum number of statements and transaction stats returned in a CombinedStatements requestServerless/Dedicated/Self-Hosted
sql.stats.response.show_internal.enabled
booleanfalsecontrols if statistics for internal executions should be returned by the CombinedStatements and if internal sessions should be returned by the ListSessions endpoints. These endpoints are used to display statistics on the SQL Activity pagesServerless/Dedicated/Self-Hosted
sql.stats.system_tables.enabled
booleantruewhen true, enables use of statistics on system tables by the query optimizerServerless/Dedicated/Self-Hosted
sql.stats.system_tables_autostats.enabled
booleantruewhen true, enables automatic collection of statistics on system tablesServerless/Dedicated/Self-Hosted
sql.stats.table_statistics_cache.capacity
integer256the maximum number of table statistics entries stored in the LRU cache. Each cache entry corresponds to a single table.Serverless/Dedicated/Self-Hosted
sql.stats.virtual_computed_columns.enabled
booleantrueset to true to collect table statistics on virtual computed columnsServerless/Dedicated/Self-Hosted
sql.telemetry.query_sampling.enabled
booleanfalsewhen set to true, executed queries will emit an event on the telemetry logging channelServerless/Dedicated/Self-Hosted
sql.telemetry.query_sampling.internal.enabled
booleanfalsewhen set to true, internal queries will be sampled in telemetry loggingServerless/Dedicated/Self-Hosted
sql.telemetry.query_sampling.max_event_frequency
integer8the max event frequency (events per second) at which we sample executions for telemetry, note that it is recommended that this value shares a log-line limit of 10 logs per second on the telemetry pipeline with all other telemetry events. If sampling mode is set to 'transaction', this value is ignored.Serverless/Dedicated/Self-Hosted
sql.telemetry.query_sampling.mode
enumerationstatementthe execution level used for telemetry sampling. If set to 'statement', events are sampled at the statement execution level. If set to 'transaction', events are sampled at the transaction execution level, i.e. all statements for a transaction will be logged and are counted together as one sampled event (events are still emitted one per statement). [statement = 0, transaction = 1]Serverless/Dedicated/Self-Hosted
sql.telemetry.transaction_sampling.max_event_frequency
integer8the max event frequency (events per second) at which we sample transactions for telemetry. If sampling mode is set to 'statement', this setting is ignored. In practice, this means that we only sample a transaction if 1/max_event_frequency seconds have elapsed since the last transaction was sampled.Serverless/Dedicated/Self-Hosted
sql.telemetry.transaction_sampling.statement_events_per_transaction.max
integer50the maximum number of statement events to log for every sampled transaction. Note that statements that are logged by force do not adhere to this limit.Serverless/Dedicated/Self-Hosted
sql.temp_object_cleaner.cleanup_interval
duration30m0show often to clean up orphaned temporary objectsServerless/Dedicated/Self-Hosted
sql.temp_object_cleaner.wait_interval
duration30m0show long after creation a temporary object will be cleaned upServerless/Dedicated/Self-Hosted
sql.log.all_statements.enabled
(alias: sql.trace.log_statement_execute)
booleanfalseset to true to enable logging of all executed statementsServerless/Dedicated/Self-Hosted
sql.trace.stmt.enable_threshold
duration0senables tracing on all statements; statements executing for longer than this duration will have their trace logged (set to 0 to disable); note that enabling this may have a negative performance impact; this setting applies to individual statements within a transaction and is therefore finer-grained than sql.trace.txn.enable_thresholdServerless/Dedicated/Self-Hosted
sql.trace.txn.enable_threshold
duration0senables transaction traces for transactions exceeding this duration, used with `sql.trace.txn.sample_rate`Serverless/Dedicated/Self-Hosted
sql.trace.txn.sample_rate
float1enables probabilistic transaction tracing. It should be used in conjunction with `sql.trace.txn.enable_threshold`. A percentage of transactions between 0 and 1.0 will have tracing enabled, and only those which exceed the configured threshold will be logged.Serverless/Dedicated/Self-Hosted
sql.ttl.changefeed_replication.disabled
booleanfalseif true, deletes issued by TTL will not be replicated via changefeeds (this setting will be ignored by changefeeds that have the ignore_disable_changefeed_replication option set; such changefeeds will continue to replicate all TTL deletes)Serverless/Dedicated/Self-Hosted
sql.ttl.default_delete_batch_size
integer100default amount of rows to delete in a single query during a TTL jobServerless/Dedicated/Self-Hosted
sql.ttl.default_delete_rate_limit
integer100default delete rate limit (rows per second) per node for each TTL job. Use 0 to signify no rate limit.Serverless/Dedicated/Self-Hosted
sql.ttl.default_select_batch_size
integer500default amount of rows to select in a single query during a TTL jobServerless/Dedicated/Self-Hosted
sql.ttl.default_select_rate_limit
integer0default select rate limit (rows per second) per node for each TTL job. Use 0 to signify no rate limit.Serverless/Dedicated/Self-Hosted
sql.ttl.job.enabled
booleantruewhether the TTL job is enabledServerless/Dedicated/Self-Hosted
sql.txn.read_committed_isolation.enabled
booleantrueset to true to allow transactions to use the READ COMMITTED isolation level if specified by BEGIN/SET commandsServerless/Dedicated/Self-Hosted
sql.txn.repeatable_read_isolation.enabled
(alias: sql.txn.snapshot_isolation.enabled)
booleanfalseset to true to allow transactions to use the REPEATABLE READ isolation level if specified by BEGIN/SET commandsServerless/Dedicated/Self-Hosted
sql.txn_fingerprint_id_cache.capacity
integer100the maximum number of txn fingerprint IDs storedServerless/Dedicated/Self-Hosted
sql.vecindex.stalled_op.timeout
duration100msamount of time before other vector index workers will assist with a stalled background fixupServerless/Dedicated/Self-Hosted
storage.columnar_blocks.enabled
booleantrueset to true to enable columnar-blocks to store KVs in a columnar formatDedicated/Self-hosted (read-write); Serverless (read-only)
storage.delete_compaction_excise.enabled
booleantrueset to false to direct Pebble to not partially excise sstables in delete-only compactionsDedicated/Self-hosted (read-write); Serverless (read-only)
storage.ingest_split.enabled
booleantrueset to false to disable ingest-time splitting that lowers write-amplificationDedicated/Self-Hosted
storage.ingestion.value_blocks.enabled
booleantrueset to true to enable writing of value blocks in ingestion sstablesServerless/Dedicated/Self-Hosted
storage.max_sync_duration
duration20smaximum duration for disk operations; any operations that take longer than this setting trigger a warning log entry or process crashDedicated/Self-hosted (read-write); Serverless (read-only)
storage.max_sync_duration.fatal.enabled
booleantrueif true, fatal the process when a disk operation exceeds storage.max_sync_durationServerless/Dedicated/Self-Hosted
storage.sstable.compression_algorithm
enumerationsnappydetermines the compression algorithm to use when compressing sstable data blocks for use in a Pebble store; [snappy = 1, zstd = 2, none = 3]Dedicated/Self-hosted (read-write); Serverless (read-only)
storage.sstable.compression_algorithm_backup_storage
enumerationsnappydetermines the compression algorithm to use when compressing sstable data blocks for backup row data storage; [snappy = 1, zstd = 2, none = 3]Dedicated/Self-hosted (read-write); Serverless (read-only)
storage.sstable.compression_algorithm_backup_transport
enumerationsnappydetermines the compression algorithm to use when compressing sstable data blocks for backup transport; [snappy = 1, zstd = 2, none = 3]Dedicated/Self-hosted (read-write); Serverless (read-only)
storage.wal_failover.unhealthy_op_threshold
duration100msthe latency of a WAL write considered unhealthy and triggers a failover to a secondary WAL locationDedicated/Self-Hosted
timeseries.storage.enabled
booleantrueif set, periodic timeseries data is stored within the cluster; disabling is not recommended unless you are storing the data elsewhereDedicated/Self-Hosted
timeseries.storage.resolution_10s.ttl
duration240h0m0sthe maximum age of time series data stored at the 10 second resolution. Data older than this is subject to rollup and deletion.Dedicated/Self-hosted (read-write); Serverless (read-only)
timeseries.storage.resolution_30m.ttl
duration2160h0m0sthe maximum age of time series data stored at the 30 minute resolution. Data older than this is subject to deletion.Dedicated/Self-hosted (read-write); Serverless (read-only)
trace.debug_http_endpoint.enabled
(alias: trace.debug.enable)
booleanfalseif set, traces for recent requests can be seen at https://<ui>/debug/requestsServerless/Dedicated/Self-Hosted
trace.opentelemetry.collector
stringaddress of an OpenTelemetry trace collector to receive traces using the otel gRPC protocol, as <host>:<port>. If no port is specified, 4317 will be used.Serverless/Dedicated/Self-Hosted
trace.snapshot.rate
duration0sif non-zero, interval at which background trace snapshots are capturedServerless/Dedicated/Self-Hosted
trace.span_registry.enabled
booleanfalseif set, ongoing traces can be seen at https://<ui>/#/debug/tracezServerless/Dedicated/Self-Hosted
trace.zipkin.collector
stringthe address of a Zipkin instance to receive traces, as <host>:<port>. If no port is specified, 9411 will be used.Serverless/Dedicated/Self-Hosted
ui.database_locality_metadata.enabled
booleantrueif enabled shows extended locality data about databases and tables in DB Console which can be expensive to computeServerless/Dedicated/Self-Hosted
ui.default_timezone
stringthe default timezone used to format timestamps in the uiServerless/Dedicated/Self-Hosted
ui.display_timezone
enumerationetc/utcthe timezone used to format timestamps in the ui. This setting is deprecatedand will be removed in a future version. Use the 'ui.default_timezone' setting instead. 'ui.default_timezone' takes precedence over this setting. [etc/utc = 0, america/new_york = 1]Serverless/Dedicated/Self-Hosted
version
version25.2set the active cluster version in the format '<major>.<minor>'Serverless/Dedicated/Self-Hosted
diff --git a/src/current/_includes/cockroach-generated/release-25.2/sql/aggregates.md b/src/current/_includes/cockroach-generated/release-25.2/sql/aggregates.md new file mode 100644 index 00000000000..3213f0f02a8 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.2/sql/aggregates.md @@ -0,0 +1,579 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_agg(arg1: bool) → bool[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bool[]) → bool[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bytes) → bytes[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bytes[]) → bytes[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: date) → date[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: date[]) → date[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: decimal) → decimal[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: decimal[]) → decimal[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: float) → float[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: float[]) → float[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: inet) → inet[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: inet[]) → inet[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: int) → int[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: int[]) → int[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: interval) → interval[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: interval[]) → interval[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: string) → string[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: string[]) → string[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: time) → time[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: time[]) → time[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamp) → timestamp[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamp[]) → timestamp[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamptz) → timestamptz[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamptz[]) → timestamptz[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: uuid) → uuid[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: uuid[]) → uuid[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: anyenum) → anyenum[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: anyenum[]) → anyenum[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: box2d) → box2d[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: box2d[]) → box2d[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geography) → geography[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geography[]) → geography[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geometry) → geometry[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geometry[]) → geometry[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: jsonb) → jsonb[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: jsonb[]) → jsonb[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: oid) → oid[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: oid[]) → oid[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: pg_lsn) → pg_lsn[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: pg_lsn[]) → pg_lsn[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: refcursor) → refcursor[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: refcursor[]) → refcursor[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timetz) → timetz[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timetz[]) → timetz[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: tuple) → tuple[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: tuple[]) → tuple[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: varbit) → varbit[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: varbit[]) → varbit[][]

Aggregates the selected values into an array.

+
Immutable
array_cat_agg(arg1: bool[]) → bool[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: bytes[]) → bytes[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: date[]) → date[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: decimal[]) → decimal[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: float[]) → float[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: inet[]) → inet[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: int[]) → int[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: interval[]) → interval[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: string[]) → string[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: time[]) → time[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timestamp[]) → timestamp[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timestamptz[]) → timestamptz[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: uuid[]) → uuid[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: anyenum[]) → anyenum[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: box2d[]) → box2d[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: geography[]) → geography[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: geometry[]) → geometry[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: jsonb[]) → jsonb[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: oid[]) → oid[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: pg_lsn[]) → pg_lsn[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: refcursor[]) → refcursor[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timetz[]) → timetz[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: tuple[]) → tuple[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: varbit[]) → varbit[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
avg(arg1: decimal) → decimal

Calculates the average of the selected values.

+
Immutable
avg(arg1: float) → float

Calculates the average of the selected values.

+
Immutable
avg(arg1: int) → decimal

Calculates the average of the selected values.

+
Immutable
avg(arg1: interval) → interval

Calculates the average of the selected values.

+
Immutable
bit_and(arg1: int) → int

Calculates the bitwise AND of all non-null input values, or null if none.

+
Immutable
bit_and(arg1: varbit) → varbit

Calculates the bitwise AND of all non-null input values, or null if none.

+
Immutable
bit_or(arg1: int) → int

Calculates the bitwise OR of all non-null input values, or null if none.

+
Immutable
bit_or(arg1: varbit) → varbit

Calculates the bitwise OR of all non-null input values, or null if none.

+
Immutable
bool_and(arg1: bool) → bool

Calculates the boolean value of ANDing all selected values.

+
Immutable
bool_or(arg1: bool) → bool

Calculates the boolean value of ORing all selected values.

+
Immutable
concat_agg(arg1: bytes) → bytes

Concatenates all selected values.

+
Immutable
concat_agg(arg1: string) → string

Concatenates all selected values.

+
Immutable
corr(arg1: decimal, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: decimal, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: decimal, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
count(arg1: anyelement) → int

Calculates the number of selected elements.

+
Immutable
count_rows() → int

Calculates the number of rows.

+
Immutable
covar_pop(arg1: decimal, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: decimal, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: decimal, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
every(arg1: bool) → bool

Calculates the boolean value of ANDing all selected values.

+
Immutable
json_agg(arg1: anyelement) → jsonb

Aggregates values as a JSON or JSONB array.

+
Stable
json_object_agg(arg1: string, arg2: anyelement) → jsonb

Aggregates values as a JSON or JSONB object.

+
Stable
jsonb_agg(arg1: anyelement) → jsonb

Aggregates values as a JSON or JSONB array.

+
Stable
jsonb_object_agg(arg1: string, arg2: anyelement) → jsonb

Aggregates values as a JSON or JSONB object.

+
Stable
max(arg1: bool) → bool

Identifies the maximum selected value.

+
Immutable
max(arg1: bytes) → bytes

Identifies the maximum selected value.

+
Immutable
max(arg1: date) → date

Identifies the maximum selected value.

+
Immutable
max(arg1: decimal) → decimal

Identifies the maximum selected value.

+
Immutable
max(arg1: float) → float

Identifies the maximum selected value.

+
Immutable
max(arg1: inet) → inet

Identifies the maximum selected value.

+
Immutable
max(arg1: int) → int

Identifies the maximum selected value.

+
Immutable
max(arg1: interval) → interval

Identifies the maximum selected value.

+
Immutable
max(arg1: string) → string

Identifies the maximum selected value.

+
Immutable
max(arg1: time) → time

Identifies the maximum selected value.

+
Immutable
max(arg1: timestamp) → timestamp

Identifies the maximum selected value.

+
Immutable
max(arg1: timestamptz) → timestamptz

Identifies the maximum selected value.

+
Immutable
max(arg1: uuid) → uuid

Identifies the maximum selected value.

+
Immutable
max(arg1: anyenum) → anyenum

Identifies the maximum selected value.

+
Immutable
max(arg1: box2d) → box2d

Identifies the maximum selected value.

+
Immutable
max(arg1: collatedstring{*}) → collatedstring{*}

Identifies the maximum selected value.

+
Immutable
max(arg1: geography) → geography

Identifies the maximum selected value.

+
Immutable
max(arg1: geometry) → geometry

Identifies the maximum selected value.

+
Immutable
max(arg1: jsonb) → jsonb

Identifies the maximum selected value.

+
Immutable
max(arg1: oid) → oid

Identifies the maximum selected value.

+
Immutable
max(arg1: pg_lsn) → pg_lsn

Identifies the maximum selected value.

+
Immutable
max(arg1: timetz) → timetz

Identifies the maximum selected value.

+
Immutable
max(arg1: varbit) → varbit

Identifies the maximum selected value.

+
Immutable
min(arg1: bool) → bool

Identifies the minimum selected value.

+
Immutable
min(arg1: bytes) → bytes

Identifies the minimum selected value.

+
Immutable
min(arg1: date) → date

Identifies the minimum selected value.

+
Immutable
min(arg1: decimal) → decimal

Identifies the minimum selected value.

+
Immutable
min(arg1: float) → float

Identifies the minimum selected value.

+
Immutable
min(arg1: inet) → inet

Identifies the minimum selected value.

+
Immutable
min(arg1: int) → int

Identifies the minimum selected value.

+
Immutable
min(arg1: interval) → interval

Identifies the minimum selected value.

+
Immutable
min(arg1: string) → string

Identifies the minimum selected value.

+
Immutable
min(arg1: time) → time

Identifies the minimum selected value.

+
Immutable
min(arg1: timestamp) → timestamp

Identifies the minimum selected value.

+
Immutable
min(arg1: timestamptz) → timestamptz

Identifies the minimum selected value.

+
Immutable
min(arg1: uuid) → uuid

Identifies the minimum selected value.

+
Immutable
min(arg1: anyenum) → anyenum

Identifies the minimum selected value.

+
Immutable
min(arg1: box2d) → box2d

Identifies the minimum selected value.

+
Immutable
min(arg1: collatedstring{*}) → collatedstring{*}

Identifies the minimum selected value.

+
Immutable
min(arg1: geography) → geography

Identifies the minimum selected value.

+
Immutable
min(arg1: geometry) → geometry

Identifies the minimum selected value.

+
Immutable
min(arg1: jsonb) → jsonb

Identifies the minimum selected value.

+
Immutable
min(arg1: oid) → oid

Identifies the minimum selected value.

+
Immutable
min(arg1: pg_lsn) → pg_lsn

Identifies the minimum selected value.

+
Immutable
min(arg1: timetz) → timetz

Identifies the minimum selected value.

+
Immutable
min(arg1: varbit) → varbit

Identifies the minimum selected value.

+
Immutable
percentile_cont(arg1: float) → float

Continuous percentile: returns a float corresponding to the specified fraction in the ordering, interpolating between adjacent input floats if needed.

+
Immutable
percentile_cont(arg1: float) → interval

Continuous percentile: returns an interval corresponding to the specified fraction in the ordering, interpolating between adjacent input intervals if needed.

+
Immutable
percentile_cont(arg1: float[]) → float[]

Continuous percentile: returns floats corresponding to the specified fractions in the ordering, interpolating between adjacent input floats if needed.

+
Immutable
percentile_cont(arg1: float[]) → interval[]

Continuous percentile: returns intervals corresponding to the specified fractions in the ordering, interpolating between adjacent input intervals if needed.

+
Immutable
percentile_disc(arg1: float) → anyelement

Discrete percentile: returns the first input value whose position in the ordering equals or exceeds the specified fraction.

+
Immutable
percentile_disc(arg1: float[]) → anyelement

Discrete percentile: returns input values whose position in the ordering equals or exceeds the specified fractions.

+
Immutable
regr_avgx(arg1: decimal, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: decimal, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: decimal, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_count(arg1: decimal, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: decimal, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: decimal, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_intercept(arg1: decimal, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: decimal, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: decimal, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_r2(arg1: decimal, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: decimal, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: decimal, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_slope(arg1: decimal, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: decimal, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: decimal, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_sxx(arg1: decimal, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: decimal, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: decimal, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
sqrdiff(arg1: decimal) → decimal

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
sqrdiff(arg1: float) → float

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
sqrdiff(arg1: int) → decimal

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
st_collect(arg1: geometry) → geometry

Collects geometries into a GeometryCollection or multi-type as appropriate.

+
Immutable
st_extent(arg1: geometry) → box2d

Forms a Box2D that encapsulates all provided geometries.

+
Immutable
st_makeline(arg1: geometry) → geometry

Forms a LineString from Point, MultiPoint or LineStrings. Other shapes will be ignored.

+
Immutable
st_memcollect(arg1: geometry) → geometry

Collects geometries into a GeometryCollection or multi-type as appropriate.

+
Immutable
st_memunion(arg1: geometry) → geometry

Applies a spatial union to the geometries provided.

+
Immutable
st_union(arg1: geometry) → geometry

Applies a spatial union to the geometries provided.

+
Immutable
stddev(arg1: decimal) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev(arg1: float) → float

Calculates the standard deviation of the selected values.

+
Immutable
stddev(arg1: int) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: decimal) → decimal

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: float) → float

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: int) → decimal

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: decimal) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: float) → float

Calculates the standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: int) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
string_agg(arg1: bytes, arg2: bytes) → bytes

Concatenates all selected values using the provided delimiter.

+
Immutable
string_agg(arg1: string, arg2: string) → string

Concatenates all selected values using the provided delimiter.

+
Immutable
sum(arg1: decimal) → decimal

Calculates the sum of the selected values.

+
Immutable
sum(arg1: float) → float

Calculates the sum of the selected values.

+
Immutable
sum(arg1: int) → decimal

Calculates the sum of the selected values.

+
Immutable
sum(arg1: interval) → interval

Calculates the sum of the selected values.

+
Immutable
sum_int(arg1: int) → int

Calculates the sum of the selected values.

+
Immutable
var_pop(arg1: decimal) → decimal

Calculates the population variance of the selected values.

+
Immutable
var_pop(arg1: float) → float

Calculates the population variance of the selected values.

+
Immutable
var_pop(arg1: int) → decimal

Calculates the population variance of the selected values.

+
Immutable
var_samp(arg1: decimal) → decimal

Calculates the variance of the selected values.

+
Immutable
var_samp(arg1: float) → float

Calculates the variance of the selected values.

+
Immutable
var_samp(arg1: int) → decimal

Calculates the variance of the selected values.

+
Immutable
variance(arg1: decimal) → decimal

Calculates the variance of the selected values.

+
Immutable
variance(arg1: float) → float

Calculates the variance of the selected values.

+
Immutable
variance(arg1: int) → decimal

Calculates the variance of the selected values.

+
Immutable
xor_agg(arg1: bytes) → bytes

Calculates the bitwise XOR of the selected values.

+
Immutable
xor_agg(arg1: int) → int

Calculates the bitwise XOR of the selected values.

+
Immutable
+ diff --git a/src/current/_includes/cockroach-generated/release-25.2/sql/functions.md b/src/current/_includes/cockroach-generated/release-25.2/sql/functions.md new file mode 100644 index 00000000000..7c695326553 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.2/sql/functions.md @@ -0,0 +1,3611 @@ +### Array functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_append(array: bool[], elem: bool) → bool[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: bytes[], elem: bytes) → bytes[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: date[], elem: date) → date[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: decimal[], elem: decimal) → decimal[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: float[], elem: float) → float[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: inet[], elem: inet) → inet[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: int[], elem: int) → int[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: interval[], elem: interval) → interval[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: string[], elem: string) → string[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: time[], elem: time) → time[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timestamp[], elem: timestamp) → timestamp[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timestamptz[], elem: timestamptz) → timestamptz[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: uuid[], elem: uuid) → uuid[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: anyenum[], elem: anyenum) → anyenum[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: box2d[], elem: box2d) → box2d[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: geography[], elem: geography) → geography[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: geometry[], elem: geometry) → geometry[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: jsonb[], elem: jsonb) → jsonb[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: oid[], elem: oid) → oid[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: pg_lsn[], elem: pg_lsn) → pg_lsn[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: refcursor[], elem: refcursor) → refcursor[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timetz[], elem: timetz) → timetz[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: tuple[], elem: tuple) → tuple[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: varbit[], elem: varbit) → varbit[]

Appends elem to array, returning the result.

+
Immutable
array_cat(left: bool[], right: bool[]) → bool[]

Appends two arrays.

+
Immutable
array_cat(left: bytes[], right: bytes[]) → bytes[]

Appends two arrays.

+
Immutable
array_cat(left: date[], right: date[]) → date[]

Appends two arrays.

+
Immutable
array_cat(left: decimal[], right: decimal[]) → decimal[]

Appends two arrays.

+
Immutable
array_cat(left: float[], right: float[]) → float[]

Appends two arrays.

+
Immutable
array_cat(left: inet[], right: inet[]) → inet[]

Appends two arrays.

+
Immutable
array_cat(left: int[], right: int[]) → int[]

Appends two arrays.

+
Immutable
array_cat(left: interval[], right: interval[]) → interval[]

Appends two arrays.

+
Immutable
array_cat(left: string[], right: string[]) → string[]

Appends two arrays.

+
Immutable
array_cat(left: time[], right: time[]) → time[]

Appends two arrays.

+
Immutable
array_cat(left: timestamp[], right: timestamp[]) → timestamp[]

Appends two arrays.

+
Immutable
array_cat(left: timestamptz[], right: timestamptz[]) → timestamptz[]

Appends two arrays.

+
Immutable
array_cat(left: uuid[], right: uuid[]) → uuid[]

Appends two arrays.

+
Immutable
array_cat(left: anyenum[], right: anyenum[]) → anyenum[]

Appends two arrays.

+
Immutable
array_cat(left: box2d[], right: box2d[]) → box2d[]

Appends two arrays.

+
Immutable
array_cat(left: geography[], right: geography[]) → geography[]

Appends two arrays.

+
Immutable
array_cat(left: geometry[], right: geometry[]) → geometry[]

Appends two arrays.

+
Immutable
array_cat(left: jsonb[], right: jsonb[]) → jsonb[]

Appends two arrays.

+
Immutable
array_cat(left: oid[], right: oid[]) → oid[]

Appends two arrays.

+
Immutable
array_cat(left: pg_lsn[], right: pg_lsn[]) → pg_lsn[]

Appends two arrays.

+
Immutable
array_cat(left: refcursor[], right: refcursor[]) → refcursor[]

Appends two arrays.

+
Immutable
array_cat(left: timetz[], right: timetz[]) → timetz[]

Appends two arrays.

+
Immutable
array_cat(left: tuple[], right: tuple[]) → tuple[]

Appends two arrays.

+
Immutable
array_cat(left: varbit[], right: varbit[]) → varbit[]

Appends two arrays.

+
Immutable
array_length(input: anyelement[], array_dimension: int) → int

Calculates the length of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
array_lower(input: anyelement[], array_dimension: int) → int

Calculates the minimum value of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
array_position(array: bool[], elem: bool) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: bool[], elem: bool, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: bytes[], elem: bytes) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: bytes[], elem: bytes, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: date[], elem: date) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: date[], elem: date, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: decimal[], elem: decimal) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: decimal[], elem: decimal, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: float[], elem: float) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: float[], elem: float, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: inet[], elem: inet) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: inet[], elem: inet, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: int[], elem: int) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: int[], elem: int, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: interval[], elem: interval) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: interval[], elem: interval, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: string[], elem: string) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: string[], elem: string, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: time[], elem: time) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: time[], elem: time, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: timestamp[], elem: timestamp) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timestamp[], elem: timestamp, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: timestamptz[], elem: timestamptz) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timestamptz[], elem: timestamptz, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: uuid[], elem: uuid) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: uuid[], elem: uuid, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: anyenum[], elem: anyenum) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: anyenum[], elem: anyenum, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: box2d[], elem: box2d) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: box2d[], elem: box2d, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: geography[], elem: geography) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: geography[], elem: geography, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: geometry[], elem: geometry) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: geometry[], elem: geometry, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: jsonb[], elem: jsonb) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: jsonb[], elem: jsonb, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: oid[], elem: oid) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: oid[], elem: oid, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: pg_lsn[], elem: pg_lsn) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: pg_lsn[], elem: pg_lsn, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: refcursor[], elem: refcursor) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: refcursor[], elem: refcursor, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: timetz[], elem: timetz) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timetz[], elem: timetz, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: tuple[], elem: tuple) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: tuple[], elem: tuple, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_position(array: varbit[], elem: varbit) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: varbit[], elem: varbit, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at the start index.

+
Immutable
array_positions(array: bool[], elem: bool) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: bytes[], elem: bytes) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: date[], elem: date) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: decimal[], elem: decimal) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: float[], elem: float) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: inet[], elem: inet) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: int[], elem: int) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: interval[], elem: interval) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: string[], elem: string) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: time[], elem: time) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timestamp[], elem: timestamp) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timestamptz[], elem: timestamptz) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: uuid[], elem: uuid) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: anyenum[], elem: anyenum) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: box2d[], elem: box2d) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: geography[], elem: geography) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: geometry[], elem: geometry) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: jsonb[], elem: jsonb) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: oid[], elem: oid) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: pg_lsn[], elem: pg_lsn) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: refcursor[], elem: refcursor) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timetz[], elem: timetz) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: tuple[], elem: tuple) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: varbit[], elem: varbit) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_prepend(elem: bool, array: bool[]) → bool[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: bytes, array: bytes[]) → bytes[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: date, array: date[]) → date[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: decimal, array: decimal[]) → decimal[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: float, array: float[]) → float[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: inet, array: inet[]) → inet[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: int, array: int[]) → int[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: interval, array: interval[]) → interval[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: string, array: string[]) → string[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: time, array: time[]) → time[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timestamp, array: timestamp[]) → timestamp[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timestamptz, array: timestamptz[]) → timestamptz[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: uuid, array: uuid[]) → uuid[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: anyenum, array: anyenum[]) → anyenum[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: box2d, array: box2d[]) → box2d[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: geography, array: geography[]) → geography[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: geometry, array: geometry[]) → geometry[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: jsonb, array: jsonb[]) → jsonb[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: oid, array: oid[]) → oid[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: pg_lsn, array: pg_lsn[]) → pg_lsn[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: refcursor, array: refcursor[]) → refcursor[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timetz, array: timetz[]) → timetz[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: tuple, array: tuple[]) → tuple[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: varbit, array: varbit[]) → varbit[]

Prepends elem to array, returning the result.

+
Immutable
array_remove(array: bool[], elem: bool) → bool[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: bytes[], elem: bytes) → bytes[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: date[], elem: date) → date[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: decimal[], elem: decimal) → decimal[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: float[], elem: float) → float[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: inet[], elem: inet) → inet[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: int[], elem: int) → int[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: interval[], elem: interval) → interval[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: string[], elem: string) → string[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: time[], elem: time) → time[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timestamp[], elem: timestamp) → timestamp[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timestamptz[], elem: timestamptz) → timestamptz[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: uuid[], elem: uuid) → uuid[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: anyenum[], elem: anyenum) → anyenum[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: box2d[], elem: box2d) → box2d[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: geography[], elem: geography) → geography[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: geometry[], elem: geometry) → geometry[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: jsonb[], elem: jsonb) → jsonb[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: oid[], elem: oid) → oid[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: pg_lsn[], elem: pg_lsn) → pg_lsn[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: refcursor[], elem: refcursor) → refcursor[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timetz[], elem: timetz) → timetz[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: tuple[], elem: tuple) → tuple[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: varbit[], elem: varbit) → varbit[]

Remove from array all elements equal to elem.

+
Immutable
array_replace(array: bool[], toreplace: bool, replacewith: bool) → bool[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: bytes[], toreplace: bytes, replacewith: bytes) → bytes[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: date[], toreplace: date, replacewith: date) → date[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: decimal[], toreplace: decimal, replacewith: decimal) → decimal[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: float[], toreplace: float, replacewith: float) → float[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: inet[], toreplace: inet, replacewith: inet) → inet[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: int[], toreplace: int, replacewith: int) → int[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: interval[], toreplace: interval, replacewith: interval) → interval[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: string[], toreplace: string, replacewith: string) → string[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: time[], toreplace: time, replacewith: time) → time[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timestamp[], toreplace: timestamp, replacewith: timestamp) → timestamp[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timestamptz[], toreplace: timestamptz, replacewith: timestamptz) → timestamptz[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: uuid[], toreplace: uuid, replacewith: uuid) → uuid[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: anyenum[], toreplace: anyenum, replacewith: anyenum) → anyenum[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: box2d[], toreplace: box2d, replacewith: box2d) → box2d[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: geography[], toreplace: geography, replacewith: geography) → geography[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: geometry[], toreplace: geometry, replacewith: geometry) → geometry[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: jsonb[], toreplace: jsonb, replacewith: jsonb) → jsonb[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: oid[], toreplace: oid, replacewith: oid) → oid[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: pg_lsn[], toreplace: pg_lsn, replacewith: pg_lsn) → pg_lsn[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: refcursor[], toreplace: refcursor, replacewith: refcursor) → refcursor[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timetz[], toreplace: timetz, replacewith: timetz) → timetz[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: tuple[], toreplace: tuple, replacewith: tuple) → tuple[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: varbit[], toreplace: varbit, replacewith: varbit) → varbit[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_to_string(input: anyelement[], delim: string) → string

Join an array into a string with a delimiter.

+
Stable
array_to_string(input: anyelement[], delimiter: string, null: string) → string

Join an array into a string with a delimiter, replacing NULLs with a null string.

+
Stable
array_upper(input: anyelement[], array_dimension: int) → int

Calculates the maximum value of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
cardinality(input: anyelement[]) → int

Calculates the number of elements contained in input

+
Immutable
jsonb_array_to_string_array(input: jsonb) → string[]

Convert a JSONB array into a string array.

+
Immutable
string_to_array(str: string, delimiter: string) → string[]

Split a string into components on a delimiter.

+
Immutable
string_to_array(str: string, delimiter: string, null: string) → string[]

Split a string into components on a delimiter with a specified string to consider NULL.

+
Immutable
+ +### BOOL functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
ilike_escape(unescaped: string, pattern: string, escape: string) → bool

Matches case insensitively unescaped with pattern using escape as an escape token.

+
Immutable
inet_contained_by_or_equals(val: inet, container: inet) → bool

Test for subnet inclusion or equality, using only the network parts of the addresses. The host part of the addresses is ignored.

+
Immutable
inet_contains_or_equals(container: inet, val: inet) → bool

Test for subnet inclusion or equality, using only the network parts of the addresses. The host part of the addresses is ignored.

+
Immutable
inet_same_family(val: inet, val: inet) → bool

Checks if two IP addresses are of the same IP family.

+
Immutable
like_escape(unescaped: string, pattern: string, escape: string) → bool

Matches unescaped with pattern using escape as an escape token.

+
Immutable
not_ilike_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped not matches case insensitively with pattern using escape as an escape token.

+
Immutable
not_like_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped not matches with pattern using escape as an escape token.

+
Immutable
not_similar_to_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped not matches with pattern using escape as an escape token.

+
Immutable
+ +### Comparison functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
greatest(anyelement...) → anyelement

Returns the element with the greatest value.

+
Immutable
least(anyelement...) → anyelement

Returns the element with the lowest value.

+
Immutable
num_nonnulls(any...) → int

Returns the number of nonnull arguments.

+
Immutable
num_nulls(any...) → int

Returns the number of null arguments.

+
Immutable
+ +### Cryptographic functions + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
crypt(password: string, salt: string) → string

Generates a hash based on a password and salt. The hash algorithm and number of rounds if applicable are encoded in the salt.

+
Immutable
decrypt(data: bytes, key: bytes, type: string) → bytes

Decrypt data with key using the cipher method specified by type.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
decrypt_iv(data: bytes, key: bytes, iv: bytes, type: string) → bytes

Decrypt data with key using the cipher method specified by type. If the mode is CBC, the provided iv will be used. Otherwise, it will be ignored.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
digest(data: bytes, type: string) → bytes

Computes a binary hash of the given data. type is the algorithm to use (md5, sha1, sha224, sha256, sha384, or sha512).

+
Immutable
digest(data: string, type: string) → bytes

Computes a binary hash of the given data. type is the algorithm to use (md5, sha1, sha224, sha256, sha384, or sha512).

+
Immutable
encrypt(data: bytes, key: bytes, type: string) → bytes

Encrypt data with key using the cipher method specified by type.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
encrypt_iv(data: bytes, key: bytes, iv: bytes, type: string) → bytes

Encrypt data with key using the cipher method specified by type. If the mode is CBC, the provided iv will be used. Otherwise, it will be ignored.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
gen_random_bytes(count: int) → bytes

Returns count cryptographically strong random bytes. At most 1024 bytes can be extracted at a time.

+
Volatile
gen_salt(type: string) → string

Generates a salt for input into the crypt function using the default number of rounds.

+
Volatile
gen_salt(type: string, iter_count: int) → string

Generates a salt for input into the crypt function using iter_count number of rounds.

+
Volatile
hmac(data: bytes, key: bytes, type: string) → bytes

Calculates hashed MAC for data with key key. type is the same as in digest().

+
Immutable
hmac(data: string, key: string, type: string) → bytes

Calculates hashed MAC for data with key key. type is the same as in digest().

+
Immutable
+ +### DECIMAL functions + + + + + +
Function → ReturnsDescriptionVolatility
hlc_to_timestamp(hlc: decimal) → timestamptz

Returns a TimestampTZ representation of a CockroachDB HLC in decimal form.

+

Note that a TimestampTZ has less precision than a CockroachDB HLC. It is intended as +a convenience function to display HLCs in a print-friendly form. Use the decimal +value if you rely on the HLC for accuracy.

+
Immutable
+ +### Date and time functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
age(end: timestamptz, begin: timestamptz) → interval

Calculates the interval between begin and end, normalized into years, months and days.

+

Note this may not be an accurate time span since years and months are normalized +from days, and years and months are out of context. To avoid normalizing days into +months and years, use the timestamptz subtraction operator.

+
Immutable
age(val: timestamptz) → interval

Calculates the interval between val and the current time, normalized into years, months and days.

+

Note this may not be an accurate time span since years and months are normalized +from days, and years and months are out of context. To avoid normalizing days into +months and years, use now() - timestamptz.

+
Stable
clock_timestamp() → timestamp

Returns the current system time on one of the cluster nodes.

+
Volatile
clock_timestamp() → timestamptz

Returns the current system time on one of the cluster nodes.

+
Volatile
current_date() → date

Returns the date of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
current_timestamp(precision: int) → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp(precision: int) → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp(precision: int) → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
date_part(element: string, input: date) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: interval) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, +month, day, hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: time) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: timestamp) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: timestamptz) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Stable
date_part(element: string, input: timetz) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Immutable
date_trunc(element: string, input: date) → timestamptz

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: interval) → interval

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: time) → interval

Truncates input to precision element. Sets all fields that are less +significant than element to zero.

+

Compatible elements: hour, minute, second, millisecond, microsecond.

+
Immutable
date_trunc(element: string, input: timestamp) → timestamp

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Immutable
date_trunc(element: string, input: timestamptz) → timestamptz

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: timestamptz, timezone: string) → timestamptz

Truncates input to precision element in the specified timezone. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
experimental_follower_read_timestamp() → timestamptz

Same as follower_read_timestamp. This name is deprecated.

+
Volatile
experimental_strftime(input: date, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strftime(input: timestamp, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strftime(input: timestamptz, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strptime(input: string, format: string) → timestamptz

Returns input as a timestamptz using format (which uses standard strptime formatting).

+
Immutable
extract(element: string, input: date) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: interval) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, +month, day, hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: time) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: timestamp) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: timestamptz) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Stable
extract(element: string, input: timetz) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Immutable
extract_duration(element: string, input: interval) → int

Extracts element from input. +Compatible elements: hour, minute, second, millisecond, microsecond. +This is deprecated in favor of extract which supports duration.

+
Immutable
follower_read_timestamp() → timestamptz

Returns a timestamp which is very likely to be safe to perform +against a follower replica.

+

This function is intended to be used with an AS OF SYSTEM TIME clause to perform +historical reads against a time which is recent but sufficiently old for reads +to be performed against the closest replica as opposed to the currently +leaseholder for a given range.

+

Note that this function requires an enterprise license on a CCL distribution to +return a result that is less likely the closest replica. It is otherwise +hardcoded as -4.8s from the statement time, which may not result in reading from the +nearest replica.

+
Volatile
localtimestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtimestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp(precision: int) → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp(precision: int) → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtimestamp(precision: int) → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
make_date(year: int, month: int, day: int) → date

Create date (formatted according to ISO 8601) from year, month, and day fields (negative years signify BC).

+
Immutable
make_timestamp(year: int, month: int, day: int, hour: int, min: int, sec: float) → timestamp

Create timestamp (formatted according to ISO 8601) from year, month, day, hour, minute, and seconds fields (negative years signify BC).

+
Immutable
make_timestamptz(year: int, month: int, day: int, hour: int, min: int, sec: float) → timestamptz

Create timestamp (formatted according to ISO 8601) with time zone from year, month, day, hour, minute and seconds fields (negative years signify BC). If timezone is not specified, the current time zone is used.

+
Stable
make_timestamptz(year: int, month: int, day: int, hour: int, min: int, sec: float, timezone: string) → timestamptz

Create timestamp (formatted according to ISO 8601) with time zone from year, month, day, hour, minute and seconds fields (negative years signify BC). If timezone is not specified, the current time zone is used.

+
Stable
now() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
now() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
now() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
overlaps(s1: date, e1: date, s2: date, e2: date) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: date, e1: interval, s2: date, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: time, e1: interval, s2: time, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: time, e1: time, s2: time, e2: time) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamp, e1: interval, s2: timestamp, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamp, e1: timestamp, s2: timestamp, e2: timestamp) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamptz, e1: interval, s2: timestamptz, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Stable
overlaps(s1: timestamptz, e1: timestamptz, s2: timestamptz, e2: timestamptz) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timetz, e1: interval, s2: timetz, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timetz, e1: timetz, s2: timetz, e2: timetz) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
statement_timestamp() → timestamp

Returns the start time of the current statement.

+
Stable
statement_timestamp() → timestamptz

Returns the start time of the current statement.

+
Stable
strftime(input: date, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strftime(input: timestamp, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strftime(input: timestamptz, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strptime(input: string, format: string) → timestamptz

Returns input as a timestamptz using format (which uses standard strptime formatting).

+
Immutable
timeofday() → string

Returns the current system time on one of the cluster nodes as a string.

+
Stable
timezone(timezone: string, time: time) → timetz

Treat given time without time zone as located in the specified time zone.

+
Stable
timezone(timezone: string, timestamp: timestamp) → timestamptz

Treat given time stamp without time zone as located in the specified time zone.

+
Immutable
timezone(timezone: string, timestamptz: timestamptz) → timestamp

Convert given time stamp with time zone to the new time zone, with no time zone designation.

+
Immutable
timezone(timezone: string, timestamptz_string: string) → timestamp

Convert given time stamp with time zone to the new time zone, with no time zone designation.

+
Stable
timezone(timezone: string, timetz: timetz) → timetz

Convert given time with time zone to the new time zone.

+
Stable
to_char(date: date) → string

Convert a date to a string assuming the ISO, MDY DateStyle.

+
Immutable
to_char(date: date, format: string) → string

Convert a timestamp with time zone to a string using the given format.

+
Stable
to_char(interval: interval) → string

Convert an interval to a string assuming the Postgres IntervalStyle.

+
Immutable
to_char(interval: interval, format: string) → string

Convert an interval to a string using the given format.

+
Stable
to_char(timestamp: timestamp) → string

Convert a timestamp to a string assuming the ISO, MDY DateStyle.

+
Immutable
to_char(timestamp: timestamp, format: string) → string

Convert a timestamp to a string using the given format.

+
Stable
to_char(timestamptz: timestamptz, format: string) → string

Convert a timestamp with time zone to a string using the given format.

+
Stable
to_timestamp(timestamp: float) → timestamptz

Convert Unix epoch (seconds since 1970-01-01 00:00:00+00) to timestamp with time zone.

+
Immutable
transaction_timestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
transaction_timestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
transaction_timestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
with_max_staleness(max_staleness: interval) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp within the staleness +bound that allows execution of the reads at the nearest available replica without blocking.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_max_staleness(max_staleness: interval, nearest_only: bool) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp within the staleness +bound that allows execution of the reads at the nearest available replica without blocking.

+

If nearest_only is set to true, reads that cannot be served using the nearest +available replica will error.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_min_timestamp(min_timestamp: timestamptz) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp before the min_timestamp +that allows execution of the reads at the nearest available replica without blocking.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_min_timestamp(min_timestamp: timestamptz, nearest_only: bool) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp before the min_timestamp +that allows execution of the reads at the nearest available replica without blocking.

+

If nearest_only is set to true, reads that cannot be served using the nearest +available replica will error.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
+ +### Enum functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
enum_first(val: anyenum) → anyenum

Returns the first value of the input enum type.

+
Stable
enum_last(val: anyenum) → anyenum

Returns the last value of the input enum type.

+
Stable
enum_range(lower: anyenum, upper: anyenum) → anyenum[]

Returns all values of the input enum in an ordered array between the two arguments (inclusive).

+
Stable
enum_range(val: anyenum) → anyenum[]

Returns all values of the input enum in an ordered array.

+
Stable
+ +### FLOAT functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
abs(val: decimal) → decimal

Calculates the absolute value of val.

+
Immutable
abs(val: float) → float

Calculates the absolute value of val.

+
Immutable
abs(val: int) → int

Calculates the absolute value of val.

+
Immutable
acos(val: float) → float

Calculates the inverse cosine of val.

+
Immutable
acosd(val: float) → float

Calculates the inverse cosine of val with the result in degrees.

+
Immutable
acosh(val: float) → float

Calculates the inverse hyperbolic cosine of val.

+
Immutable
asin(val: float) → float

Calculates the inverse sine of val.

+
Immutable
asind(val: float) → float

Calculates the inverse sine of val with the result in degrees.

+
Immutable
asinh(val: float) → float

Calculates the inverse hyperbolic sine of val.

+
Immutable
atan(val: float) → float

Calculates the inverse tangent of val.

+
Immutable
atan2(x: float, y: float) → float

Calculates the inverse tangent of x/y.

+
Immutable
atan2d(x: float, y: float) → float

Calculates the inverse tangent of x/y with the result in degrees.

+
Immutable
atand(val: float) → float

Calculates the inverse tangent of val with the result in degrees.

+
Immutable
atanh(val: float) → float

Calculates the inverse hyperbolic tangent of val.

+
Immutable
cbrt(val: decimal) → decimal

Calculates the cube root (∛) of val.

+
Immutable
cbrt(val: float) → float

Calculates the cube root (∛) of val.

+
Immutable
ceil(val: decimal) → decimal

Calculates the smallest integer not smaller than val.

+
Immutable
ceil(val: float) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceil(val: int) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: decimal) → decimal

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: float) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: int) → float

Calculates the smallest integer not smaller than val.

+
Immutable
cos(val: float) → float

Calculates the cosine of val.

+
Immutable
cosd(val: float) → float

Calculates the cosine of val where val is in degrees.

+
Immutable
cosh(val: float) → float

Calculates the hyperbolic cosine of val.

+
Immutable
cot(val: float) → float

Calculates the cotangent of val.

+
Immutable
cotd(val: float) → float

Calculates the cotangent of val where val is in degrees.

+
Immutable
degrees(val: float) → float

Converts val as a radian value to a degree value.

+
Immutable
div(x: decimal, y: decimal) → decimal

Calculates the integer quotient of x/y.

+
Immutable
div(x: float, y: float) → float

Calculates the integer quotient of x/y.

+
Immutable
div(x: int, y: int) → int

Calculates the integer quotient of x/y.

+
Immutable
exp(val: decimal) → decimal

Calculates e ^ val.

+
Immutable
exp(val: float) → float

Calculates e ^ val.

+
Immutable
floor(val: decimal) → decimal

Calculates the largest integer not greater than val.

+
Immutable
floor(val: float) → float

Calculates the largest integer not greater than val.

+
Immutable
floor(val: int) → float

Calculates the largest integer not greater than val.

+
Immutable
isnan(val: decimal) → bool

Returns true if val is NaN, false otherwise.

+
Immutable
isnan(val: float) → bool

Returns true if val is NaN, false otherwise.

+
Immutable
ln(val: decimal) → decimal

Calculates the natural log of val.

+
Immutable
ln(val: float) → float

Calculates the natural log of val.

+
Immutable
log(b: decimal, x: decimal) → decimal

Calculates the base b log of x.

+
Immutable
log(b: float, x: float) → float

Calculates the base b log of x.

+
Immutable
log(val: decimal) → decimal

Calculates the base 10 log of val.

+
Immutable
log(val: float) → float

Calculates the base 10 log of val.

+
Immutable
mod(x: decimal, y: decimal) → decimal

Calculates x%y.

+
Immutable
mod(x: float, y: float) → float

Calculates x%y.

+
Immutable
mod(x: int, y: int) → int

Calculates x%y.

+
Immutable
pi() → float

Returns the value for pi (3.141592653589793).

+
Immutable
pow(x: decimal, y: decimal) → decimal

Calculates x^y.

+
Immutable
pow(x: float, y: float) → float

Calculates x^y.

+
Immutable
pow(x: int, y: int) → int

Calculates x^y.

+
Immutable
power(x: decimal, y: decimal) → decimal

Calculates x^y.

+
Immutable
power(x: float, y: float) → float

Calculates x^y.

+
Immutable
power(x: int, y: int) → int

Calculates x^y.

+
Immutable
radians(val: float) → float

Converts val as a degree value to a radians value.

+
Immutable
random() → float

Returns a random floating-point number between 0 (inclusive) and 1 (exclusive). Note that the value contains at most 53 bits of randomness.

+
Volatile
round(input: decimal, decimal_accuracy: int) → decimal

Keeps decimal_accuracy number of figures to the right of the zero position in input using half away from zero rounding. If decimal_accuracy is not in the range -2^31…(2^31-1), the results are undefined.

+
Immutable
round(input: float, decimal_accuracy: int) → float

Keeps decimal_accuracy number of figures to the right of the zero position in input using half to even (banker’s) rounding.

+
Immutable
round(val: decimal) → decimal

Rounds val to the nearest integer, half away from zero: round(+/-2.4) = +/-2, round(+/-2.5) = +/-3.

+
Immutable
round(val: float) → float

Rounds val to the nearest integer using half to even (banker’s) rounding.

+
Immutable
setseed(seed: float) → void

Sets the seed for subsequent random() calls in this session (value between -1.0 and 1.0, inclusive). There are no guarantees as to how this affects the seed of random() calls that appear in the same query as setseed().

+
Volatile
sign(val: decimal) → decimal

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sign(val: float) → float

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sign(val: int) → int

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sin(val: float) → float

Calculates the sine of val.

+
Immutable
sind(val: float) → float

Calculates the sine of val where val is in degrees.

+
Immutable
sinh(val: float) → float

Calculates the hyperbolic sine of val.

+
Immutable
sqrt(val: decimal) → decimal

Calculates the square root of val.

+
Immutable
sqrt(val: float) → float

Calculates the square root of val.

+
Immutable
tan(val: float) → float

Calculates the tangent of val.

+
Immutable
tand(val: float) → float

Calculates the tangent of val where val is in degrees.

+
Immutable
tanh(val: float) → float

Calculates the hyperbolic tangent of val.

+
Immutable
trunc(val: decimal) → decimal

Truncates the decimal values of val.

+
Immutable
trunc(val: decimal, scale: int) → decimal

Truncate val to scale decimal places

+
Immutable
trunc(val: float) → float

Truncates the decimal values of val.

+
Immutable
+ +### Full Text Search functions + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
phraseto_tsquery(config: string, text: string) → tsquery

Converts text to a tsquery, normalizing words according to the specified configuration. The <-> operator is inserted between each token in the input.

+
Immutable
phraseto_tsquery(text: string) → tsquery

Converts text to a tsquery, normalizing words according to the default configuration. The <-> operator is inserted between each token in the input.

+
Stable
plainto_tsquery(config: string, text: string) → tsquery

Converts text to a tsquery, normalizing words according to the specified configuration. The & operator is inserted between each token in the input.

+
Immutable
plainto_tsquery(text: string) → tsquery

Converts text to a tsquery, normalizing words according to the default configuration. The & operator is inserted between each token in the input.

+
Stable
to_tsquery(config: string, text: string) → tsquery

Converts the input text into a tsquery by normalizing each word in the input according to the specified configuration. The input must already be formatted like a tsquery, in other words, subsequent tokens must be connected by a tsquery operator (&, |, <->, !).

+
Immutable
to_tsquery(text: string) → tsquery

Converts the input text into a tsquery by normalizing each word in the input according to the default configuration. The input must already be formatted like a tsquery, in other words, subsequent tokens must be connected by a tsquery operator (&, |, <->, !).

+
Stable
to_tsvector(config: string, text: string) → tsvector

Converts text to a tsvector, normalizing words according to the specified configuration. Position information is included in the result.

+
Immutable
to_tsvector(text: string) → tsvector

Converts text to a tsvector, normalizing words according to the default configuration. Position information is included in the result.

+
Stable
ts_parse(parser_name: string, document: string) → tuple{int AS tokid, string AS token}

ts_parse parses the given document and returns a series of records, one for each token produced by parsing. Each record includes a tokid showing the assigned token type and a token which is the text of the token.

+
Stable
ts_rank(vector: tsvector, query: tsquery) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(vector: tsvector, query: tsquery, normalization: int) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(weights: float[], vector: tsvector, query: tsquery) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(weights: float[], vector: tsvector, query: tsquery, normalization: int) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
+ +### Fuzzy String Matching functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
levenshtein(source: string, target: string) → int

Calculates the Levenshtein distance between two strings. Maximum input length is 255 characters.

+
Immutable
levenshtein(source: string, target: string, ins_cost: int, del_cost: int, sub_cost: int) → int

Calculates the Levenshtein distance between two strings. The cost parameters specify how much to charge for each edit operation. Maximum input length is 255 characters.

+
Immutable
metaphone(source: string, max_output_length: int) → string

Convert a string to its Metaphone code. Maximum input length is 255 characters.

+
Immutable
soundex(source: string) → string

Convert a string to its Soundex code.

+
Immutable
+ +### ID generation functions + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
experimental_uuid_v4() → bytes

Returns a UUID.

+
Volatile
gen_random_ulid() → uuid

Generates a random ULID and returns it as a value of UUID type.

+
Volatile
gen_random_uuid() → uuid

Generates a random version 4 UUID, and returns it as a value of UUID type.

+
Volatile
unique_rowid() → int

Returns a unique ID used by CockroachDB to generate unique row IDs if a Primary Key isn’t defined for the table. The value is a combination of the insert timestamp and the ID of the node executing the statement, which guarantees this combination is globally unique. However, there can be gaps and the order is not completely guaranteed.

+
Volatile
unordered_unique_rowid() → int

Returns a unique ID. The value is a combination of the insert timestamp (bit-reversed) and the ID of the node executing the statement, which guarantees this combination is globally unique. The way it is generated is statistically likely to not have any ordering relative to previously generated values.

+
Volatile
uuid_generate_v1() → uuid

Generates a version 1 UUID, and returns it as a value of UUID type. To avoid exposing the server’s real MAC address, this uses a random MAC address and a timestamp. Essentially, this is an alias for uuid_generate_v1mc.

+
Volatile
uuid_generate_v1mc() → uuid

Generates a version 1 UUID, and returns it as a value of UUID type. This uses a random MAC address and a timestamp.

+
Volatile
uuid_generate_v3(namespace: uuid, name: string) → uuid

Generates a version 3 UUID in the given namespace using the specified input name, with md5 as the hashing method. The namespace should be one of the special constants produced by the uuid_ns_*() functions.

+
Immutable
uuid_generate_v4() → uuid

Generates a random version 4 UUID, and returns it as a value of UUID type.

+
Volatile
uuid_generate_v5(namespace: uuid, name: string) → uuid

Generates a version 5 UUID in the given namespace using the specified input name. This is similar to a version 3 UUID, except it uses SHA-1 for hashing.

+
Immutable
uuid_nil() → uuid

Returns a nil UUID constant.

+
Immutable
uuid_ns_dns() → uuid

Returns a constant designating the DNS namespace for UUIDs.

+
Immutable
uuid_ns_oid() → uuid

Returns a constant designating the ISO object identifier (OID) namespace for UUIDs. These are unrelated to the OID type used internally in the database.

+
Immutable
uuid_ns_url() → uuid

Returns a constant designating the URL namespace for UUIDs.

+
Immutable
uuid_ns_x500() → uuid

Returns a constant designating the X.500 distinguished name (DN) namespace for UUIDs.

+
Immutable
uuid_v4() → bytes

Returns a UUID.

+
Volatile
+ +### INET functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
abbrev(val: inet) → string

Converts the combined IP address and prefix length to an abbreviated display format as text. For INET types, this will omit the prefix length if it’s not the default (32 for IPv4, 128 for IPv6)

+

For example, abbrev('192.168.1.2/24') returns '192.168.1.2/24'

+
Immutable
broadcast(val: inet) → inet

Gets the broadcast address for the network address represented by the value.

+

For example, broadcast('192.168.1.2/24') returns '192.168.1.255/24'

+
Immutable
family(val: inet) → int

Extracts the IP family of the value; 4 for IPv4, 6 for IPv6.

+

For example, family('::1') returns 6

+
Immutable
host(val: inet) → string

Extracts the address part of the combined address/prefixlen value as text.

+

For example, host('192.168.1.2/16') returns '192.168.1.2'

+
Immutable
hostmask(val: inet) → inet

Creates an IP host mask corresponding to the prefix length in the value.

+

For example, hostmask('192.168.1.2/16') returns '0.0.255.255'

+
Immutable
masklen(val: inet) → int

Retrieves the prefix length stored in the value.

+

For example, masklen('192.168.1.2/16') returns 16

+
Immutable
netmask(val: inet) → inet

Creates an IP network mask corresponding to the prefix length in the value.

+

For example, netmask('192.168.1.2/16') returns '255.255.0.0'

+
Immutable
set_masklen(val: inet, prefixlen: int) → inet

Sets the prefix length of val to prefixlen.

+

For example, set_masklen('192.168.1.2', 16) returns '192.168.1.2/16'.

+
Immutable
+ +### INT functions + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
crc32c(bytes...) → int

Calculates the CRC-32 hash using the Castagnoli polynomial.

+
Leakproof
crc32c(string...) → int

Calculates the CRC-32 hash using the Castagnoli polynomial.

+
Leakproof
crc32ieee(bytes...) → int

Calculates the CRC-32 hash using the IEEE polynomial.

+
Leakproof
crc32ieee(string...) → int

Calculates the CRC-32 hash using the IEEE polynomial.

+
Leakproof
fnv32(bytes...) → int

Calculates the 32-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv32(string...) → int

Calculates the 32-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv32a(bytes...) → int

Calculates the 32-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv32a(string...) → int

Calculates the 32-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv64(bytes...) → int

Calculates the 64-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv64(string...) → int

Calculates the 64-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv64a(bytes...) → int

Calculates the 64-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv64a(string...) → int

Calculates the 64-bit FNV-1a hash value of a set of values.

+
Leakproof
width_bucket(operand: decimal, b1: decimal, b2: decimal, count: int) → int

Return the bucket number to which operand would be assigned in a histogram having count equal-width buckets spanning the range b1 to b2. Returns 0 or count+1 for an input outside that range.

+
Immutable
width_bucket(operand: int, b1: int, b2: int, count: int) → int

Return the bucket number to which operand would be assigned in a histogram having count equal-width buckets spanning the range b1 to b2.

+
Immutable
width_bucket(operand: anyelement, thresholds: anyelement[]) → int

return the bucket number to which operand would be assigned given an array listing the lower bounds of the buckets; returns 0 for an input less than the first lower bound; the thresholds array must be sorted, smallest first, or unexpected results will be obtained

+
Immutable
+ +### JSONB functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_to_json(array: anyelement[]) → jsonb

Returns the array as JSON or JSONB.

+
Stable
array_to_json(array: anyelement[], pretty_bool: bool) → jsonb

Returns the array as JSON or JSONB.

+
Stable
json_array_elements(input: jsonb) → jsonb

Expands a JSON array to a set of JSON values.

+
Immutable
json_array_elements_text(input: jsonb) → string

Expands a JSON array to a set of text values.

+
Immutable
json_array_length(json: jsonb) → int

Returns the number of elements in the outermost JSON or JSONB array.

+
Immutable
json_build_array(any...) → jsonb

Builds a possibly-heterogeneously-typed JSON or JSONB array out of a variadic argument list.

+
Stable
json_build_object(any...) → jsonb

Builds a JSON object out of a variadic argument list.

+
Stable
json_each(input: jsonb) → tuple{string AS key, jsonb AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs.

+
Immutable
json_each_text(input: jsonb) → tuple{string AS key, string AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs. The returned values will be of type text.

+
Immutable
json_extract_path(jsonb, string...) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
json_extract_path_text(jsonb, string...) → string

Returns the JSON value as text pointed to by the variadic arguments.

+
Immutable
json_object(keys: string[], values: string[]) → jsonb

This form of json_object takes keys and values pairwise from two separate arrays. In all other respects it is identical to the one-argument form.

+
Immutable
json_object(texts: string[]) → jsonb

Builds a JSON or JSONB object out of a text array. The array must have exactly one dimension with an even number of members, in which case they are taken as alternating key/value pairs.

+
Immutable
json_populate_record(base: anyelement, from_json: jsonb) → anyelement

Expands the object in from_json to a row whose columns match the record type defined by base.

+
Stable
json_populate_recordset(base: anyelement, from_json: jsonb) → anyelement

Expands the outermost array of objects in from_json to a set of rows whose columns match the record type defined by base

+
Stable
json_remove_path(val: jsonb, path: string[]) → jsonb

Remove the specified path from the JSON object.

+
Immutable
json_set(val: jsonb, path: string[], to: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
json_set(val: jsonb, path: string[], to: jsonb, create_missing: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If create_missing is false, new keys will not be inserted to objects and values will not be prepended or appended to arrays.

+
Immutable
json_strip_nulls(from_json: jsonb) → jsonb

Returns from_json with all object fields that have null values omitted. Other null values are untouched.

+
Immutable
json_typeof(val: jsonb) → string

Returns the type of the outermost JSON value as a text string.

+
Immutable
json_valid(string: string) → bool

Returns whether the given string is valid JSON or not.

+
Immutable
jsonb_array_elements(input: jsonb) → jsonb

Expands a JSON array to a set of JSON values.

+
Immutable
jsonb_array_elements_text(input: jsonb) → string

Expands a JSON array to a set of text values.

+
Immutable
jsonb_array_length(json: jsonb) → int

Returns the number of elements in the outermost JSON or JSONB array.

+
Immutable
jsonb_build_array(any...) → jsonb

Builds a possibly-heterogeneously-typed JSON or JSONB array out of a variadic argument list.

+
Stable
jsonb_build_object(any...) → jsonb

Builds a JSON object out of a variadic argument list.

+
Stable
jsonb_each(input: jsonb) → tuple{string AS key, jsonb AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs.

+
Immutable
jsonb_each_text(input: jsonb) → tuple{string AS key, string AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs. The returned values will be of type text.

+
Immutable
jsonb_exists_any(json: jsonb, array: string[]) → bool

Returns whether any of the strings in the text array exist as top-level keys or array elements

+
Immutable
jsonb_extract_path(jsonb, string...) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
jsonb_extract_path_text(jsonb, string...) → string

Returns the JSON value as text pointed to by the variadic arguments.

+
Immutable
jsonb_insert(target: jsonb, path: string[], new_val: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments. new_val will be inserted before path target.

+
Immutable
jsonb_insert(target: jsonb, path: string[], new_val: jsonb, insert_after: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If insert_after is true (default is false), new_val will be inserted after path target.

+
Immutable
jsonb_object(keys: string[], values: string[]) → jsonb

This form of json_object takes keys and values pairwise from two separate arrays. In all other respects it is identical to the one-argument form.

+
Immutable
jsonb_object(texts: string[]) → jsonb

Builds a JSON or JSONB object out of a text array. The array must have exactly one dimension with an even number of members, in which case they are taken as alternating key/value pairs.

+
Immutable
jsonb_populate_record(base: anyelement, from_json: jsonb) → anyelement

Expands the object in from_json to a row whose columns match the record type defined by base.

+
Stable
jsonb_populate_recordset(base: anyelement, from_json: jsonb) → anyelement

Expands the outermost array of objects in from_json to a set of rows whose columns match the record type defined by base

+
Stable
jsonb_pretty(val: jsonb) → string

Returns the given JSON value as a STRING indented and with newlines.

+
Immutable
jsonb_set(val: jsonb, path: string[], to: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
jsonb_set(val: jsonb, path: string[], to: jsonb, create_missing: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If create_missing is false, new keys will not be inserted to objects and values will not be prepended or appended to arrays.

+
Immutable
jsonb_strip_nulls(from_json: jsonb) → jsonb

Returns from_json with all object fields that have null values omitted. Other null values are untouched.

+
Immutable
jsonb_typeof(val: jsonb) → string

Returns the type of the outermost JSON value as a text string.

+
Immutable
row_to_json(row: tuple) → jsonb

Returns the row as a JSON object.

+
Stable
to_json(val: anyelement) → jsonb

Returns the value as JSON or JSONB.

+
Stable
to_jsonb(val: anyelement) → jsonb

Returns the value as JSON or JSONB.

+
Stable
+ +### Jsonpath functions + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
jsonb_path_exists(target: jsonb, path: jsonpath) → bool

Checks whether the JSON path returns any item for the specified JSON value.

+
Immutable
jsonb_path_exists(target: jsonb, path: jsonpath, vars: jsonb) → bool

Checks whether the JSON path returns any item for the specified JSON value. +The vars argument must be a JSON object, and its fields provide named +values to be substituted into the jsonpath expression.

+
Immutable
jsonb_path_exists(target: jsonb, path: jsonpath, vars: jsonb, silent: bool) → bool

Checks whether the JSON path returns any item for the specified JSON value. +The vars argument must be a JSON object, and its fields provide named +values to be substituted into the jsonpath expression. If the silent +argument is true, the function suppresses the following errors: +missing object field or array element, unexpected JSON item type, +datetime and numeric errors.

+
Immutable
jsonb_path_match(target: jsonb, path: jsonpath) → bool

Returns the SQL boolean result of a JSON path predicate check +for the specified JSON value. (This is useful only with predicate check +expressions, not SQL-standard JSON path expressions, since it will +either fail or return NULL if the path result is not a single boolean +value.)

+
Immutable
jsonb_path_match(target: jsonb, path: jsonpath, vars: jsonb) → bool

Returns the SQL boolean result of a JSON path predicate check +for the specified JSON value. (This is useful only with predicate check +expressions, not SQL-standard JSON path expressions, since it will +either fail or return NULL if the path result is not a single boolean +value.) The vars argument must be a JSON object, and its fields provide +named values to be substituted into the jsonpath expression.

+
Immutable
jsonb_path_match(target: jsonb, path: jsonpath, vars: jsonb, silent: bool) → bool

Returns the SQL boolean result of a JSON path predicate check +for the specified JSON value. (This is useful only with predicate check +expressions, not SQL-standard JSON path expressions, since it will +either fail or return NULL if the path result is not a single boolean +value.) The vars argument must be a JSON object, and its fields provide +named values to be substituted into the jsonpath expression. If the +silent argument is true, the function suppresses the following errors: +missing object field or array element, unexpected JSON item type, +datetime and numeric errors.

+
Immutable
jsonb_path_query(target: jsonb, path: jsonpath) → jsonb

Returns all JSON items returned by the JSON path for the specified JSON value.

+
Immutable
jsonb_path_query(target: jsonb, path: jsonpath, vars: jsonb) → jsonb

Returns all JSON items returned by the JSON path for the specified JSON value. +The vars argument must be a JSON object, and its fields provide named values +to be substituted into the jsonpath expression.

+
Immutable
jsonb_path_query(target: jsonb, path: jsonpath, vars: jsonb, silent: bool) → jsonb

Returns all JSON items returned by the JSON path for the specified JSON value. +The vars argument must be a JSON object, and its fields provide named values +to be substituted into the jsonpath expression. If the silent argument is true, +the function suppresses the following errors: missing object field or array +element, unexpected JSON item type, datetime and numeric errors.

+
Immutable
jsonb_path_query_array(target: jsonb, path: jsonpath) → jsonb

Returns all JSON items returned by the JSON path for the +specified JSON value, as a JSON array.

+
Immutable
jsonb_path_query_array(target: jsonb, path: jsonpath, vars: jsonb) → jsonb

Returns all JSON items returned by the JSON path for the +specified JSON value, as a JSON array. The vars argument must be a +JSON object, and its fields provide named values to be substituted +into the jsonpath expression.

+
Immutable
jsonb_path_query_array(target: jsonb, path: jsonpath, vars: jsonb, silent: bool) → jsonb

Returns all JSON items returned by the JSON path for the +specified JSON value, as a JSON array. The vars argument must be a +JSON object, and its fields provide named values to be substituted +into the jsonpath expression. If the silent argument is true, the +function suppresses the following errors: missing object field or +array element, unexpected JSON item type, datetime and numeric errors.

+
Immutable
jsonb_path_query_first(target: jsonb, path: jsonpath) → jsonb

Returns the first JSON item returned by the JSON path for the +specified JSON value, or NULL if there are no results.

+
Immutable
jsonb_path_query_first(target: jsonb, path: jsonpath, vars: jsonb) → jsonb

Returns the first JSON item returned by the JSON path for the +specified JSON value, or NULL if there are no results. The vars +argument must be a JSON object, and its fields provide named values +to be substituted into the jsonpath expression.

+
Immutable
jsonb_path_query_first(target: jsonb, path: jsonpath, vars: jsonb, silent: bool) → jsonb

Returns the first JSON item returned by the JSON path for the +specified JSON value, or NULL if there are no results. The vars +argument must be a JSON object, and its fields provide named values +to be substituted into the jsonpath expression. If the silent argument is true, the +function suppresses the following errors: missing object field or +array element, unexpected JSON item type, datetime and numeric errors.

+
Immutable
+ +### Multi-region functions + + + + + + + +
Function → ReturnsDescriptionVolatility
default_to_database_primary_region(val: string) → string

Returns the given region if the region has been added to the current database. +Otherwise, this will return the primary region of the current database. +This will error if the current database is not a multi-region database.

+
Stable
gateway_region() → string

Returns the region of the connection’s current node as defined by +the locality flag on node startup. Returns an error if no region is set.

+
Stable
rehome_row() → string

Returns the region of the connection’s current node as defined by +the locality flag on node startup. Returns an error if no region is set.

+
Stable
+ +### PGVector functions + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cosine_distance(v1: vector, v2: vector) → float

Returns the cosine distance between the two vectors.

+
Immutable
inner_product(v1: vector, v2: vector) → float

Returns the inner product between the two vectors.

+
Immutable
l1_distance(v1: vector, v2: vector) → float

Returns the Manhattan distance between the two vectors.

+
Immutable
l2_distance(v1: vector, v2: vector) → float

Returns the Euclidean distance between the two vectors.

+
Immutable
vector_dims(vector: vector) → int

Returns the number of the dimensions in the vector.

+
Immutable
vector_norm(vector: vector) → float

Returns the Euclidean norm of the vector.

+
Immutable
+ +### STRING[] functions + + + + + + +
Function → ReturnsDescriptionVolatility
regexp_split_to_array(string: string, pattern: string) → string[]

Split string using a POSIX regular expression as the delimiter.

+
Immutable
regexp_split_to_array(string: string, pattern: string, flags: string) → string[]

Split string using a POSIX regular expression as the delimiter with flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
+ +### Sequence functions + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
currval(sequence_name: string) → int

Returns the latest value obtained with nextval for this sequence in this session.

+
Volatile
currval(sequence_name: regclass) → int

Returns the latest value obtained with nextval for this sequence in this session.

+
Volatile
lastval() → int

Return value most recently obtained with nextval in this session.

+
Volatile
nextval(sequence_name: string) → int

Advances the given sequence and returns its new value.

+
Volatile
nextval(sequence_name: regclass) → int

Advances the given sequence and returns its new value.

+
Volatile
setval(sequence_name: string, value: int) → int

Set the given sequence’s current value. The next call to nextval will return value + Increment

+
Volatile
setval(sequence_name: string, value: int, is_called: bool) → int

Set the given sequence’s current value. If is_called is false, the next call to nextval will return value; otherwise value + Increment.

+
Volatile
setval(sequence_name: regclass, value: int) → int

Set the given sequence’s current value. The next call to nextval will return value + Increment

+
Volatile
setval(sequence_name: regclass, value: int, is_called: bool) → int

Set the given sequence’s current value. If is_called is false, the next call to nextval will return value; otherwise value + Increment.

+
Volatile
+ +### Set-returning functions + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
aclexplode(aclitems: string[]) → tuple{oid AS grantor, oid AS grantee, string AS privilege_type, bool AS is_grantable}

Produces a virtual table containing aclitem stuff (returns no rows as this feature is unsupported in CockroachDB)

+
Stable
generate_series(start: int, end: int) → int

Produces a virtual table containing the integer values from start to end, inclusive.

+
Immutable
generate_series(start: int, end: int, step: int) → int

Produces a virtual table containing the integer values from start to end, inclusive, by increment of step.

+
Immutable
generate_series(start: timestamp, end: timestamp, step: interval) → timestamp

Produces a virtual table containing the timestamp values from start to end, inclusive, by increment of step.

+
Immutable
generate_series(start: timestamptz, end: timestamptz, step: interval) → timestamptz

Produces a virtual table containing the timestampTZ values from start to end, inclusive, by increment of step.

+
Immutable
generate_subscripts(array: anyelement[]) → int

Returns a series comprising the given array’s subscripts.

+
Immutable
generate_subscripts(array: anyelement[], dim: int) → int

Returns a series comprising the given array’s subscripts.

+
Immutable
generate_subscripts(array: anyelement[], dim: int, reverse: bool) → int

Returns a series comprising the given array’s subscripts.

+

When reverse is true, the series is returned in reverse order.

+
Immutable
information_schema._pg_expandarray(input: anyelement[]) → tuple{anyelement AS x, int AS n}

Returns the input array as a set of rows with an index

+
Immutable
json_object_keys(input: jsonb) → string

Returns sorted set of keys in the outermost JSON object.

+
Immutable
json_to_record(input: jsonb) → tuple

Builds an arbitrary record from a JSON object.

+
Stable
json_to_recordset(input: jsonb) → tuple

Builds an arbitrary set of records from a JSON array of objects.

+
Stable
jsonb_object_keys(input: jsonb) → string

Returns sorted set of keys in the outermost JSON object.

+
Immutable
jsonb_to_record(input: jsonb) → tuple

Builds an arbitrary record from a JSON object.

+
Stable
jsonb_to_recordset(input: jsonb) → tuple

Builds an arbitrary set of records from a JSON array of objects.

+
Stable
pg_get_keywords() → tuple{string AS word, string AS catcode, string AS catdesc}

Produces a virtual table containing the keywords known to the SQL parser.

+
Immutable
pg_options_to_table(options: string[]) → tuple{string AS option_name, string AS option_value}

Converts the options array format to a table.

+
Stable
regexp_split_to_table(string: string, pattern: string) → string

Split string using a POSIX regular expression as the delimiter.

+
Immutable
regexp_split_to_table(string: string, pattern: string, flags: string) → string

Split string using a POSIX regular expression as the delimiter with flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
unnest(anyelement[], anyelement[], anyelement[]...) → tuple{anyelement AS unnest, anyelement AS unnest, anyelement AS unnest}

Returns the input arrays as a set of rows

+
Immutable
unnest(input: anyelement[]) → anyelement

Returns the input array as a set of rows

+
Immutable
workload_index_recs() → tuple{string AS index_rec, bytes[] AS fingerprint_ids}

Returns index recommendations and the fingerprint ids that the indexes will impact

+
Immutable
workload_index_recs(timestamptz: timestamptz) → tuple{string AS index_rec, bytes[] AS fingerprint_ids}

Returns index recommendations and the fingerprint ids that the indexes will impact

+
Immutable
+ +### Spatial functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
_st_contains(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no points of geometry_b lie in the exterior of geometry_a, and there is at least one point in the interior of geometry_b that lies in the interior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_containsproperly(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_b intersects the interior of geometry_a but not the boundary or exterior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_coveredby(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_a is outside geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_coveredby(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_covers(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_b is outside geography_a.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_covers(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_crosses(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a has some - but not all - interior points in common with geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dfullywithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, inclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than or equal to distance units.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dfullywithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, exclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than distance units.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_equals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is spatially equal to geometry_b, i.e. ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = true.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_intersects(geography_a: geography, geography_b: geography) → bool

Returns true if geography_a shares any portion of space with geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_intersects(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_overlaps(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a intersects but does not completely contain geometry_b, or vice versa. “Does not completely” implies ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = false.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_touches(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if the only points in common between geometry_a and geometry_b are on the boundary. Note points do not touch other points.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_within(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is completely inside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
addgeometrycolumn(catalog_name: string, schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(catalog_name: string, schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
geometrytype(geometry: geometry) → string

Returns the type of geometry as a string.

+

This function utilizes the GEOS module.

+
Immutable
geomfromewkb(val: bytes) → geometry

Returns the Geometry from an EWKB representation.

+
Immutable
geomfromewkt(val: string) → geometry

Returns the Geometry from an EWKT representation.

+
Immutable
postgis_addbbox(geometry: geometry) → geometry

Compatibility placeholder function with PostGIS. This does not perform any operation on the Geometry.

+
Immutable
postgis_dropbbox(geometry: geometry) → geometry

Compatibility placeholder function with PostGIS. This does not perform any operation on the Geometry.

+
Immutable
postgis_extensions_upgrade() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_full_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_geos_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_getbbox(geometry: geometry) → box2d

Returns a box2d encapsulating the given Geometry.

+
Immutable
postgis_hasbbox(geometry: geometry) → bool

Returns whether a given Geometry has a bounding box. False for points and empty geometries; always true otherwise.

+
Immutable
postgis_lib_build_date() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_lib_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_liblwgeom_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_libxml_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_proj_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_build_date() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_installed() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_released() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_wagyu_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
st_addmeasure(geometry: geometry, start: float, end: float) → geometry

Returns a copy of a LineString or MultiLineString with measure coordinates linearly interpolated between the specified start and end values. Any existing M coordinates will be overwritten.

+
Immutable
st_addpoint(line_string: geometry, point: geometry) → geometry

Adds a Point to the end of a LineString.

+
Immutable
st_addpoint(line_string: geometry, point: geometry, index: int) → geometry

Adds a Point to a LineString at the given 0-based index (-1 to append).

+
Immutable
st_affine(geometry: geometry, a: float, b: float, c: float, d: float, e: float, f: float, g: float, h: float, i: float, x_off: float, y_off: float, z_off: float) → geometry

Applies a 3D affine transformation to the given geometry.

+

The matrix transformation will be applied as follows for each coordinate:
/ a b c x_off \   / x \
| d e f y_off |   | y |
| g h i z_off |   | z |
\ 0 0 0 1     /   \ 0 /

+
Immutable
st_affine(geometry: geometry, a: float, b: float, d: float, e: float, x_off: float, y_off: float) → geometry

Applies a 2D affine transformation to the given geometry.

+

The matrix transformation will be applied as follows for each coordinate:
/ a b x_off \   / x \
| d e y_off |   | y |
\ 0 0 1     /   \ 0 /

+
Immutable
st_angle(line1: geometry, line2: geometry) → float

Returns the clockwise angle between two LINESTRING geometries, treating them as vectors between their start- and endpoints. Returns NULL if any vectors have 0 length.

+
Immutable
st_angle(point1: geometry, point2: geometry, point3: geometry) → float

Returns the clockwise angle between the vectors formed by point2,point1 and point2,point3. The arguments must be POINT geometries. Returns NULL if any vectors have 0 length.

+
Immutable
st_angle(point1: geometry, point2: geometry, point3: geometry, point4: geometry) → float

Returns the clockwise angle between the vectors formed by point1,point2 and point3,point4. The arguments must be POINT geometries. Returns NULL if any vectors have 0 length.

+
Immutable
st_area(geography: geography) → float

Returns the area of the given geography in meters^2. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_area(geography: geography, use_spheroid: bool) → float

Returns the area of the given geography in meters^2.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_area(geometry: geometry) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_area(geometry_str: string) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_area2d(geometry: geometry) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_asbinary(geography: geography) → bytes

Returns the WKB representation of a given Geography.

+
Immutable
st_asbinary(geography: geography, xdr_or_ndr: string) → bytes

Returns the WKB representation of a given Geography. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_asbinary(geometry: geometry) → bytes

Returns the WKB representation of a given Geometry.

+
Immutable
st_asbinary(geometry: geometry, xdr_or_ndr: string) → bytes

Returns the WKB representation of a given Geometry. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_asencodedpolyline(geometry: geometry) → string

Returns the geometry as an Encoded Polyline. +This format is used by Google Maps with precision=5 and by Open Source Routing Machine with precision=5 and 6. +Preserves 5 decimal places.

+
Immutable
st_asencodedpolyline(geometry: geometry, precision: int4) → string

Returns the geometry as an Encoded Polyline. +This format is used by Google Maps with precision=5 and by Open Source Routing Machine with precision=5 and 6. +Precision specifies how many decimal places will be preserved in Encoded Polyline. Value should be the same on encoding and decoding, or coordinates will be incorrect.

+
Immutable
st_asewkb(geography: geography) → bytes

Returns the EWKB representation of a given Geography.

+
Immutable
st_asewkb(geometry: geometry) → bytes

Returns the EWKB representation of a given Geometry.

+
Immutable
st_asewkt(geography: geography) → string

Returns the EWKT representation of a given Geography. A default of 15 decimal digits is used.

+
Immutable
st_asewkt(geography: geography, max_decimal_digits: int) → string

Returns the EWKT representation of a given Geography. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_asewkt(geometry: geometry) → string

Returns the EWKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+
Immutable
st_asewkt(geometry: geometry, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_asewkt(geometry_str: string) → string

Returns the EWKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asewkt(geometry_str: string, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geography: geography) → string

Returns the GeoJSON representation of a given Geography. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(geography: geography, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geography with max_decimal_digits output for each coordinate value.

+
Immutable
st_asgeojson(geography: geography, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geography with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option (default for Geography)
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g. EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g. urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326
  • +
+
Immutable
st_asgeojson(geometry: geometry) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(geometry: geometry, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+
Immutable
st_asgeojson(geometry: geometry, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g. EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g. urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326 (default for Geometry)
  • +
+
Immutable
st_asgeojson(geometry_str: string) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geometry_str: string, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geometry_str: string, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g. EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g. urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326 (default for Geometry)
  • +
+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(row: tuple) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(row: tuple, geo_column: string) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. Coordinates have a maximum of 9 decimal digits.

+
Stable
st_asgeojson(row: tuple, geo_column: string, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. max_decimal_digits will be output for each coordinate value.

+
Stable
st_asgeojson(row: tuple, geo_column: string, max_decimal_digits: int, pretty: bool) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. max_decimal_digits will be output for each coordinate value. Output will be pretty printed in JSON if pretty is true.

+
Stable
st_ashexewkb(geography: geography) → string

Returns the EWKB representation in hex of a given Geography.

+
Immutable
st_ashexewkb(geography: geography, xdr_or_ndr: string) → string

Returns the EWKB representation in hex of a given Geography. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_ashexewkb(geometry: geometry) → string

Returns the EWKB representation in hex of a given Geometry.

+
Immutable
st_ashexewkb(geometry: geometry, xdr_or_ndr: string) → string

Returns the EWKB representation in hex of a given Geometry. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_ashexwkb(geography: geography) → string

Returns the WKB representation in hex of a given Geography.

+
Immutable
st_ashexwkb(geometry: geometry) → string

Returns the WKB representation in hex of a given Geometry.

+
Immutable
st_askml(geography: geography) → string

Returns the KML representation of a given Geography.

+
Immutable
st_askml(geometry: geometry) → string

Returns the KML representation of a given Geometry.

+
Immutable
st_askml(geometry_str: string) → string

Returns the KML representation of a given Geometry.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds. +Uses 256 as the buffer size in tile coordinate space for geometry clipping. +Uses 4096 as the tile extent size in tile coordinate space.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be clipped and transformed.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds. +Uses 256 as the buffer size in tile coordinate space for geometry clipping.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be clipped and transformed.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int, buffer: int) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be clipped and transformed.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int, buffer: int, clip: bool) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds if required.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be transformed, and clipped if required.

+
Immutable
st_astext(geography: geography) → string

Returns the WKT representation of a given Geography. A default of 15 decimal digits is used.

+
Immutable
st_astext(geography: geography, max_decimal_digits: int) → string

Returns the WKT representation of a given Geography. The max_decimal_digits parameter controls the maximum decimal digits to print after the decimal point. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_astext(geometry: geometry) → string

Returns the WKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+
Immutable
st_astext(geometry: geometry, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the decimal point. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_astext(geometry_str: string) → string

Returns the WKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_astext(geometry_str: string, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the decimal point. Use -1 to print as many digits as required to rebuild the same number.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int, precision_z: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int, precision_z: int, precision_m: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_azimuth(geography_a: geography, geography_b: geography) → float

Returns the azimuth in radians of the segment defined by the given point geographies, or NULL if the two points are coincident. It is solved using the Inverse geodesic problem.

+

The azimuth angle is referenced from north, and is positive clockwise: North = 0; East = π/2; South = π; West = 3π/2.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_azimuth(geometry_a: geometry, geometry_b: geometry) → float

Returns the azimuth in radians of the segment defined by the given point geometries, or NULL if the two points are coincident.

+

The azimuth angle is referenced from north, and is positive clockwise: North = 0; East = π/2; South = π; West = 3π/2.

+
Immutable
st_bdpolyfromtext(str: string, srid: int) → geometry

Returns a Polygon from multilinestring WKT with a SRID. If the input is not a multilinestring an error will be thrown.

+
Immutable
st_boundary(geometry: geometry) → geometry

Returns the closure of the combinatorial boundary of this Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_box2dfromgeohash(geohash: string) → box2d

Return a Box2D from a GeoHash string with max precision.

+
Immutable
st_box2dfromgeohash(geohash: string, precision: int) → box2d

Return a Box2D from a GeoHash string with supplied precision.

+
Immutable
st_buffer(geography: geography, distance: float) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geography: geography, distance: float, buffer_style_params: string) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geography: geography, distance: float, quad_segs: int) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geometry: geometry, distance: decimal) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float, buffer_style_params: string) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float, quad_segs: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry_str: string, distance: decimal) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float, buffer_style_params: string) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float, quad_segs: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_centroid(geography: geography) → geography

Returns the centroid of given geography. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_centroid(geography: geography, use_spheroid: bool) → geography

Returns the centroid of given geography.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_centroid(geometry: geometry) → geometry

Returns the centroid of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_centroid(geometry_str: string) → geometry

Returns the centroid of the given geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_clipbybox2d(geometry: geometry, box2d: box2d) → geometry

Clips the geometry to conform to the bounding box specified by box2d.

+
Immutable
st_closestpoint(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the 2-dimensional point on geometry_a that is closest to geometry_b. This is the first point of the shortest line.

+
Immutable
st_collectionextract(geometry: geometry, type: int) → geometry

Given a collection, returns a multitype consisting only of elements of the specified type. If there are no elements of the given type, an EMPTY geometry is returned. Types are specified as 1=POINT, 2=LINESTRING, 3=POLYGON - other types are not supported.

+
Immutable
st_collectionhomogenize(geometry: geometry) → geometry

Returns the “simplest” representation of a collection’s contents. Collections of a single type will be returned as an appropriate multitype, or a singleton if it only contains a single geometry.

+
Immutable
st_combinebbox(box2d: box2d, geometry: geometry) → box2d

Combines the current bounding box with the bounding box of the Geometry.

+
Immutable
st_contains(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no points of geometry_b lie in the exterior of geometry_a, and there is at least one point in the interior of geometry_b that lies in the interior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_containsproperly(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_b intersects the interior of geometry_a but not the boundary or exterior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_convexhull(geometry: geometry) → geometry

Returns a geometry that represents the Convex Hull of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_coorddim(geometry: geometry) → int

Returns the number of coordinate dimensions of a given Geometry.

+
Immutable
st_coveredby(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_a is outside geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_coveredby(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_coveredby(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_covers(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_b is outside geography_a.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_covers(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_covers(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_crosses(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a has some - but not all - interior points in common with geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dfullywithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, inclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than or equal to distance units.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dfullywithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, exclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than distance units.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_difference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the difference of two Geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_dimension(geometry: geometry) → int

Returns the number of topological dimensions of a given Geometry.

+
Immutable
st_disjoint(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a does not overlap, touch or is within geometry_b.

+

This function utilizes the GEOS module.

+
Immutable
st_distance(geography_a: geography, geography_b: geography) → float

Returns the distance in meters between geography_a and geography_b. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_distance(geography_a: geography, geography_b: geography, use_spheroid: bool) → float

Returns the distance in meters between geography_a and geography_b.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_distance(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance between the given geometries.

+
Immutable
st_distance(geometry_a_str: string, geometry_b_str: string) → float

Returns the distance between the given geometries.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_distancesphere(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance in meters between geometry_a and geometry_b assuming the coordinates represent lng/lat points on a sphere.

+

This function utilizes the S2 library for spherical calculations.

+
Immutable
st_distancespheroid(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance in meters between geometry_a and geometry_b assuming the coordinates represent lng/lat points on a spheroid.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_dwithin(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geometry_a_str: string, geometry_b_str: string, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geometry_a_str: string, geometry_b_str: string, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_endpoint(geometry: geometry) → geometry

Returns the last point of a geometry which has shape LineString. Returns NULL if the geometry is not a LineString.

+
Immutable
st_envelope(box2d: box2d) → geometry

Returns a bounding geometry for the given box.

+
Immutable
st_envelope(geometry: geometry) → geometry

Returns a bounding envelope for the given geometry.

+

For geometries which have a POINT or LINESTRING bounding box (i.e. is a single point +or a horizontal or vertical line), a POINT or LINESTRING is returned. Otherwise, the +returned POLYGON will be ordered Bottom Left, Top Left, Top Right, Bottom Right, +Bottom Left.

+
Immutable
st_equals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is spatially equal to geometry_b, i.e. ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = true.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_estimatedextent(schema_name: string, table_name: string, geocolumn_name: string) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+
Stable
st_estimatedextent(schema_name: string, table_name: string, geocolumn_name: string, parent_only: bool) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+

The parent_only boolean is always ignored.

+
Stable
st_estimatedextent(table_name: string, geocolumn_name: string) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+
Stable
st_expand(box2d: box2d, delta: float) → box2d

Extends the box2d by delta units across all dimensions.

+
Immutable
st_expand(box2d: box2d, delta_x: float, delta_y: float) → box2d

Extends the box2d by delta_x units in the x dimension and delta_y units in the y dimension.

+
Immutable
st_expand(geometry: geometry, delta: float) → geometry

Extends the bounding box represented by the geometry by delta units across all dimensions, returning a Polygon representing the new bounding box.

+
Immutable
st_expand(geometry: geometry, delta_x: float, delta_y: float) → geometry

Extends the bounding box represented by the geometry by delta_x units in the x dimension and delta_y units in the y dimension, returning a Polygon representing the new bounding box.

+
Immutable
st_exteriorring(geometry: geometry) → geometry

Returns the exterior ring of a Polygon as a LineString. Returns NULL if the shape is not a Polygon.

+
Immutable
st_flipcoordinates(geometry: geometry) → geometry

Returns a new geometry with the X and Y axes flipped.

+
Immutable
st_force2d(geometry: geometry) → geometry

Returns a Geometry that is forced into XY layout with any Z or M dimensions discarded.

+
Immutable
st_force3d(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3d(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3dm(geometry: geometry) → geometry

Returns a Geometry that is forced into XYM layout. If a M coordinate doesn’t exist, it will be set to 0. If a Z coordinate is present, it will be discarded.

+
Immutable
st_force3dm(geometry: geometry, defaultM: float) → geometry

Returns a Geometry that is forced into XYM layout. If a M coordinate doesn’t exist, it will be set to the specified default M value. If a Z coordinate is present, it will be discarded.

+
Immutable
st_force3dz(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3dz(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate is present, it will be discarded.

+
Immutable
st_force4d(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZM layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate doesn’t exist, it will be set to 0.

+
Immutable
st_force4d(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate doesn’t exist, it will be set to 0.

+
Immutable
st_force4d(geometry: geometry, defaultZ: float, defaultM: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified Z value. If a M coordinate doesn’t exist, it will be set to the specified M value.

+
Immutable
st_forcecollection(geometry: geometry) → geometry

Converts the geometry into a GeometryCollection.

+
Immutable
st_forcepolygonccw(geometry: geometry) → geometry

Returns a Geometry where all Polygon objects have exterior rings in the counter-clockwise orientation and interior rings in the clockwise orientation. Non-Polygon objects are unchanged.

+
Immutable
st_forcepolygoncw(geometry: geometry) → geometry

Returns a Geometry where all Polygon objects have exterior rings in the clockwise orientation and interior rings in the counter-clockwise orientation. Non-Polygon objects are unchanged.

+
Immutable
st_frechetdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the Frechet distance between the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_frechetdistance(geometry_a: geometry, geometry_b: geometry, densify_frac: float) → float

Returns the Frechet distance between the given geometries, with the given segment densification (range 0.0-1.0, -1 to disable).

+

Smaller densify_frac gives a more accurate Fréchet distance. However, the computation time and memory usage increases with the square of the number of subsegments.

+

This function utilizes the GEOS module.

+
Immutable
st_generatepoints(geometry: geometry, npoints: int4) → geometry

Generates pseudo-random points until the requested number are found within the input area. Uses system time as a seed. +The requested number of points must not be larger than 65336.

+
Volatile
st_generatepoints(geometry: geometry, npoints: int4, seed: int4) → geometry

Generates pseudo-random points until the requested number are found within the input area. +The requested number of points must not be larger than 65336.

+
Immutable
st_geogfromewkb(val: bytes) → geography

Returns the Geography from an EWKB representation.

+
Immutable
st_geogfromewkt(val: string) → geography

Returns the Geography from an EWKT representation.

+
Immutable
st_geogfromgeojson(val: string) → geography

Returns the Geography from a GeoJSON representation.

+
Immutable
st_geogfromgeojson(val: jsonb) → geography

Returns the Geography from a GeoJSON representation.

+
Immutable
st_geogfromtext(str: string, srid: int) → geography

Returns the Geography from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geogfromtext(val: string) → geography

Returns the Geography from a WKT or EWKT representation.

+
Immutable
st_geogfromwkb(bytes: bytes, srid: int) → geography

Returns the Geography from a WKB (or EWKB) representation with the given SRID set.

+
Immutable
st_geogfromwkb(val: bytes) → geography

Returns the Geography from a WKB (or EWKB) representation.

+
Immutable
st_geographyfromtext(str: string, srid: int) → geography

Returns the Geography from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geographyfromtext(val: string) → geography

Returns the Geography from a WKT or EWKT representation.

+
Immutable
st_geohash(geography: geography) → string

Returns a GeoHash representation of the geography with full precision if a point is provided, or with variable precision based on the size of the feature.

+
Immutable
st_geohash(geography: geography, precision: int) → string

Returns a GeoHash representation of the geography with the supplied precision.

+
Immutable
st_geohash(geometry: geometry) → string

Returns a GeoHash representation of the geometry with full precision if a point is provided, or with variable precision based on the size of the feature. This will error if any coordinates are outside the bounds of longitude/latitude.

+
Immutable
st_geohash(geometry: geometry, precision: int) → string

Returns a GeoHash representation of the geometry with the supplied precision. This will error if any coordinates are outside the bounds of longitude/latitude.

+
Immutable
st_geomcollfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not GeometryCollection, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geomcollfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geomcollfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geomcollfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geometryfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geometryfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_geometryn(geometry: geometry, n: int) → geometry

Returns the n-th Geometry (1-indexed). Returns NULL if out of bounds.

+
Immutable
st_geometrytype(geometry: geometry) → string

Returns the type of geometry as a string prefixed with ST_.

+

This function utilizes the GEOS module.

+
Immutable
st_geomfromewkb(val: bytes) → geometry

Returns the Geometry from an EWKB representation.

+
Immutable
st_geomfromewkt(val: string) → geometry

Returns the Geometry from an EWKT representation.

+
Immutable
st_geomfromgeohash(geohash: string) → geometry

Return a POLYGON Geometry from a GeoHash string with max precision.

+
Immutable
st_geomfromgeohash(geohash: string, precision: int) → geometry

Return a POLYGON Geometry from a GeoHash string with supplied precision.

+
Immutable
st_geomfromgeojson(val: string) → geometry

Returns the Geometry from a GeoJSON representation.

+
Immutable
st_geomfromgeojson(val: jsonb) → geometry

Returns the Geometry from a GeoJSON representation.

+
Immutable
st_geomfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geomfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_geomfromwkb(bytes: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with the given SRID set.

+
Immutable
st_geomfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation.

+
Immutable
st_hasarc(geometry: geometry) → bool

Returns whether there is a CIRCULARSTRING in the geometry.

+
Immutable
st_hausdorffdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the Hausdorff distance between the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_hausdorffdistance(geometry_a: geometry, geometry_b: geometry, densify_frac: float) → float

Returns the Hausdorff distance between the given geometries, with the given segment densification (range 0.0-1.0).

+

This function utilizes the GEOS module.

+
Immutable
st_interiorringn(geometry: geometry, n: int) → geometry

Returns the n-th (1-indexed) interior ring of a Polygon as a LineString. Returns NULL if the shape is not a Polygon, or the ring does not exist.

+
Immutable
st_intersection(geography_a: geography, geography_b: geography) → geography

Returns the point intersections of the given geographies.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+

This function utilizes the GEOS module.

+
Immutable
st_intersection(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the point intersections of the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_intersection(geometry_a_str: string, geometry_b_str: string) → geometry

Returns the point intersections of the given geometries.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_intersects(geography_a: geography, geography_b: geography) → bool

Returns true if geography_a shares any portion of space with geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_intersects(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_intersects(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_isclosed(geometry: geometry) → bool

Returns whether the geometry is closed as defined by whether the start and end points are coincident. Points are considered closed, empty geometries are not. For collections and multi-types, all members must be closed, as must all polygon rings.

+
Immutable
st_iscollection(geometry: geometry) → bool

Returns whether the geometry is of a collection type (including multi-types).

+
Immutable
st_isempty(geometry: geometry) → bool

Returns whether the geometry is empty.

+
Immutable
st_ispolygonccw(geometry: geometry) → bool

Returns whether the Polygon objects inside the Geometry have exterior rings in the counter-clockwise orientation and interior rings in the clockwise orientation. Non-Polygon objects are considered counter-clockwise.

+
Immutable
st_ispolygoncw(geometry: geometry) → bool

Returns whether the Polygon objects inside the Geometry have exterior rings in the clockwise orientation and interior rings in the counter-clockwise orientation. Non-Polygon objects are considered clockwise.

+
Immutable
st_isring(geometry: geometry) → bool

Returns whether the geometry is a single linestring that is closed and simple, as defined by ST_IsClosed and ST_IsSimple.

+

This function utilizes the GEOS module.

+
Immutable
st_issimple(geometry: geometry) → bool

Returns true if the geometry has no anomalous geometric points, e.g. that it intersects with or lies tangent to itself.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalid(geometry: geometry) → bool

Returns whether the geometry is valid as defined by the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalid(geometry: geometry, flags: int) → bool

Returns whether the geometry is valid.

+

For flags=0, validity is defined by the OGC spec.

+

For flags=1, validity considers self-intersecting rings forming holes as valid as per ESRI. This is not valid under OGC and CRDB spatial operations may not operate correctly.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidreason(geometry: geometry) → string

Returns a string containing the reason the geometry is invalid along with the point of interest, or “Valid Geometry” if it is valid. Validity is defined by the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidreason(geometry: geometry, flags: int) → string

Returns the reason the geometry is invalid or “Valid Geometry” if it is valid.

+

For flags=0, validity is defined by the OGC spec.

+

For flags=1, validity considers self-intersecting rings forming holes as valid as per ESRI. This is not valid under OGC and CRDB spatial operations may not operate correctly.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidtrajectory(geometry: geometry) → bool

Returns whether the geometry encodes a valid trajectory.

+

Note the geometry must be a LineString with M coordinates.

+
Immutable
st_length(geography: geography) → float

Returns the length of the given geography in meters. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_length(geography: geography, use_spheroid: bool) → float

Returns the length of the given geography in meters.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_length(geometry: geometry) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+
Immutable
st_length(geometry_str: string) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_length2d(geometry: geometry) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+
Immutable
st_linecrossingdirection(linestring_a: geometry, linestring_b: geometry) → int

Returns an integer value defining behavior of crossing of lines: +0: lines do not cross, +-1: linestring_b crosses linestring_a from right to left, +1: linestring_b crosses linestring_a from left to right, +-2: linestring_b crosses linestring_a multiple times from right to left, +2: linestring_b crosses linestring_a multiple times from left to right, +-3: linestring_b crosses linestring_a multiple times from left to left, +3: linestring_b crosses linestring_a multiple times from right to right.

+

Note that the top vertex of the segment touching another line does not count as a crossing, but the bottom vertex of segment touching another line is considered a crossing.

+
Immutable
st_linefromencodedpolyline(encoded_polyline: string) → geometry

Creates a LineString from an Encoded Polyline string.

+

Returns valid results only if the polyline was encoded with 5 decimal places.

+

See http://developers.google.com/maps/documentation/utilities/polylinealgorithm

+
Immutable
st_linefromencodedpolyline(encoded_polyline: string, precision: int4) → geometry

Creates a LineString from an Encoded Polyline string.

+

Precision specifies how many decimal places will be preserved in Encoded Polyline. Value should be the same on encoding and decoding, or coordinates will be incorrect.

+

See http://developers.google.com/maps/documentation/utilities/polylinealgorithm

+
Immutable
st_linefrommultipoint(geometry: geometry) → geometry

Creates a LineString from a MultiPoint geometry.

+
Immutable
st_linefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not LineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_linefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_lineinterpolatepoint(geometry: geometry, fraction: float) → geometry

Returns a point along the given LineString which is at given fraction of LineString’s total length.

+

This function utilizes the GEOS module.

+
Immutable
st_lineinterpolatepoints(geometry: geometry, fraction: float) → geometry

Returns one or more points along the LineString which are at integral multiples of the given fraction of the LineString’s total length.

+

Note If the result has zero or one points, it will be returned as a POINT. If it has two or more points, it will be returned as a MULTIPOINT.

+

This function utilizes the GEOS module.

+
Immutable
st_lineinterpolatepoints(geometry: geometry, fraction: float, repeat: bool) → geometry

Returns one or more points along the LineString which are at integral multiples of the given fraction of the LineString’s total length. If repeat is false (default true) then it returns the first point.

+

Note If the result has zero or one points, it will be returned as a POINT. If it has two or more points, it will be returned as a MULTIPOINT.

+

This function utilizes the GEOS module.

+
Immutable
st_linelocatepoint(line: geometry, point: geometry) → float

Returns a float between 0 and 1 representing the location of the closest point on LineString to the given Point, as a fraction of total 2d line length.

+
Immutable
st_linemerge(geometry: geometry) → geometry

Returns a LineString or MultiLineString by joining together constituents of a MultiLineString with matching endpoints. If the input is not a MultiLineString or LineString, an empty GeometryCollection is returned.

+

This function utilizes the GEOS module.

+
Immutable
st_linestringfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not LineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_linestringfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linestringfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linestringfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linesubstring(linestring: geometry, start_fraction: decimal, end_fraction: decimal) → geometry

Return a linestring being a substring of the input one starting and ending at the given fractions of total 2D length. Second and third arguments are float8 values between 0 and 1.

+
Immutable
st_linesubstring(linestring: geometry, start_fraction: float, end_fraction: float) → geometry

Return a linestring being a substring of the input one starting and ending at the given fractions of total 2D length. Second and third arguments are float8 values between 0 and 1.

+
Immutable
st_longestline(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the LineString that corresponds to the max distance across every pair of points comprising the given geometries.

+

Note if geometries are the same, it will return the LineString with the maximum distance between the geometry’s vertexes. The function will return the longest line that was discovered first when comparing maximum distances if more than one is found.

+
Immutable
st_m(geometry: geometry) → float

Returns the M coordinate of a geometry if it is a Point.

+
Immutable
st_makebox2d(geometry_a: geometry, geometry_b: geometry) → box2d

Creates a box2d from two points. Errors if arguments are not two non-empty points.

+
Immutable
st_makeenvelope(xmin: float, ymin: float, xmax: float, ymax: float) → geometry

Creates a rectangular Polygon from the minimum and maximum values for X and Y with SRID 0.

+
Immutable
st_makeenvelope(xmin: float, ymin: float, xmax: float, ymax: float, srid: int) → geometry

Creates a rectangular Polygon from the minimum and maximum values for X and Y with the given SRID.

+
Immutable
st_makepoint(x: float, y: float) → geometry

Returns a new Point with the given X and Y coordinates.

+
Immutable
st_makepoint(x: float, y: float, z: float) → geometry

Returns a new Point with the given X, Y, and Z coordinates.

+
Immutable
st_makepoint(x: float, y: float, z: float, m: float) → geometry

Returns a new Point with the given X, Y, Z, and M coordinates.

+
Immutable
st_makepointm(x: float, y: float, m: float) → geometry

Returns a new Point with the given X, Y, and M coordinates.

+
Immutable
st_makepolygon(geometry: geometry) → geometry

Returns a new Polygon with the given outer LineString.

+
Immutable
st_makepolygon(outer: geometry, interior: anyelement[]) → geometry

Returns a new Polygon with the given outer LineString and interior (hole) LineString(s).

+
Immutable
st_makevalid(geometry: geometry) → geometry

Returns a valid form of the given geometry according to the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_maxdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the maximum distance across every pair of points comprising the given geometries. Note if the geometries are the same, it will return the maximum distance between the geometry’s vertexes.

+
Immutable
st_memsize(geometry: geometry) → int

Returns the amount of memory space (in bytes) the geometry takes.

+
Immutable
st_minimumboundingcircle(geometry: geometry) → geometry

Returns the smallest circle polygon that can fully contain a geometry.

+
Immutable
st_minimumboundingcircle(geometry: geometry, num_segs: int) → geometry

Returns the smallest circle polygon that can fully contain a geometry.

+
Immutable
st_minimumboundingradius(geometry: geometry) → tuple{geometry AS center, float AS radius}

Returns a record containing the center point and radius of the smallest circle that can fully contain the given geometry.

+
Immutable
st_minimumclearance(geometry: geometry) → float

Returns the minimum distance a vertex can move before producing an invalid geometry. Returns Infinity if no minimum clearance can be found (e.g. for a single point).

+
Immutable
st_minimumclearanceline(geometry: geometry) → geometry

Returns a LINESTRING spanning the minimum distance a vertex can move before producing an invalid geometry. If no minimum clearance can be found (e.g. for a single point), an empty LINESTRING is returned.

+
Immutable
st_mlinefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mlinefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mlinefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mlinefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mpointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mpointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpolyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mpolyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_mpolyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_mpolyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multi(geometry: geometry) → geometry

Returns the geometry as a new multi-geometry, e.g. converts a POINT to a MULTIPOINT. If the input is already a multitype or collection, it is returned as is.

+
Immutable
st_multilinefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multilinefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multilinestringfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multipointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipolyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipolyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipolygonfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_ndims(geometry: geometry) → int

Returns the number of coordinate dimensions of a given Geometry.

+
Immutable
st_node(geometry: geometry) → geometry

Adds a node on a geometry for each intersection. Resulting geometry is always a MultiLineString.

+
Immutable
st_normalize(geometry: geometry) → geometry

Returns the geometry in its normalized form.

+

This function utilizes the GEOS module.

+
Immutable
st_npoints(geometry: geometry) → int

Returns the number of points in a given Geometry. Works for any shape type.

+
Immutable
st_nrings(geometry: geometry) → int

Returns the number of rings in a Polygon Geometry. Returns 0 if the shape is not a Polygon.

+
Immutable
st_numgeometries(geometry: geometry) → int

Returns the number of shapes inside a given Geometry.

+
Immutable
st_numinteriorring(geometry: geometry) → int

Returns the number of interior rings in a Polygon Geometry. Returns NULL if the shape is not a Polygon.

+
Immutable
st_numinteriorrings(geometry: geometry) → int

Returns the number of interior rings in a Polygon Geometry. Returns NULL if the shape is not a Polygon.

+
Immutable
st_numpoints(geometry: geometry) → int

Returns the number of points in a LineString. Returns NULL if the Geometry is not a LineString.

+
Immutable
st_orderingequals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is exactly equal to geometry_b, having all coordinates in the same order, as well as the same type, SRID, bounding box, and so on.

+
Immutable
st_orientedenvelope(geometry: geometry) → geometry

Returns a minimum rotated rectangle enclosing a geometry. +Note that more than one minimum rotated rectangle may exist. +May return a Point or LineString in the case of degenerate inputs.

+
Immutable
st_overlaps(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a intersects but does not completely contain geometry_b, or vice versa. “Does not completely” implies ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = false.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_perimeter(geography: geography) → float

Returns the perimeter of the given geography in meters. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_perimeter(geography: geography, use_spheroid: bool) → float

Returns the perimeter of the given geography in meters.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_perimeter(geometry: geometry) → float

Returns the perimeter of the given geometry.

+

Note ST_Perimeter is only valid for Polygon - use ST_Length for LineString.

+

This function utilizes the GEOS module.

+
Immutable
st_perimeter2d(geometry: geometry) → float

Returns the perimeter of the given geometry.

+

Note ST_Perimeter is only valid for Polygon - use ST_Length for LineString.

+

This function utilizes the GEOS module.

+
Immutable
st_point(x: float, y: float) → geometry

Returns a new Point with the given X and Y coordinates.

+
Immutable
st_pointfromgeohash(geohash: string) → geometry

Return a POINT Geometry from a GeoHash string with max precision.

+
Immutable
st_pointfromgeohash(geohash: string, precision: int) → geometry

Return a POINT Geometry from a GeoHash string with supplied precision.

+
Immutable
st_pointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Point, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_pointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointinsidecircle(geometry: geometry, x_coord: float, y_coord: float, radius: float) → bool

Returns true if the geometry is a point and is inside the circle. Returns false otherwise.

+
Immutable
st_pointn(geometry: geometry, n: int) → geometry

Returns the n-th Point of a LineString (1-indexed). Returns NULL if out of bounds or not a LineString.

+
Immutable
st_pointonsurface(geometry: geometry) → geometry

Returns a point that intersects with the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_points(geometry: geometry) → geometry

Returns all coordinates in the given Geometry as a MultiPoint, including duplicates.

+
Immutable
st_polyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Polygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_polyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygon(geometry: geometry, srid: int) → geometry

Returns a new Polygon from the given LineString and sets its SRID. It is equivalent to ST_MakePolygon with a single argument followed by ST_SetSRID.

+
Immutable
st_polygonfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Polygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_polygonfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygonfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygonfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_project(geography: geography, distance: float, azimuth: float) → geography

Returns a point projected from a start point along a geodesic using a given distance and azimuth (bearing). +This is known as the direct geodesic problem.

+

The distance is given in meters. Negative values are supported.

+

The azimuth (also known as heading or bearing) is given in radians. It is measured clockwise from true north (azimuth zero). +East is azimuth π/2 (90 degrees); south is azimuth π (180 degrees); west is azimuth 3π/2 (270 degrees). +Negative azimuth values and values greater than 2π (360 degrees) are supported.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry) → string

Returns the DE-9IM spatial relation between geometry_a and geometry_b.

+

This function utilizes the GEOS module.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry, bnr: int) → string

Returns the DE-9IM spatial relation between geometry_a and geometry_b using the given boundary node rule (1:OGC/MOD2, 2:Endpoint, 3:MultivalentEndpoint, 4:MonovalentEndpoint).

+

This function utilizes the GEOS module.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry, pattern: string) → bool

Returns whether the DE-9IM spatial relation between geometry_a and geometry_b matches the DE-9IM pattern.

+

This function utilizes the GEOS module.

+
Immutable
st_relatematch(intersection_matrix: string, pattern: string) → bool

Returns whether the given DE-9IM intersection matrix satisfies the given pattern.

+
Immutable
st_removepoint(line_string: geometry, index: int) → geometry

Removes the Point at the given 0-based index and returns the modified LineString geometry.

+
Immutable
st_removerepeatedpoints(geometry: geometry) → geometry

Returns a geometry with repeated points removed.

+
Immutable
st_removerepeatedpoints(geometry: geometry, tolerance: float) → geometry

Returns a geometry with repeated points removed, within the given distance tolerance.

+
Immutable
st_reverse(geometry: geometry) → geometry

Returns a modified geometry by reversing the order of its vertices.

+
Immutable
st_rotate(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated around the origin by a rotation angle.

+
Immutable
st_rotate(g: geometry, angle_radians: float, origin_point: geometry) → geometry

Returns a modified Geometry whose coordinates are rotated around the provided origin by a rotation angle.

+
Immutable
st_rotate(g: geometry, angle_radians: float, origin_x: float, origin_y: float) → geometry

Returns a modified Geometry whose coordinates are rotated around the provided origin by a rotation angle.

+
Immutable
st_rotatex(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the x axis by a rotation angle.

+
Immutable
st_rotatey(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the y axis by a rotation angle.

+
Immutable
st_rotatez(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the z axis by a rotation angle.

+
Immutable
st_s2covering(geography: geography) → geography

Returns a geography which represents the S2 covering used by the index using the default index configuration.

+
Immutable
st_s2covering(geography: geography, settings: string) → geography

Returns a geography which represents the S2 covering used by the index using the index configuration specified by the settings parameter.

+

The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX ... WITH (...), e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using SELECT ST_S2Covering(geography, 's2_max_level=15,s2_level_mod=3').

+
Immutable
st_s2covering(geometry: geometry) → geometry

Returns a geometry which represents the S2 covering used by the index using the default index configuration.

+
Immutable
st_s2covering(geometry: geometry, settings: string) → geometry

Returns a geometry which represents the S2 covering used by the index using the index configuration specified by the settings parameter.

+

The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX ... WITH (...), e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using SELECT ST_S2Covering(geometry, 's2_max_level=15,s2_level_mod=3')

+
Immutable
st_scale(g: geometry, factor: geometry) → geometry

Returns a modified Geometry scaled by taking in a Geometry as the factor.

+
Immutable
st_scale(g: geometry, factor: geometry, origin: geometry) → geometry

Returns a modified Geometry scaled by the Geometry factor relative to a false origin.

+
Immutable
st_scale(geometry: geometry, x_factor: float, y_factor: float) → geometry

Returns a modified Geometry scaled by the given factors.

+
Immutable
st_segmentize(geography: geography, max_segment_length_meters: float) → geography

Returns a modified Geography having no segment longer than the given max_segment_length meters.

+

The calculations are done on a sphere.

+

This function utilizes the S2 library for spherical calculations.

+
Immutable
st_segmentize(geometry: geometry, max_segment_length: float) → geometry

Returns a modified Geometry having no segment longer than the given max_segment_length. Length units are in units of spatial reference.

+
Immutable
st_setpoint(line_string: geometry, index: int, point: geometry) → geometry

Sets the Point at the given 0-based index and returns the modified LineString geometry.

+
Immutable
st_setsrid(geography: geography, srid: int) → geography

Sets a Geography to a new SRID without transforming the coordinates.

+
Immutable
st_setsrid(geometry: geometry, srid: int) → geometry

Sets a Geometry to a new SRID without transforming the coordinates.

+
Immutable
st_sharedpaths(geometry_a: geometry, geometry_b: geometry) → geometry

Returns a collection containing paths shared by the two input geometries.

+

Those going in the same direction are in the first element of the collection, +those going in the opposite direction are in the second element. +The paths themselves are given in the direction of the first geometry.

+
Immutable
st_shiftlongitude(geometry: geometry) → geometry

Returns a modified version of a geometry in which the longitude (X coordinate) of each point is incremented by 360 if it is <0 and decremented by 360 if it is >180. The result is only meaningful if the coordinates are in longitude/latitude.

+
Immutable
st_shortestline(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the LineString that corresponds to the minimum distance across every pair of points comprising the given geometries.

+

Note if geometries are the same, it will return the LineString with the minimum distance between the geometry’s vertexes. The function will return the shortest line that was discovered first when comparing minimum distances if more than one is found.

+
Immutable
st_simplify(geometry: geometry, tolerance: float) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm.

+

This function utilizes the GEOS module.

+
Immutable
st_simplify(geometry: geometry, tolerance: float, preserve_collapsed: bool) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm, retaining objects that would be too small given the tolerance if preserve_collapsed is set to true.

+
Immutable
st_simplifypreservetopology(geometry: geometry, tolerance: float) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm, avoiding the creation of invalid geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_snap(input: geometry, target: geometry, tolerance: float) → geometry

Snaps the vertices and segments of the input geometry to the target geometry’s vertices. +Tolerance is used to control where snapping is performed. The result geometry is the input geometry with the vertices snapped. +If no snapping occurs then the input geometry is returned unchanged.

+
Immutable
st_snaptogrid(geometry: geometry, origin: geometry, size_x: float, size_y: float, size_z: float, size_m: float) → geometry

Snap a geometry to a grid defined by the given origin and X, Y, Z, and M cell sizes. Any dimension with a 0 cell size will not be snapped.

+
Immutable
st_snaptogrid(geometry: geometry, origin_x: float, origin_y: float, size_x: float, size_y: float) → geometry

Snap a geometry to a grid with X coordinates snapped to size_x and Y coordinates snapped to size_y based on an origin of (origin_x, origin_y).

+
Immutable
st_snaptogrid(geometry: geometry, size: float) → geometry

Snap a geometry to a grid of the given size. The specified size is only used to snap X and Y coordinates.

+
Immutable
st_snaptogrid(geometry: geometry, size_x: float, size_y: float) → geometry

Snap a geometry to a grid with X coordinates snapped to size_x and Y coordinates snapped to size_y.

+
Immutable
st_srid(geography: geography) → int

Returns the Spatial Reference Identifier (SRID) for the ST_Geography as defined in spatial_ref_sys table.

+
Immutable
st_srid(geometry: geometry) → int

Returns the Spatial Reference Identifier (SRID) for the ST_Geometry as defined in spatial_ref_sys table.

+
Immutable
st_startpoint(geometry: geometry) → geometry

Returns the first point of a geometry which has shape LineString. Returns NULL if the geometry is not a LineString.

+
Immutable
st_subdivide(geometry: geometry) → geometry

Returns a geometry divided into parts, where each part contains no more than 256 vertices.

+
Immutable
st_subdivide(geometry: geometry, max_vertices: int4) → geometry

Returns a geometry divided into parts, where each part contains no more than the number of vertices provided.

+
Immutable
st_summary(geography: geography) → string

Returns a text summary of the contents of the geography.

+

Flags shown in square brackets after the geometry type have the following meaning:

+
    +
  • M: has M coordinate
  • +
  • Z: has Z coordinate
  • +
  • B: has a cached bounding box
  • +
  • G: is geography
  • +
  • S: has spatial reference system
  • +
+
Immutable
st_summary(geometry: geometry) → string

Returns a text summary of the contents of the geometry.

+

Flags shown in square brackets after the geometry type have the following meaning:

+
    +
  • M: has M coordinate
  • +
  • Z: has Z coordinate
  • +
  • B: has a cached bounding box
  • +
  • G: is geography
  • +
  • S: has spatial reference system
  • +
+
Immutable
st_swapordinates(geometry: geometry, swap_ordinate_string: string) → geometry

Returns a version of the given geometry with given ordinates swapped. +The swap_ordinate_string parameter is a 2-character string naming the ordinates to swap. Valid names are: x, y, z and m.

+
Immutable
st_symdifference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the symmetric difference of both geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_symmetricdifference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the symmetric difference of both geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4, bounds: geometry) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4, bounds: geometry, margin: float) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_touches(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if the only points in common between geometry_a and geometry_b are on the boundary. Note points do not touch other points.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_transform(geometry: geometry, from_proj_text: string, srid: int) → geometry

Transforms a geometry into the coordinate reference system assuming the from_proj_text to the new to_proj_text by projecting its coordinates. The supplied SRID is set on the new geometry.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, from_proj_text: string, to_proj_text: string) → geometry

Transforms a geometry into the coordinate reference system assuming the from_proj_text to the new to_proj_text by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, srid: int) → geometry

Transforms a geometry into the given SRID coordinate reference system by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, to_proj_text: string) → geometry

Transforms a geometry into the coordinate reference system referenced by the projection text by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_translate(g: geometry, delta_x: float, delta_y: float) → geometry

Returns a modified Geometry translated by the given deltas.

+
Immutable
st_translate(g: geometry, delta_x: float, delta_y: float, delta_z: float) → geometry

Returns a modified Geometry translated by the given deltas.

+
Immutable
st_transscale(geometry: geometry, delta_x: float, delta_y: float, x_factor: float, y_factor: float) → geometry

Translates the geometry using the deltaX and deltaY args, then scales it using the XFactor, YFactor args, working in 2D only.

+
Immutable
st_unaryunion(geometry: geometry) → geometry

Returns a union of the components for any geometry or geometry collection provided. Dissolves boundaries of a multipolygon.

+
Immutable
st_voronoilines(geometry: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoilines(geometry: geometry, tolerance: float) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoilines(geometry: geometry, tolerance: float, extend_to: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoipolygons(geometry: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_voronoipolygons(geometry: geometry, tolerance: float) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_voronoipolygons(geometry: geometry, tolerance: float, extend_to: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_within(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is completely inside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_wkbtosql(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation.

+
Immutable
st_wkttosql(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_x(geometry: geometry) → float

Returns the X coordinate of a geometry if it is a Point.

+
Immutable
st_xmax(box2d: box2d) → float

Returns the maximum X ordinate of a box2d.

+
Immutable
st_xmax(geometry: geometry) → float

Returns the maximum X ordinate of a geometry.

+
Immutable
st_xmin(box2d: box2d) → float

Returns the minimum X ordinate of a box2d.

+
Immutable
st_xmin(geometry: geometry) → float

Returns the minimum X ordinate of a geometry.

+
Immutable
st_y(geometry: geometry) → float

Returns the Y coordinate of a geometry if it is a Point.

+
Immutable
st_ymax(box2d: box2d) → float

Returns the maximum Y ordinate of a box2d.

+
Immutable
st_ymax(geometry: geometry) → float

Returns the maximum Y ordinate of a geometry.

+
Immutable
st_ymin(box2d: box2d) → float

Returns the minimum Y ordinate of a box2d.

+
Immutable
st_ymin(geometry: geometry) → float

Returns the minimum Y ordinate of a geometry.

+
Immutable
st_z(geometry: geometry) → float

Returns the Z coordinate of a geometry if it is a Point.

+
Immutable
st_zmflag(geometry: geometry) → int2

Returns a code based on the ZM coordinate dimension of a geometry (XY = 0, XYM = 1, XYZ = 2, XYZM = 3).

+
Immutable
+ +### String and byte functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
ascii(val: string) → int

Returns the character code of the first character in val. Despite the name, the function supports Unicode too.

+
Immutable
bit_count(val: bytes) → int

Calculates the number of bits set used to represent val.

+
Immutable
bit_count(val: varbit) → int

Calculates the number of bits set used to represent val.

+
Immutable
bit_length(val: bytes) → int

Calculates the number of bits used to represent val.

+
Immutable
bit_length(val: string) → int

Calculates the number of bits used to represent val.

+
Immutable
bit_length(val: varbit) → int

Calculates the number of bits used to represent val.

+
Immutable
bitmask_and(a: string, b: string) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: string, b: varbit) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: varbit, b: string) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: varbit, b: varbit) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: string, b: string) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: string, b: varbit) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: varbit, b: string) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: varbit, b: varbit) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: string, b: string) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: string, b: varbit) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: varbit, b: string) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: varbit, b: varbit) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
btrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the beginning or end of input (applies recursively).

+

For example, btrim('doggie', 'eod') returns ggi.

+
Immutable
btrim(val: string) → string

Removes all spaces from the beginning and end of val.

+
Immutable
char_length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
char_length(val: string) → int

Calculates the number of characters in val.

+
Immutable
character_length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
character_length(val: string) → int

Calculates the number of characters in val.

+
Immutable
chr(val: int) → string

Returns the character with the code given in val. Inverse function of ascii().

+
Immutable
compress(data: bytes, codec: string) → bytes

Compress data with the specified codec (‘gzip’, ‘lz4’, ‘snappy’, ‘zstd’).

+
Immutable
concat(any...) → string

Concatenates a comma-separated list of strings.

+
Immutable
concat_ws(string, any...) → string

Uses the first argument as a separator between the concatenation of the subsequent arguments.

+

For example concat_ws('!','wow','great') returns wow!great.

+
Immutable
convert_from(str: bytes, enc: string) → string

Decode the bytes in str into a string using encoding enc. Supports encodings ‘UTF8’ and ‘LATIN1’.

+
Immutable
convert_to(str: string, enc: string) → bytes

Encode the string str as a byte array using encoding enc. Supports encodings ‘UTF8’ and ‘LATIN1’.

+
Immutable
decode(text: string, format: string) → bytes

Decodes data using format (hex / escape / base64).

+
Immutable
decompress(data: bytes, codec: string) → bytes

Decompress data with the specified codec (‘gzip’, ‘lz4’, ‘snappy’, ‘zstd’).

+
Immutable
difference(source: string, target: string) → int

Convert two strings to their Soundex codes and report the number of matching code positions.

+
Immutable
encode(data: bytes, format: string) → string

Encodes data using format (hex / escape / base64).

+
Immutable
format(string, any...) → string

Interprets the first argument as a format string similar to C sprintf and interpolates the remaining arguments.

+
Stable
from_ip(val: bytes) → string

Converts the byte string representation of an IP to its character string representation.

+
Immutable
from_uuid(val: bytes) → string

Converts the byte string representation of a UUID to its character string representation.

+
Immutable
get_bit(bit_string: varbit, index: int) → int

Extracts a bit at given index in the bit array.

+
Immutable
get_bit(byte_string: bytes, index: int) → int

Extracts a bit at the given index in the byte array.

+
Immutable
get_byte(byte_string: bytes, index: int) → int

Extracts a byte at the given index in the byte array.

+
Immutable
initcap(val: string) → string

Capitalizes the first letter of val.

+
Immutable
left(input: bytes, return_set: int) → bytes

Returns the first return_set bytes from input.

+
Immutable
left(input: string, return_set: int) → string

Returns the first return_set characters from input.

+
Immutable
length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
length(val: string) → int

Calculates the number of characters in val.

+
Immutable
length(val: varbit) → int

Calculates the number of bits in val.

+
Immutable
lower(val: string) → string

Converts all characters in val to their lower-case equivalents.

+
Immutable
lpad(string: string, length: int) → string

Pads string to length by adding ’ ’ to the left of string. If string is longer than length it is truncated.

+
Immutable
lpad(string: string, length: int, fill: string) → string

Pads string by adding fill to the left of string to make it length. If string is longer than length it is truncated.

+
Immutable
ltrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the beginning (left-hand side) of input (applies recursively).

+

For example, ltrim('doggie', 'od') returns ggie.

+
Immutable
ltrim(val: string) → string

Removes all spaces from the beginning (left-hand side) of val.

+
Immutable
md5(bytes...) → string

Calculates the MD5 hash value of a set of values.

+
Leakproof
md5(string...) → string

Calculates the MD5 hash value of a set of values.

+
Leakproof
octet_length(val: bytes) → int

Calculates the number of bytes used to represent val.

+
Immutable
octet_length(val: string) → int

Calculates the number of bytes used to represent val.

+
Immutable
octet_length(val: varbit) → int

Calculates the number of bits used to represent val.

+
Immutable
overlay(input: string, overlay_val: string, start_pos: int) → string

Replaces characters in input with overlay_val starting at start_pos (begins at 1).

+

For example, overlay('doggie', 'CAT', 2) returns dCATie.

+
Immutable
overlay(input: string, overlay_val: string, start_pos: int, end_pos: int) → string

Deletes the characters in input between start_pos and end_pos (count starts at 1), and then insert overlay_val at start_pos.

+
Immutable
parse_date(string: string, datestyle: string) → date

Parses a date assuming it is in format specified by DateStyle.

+
Immutable
parse_date(val: string) → date

Parses a date assuming it is in MDY format.

+
Immutable
parse_ident(qualified_identifier: string) → string[]

Splits qualified_identifier into an array of identifiers, removing any quoting of individual identifiers. Extra characters after the last identifier are considered an error.

+
Immutable
parse_ident(qualified_identifier: string, strict: bool) → string[]

Splits qualified_identifier into an array of identifiers, removing any quoting of individual identifiers. If strict is false, then extra characters after the last identifier are ignored.

+
Immutable
parse_interval(string: string, style: string) → interval

Convert a string to an interval using the given IntervalStyle.

+
Immutable
parse_interval(val: string) → interval

Convert a string to an interval assuming the Postgres IntervalStyle.

+
Immutable
parse_time(string: string, timestyle: string) → time

Parses a time assuming the date (if any) is in format specified by DateStyle.

+
Immutable
parse_time(val: string) → time

Parses a time assuming the date (if any) is in MDY format.

+
Immutable
parse_timestamp(string: string, datestyle: string) → timestamp

Convert a string containing an absolute timestamp to the corresponding timestamp assuming dates formatted using the given DateStyle.

+
Immutable
parse_timestamp(val: string) → timestamp

Convert a string containing an absolute timestamp to the corresponding timestamp assuming dates are in MDY format.

+
Immutable
parse_timetz(string: string, timestyle: string) → timetz

Parses a timetz assuming the date (if any) is in format specified by DateStyle.

+
Immutable
parse_timetz(val: string) → timetz

Parses a timetz assuming the date (if any) is in MDY format.

+
Immutable
prettify_statement(statement: string, line_width: int, align_mode: int, case_mode: int) → string

Prettifies a statement using a user-configured pretty-printing config. +Align mode values range from 0 - 3, representing no, partial, full, and extra alignment respectively. +Case mode values range between 0 - 1, representing lower casing and upper casing respectively.

+
Immutable
prettify_statement(val: string) → string

Prettifies a statement using the default pretty-printing config.

+
Immutable
quote_ident(val: string) → string

Return val suitably quoted to serve as identifier in a SQL statement.

+
Immutable
quote_literal(val: string) → string

Return val suitably quoted to serve as string literal in a SQL statement.

+
Immutable
quote_literal(val: anyelement) → string

Coerce val to a string and then quote it as a literal.

+
Stable
quote_nullable(val: string) → string

Coerce val to a string and then quote it as a literal. If val is NULL, returns ‘NULL’.

+
Immutable
quote_nullable(val: anyelement) → string

Coerce val to a string and then quote it as a literal. If val is NULL, returns ‘NULL’.

+
Stable
regexp_extract(input: string, regex: string) → string

Returns the first match for the Regular Expression regex in input.

+
Immutable
regexp_replace(input: string, regex: string, replace: string) → string

Replaces matches for the Regular Expression regex in input with the Regular Expression replace.

+
Immutable
regexp_replace(input: string, regex: string, replace: string, flags: string) → string

Replaces matches for the regular expression regex in input with the regular expression replace using flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
repeat(input: string, repeat_counter: int) → string

Concatenates input repeat_counter number of times.

+

For example, repeat('dog', 2) returns dogdog.

+
Immutable
replace(input: string, find: string, replace: string) → string

Replaces all occurrences of find with replace in input

+
Immutable
reverse(val: string) → string

Reverses the order of the string’s characters.

+
Immutable
right(input: bytes, return_set: int) → bytes

Returns the last return_set bytes from input.

+
Immutable
right(input: string, return_set: int) → string

Returns the last return_set characters from input.

+
Immutable
rpad(string: string, length: int) → string

Pads string to length by adding ’ ’ to the right of string. If string is longer than length it is truncated.

+
Immutable
rpad(string: string, length: int, fill: string) → string

Pads string to length by adding fill to the right of string. If string is longer than length it is truncated.

+
Immutable
rtrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the end (right-hand side) of input (applies recursively).

+

For example, rtrim('doggie', 'ei') returns dogg.

+
Immutable
rtrim(val: string) → string

Removes all spaces from the end (right-hand side) of val.

+
Immutable
set_bit(bit_string: varbit, index: int, to_set: int) → varbit

Updates a bit at given index in the bit array.

+
Immutable
set_bit(byte_string: bytes, index: int, to_set: int) → bytes

Updates a bit at the given index in the byte array.

+
Immutable
set_byte(byte_string: bytes, index: int, to_set: int) → bytes

Updates a byte at the given index in the byte array.

+
Immutable
sha1(bytes...) → string

Calculates the SHA1 hash value of a set of values.

+
Leakproof
sha1(string...) → string

Calculates the SHA1 hash value of a set of values.

+
Leakproof
sha224(bytes...) → string

Calculates the SHA224 hash value of a set of values.

+
Leakproof
sha224(string...) → string

Calculates the SHA224 hash value of a set of values.

+
Leakproof
sha256(bytes...) → string

Calculates the SHA256 hash value of a set of values.

+
Leakproof
sha256(string...) → string

Calculates the SHA256 hash value of a set of values.

+
Leakproof
sha384(bytes...) → string

Calculates the SHA384 hash value of a set of values.

+
Leakproof
sha384(string...) → string

Calculates the SHA384 hash value of a set of values.

+
Leakproof
sha512(bytes...) → string

Calculates the SHA512 hash value of a set of values.

+
Leakproof
sha512(string...) → string

Calculates the SHA512 hash value of a set of values.

+
Leakproof
similar_escape(pattern: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern.

+
Immutable
similar_escape(pattern: string, escape: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern using escape as an escape token.

+
Immutable
similar_to_escape(pattern: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern.

+
Immutable
similar_to_escape(pattern: string, escape: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern using escape as an escape token.

+
Immutable
similar_to_escape(unescaped: string, pattern: string, escape: string) → bool

Matches unescaped with pattern using escape as an escape token.

+
Immutable
split_part(input: string, delimiter: string, return_index_pos: int) → string

Splits input using delimiter and returns the field at return_index_pos (starting from 1). If return_index_pos is negative, it returns the |return_index_pos|'th field from the end.

+

For example, split_part('123.456.789.0', '.', 3) returns 789.

+
Immutable
strpos(input: bytes, find: bytes) → int

Calculates the position where the byte subarray find begins in input.

+
Immutable
strpos(input: string, find: string) → int

Calculates the position where the string find begins in input.

+

For example, strpos('doggie', 'gie') returns 4.

+
Immutable
strpos(input: varbit, find: varbit) → int

Calculates the position where the bit subarray find begins in input.

+
Immutable
substr(input: bytes, start_pos: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: bytes, start_pos: int, length: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substr(input: string, regex: string) → string

Returns a substring of input that matches the regular expression regex.

+
Immutable
substr(input: string, regex: string, escape_char: string) → string

Returns a substring of input that matches the regular expression regex using escape_char as your escape character instead of \.

+
Immutable
substr(input: string, start_pos: int) → string

Returns a substring of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: string, start_pos: int, length: int) → string

Returns a substring of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substr(input: varbit, start_pos: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: varbit, start_pos: int, length: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: bytes, start_pos: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: bytes, start_pos: int, length: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: string, regex: string) → string

Returns a substring of input that matches the regular expression regex.

+
Immutable
substring(input: string, regex: string, escape_char: string) → string

Returns a substring of input that matches the regular expression regex using escape_char as your escape character instead of \.

+
Immutable
substring(input: string, start_pos: int) → string

Returns a substring of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: string, start_pos: int, length: int) → string

Returns a substring of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: varbit, start_pos: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: varbit, start_pos: int, length: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring_index(input: string, delim: string, count: int) → string

Returns a substring of input before count occurrences of delim. +If count is positive, the leftmost part is returned. If count is negative, the rightmost part is returned.

+
Immutable
to_char_with_style(date: date, datestyle: string) → string

Convert a date to a string assuming the string is formatted using the given DateStyle.

+
Immutable
to_char_with_style(interval: interval, style: string) → string

Convert an interval to a string using the given IntervalStyle.

+
Immutable
to_char_with_style(timestamp: timestamp, datestyle: string) → string

Convert a timestamp to a string assuming the string is formatted using the given DateStyle.

+
Immutable
to_english(val: int) → string

This function enunciates the value of its argument using English cardinals.

+
Immutable
to_hex(val: bytes) → string

Converts val to its hexadecimal representation.

+
Immutable
to_hex(val: int) → string

Converts val to its hexadecimal representation.

+
Immutable
to_hex(val: string) → string

Converts val to its hexadecimal representation.

+
Immutable
to_ip(val: string) → bytes

Converts the character string representation of an IP to its byte string representation.

+
Immutable
to_uuid(val: string) → bytes

Converts the character string representation of a UUID to its byte string representation.

+
Immutable
translate(input: string, find: string, replace: string) → string

In input, replaces the first character from find with the first character in replace; repeat for each character in find.

+

For example, translate('doggie', 'dog', '123'); returns 1233ie.

+
Immutable
ulid_to_uuid(val: string) → uuid

Converts a ULID string to its UUID-encoded representation.

+
Immutable
unaccent(val: string) → string

Removes accents (diacritic signs) from the text provided in val.

+
Immutable
upper(val: string) → string

Converts all characters in val to their upper-case equivalents.

+
Immutable
+ +### System info functions + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cluster_logical_timestamp() → decimal

Returns the logical time of the current transaction as +a CockroachDB HLC in decimal form.

+

Note that uses of this function disable server-side optimizations and +may increase either contention or retry errors, or both.

+

Returns an error if run in a transaction with an isolation level weaker than SERIALIZABLE.

+
Volatile
current_database() → string

Returns the current database.

+
Stable
current_schema() → string

Returns the current schema.

+
Stable
current_schemas(include_pg_catalog: bool) → string[]

Returns the valid schemas in the search path.

+
Stable
current_user() → string

Returns the current user. This function is provided for compatibility with PostgreSQL.

+
Stable
session_user() → string

Returns the session user. This function is provided for compatibility with PostgreSQL.

+
Stable
to_regclass(text: string) → regtype

Translates a textual relation name to its OID

+
Stable
to_regnamespace(text: string) → regtype

Translates a textual schema name to its OID

+
Stable
to_regproc(text: string) → regtype

Translates a textual function or procedure name to its OID

+
Stable
to_regprocedure(text: string) → regtype

Translates a textual function or procedure name (with argument types) to its OID

+
Stable
to_regrole(text: string) → regtype

Translates a textual role name to its OID

+
Stable
to_regtype(text: string) → regtype

Translates a textual type name to its OID

+
Stable
version() → string

Returns the node’s version of CockroachDB.

+
Volatile
+ +### TIMETZ functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
current_time() → time

Returns the current transaction’s time with no time zone.

+
Stable
current_time() → timetz

Returns the current transaction’s time with time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
current_time(precision: int) → time

Returns the current transaction’s time with no time zone.

+
Stable
current_time(precision: int) → timetz

Returns the current transaction’s time with time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime() → time

Returns the current transaction’s time with no time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime() → timetz

Returns the current transaction’s time with time zone.

+
Stable
localtime(precision: int) → time

Returns the current transaction’s time with no time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime(precision: int) → timetz

Returns the current transaction’s time with time zone.

+
Stable
+ +### Trigrams functions + + + + + + +
Function → ReturnsDescriptionVolatility
show_trgm(input: string) → string[]

Returns an array of all the trigrams in the given string.

+
Immutable
similarity(left: string, right: string) → float

Returns a number that indicates how similar the two arguments are. The range of the result is zero (indicating that the two strings are completely dissimilar) to one (indicating that the two strings are identical).

+
Immutable
+ +### UUID functions + + + + + +
Function → ReturnsDescriptionVolatility
uuid_to_ulid(val: uuid) → string

Converts a UUID-encoded ULID to its string representation.

+
Immutable
+ +### Compatibility functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
col_description(table_oid: oid, column_number: int) → string

Returns the comment for a table column, which is specified by the OID of its table and its column number. (obj_description cannot be used for table columns, since columns do not have OIDs of their own.)

+
Stable
current_setting(setting_name: string) → string

System info

+
Stable
current_setting(setting_name: string, missing_ok: bool) → string

System info

+
Stable
format_type(type_oid: oid, typemod: int) → string

Returns the SQL name of a data type that is identified by its type OID and possibly a type modifier. Currently, the type modifier is ignored.

+
Stable
getdatabaseencoding() → string

Returns the current encoding name used by the database.

+
Stable
has_any_column_privilege(table: string, privilege: string) → bool

Returns whether or not the current user has privileges for any column of table.

+
Stable
has_any_column_privilege(table: oid, privilege: string) → bool

Returns whether or not the current user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: string, table: string, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: string, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: oid, table: string, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: oid, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_column_privilege(table: string, column: int, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: string, column: string, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: oid, column: int, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: oid, column: string, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(user: string, table: string, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: string, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: oid, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: oid, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: string, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: string, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: oid, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: oid, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_database_privilege(database: string, privilege: string) → bool

Returns whether or not the current user has privileges for database.

+
Stable
has_database_privilege(database: oid, privilege: string) → bool

Returns whether or not the current user has privileges for database.

+
Stable
has_database_privilege(user: string, database: string, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: string, database: oid, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: oid, database: string, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: oid, database: oid, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_foreign_data_wrapper_privilege(fdw: string, privilege: string) → bool

Returns whether or not the current user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(fdw: oid, privilege: string) → bool

Returns whether or not the current user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: string, fdw: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: string, fdw: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: oid, fdw: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: oid, fdw: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_function_privilege(function: string, privilege: string) → bool

Returns whether or not the current user has privileges for function.

+
Stable
has_function_privilege(function: oid, privilege: string) → bool

Returns whether or not the current user has privileges for function.

+
Stable
has_function_privilege(user: string, function: string, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: string, function: oid, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: oid, function: string, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: oid, function: oid, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_language_privilege(language: string, privilege: string) → bool

Returns whether or not the current user has privileges for language.

+
Stable
has_language_privilege(language: oid, privilege: string) → bool

Returns whether or not the current user has privileges for language.

+
Stable
has_language_privilege(user: string, language: string, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: string, language: oid, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: oid, language: string, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: oid, language: oid, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_schema_privilege(schema: string, privilege: string) → bool

Returns whether or not the current user has privileges for schema.

+
Stable
has_schema_privilege(schema: oid, privilege: string) → bool

Returns whether or not the current user has privileges for schema.

+
Stable
has_schema_privilege(user: string, schema: string, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: string, schema: oid, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: oid, schema: string, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: oid, schema: oid, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_sequence_privilege(sequence: string, privilege: string) → bool

Returns whether or not the current user has privileges for sequence.

+
Stable
has_sequence_privilege(sequence: oid, privilege: string) → bool

Returns whether or not the current user has privileges for sequence.

+
Stable
has_sequence_privilege(user: string, sequence: string, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: string, sequence: oid, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: oid, sequence: string, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: oid, sequence: oid, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_server_privilege(server: string, privilege: string) → bool

Returns whether or not the current user has privileges for foreign server.

+
Stable
has_server_privilege(server: oid, privilege: string) → bool

Returns whether or not the current user has privileges for foreign server.

+
Stable
has_server_privilege(user: string, server: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: string, server: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: oid, server: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: oid, server: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_table_privilege(table: string, privilege: string) → bool

Returns whether or not the current user has privileges for table.

+
Stable
has_table_privilege(table: oid, privilege: string) → bool

Returns whether or not the current user has privileges for table.

+
Stable
has_table_privilege(user: string, table: string, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: string, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: oid, table: string, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: oid, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_tablespace_privilege(tablespace: string, privilege: string) → bool

Returns whether or not the current user has privileges for tablespace.

+
Stable
has_tablespace_privilege(tablespace: oid, privilege: string) → bool

Returns whether or not the current user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: string, tablespace: string, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: string, tablespace: oid, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: oid, tablespace: string, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: oid, tablespace: oid, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_type_privilege(type: string, privilege: string) → bool

Returns whether or not the current user has privileges for type.

+
Stable
has_type_privilege(type: oid, privilege: string) → bool

Returns whether or not the current user has privileges for type.

+
Stable
has_type_privilege(user: string, type: string, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: string, type: oid, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: oid, type: string, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: oid, type: oid, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
information_schema._pg_numeric_precision(typid: oid, typmod: int4) → int

Returns the precision of the given type with type modifier

+
Immutable
information_schema._pg_numeric_precision_radix(typid: oid, typmod: int4) → int

Returns the radix of the given type with type modifier

+
Immutable
information_schema._pg_numeric_scale(typid: oid, typmod: int4) → int

Returns the scale of the given type with type modifier

+
Immutable
nameconcatoid(name: string, oid: oid) → name

Used in the information_schema to produce specific_name columns, which are supposed to be unique per schema. The result is the same as ($1::text || ‘_’ || $2::text)::name except that, if it would not fit in 63 characters, we make it do so by truncating the name input (not the oid).

+
Immutable
obj_description(object_oid: oid) → string

Returns the comment for a database object specified by its OID alone. This is deprecated since there is no guarantee that OIDs are unique across different system catalogs; therefore, the wrong comment might be returned.

+
Stable
obj_description(object_oid: oid, catalog_name: string) → string

Returns the comment for a database object specified by its OID and the name of the containing system catalog. For example, obj_description(123456, ‘pg_class’) would retrieve the comment for the table with OID 123456.

+
Stable
oidvectortypes(vector: oidvector) → string

Generates a comma separated string of type names from an oidvector.

+
Stable
pg_backend_pid() → int

Returns a numerical ID attached to this session. This ID is part of the query cancellation key used by the wire protocol. This function was only added for compatibility, and unlike in Postgres, the returned value does not correspond to a real process ID.

+
Stable
pg_collation_for(str: anyelement) → string

Returns the collation of the argument

+
Stable
pg_column_is_updatable(reloid: oid, attnum: int2, include_triggers: bool) → bool

Returns whether the given column can be updated.

+
Stable
pg_column_size(any...) → int

Return size in bytes of the column provided as an argument

+
Stable
pg_function_is_visible(oid: oid) → bool

Returns whether the function with the given OID belongs to one of the schemas on the search path.

+
Stable
pg_get_function_arg_default(func_oid: oid, arg_num: int4) → string

Get textual representation of a function argument’s default value. The second argument of this function is the argument number among all arguments (i.e. proallargtypes, not proargtypes), starting with 1, because that’s how information_schema.sql uses it. Currently, this always returns NULL, since CockroachDB does not support default values.

+
Stable
pg_get_function_arguments(func_oid: oid) → string

Returns the argument list (with defaults) necessary to identify a function, in the form it would need to appear in within CREATE FUNCTION.

+
Stable
pg_get_function_identity_arguments(func_oid: oid) → string

Returns the argument list (without defaults) necessary to identify a function, in the form it would need to appear in within ALTER FUNCTION, for instance.

+
Stable
pg_get_function_result(func_oid: oid) → string

Returns the types of the result of the specified function.

+
Stable
pg_get_functiondef(func_oid: oid) → string

For user-defined functions, returns the definition of the specified function. For builtin functions, returns the name of the function.

+
Stable
pg_get_indexdef(index_oid: oid) → string

Gets the CREATE INDEX command for index

+
Stable
pg_get_indexdef(index_oid: oid, column_no: int, pretty_bool: bool) → string

Gets the CREATE INDEX command for index, or definition of just one index column when given a non-zero column number

+
Stable
pg_get_serial_sequence(table_name: string, column_name: string) → string

Returns the name of the sequence used by the given column_name in the table table_name.

+
Stable
pg_get_viewdef(view_oid: oid) → string

Returns the CREATE statement for an existing view.

+
Stable
pg_get_viewdef(view_oid: oid, pretty_bool: bool) → string

Returns the CREATE statement for an existing view.

+
Stable
pg_has_role(role: string, privilege: string) → bool

Returns whether or not the current user has privileges for role.

+
Stable
pg_has_role(role: oid, privilege: string) → bool

Returns whether or not the current user has privileges for role.

+
Stable
pg_has_role(user: string, role: string, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: string, role: oid, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: oid, role: string, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: oid, role: oid, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_is_other_temp_schema(oid: oid) → bool

Returns true if the given OID is the OID of another session’s temporary schema. (This can be useful, for example, to exclude other sessions’ temporary tables from a catalog display.)

+
Stable
pg_my_temp_schema() → oid

Returns the OID of the current session’s temporary schema, or zero if it has none (because it has not created any temporary tables).

+
Stable
pg_relation_is_updatable(reloid: oid, include_triggers: bool) → int4

Returns the update events the relation supports.

+
Stable
pg_sequence_last_value(sequence_oid: oid) → int

Returns the last value generated by a sequence, or NULL if the sequence has not been used yet.

+
Volatile
pg_sleep(seconds: float) → bool

pg_sleep makes the current session’s process sleep until seconds seconds have elapsed. seconds is a value of type double precision, so fractional-second delays can be specified.

+
Volatile
pg_table_is_visible(oid: oid) → bool

Returns whether the table with the given OID belongs to one of the schemas on the search path.

+
Stable
pg_type_is_visible(oid: oid) → bool

Returns whether the type with the given OID belongs to one of the schemas on the search path.

+
Stable
set_config(setting_name: string, new_value: string, is_local: bool) → string

System info

+
Volatile
shobj_description(object_oid: oid, catalog_name: string) → string

Returns the comment for a shared database object specified by its OID and the name of the containing system catalog. This is just like obj_description except that it is used for retrieving comments on shared objects (e.g. databases).

+
Stable
+ diff --git a/src/current/_includes/cockroach-generated/release-25.2/sql/operators.md b/src/current/_includes/cockroach-generated/release-25.2/sql/operators.md new file mode 100644 index 00000000000..0adf385f4c5 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.2/sql/operators.md @@ -0,0 +1,663 @@ + + + + + +
#Return
int # intint
varbit # varbitvarbit
+ + + + +
#>Return
jsonb #> string[]jsonb
+ + + + +
#>>Return
jsonb #>> string[]string
+ + + + + + + + + +
%Return
decimal % decimaldecimal
decimal % intdecimal
float % floatfloat
int % decimaldecimal
int % intint
string % stringbool
+ + + + + + +
&Return
inet & inetinet
int & intint
varbit & varbitvarbit
+ + + + + + + + + +
&&Return
anyelement && anyelementbool
box2d && box2dbool
box2d && geometrybool
geometry && box2dbool
geometry && geometrybool
inet && inetbool
+ + + + + + + + + + + + + + + +
*Return
decimal * decimaldecimal
decimal * intdecimal
decimal * intervalinterval
float * floatfloat
float * intervalinterval
int * decimaldecimal
int * intint
int * intervalinterval
interval * decimalinterval
interval * floatinterval
interval * intinterval
vector * vectorvector
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Return
+decimaldecimal
+floatfloat
+intint
+intervalinterval
date + intdate
date + intervaltimestamp
date + timetimestamp
date + timetztimestamptz
decimal + decimaldecimal
decimal + intdecimal
decimal + pg_lsnpg_lsn
float + floatfloat
inet + intinet
int + datedate
int + decimaldecimal
int + inetinet
int + intint
interval + datetimestamp
interval + intervalinterval
interval + timetime
interval + timestamptimestamp
interval + timestamptztimestamptz
interval + timetztimetz
pg_lsn + decimalpg_lsn
time + datetimestamp
time + intervaltime
timestamp + intervaltimestamp
timestamptz + intervaltimestamptz
timetz + datetimestamptz
timetz + intervaltimetz
vector + vectorvector
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
-Return
-decimaldecimal
-floatfloat
-intint
-intervalinterval
date - dateint
date - intdate
date - intervaltimestamp
date - timetimestamp
decimal - decimaldecimal
decimal - intdecimal
float - floatfloat
inet - inetint
inet - intinet
int - decimaldecimal
int - intint
interval - intervalinterval
jsonb - intjsonb
jsonb - stringjsonb
jsonb - string[]jsonb
pg_lsn - decimalpg_lsn
pg_lsn - pg_lsndecimal
time - intervaltime
time - timeinterval
timestamp - intervaltimestamp
timestamp - timestampinterval
timestamp - timestamptzinterval
timestamptz - intervaltimestamptz
timestamptz - timestampinterval
timestamptz - timestamptzinterval
timetz - intervaltimetz
vector - vectorvector
+ + + + + +
->Return
jsonb -> intjsonb
jsonb -> stringjsonb
+ + + + + +
->>Return
jsonb ->> intstring
jsonb ->> stringstring
+ + + + + + + + + + +
/Return
decimal / decimaldecimal
decimal / intdecimal
float / floatfloat
int / decimaldecimal
int / intdecimal
interval / floatinterval
interval / intinterval
+ + + + + + + + +
//Return
decimal // decimaldecimal
decimal // intdecimal
float // floatfloat
int // decimaldecimal
int // intint
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
<Return
anyenum < anyenumbool
bool < boolbool
bool[] < bool[]bool
box2d < box2dbool
bpchar < bpcharbool
bytes < bytesbool
bytes[] < bytes[]bool
collatedstring < collatedstringbool
collatedstring{*} < collatedstring{*}bool
date < datebool
date < timestampbool
date < timestamptzbool
date[] < date[]bool
decimal < decimalbool
decimal < floatbool
decimal < intbool
decimal[] < decimal[]bool
float < decimalbool
float < floatbool
float < intbool
float[] < float[]bool
geography < geographybool
geometry < geometrybool
inet < inetbool
inet[] < inet[]bool
int < decimalbool
int < floatbool
int < intbool
int < oidbool
int[] < int[]bool
interval < intervalbool
interval[] < interval[]bool
jsonb < jsonbbool
oid < intbool
oid < oidbool
pg_lsn < pg_lsnbool
refcursor < refcursorbool
string < stringbool
string[] < string[]bool
time < timebool
time < timetzbool
time[] < time[]bool
timestamp < datebool
timestamp < timestampbool
timestamp < timestamptzbool
timestamp[] < timestamp[]bool
timestamptz < datebool
timestamptz < timestampbool
timestamptz < timestamptzbool
timestamptz < timestamptzbool
timetz < timebool
timetz < timetzbool
tuple < tuplebool
uuid < uuidbool
uuid[] < uuid[]bool
varbit < varbitbool
vector < vectorbool
+ + + + +
<#>Return
vector <#> vectorfloat
+ + + + +
<->Return
vector <-> vectorfloat
+ + + + + + +
<<Return
inet << inetbool
int << intint
varbit << intvarbit
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
<=Return
anyenum <= anyenumbool
bool <= boolbool
bool[] <= bool[]bool
box2d <= box2dbool
bpchar <= bpcharbool
bytes <= bytesbool
bytes[] <= bytes[]bool
collatedstring <= collatedstringbool
collatedstring{*} <= collatedstring{*}bool
date <= datebool
date <= timestampbool
date <= timestamptzbool
date[] <= date[]bool
decimal <= decimalbool
decimal <= floatbool
decimal <= intbool
decimal[] <= decimal[]bool
float <= decimalbool
float <= floatbool
float <= intbool
float[] <= float[]bool
geography <= geographybool
geometry <= geometrybool
inet <= inetbool
inet[] <= inet[]bool
int <= decimalbool
int <= floatbool
int <= intbool
int <= oidbool
int[] <= int[]bool
interval <= intervalbool
interval[] <= interval[]bool
jsonb <= jsonbbool
oid <= intbool
oid <= oidbool
pg_lsn <= pg_lsnbool
refcursor <= refcursorbool
string <= stringbool
string[] <= string[]bool
time <= timebool
time <= timetzbool
time[] <= time[]bool
timestamp <= datebool
timestamp <= timestampbool
timestamp <= timestamptzbool
timestamp[] <= timestamp[]bool
timestamptz <= datebool
timestamptz <= timestampbool
timestamptz <= timestamptzbool
timestamptz <= timestamptzbool
timetz <= timebool
timetz <= timetzbool
tuple <= tuplebool
uuid <= uuidbool
uuid[] <= uuid[]bool
varbit <= varbitbool
vector <= vectorbool
+ + + + +
<=>Return
vector <=> vectorfloat
+ + + + + +
<@Return
anyelement <@ anyelementbool
jsonb <@ jsonbbool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
=Return
anyenum = anyenumbool
bool = boolbool
bool[] = bool[]bool
box2d = box2dbool
bpchar = bpcharbool
bytes = bytesbool
bytes[] = bytes[]bool
collatedstring = collatedstringbool
collatedstring{*} = collatedstring{*}bool
date = datebool
date = timestampbool
date = timestamptzbool
date[] = date[]bool
decimal = decimalbool
decimal = floatbool
decimal = intbool
decimal[] = decimal[]bool
float = decimalbool
float = floatbool
float = intbool
float[] = float[]bool
geography = geographybool
geometry = geometrybool
inet = inetbool
inet[] = inet[]bool
int = decimalbool
int = floatbool
int = intbool
int = oidbool
int[] = int[]bool
interval = intervalbool
interval[] = interval[]bool
jsonb = jsonbbool
oid = intbool
oid = oidbool
pg_lsn = pg_lsnbool
refcursor = refcursorbool
string = stringbool
string[] = string[]bool
time = timebool
time = timetzbool
time[] = time[]bool
timestamp = datebool
timestamp = timestampbool
timestamp = timestamptzbool
timestamp[] = timestamp[]bool
timestamptz = datebool
timestamptz = timestampbool
timestamptz = timestamptzbool
timestamptz = timestamptzbool
timetz = timebool
timetz = timetzbool
tsquery = tsquerybool
tsvector = tsvectorbool
tuple = tuplebool
uuid = uuidbool
uuid[] = uuid[]bool
varbit = varbitbool
vector = vectorbool
+ + + + + + +
>>Return
inet >> inetbool
int >> intint
varbit >> intvarbit
+ + + + +
?Return
jsonb ? stringbool
+ + + + +
?&Return
jsonb ?& string[]bool
+ + + + +
?|Return
jsonb ?| string[]bool
+ + + + + +
@>Return
anyelement @> anyelementbool
jsonb @> jsonbbool
+ + + + + +
@@Return
tsquery @@ tsvectorbool
tsvector @@ tsquerybool
+ + + + +
ILIKEReturn
string ILIKE stringbool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
INReturn
anyenum IN tuplebool
bool IN tuplebool
box2d IN tuplebool
bpchar IN tuplebool
bytes IN tuplebool
collatedstring IN tuplebool
date IN tuplebool
decimal IN tuplebool
float IN tuplebool
geography IN tuplebool
geometry IN tuplebool
inet IN tuplebool
int IN tuplebool
interval IN tuplebool
jsonb IN tuplebool
oid IN tuplebool
pg_lsn IN tuplebool
refcursor IN tuplebool
string IN tuplebool
time IN tuplebool
timestamp IN tuplebool
timestamptz IN tuplebool
timetz IN tuplebool
tuple IN tuplebool
uuid IN tuplebool
varbit IN tuplebool
vector IN tuplebool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
IS NOT DISTINCT FROMReturn
anyelement IS NOT DISTINCT FROM unknownbool
anyenum IS NOT DISTINCT FROM anyenumbool
bool IS NOT DISTINCT FROM boolbool
bool[] IS NOT DISTINCT FROM bool[]bool
box2d IS NOT DISTINCT FROM box2dbool
bpchar IS NOT DISTINCT FROM bpcharbool
bytes IS NOT DISTINCT FROM bytesbool
bytes[] IS NOT DISTINCT FROM bytes[]bool
collatedstring IS NOT DISTINCT FROM collatedstringbool
collatedstring{*} IS NOT DISTINCT FROM collatedstring{*}bool
date IS NOT DISTINCT FROM datebool
date IS NOT DISTINCT FROM timestampbool
date IS NOT DISTINCT FROM timestamptzbool
date[] IS NOT DISTINCT FROM date[]bool
decimal IS NOT DISTINCT FROM decimalbool
decimal IS NOT DISTINCT FROM floatbool
decimal IS NOT DISTINCT FROM intbool
decimal[] IS NOT DISTINCT FROM decimal[]bool
float IS NOT DISTINCT FROM decimalbool
float IS NOT DISTINCT FROM floatbool
float IS NOT DISTINCT FROM intbool
float[] IS NOT DISTINCT FROM float[]bool
geography IS NOT DISTINCT FROM geographybool
geometry IS NOT DISTINCT FROM geometrybool
inet IS NOT DISTINCT FROM inetbool
inet[] IS NOT DISTINCT FROM inet[]bool
int IS NOT DISTINCT FROM decimalbool
int IS NOT DISTINCT FROM floatbool
int IS NOT DISTINCT FROM intbool
int IS NOT DISTINCT FROM oidbool
int[] IS NOT DISTINCT FROM int[]bool
interval IS NOT DISTINCT FROM intervalbool
interval[] IS NOT DISTINCT FROM interval[]bool
jsonb IS NOT DISTINCT FROM jsonbbool
jsonpath IS NOT DISTINCT FROM jsonpathbool
oid IS NOT DISTINCT FROM intbool
oid IS NOT DISTINCT FROM oidbool
pg_lsn IS NOT DISTINCT FROM pg_lsnbool
refcursor IS NOT DISTINCT FROM refcursorbool
string IS NOT DISTINCT FROM stringbool
string[] IS NOT DISTINCT FROM string[]bool
time IS NOT DISTINCT FROM timebool
time IS NOT DISTINCT FROM timetzbool
time[] IS NOT DISTINCT FROM time[]bool
timestamp IS NOT DISTINCT FROM datebool
timestamp IS NOT DISTINCT FROM timestampbool
timestamp IS NOT DISTINCT FROM timestamptzbool
timestamp[] IS NOT DISTINCT FROM timestamp[]bool
timestamptz IS NOT DISTINCT FROM datebool
timestamptz IS NOT DISTINCT FROM timestampbool
timestamptz IS NOT DISTINCT FROM timestamptzbool
timestamptz IS NOT DISTINCT FROM timestamptzbool
timetz IS NOT DISTINCT FROM timebool
timetz IS NOT DISTINCT FROM timetzbool
tsquery IS NOT DISTINCT FROM tsquerybool
tsvector IS NOT DISTINCT FROM tsvectorbool
tuple IS NOT DISTINCT FROM tuplebool
unknown IS NOT DISTINCT FROM unknownbool
unknown IS NOT DISTINCT FROM voidbool
uuid IS NOT DISTINCT FROM uuidbool
uuid[] IS NOT DISTINCT FROM uuid[]bool
varbit IS NOT DISTINCT FROM varbitbool
vector IS NOT DISTINCT FROM vectorbool
void IS NOT DISTINCT FROM unknownbool
+ + + + +
LIKEReturn
string LIKE stringbool
+ + + + +
SIMILAR TOReturn
string SIMILAR TO stringbool
+ + + + + + + + +
^Return
decimal ^ decimaldecimal
decimal ^ intdecimal
float ^ floatfloat
int ^ decimaldecimal
int ^ intint
+ + + + + + +
|Return
inet | inetinet
int | intint
varbit | varbitvarbit
+ + + + + +
|/Return
|/decimaldecimal
|/floatfloat
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
||Return
bool || bool[]bool[]
bool || stringstring
bool[] || boolbool[]
bool[] || bool[]bool[]
box2d || box2dbox2d
box2d || stringstring
bytes || bytesbytes
bytes || bytes[]bytes[]
bytes[] || bytesbytes[]
bytes[] || bytes[]bytes[]
date || date[]date[]
date || stringstring
date[] || datedate[]
date[] || date[]date[]
decimal || decimal[]decimal[]
decimal || stringstring
decimal[] || decimaldecimal[]
decimal[] || decimal[]decimal[]
float || float[]float[]
float || stringstring
float[] || floatfloat[]
float[] || float[]float[]
geography || geographygeography
geography || stringstring
geometry || geometrygeometry
geometry || stringstring
inet || inet[]inet[]
inet || stringstring
inet[] || inetinet[]
inet[] || inet[]inet[]
int || int[]int[]
int || stringstring
int[] || intint[]
int[] || int[]int[]
interval || interval[]interval[]
interval || stringstring
interval[] || intervalinterval[]
interval[] || interval[]interval[]
jsonb || jsonbjsonb
jsonb || stringstring
oid || oidoid
oid || stringstring
pg_lsn || pg_lsnpg_lsn
pg_lsn || stringstring
refcursor || refcursorrefcursor
refcursor || stringstring
string || boolstring
string || box2dstring
string || datestring
string || decimalstring
string || floatstring
string || geographystring
string || geometrystring
string || inetstring
string || intstring
string || intervalstring
string || jsonbstring
string || oidstring
string || pg_lsnstring
string || refcursorstring
string || stringstring
string || string[]string[]
string || timestring
string || timestampstring
string || timestamptzstring
string || timetzstring
string || tuplestring
string || uuidstring
string || varbitstring
string[] || stringstring[]
string[] || string[]string[]
time || stringstring
time || time[]time[]
time[] || timetime[]
time[] || time[]time[]
timestamp || stringstring
timestamp || timestamp[]timestamp[]
timestamp[] || timestamptimestamp[]
timestamp[] || timestamp[]timestamp[]
timestamptz || stringstring
timestamptz || timestamptztimestamptz
timestamptz || timestamptztimestamptz
timestamptz || timestamptztimestamptz
timetz || stringstring
timetz || timetztimetz
tuple || stringstring
uuid || stringstring
uuid || uuid[]uuid[]
uuid[] || uuiduuid[]
uuid[] || uuid[]uuid[]
varbit || stringstring
varbit || varbitvarbit
+ + + + + +
||/Return
||/decimaldecimal
||/floatfloat
+ + + + + + + + + + + +
~Return
~inetinet
~intint
~varbitvarbit
box2d ~ box2dbool
box2d ~ geometrybool
geometry ~ box2dbool
geometry ~ geometrybool
string ~ stringbool
+ + + + +
~*Return
string ~* stringbool
diff --git a/src/current/_includes/cockroach-generated/release-25.2/sql/window_functions.md b/src/current/_includes/cockroach-generated/release-25.2/sql/window_functions.md new file mode 100644 index 00000000000..e1032ff82de --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.2/sql/window_functions.md @@ -0,0 +1,413 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cume_dist() → float

Calculates the relative rank of the current row: (number of rows preceding or peer with current row) / (total rows).

+
Immutable
dense_rank() → int

Calculates the rank of the current row without gaps; this function counts peer groups.

+
Immutable
first_value(val: bool) → bool

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: bytes) → bytes

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: date) → date

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: decimal) → decimal

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: float) → float

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: inet) → inet

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: int) → int

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: interval) → interval

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: string) → string

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: time) → time

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timestamp) → timestamp

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timestamptz) → timestamptz

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: uuid) → uuid

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: box2d) → box2d

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: geography) → geography

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: geometry) → geometry

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: jsonb) → jsonb

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: oid) → oid

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: pg_lsn) → pg_lsn

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: refcursor) → refcursor

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timetz) → timetz

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: varbit) → varbit

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
lag(val: bool) → bool

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: bool, n: int) → bool

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: bool, n: int, default: bool) → bool

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: bytes) → bytes

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: bytes, n: int) → bytes

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: bytes, n: int, default: bytes) → bytes

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: date) → date

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: date, n: int) → date

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: date, n: int, default: date) → date

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: decimal) → decimal

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: decimal, n: int) → decimal

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: decimal, n: int, default: decimal) → decimal

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: float) → float

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: float, n: int) → float

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: float, n: int, default: float) → float

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: inet) → inet

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: inet, n: int) → inet

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: inet, n: int, default: inet) → inet

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: int) → int

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: int, n: int) → int

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: int, n: int, default: int) → int

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: interval) → interval

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: interval, n: int) → interval

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: interval, n: int, default: interval) → interval

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: string) → string

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: string, n: int) → string

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: string, n: int, default: string) → string

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: time) → time

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: time, n: int) → time

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: time, n: int, default: time) → time

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timestamp) → timestamp

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timestamp, n: int, default: timestamp) → timestamp

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timestamptz) → timestamptz

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timestamptz, n: int, default: timestamptz) → timestamptz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: uuid) → uuid

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: uuid, n: int) → uuid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: uuid, n: int, default: uuid) → uuid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: box2d) → box2d

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: box2d, n: int) → box2d

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: box2d, n: int, default: box2d) → box2d

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: geography) → geography

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: geography, n: int) → geography

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: geography, n: int, default: geography) → geography

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: geometry) → geometry

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: geometry, n: int) → geometry

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: geometry, n: int, default: geometry) → geometry

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: jsonb) → jsonb

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: jsonb, n: int, default: jsonb) → jsonb

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: oid) → oid

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: oid, n: int) → oid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: oid, n: int, default: oid) → oid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: pg_lsn) → pg_lsn

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: pg_lsn, n: int, default: pg_lsn) → pg_lsn

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: refcursor) → refcursor

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: refcursor, n: int, default: refcursor) → refcursor

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timetz) → timetz

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timetz, n: int) → timetz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timetz, n: int, default: timetz) → timetz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: varbit) → varbit

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: varbit, n: int) → varbit

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: varbit, n: int, default: varbit) → varbit

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
last_value(val: bool) → bool

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: bytes) → bytes

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: date) → date

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: decimal) → decimal

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: float) → float

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: inet) → inet

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: int) → int

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: interval) → interval

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: string) → string

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: time) → time

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timestamp) → timestamp

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timestamptz) → timestamptz

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: uuid) → uuid

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: box2d) → box2d

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: geography) → geography

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: geometry) → geometry

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: jsonb) → jsonb

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: oid) → oid

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: pg_lsn) → pg_lsn

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: refcursor) → refcursor

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timetz) → timetz

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: varbit) → varbit

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
lead(val: bool) → bool

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: bool, n: int) → bool

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: bool, n: int, default: bool) → bool

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: bytes) → bytes

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: bytes, n: int) → bytes

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: bytes, n: int, default: bytes) → bytes

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: date) → date

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: date, n: int) → date

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: date, n: int, default: date) → date

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: decimal) → decimal

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: decimal, n: int) → decimal

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: decimal, n: int, default: decimal) → decimal

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: float) → float

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: float, n: int) → float

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: float, n: int, default: float) → float

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: inet) → inet

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: inet, n: int) → inet

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: inet, n: int, default: inet) → inet

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: int) → int

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: int, n: int) → int

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: int, n: int, default: int) → int

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: interval) → interval

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: interval, n: int) → interval

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: interval, n: int, default: interval) → interval

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: string) → string

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: string, n: int) → string

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: string, n: int, default: string) → string

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: time) → time

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: time, n: int) → time

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: time, n: int, default: time) → time

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timestamp) → timestamp

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timestamp, n: int, default: timestamp) → timestamp

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timestamptz) → timestamptz

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timestamptz, n: int, default: timestamptz) → timestamptz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: uuid) → uuid

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: uuid, n: int) → uuid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: uuid, n: int, default: uuid) → uuid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: box2d) → box2d

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: box2d, n: int) → box2d

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: box2d, n: int, default: box2d) → box2d

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: geography) → geography

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: geography, n: int) → geography

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: geography, n: int, default: geography) → geography

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: geometry) → geometry

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: geometry, n: int) → geometry

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: geometry, n: int, default: geometry) → geometry

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: jsonb) → jsonb

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: jsonb, n: int, default: jsonb) → jsonb

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: oid) → oid

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: oid, n: int) → oid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: oid, n: int, default: oid) → oid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: pg_lsn) → pg_lsn

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: pg_lsn, n: int, default: pg_lsn) → pg_lsn

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: refcursor) → refcursor

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: refcursor, n: int, default: refcursor) → refcursor

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timetz) → timetz

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timetz, n: int) → timetz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timetz, n: int, default: timetz) → timetz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: varbit) → varbit

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: varbit, n: int) → varbit

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: varbit, n: int, default: varbit) → varbit

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
nth_value(val: bool, n: int) → bool

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: bytes, n: int) → bytes

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: date, n: int) → date

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: decimal, n: int) → decimal

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: float, n: int) → float

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: inet, n: int) → inet

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: int, n: int) → int

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: interval, n: int) → interval

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: string, n: int) → string

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: time, n: int) → time

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: uuid, n: int) → uuid

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: box2d, n: int) → box2d

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: geography, n: int) → geography

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: geometry, n: int) → geometry

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: oid, n: int) → oid

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timetz, n: int) → timetz

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: varbit, n: int) → varbit

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
ntile(n: int) → int

Calculates an integer ranging from 1 to n, dividing the partition as equally as possible.

+
Immutable
percent_rank() → float

Calculates the relative rank of the current row: (rank - 1) / (total rows - 1).

+
Immutable
rank() → int

Calculates the rank of the current row with gaps; same as row_number of its first peer.

+
Immutable
row_number() → int

Calculates the number of the current row within its partition, counting from 1.

+
Immutable
+ diff --git a/src/current/_includes/cockroach-generated/release-25.3/eventlog.md b/src/current/_includes/cockroach-generated/release-25.3/eventlog.md new file mode 100644 index 00000000000..a165d7db3eb --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.3/eventlog.md @@ -0,0 +1,3549 @@ +Certain notable events are reported using a structured format. +Commonly, these notable events are also copied to the table +`system.eventlog`, unless the cluster setting +`server.eventlog.enabled` is unset. + +Additionally, notable events are copied to specific external logging +channels in log messages, where they can be collected for further processing. + +The sections below document the possible notable event types +in this version of CockroachDB. For each event type, a table +documents the possible fields. A field may be omitted from +an event if its value is empty or zero. + +A field is also considered "Sensitive" if it may contain +application-specific information or personally identifiable information (PII). In that case, +the copy of the event sent to the external logging channel +will contain redaction markers in a format that is compatible +with the redaction facilities in [`cockroach debug zip`](cockroach-debug-zip.html) +and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html), +provided the `redactable` functionality is enabled on the logging sink. + +Events not documented on this page will have an unstructured format in log messages. + +## Changefeed telemetry events + +Events in this category pertain to changefeed usage and metrics. + +Events in this category are logged to the `TELEMETRY` channel. + + +### `alter_changefeed` + +An event of type `alter_changefeed` is an event for any ALTER CHANGEFEED statements that are run. + + +| Field | Description | Sensitive | +|--|--|--| +| `PreviousDescription` | The description of the changefeed job before the ALTER CHANGEFEED. 
| yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. | no | + +### `changefeed_canceled` + +An event of type `changefeed_canceled` is an event for any changefeed cancellations. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. | no | + +### `changefeed_emitted_bytes` + +An event of type `changefeed_emitted_bytes` is an event representing the bytes emitted by a changefeed over an interval. + + +| Field | Description | Sensitive | +|--|--|--| +| `EmittedBytes` | The number of bytes emitted. | no | +| `EmittedMessages` | The number of messages emitted. | no | +| `LoggingInterval` | The time period in nanoseconds between emitting telemetry events of this type (per-aggregator). | no | +| `Closing` | Flag to indicate that the changefeed is closing. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. | no | + +### `changefeed_failed` + +An event of type `changefeed_failed` is an event for any changefeed failure since the plan hook +was triggered. + + +| Field | Description | Sensitive | +|--|--|--| +| `FailureType` | The reason / environment with which the changefeed failed (ex: connection_closed, changefeed_behind). | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. | no | + +### `create_changefeed` + +An event of type `create_changefeed` is an event for any CREATE CHANGEFEED query that +successfully starts running. Failed CREATE statements will show up as +ChangefeedFailed events. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `Transformation` | Flag representing whether the changefeed is using CDC queries. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. | no | + +## Cluster-level events + +Events in this category pertain to an entire cluster and are +not relative to any particular tenant. + +In a multi-tenant setup, the `system.eventlog` table for individual +tenants cannot contain a copy of cluster-level events; conversely, +the `system.eventlog` table in the system tenant cannot contain the +SQL-level events for individual tenants. + +Events in this category are logged to the `OPS` channel. + + +### `certs_reload` + +An event of type `certs_reload` is recorded when the TLS certificates are +reloaded/rotated from disk. + + +| Field | Description | Sensitive | +|--|--|--| +| `Success` | Whether the operation completed without errors. | no | +| `ErrorMessage` | If an error was encountered, the text of the error. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `disk_slowness_cleared` + +An event of type `disk_slowness_cleared` is recorded when disk slowness in a store has cleared. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `NodeID` | The node ID where the event was originated. | no | +| `StoreID` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `disk_slowness_detected` + +An event of type `disk_slowness_detected` is recorded when a store observes disk slowness +events. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeID` | The node ID where the event was originated. | no | +| `StoreID` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `low_disk_space` + +An event of type `low_disk_space` is emitted when a store is reaching capacity, as we reach +certain thresholds. It is emitted periodically while we are in a low disk +state. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeID` | The node ID where the event was originated. | no | +| `StoreID` | | no | +| `PercentThreshold` | The free space percent threshold that we went under. | no | +| `AvailableBytes` | | no | +| `TotalBytes` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `node_decommissioned` + +An event of type `node_decommissioned` is recorded when a node is marked as +decommissioned. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. 
| no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_decommissioning` + +An event of type `node_decommissioning` is recorded when a node is marked as +decommissioning. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_join` + +An event of type `node_join` is recorded when a node joins the cluster. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_recommissioned` + +An event of type `node_recommissioned` is recorded when a decommissioning node is +recommissioned. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_restart` + +An event of type `node_restart` is recorded when an existing node rejoins the cluster +after being offline. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_shutdown_connection_timeout` + +An event of type `node_shutdown_connection_timeout` is recorded when SQL connections remain open +during shutdown, after waiting for the server.shutdown.connections.timeout +to transpire. + + +| Field | Description | Sensitive | +|--|--|--| +| `Detail` | The detailed message, meant to be a human-understandable explanation. | no | +| `ConnectionsRemaining` | The number of connections still open after waiting for the client to close them. | no | +| `TimeoutMillis` | The amount of time the server waited for the client to close the connections, defined by server.shutdown.connections.timeout. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_shutdown_transaction_timeout` + +An event of type `node_shutdown_transaction_timeout` is recorded when SQL transactions remain open +during shutdown, after waiting for the server.shutdown.transactions.timeout +to transpire. + + +| Field | Description | Sensitive | +|--|--|--| +| `Detail` | The detailed message, meant to be a human-understandable explanation. | no | +| `ConnectionsRemaining` | The number of connections still running SQL transactions after waiting for the client to end them. | no | +| `TimeoutMillis` | The amount of time the server waited for the client to close the connections, defined by server.shutdown.transactions.timeout. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `tenant_shared_service_start` + +An event of type `tenant_shared_service_start` is recorded when a tenant server +is started inside the same process as the KV layer. + + +| Field | Description | Sensitive | +|--|--|--| +| `OK` | Whether the startup was successful. | no | +| `ErrorText` | If the startup failed, the text of the error. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `TenantID` | The ID of the tenant owning the service. | no | +| `InstanceID` | The ID of the server instance. | no | +| `TenantName` | The name of the tenant at the time the event was emitted. | yes | + +### `tenant_shared_service_stop` + +An event of type `tenant_shared_service_stop` is recorded when a tenant server +is shut down inside the same process as the KV layer. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `TenantID` | The ID of the tenant owning the service. | no | +| `InstanceID` | The ID of the server instance. | no | +| `TenantName` | The name of the tenant at the time the event was emitted. 
| yes | + +## Debugging events + +Events in this category pertain to debugging operations performed by +operators or (more commonly) Cockroach Labs employees. These operations can +e.g. directly access and mutate internal state, breaking system invariants. + +Events in this category are logged to the `OPS` channel. + + +### `debug_recover_replica` + +An event of type `debug_recover_replica` is recorded when unsafe loss of quorum recovery is performed. + + +| Field | Description | Sensitive | +|--|--|--| +| `RangeID` | | no | +| `StoreID` | | no | +| `SurvivorReplicaID` | | no | +| `UpdatedReplicaID` | | no | +| `StartKey` | | yes | +| `EndKey` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event originated. | no | +| `User` | The user which performed the operation. | yes | + +### `debug_send_kv_batch` + +An event of type `debug_send_kv_batch` is recorded when an arbitrary KV BatchRequest is submitted +to the cluster via the `debug send-kv-batch` CLI command. + + +| Field | Description | Sensitive | +|--|--|--| +| `BatchRequest` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event originated. | no | +| `User` | The user which performed the operation. | yes | + +## Health events + +Events in this category pertain to the health of one or more servers. + +Events in this category are logged to the `HEALTH` channel. 
+ + +### `hot_ranges_stats` + +An event of type `hot_ranges_stats` + + +| Field | Description | Sensitive | +|--|--|--| +| `RangeID` | | no | +| `Qps` | | no | +| `SchemaName` | SchemaName is the name of the schema in which the index was created. | yes | +| `LeaseholderNodeID` | LeaseholderNodeID indicates the Node ID that is the current leaseholder for the given range. | no | +| `WritesPerSecond` | Writes per second is the recent number of keys written per second on this range. | no | +| `ReadsPerSecond` | Reads per second is the recent number of keys read per second on this range. | no | +| `WriteBytesPerSecond` | Write bytes per second is the recent number of bytes written per second on this range. | no | +| `ReadBytesPerSecond` | Read bytes per second is the recent number of bytes read per second on this range. | no | +| `CPUTimePerSecond` | CPU time per second is the recent cpu usage in nanoseconds of this range. | no | +| `Databases` | Databases for the range. | no | +| `Tables` | Tables for the range | no | +| `Indexes` | Indexes for the range | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `runtime_stats` + +An event of type `runtime_stats` is recorded every 10 seconds as server health metrics. + + +| Field | Description | Sensitive | +|--|--|--| +| `MemRSSBytes` | The process resident set size. Expressed as bytes. | no | +| `GoroutineCount` | The number of goroutines. | no | +| `MemStackSysBytes` | The stack system memory used. Expressed as bytes. | no | +| `GoAllocBytes` | The memory allocated by Go. Expressed as bytes. | no | +| `GoTotalBytes` | The total memory allocated by Go but not released. Expressed as bytes. | no | +| `GoStatsStaleness` | The staleness of the Go memory statistics. Expressed in seconds. 
| no | +| `HeapFragmentBytes` | The amount of heap fragmentation. Expressed as bytes. | no | +| `HeapReservedBytes` | The amount of heap reserved. Expressed as bytes. | no | +| `HeapReleasedBytes` | The amount of heap released. Expressed as bytes. | no | +| `CGoAllocBytes` | The memory allocated outside of Go. Expressed as bytes. | no | +| `CGoTotalBytes` | The total memory allocated outside of Go but not released. Expressed as bytes. | no | +| `CGoCallRate` | The total number of calls outside of Go over time. Expressed as operations per second. | no | +| `CPUUserPercent` | The user CPU percentage. | no | +| `CPUSysPercent` | The system CPU percentage. | no | +| `GCPausePercent` | The GC pause percentage. | no | +| `GCRunCount` | The total number of GC runs. | no | +| `NetHostRecvBytes` | The bytes received on all network interfaces since this process started. | no | +| `NetHostSendBytes` | The bytes sent on all network interfaces since this process started. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Job events + +Events in this category pertain to long-running jobs that are orchestrated by +a node's job registry. These system processes can create and/or modify stored +objects during the course of their execution. + +A job might choose to emit multiple events during its execution when +transitioning from one "state" to another. +Egs: IMPORT/RESTORE will emit events on job creation and successful +completion. If the job fails, events will be emitted on job creation, +failure, and successful revert. + +Events in this category are logged to the `OPS` channel. + + +### `import` + +An event of type `import` is recorded when an import job is created and successful completion. +If the job fails, events will be emitted on job creation, failure, and +successful revert. 
+ + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `JobID` | The ID of the job that triggered the event. | no | +| `JobType` | The type of the job that triggered the event. | no | +| `Description` | A description of the job that triggered the event. Some jobs populate the description with an approximate representation of the SQL statement run to create the job. | yes | +| `User` | The user account that triggered the event. | yes | +| `DescriptorIDs` | The object descriptors affected by the job. Set to zero for operations that don't affect descriptors. | yes | +| `Status` | The status of the job that triggered the event. This allows the job to indicate which phase execution it is in when the event is triggered. | no | + +### `restore` + +An event of type `restore` is recorded when a restore job is created and successful completion. +If the job fails, events will be emitted on job creation, failure, and +successful revert. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `JobID` | The ID of the job that triggered the event. | no | +| `JobType` | The type of the job that triggered the event. | no | +| `Description` | A description of the job that triggered the event. Some jobs populate the description with an approximate representation of the SQL statement run to create the job. | yes | +| `User` | The user account that triggered the event. | yes | +| `DescriptorIDs` | The object descriptors affected by the job. Set to zero for operations that don't affect descriptors. | yes | +| `Status` | The status of the job that triggered the event. 
This allows the job to indicate which phase execution it is in when the event is triggered. | no | + +### `status_change` + +An event of type `status_change` is recorded when a job changes statuses. + + +| Field | Description | Sensitive | +|--|--|--| +| `JobID` | The ID of the job that is changing statuses. | no | +| `JobType` | The type of the job that is changing statuses. | no | +| `Description` | A human parsable description of the status change | partially | +| `PreviousStatus` | The status that the job is transitioning out of | no | +| `NewStatus` | The status that the job has transitioned into | no | +| `RunNum` | The run number of the job. | no | +| `Error` | An error that may have occurred while the job was running. | yes | +| `FinalResumeErr` | An error that occurred that requires the job to be reverted. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Miscellaneous SQL events + +Events in this category report miscellaneous SQL events. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these miscellaneous events are +preserved in each tenant's own system.eventlog table. + +Events in this category are logged to the `OPS` channel. + + +### `set_cluster_setting` + +An event of type `set_cluster_setting` is recorded when a cluster setting is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SettingName` | The name of the affected cluster setting. | no | +| `Value` | The new value of the cluster setting. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `set_tenant_cluster_setting` + +An event of type `set_tenant_cluster_setting` is recorded when a cluster setting override +is changed, either for another tenant or for all tenants. + + +| Field | Description | Sensitive | +|--|--|--| +| `SettingName` | The name of the affected cluster setting. | no | +| `Value` | The new value of the cluster setting. | yes | +| `TenantId` | The target Tenant ID. Empty if targeting all tenants. | no | +| `AllTenants` | Whether the override applies to all tenants. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +## SQL Access Audit Events + +Events in this category are generated when a table has been +marked as audited via `ALTER TABLE ... EXPERIMENTAL_AUDIT SET`. + +Note: These events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SENSITIVE_ACCESS` channel. + + +### `admin_query` + +An event of type `admin_query` is recorded when a user with admin privileges (the user +is directly or indirectly a member of the admin role) executes a query. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. 
Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `role_based_audit_event` + +An event of type `role_based_audit_event` is an audit event recorded when an executed query belongs to a user whose role +membership(s) correspond to any role that is enabled to emit an audit log via the sql.log.user_audit +cluster setting. + + +| Field | Description | Sensitive | +|--|--|--| +| `Role` | The configured audit role that emitted this log. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. 
| no | + +### `sensitive_table_access` + +An event of type `sensitive_table_access` is recorded when an access is performed to +a table marked as audited. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table being audited. | yes | +| `AccessMode` | How the table was accessed (r=read / rw=read/write). | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. 
| no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +## SQL Execution Log + +Events in this category report executed queries. + +Note: These events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SQL_EXEC` channel. + + +### `query_execute` + +An event of type `query_execute` is recorded when a query is executed, +and the cluster setting `sql.log.all_statements.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +## SQL Logical Schema Changes + +Events in this category pertain to DDL (Data Definition Language) +operations performed by SQL statements that modify the SQL logical +schema. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `SQL_SCHEMA` channel. + + +### `alter_database_add_region` + +An event of type `alter_database_add_region` is recorded when a region is added to a database. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `RegionName` | The region being added. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_drop_region` + +An event of type `alter_database_drop_region` is recorded when a region is dropped from a database. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `RegionName` | The region being dropped. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. 
The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_placement` + +An event of type `alter_database_placement` is recorded when the database placement is modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `Placement` | The new placement policy. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_primary_region` + +An event of type `alter_database_primary_region` is recorded when a primary region is added/modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `PrimaryRegionName` | The new primary region. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_database_set_zone_config_extension` + +An event of type `alter_database_set_zone_config_extension` is recorded when a zone config extension is changed. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + +### `alter_database_survival_goal` + +An event of type `alter_database_survival_goal` is recorded when the survival goal is modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `SurvivalGoal` | The new survival goal | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. 
The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_function_options` + +An event of type `alter_function_options` is recorded when a user-defined function's options are +altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the affected function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `alter_index` + +An event of type `alter_index` is recorded when an index is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_index_visible` + +AlterIndexVisible is recorded when an index's visibility is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `NotVisible` | Set true if index is not visible. NOTE: THIS FIELD IS DEPRECATED in favor of invisibility. | no | +| `Invisibility` | The new invisibility of the affected index. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_sequence` + +An event of type `alter_sequence` is recorded when a sequence is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the affected sequence. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_table` + +An event of type `alter_table` is recorded when a table is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update, if any. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_type` + +EventAlterType is recorded when a user-defined type is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_column` + +An event of type `comment_on_column` is recorded when a column is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected column. | no | +| `ColumnName` | The affected column. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_constraint` + +An event of type `comment_on_constraint` is recorded when a constraint is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected constraint. | no | +| `ConstraintName` | The name of the affected constraint. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_database` + +CommentOnDatabase is recorded when a database is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_index` + +An event of type `comment_on_index` is recorded when an index is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_schema` + + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | Name of the affected schema. 
| no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_table` + +An event of type `comment_on_table` is recorded when a table is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `comment_on_type` + +An event of type `comment_on_type` is recorded when a type is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_database` + +An event of type `create_database` is recorded when a database is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the new database. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_function` + +An event of type `create_function` is recorded when a user-defined function is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the created function. | no | +| `IsReplace` | If the new function is a replace of an existing function. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_index` + +An event of type `create_index` is recorded when an index is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the new index. | no | +| `IndexName` | The name of the new index. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_policy` + +An event of type `create_policy` is recorded when a policy is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | Name of the policy's table. | no | +| `PolicyName` | Name of the created policy. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_schema` + +An event of type `create_schema` is recorded when a schema is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the new schema. | no | +| `Owner` | The name of the owner for the new schema. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_sequence` + +An event of type `create_sequence` is recorded when a sequence is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the new sequence. | no | +| `Owner` | The name of the owner for the new sequence. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_statistics` + +An event of type `create_statistics` is recorded when statistics are collected for a +table. + +Events of this type are only collected when the cluster setting +`sql.stats.post_events.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table for which the statistics were created. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. 
The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_table` + +An event of type `create_table` is recorded when a table is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the new table. | no | +| `Owner` | The name of the owner for the new table. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `create_trigger` + +An event of type `create_trigger` is recorded when a trigger is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | Name of the trigger's table. | no | +| `TriggerName` | Name of the created trigger. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_type` + +An event of type `create_type` is recorded when a user-defined type is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the new type. | no | +| `Owner` | The name of the owner for the new type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_view` + +An event of type `create_view` is recorded when a view is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `ViewName` | The name of the new view. | no | +| `Owner` | The name of the owner of the new view. | no | +| `ViewQuery` | The SQL selection clause used to define the view. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. 
Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_database` + +An event of type `drop_database` is recorded when a database is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | no | +| `DroppedSchemaObjects` | The names of the schemas dropped by a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_function` + +An event of type `drop_function` is recorded when a user-defined function is dropped. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the dropped function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_index` + +An event of type `drop_index` is recorded when an index is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_policy` + +An event of type `drop_policy` is recorded when a policy is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | Name of the policy's table. | no | +| `PolicyName` | Name of the dropped policy. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. 
Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_schema` + +An event of type `drop_schema` is recorded when a schema is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_sequence` + +An event of type `drop_sequence` is recorded when a sequence is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the affected sequence. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_table` + +An event of type `drop_table` is recorded when a table is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_trigger` + +An event of type `drop_trigger` is recorded when a trigger is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | Name of the trigger's table. | no | +| `TriggerName` | Name of the dropped trigger. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_type` + +An event of type `drop_type` is recorded when a user-defined type is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_view` + +An event of type `drop_view` is recorded when a view is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `ViewName` | The name of the affected view. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no |
+| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially |
+| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no |
+| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends |
+| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no |
+| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no |
+| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes |
+
+### `finish_schema_change`
+
+An event of type `finish_schema_change` is recorded when a previously initiated schema
+change has completed.
+
+
+| Field | Description | Sensitive |
+|--|--|--|
+| `LatencyNanos` | The amount of time the schema change job took to complete. | no |
+
+
+#### Common fields
+
+| Field | Description | Sensitive |
+|--|--|--|
+| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no |
+| `EventType` | The type of the event. | no |
+| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event originated. | no |
+| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no |
+| `MutationID` | The descriptor mutation that this schema change was processing. | no |
+
+### `finish_schema_change_rollback`
+
+An event of type `finish_schema_change_rollback` is recorded when a previously
+initiated schema change rollback has completed. 
+
+
+| Field | Description | Sensitive |
+|--|--|--|
+| `LatencyNanos` | The amount of time the schema change job took to roll back. | no |
+
+
+#### Common fields
+
+| Field | Description | Sensitive |
+|--|--|--|
+| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no |
+| `EventType` | The type of the event. | no |
+| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event originated. | no |
+| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no |
+| `MutationID` | The descriptor mutation that this schema change was processing. | no |
+
+### `force_delete_table_data_entry`
+
+
+
+| Field | Description | Sensitive |
+|--|--|--|
+| `DescriptorID` | | no |
+
+
+#### Common fields
+
+| Field | Description | Sensitive |
+|--|--|--|
+| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no |
+| `EventType` | The type of the event. | no |
+| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially |
+| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no |
+| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends |
+| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no |
+| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no |
+| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `rename_database` + +An event of type `rename_database` is recorded when a database is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The old name of the affected database. | no | +| `NewDatabaseName` | The new name of the affected database. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_function` + +An event of type `rename_function` is recorded when a user-defined function is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | The old name of the affected function. | no | +| `NewFunctionName` | The new name of the affected function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_schema` + +An event of type `rename_schema` is recorded when a schema is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The old name of the affected schema. | no | +| `NewSchemaName` | The new name of the affected schema. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_table` + +An event of type `rename_table` is recorded when a table, sequence or view is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The old name of the affected table. | no | +| `NewTableName` | The new name of the affected table. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `rename_type` + +An event of type `rename_type` is recorded when a user-defined type is renamed. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The old name of the affected type. | no | +| `NewTypeName` | The new name of the affected type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `reverse_schema_change` + +An event of type `reverse_schema_change` is recorded when an in-progress schema change +encounters a problem and is reversed. + + +| Field | Description | Sensitive | +|--|--|--| +| `Error` | The error encountered that caused the schema change to be reversed. The specific format of the error is variable and can change across releases without warning. | partially | +| `SQLSTATE` | The SQLSTATE code for the error. | no | +| `LatencyNanos` | The amount of time the schema change job took before being reverted. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. 
Expressed as nanoseconds since the Unix epoch. | no |
+| `EventType` | The type of the event. | no |
+| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event originated. | no |
+| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no |
+| `MutationID` | The descriptor mutation that this schema change was processing. | no |
+
+### `set_schema`
+
+An event of type `set_schema` is recorded when a table, view, sequence or type's schema is changed.
+
+
+| Field | Description | Sensitive |
+|--|--|--|
+| `DescriptorName` | The old name of the affected descriptor. | no |
+| `NewDescriptorName` | The new name of the affected descriptor. | no |
+| `DescriptorType` | The descriptor type being changed (table, view, sequence, type). | no |
+
+
+#### Common fields
+
+| Field | Description | Sensitive |
+|--|--|--|
+| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no |
+| `EventType` | The type of the event. | no |
+| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially |
+| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no |
+| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends |
+| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no |
+| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no |
+| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | + +### `truncate_table` + +An event of type `truncate_table` is recorded when a table is truncated. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_delete_descriptor` + +An event of type `unsafe_delete_descriptor` is recorded when a descriptor is written +using crdb_internal.unsafe_delete_descriptor(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. + + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | no | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_delete_namespace_entry` + +An event of type `unsafe_delete_namespace_entry` is recorded when a namespace entry is +written using crdb_internal.unsafe_delete_namespace_entry(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. + + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | no | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. 
The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_upsert_descriptor` + +An event of type `unsafe_upsert_descriptor` is recorded when a descriptor is written +using crdb_internal.unsafe_upsert_descriptor(). + + +| Field | Description | Sensitive | +|--|--|--| +| `PreviousDescriptor` | | yes | +| `NewDescriptor` | | yes | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `unsafe_upsert_namespace_entry` + +An event of type `unsafe_upsert_namespace_entry` is recorded when a namespace entry is +written using crdb_internal.unsafe_upsert_namespace_entry(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. + + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | no | +| `PreviousID` | | no | +| `Force` | | no | +| `FailedValidation` | | no | +| `ValidationErrors` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +## SQL Privilege changes + +Events in this category pertain to DDL (Data Definition Language) +operations performed by SQL statements that modify the privilege +grants for stored objects. 
+ +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `PRIVILEGES` channel. + + +### `alter_database_owner` + +An event of type `alter_database_owner` is recorded when a database's owner is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database being affected. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_default_privileges` + +An event of type `alter_default_privileges` is recorded when default privileges are changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. 
| yes | +| `RoleName` | Either role_name should be populated or for_all_roles should be true. The role having its default privileges altered. | yes | +| `ForAllRoles` | Identifies if FOR ALL ROLES is used. | no | +| `SchemaName` | The name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `alter_function_owner` + +An event of type `alter_function_owner` is recorded when the owner of a user-defined function is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | The name of the affected user-defined function. | yes | +| `Owner` | The name of the new owner. 
| yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_schema_owner` + +An event of type `alter_schema_owner` is recorded when a schema's owner is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. 
The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_table_owner` + +An event of type `alter_table_owner` is recorded when the owner of a table, view or sequence is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected object. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `alter_type_owner` + +An event of type `alter_type_owner` is recorded when the owner of a user-defined type is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `change_database_privilege` + +An event of type `change_database_privilege` is recorded when privileges are +added to / removed from a user for a database object. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_function_privilege` + + + +| Field | Description | Sensitive | +|--|--|--| +| `FuncName` | The name of the affected function. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_schema_privilege` + +An event of type `change_schema_privilege` is recorded when privileges are added to / +removed from a user for a schema object. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_table_privilege` + +An event of type `change_table_privilege` is recorded when privileges are added to / removed +from a user for a table, sequence or view object. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_type_privilege` + +An event of type `change_type_privilege` is recorded when privileges are added to / +removed from a user for a type object. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +## SQL Session events + +Events in this category report SQL client connections +and sessions. 
+ +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these miscellaneous events are +preserved in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `SESSIONS` channel. + + +### `client_authentication_failed` + +An event of type `client_authentication_failed` is reported when a client session +did not authenticate successfully. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Reason` | The reason for the authentication failure. See below for possible values for type `AuthFailReason`. | no | +| `Detail` | The detailed error for the authentication failure. | partially | +| `Method` | The authentication method used. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. 
| yes | + +### `client_authentication_info` + +An event of type `client_authentication_info` is reported for intermediate +steps during the authentication process. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Method` | The authentication method used, once known. | no | +| `Info` | The authentication progress message. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_authentication_ok` + +An event of type `client_authentication_ok` is reported when a client session +was authenticated successfully. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Method` | The authentication method used. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_connection_end` + +An event of type `client_connection_end` is reported when a client connection +is closed. This is reported even when authentication +fails, and even for simple cancellation messages. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_connections.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Duration` | The duration of the connection in nanoseconds. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. 
| no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | + +### `client_connection_start` + +An event of type `client_connection_start` is reported when a client connection +is established. This is reported even when authentication +fails, and even for simple cancellation messages. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_connections.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | + +### `client_session_end` + +An event of type `client_session_end` is reported when a client session +is completed. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Duration` | The duration of the connection in nanoseconds. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +## SQL Slow Query Log + +Events in this category report slow query execution. + +Note: these events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SQL_PERF` channel. + + +### `large_row` + +An event of type `large_row` is recorded when a statement tries to write a row larger than +cluster setting `sql.guardrails.max_row_size_log` to the database. Multiple +LargeRow events will be recorded for statements writing multiple large rows. +LargeRow events are recorded before the transaction commits, so in the case +of transaction abort there will not be a corresponding row in the database. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `RowSize` | | no | +| `TableID` | | no | +| `FamilyID` | | no | +| `PrimaryKey` | | yes | + +### `slow_query` + +An event of type `slow_query` is recorded when a query triggers the "slow query" condition. + +As of this writing, the condition requires: +- the cluster setting `sql.log.slow_query.latency_threshold` +set to a non-zero value, AND +- EITHER of the following conditions: +- the actual age of the query exceeds the configured threshold; AND/OR +- the query performs a full table/index scan AND the cluster setting +`sql.log.slow_query.experimental_full_table_scans.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. 
| no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `txn_rows_read_limit` + +An event of type `txn_rows_read_limit` is recorded when a transaction tries to read more rows than +cluster setting `sql.defaults.transaction_rows_read_log`. There will only be +a single record for a single transaction (unless it is retried) even if there +are more statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +### `txn_rows_written_limit` + +An event of type `txn_rows_written_limit` is recorded when a transaction tries to write more rows +than cluster setting `sql.defaults.transaction_rows_written_log`. There will +only be a single record for a single transaction (unless it is retried) even +if there are more mutation statements within the transaction that haven't +been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +## SQL Slow Query Log (Internal) + +Events in this category report slow query execution by +internal executors, i.e., when CockroachDB internally issues +SQL statements. + +Note: these events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SQL_INTERNAL_PERF` channel. + + +### `large_row_internal` + +An event of type `large_row_internal` is recorded when an internal query tries to write a row +larger than cluster settings `sql.guardrails.max_row_size_log` or +`sql.guardrails.max_row_size_err` to the database. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RowSize` | | no | +| `TableID` | | no | +| `FamilyID` | | no | +| `PrimaryKey` | | yes | + +### `slow_query_internal` + +An event of type `slow_query_internal` is recorded when a query triggers the "slow query" condition, +and the cluster setting `sql.log.slow_query.internal_queries.enabled` is +set. +See the documentation for the event type `slow_query` for details about +the "slow query" condition. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. 
| no | + +### `txn_rows_read_limit_internal` + +An event of type `txn_rows_read_limit_internal` is recorded when an internal transaction tries to +read more rows than cluster setting `sql.defaults.transaction_rows_read_log` +or `sql.defaults.transaction_rows_read_err`. There will only be a single +record for a single transaction (unless it is retried) even if there are more +mutation statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. 
| no | + +### `txn_rows_written_limit_internal` + +An event of type `txn_rows_written_limit_internal` is recorded when an internal transaction tries to +write more rows than cluster setting +`sql.defaults.transaction_rows_written_log` or +`sql.defaults.transaction_rows_written_err`. There will only be a single +record for a single transaction (unless it is retried) even if there are more +mutation statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. 
| no | + +## SQL User and Role operations + +Events in this category pertain to SQL statements that modify the +properties of users and roles. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `USER_ADMIN` channel. + + +### `alter_role` + +An event of type `alter_role` is recorded when a role is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the affected user/role. | yes | +| `Options` | The options set on the user/role. | no | +| `SetInfo` | Information corresponding to an ALTER ROLE SET statement. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `create_role` + +An event of type `create_role` is recorded when a role is created. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the new user/role. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `drop_role` + +An event of type `drop_role` is recorded when a role is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the affected user/role. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `grant_role` + +An event of type `grant_role` is recorded when a role is granted. + + +| Field | Description | Sensitive | +|--|--|--| +| `GranteeRoles` | The roles being granted to. | yes | +| `Members` | The roles being granted. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | + +### `password_hash_converted` + +An event of type `password_hash_converted` is recorded when the password credentials +are automatically converted server-side. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the user/role whose credentials have been converted. | yes | +| `OldMethod` | The previous hash method. | no | +| `NewMethod` | The new hash method. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Storage telemetry events + + + +Events in this category are logged to the `TELEMETRY` channel. + + +### `level_stats` + +An event of type `level_stats` contains per-level statistics for an LSM. + + +| Field | Description | Sensitive | +|--|--|--| +| `Level` | level is the level ID in a LSM (e.g. level(L0) == 0, etc.) | no | +| `NumFiles` | num_files is the number of files in the level (gauge). | no | +| `SizeBytes` | size_bytes is the size of the level, in bytes (gauge). | no | +| `Score` | score is the compaction score of the level (gauge). | no | +| `BytesIn` | bytes_in is the number of bytes written to this level (counter). | no | +| `BytesIngested` | bytes_ingested is the number of bytes ingested into this level (counter). | no | +| `BytesMoved` | bytes_moved is the number of bytes moved into this level via a move-compaction (counter). | no | +| `BytesRead` | bytes_read is the number of bytes read from this level, during compactions (counter). | no | +| `BytesCompacted` | bytes_compacted is the number of bytes written to this level during compactions (counter). | no | +| `BytesFlushed` | bytes flushed is the number of bytes flushed to this level. This value is always zero for levels other than L0 (counter). 
| no | +| `TablesCompacted` | tables_compacted is the count of tables compacted into this level (counter). | no | +| `TablesFlushed` | tables_flushed is the count of tables flushed into this level (counter). | no | +| `TablesIngested` | tables_ingested is the count of tables ingested into this level (counter). | no | +| `TablesMoved` | tables_moved is the count of tables moved into this level via move-compactions (counter). | no | +| `NumSublevels` | num_sublevel is the count of sublevels for the level. This value is always zero for levels other than L0 (gauge). | no | + + + +### `store_stats` + +An event of type `store_stats` contains per store stats. + +Note that because stats are scoped to the lifetime of the process, counters +(and certain gauges) will be reset across node restarts. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeId` | node_id is the ID of the node. | no | +| `StoreId` | store_id is the ID of the store. | no | +| `Levels` | levels is a nested message containing per-level statistics. | yes | +| `CacheSize` | cache_size is the size of the cache for the store, in bytes (gauge). | no | +| `CacheCount` | cache_count is the number of items in the cache (gauge). | no | +| `CacheHits` | cache_hits is the number of cache hits (counter). | no | +| `CacheMisses` | cache_misses is the number of cache misses (counter). | no | +| `CompactionCountDefault` | compaction_count_default is the count of default compactions (counter). | no | +| `CompactionCountDeleteOnly` | compaction_count_delete_only is the count of delete-only compactions (counter). | no | +| `CompactionCountElisionOnly` | compaction_count_elision_only is the count of elision-only compactions (counter). | no | +| `CompactionCountMove` | compaction_count_move is the count of move-compactions (counter). | no | +| `CompactionCountRead` | compaction_count_read is the count of read-compactions (counter). 
| no | +| `CompactionCountRewrite` | compaction_count_rewrite is the count of rewrite-compactions (counter). | no | +| `CompactionNumInProgress` | compactions_num_in_progress is the number of compactions in progress (gauge). | no | +| `CompactionMarkedFiles` | compaction_marked_files is the count of files marked for compaction (gauge). | no | +| `FlushCount` | flush_count is the number of flushes (counter). | no | +| `FlushIngestCount` | | no | +| `FlushIngestTableCount` | | no | +| `FlushIngestTableBytes` | | no | +| `IngestCount` | ingest_count is the number of successful ingest operations (counter). | no | +| `MemtableSize` | memtable_size is the total size allocated to all memtables and (large) batches, in bytes (gauge). | no | +| `MemtableCount` | memtable_count is the count of memtables (gauge). | no | +| `MemtableZombieCount` | memtable_zombie_count is the count of memtables no longer referenced by the current DB state, but still in use by an iterator (gauge). | no | +| `MemtableZombieSize` | memtable_zombie_size is the size, in bytes, of all zombie memtables (gauge). | no | +| `WalLiveCount` | wal_live_count is the count of live WAL files (gauge). | no | +| `WalLiveSize` | wal_live_size is the size, in bytes, of live data in WAL files. With WAL recycling, this value is less than the actual on-disk size of the WAL files (gauge). | no | +| `WalObsoleteCount` | wal_obsolete_count is the count of obsolete WAL files (gauge). | no | +| `WalObsoleteSize` | wal_obsolete_size is the size of obsolete WAL files, in bytes (gauge). | no | +| `WalPhysicalSize` | wal_physical_size is the size, in bytes, of the WAL files on disk (gauge). | no | +| `WalBytesIn` | wal_bytes_in is the number of logical bytes written to the WAL (counter). | no | +| `WalBytesWritten` | wal_bytes_written is the number of bytes written to the WAL (counter). 
| no | +| `TableObsoleteCount` | table_obsolete_count is the number of tables which are no longer referenced by the current DB state or any open iterators (gauge). | no | +| `TableObsoleteSize` | table_obsolete_size is the size, in bytes, of obsolete tables (gauge). | no | +| `TableZombieCount` | table_zombie_count is the number of tables no longer referenced by the current DB state, but are still in use by an open iterator (gauge). | no | +| `TableZombieSize` | table_zombie_size is the size, in bytes, of zombie tables (gauge). | no | +| `RangeKeySetsCount` | range_key_sets_count is the approximate count of internal range key sets in the store. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Telemetry events + + + +Events in this category are logged to the `TELEMETRY` channel. + + +### `captured_index_usage_stats` + +An event of type `captured_index_usage_stats` + + +| Field | Description | Sensitive | +|--|--|--| +| `TotalReadCount` | TotalReadCount is the number of times the index has been read. | no | +| `LastRead` | LastRead is the timestamp at which the index was last read. | no | +| `TableID` | TableID is the ID of the table on which the index was created. This is same as descpb.TableID and is unique within the cluster. | no | +| `IndexID` | IndexID is the ID of the index within the scope of the given table. | no | +| `DatabaseName` | DatabaseName is the name of the database in which the index was created. | no | +| `TableName` | TableName is the name of the table on which the index was created. | no | +| `IndexName` | IndexName is the name of the index within the scope of the given table. | no | +| `IndexType` | IndexType is the type of the index. Index types include "primary" and "secondary". | no | +| `IsUnique` | IsUnique indicates if the index has a UNIQUE constraint. 
| no | +| `IsInverted` | IsInverted indicates if the index is an inverted index. | no | +| `CreatedAt` | CreatedAt is the timestamp at which the index was created. | no | +| `SchemaName` | SchemaName is the name of the schema in which the index was created. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `m_v_c_c_iterator_stats` + +Internal storage iteration statistics for a single execution. + + +| Field | Description | Sensitive | +|--|--|--| +| `StepCount` | StepCount collects the number of times the iterator moved forward or backward over the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `StepCountInternal` | StepCountInternal collects the number of times the iterator moved forward or backward over LSM internal keys. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `SeekCount` | SeekCount collects the number of times the iterator moved to a specific key/value pair in the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `SeekCountInternal` | SeekCountInternal collects the number of times the iterator moved to a specific LSM internal key. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `BlockBytes` | BlockBytes collects the bytes in the loaded SSTable data blocks. For details, see pebble.InternalIteratorStats. | no | +| `BlockBytesInCache` | BlockBytesInCache collects the subset of BlockBytes in the block cache. For details, see pebble.InternalIteratorStats. | no | +| `KeyBytes` | KeyBytes collects the bytes in keys that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `ValueBytes` | ValueBytes collects the bytes in values that were iterated over. 
For details, see pebble.InternalIteratorStats. | no | +| `PointCount` | PointCount collects the count of point keys iterated over. For details, see pebble.InternalIteratorStats. | no | +| `PointsCoveredByRangeTombstones` | PointsCoveredByRangeTombstones collects the count of point keys that were iterated over that were covered by range tombstones. For details, see pebble.InternalIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeyCount` | RangeKeyCount collects the count of range keys encountered during iteration. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeyContainedPoints` | RangeKeyContainedPoints collects the count of point keys encountered within the bounds of a range key. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeySkippedPoints` | RangeKeySkippedPoints collects the count of the subset of ContainedPoints point keys that were skipped during iteration due to range-key masking. For details, see pkg/storage/engine.go, pebble.RangeKeyIteratorStats, and docs/tech-notes/mvcc-range-tombstones.md. | no | + + + +### `recovery_event` + +An event of type `recovery_event` is an event that is logged on every invocation of BACKUP, +RESTORE, and on every BACKUP schedule creation, with the appropriate subset +of fields populated depending on the type of event. This event is also +logged whenever a BACKUP or RESTORE job completes or fails. + + +| Field | Description | Sensitive | +|--|--|--| +| `RecoveryType` | RecoveryType is the type of recovery described by this event, which is one of - backup - scheduled_backup - create_schedule - restore

It can also be a job event corresponding to the recovery, which is one of - backup_job - scheduled_backup_job - restore_job | no | +| `TargetScope` | TargetScope is the largest scope of the targets that the user is backing up or restoring based on the following order: table < schema < database < full cluster. | no | +| `IsMultiregionTarget` | IsMultiregionTarget is true if any of the targets contain objects with multi-region primitives. | no | +| `TargetCount` | TargetCount is the number of targets in the BACKUP/RESTORE. | no | +| `DestinationSubdirType` | DestinationSubdirType is - latest: if using the latest subdir - standard: if using a date-based subdir - custom: if using a custom subdir that's not date-based | no | +| `DestinationStorageTypes` | DestinationStorageTypes are the types of storage that the user is backing up to or restoring from. | no | +| `DestinationAuthTypes` | DestinationAuthTypes are the types of authentication methods that the user is using to access the destination storage. | no | +| `IsLocalityAware` | IsLocalityAware indicates if the BACKUP or RESTORE is locality aware. | no | +| `AsOfInterval` | AsOfInterval is the time interval in nanoseconds between the statement timestamp and the timestamp resolved by the AS OF SYSTEM TIME expression. The interval is expressed in nanoseconds. | no | +| `WithRevisionHistory` | WithRevisionHistory is true if the BACKUP includes revision history. | no | +| `HasEncryptionPassphrase` | HasEncryptionPassphrase is true if the user provided an encryption passphrase to encrypt/decrypt their backup. | no | +| `KMSType` | KMSType is the type of KMS the user is using to encrypt/decrypt their backup. | no | +| `KMSCount` | KMSCount is the number of KMS the user is using. | no | +| `Options` | Options contain all the names of the options specified by the user in the BACKUP or RESTORE statement. For options that are accompanied by a value, only those with non-empty values will be present.

It's important to note that there are no option values anywhere in the event payload. Future changes to telemetry should refrain from adding values to the payload unless they are properly redacted. | no | +| `DebugPauseOn` | DebugPauseOn is the type of event that the restore should pause on for debugging purposes. Currently only "error" is supported. | no | +| `JobID` | JobID is the ID of the BACKUP/RESTORE job. | no | +| `ResultStatus` | ResultStatus indicates whether the job succeeded or failed. | no | +| `ErrorText` | ErrorText is the text of the error that caused the job to fail. | partially | +| `RecurringCron` | RecurringCron is the crontab for the incremental backup. | no | +| `FullBackupCron` | FullBackupCron is the crontab for the full backup. | no | +| `CustomFirstRunTime` | CustomFirstRunTime is the timestamp for the user configured first run time. Expressed as nanoseconds since the Unix epoch. | no | +| `OnExecutionFailure` | OnExecutionFailure describes the desired behavior if the schedule fails to execute. | no | +| `OnPreviousRunning` | OnPreviousRunning describes the desired behavior if the previously scheduled BACKUP is still running. | no | +| `IgnoreExistingBackup` | IgnoreExistingBackup is true iff the BACKUP schedule should still be created even if a backup is already present in its destination. | no | +| `ApplicationName` | The application name for the session where recovery event was created. | no | +| `NumRows` | NumRows is the number of rows successfully imported, backed up or restored. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `sampled_exec_stats` + +An event of type `sampled_exec_stats` contains execution statistics that apply to both statements +and transactions. These stats as a whole are collected using a sampling approach. 
+These exec stats are meant to contain the same fields as ExecStats in +apps_stats.proto but are for a single execution rather than aggregated executions. +Fields in this struct should be updated in sync with apps_stats.proto. + + +| Field | Description | Sensitive | +|--|--|--| +| `NetworkBytes` | NetworkBytes collects the number of bytes sent over the network by DistSQL components. | no | +| `MaxMemUsage` | MaxMemUsage collects the maximum memory usage that occurred on a node. | no | +| `ContentionTime` | ContentionTime collects the time in seconds statements in the transaction spent contending. | no | +| `NetworkMessages` | NetworkMessages collects the number of messages that were sent over the network by DistSQL components. | no | +| `MaxDiskUsage` | MaxDiskUsage collects the maximum temporary disk usage that occurred. This is set in cases where a query had to spill to disk, e.g. when performing a large sort where not all of the tuples fit in memory. | no | +| `CPUSQLNanos` | CPUSQLNanos collects the CPU time spent executing SQL operations in nanoseconds. Currently, it is only collected for statements without mutations that have a vectorized plan. | no | +| `MVCCIteratorStats` | Internal storage iteration statistics. | yes | + + + +### `sampled_query` + +An event of type `sampled_query` is the SQL query event logged to the telemetry channel. It +contains common SQL event/execution details. + + +| Field | Description | Sensitive | +|--|--|--| +| `SkippedQueries` | skipped_queries indicate how many SQL statements were not considered for sampling prior to this one. If the field is omitted, or its value is zero, this indicates that no statement was omitted since the last event. | no | +| `CostEstimate` | Cost of the query as estimated by the optimizer. | no | +| `Distribution` | The distribution of the DistSQL query plan (local, full, or partial). | no | +| `PlanGist` | The query's plan gist bytes as a base64 encoded string. 
| no | +| `SessionID` | SessionID is the ID of the session that initiated the query. | no | +| `Database` | Name of the database that initiated the query. | no | +| `StatementID` | Statement ID of the query. | no | +| `TransactionID` | Transaction ID of the query. | no | +| `MaxFullScanRowsEstimate` | Maximum number of rows scanned by a full scan, as estimated by the optimizer. | no | +| `TotalScanRowsEstimate` | Total number of rows read by all scans in the query, as estimated by the optimizer. | no | +| `OutputRowsEstimate` | The number of rows output by the query, as estimated by the optimizer. | no | +| `StatsAvailable` | Whether table statistics were available to the optimizer when planning the query. | no | +| `NanosSinceStatsCollected` | The maximum number of nanoseconds that have passed since stats were collected on any table scanned by this query. | no | +| `BytesRead` | The number of bytes read from disk. | no | +| `RowsRead` | The number of rows read from disk. | no | +| `RowsWritten` | The number of rows written. | no | +| `InnerJoinCount` | The number of inner joins in the query plan. | no | +| `LeftOuterJoinCount` | The number of left (or right) outer joins in the query plan. | no | +| `FullOuterJoinCount` | The number of full outer joins in the query plan. | no | +| `SemiJoinCount` | The number of semi joins in the query plan. | no | +| `AntiJoinCount` | The number of anti joins in the query plan. | no | +| `IntersectAllJoinCount` | The number of intersect all joins in the query plan. | no | +| `ExceptAllJoinCount` | The number of except all joins in the query plan. | no | +| `HashJoinCount` | The number of hash joins in the query plan. | no | +| `CrossJoinCount` | The number of cross joins in the query plan. | no | +| `IndexJoinCount` | The number of index joins in the query plan. | no | +| `LookupJoinCount` | The number of lookup joins in the query plan. | no | +| `MergeJoinCount` | The number of merge joins in the query plan. 
| no | +| `InvertedJoinCount` | The number of inverted joins in the query plan. | no | +| `ApplyJoinCount` | The number of apply joins in the query plan. | no | +| `ZigZagJoinCount` | The number of zig zag joins in the query plan. | no | +| `ContentionNanos` | The duration of time in nanoseconds that the query experienced contention. | no | +| `Regions` | The regions of the nodes where SQL processors ran. | no | +| `NetworkBytesSent` | The number of network bytes by DistSQL components. | no | +| `MaxMemUsage` | The maximum amount of memory usage by nodes for this query. | no | +| `MaxDiskUsage` | The maximum amount of disk usage by nodes for this query. | no | +| `KVBytesRead` | The number of bytes read at the KV layer for this query. | no | +| `KVPairsRead` | The number of key-value pairs read at the KV layer for this query. | no | +| `KVRowsRead` | The number of rows read at the KV layer for this query. | no | +| `NetworkMessages` | The number of network messages sent by nodes for this query by DistSQL components. | no | +| `IndexRecommendations` | Generated index recommendations for this query. | no | +| `ScanCount` | The number of scans in the query plan. | no | +| `ScanWithStatsCount` | The number of scans using statistics (including forecasted statistics) in the query plan. | no | +| `ScanWithStatsForecastCount` | The number of scans using forecasted statistics in the query plan. | no | +| `TotalScanRowsWithoutForecastsEstimate` | Total number of rows read by all scans in the query, as estimated by the optimizer without using forecasts. | no | +| `NanosSinceStatsForecasted` | The greatest quantity of nanoseconds that have passed since the forecast time (or until the forecast time, if it is in the future, in which case it will be negative) for any table with forecasted stats scanned by this query. | no | +| `Indexes` | The list of indexes used by this query. 
| no | +| `CpuTimeNanos` | Collects the cumulative CPU time spent executing SQL operations in nanoseconds. Currently, it is only collected for statements without mutations that have a vectorized plan. | no | +| `KvGrpcCalls` | The number of grpc calls done to get data from KV nodes | no | +| `KvTimeNanos` | Cumulated time spent waiting for a KV request. This includes disk IO time and potentially network time (if any of the keys are not local). | no | +| `ServiceLatencyNanos` | The time to service the query, from start of parse to end of execute. | no | +| `OverheadLatencyNanos` | The difference between service latency and the sum of parse latency + plan latency + run latency. | no | +| `RunLatencyNanos` | The time to run the query and fetch or compute the result rows. | no | +| `PlanLatencyNanos` | The time to transform the AST into a logical query plan. | no | +| `IdleLatencyNanos` | The time between statement executions in a transaction | no | +| `ParseLatencyNanos` | The time to transform the SQL string into an abstract syntax tree (AST). | no | +| `MvccStepCount` | StepCount collects the number of times the iterator moved forward or backward over the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccStepCountInternal` | StepCountInternal collects the number of times the iterator moved forward or backward over LSM internal keys. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccSeekCount` | SeekCount collects the number of times the iterator moved to a specific key/value pair in the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccSeekCountInternal` | SeekCountInternal collects the number of times the iterator moved to a specific LSM internal key. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. 
| no | +| `MvccBlockBytes` | BlockBytes collects the bytes in the loaded SSTable data blocks. For details, see pebble.InternalIteratorStats. | no | +| `MvccBlockBytesInCache` | BlockBytesInCache collects the subset of BlockBytes in the block cache. For details, see pebble.InternalIteratorStats. | no | +| `MvccKeyBytes` | KeyBytes collects the bytes in keys that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccValueBytes` | ValueBytes collects the bytes in values that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccPointCount` | PointCount collects the count of point keys iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccPointsCoveredByRangeTombstones` | PointsCoveredByRangeTombstones collects the count of point keys that were iterated over that were covered by range tombstones. For details, see pebble.InternalIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeyCount` | RangeKeyCount collects the count of range keys encountered during iteration. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeyContainedPoints` | RangeKeyContainedPoints collects the count of point keys encountered within the bounds of a range key. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeySkippedPoints` | RangeKeySkippedPoints collects the count of the subset of ContainedPoints point keys that were skipped during iteration due to range-key masking. For details, see pkg/storage/engine.go, pebble.RangeKeyIteratorStats, and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `SchemaChangerMode` | SchemaChangerMode is the mode that was used to execute the schema change, if any. | no | +| `SQLInstanceIDs` | SQLInstanceIDs is a list of all the SQL instances used in this statement's execution. 
| no | +| `KVNodeIDs` | KVNodeIDs is a list of all the KV nodes used in this statement's execution. | no | +| `StatementFingerprintID` | Statement fingerprint ID of the query. | no | +| `UsedFollowerRead` | UsedFollowerRead indicates whether at least some reads were served by the follower replicas. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. 
| no | +| `NumRetries` | Number of retries, when the txn was retried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `sampled_transaction` + +An event of type `sampled_transaction` is the event logged to telemetry at the end of transaction execution. + + +| Field | Description | Sensitive | +|--|--|--| +| `User` | User is the user account that triggered the transaction. The special usernames `root` and `node` are not considered sensitive. | depends | +| `ApplicationName` | ApplicationName is the application name for the session where the transaction was executed. This is included in the event to ease filtering of logging output by application. | no | +| `TxnCounter` | TxnCounter is the sequence number of the SQL transaction inside its session. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `TransactionID` | TransactionID is the id of the transaction. | no | +| `Committed` | Committed indicates if the transaction committed successfully. We want to include this value even if it is false. | no | +| `ImplicitTxn` | ImplicitTxn indicates if the transaction was an implicit one. We want to include this value even if it is false. | no | +| `StartTimeUnixNanos` | StartTimeUnixNanos is the time the transaction was started. Expressed as unix time in nanoseconds. | no | +| `EndTimeUnixNanos` | EndTimeUnixNanos is the time the transaction finished (either committed or aborted). Expressed as unix time in nanoseconds. 
| no | +| `ServiceLatNanos` | ServiceLatNanos is the time to service the whole transaction, from start to end of execution. | no | +| `SQLSTATE` | SQLSTATE is the SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | ErrorText is the text of the error if any. | partially | +| `NumRetries` | NumRetries is the number of times the txn was retried automatically by the server. | no | +| `LastAutoRetryReason` | LastAutoRetryReason is a string containing the reason for the last automatic retry. | partially | +| `NumRows` | NumRows is the total number of rows returned across all statements. | no | +| `RetryLatNanos` | RetryLatNanos is the amount of time spent retrying the transaction. | no | +| `CommitLatNanos` | CommitLatNanos is the amount of time spent committing the transaction after all statement operations. | no | +| `IdleLatNanos` | IdleLatNanos is the amount of time spent waiting for the client to send statements while the transaction is open. | no | +| `BytesRead` | BytesRead is the number of bytes read from disk. | no | +| `RowsRead` | RowsRead is the number of rows read from disk. | no | +| `RowsWritten` | RowsWritten is the number of rows written to disk. | no | +| `SampledExecStats` | SampledExecStats is a nested field containing execution statistics. This field will be omitted if the stats were not sampled. | yes | +| `SkippedTransactions` | SkippedTransactions is the number of transactions that were skipped as part of sampling prior to this one. We only count skipped transactions when telemetry logging is enabled and the sampling mode is set to "transaction". | no | +| `TransactionFingerprintID` | TransactionFingerprintID is the fingerprint ID of the transaction. This can be used to find the transaction in the console. | no | +| `StatementFingerprintIDs` | StatementFingerprintIDs is an array of statement fingerprint IDs belonging to this transaction. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `schema_descriptor` + +An event of type `schema_descriptor` is an event for schema telemetry, whose purpose is +to take periodic snapshots of the cluster's SQL schema and publish them in +the telemetry log channel. For all intents and purposes, the data in such a +snapshot can be thought of the outer join of certain system tables: +namespace, descriptor, and at some point perhaps zones, etc. + +Snapshots are too large to conveniently be published as a single log event, +so instead they're broken down into SchemaDescriptor events which +contain the data in one record of this outer join projection. These events +are prefixed by a header (a SchemaSnapshotMetadata event). + + +| Field | Description | Sensitive | +|--|--|--| +| `SnapshotID` | SnapshotID is the unique identifier of the snapshot that this event is part of. | no | +| `ParentDatabaseID` | ParentDatabaseID matches the same key column in system.namespace. | no | +| `ParentSchemaID` | ParentSchemaID matches the same key column in system.namespace. | no | +| `Name` | Name matches the same key column in system.namespace. | no | +| `DescID` | DescID matches the 'id' column in system.namespace and system.descriptor. | no | +| `Desc` | Desc matches the 'descriptor' column in system.descriptor. Some contents of the descriptor may be redacted to prevent leaking PII. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `schema_snapshot_metadata` + +An event of type `schema_snapshot_metadata` is an event describing a schema snapshot, which +is a set of SchemaDescriptor messages sharing the same SnapshotID. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `SnapshotID` | SnapshotID is the unique identifier of this snapshot. | no | +| `NumRecords` | NumRecords is how many SchemaDescriptor events are in the snapshot. | no | +| `AsOfTimestamp` | AsOfTimestamp is when the snapshot was taken. This is equivalent to the timestamp given in the AS OF SYSTEM TIME clause when querying the namespace and descriptor tables in the system database. Expressed as nanoseconds since the Unix epoch. | no | +| `Errors` | Errors records any errors encountered when post-processing this snapshot, which includes the redaction of any potential PII. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Zone config events + +Events in this category pertain to zone configuration changes on +the SQL schema or system ranges. + +When zone configs apply to individual tables or other objects in a +SQL logical schema, they are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these zone config events are preserved +in each tenant's own `system.eventlog` table. + +When they apply to cluster-level ranges (e.g., the system zone config), +they are stored in the system tenant's own `system.eventlog` table. + +Events in this category are logged to the `OPS` channel. + + +### `remove_zone_config` + +An event of type `remove_zone_config` is recorded when a zone config is removed. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + +### `set_zone_config` + +An event of type `set_zone_config` is recorded when a zone config is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `ResolvedOldConfig` | The string representation of the resolved old zone config. This is not necessarily the same as the zone config that was previously set -- as it includes the resolved values of the zone config options. In other words, a zone config that hasn't been properly "set" yet (and inherits from its parent) will have a resolved_old_config that has details of the values it inherits from its parent. This is particularly useful to get a proper diff between the old and new zone config. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + + + + +## Enumeration types + +### `AuthFailReason` + +AuthFailReason is the inventory of possible reasons for an +authentication failure. + + +| Value | Textual alias in code or documentation | Description | +|--|--|--| +| 0 | UNKNOWN | is reported when the reason is unknown. | +| 1 | USER_RETRIEVAL_ERROR | occurs when there was an internal error accessing the principals. | +| 2 | USER_NOT_FOUND | occurs when the principal is unknown. | +| 3 | LOGIN_DISABLED | occurs when the user does not have LOGIN privileges. | +| 4 | METHOD_NOT_FOUND | occurs when no HBA rule matches or the method does not exist. | +| 5 | PRE_HOOK_ERROR | occurs when the authentication handshake encountered a protocol error. | +| 6 | CREDENTIALS_INVALID | occurs when the client-provided credentials were invalid. | +| 7 | CREDENTIALS_EXPIRED | occur when the credentials provided by the client are expired. 
| +| 8 | NO_REPLICATION_ROLEOPTION | occurs when the connection requires a replication role option, but the user does not have it. | +| 9 | AUTHORIZATION_ERROR | is used for errors during the authorization phase. For example, this would include issues with mapping LDAP groups to SQL roles and granting those roles to the user. | +| 10 | PROVISIONING_ERROR | is used for errors during the user provisioning phase. This would include errors when the transaction to provision the authenticating user failed to execute. | + + + diff --git a/src/current/_includes/cockroach-generated/release-25.3/logformats.md b/src/current/_includes/cockroach-generated/release-25.3/logformats.md new file mode 100644 index 00000000000..89580c9fc15 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.3/logformats.md @@ -0,0 +1,382 @@ + +The supported log output formats are documented below. + + +- [`crdb-v1`](#format-crdb-v1) + +- [`crdb-v1-count`](#format-crdb-v1-count) + +- [`crdb-v1-tty`](#format-crdb-v1-tty) + +- [`crdb-v1-tty-count`](#format-crdb-v1-tty-count) + +- [`crdb-v2`](#format-crdb-v2) + +- [`crdb-v2-tty`](#format-crdb-v2-tty) + +- [`json`](#format-json) + +- [`json-compact`](#format-json-compact) + +- [`json-fluent`](#format-json-fluent) + +- [`json-fluent-compact`](#format-json-fluent-compact) + + + +## Format `crdb-v1` + + +This is a legacy file format used from CockroachDB v1.0. + +Each log entry is emitted using a common prefix, described below, followed by: + +- The logging context tags enclosed between `[` and `]`, if any. It is possible + for this to be omitted if there were no context tags. +- Optionally, a counter column, if the option 'show-counter' is enabled. See below for details. +- the text of the log entry. + +Beware that the text of the log entry can span multiple lines. +The following caveats apply: + +- The text of the log entry can start with text enclosed between `[` and `]`. 
+ If there were no logging tags to start with, it is not possible to distinguish between + logging context tag information and a `[...]` string in the main text of the + log entry. This means that this format is ambiguous. + + To remove this ambiguity, you can use the option 'show-counter'. + +- The text of the log entry can embed arbitrary application-level strings, + including strings that represent log entries. In particular, an accident + of implementation can cause the common entry prefix (described below) + to also appear on a line of its own, as part of the payload of a previous + log entry. There is no automated way to recognize when this occurs. + Care must be taken by a human observer to recognize these situations. + +- The log entry parser provided by CockroachDB to read log files is faulty + and is unable to recognize the aforementioned pitfall; nor can it read + entries larger than 64KiB successfully. Generally, use of this internal + log entry parser is discouraged for entries written with this format. + +See the newer format `crdb-v2` for an alternative +without these limitations. + +### Header lines + +At the beginning of each file, a header is printed using a similar format as +regular log entries. This header reports when the file was created, +which parameters were used to start the server, the server identifiers +if known, and other metadata about the running process. + +- This header appears to be logged at severity `INFO` (with an `I` prefix + at the start of the line) even though it does not really have a severity. +- The header is printed unconditionally even when a filter is configured to + omit entries at the `INFO` level. + +### Common log entry prefix + +Each line of output starts with the following prefix: + + Lyymmdd hh:mm:ss.uuuuuu goid [chan@]file:line marker tags counter + +Reminder, the tags may be omitted; and the counter is only printed if the option +'show-counter' is specified. 
+ +| Field | Description | +|-----------------|--------------------------------------------------------------------------------------------------------------------------------------| +| L | A single character, representing the [log level](logging.html#logging-levels-severities) (e.g., `I` for `INFO`). | +| yy | The year (zero padded; i.e., 2016 is `16`). | +| mm | The month (zero padded; i.e., May is `05`). | +| dd | The day (zero padded). | +| hh:mm:ss.uuuuuu | Time in hours, minutes and fractional seconds. Timezone is UTC. | +| goid | The goroutine id (omitted if zero for use by tests). | +| chan | The channel number (omitted if zero for backward compatibility). | +| file | The file name where the entry originated. | +| line | The line number where the entry originated. | +| marker | Redactability marker "⋮" (see below for details). | +| tags | The logging tags, enclosed between `[` and `]`. May be absent. | +| counter | The entry counter. Only included if 'show-counter' is enabled. | + +The redactability marker can be empty; in this case, its position in the common prefix is +a double ASCII space character which can be used to reliably identify this situation. + +If the marker "⋮" is present, the remainder of the log entry +contains delimiters (‹...›) around +fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) when log redaction is requested. + + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `show-counter` | Whether to include the counter column in the line header. Without it, the format may be ambiguous due to the optionality of tags. | +| `colors` | The color profile to use. Possible values: none, auto, ansi, 256color. Default is auto. | +| `timezone` | The timezone to use for the timestamp column. 
The value can be any timezone name recognized by the Go standard library. Default is `UTC` | + + + +## Format `crdb-v1-count` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: true` +- `colors: none` + + +## Format `crdb-v1-tty` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: false` +- `colors: auto` + + +## Format `crdb-v1-tty-count` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: true` +- `colors: auto` + + +## Format `crdb-v2` + +This is the main file format used from CockroachDB v21.1. + +Each log entry is emitted using a common prefix, described below, +followed by the text of the log entry. + +### Entry format + +Each line of output starts with the following prefix: + + Lyymmdd hh:mm:ss.uuuuuu goid [chan@]file:line marker [tags...] counter cont + +| Field | Description | +|-----------------|--------------------------------------------------------------------------------------------------------------------------------------| +| L | A single character, representing the [log level](logging.html#logging-levels-severities) (e.g., `I` for `INFO`). | +| yy | The year (zero padded; i.e., 2016 is `16`). | +| mm | The month (zero padded; i.e., May is `05`). | +| dd | The day (zero padded). | +| hh:mm:ss.uuuuuu | Time in hours, minutes and fractional seconds. Timezone is UTC. | +| goid | The goroutine id (zero when cannot be determined). | +| chan | The channel number (omitted if zero for backward compatibility). | +| file | The file name where the entry originated. Also see below. | +| line | The line number where the entry originated. | +| marker | Redactability marker "⋮" (see below for details). | +| tags | The logging tags, enclosed between `[` and `]`. See below. | +| counter | The optional entry counter (see below for details). 
|
+| cont | Continuation mark for structured and multi-line entries. See below. |
+
+The `chan@` prefix before the file name indicates the logging channel,
+and is omitted if the channel is `DEV`.
+
+The file name may be prefixed by the string `(gostd) ` to indicate
+that the log entry was produced inside the Go standard library, instead
+of a CockroachDB component. Entry parsers must be configured to ignore this prefix
+when present.
+
+`marker` can be empty; in this case, its position in the common prefix is
+a double ASCII space character which can be used to reliably identify this situation.
+If the marker "⋮" is present, the remainder of the log entry
+contains delimiters (‹...›)
+around fields that are considered sensitive. These markers are automatically recognized
+by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html)
+when log redaction is requested.
+
+The logging `tags` are enclosed between square brackets `[...]`,
+and the syntax `[-]` is used when there are no logging tags
+associated with the log entry.
+
+`counter` is numeric, and is incremented for every
+log entry emitted to this sink. (There is thus one counter sequence per
+sink.) For entries that do not have a counter value
+associated (e.g., header entries in file sinks), the counter position
+in the common prefix is empty: `tags` is then
+followed by two ASCII space characters, instead of one space; the `counter`,
+and another space. The presence of the two ASCII spaces indicates
+reliably that no counter was present.
+
+`cont` is a format/continuation indicator:
+
+| Continuation indicator | ASCII | Description |
+|------------------------|-------|--|
+| space | 0x20 | Start of an unstructured entry. |
+| equal sign, "=" | 0x3d | Start of a structured entry. |
+| exclamation mark, "!" | 0x21 | Start of an embedded stack trace. |
+| plus sign, "+" | 0x2b | Continuation of a multi-line entry. 
The payload contains a newline character at this position. | +| vertical bar | 0x7c | Continuation of a large entry. | + +### Examples + +Example single-line unstructured entry: + +~~~ +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 started with engine type ‹2› +~~~ + +Example multi-line unstructured entry: + +~~~ +I210116 21:49:17.083093 14 1@cli/start.go:690 ⋮ [-] 40 node startup completed: +I210116 21:49:17.083093 14 1@cli/start.go:690 ⋮ [-] 40 +CockroachDB node starting at 2021-01-16 21:49 (took 0.0s) +~~~ + +Example structured entry: + +~~~ +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 ={"Timestamp":1610833757080706620,"EventType":"node_restart"} +~~~ + +Example long entries broken up into multiple lines: + +~~~ +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.... +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 |aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +~~~ + +~~~ +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 ={"Timestamp":1610833757080706620,"EventTy... +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 |pe":"node_restart"} +~~~ + +### Backward-compatibility notes + +Entries in this format can be read by most `crdb-v1` log parsers, +in particular the one included in the DB console and +also the [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) +facility. + +However, implementers of previous version parsers must +understand that the logging tags field is now always +included, and the lack of logging tags is included +by a tag string set to `[-]`. + +Likewise, the entry counter is now also always included, +and there is a special character after `counter` +to indicate whether the remainder of the line is a +structured entry, or a continuation of a previous entry. + +Finally, in the previous format, structured entries +were prefixed with the string `Structured entry: `. In +the new format, they are prefixed by the `=` continuation +indicator. 
+ + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `colors` | The color profile to use. Possible values: none, auto, ansi, 256color. Default is auto. | +| `timezone` | The timezone to use for the timestamp column. The value can be any timezone name recognized by the Go standard library. Default is `UTC` | + + + +## Format `crdb-v2-tty` + +This format name is an alias for 'crdb-v2' with +the following format option defaults: + +- `colors: auto` + + +## Format `json` + +This format emits log entries as a JSON payload. + +The JSON object is guaranteed to not contain unescaped newlines +or other special characters, and the entry as a whole is followed +by a newline character. This makes the format suitable for +processing over a stream unambiguously. + +Each entry contains at least the following fields: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `tag` | `tag` | (Only if the option `fluent-tag: true` is given.) A Fluent tag for the event, formed by the process name and the logging channel. | +| `d` | `datetime` | The pretty-printed date/time of the event timestamp, if enabled via options. | +| `f` | `file` | The name of the source file where the event was emitted. | +| `g` | `goroutine` | The identifier of the goroutine where the event was emitted. | +| `l` | `line` | The line number where the event was emitted in the source. | +| `r` | `redactable` | Whether the payload is redactable (see below for details). | +| `t` | `timestamp` | The timestamp at which the event was emitted on the logging channel. | +| `v` | `version` | The binary version with which the event was generated. 
|
+
+
+After a couple of *header* entries written at the beginning of each log sink,
+all subsequent log entries also contain the following fields:
+
+| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description |
+|-------|-------|-------------|
+| `C` | `channel` | The name of the logging channel where the event was sent. |
+| `sev` | `severity` | The severity of the event. |
+| `c` | `channel_numeric` | The numeric identifier for the logging channel where the event was sent. |
+| `n` | `entry_counter` | The entry number on this logging sink, relative to the last process restart. |
+| `s` | `severity_numeric` | The numeric value of the severity of the event. |
+
+
+Additionally, the following fields are conditionally present:
+
+| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description |
+|-------|-------|-------------|
+| `N` | `node_id` | The node ID where the event was generated, once known. Only reported for single-tenant or KV servers. |
+| `x` | `cluster_id` | The cluster ID where the event was generated, once known. Only reported for single-tenant or KV servers. |
+| `q` | `instance_id` | The SQL instance ID where the event was generated, once known. |
+| `T` | `tenant_id` | The SQL tenant ID where the event was generated, once known. |
+| `V` | `tenant_name` | The SQL virtual cluster where the event was generated, once known. |
+| `tags` | `tags` | The logging context tags for the entry, if there were context tags. |
+| `message` | `message` | For unstructured events, the flat text payload. |
+| `event` | `event` | The logging event, if structured (see below for details). |
+| `stacks` | `stacks` | Goroutine stacks, for fatal events. |
+
+When an entry is structured, the `event` field maps to a dictionary
+whose structure is one of the documented structured events. 
See the [reference documentation](eventlog.html) +for structured events for a list of possible payloads. + +When the entry is marked as `redactable`, the `tags`, `message`, and/or `event` payloads +contain delimiters (‹...›) around +fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) when log redaction is requested. + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `datetime-format` | The format to use for the `datetime` field. The value can be one of `none`, `iso8601`/`rfc3339` (synonyms), or `rfc1123`. Default is `none`. | +| `datetime-timezone` | The timezone to use for the `datetime` field. The value can be any timezone name recognized by the Go standard library. Default is `UTC` | +| `tag-style` | The tags to include in the envelope. The value can be `compact` (one letter tags) or `verbose` (long-form tags). Default is `verbose`. | +| `fluent-tag` | Whether to produce an additional field called `tag` for Fluent compatibility. Default is `false`. 
|
+
+
+## Format `json-compact`
+
+This format name is an alias for 'json' with
+the following format option defaults:
+
+- `fluent-tag: false`
+- `tag-style: compact`
+
+
+## Format `json-fluent`
+
+This format name is an alias for 'json' with
+the following format option defaults:
+
+- `fluent-tag: true`
+- `tag-style: verbose`
+
+
+## Format `json-fluent-compact`
+
+This format name is an alias for 'json' with
+the following format option defaults:
+
+- `fluent-tag: true`
+- `tag-style: compact`
+
+
diff --git a/src/current/_includes/cockroach-generated/release-25.3/logging.md b/src/current/_includes/cockroach-generated/release-25.3/logging.md
new file mode 100644
index 00000000000..8b628dc2dd9
--- /dev/null
+++ b/src/current/_includes/cockroach-generated/release-25.3/logging.md
@@ -0,0 +1,179 @@
+## Logging levels (severities)
+
+### INFO
+
+The `INFO` severity is used for informational messages that do not
+require action.
+
+### WARNING
+
+The `WARNING` severity is used for situations which may require special handling,
+where normal operation is expected to resume automatically.
+
+### ERROR
+
+The `ERROR` severity is used for situations that require special handling,
+where normal operation could not proceed as expected.
+Other operations can continue mostly unaffected.
+
+### FATAL
+
+The `FATAL` severity is used for situations that require an immediate, hard
+server shutdown. A report is also sent to telemetry if telemetry
+is enabled.
+
+
+## Logging channels
+
+### `DEV`
+
+The `DEV` channel is used during development to collect log
+details useful for troubleshooting that fall outside the
+scope of other channels. It is also the default logging
+channel for events not associated with a channel.
+
+This channel is special in that there are no constraints as to
+what may or may not be logged on it. 
Conversely, users in +production deployments are invited to not collect `DEV` logs in +centralized logging facilities, because they likely contain +sensitive operational data. +See [Configure logs](configure-logs.html#dev-channel). + +### `OPS` + +The `OPS` channel is used to report "point" operational events, +initiated by user operators or automation: + + - Operator or system actions on server processes: process starts, + stops, shutdowns, crashes (if they can be logged), + including each time: command-line parameters, current version being run + - Actions that impact the topology of a cluster: node additions, + removals, decommissions, etc. + - Job-related initiation or termination + - [Cluster setting](cluster-settings.html) changes + - [Zone configuration](configure-replication-zones.html) changes + +### `HEALTH` + +The `HEALTH` channel is used to report "background" operational +events, initiated by CockroachDB or reporting on automatic processes: + + - Current resource usage, including critical resource usage + - Node-node connection events, including connection errors and + gossip details + - Range and table leasing events + - Up- and down-replication, range unavailability + +### `STORAGE` + +The `STORAGE` channel is used to report low-level storage +layer events (RocksDB/Pebble). + +### `SESSIONS` + +The `SESSIONS` channel is used to report client network activity when enabled via +the `server.auth_log.sql_connections.enabled` and/or +`server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): + + - Connections opened/closed + - Authentication events: logins, failed attempts + - Session and query cancellation + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. 
+ +### `SQL_SCHEMA` + +The `SQL_SCHEMA` channel is used to report changes to the +SQL logical schema, excluding privilege and ownership changes +(which are reported separately on the `PRIVILEGES` channel) and +zone configuration changes (which go to the `OPS` channel). + +This includes: + + - Database/schema/table/sequence/view/type creation + - Adding/removing/changing table columns + - Changing sequence parameters + +`SQL_SCHEMA` events generally comprise changes to the schema that affect the +functional behavior of client apps using stored objects. + +### `USER_ADMIN` + +The `USER_ADMIN` channel is used to report changes +in users and roles, including: + + - Users added/dropped + - Changes to authentication credentials (e.g., passwords, validity, etc.) + - Role grants/revocations + - Role option grants/revocations + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. + +### `PRIVILEGES` + +The `PRIVILEGES` channel is used to report data +authorization changes, including: + + - Privilege grants/revocations on database, objects, etc. + - Object ownership changes + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. + +### `SENSITIVE_ACCESS` + +The `SENSITIVE_ACCESS` channel is used to report SQL +data access to sensitive data: + + - Data access audit events (when table audit is enabled via + [ALTER TABLE ... EXPERIMENTAL_AUDIT](alter-table.html#experimental_audit)) + - Data access audit events (when role-based audit is enabled via + [`sql.log.user_audit` cluster setting](role-based-audit-logging.html#syntax-of-audit-settings)) + - SQL statements executed by users with the admin role + - Operations that write to system tables + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. 
+ +### `SQL_EXEC` + +The `SQL_EXEC` channel is used to report SQL execution on +behalf of client connections: + + - Logical SQL statement executions (when enabled via the + `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) + - uncaught Go panic errors during the execution of a SQL statement. + +### `SQL_PERF` + +The `SQL_PERF` channel is used to report SQL executions +that are marked as "out of the ordinary" +to facilitate performance investigations. +This includes the SQL "slow query log". + +Arguably, this channel overlaps with `SQL_EXEC`. +However, we keep both channels separate for backward compatibility +with versions prior to v21.1, where the corresponding events +were redirected to separate files. + +### `SQL_INTERNAL_PERF` + +The `SQL_INTERNAL_PERF` channel is like the `SQL_PERF` channel, but is aimed at +helping developers of CockroachDB itself. It exists as a separate +channel so as to not pollute the `SQL_PERF` logging output with +internal troubleshooting details. + +### `TELEMETRY` + +The `TELEMETRY` channel reports telemetry events. Telemetry events describe +feature usage within CockroachDB and anonymizes any application- +specific data. + +### `KV_DISTRIBUTION` + +The `KV_DISTRIBUTION` channel is used to report data distribution events, such as moving +replicas between stores in the cluster, or adding (removing) replicas to +ranges. 
+ diff --git a/src/current/_includes/cockroach-generated/release-25.3/settings/settings.html b/src/current/_includes/cockroach-generated/release-25.3/settings/settings.html new file mode 100644 index 00000000000..7b8d5c09fb4 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.3/settings/settings.html @@ -0,0 +1,380 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
SettingTypeDefaultDescriptionSupported Deployments
admission.disk_bandwidth_tokens.elastic.enabled
booleantruewhen true, and provisioned bandwidth for the disk corresponding to a store is configured, tokens for elastic work will be limited if disk bandwidth becomes a bottleneckAdvanced/Self-Hosted
admission.epoch_lifo.enabled
booleanfalsewhen true, epoch-LIFO behavior is enabled when there is significant delay in admissionBasic/Standard/Advanced/Self-Hosted
admission.epoch_lifo.epoch_closing_delta_duration
duration5msthe delta duration before closing an epoch, for epoch-LIFO admission control orderingBasic/Standard/Advanced/Self-Hosted
admission.epoch_lifo.epoch_duration
duration100msthe duration of an epoch, for epoch-LIFO admission control orderingBasic/Standard/Advanced/Self-Hosted
admission.epoch_lifo.queue_delay_threshold_to_switch_to_lifo
duration105msthe queue delay encountered by a (tenant,priority) for switching to epoch-LIFO orderingBasic/Standard/Advanced/Self-Hosted
admission.kv.enabled
booleantruewhen true, work performed by the KV layer is subject to admission controlAdvanced/Self-Hosted
admission.sql_kv_response.enabled
booleantruewhen true, work performed by the SQL layer when receiving a KV response is subject to admission controlBasic/Standard/Advanced/Self-Hosted
admission.sql_sql_response.enabled
booleantruewhen true, work performed by the SQL layer when receiving a DistSQL response is subject to admission controlBasic/Standard/Advanced/Self-Hosted
bulkio.backup.deprecated_full_backup_with_subdir.enabled
booleanfalsewhen true, a backup command with a user specified subdirectory will create a full backup at the subdirectory if no backup already exists at that subdirectoryBasic/Standard/Advanced/Self-Hosted
bulkio.backup.file_size
byte size128 MiBtarget size for individual data files produced during BACKUPBasic/Standard/Advanced/Self-Hosted
bulkio.backup.read_timeout
duration5m0samount of time after which a read attempt is considered timed out, which causes the backup to failBasic/Standard/Advanced/Self-Hosted
bulkio.backup.read_with_priority_after
duration1m0samount of time since the read-as-of time above which a BACKUP should use priority when retrying readsBasic/Standard/Advanced/Self-Hosted
physical_replication.consumer.minimum_flush_interval
(alias: bulkio.stream_ingestion.minimum_flush_interval)
duration5sthe minimum timestamp between flushes; flushes may still occur if internal buffers fill upAdvanced/Self-Hosted
changefeed.aggregator.flush_jitter
float0.1jitter aggregator flushes as a fraction of min_checkpoint_frequency. This setting has no effect if min_checkpoint_frequency is set to 0.Basic/Standard/Advanced/Self-Hosted
changefeed.backfill.concurrent_scan_requests
integer0number of concurrent scan requests per node issued during a backfillBasic/Standard/Advanced/Self-Hosted
changefeed.backfill.scan_request_size
integer524288the maximum number of bytes returned by each scan requestBasic/Standard/Advanced/Self-Hosted
changefeed.batch_reduction_retry.enabled
(alias: changefeed.batch_reduction_retry_enabled)
booleanfalseif true, kafka changefeeds upon erroring on an oversized batch will attempt to resend the messages with progressively lower batch sizesBasic/Standard/Advanced/Self-Hosted
changefeed.default_range_distribution_strategy
enumerationdefaultconfigures how work is distributed among nodes for a given changefeed. for the most balanced distribution, use `balanced_simple`. changing this setting will not override locality restrictions [default = 0, balanced_simple = 1]Basic/Standard/Advanced/Self-Hosted
changefeed.event_consumer_worker_queue_size
integer16if changefeed.event_consumer_workers is enabled, this setting sets the maximum number of events which a worker can bufferBasic/Standard/Advanced/Self-Hosted
changefeed.event_consumer_workers
integer0the number of workers to use when processing events: <0 disables, 0 assigns a reasonable default, >0 assigns the setting value. for experimental/core changefeeds and changefeeds using parquet format, this is disabledBasic/Standard/Advanced/Self-Hosted
changefeed.fast_gzip.enabled
booleantrueuse fast gzip implementationBasic/Standard/Advanced/Self-Hosted
changefeed.span_checkpoint.lag_threshold
(alias: changefeed.frontier_highwater_lag_checkpoint_threshold)
duration10m0sthe amount of time a changefeed's lagging (slowest) spans must lag behind its leading (fastest) spans before a span-level checkpoint to save leading span progress is written; if 0, span-level checkpoints due to lagging spans is disabledBasic/Standard/Advanced/Self-Hosted
changefeed.kafka_v2_error_details.enabled
booleantrueif enabled, Kafka v2 sinks will include the message key, size, and MVCC timestamp in message too large errorsBasic/Standard/Advanced/Self-Hosted
changefeed.memory.per_changefeed_limit
byte size512 MiBcontrols amount of data that can be buffered per changefeedBasic/Standard/Advanced/Self-Hosted
changefeed.resolved_timestamp.min_update_interval
(alias: changefeed.min_highwater_advance)
duration0sminimum amount of time that must have elapsed since the last time a changefeed's resolved timestamp was updated before it is eligible to be updated again; default of 0 means no minimum interval is enforced but updating will still be limited by the average time it takes to checkpoint progressBasic/Standard/Advanced/Self-Hosted
changefeed.node_throttle_config
stringspecifies node level throttling configuration for all changefeedsBasic/Standard/Advanced/Self-Hosted
changefeed.protect_timestamp.max_age
duration96h0m0sfail the changefeed if the protected timestamp age exceeds this threshold; 0 disables expirationBasic/Standard/Advanced/Self-Hosted
changefeed.protect_timestamp_interval
duration10m0scontrols how often the changefeed forwards its protected timestamp to the resolved timestampBasic/Standard/Advanced/Self-Hosted
changefeed.schema_feed.read_with_priority_after
duration1m0sretry with high priority if we were not able to read descriptors for too long; 0 disablesBasic/Standard/Advanced/Self-Hosted
changefeed.sink_io_workers
integer0the number of workers used by changefeeds when sending requests to the sink (currently the batching versions of webhook, pubsub, and kafka sinks that are enabled by changefeed.new_<sink type>_sink_enabled only): <0 disables, 0 assigns a reasonable default, >0 assigns the setting valueBasic/Standard/Advanced/Self-Hosted
cloudstorage.azure.concurrent_upload_buffers
integer1controls the number of concurrent buffers that will be used by the Azure client when uploading chunks. Each buffer can buffer up to cloudstorage.write_chunk.size of memory during an uploadBasic/Standard/Advanced/Self-Hosted
cloudstorage.azure.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.azure.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.azure.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.azure.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.gs.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.gs.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.gs.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.gs.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.http.custom_ca
stringcustom root CA (appended to system's default CAs) for verifying certificates when interacting with HTTPS storageBasic/Standard/Advanced/Self-Hosted
cloudstorage.http.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.http.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.http.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.http.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nodelocal.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nodelocal.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nodelocal.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nodelocal.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nullsink.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nullsink.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nullsink.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nullsink.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.s3.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.s3.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.s3.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.s3.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.timeout
duration10m0sthe timeout for import/export storage operationsBasic/Standard/Advanced/Self-Hosted
cloudstorage.userfile.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.userfile.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.userfile.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.userfile.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cluster.auto_upgrade.enabled
booleantruedisable automatic cluster version upgrade until resetBasic/Standard/Advanced/Self-Hosted
cluster.organization
stringorganization nameAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
cluster.preserve_downgrade_option
stringdisable (automatic or manual) cluster version upgrade from the specified version until resetBasic/Standard/Advanced/Self-Hosted
debug.zip.redact_addresses.enabled
booleanfalseenables the redaction of hostnames and ip addresses in debug zipBasic/Standard/Advanced/Self-Hosted
diagnostics.active_query_dumps.enabled
booleantrueexperimental: enable dumping of anonymized active queries to disk when node is under memory pressureAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
diagnostics.forced_sql_stat_reset.interval
duration2h0m0sinterval after which the reported SQL Stats are reset even if not collected by telemetry reporter. It has a max value of 24H.Basic/Standard/Advanced/Self-Hosted
diagnostics.memory_monitoring_dumps.enabled
booleantrueenable dumping of memory monitoring state at the same time as heap profiles are takenAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
diagnostics.reporting.enabled
booleantrueenable reporting diagnostic metrics to cockroach labs, but is ignored for Trial or Free licensesBasic/Standard/Advanced/Self-Hosted
diagnostics.reporting.interval
duration1h0m0sinterval at which diagnostics data should be reportedBasic/Standard/Advanced/Self-Hosted
enterprise.license
stringthe encoded cluster licenseAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
external.graphite.endpoint
stringif nonempty, push server metrics to the Graphite or Carbon server at the specified host:portBasic/Standard/Advanced/Self-Hosted
external.graphite.interval
duration10sthe interval at which metrics are pushed to Graphite (if enabled)Basic/Standard/Advanced/Self-Hosted
feature.backup.enabled
booleantrueset to true to enable backups, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.changefeed.enabled
booleantrueset to true to enable changefeeds, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.export.enabled
booleantrueset to true to enable exports, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.import.enabled
booleantrueset to true to enable imports, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.infer_rbr_region_col_using_constraint.enabled
booleanfalseset to true to enable looking up the region column via a foreign key constraint in a REGIONAL BY ROW table, false to disable; default is falseBasic/Standard/Advanced/Self-Hosted
feature.restore.enabled
booleantrueset to true to enable restore, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.schema_change.enabled
booleantrueset to true to enable schema changes, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.stats.enabled
booleantrueset to true to enable CREATE STATISTICS/ANALYZE, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.vector_index.enabled
booleanfalseset to true to enable vector indexes, false to disable; default is falseBasic/Standard/Advanced/Self-Hosted
jobs.retention_time
duration336h0m0sthe amount of time for which records for completed jobs are retainedBasic/Standard/Advanced/Self-Hosted
kv.allocator.lease_rebalance_threshold
float0.05minimum fraction away from the mean a store's lease count can be before it is considered for lease-transfersAdvanced/Self-Hosted
kv.allocator.load_based_lease_rebalancing.enabled
booleantrueset to enable rebalancing of range leases based on load and latencyAdvanced/Self-Hosted
kv.allocator.load_based_rebalancing
enumerationleases and replicaswhether to rebalance based on the distribution of load across stores [off = 0, leases = 1, leases and replicas = 2]Advanced/Self-Hosted
kv.allocator.load_based_rebalancing.objective
enumerationcpuwhat objective does the cluster use to rebalance; if set to `qps` the cluster will attempt to balance qps among stores, if set to `cpu` the cluster will attempt to balance cpu usage among stores [qps = 0, cpu = 1]Advanced/Self-Hosted
kv.allocator.load_based_rebalancing_interval
duration1m0sthe rough interval at which each store will check for load-based lease / replica rebalancing opportunitiesAdvanced/Self-Hosted
kv.allocator.qps_rebalance_threshold
float0.1minimum fraction away from the mean a store's QPS (such as queries per second) can be before it is considered overfull or underfullAdvanced/Self-Hosted
kv.allocator.range_rebalance_threshold
float0.05minimum fraction away from the mean a store's range count can be before it is considered overfull or underfullAdvanced/Self-Hosted
kv.allocator.store_cpu_rebalance_threshold
float0.1minimum fraction away from the mean a store's cpu usage can be before it is considered overfull or underfullAdvanced/Self-Hosted
kv.bulk_io_write.max_rate
byte size1.0 TiBthe rate limit (bytes/sec) to use for writes to disk on behalf of bulk io opsAdvanced/Self-Hosted
kv.bulk_io_write.min_capacity_remaining_fraction
float0.05remaining store capacity fraction below which bulk ingestion requests are rejectedAdvanced/Self-Hosted
kv.bulk_sst.max_allowed_overage
byte size64 MiBif positive, allowed size in excess of target size for SSTs from export requests; export requests (i.e. BACKUP) may buffer up to the sum of kv.bulk_sst.target_size and kv.bulk_sst.max_allowed_overage in memoryAdvanced/Self-Hosted
kv.bulk_sst.target_size
byte size16 MiBtarget size for SSTs emitted from export requests; export requests (i.e. BACKUP) may buffer up to the sum of kv.bulk_sst.target_size and kv.bulk_sst.max_allowed_overage in memoryAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.closed_timestamp.follower_reads.enabled
(alias: kv.closed_timestamp.follower_reads_enabled)
booleantrueallow (all) replicas to serve consistent historical reads based on closed timestamp informationAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.closed_timestamp.lead_for_global_reads_auto_tune.enabled
booleanfalseif enabled, observed network latency between leaseholders and their furthest follower will be used to adjust closed timestamp policies for ranges configured to serve global reads. kv.closed_timestamp.lead_for_global_reads_override takes precedence if set.Advanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.closed_timestamp.lead_for_global_reads_override
duration0sif nonzero, overrides the lead time that global_read ranges use to publish closed timestampsAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.closed_timestamp.side_transport_interval
duration200msthe interval at which the closed timestamp side-transport attempts to advance each range's closed timestamp; set to 0 to disable the side-transportAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.closed_timestamp.target_duration
duration3sif nonzero, attempt to provide closed timestamp notifications for timestamps trailing cluster time by approximately this durationAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.dist_sender.circuit_breaker.cancellation.enabled
booleantruewhen enabled, in-flight requests will be cancelled when the circuit breaker tripsBasic/Standard/Advanced/Self-Hosted
kv.dist_sender.circuit_breaker.cancellation.write_grace_period
duration10show long after the circuit breaker trips to cancel write requests (these can't retry internally, so should be long enough to allow quorum/lease recovery)Basic/Standard/Advanced/Self-Hosted
kv.dist_sender.circuit_breaker.probe.interval
duration3sinterval between replica probesBasic/Standard/Advanced/Self-Hosted
kv.dist_sender.circuit_breaker.probe.threshold
duration3sduration of errors or stalls after which a replica will be probedBasic/Standard/Advanced/Self-Hosted
kv.dist_sender.circuit_breaker.probe.timeout
duration3stimeout for replica probesBasic/Standard/Advanced/Self-Hosted
kv.dist_sender.circuit_breakers.mode
enumerationliveness range onlyset of ranges to trip circuit breakers for failing or stalled replicas [no ranges = 0, liveness range only = 1, all ranges = 2]Basic/Standard/Advanced/Self-Hosted
kv.lease_transfer_read_summary.global_budget
byte size0 Bcontrols the maximum number of bytes that will be used to summarize the global segment of the timestamp cache during lease transfers and range merges. A smaller budget will result in loss of precision.Advanced/Self-Hosted
kv.lease_transfer_read_summary.local_budget
byte size4.0 MiBcontrols the maximum number of bytes that will be used to summarize the local segment of the timestamp cache during lease transfers and range merges. A smaller budget will result in loss of precision.Advanced/Self-Hosted
kv.log_range_and_node_events.enabled
booleantrueset to true to transactionally log range events (e.g., split, merge, add/remove voter/non-voter) into system.rangelog and node join and restart events into system.eventlogAdvanced/Self-Hosted
kv.protectedts.reconciliation.interval
duration5m0sthe frequency for reconciling jobs with protected timestamp recordsAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.raft.leader_fortification.fraction_enabled
float1controls the fraction of ranges for which the raft leader fortification protocol is enabled. Leader fortification is needed for a range to use a Leader lease. Set to 0.0 to disable leader fortification and, by extension, Leader leases. Set to 1.0 to enable leader fortification for all ranges and, by extension, use Leader leases for all ranges which do not require expiration-based leases. Set to a value between 0.0 and 1.0 to gradually roll out Leader leases across the ranges in a cluster.Advanced/Self-Hosted
kv.range.range_size_hard_cap
byte size8.0 GiBhard cap on the maximum size a range is allowed to grow to without splitting before writes to the range are blocked. Takes precedence over all other configurationsAdvanced/Self-Hosted
kv.range_split.by_load.enabled
(alias: kv.range_split.by_load_enabled)
booleantrueallow automatic splits of ranges based on where load is concentratedAdvanced/Self-Hosted
kv.range_split.load_cpu_threshold
duration500msthe CPU use per second over which the range becomes a candidate for load based splittingAdvanced/Self-Hosted
kv.range_split.load_qps_threshold
integer2500the QPS over which the range becomes a candidate for load based splittingAdvanced/Self-Hosted
kv.rangefeed.client.stream_startup_rate
integer100controls the rate per second the client will initiate new rangefeed stream for a single range; 0 implies unlimitedBasic/Standard/Advanced/Self-Hosted
kv.rangefeed.closed_timestamp_refresh_interval
duration3sthe interval at which closed-timestamp updates are delivered to rangefeeds; set to 0 to use kv.closed_timestamp.side_transport_intervalAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.rangefeed.enabled
booleanfalseif set, rangefeed registration is enabledAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.replica_circuit_breaker.slow_replication_threshold
duration1m0sduration after which slow proposals trip the per-Replica circuit breaker (zero duration disables breakers)Advanced/Self-Hosted
kv.replica_raft.leaderless_unavailable_threshold
duration1m0sduration after which leaderless replicas are considered unavailable. Set to 0 to disable leaderless replica availability checksAdvanced/Self-Hosted
kv.replica_stats.addsst_request_size_factor
integer50000the divisor that is applied to addsstable request sizes, then recorded in a leaseholders QPS; 0 means all requests are treated as cost 1Advanced/Self-Hosted
kv.replication_reports.interval
duration1m0sthe frequency for generating the replication_constraint_stats, replication_stats_report and replication_critical_localities reports (set to 0 to disable)Advanced/Self-Hosted
kv.snapshot_rebalance.max_rate
byte size32 MiBthe rate limit (bytes/sec) to use for rebalance and upreplication snapshotsAdvanced/Self-Hosted
kv.transaction.max_intents_and_locks
integer0maximum count of inserts or durable locks for a single transaction, 0 to disableBasic/Standard/Advanced/Self-Hosted
kv.transaction.max_intents_bytes
integer4194304maximum number of bytes used to track locks in transactionsBasic/Standard/Advanced/Self-Hosted
kv.transaction.max_refresh_spans_bytes
integer4194304maximum number of bytes used to track refresh spans in serializable transactionsBasic/Standard/Advanced/Self-Hosted
kv.transaction.randomized_anchor_key.enabled
booleanfalsedictates whether a transaction's anchor key is randomized or notBasic/Standard/Advanced/Self-Hosted
kv.transaction.reject_over_max_intents_budget.enabled
booleanfalseif set, transactions that exceed their lock tracking budget (kv.transaction.max_intents_bytes) are rejected instead of having their lock spans imprecisely compressedBasic/Standard/Advanced/Self-Hosted
kv.transaction.write_pipelining.locking_reads.enabled
booleantrueif enabled, transactional locking reads are pipelined through Raft consensusBasic/Standard/Advanced/Self-Hosted
kv.transaction.write_pipelining.ranged_writes.enabled
booleantrueif enabled, transactional ranged writes are pipelined through Raft consensusBasic/Standard/Advanced/Self-Hosted
kv.transaction.write_pipelining.enabled
(alias: kv.transaction.write_pipelining_enabled)
booleantrueif enabled, transactional writes are pipelined through Raft consensusBasic/Standard/Advanced/Self-Hosted
kv.transaction.write_pipelining.max_batch_size
(alias: kv.transaction.write_pipelining_max_batch_size)
integer128if non-zero, defines the maximum size batch that will be pipelined through Raft consensusBasic/Standard/Advanced/Self-Hosted
kvadmission.store.provisioned_bandwidth
byte size0 Bif set to a non-zero value, this is used as the provisioned bandwidth (in bytes/s), for each store. It can be overridden on a per-store basis using the --store flag. Note that setting the provisioned bandwidth to a positive value may enable disk bandwidth based admission control, since admission.disk_bandwidth_tokens.elastic.enabled defaults to trueAdvanced/Self-Hosted
kvadmission.store.snapshot_ingest_bandwidth_control.enabled
booleantrueif set to true, snapshot ingests will be subject to disk write control in ACAdvanced/Self-Hosted
obs.tablemetadata.automatic_updates.enabled
booleanfalseenables automatic updates of the table metadata cache system.table_metadataBasic/Standard/Advanced/Self-Hosted
obs.tablemetadata.data_valid_duration
duration20m0sthe duration for which the data in system.table_metadata is considered validBasic/Standard/Advanced/Self-Hosted
schedules.backup.gc_protection.enabled
booleantrueenable chaining of GC protection across backups run as part of a scheduleBasic/Standard/Advanced/Self-Hosted
security.client_cert.subject_required.enabled
booleanfalsemandates a requirement for subject role to be set for db userAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
security.ocsp.mode
enumerationoffuse OCSP to check whether TLS certificates are revoked. If the OCSP server is unreachable, in strict mode all certificates will be rejected and in lax mode all certificates will be accepted. [off = 0, lax = 1, strict = 2]Basic/Standard/Advanced/Self-Hosted
security.ocsp.timeout
duration3stimeout before considering the OCSP server unreachableBasic/Standard/Advanced/Self-Hosted
security.provisioning.ldap.enabled
booleanfalseenables automatic creation of SQL users upon successful LDAP loginBasic/Standard/Advanced/Self-Hosted
server.auth_log.sql_connections.enabled
booleanfalseif set, log SQL client connect and disconnect events to the SESSIONS log channel (note: may hinder performance on loaded nodes)Basic/Standard/Advanced/Self-Hosted
server.auth_log.sql_sessions.enabled
booleanfalseif set, log verbose SQL session authentication events to the SESSIONS log channel (note: may hinder performance on loaded nodes). Session start and end events are always logged regardless of this setting; disable the SESSIONS log channel to suppress them.Basic/Standard/Advanced/Self-Hosted
server.authentication_cache.enabled
booleantrueenables a cache used during authentication to avoid lookups to system tables when retrieving per-user authentication-related informationBasic/Standard/Advanced/Self-Hosted
server.child_metrics.enabled
booleanfalseenables the exporting of child metrics, additional prometheus time series with extra labelsBasic/Standard/Advanced/Self-Hosted
server.child_metrics.include_aggregate.enabled
booleantrueinclude the reporting of the aggregate time series when child metrics are enabled. This cluster setting has no effect if child metrics are disabled.Basic/Standard/Advanced/Self-Hosted
server.clock.forward_jump_check.enabled
(alias: server.clock.forward_jump_check_enabled)
booleanfalseif enabled, forward clock jumps > max_offset/2 will cause a panicBasic/Standard/Advanced/Self-Hosted
server.clock.persist_upper_bound_interval
duration0sthe interval between persisting the wall time upper bound of the clock. The clock does not generate a wall time greater than the persisted timestamp and will panic if it sees a wall time greater than this value. When cockroach starts, it waits for the wall time to catch-up till this persisted timestamp. This guarantees monotonic wall time across server restarts. Not setting this or setting a value of 0 disables this feature.Basic/Standard/Advanced/Self-Hosted
server.consistency_check.max_rate
byte size8.0 MiBthe rate limit (bytes/sec) to use for consistency checks; used in conjunction with server.consistency_check.interval to control the frequency of consistency checks. Note that setting this too high can negatively impact performance.Advanced/Self-Hosted
server.eventlog.enabled
booleantrueif set, logged notable events are also stored in the table system.eventlogBasic/Standard/Advanced/Self-Hosted
server.eventlog.ttl
duration2160h0m0sif nonzero, entries in system.eventlog older than this duration are periodically purgedBasic/Standard/Advanced/Self-Hosted
server.host_based_authentication.configuration
stringhost-based authentication configuration to use during connection authenticationBasic/Standard/Advanced/Self-Hosted
server.hot_ranges_request.node.timeout
duration5m0sthe duration allowed for a single node to return hot range data before the request is cancelled; if set to 0, there is no timeoutBasic/Standard/Advanced/Self-Hosted
server.hsts.enabled
booleanfalseif true, HSTS headers will be sent along with all HTTP requests. The headers will contain a max-age setting of one year. Browsers honoring the header will always use HTTPS to access the DB Console. Ensure that TLS is correctly configured prior to enabling.Basic/Standard/Advanced/Self-Hosted
server.http.base_path
string/path to redirect the user to upon successful loginBasic/Standard/Advanced/Self-Hosted
server.identity_map.configuration
stringsystem-identity to database-username mappingsBasic/Standard/Advanced/Self-Hosted
server.jwt_authentication.audience
stringsets accepted audience values for JWT logins over the SQL interfaceBasic/Standard/Advanced/Self-Hosted
server.jwt_authentication.claim
stringsets the JWT claim that is parsed to get the usernameBasic/Standard/Advanced/Self-Hosted
server.jwt_authentication.client.timeout
duration15ssets the client timeout for external calls made during JWT authentication (e.g. fetching JWKS, etc.)Basic/Standard/Advanced/Self-Hosted
server.jwt_authentication.enabled
booleanfalseenables or disables JWT login for the SQL interfaceBasic/Standard/Advanced/Self-Hosted
server.jwt_authentication.issuers.configuration
(alias: server.jwt_authentication.issuers)
stringsets accepted issuer values for JWT logins over the SQL interface which can be a single issuer URL string or a JSON string containing an array of issuer URLs or a JSON object containing map of issuer URLS to JWKS URIsBasic/Standard/Advanced/Self-Hosted
server.jwt_authentication.issuers.custom_ca
stringsets the PEM encoded custom root CA for verifying certificates while fetching JWKSBasic/Standard/Advanced/Self-Hosted
server.jwt_authentication.jwks
string{"keys":[]}sets the public key set for JWT logins over the SQL interface (JWKS format)Basic/Standard/Advanced/Self-Hosted
server.jwt_authentication.jwks_auto_fetch.enabled
booleanfalseenables or disables automatic fetching of JWKS from the issuer's well-known endpoint or JWKS URI set in JWTAuthIssuersConfig. If this is enabled, the server.jwt_authentication.jwks will be ignored.Basic/Standard/Advanced/Self-Hosted
server.ldap_authentication.client.tls_certificate
stringsets the client certificate PEM for establishing mTLS connection with LDAP serverBasic/Standard/Advanced/Self-Hosted
server.ldap_authentication.client.tls_key
stringsets the client key PEM for establishing mTLS connection with LDAP serverBasic/Standard/Advanced/Self-Hosted
server.ldap_authentication.domain.custom_ca
stringsets the PEM encoded custom root CA for verifying domain certificates when establishing connection with LDAP serverBasic/Standard/Advanced/Self-Hosted
server.log_gc.max_deletions_per_cycle
integer1000the maximum number of entries to delete on each purge of log-like system tablesBasic/Standard/Advanced/Self-Hosted
server.log_gc.period
duration1h0m0sthe period at which log-like system tables are checked for old entriesBasic/Standard/Advanced/Self-Hosted
server.max_connections_per_gateway
integer-1the maximum number of SQL connections per gateway allowed at a given time (note: this will only limit future connection attempts and will not affect already established connections). Negative values result in unlimited number of connections. Superusers are not affected by this limit.Basic/Standard/Advanced/Self-Hosted
server.max_open_transactions_per_gateway
integer-1the maximum number of open SQL transactions per gateway allowed at a given time. Negative values result in unlimited number of connections. Superusers are not affected by this limit.Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.autologin.enabled
(alias: server.oidc_authentication.autologin)
booleanfalseif true, logged-out visitors to the DB Console will be automatically redirected to the OIDC login endpointBasic/Standard/Advanced/Self-Hosted
server.oidc_authentication.button_text
stringLog in with your OIDC providertext to show on button on DB Console login page to login with your OIDC provider (only shown if OIDC is enabled)Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.claim_json_key
stringsets JSON key of principal to extract from payload after OIDC authentication completes (usually email or sid)Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.client.timeout
duration15ssets the client timeout for external calls made during OIDC authentication (e.g. authorization code flow, etc.)Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.client_id
stringsets OIDC client idBasic/Standard/Advanced/Self-Hosted
server.oidc_authentication.client_secret
stringsets OIDC client secretBasic/Standard/Advanced/Self-Hosted
server.oidc_authentication.enabled
booleanfalseenables or disables OIDC login for the DB ConsoleBasic/Standard/Advanced/Self-Hosted
server.oidc_authentication.principal_regex
string(.+)regular expression to apply to extracted principal (see claim_json_key setting) to translate to SQL user (golang regex format, must include 1 grouping to extract)Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.provider.custom_ca
stringsets the PEM encoded custom root CA for verifying certificates while authenticating through the OIDC providerBasic/Standard/Advanced/Self-Hosted
server.oidc_authentication.provider_url
stringsets OIDC provider URL ({provider_url}/.well-known/openid-configuration must resolve)Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.redirect_url
stringhttps://localhost:8080/oidc/v1/callbacksets OIDC redirect URL via a URL string or a JSON string containing a required `redirect_urls` key with an object that maps from region keys to URL strings (URLs should point to your load balancer and must route to the path /oidc/v1/callback)Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.scopes
stringopenidsets OIDC scopes to include with authentication request (space delimited list of strings, required to start with `openid`)Basic/Standard/Advanced/Self-Hosted
server.rangelog.ttl
duration720h0m0sif nonzero, entries in system.rangelog older than this duration are periodically purgedAdvanced/Self-Hosted
server.redact_sensitive_settings.enabled
booleanfalseenables or disables the redaction of sensitive settings in the output of SHOW CLUSTER SETTINGS and SHOW ALL CLUSTER SETTINGS for users without the MODIFYCLUSTERSETTING privilegeBasic/Standard/Advanced/Self-Hosted
server.shutdown.connections.timeout
(alias: server.shutdown.connection_wait)
duration0sthe maximum amount of time a server waits for all SQL connections to be closed before proceeding with a drain. (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Basic/Standard/Advanced/Self-Hosted
server.shutdown.initial_wait
(alias: server.shutdown.drain_wait)
duration0sthe amount of time a server waits in an unready state before proceeding with a drain (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting. --drain-wait is to specify the duration of the whole draining process, while server.shutdown.initial_wait is to set the wait time for health probes to notice that the node is not ready.)Basic/Standard/Advanced/Self-Hosted
server.shutdown.lease_transfer_iteration.timeout
(alias: server.shutdown.lease_transfer_wait)
duration5sthe timeout for a single iteration of the range lease transfer phase of draining (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Advanced/Self-Hosted
server.shutdown.transactions.timeout
(alias: server.shutdown.query_wait)
duration10sthe timeout for waiting for active transactions to finish during a drain (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Basic/Standard/Advanced/Self-Hosted
server.sql_tcp_keep_alive.count
integer3maximum number of probes that will be sent out before a connection is dropped because it's unresponsive (Linux and Darwin only)Basic/Standard/Advanced/Self-Hosted
server.sql_tcp_keep_alive.interval
duration10stime between keep alive probes and idle time before probes are sent outBasic/Standard/Advanced/Self-Hosted
server.time_until_store_dead
duration5m0sthe time after which if there is no new gossiped information about a store, it is considered deadBasic/Standard/Advanced/Self-Hosted
server.user_login.cert_password_method.auto_scram_promotion.enabled
booleantruewhether to automatically promote cert-password authentication to use SCRAMBasic/Standard/Advanced/Self-Hosted
server.user_login.downgrade_scram_stored_passwords_to_bcrypt.enabled
booleantrueif server.user_login.password_encryption=crdb-bcrypt, this controls whether to automatically re-encode stored passwords from scram-sha-256 to crdb-bcryptBasic/Standard/Advanced/Self-Hosted
server.user_login.min_password_length
integer1the minimum length accepted for passwords set in cleartext via SQL. Note that a value lower than 1 is ignored: passwords cannot be empty in any case. This setting only applies when adding new users or altering an existing user's password; it will not affect existing logins.Basic/Standard/Advanced/Self-Hosted
server.user_login.password_encryption
enumerationscram-sha-256which hash method to use to encode cleartext passwords passed via ALTER/CREATE USER/ROLE WITH PASSWORD [crdb-bcrypt = 2, scram-sha-256 = 3]Basic/Standard/Advanced/Self-Hosted
server.user_login.password_hashes.default_cost.crdb_bcrypt
integer10the hashing cost to use when storing passwords supplied as cleartext by SQL clients with the hashing method crdb-bcrypt (allowed range: 4-31)Basic/Standard/Advanced/Self-Hosted
server.user_login.password_hashes.default_cost.scram_sha_256
integer10610the hashing cost to use when storing passwords supplied as cleartext by SQL clients with the hashing method scram-sha-256 (allowed range: 4096-240000000000)Basic/Standard/Advanced/Self-Hosted
server.user_login.rehash_scram_stored_passwords_on_cost_change.enabled
booleantrueif server.user_login.password_hashes.default_cost.scram_sha_256 differs from the cost in a stored hash, this controls whether to automatically re-encode stored passwords using scram-sha-256 with the new default costBasic/Standard/Advanced/Self-Hosted
server.user_login.timeout
duration10stimeout after which client authentication times out if some system range is unavailable (0 = no timeout)Basic/Standard/Advanced/Self-Hosted
server.user_login.upgrade_bcrypt_stored_passwords_to_scram.enabled
booleantrueif server.user_login.password_encryption=scram-sha-256, this controls whether to automatically re-encode stored passwords from crdb-bcrypt to scram-sha-256Basic/Standard/Advanced/Self-Hosted
server.web_session.purge.ttl
duration1h0m0sif nonzero, entries in system.web_sessions older than this duration are periodically purgedBasic/Standard/Advanced/Self-Hosted
server.web_session.timeout
(alias: server.web_session_timeout)
duration168h0m0sthe duration that a newly created web session will be validBasic/Standard/Advanced/Self-Hosted
spanconfig.bounds.enabled
booleantruedictates whether span config bounds are consulted when serving span configs for secondary tenantsAdvanced/Self-Hosted
spanconfig.range_coalescing.system.enabled
(alias: spanconfig.storage_coalesce_adjacent.enabled)
booleantruecollapse adjacent ranges with the same span configs, for the ranges specific to the system tenantAdvanced/Self-Hosted
spanconfig.range_coalescing.application.enabled
(alias: spanconfig.tenant_coalesce_adjacent.enabled)
booleantruecollapse adjacent ranges with the same span configs across all secondary tenant keyspacesAdvanced/Self-Hosted
sql.auth.change_own_password.enabled
booleanfalsecontrols whether a user is allowed to change their own password, even if they have no other privilegesBasic/Standard/Advanced/Self-Hosted
sql.auth.grant_option_for_owner.enabled
booleantruedetermines whether the GRANT OPTION for privileges is implicitly given to the owner of an objectBasic/Standard/Advanced/Self-Hosted
sql.auth.grant_option_inheritance.enabled
booleantruedetermines whether the GRANT OPTION for privileges is inherited through role membershipBasic/Standard/Advanced/Self-Hosted
sql.auth.public_schema_create_privilege.enabled
booleantruedetermines whether to grant all users the CREATE privileges on the public schema when it is createdBasic/Standard/Advanced/Self-Hosted
sql.closed_session_cache.capacity
integer1000the maximum number of sessions in the cacheBasic/Standard/Advanced/Self-Hosted
sql.closed_session_cache.time_to_live
integer3600the maximum time to live, in secondsBasic/Standard/Advanced/Self-Hosted
sql.contention.event_store.capacity
byte size64 MiBthe in-memory storage capacity per-node of contention event storeBasic/Standard/Advanced/Self-Hosted
sql.contention.event_store.duration_threshold
duration0sminimum contention duration to cause the contention events to be collected into crdb_internal.transaction_contention_eventsBasic/Standard/Advanced/Self-Hosted
sql.contention.record_serialization_conflicts.enabled
booleantrueenables recording 40001 errors with conflicting txn meta as SERIALIZATION_CONFLICT contention events into crdb_internal.transaction_contention_eventsBasic/Standard/Advanced/Self-Hosted
sql.contention.txn_id_cache.max_size
byte size64 MiBthe maximum byte size TxnID cache will use (set to 0 to disable)Basic/Standard/Advanced/Self-Hosted
sql.cross_db_fks.enabled
booleanfalseif true, creating foreign key references across databases is allowedBasic/Standard/Advanced/Self-Hosted
sql.cross_db_sequence_owners.enabled
booleanfalseif true, creating sequences owned by tables from other databases is allowedBasic/Standard/Advanced/Self-Hosted
sql.cross_db_sequence_references.enabled
booleanfalseif true, sequences referenced by tables from other databases are allowedBasic/Standard/Advanced/Self-Hosted
sql.cross_db_views.enabled
booleanfalseif true, creating views that refer to other databases is allowedBasic/Standard/Advanced/Self-Hosted
sql.defaults.cost_scans_with_default_col_size.enabled
booleanfalsesetting to true uses the same size for all columns to compute scan cost
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.datestyle
enumerationiso, mdydefault value for DateStyle session setting [iso, mdy = 0, iso, dmy = 1, iso, ymd = 2]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.default_hash_sharded_index_bucket_count
integer16used as bucket count if bucket count is not specified in hash sharded index definition
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.default_int_size
integer8the size, in bytes, of an INT type
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.disallow_full_table_scans.enabled
booleanfalsesetting to true rejects queries that have planned a full table scan; set large_full_scan_rows > 0 to allow small full table scans estimated to read fewer than large_full_scan_rows
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.distsql
enumerationautodefault distributed SQL execution mode [off = 0, auto = 1, on = 2, always = 3]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.experimental_alter_column_type.enabled
booleanfalsedefault value for experimental_alter_column_type session setting; enables the use of ALTER COLUMN TYPE for general conversions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.experimental_distsql_planning
enumerationoffdefault experimental_distsql_planning mode; enables experimental opt-driven DistSQL planning [off = 0, on = 1]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.experimental_enable_unique_without_index_constraints.enabled
booleanfalsedefault value for experimental_enable_unique_without_index_constraints session setting;disables unique without index constraints by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.experimental_implicit_column_partitioning.enabled
booleanfalsedefault value for experimental_enable_temp_tables; allows for the use of implicit column partitioning
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.experimental_temporary_tables.enabled
booleanfalsedefault value for experimental_enable_temp_tables; allows for use of temporary tables by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.foreign_key_cascades_limit
integer10000default value for foreign_key_cascades_limit session setting; limits the number of cascading operations that run as part of a single query
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.idle_in_session_timeout
duration0sdefault value for the idle_in_session_timeout; default value for the idle_in_session_timeout session setting; controls the duration a session is permitted to idle before the session is terminated; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.idle_in_transaction_session_timeout
duration0sdefault value for the idle_in_transaction_session_timeout; controls the duration a session is permitted to idle in a transaction before the session is terminated; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.implicit_select_for_update.enabled
booleantruedefault value for enable_implicit_select_for_update session setting; enables FOR UPDATE locking during the row-fetch phase of mutation statements
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.insert_fast_path.enabled
booleantruedefault value for enable_insert_fast_path session setting; enables a specialized insert path
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.intervalstyle
enumerationpostgresdefault value for IntervalStyle session setting [postgres = 0, iso_8601 = 1, sql_standard = 2]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.large_full_scan_rows
float0default value for large_full_scan_rows session variable which determines the table size at which full scans are considered large and disallowed when disallow_full_table_scans is set to true; set to 0 to reject all full table or full index scans when disallow_full_table_scans is true
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.locality_optimized_partitioned_index_scan.enabled
booleantruedefault value for locality_optimized_partitioned_index_scan session setting; enables searching for rows in the current region before searching remote regions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.lock_timeout
duration0sdefault value for the lock_timeout; default value for the lock_timeout session setting; controls the duration a query is permitted to wait while attempting to acquire a lock on a key or while blocking on an existing lock in order to perform a non-locking read on a key; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.on_update_rehome_row.enabled
booleantruedefault value for on_update_rehome_row; enables ON UPDATE rehome_row() expressions to trigger on updates
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.optimizer_use_histograms.enabled
booleantruedefault value for optimizer_use_histograms session setting; enables usage of histograms in the optimizer by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.optimizer_use_multicol_stats.enabled
booleantruedefault value for optimizer_use_multicol_stats session setting; enables usage of multi-column stats in the optimizer by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.override_alter_primary_region_in_super_region.enabled
booleanfalsedefault value for override_alter_primary_region_in_super_region; allows for altering the primary region even if the primary region is a member of a super region
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.override_multi_region_zone_config.enabled
booleanfalsedefault value for override_multi_region_zone_config; allows for overriding the zone configs of a multi-region table or database
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.prefer_lookup_joins_for_fks.enabled
booleanfalsedefault value for prefer_lookup_joins_for_fks session setting; causes foreign key operations to use lookup joins when possible
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.primary_region
stringif not empty, all databases created without a PRIMARY REGION will implicitly have the given PRIMARY REGION
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.reorder_joins_limit
integer8default number of joins to reorder
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.require_explicit_primary_keys.enabled
booleanfalsedefault value for requiring explicit primary keys in CREATE TABLE statements
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.results_buffer.size
byte size512 KiBdefault size of the buffer that accumulates results for a statement or a batch of statements before they are sent to the client. This can be overridden on an individual connection with the 'results_buffer_size' parameter. Note that auto-retries generally only happen while no results have been delivered to the client, so reducing this size can increase the number of retriable errors a client receives. On the other hand, increasing the buffer size can increase the delay until the client receives the first result row. Updating the setting only affects new connections. Setting to 0 disables any buffering.
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.serial_normalization
enumerationrowiddefault handling of SERIAL in table definitions [rowid = 0, virtual_sequence = 1, sql_sequence = 2, sql_sequence_cached = 3, unordered_rowid = 4, sql_sequence_cached_node = 5]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.statement_timeout
duration0sdefault value for the statement_timeout; default value for the statement_timeout session setting; controls the duration a query is permitted to run before it is canceled; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.stub_catalog_tables.enabled
booleantruedefault value for stub_catalog_tables session setting
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.super_regions.enabled
booleanfalsedefault value for enable_super_regions; allows for the usage of super regions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.transaction_rows_read_err
integer0the limit for the number of rows read by a SQL transaction which - once exceeded - will fail the transaction (or will trigger a logging event to SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.transaction_rows_read_log
integer0the threshold for the number of rows read by a SQL transaction which - once exceeded - will trigger a logging event to SQL_PERF (or SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.transaction_rows_written_err
integer0the limit for the number of rows written by a SQL transaction which - once exceeded - will fail the transaction (or will trigger a logging event to SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.transaction_rows_written_log
integer0the threshold for the number of rows written by a SQL transaction which - once exceeded - will trigger a logging event to SQL_PERF (or SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.use_declarative_schema_changer
enumerationondefault value for use_declarative_schema_changer session setting;disables new schema changer by default [off = 0, on = 1, unsafe = 2, unsafe_always = 3]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.vectorize
enumerationondefault vectorize mode [on = 0, on = 1, on = 2, experimental_always = 3, off = 4]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.zigzag_join.enabled
booleanfalsedefault value for enable_zigzag_join session setting; disallows use of zig-zag join by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.distsql.temp_storage.workmem
byte size64 MiBmaximum amount of memory in bytes a processor can use before falling back to temp storageBasic/Standard/Advanced/Self-Hosted
sql.guardrails.max_row_size_err
byte size512 MiBmaximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an error is returned; use 0 to disableBasic/Standard/Advanced/Self-Hosted
sql.guardrails.max_row_size_log
byte size64 MiBmaximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an event is logged to SQL_PERF (or SQL_INTERNAL_PERF if the mutating statement was internal); use 0 to disableBasic/Standard/Advanced/Self-Hosted
sql.hash_sharded_range_pre_split.max
integer16max pre-split ranges to have when adding hash sharded index to an existing tableBasic/Standard/Advanced/Self-Hosted
sql.index_recommendation.drop_unused_duration
duration168h0m0sthe index unused duration at which we begin to recommend dropping the indexBasic/Standard/Advanced/Self-Hosted
sql.insights.anomaly_detection.enabled
booleantrueenable per-fingerprint latency recording and anomaly detectionBasic/Standard/Advanced/Self-Hosted
sql.insights.anomaly_detection.latency_threshold
duration50msstatements must surpass this threshold to trigger anomaly detection and identificationBasic/Standard/Advanced/Self-Hosted
sql.insights.anomaly_detection.memory_limit
byte size1.0 MiBthe maximum amount of memory allowed for tracking statement latenciesBasic/Standard/Advanced/Self-Hosted
sql.insights.execution_insights_capacity
integer1000the size of the per-node store of execution insightsBasic/Standard/Advanced/Self-Hosted
sql.insights.high_retry_count.threshold
integer10the number of retries a slow statement must have undergone for its high retry count to be highlighted as a potential problemBasic/Standard/Advanced/Self-Hosted
sql.insights.latency_threshold
duration100msamount of time after which an executing statement is considered slow. Use 0 to disable.Basic/Standard/Advanced/Self-Hosted
sql.log.redact_names.enabled
booleanfalseif set, schema object identifiers are redacted in SQL statements that appear in event logsBasic/Standard/Advanced/Self-Hosted
sql.log.slow_query.experimental_full_table_scans.enabled
booleanfalsewhen set to true, statements that perform a full table/index scan will be logged to the slow query log even if they do not meet the latency threshold. Must have the slow query log enabled for this setting to have any effect.Basic/Standard/Advanced/Self-Hosted
sql.log.slow_query.internal_queries.enabled
booleanfalsewhen set to true, internal queries which exceed the slow query log threshold are logged to a separate log. Must have the slow query log enabled for this setting to have any effect.Basic/Standard/Advanced/Self-Hosted
sql.log.slow_query.latency_threshold
duration0swhen set to non-zero, log statements whose service latency exceeds the threshold to a secondary logger on each nodeBasic/Standard/Advanced/Self-Hosted
sql.log.user_audit
stringuser/role-based audit logging configuration. An enterprise license is required for this cluster setting to take effect.Basic/Standard/Advanced/Self-Hosted
sql.log.user_audit.reduced_config.enabled
booleanfalseenables logic to compute a reduced audit configuration, computing the audit configuration only once at session start instead of at each SQL event. The tradeoff with the increase in performance (~5%), is that changes to the audit configuration (user role memberships/cluster setting) are not reflected within session. Users will need to start a new session to see these changes in their auditing behaviour.Basic/Standard/Advanced/Self-Hosted
sql.metrics.application_name.enabled
booleanfalsewhen enabled, SQL metrics would export application name as an additional label as part of child metrics. The number of unique label combinations is limited to 5000 by default.Basic/Standard/Advanced/Self-Hosted
sql.metrics.database_name.enabled
booleanfalsewhen enabled, SQL metrics would export database name as an additional label as part of child metrics. The number of unique label combinations is limited to 5000 by default.Basic/Standard/Advanced/Self-Hosted
sql.metrics.index_usage_stats.enabled
booleantruecollect per index usage statisticsBasic/Standard/Advanced/Self-Hosted
sql.metrics.max_mem_reported_stmt_fingerprints
integer100000the maximum number of reported statement fingerprints stored in memoryBasic/Standard/Advanced/Self-Hosted
sql.metrics.max_mem_reported_txn_fingerprints
integer100000the maximum number of reported transaction fingerprints stored in memoryBasic/Standard/Advanced/Self-Hosted
sql.metrics.max_mem_stmt_fingerprints
integer7500the maximum number of statement fingerprints stored in memoryBasic/Standard/Advanced/Self-Hosted
sql.metrics.max_mem_txn_fingerprints
integer7500the maximum number of transaction fingerprints stored in memoryBasic/Standard/Advanced/Self-Hosted
sql.metrics.statement_details.dump_to_logs.enabled
(alias: sql.metrics.statement_details.dump_to_logs)
booleanfalsedump collected statement statistics to node logs when periodically clearedBasic/Standard/Advanced/Self-Hosted
sql.metrics.statement_details.enabled
booleantruecollect per-statement query statisticsBasic/Standard/Advanced/Self-Hosted
sql.metrics.statement_details.gateway_node.enabled
booleanfalsesave the gateway node for each statement fingerprint. If false, the value will be stored as 0.Basic/Standard/Advanced/Self-Hosted
sql.metrics.statement_details.index_recommendation_collection.enabled
booleantruegenerate an index recommendation for each fingerprint IDBasic/Standard/Advanced/Self-Hosted
sql.metrics.statement_details.max_mem_reported_idx_recommendations
integer5000the maximum number of reported index recommendation info stored in memoryBasic/Standard/Advanced/Self-Hosted
sql.metrics.statement_details.threshold
duration0sminimum execution time to cause statement statistics to be collected. If configured, no transaction stats are collected.Basic/Standard/Advanced/Self-Hosted
sql.metrics.transaction_details.enabled
booleantruecollect per-application transaction statisticsBasic/Standard/Advanced/Self-Hosted
sql.multiple_modifications_of_table.enabled
booleanfalseif true, allow statements containing multiple INSERT ON CONFLICT, UPSERT, UPDATE, or DELETE subqueries modifying the same table, at the risk of data corruption if the same row is modified multiple times by a single statement (multiple INSERT subqueries without ON CONFLICT cannot cause corruption and are always allowed)Basic/Standard/Advanced/Self-Hosted
sql.multiregion.drop_primary_region.enabled
booleantrueallows dropping the PRIMARY REGION of a database if it is the last regionBasic/Standard/Advanced/Self-Hosted
sql.notices.enabled
booleantrueenable notices in the server/client protocol being sentBasic/Standard/Advanced/Self-Hosted
sql.optimizer.uniqueness_checks_for_gen_random_uuid.enabled
booleanfalseif enabled, uniqueness checks may be planned for mutations of UUID columns updated with gen_random_uuid(); otherwise, uniqueness is assumed due to near-zero collision probabilityBasic/Standard/Advanced/Self-Hosted
sql.schema.telemetry.recurrence
string@weeklycron-tab recurrence for SQL schema telemetry jobAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
sql.spatial.experimental_box2d_comparison_operators.enabled
booleanfalseenables the use of certain experimental box2d comparison operatorsBasic/Standard/Advanced/Self-Hosted
sql.sqlcommenter.enabled
booleanfalseenables support for sqlcommenter. Key value parsed from sqlcommenter comments will be included in sql insights and sql logs. See https://google.github.io/sqlcommenter/ for more details.Basic/Standard/Advanced/Self-Hosted
sql.stats.activity.persisted_rows.max
integer200000maximum number of rows of statement and transaction activity that will be persisted in the system tablesBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_collection.enabled
booleantrueautomatic statistics collection modeBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_collection.fraction_stale_rows
float0.2target fraction of stale rows per table that will trigger a statistics refreshBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_collection.min_stale_rows
integer500target minimum number of stale rows per table that will trigger a statistics refreshBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_full_collection.enabled
booleantrueautomatic full statistics collection modeBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_partial_collection.enabled
booleantrueautomatic partial statistics collection modeBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_partial_collection.fraction_stale_rows
float0.05target fraction of stale rows per table that will trigger a partial statistics refreshBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_partial_collection.min_stale_rows
integer100target minimum number of stale rows per table that will trigger a partial statistics refreshBasic/Standard/Advanced/Self-Hosted
sql.stats.cleanup.recurrence
string@hourlycron-tab recurrence for SQL Stats cleanup jobBasic/Standard/Advanced/Self-Hosted
sql.stats.detailed_latency_metrics.enabled
booleanfalselabel latency metrics with the statement fingerprint. Workloads with tens of thousands of distinct query fingerprints should leave this setting false. (experimental, affects performance for workloads with high fingerprint cardinality)Basic/Standard/Advanced/Self-Hosted
sql.stats.error_on_concurrent_create_stats.enabled
booleanfalseset to true to error on concurrent CREATE STATISTICS jobs, instead of skipping themBasic/Standard/Advanced/Self-Hosted
sql.stats.flush.enabled
booleantrueif set, SQL execution statistics are periodically flushed to diskBasic/Standard/Advanced/Self-Hosted
sql.stats.flush.interval
duration10m0sthe interval at which SQL execution statistics are flushed to disk, this value must be less than or equal to 1 hourBasic/Standard/Advanced/Self-Hosted
sql.stats.forecasts.enabled
booleantruewhen true, enables generation of statistics forecasts by default for all tablesBasic/Standard/Advanced/Self-Hosted
sql.stats.forecasts.max_decrease
float0.3333333333333333the most a prediction is allowed to decrease, expressed as the minimum ratio of the prediction to the lowest prior observationBasic/Standard/Advanced/Self-Hosted
sql.stats.forecasts.min_goodness_of_fit
float0.95the minimum R² (goodness of fit) measurement required from all predictive models to use a forecastBasic/Standard/Advanced/Self-Hosted
sql.stats.forecasts.min_observations
integer3the minimum number of observed statistics required to produce a statistics forecastBasic/Standard/Advanced/Self-Hosted
sql.stats.histogram_buckets.count
integer200maximum number of histogram buckets to build during table statistics collectionBasic/Standard/Advanced/Self-Hosted
sql.stats.histogram_buckets.include_most_common_values.enabled
booleantruewhether to include most common values as histogram bucketsBasic/Standard/Advanced/Self-Hosted
sql.stats.histogram_buckets.max_fraction_most_common_values
float0.1maximum fraction of histogram buckets to use for most common valuesBasic/Standard/Advanced/Self-Hosted
sql.stats.histogram_collection.enabled
booleantruehistogram collection modeBasic/Standard/Advanced/Self-Hosted
sql.stats.histogram_samples.count
integer0number of rows sampled for histogram construction during table statistics collection. Not setting this or setting a value of 0 means that a reasonable sample size will be automatically picked based on the table size.Basic/Standard/Advanced/Self-Hosted
sql.stats.multi_column_collection.enabled
booleantruemulti-column statistics collection modeBasic/Standard/Advanced/Self-Hosted
sql.stats.non_default_columns.min_retention_period
duration24h0m0sminimum retention period for table statistics collected on non-default columnsBasic/Standard/Advanced/Self-Hosted
sql.stats.non_indexed_json_histograms.enabled
booleanfalseset to true to collect table statistics histograms on non-indexed JSON columnsBasic/Standard/Advanced/Self-Hosted
sql.stats.persisted_rows.max
integer1000000maximum number of rows of statement and transaction statistics that will be persisted in the system tables before compaction beginsBasic/Standard/Advanced/Self-Hosted
sql.stats.post_events.enabled
booleanfalseif set, an event is logged for every successful CREATE STATISTICS jobBasic/Standard/Advanced/Self-Hosted
sql.stats.response.max
integer20000the maximum number of statements and transaction stats returned in a CombinedStatements requestBasic/Standard/Advanced/Self-Hosted
sql.stats.response.show_internal.enabled
booleanfalsecontrols if statistics for internal executions should be returned by the CombinedStatements and if internal sessions should be returned by the ListSessions endpoints. These endpoints are used to display statistics on the SQL Activity pagesBasic/Standard/Advanced/Self-Hosted
sql.stats.system_tables.enabled
booleantruewhen true, enables use of statistics on system tables by the query optimizerBasic/Standard/Advanced/Self-Hosted
sql.stats.system_tables_autostats.enabled
booleantruewhen true, enables automatic collection of statistics on system tablesBasic/Standard/Advanced/Self-Hosted
sql.stats.virtual_computed_columns.enabled
booleantrueset to true to collect table statistics on virtual computed columnsBasic/Standard/Advanced/Self-Hosted
sql.telemetry.query_sampling.enabled
booleanfalsewhen set to true, executed queries will emit an event on the telemetry logging channelBasic/Standard/Advanced/Self-Hosted
sql.telemetry.query_sampling.internal.enabled
booleanfalsewhen set to true, internal queries will be sampled in telemetry loggingBasic/Standard/Advanced/Self-Hosted
sql.telemetry.query_sampling.max_event_frequency
integer8the max event frequency (events per second) at which we sample executions for telemetry, note that it is recommended that this value shares a log-line limit of 10 logs per second on the telemetry pipeline with all other telemetry events. If sampling mode is set to 'transaction', this value is ignored.Basic/Standard/Advanced/Self-Hosted
sql.telemetry.query_sampling.mode
enumerationstatementthe execution level used for telemetry sampling. If set to 'statement', events are sampled at the statement execution level. If set to 'transaction', events are sampled at the transaction execution level, i.e. all statements for a transaction will be logged and are counted together as one sampled event (events are still emitted one per statement). [statement = 0, transaction = 1]Basic/Standard/Advanced/Self-Hosted
sql.telemetry.transaction_sampling.max_event_frequency
integer8the max event frequency (events per second) at which we sample transactions for telemetry. If sampling mode is set to 'statement', this setting is ignored. In practice, this means that we only sample a transaction if 1/max_event_frequency seconds have elapsed since the last transaction was sampled.Basic/Standard/Advanced/Self-Hosted
sql.telemetry.transaction_sampling.statement_events_per_transaction.max
integer50the maximum number of statement events to log for every sampled transaction. Note that statements that are logged by force do not adhere to this limit.Basic/Standard/Advanced/Self-Hosted
sql.temp_object_cleaner.cleanup_interval
duration30m0show often to clean up orphaned temporary objectsBasic/Standard/Advanced/Self-Hosted
sql.temp_object_cleaner.wait_interval
duration30m0show long after creation a temporary object will be cleaned upBasic/Standard/Advanced/Self-Hosted
sql.log.all_statements.enabled
(alias: sql.trace.log_statement_execute)
booleanfalseset to true to enable logging of all executed statementsBasic/Standard/Advanced/Self-Hosted
sql.trace.stmt.enable_threshold
duration0senables tracing on all statements; statements executing for longer than this duration will have their trace logged (set to 0 to disable); note that enabling this may have a negative performance impact; this setting applies to individual statements within a transaction and is therefore finer-grained than sql.trace.txn.enable_thresholdBasic/Standard/Advanced/Self-Hosted
sql.trace.txn.enable_threshold
duration0senables transaction traces for transactions exceeding this duration, used with `sql.trace.txn.sample_rate`Basic/Standard/Advanced/Self-Hosted
sql.trace.txn.sample_rate
float1enables probabilistic transaction tracing. It should be used in conjunction with `sql.trace.txn.enable_threshold`. A percentage of transactions between 0 and 1.0 will have tracing enabled, and only those which exceed the configured threshold will be logged.Basic/Standard/Advanced/Self-Hosted
sql.ttl.changefeed_replication.disabled
booleanfalseif true, deletes issued by TTL will not be replicated via changefeeds (this setting will be ignored by changefeeds that have the ignore_disable_changefeed_replication option set; such changefeeds will continue to replicate all TTL deletes)Basic/Standard/Advanced/Self-Hosted
sql.ttl.default_delete_batch_size
integer100default amount of rows to delete in a single query during a TTL jobBasic/Standard/Advanced/Self-Hosted
sql.ttl.default_delete_rate_limit
integer100default delete rate limit (rows per second) per node for each TTL job. Use 0 to signify no rate limit.Basic/Standard/Advanced/Self-Hosted
sql.ttl.default_select_batch_size
integer500default amount of rows to select in a single query during a TTL jobBasic/Standard/Advanced/Self-Hosted
sql.ttl.default_select_rate_limit
integer0default select rate limit (rows per second) per node for each TTL job. Use 0 to signify no rate limit.Basic/Standard/Advanced/Self-Hosted
sql.ttl.job.enabled
booleantruewhether the TTL job is enabledBasic/Standard/Advanced/Self-Hosted
sql.txn.read_committed_isolation.enabled
booleantrueset to true to allow transactions to use the READ COMMITTED isolation level if specified by BEGIN/SET commandsBasic/Standard/Advanced/Self-Hosted
sql.txn.repeatable_read_isolation.enabled
(alias: sql.txn.snapshot_isolation.enabled)
booleanfalseset to true to allow transactions to use the REPEATABLE READ isolation level if specified by BEGIN/SET commandsBasic/Standard/Advanced/Self-Hosted
sql.txn_fingerprint_id_cache.capacity
integer100the maximum number of txn fingerprint IDs storedBasic/Standard/Advanced/Self-Hosted
sql.vecindex.stalled_op.timeout
duration100msamount of time before other vector index workers will assist with a stalled background fixupBasic/Standard/Advanced/Self-Hosted
storage.columnar_blocks.enabled
booleantrueset to true to enable columnar-blocks to store KVs in a columnar formatAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
storage.delete_compaction_excise.enabled
booleantrueset to false to direct Pebble to not partially excise sstables in delete-only compactionsAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
storage.ingest_split.enabled
booleantrueset to false to disable ingest-time splitting that lowers write-amplificationAdvanced/Self-Hosted
storage.ingestion.value_blocks.enabled
booleantrueset to true to enable writing of value blocks in ingestion sstablesBasic/Standard/Advanced/Self-Hosted
storage.max_sync_duration
duration20smaximum duration for disk operations; any operations that take longer than this setting trigger a warning log entry or process crashAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
storage.max_sync_duration.fatal.enabled
booleantrueif true, fatal the process when a disk operation exceeds storage.max_sync_durationBasic/Standard/Advanced/Self-Hosted
storage.sstable.compression_algorithm
enumerationfastestdetermines the compression algorithm to use when compressing sstable data blocks for use in a Pebble store (balanced,good are experimental); [snappy = 1, zstd = 2, none = 3, minlz = 4, fastest = 5, balanced = 6, good = 7]Advanced/Self-hosted (read-write); Basic/Standard (read-only)
storage.sstable.compression_algorithm_backup_storage
enumerationfastestdetermines the compression algorithm to use when compressing sstable data blocks for backup row data storage (fast,balanced,good are experimental); [snappy = 1, zstd = 2, none = 3, minlz = 4, fastest = 5, fast = 6, balanced = 7, good = 8]Advanced/Self-hosted (read-write); Basic/Standard (read-only)
storage.sstable.compression_algorithm_backup_transport
enumerationfastestdetermines the compression algorithm to use when compressing sstable data blocks for backup transport (fast,balanced,good are experimental); [snappy = 1, zstd = 2, none = 3, minlz = 4, fastest = 5, fast = 6, balanced = 7, good = 8]Advanced/Self-hosted (read-write); Basic/Standard (read-only)
storage.wal_failover.unhealthy_op_threshold
duration100msthe latency of a WAL write considered unhealthy and triggers a failover to a secondary WAL locationAdvanced/Self-Hosted
timeseries.storage.enabled
booleantrueif set, periodic timeseries data is stored within the cluster; disabling is not recommended unless you are storing the data elsewhereAdvanced/Self-Hosted
timeseries.storage.resolution_10s.ttl
duration240h0m0sthe maximum age of time series data stored at the 10 second resolution. Data older than this is subject to rollup and deletion.Advanced/Self-hosted (read-write); Basic/Standard (read-only)
timeseries.storage.resolution_30m.ttl
duration2160h0m0sthe maximum age of time series data stored at the 30 minute resolution. Data older than this is subject to deletion.Advanced/Self-hosted (read-write); Basic/Standard (read-only)
trace.debug_http_endpoint.enabled
(alias: trace.debug.enable)
booleanfalseif set, traces for recent requests can be seen at https://<ui>/debug/requestsBasic/Standard/Advanced/Self-Hosted
trace.opentelemetry.collector
stringaddress of an OpenTelemetry trace collector to receive traces using the otel gRPC protocol, as <host>:<port>. If no port is specified, 4317 will be used.Basic/Standard/Advanced/Self-Hosted
trace.snapshot.rate
duration0sif non-zero, interval at which background trace snapshots are capturedBasic/Standard/Advanced/Self-Hosted
trace.span_registry.enabled
booleanfalseif set, ongoing traces can be seen at https://<ui>/#/debug/tracezBasic/Standard/Advanced/Self-Hosted
trace.zipkin.collector
stringthe address of a Zipkin instance to receive traces, as <host>:<port>. If no port is specified, 9411 will be used.Basic/Standard/Advanced/Self-Hosted
ui.database_locality_metadata.enabled
booleantrueif enabled shows extended locality data about databases and tables in DB Console which can be expensive to computeBasic/Standard/Advanced/Self-Hosted
ui.default_timezone
stringthe default timezone used to format timestamps in the uiBasic/Standard/Advanced/Self-Hosted
ui.display_timezone
enumerationetc/utcthe timezone used to format timestamps in the ui. This setting is deprecated and will be removed in a future version. Use the 'ui.default_timezone' setting instead. 'ui.default_timezone' takes precedence over this setting. [etc/utc = 0, america/new_york = 1]Basic/Standard/Advanced/Self-Hosted
version
version25.3set the active cluster version in the format '<major>.<minor>'Basic/Standard/Advanced/Self-Hosted
diff --git a/src/current/_includes/cockroach-generated/release-25.3/sql/aggregates.md b/src/current/_includes/cockroach-generated/release-25.3/sql/aggregates.md new file mode 100644 index 00000000000..3213f0f02a8 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.3/sql/aggregates.md @@ -0,0 +1,579 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_agg(arg1: bool) → bool[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bool[]) → bool[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bytes) → bytes[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bytes[]) → bytes[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: date) → date[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: date[]) → date[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: decimal) → decimal[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: decimal[]) → decimal[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: float) → float[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: float[]) → float[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: inet) → inet[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: inet[]) → inet[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: int) → int[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: int[]) → int[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: interval) → interval[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: interval[]) → interval[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: string) → string[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: string[]) → string[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: time) → time[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: time[]) → time[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamp) → timestamp[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamp[]) → timestamp[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamptz) → timestamptz[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamptz[]) → timestamptz[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: uuid) → uuid[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: uuid[]) → uuid[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: anyenum) → anyenum[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: anyenum[]) → anyenum[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: box2d) → box2d[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: box2d[]) → box2d[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geography) → geography[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geography[]) → geography[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geometry) → geometry[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geometry[]) → geometry[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: jsonb) → jsonb[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: jsonb[]) → jsonb[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: oid) → oid[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: oid[]) → oid[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: pg_lsn) → pg_lsn[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: pg_lsn[]) → pg_lsn[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: refcursor) → refcursor[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: refcursor[]) → refcursor[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timetz) → timetz[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timetz[]) → timetz[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: tuple) → tuple[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: tuple[]) → tuple[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: varbit) → varbit[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: varbit[]) → varbit[][]

Aggregates the selected values into an array.

+
Immutable
array_cat_agg(arg1: bool[]) → bool[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: bytes[]) → bytes[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: date[]) → date[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: decimal[]) → decimal[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: float[]) → float[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: inet[]) → inet[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: int[]) → int[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: interval[]) → interval[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: string[]) → string[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: time[]) → time[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timestamp[]) → timestamp[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timestamptz[]) → timestamptz[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: uuid[]) → uuid[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: anyenum[]) → anyenum[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: box2d[]) → box2d[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: geography[]) → geography[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: geometry[]) → geometry[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: jsonb[]) → jsonb[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: oid[]) → oid[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: pg_lsn[]) → pg_lsn[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: refcursor[]) → refcursor[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timetz[]) → timetz[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: tuple[]) → tuple[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: varbit[]) → varbit[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
avg(arg1: decimal) → decimal

Calculates the average of the selected values.

+
Immutable
avg(arg1: float) → float

Calculates the average of the selected values.

+
Immutable
avg(arg1: int) → decimal

Calculates the average of the selected values.

+
Immutable
avg(arg1: interval) → interval

Calculates the average of the selected values.

+
Immutable
bit_and(arg1: int) → int

Calculates the bitwise AND of all non-null input values, or null if none.

+
Immutable
bit_and(arg1: varbit) → varbit

Calculates the bitwise AND of all non-null input values, or null if none.

+
Immutable
bit_or(arg1: int) → int

Calculates the bitwise OR of all non-null input values, or null if none.

+
Immutable
bit_or(arg1: varbit) → varbit

Calculates the bitwise OR of all non-null input values, or null if none.

+
Immutable
bool_and(arg1: bool) → bool

Calculates the boolean value of ANDing all selected values.

+
Immutable
bool_or(arg1: bool) → bool

Calculates the boolean value of ORing all selected values.

+
Immutable
concat_agg(arg1: bytes) → bytes

Concatenates all selected values.

+
Immutable
concat_agg(arg1: string) → string

Concatenates all selected values.

+
Immutable
corr(arg1: decimal, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: decimal, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: decimal, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
count(arg1: anyelement) → int

Calculates the number of selected elements.

+
Immutable
count_rows() → int

Calculates the number of rows.

+
Immutable
covar_pop(arg1: decimal, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: decimal, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: decimal, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
every(arg1: bool) → bool

Calculates the boolean value of ANDing all selected values.

+
Immutable
json_agg(arg1: anyelement) → jsonb

Aggregates values as a JSON or JSONB array.

+
Stable
json_object_agg(arg1: string, arg2: anyelement) → jsonb

Aggregates values as a JSON or JSONB object.

+
Stable
jsonb_agg(arg1: anyelement) → jsonb

Aggregates values as a JSON or JSONB array.

+
Stable
jsonb_object_agg(arg1: string, arg2: anyelement) → jsonb

Aggregates values as a JSON or JSONB object.

+
Stable
max(arg1: bool) → bool

Identifies the maximum selected value.

+
Immutable
max(arg1: bytes) → bytes

Identifies the maximum selected value.

+
Immutable
max(arg1: date) → date

Identifies the maximum selected value.

+
Immutable
max(arg1: decimal) → decimal

Identifies the maximum selected value.

+
Immutable
max(arg1: float) → float

Identifies the maximum selected value.

+
Immutable
max(arg1: inet) → inet

Identifies the maximum selected value.

+
Immutable
max(arg1: int) → int

Identifies the maximum selected value.

+
Immutable
max(arg1: interval) → interval

Identifies the maximum selected value.

+
Immutable
max(arg1: string) → string

Identifies the maximum selected value.

+
Immutable
max(arg1: time) → time

Identifies the maximum selected value.

+
Immutable
max(arg1: timestamp) → timestamp

Identifies the maximum selected value.

+
Immutable
max(arg1: timestamptz) → timestamptz

Identifies the maximum selected value.

+
Immutable
max(arg1: uuid) → uuid

Identifies the maximum selected value.

+
Immutable
max(arg1: anyenum) → anyenum

Identifies the maximum selected value.

+
Immutable
max(arg1: box2d) → box2d

Identifies the maximum selected value.

+
Immutable
max(arg1: collatedstring{*}) → collatedstring{*}

Identifies the maximum selected value.

+
Immutable
max(arg1: geography) → geography

Identifies the maximum selected value.

+
Immutable
max(arg1: geometry) → geometry

Identifies the maximum selected value.

+
Immutable
max(arg1: jsonb) → jsonb

Identifies the maximum selected value.

+
Immutable
max(arg1: oid) → oid

Identifies the maximum selected value.

+
Immutable
max(arg1: pg_lsn) → pg_lsn

Identifies the maximum selected value.

+
Immutable
max(arg1: timetz) → timetz

Identifies the maximum selected value.

+
Immutable
max(arg1: varbit) → varbit

Identifies the maximum selected value.

+
Immutable
min(arg1: bool) → bool

Identifies the minimum selected value.

+
Immutable
min(arg1: bytes) → bytes

Identifies the minimum selected value.

+
Immutable
min(arg1: date) → date

Identifies the minimum selected value.

+
Immutable
min(arg1: decimal) → decimal

Identifies the minimum selected value.

+
Immutable
min(arg1: float) → float

Identifies the minimum selected value.

+
Immutable
min(arg1: inet) → inet

Identifies the minimum selected value.

+
Immutable
min(arg1: int) → int

Identifies the minimum selected value.

+
Immutable
min(arg1: interval) → interval

Identifies the minimum selected value.

+
Immutable
min(arg1: string) → string

Identifies the minimum selected value.

+
Immutable
min(arg1: time) → time

Identifies the minimum selected value.

+
Immutable
min(arg1: timestamp) → timestamp

Identifies the minimum selected value.

+
Immutable
min(arg1: timestamptz) → timestamptz

Identifies the minimum selected value.

+
Immutable
min(arg1: uuid) → uuid

Identifies the minimum selected value.

+
Immutable
min(arg1: anyenum) → anyenum

Identifies the minimum selected value.

+
Immutable
min(arg1: box2d) → box2d

Identifies the minimum selected value.

+
Immutable
min(arg1: collatedstring{*}) → collatedstring{*}

Identifies the minimum selected value.

+
Immutable
min(arg1: geography) → geography

Identifies the minimum selected value.

+
Immutable
min(arg1: geometry) → geometry

Identifies the minimum selected value.

+
Immutable
min(arg1: jsonb) → jsonb

Identifies the minimum selected value.

+
Immutable
min(arg1: oid) → oid

Identifies the minimum selected value.

+
Immutable
min(arg1: pg_lsn) → pg_lsn

Identifies the minimum selected value.

+
Immutable
min(arg1: timetz) → timetz

Identifies the minimum selected value.

+
Immutable
min(arg1: varbit) → varbit

Identifies the minimum selected value.

+
Immutable
percentile_cont(arg1: float) → float

Continuous percentile: returns a float corresponding to the specified fraction in the ordering, interpolating between adjacent input floats if needed.

+
Immutable
percentile_cont(arg1: float) → interval

Continuous percentile: returns an interval corresponding to the specified fraction in the ordering, interpolating between adjacent input intervals if needed.

+
Immutable
percentile_cont(arg1: float[]) → float[]

Continuous percentile: returns floats corresponding to the specified fractions in the ordering, interpolating between adjacent input floats if needed.

+
Immutable
percentile_cont(arg1: float[]) → interval[]

Continuous percentile: returns intervals corresponding to the specified fractions in the ordering, interpolating between adjacent input intervals if needed.

+
Immutable
percentile_disc(arg1: float) → anyelement

Discrete percentile: returns the first input value whose position in the ordering equals or exceeds the specified fraction.

+
Immutable
percentile_disc(arg1: float[]) → anyelement

Discrete percentile: returns input values whose position in the ordering equals or exceeds the specified fractions.

+
Immutable
regr_avgx(arg1: decimal, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: decimal, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: decimal, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_count(arg1: decimal, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: decimal, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: decimal, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_intercept(arg1: decimal, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: decimal, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: decimal, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_r2(arg1: decimal, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: decimal, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: decimal, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_slope(arg1: decimal, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: decimal, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: decimal, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_sxx(arg1: decimal, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: decimal, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: decimal, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
sqrdiff(arg1: decimal) → decimal

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
sqrdiff(arg1: float) → float

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
sqrdiff(arg1: int) → decimal

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
st_collect(arg1: geometry) → geometry

Collects geometries into a GeometryCollection or multi-type as appropriate.

+
Immutable
st_extent(arg1: geometry) → box2d

Forms a Box2D that encapsulates all provided geometries.

+
Immutable
st_makeline(arg1: geometry) → geometry

Forms a LineString from Point, MultiPoint or LineStrings. Other shapes will be ignored.

+
Immutable
st_memcollect(arg1: geometry) → geometry

Collects geometries into a GeometryCollection or multi-type as appropriate.

+
Immutable
st_memunion(arg1: geometry) → geometry

Applies a spatial union to the geometries provided.

+
Immutable
st_union(arg1: geometry) → geometry

Applies a spatial union to the geometries provided.

+
Immutable
stddev(arg1: decimal) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev(arg1: float) → float

Calculates the standard deviation of the selected values.

+
Immutable
stddev(arg1: int) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: decimal) → decimal

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: float) → float

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: int) → decimal

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: decimal) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: float) → float

Calculates the standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: int) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
string_agg(arg1: bytes, arg2: bytes) → bytes

Concatenates all selected values using the provided delimiter.

+
Immutable
string_agg(arg1: string, arg2: string) → string

Concatenates all selected values using the provided delimiter.

+
Immutable
sum(arg1: decimal) → decimal

Calculates the sum of the selected values.

+
Immutable
sum(arg1: float) → float

Calculates the sum of the selected values.

+
Immutable
sum(arg1: int) → decimal

Calculates the sum of the selected values.

+
Immutable
sum(arg1: interval) → interval

Calculates the sum of the selected values.

+
Immutable
sum_int(arg1: int) → int

Calculates the sum of the selected values.

+
Immutable
var_pop(arg1: decimal) → decimal

Calculates the population variance of the selected values.

+
Immutable
var_pop(arg1: float) → float

Calculates the population variance of the selected values.

+
Immutable
var_pop(arg1: int) → decimal

Calculates the population variance of the selected values.

+
Immutable
var_samp(arg1: decimal) → decimal

Calculates the variance of the selected values.

+
Immutable
var_samp(arg1: float) → float

Calculates the variance of the selected values.

+
Immutable
var_samp(arg1: int) → decimal

Calculates the variance of the selected values.

+
Immutable
variance(arg1: decimal) → decimal

Calculates the variance of the selected values.

+
Immutable
variance(arg1: float) → float

Calculates the variance of the selected values.

+
Immutable
variance(arg1: int) → decimal

Calculates the variance of the selected values.

+
Immutable
xor_agg(arg1: bytes) → bytes

Calculates the bitwise XOR of the selected values.

+
Immutable
xor_agg(arg1: int) → int

Calculates the bitwise XOR of the selected values.

+
Immutable
+ diff --git a/src/current/_includes/cockroach-generated/release-25.3/sql/functions.md b/src/current/_includes/cockroach-generated/release-25.3/sql/functions.md new file mode 100644 index 00000000000..14ad4f756cc --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.3/sql/functions.md @@ -0,0 +1,3616 @@ +### Array functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_append(array: bool[], elem: bool) → bool[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: bytes[], elem: bytes) → bytes[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: date[], elem: date) → date[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: decimal[], elem: decimal) → decimal[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: float[], elem: float) → float[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: inet[], elem: inet) → inet[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: int[], elem: int) → int[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: interval[], elem: interval) → interval[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: string[], elem: string) → string[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: time[], elem: time) → time[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timestamp[], elem: timestamp) → timestamp[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timestamptz[], elem: timestamptz) → timestamptz[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: uuid[], elem: uuid) → uuid[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: anyenum[], elem: anyenum) → anyenum[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: box2d[], elem: box2d) → box2d[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: geography[], elem: geography) → geography[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: geometry[], elem: geometry) → geometry[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: jsonb[], elem: jsonb) → jsonb[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: oid[], elem: oid) → oid[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: pg_lsn[], elem: pg_lsn) → pg_lsn[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: refcursor[], elem: refcursor) → refcursor[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timetz[], elem: timetz) → timetz[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: tuple[], elem: tuple) → tuple[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: varbit[], elem: varbit) → varbit[]

Appends elem to array, returning the result.

+
Immutable
array_cat(left: bool[], right: bool[]) → bool[]

Appends two arrays.

+
Immutable
array_cat(left: bytes[], right: bytes[]) → bytes[]

Appends two arrays.

+
Immutable
array_cat(left: date[], right: date[]) → date[]

Appends two arrays.

+
Immutable
array_cat(left: decimal[], right: decimal[]) → decimal[]

Appends two arrays.

+
Immutable
array_cat(left: float[], right: float[]) → float[]

Appends two arrays.

+
Immutable
array_cat(left: inet[], right: inet[]) → inet[]

Appends two arrays.

+
Immutable
array_cat(left: int[], right: int[]) → int[]

Appends two arrays.

+
Immutable
array_cat(left: interval[], right: interval[]) → interval[]

Appends two arrays.

+
Immutable
array_cat(left: string[], right: string[]) → string[]

Appends two arrays.

+
Immutable
array_cat(left: time[], right: time[]) → time[]

Appends two arrays.

+
Immutable
array_cat(left: timestamp[], right: timestamp[]) → timestamp[]

Appends two arrays.

+
Immutable
array_cat(left: timestamptz[], right: timestamptz[]) → timestamptz[]

Appends two arrays.

+
Immutable
array_cat(left: uuid[], right: uuid[]) → uuid[]

Appends two arrays.

+
Immutable
array_cat(left: anyenum[], right: anyenum[]) → anyenum[]

Appends two arrays.

+
Immutable
array_cat(left: box2d[], right: box2d[]) → box2d[]

Appends two arrays.

+
Immutable
array_cat(left: geography[], right: geography[]) → geography[]

Appends two arrays.

+
Immutable
array_cat(left: geometry[], right: geometry[]) → geometry[]

Appends two arrays.

+
Immutable
array_cat(left: jsonb[], right: jsonb[]) → jsonb[]

Appends two arrays.

+
Immutable
array_cat(left: oid[], right: oid[]) → oid[]

Appends two arrays.

+
Immutable
array_cat(left: pg_lsn[], right: pg_lsn[]) → pg_lsn[]

Appends two arrays.

+
Immutable
array_cat(left: refcursor[], right: refcursor[]) → refcursor[]

Appends two arrays.

+
Immutable
array_cat(left: timetz[], right: timetz[]) → timetz[]

Appends two arrays.

+
Immutable
array_cat(left: tuple[], right: tuple[]) → tuple[]

Appends two arrays.

+
Immutable
array_cat(left: varbit[], right: varbit[]) → varbit[]

Appends two arrays.

+
Immutable
array_length(input: anyelement[], array_dimension: int) → int

Calculates the length of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
array_lower(input: anyelement[], array_dimension: int) → int

Calculates the minimum value of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
array_position(array: bool[], elem: bool) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: bool[], elem: bool, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: bytes[], elem: bytes) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: bytes[], elem: bytes, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: date[], elem: date) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: date[], elem: date, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: decimal[], elem: decimal) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: decimal[], elem: decimal, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: float[], elem: float) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: float[], elem: float, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: inet[], elem: inet) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: inet[], elem: inet, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: int[], elem: int) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: int[], elem: int, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: interval[], elem: interval) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: interval[], elem: interval, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: string[], elem: string) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: string[], elem: string, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: time[], elem: time) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: time[], elem: time, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: timestamp[], elem: timestamp) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timestamp[], elem: timestamp, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: timestamptz[], elem: timestamptz) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timestamptz[], elem: timestamptz, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: uuid[], elem: uuid) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: uuid[], elem: uuid, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: anyenum[], elem: anyenum) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: anyenum[], elem: anyenum, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: box2d[], elem: box2d) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: box2d[], elem: box2d, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: geography[], elem: geography) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: geography[], elem: geography, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: geometry[], elem: geometry) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: geometry[], elem: geometry, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: jsonb[], elem: jsonb) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: jsonb[], elem: jsonb, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: oid[], elem: oid) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: oid[], elem: oid, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: pg_lsn[], elem: pg_lsn) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: pg_lsn[], elem: pg_lsn, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: refcursor[], elem: refcursor) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: refcursor[], elem: refcursor, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: timetz[], elem: timetz) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timetz[], elem: timetz, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: tuple[], elem: tuple) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: tuple[], elem: tuple, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: varbit[], elem: varbit) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: varbit[], elem: varbit, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_positions(array: bool[], elem: bool) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: bytes[], elem: bytes) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: date[], elem: date) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: decimal[], elem: decimal) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: float[], elem: float) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: inet[], elem: inet) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: int[], elem: int) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: interval[], elem: interval) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: string[], elem: string) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: time[], elem: time) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timestamp[], elem: timestamp) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timestamptz[], elem: timestamptz) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: uuid[], elem: uuid) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: anyenum[], elem: anyenum) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: box2d[], elem: box2d) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: geography[], elem: geography) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: geometry[], elem: geometry) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: jsonb[], elem: jsonb) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: oid[], elem: oid) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: pg_lsn[], elem: pg_lsn) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: refcursor[], elem: refcursor) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timetz[], elem: timetz) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: tuple[], elem: tuple) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: varbit[], elem: varbit) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_prepend(elem: bool, array: bool[]) → bool[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: bytes, array: bytes[]) → bytes[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: date, array: date[]) → date[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: decimal, array: decimal[]) → decimal[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: float, array: float[]) → float[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: inet, array: inet[]) → inet[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: int, array: int[]) → int[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: interval, array: interval[]) → interval[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: string, array: string[]) → string[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: time, array: time[]) → time[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timestamp, array: timestamp[]) → timestamp[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timestamptz, array: timestamptz[]) → timestamptz[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: uuid, array: uuid[]) → uuid[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: anyenum, array: anyenum[]) → anyenum[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: box2d, array: box2d[]) → box2d[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: geography, array: geography[]) → geography[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: geometry, array: geometry[]) → geometry[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: jsonb, array: jsonb[]) → jsonb[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: oid, array: oid[]) → oid[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: pg_lsn, array: pg_lsn[]) → pg_lsn[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: refcursor, array: refcursor[]) → refcursor[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timetz, array: timetz[]) → timetz[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: tuple, array: tuple[]) → tuple[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: varbit, array: varbit[]) → varbit[]

Prepends elem to array, returning the result.

+
Immutable
array_remove(array: bool[], elem: bool) → bool[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: bytes[], elem: bytes) → bytes[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: date[], elem: date) → date[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: decimal[], elem: decimal) → decimal[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: float[], elem: float) → float[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: inet[], elem: inet) → inet[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: int[], elem: int) → int[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: interval[], elem: interval) → interval[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: string[], elem: string) → string[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: time[], elem: time) → time[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timestamp[], elem: timestamp) → timestamp[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timestamptz[], elem: timestamptz) → timestamptz[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: uuid[], elem: uuid) → uuid[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: anyenum[], elem: anyenum) → anyenum[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: box2d[], elem: box2d) → box2d[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: geography[], elem: geography) → geography[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: geometry[], elem: geometry) → geometry[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: jsonb[], elem: jsonb) → jsonb[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: oid[], elem: oid) → oid[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: pg_lsn[], elem: pg_lsn) → pg_lsn[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: refcursor[], elem: refcursor) → refcursor[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timetz[], elem: timetz) → timetz[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: tuple[], elem: tuple) → tuple[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: varbit[], elem: varbit) → varbit[]

Remove from array all elements equal to elem.

+
Immutable
array_replace(array: bool[], toreplace: bool, replacewith: bool) → bool[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: bytes[], toreplace: bytes, replacewith: bytes) → bytes[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: date[], toreplace: date, replacewith: date) → date[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: decimal[], toreplace: decimal, replacewith: decimal) → decimal[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: float[], toreplace: float, replacewith: float) → float[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: inet[], toreplace: inet, replacewith: inet) → inet[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: int[], toreplace: int, replacewith: int) → int[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: interval[], toreplace: interval, replacewith: interval) → interval[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: string[], toreplace: string, replacewith: string) → string[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: time[], toreplace: time, replacewith: time) → time[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timestamp[], toreplace: timestamp, replacewith: timestamp) → timestamp[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timestamptz[], toreplace: timestamptz, replacewith: timestamptz) → timestamptz[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: uuid[], toreplace: uuid, replacewith: uuid) → uuid[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: anyenum[], toreplace: anyenum, replacewith: anyenum) → anyenum[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: box2d[], toreplace: box2d, replacewith: box2d) → box2d[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: geography[], toreplace: geography, replacewith: geography) → geography[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: geometry[], toreplace: geometry, replacewith: geometry) → geometry[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: jsonb[], toreplace: jsonb, replacewith: jsonb) → jsonb[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: oid[], toreplace: oid, replacewith: oid) → oid[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: pg_lsn[], toreplace: pg_lsn, replacewith: pg_lsn) → pg_lsn[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: refcursor[], toreplace: refcursor, replacewith: refcursor) → refcursor[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timetz[], toreplace: timetz, replacewith: timetz) → timetz[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: tuple[], toreplace: tuple, replacewith: tuple) → tuple[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: varbit[], toreplace: varbit, replacewith: varbit) → varbit[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_to_string(input: anyelement[], delim: string) → string

Join an array into a string with a delimiter.

+
Stable
array_to_string(input: anyelement[], delimiter: string, null: string) → string

Join an array into a string with a delimiter, replacing NULLs with a null string.

+
Stable
array_upper(input: anyelement[], array_dimension: int) → int

Calculates the maximum value of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
cardinality(input: anyelement[]) → int

Calculates the number of elements contained in input

+
Immutable
jsonb_array_to_string_array(input: jsonb) → string[]

Convert a JSONB array into a string array.

+
Immutable
string_to_array(str: string, delimiter: string) → string[]

Split a string into components on a delimiter.

+
Immutable
string_to_array(str: string, delimiter: string, null: string) → string[]

Split a string into components on a delimiter with a specified string to consider NULL.

+
Immutable
+ +### BOOL functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
ilike_escape(unescaped: string, pattern: string, escape: string) → bool

Matches case insensitively unescaped with pattern using escape as an escape token.

+
Immutable
inet_contained_by_or_equals(val: inet, container: inet) → bool

Test for subnet inclusion or equality, using only the network parts of the addresses. The host part of the addresses is ignored.

+
Immutable
inet_contains_or_equals(container: inet, val: inet) → bool

Test for subnet inclusion or equality, using only the network parts of the addresses. The host part of the addresses is ignored.

+
Immutable
inet_same_family(val: inet, val: inet) → bool

Checks if two IP addresses are of the same IP family.

+
Immutable
like_escape(unescaped: string, pattern: string, escape: string) → bool

Matches unescaped with pattern using escape as an escape token.

+
Immutable
not_ilike_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped not matches case insensitively with pattern using escape as an escape token.

+
Immutable
not_like_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped not matches with pattern using escape as an escape token.

+
Immutable
not_similar_to_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped not matches with pattern using escape as an escape token.

+
Immutable
+ +### Comparison functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
greatest(anyelement...) → anyelement

Returns the element with the greatest value.

+
Immutable
least(anyelement...) → anyelement

Returns the element with the lowest value.

+
Immutable
num_nonnulls(any...) → int

Returns the number of nonnull arguments.

+
Immutable
num_nulls(any...) → int

Returns the number of null arguments.

+
Immutable
+ +### Cryptographic functions + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
crypt(password: string, salt: string) → string

Generates a hash based on a password and salt. The hash algorithm and number of rounds if applicable are encoded in the salt.

+
Immutable
decrypt(data: bytes, key: bytes, type: string) → bytes

Decrypt data with key using the cipher method specified by type.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
decrypt_iv(data: bytes, key: bytes, iv: bytes, type: string) → bytes

Decrypt data with key using the cipher method specified by type. If the mode is CBC, the provided iv will be used. Otherwise, it will be ignored.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
digest(data: bytes, type: string) → bytes

Computes a binary hash of the given data. type is the algorithm to use (md5, sha1, sha224, sha256, sha384, or sha512).

+
Immutable
digest(data: string, type: string) → bytes

Computes a binary hash of the given data. type is the algorithm to use (md5, sha1, sha224, sha256, sha384, or sha512).

+
Immutable
encrypt(data: bytes, key: bytes, type: string) → bytes

Encrypt data with key using the cipher method specified by type.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
encrypt_iv(data: bytes, key: bytes, iv: bytes, type: string) → bytes

Encrypt data with key using the cipher method specified by type. If the mode is CBC, the provided iv will be used. Otherwise, it will be ignored.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
gen_random_bytes(count: int) → bytes

Returns count cryptographically strong random bytes. At most 1024 bytes can be extracted at a time.

+
Volatile
gen_salt(type: string) → string

Generates a salt for input into the crypt function using the default number of rounds.

+
Volatile
gen_salt(type: string, iter_count: int) → string

Generates a salt for input into the crypt function using iter_count number of rounds.

+
Volatile
hmac(data: bytes, key: bytes, type: string) → bytes

Calculates hashed MAC for data with key key. type is the same as in digest().

+
Immutable
hmac(data: string, key: string, type: string) → bytes

Calculates hashed MAC for data with key key. type is the same as in digest().

+
Immutable
+ +### DECIMAL functions + + + + + +
Function → ReturnsDescriptionVolatility
hlc_to_timestamp(hlc: decimal) → timestamptz

Returns a TimestampTZ representation of a CockroachDB HLC in decimal form.

+

Note that a TimestampTZ has less precision than a CockroachDB HLC. It is intended as +a convenience function to display HLCs in a print-friendly form. Use the decimal +value if you rely on the HLC for accuracy.

+
Immutable
+ +### Date and time functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
age(end: timestamptz, begin: timestamptz) → interval

Calculates the interval between begin and end, normalized into years, months and days.

+

Note this may not be an accurate time span since years and months are normalized +from days, and years and months are out of context. To avoid normalizing days into +months and years, use the timestamptz subtraction operator.

+
Immutable
age(val: timestamptz) → interval

Calculates the interval between val and the current time, normalized into years, months and days.

+

Note this may not be an accurate time span since years and months are normalized +from days, and years and months are out of context. To avoid normalizing days into +months and years, use now() - timestamptz.

+
Stable
clock_timestamp() → timestamp

Returns the current system time on one of the cluster nodes.

+
Volatile
clock_timestamp() → timestamptz

Returns the current system time on one of the cluster nodes.

+
Volatile
current_date() → date

Returns the date of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
current_timestamp(precision: int) → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp(precision: int) → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp(precision: int) → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
date_part(element: string, input: date) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: interval) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, +month, day, hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: time) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: timestamp) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: timestamptz) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Stable
date_part(element: string, input: timetz) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Immutable
date_trunc(element: string, input: date) → timestamptz

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: interval) → interval

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: time) → interval

Truncates input to precision element. Sets all fields that are less +significant than element to zero.

+

Compatible elements: hour, minute, second, millisecond, microsecond.

+
Immutable
date_trunc(element: string, input: timestamp) → timestamp

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Immutable
date_trunc(element: string, input: timestamptz) → timestamptz

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: timestamptz, timezone: string) → timestamptz

Truncates input to precision element in the specified timezone. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
experimental_follower_read_timestamp() → timestamptz

Same as follower_read_timestamp. This name is deprecated.

+
Volatile
experimental_strftime(input: date, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strftime(input: timestamp, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strftime(input: timestamptz, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strptime(input: string, format: string) → timestamptz

Returns input as a timestamptz using format (which uses standard strptime formatting).

+
Immutable
extract(element: string, input: date) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: interval) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, +month, day, hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: time) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: timestamp) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: timestamptz) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Stable
extract(element: string, input: timetz) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Immutable
extract_duration(element: string, input: interval) → int

Extracts element from input. +Compatible elements: hour, minute, second, millisecond, microsecond. +This is deprecated in favor of extract which supports duration.

+
Immutable
follower_read_timestamp() → timestamptz

Returns a timestamp which is very likely to be safe to perform +against a follower replica.

+

This function is intended to be used with an AS OF SYSTEM TIME clause to perform +historical reads against a time which is recent but sufficiently old for reads +to be performed against the closest replica as opposed to the currently +leaseholder for a given range.

+

Note that this function requires an enterprise license on a CCL distribution to +return a result that is less likely the closest replica. It is otherwise +hardcoded as -4.8s from the statement time, which may not result in reading from the +nearest replica.

+
Volatile
localtimestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtimestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp(precision: int) → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp(precision: int) → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtimestamp(precision: int) → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
make_date(year: int, month: int, day: int) → date

Create date (formatted according to ISO 8601) from year, month, and day fields (negative years signify BC).

+
Immutable
make_timestamp(year: int, month: int, day: int, hour: int, min: int, sec: float) → timestamp

Create timestamp (formatted according to ISO 8601) from year, month, day, hour, minute, and seconds fields (negative years signify BC).

+
Immutable
make_timestamptz(year: int, month: int, day: int, hour: int, min: int, sec: float) → timestamptz

Create timestamp (formatted according to ISO 8601) with time zone from year, month, day, hour, minute and seconds fields (negative years signify BC). If timezone is not specified, the current time zone is used.

+
Stable
make_timestamptz(year: int, month: int, day: int, hour: int, min: int, sec: float, timezone: string) → timestamptz

Create timestamp (formatted according to ISO 8601) with time zone from year, month, day, hour, minute and seconds fields (negative years signify BC). If timezone is not specified, the current time zone is used.

+
Stable
now() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
now() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
now() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
overlaps(s1: date, e1: date, s1: date, e2: date) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: date, e1: interval, s1: date, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: time, e1: interval, s1: time, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: time, e1: time, s1: time, e2: time) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamp, e1: interval, s1: timestamp, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamp, e1: timestamp, s1: timestamp, e2: timestamp) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamptz, e1: interval, s1: timestamptz, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Stable
overlaps(s1: timestamptz, e1: timestamptz, s1: timestamptz, e2: timestamptz) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timetz, e1: interval, s1: timetz, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timetz, e1: timetz, s1: timetz, e2: timetz) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
statement_timestamp() → timestamp

Returns the start time of the current statement.

+
Stable
statement_timestamp() → timestamptz

Returns the start time of the current statement.

+
Stable
strftime(input: date, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strftime(input: timestamp, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strftime(input: timestamptz, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strptime(input: string, format: string) → timestamptz

Returns input as a timestamptz using format (which uses standard strptime formatting).

+
Immutable
timeofday() → string

Returns the current system time on one of the cluster nodes as a string.

+
Stable
timezone(timezone: string, time: time) → timetz

Treat given time without time zone as located in the specified time zone.

+
Stable
timezone(timezone: string, timestamp: timestamp) → timestamptz

Treat given time stamp without time zone as located in the specified time zone.

+
Immutable
timezone(timezone: string, timestamptz: timestamptz) → timestamp

Convert given time stamp with time zone to the new time zone, with no time zone designation.

+
Immutable
timezone(timezone: string, timestamptz_string: string) → timestamp

Convert given time stamp with time zone to the new time zone, with no time zone designation.

+
Stable
timezone(timezone: string, timetz: timetz) → timetz

Convert given time with time zone to the new time zone.

+
Stable
to_char(date: date) → string

Convert a date to a string assuming the ISO, MDY DateStyle.

+
Immutable
to_char(date: date, format: string) → string

Convert a timestamp with time zone to a string using the given format.

+
Stable
to_char(interval: interval) → string

Convert an interval to a string assuming the Postgres IntervalStyle.

+
Immutable
to_char(interval: interval, format: string) → string

Convert an interval to a string using the given format.

+
Stable
to_char(timestamp: timestamp) → string

Convert a timestamp to a string assuming the ISO, MDY DateStyle.

+
Immutable
to_char(timestamp: timestamp, format: string) → string

Convert a timestamp to a string using the given format.

+
Stable
to_char(timestamptz: timestamptz, format: string) → string

Convert a timestamp with time zone to a string using the given format.

+
Stable
to_timestamp(timestamp: float) → timestamptz

Convert Unix epoch (seconds since 1970-01-01 00:00:00+00) to timestamp with time zone.

+
Immutable
transaction_timestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
transaction_timestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
transaction_timestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
with_max_staleness(max_staleness: interval) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp within the staleness +bound that allows execution of the reads at the nearest available replica without blocking.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_max_staleness(max_staleness: interval, nearest_only: bool) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp within the staleness +bound that allows execution of the reads at the nearest available replica without blocking.

+

If nearest_only is set to true, reads that cannot be served using the nearest +available replica will error.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_min_timestamp(min_timestamp: timestamptz) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp before the min_timestamp +that allows execution of the reads at the nearest available replica without blocking.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_min_timestamp(min_timestamp: timestamptz, nearest_only: bool) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp before the min_timestamp +that allows execution of the reads at the nearest available replica without blocking.

+

If nearest_only is set to true, reads that cannot be served using the nearest +available replica will error.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
+ +### Enum functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
enum_first(val: anyenum) → anyenum

Returns the first value of the input enum type.

+
Stable
enum_last(val: anyenum) → anyenum

Returns the last value of the input enum type.

+
Stable
enum_range(lower: anyenum, upper: anyenum) → anyenum[]

Returns all values of the input enum in an ordered array between the two arguments (inclusive).

+
Stable
enum_range(val: anyenum) → anyenum[]

Returns all values of the input enum in an ordered array.

+
Stable
+ +### FLOAT functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
abs(val: decimal) → decimal

Calculates the absolute value of val.

+
Immutable
abs(val: float) → float

Calculates the absolute value of val.

+
Immutable
abs(val: int) → int

Calculates the absolute value of val.

+
Immutable
acos(val: float) → float

Calculates the inverse cosine of val.

+
Immutable
acosd(val: float) → float

Calculates the inverse cosine of val with the result in degrees

+
Immutable
acosh(val: float) → float

Calculates the inverse hyperbolic cosine of val.

+
Immutable
asin(val: float) → float

Calculates the inverse sine of val.

+
Immutable
asind(val: float) → float

Calculates the inverse sine of val with the result in degrees.

+
Immutable
asinh(val: float) → float

Calculates the inverse hyperbolic sine of val.

+
Immutable
atan(val: float) → float

Calculates the inverse tangent of val.

+
Immutable
atan2(x: float, y: float) → float

Calculates the inverse tangent of x/y.

+
Immutable
atan2d(x: float, y: float) → float

Calculates the inverse tangent of x/y with the result in degrees

+
Immutable
atand(val: float) → float

Calculates the inverse tangent of val with the result in degrees.

+
Immutable
atanh(val: float) → float

Calculates the inverse hyperbolic tangent of val.

+
Immutable
cbrt(val: decimal) → decimal

Calculates the cube root (∛) of val.

+
Immutable
cbrt(val: float) → float

Calculates the cube root (∛) of val.

+
Immutable
ceil(val: decimal) → decimal

Calculates the smallest integer not smaller than val.

+
Immutable
ceil(val: float) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceil(val: int) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: decimal) → decimal

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: float) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: int) → float

Calculates the smallest integer not smaller than val.

+
Immutable
cos(val: float) → float

Calculates the cosine of val.

+
Immutable
cosd(val: float) → float

Calculates the cosine of val where val is in degrees.

+
Immutable
cosh(val: float) → float

Calculates the hyperbolic cosine of val.

+
Immutable
cot(val: float) → float

Calculates the cotangent of val.

+
Immutable
cotd(val: float) → float

Calculates the cotangent of val where val is in degrees.

+
Immutable
degrees(val: float) → float

Converts val as a radian value to a degree value.

+
Immutable
div(x: decimal, y: decimal) → decimal

Calculates the integer quotient of x/y.

+
Immutable
div(x: float, y: float) → float

Calculates the integer quotient of x/y.

+
Immutable
div(x: int, y: int) → int

Calculates the integer quotient of x/y.

+
Immutable
exp(val: decimal) → decimal

Calculates e ^ val.

+
Immutable
exp(val: float) → float

Calculates e ^ val.

+
Immutable
floor(val: decimal) → decimal

Calculates the largest integer not greater than val.

+
Immutable
floor(val: float) → float

Calculates the largest integer not greater than val.

+
Immutable
floor(val: int) → float

Calculates the largest integer not greater than val.

+
Immutable
isnan(val: decimal) → bool

Returns true if val is NaN, false otherwise.

+
Immutable
isnan(val: float) → bool

Returns true if val is NaN, false otherwise.

+
Immutable
ln(val: decimal) → decimal

Calculates the natural log of val.

+
Immutable
ln(val: float) → float

Calculates the natural log of val.

+
Immutable
log(b: decimal, x: decimal) → decimal

Calculates the base b log of x.

+
Immutable
log(b: float, x: float) → float

Calculates the base b log of x.

+
Immutable
log(val: decimal) → decimal

Calculates the base 10 log of val.

+
Immutable
log(val: float) → float

Calculates the base 10 log of val.

+
Immutable
mod(x: decimal, y: decimal) → decimal

Calculates x%y.

+
Immutable
mod(x: float, y: float) → float

Calculates x%y.

+
Immutable
mod(x: int, y: int) → int

Calculates x%y.

+
Immutable
pi() → float

Returns the value for pi (3.141592653589793).

+
Immutable
pow(x: decimal, y: decimal) → decimal

Calculates x^y.

+
Immutable
pow(x: float, y: float) → float

Calculates x^y.

+
Immutable
pow(x: int, y: int) → int

Calculates x^y.

+
Immutable
power(x: decimal, y: decimal) → decimal

Calculates x^y.

+
Immutable
power(x: float, y: float) → float

Calculates x^y.

+
Immutable
power(x: int, y: int) → int

Calculates x^y.

+
Immutable
radians(val: float) → float

Converts val as a degree value to a radians value.

+
Immutable
random() → float

Returns a random floating-point number between 0 (inclusive) and 1 (exclusive). Note that the value contains at most 53 bits of randomness.

+
Volatile
round(input: decimal, decimal_accuracy: int) → decimal

Keeps decimal_accuracy number of figures to the right of the zero position in input using half away from zero rounding. If decimal_accuracy is not in the range -2^31…(2^31-1), the results are undefined.

+
Immutable
round(input: float, decimal_accuracy: int) → float

Keeps decimal_accuracy number of figures to the right of the zero position in input using half to even (banker’s) rounding.

+
Immutable
round(val: decimal) → decimal

Rounds val to the nearest integer, half away from zero: round(+/-2.4) = +/-2, round(+/-2.5) = +/-3.

+
Immutable
round(val: float) → float

Rounds val to the nearest integer using half to even (banker’s) rounding.

+
Immutable
setseed(seed: float) → void

Sets the seed for subsequent random() calls in this session (value between -1.0 and 1.0, inclusive). There are no guarantees as to how this affects the seed of random() calls that appear in the same query as setseed().

+
Volatile
sign(val: decimal) → decimal

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sign(val: float) → float

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sign(val: int) → int

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sin(val: float) → float

Calculates the sine of val.

+
Immutable
sind(val: float) → float

Calculates the sine of val where val is in degrees.

+
Immutable
sinh(val: float) → float

Calculates the hyperbolic sine of val.

+
Immutable
sqrt(val: decimal) → decimal

Calculates the square root of val.

+
Immutable
sqrt(val: float) → float

Calculates the square root of val.

+
Immutable
tan(val: float) → float

Calculates the tangent of val.

+
Immutable
tand(val: float) → float

Calculates the tangent of val where val is in degrees.

+
Immutable
tanh(val: float) → float

Calculates the hyperbolic tangent of val.

+
Immutable
trunc(val: decimal) → decimal

Truncates the decimal values of val.

+
Immutable
trunc(val: decimal, scale: int) → decimal

Truncates val to scale decimal places.

+
Immutable
trunc(val: float) → float

Truncates the decimal values of val.

+
Immutable
+ +### Full Text Search functions + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
phraseto_tsquery(config: string, text: string) → tsquery

Converts text to a tsquery, normalizing words according to the specified configuration. The <-> operator is inserted between each token in the input.

+
Immutable
phraseto_tsquery(text: string) → tsquery

Converts text to a tsquery, normalizing words according to the default configuration. The <-> operator is inserted between each token in the input.

+
Stable
plainto_tsquery(config: string, text: string) → tsquery

Converts text to a tsquery, normalizing words according to the specified configuration. The & operator is inserted between each token in the input.

+
Immutable
plainto_tsquery(text: string) → tsquery

Converts text to a tsquery, normalizing words according to the default configuration. The & operator is inserted between each token in the input.

+
Stable
to_tsquery(config: string, text: string) → tsquery

Converts the input text into a tsquery by normalizing each word in the input according to the specified configuration. The input must already be formatted like a tsquery, in other words, subsequent tokens must be connected by a tsquery operator (&, |, <->, !).

+
Immutable
to_tsquery(text: string) → tsquery

Converts the input text into a tsquery by normalizing each word in the input according to the default configuration. The input must already be formatted like a tsquery, in other words, subsequent tokens must be connected by a tsquery operator (&, |, <->, !).

+
Stable
to_tsvector(config: string, text: string) → tsvector

Converts text to a tsvector, normalizing words according to the specified configuration. Position information is included in the result.

+
Immutable
to_tsvector(text: string) → tsvector

Converts text to a tsvector, normalizing words according to the default configuration. Position information is included in the result.

+
Stable
ts_parse(parser_name: string, document: string) → tuple{int AS tokid, string AS token}

ts_parse parses the given document and returns a series of records, one for each token produced by parsing. Each record includes a tokid showing the assigned token type and a token which is the text of the token.

+
Stable
ts_rank(vector: tsvector, query: tsquery) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(vector: tsvector, query: tsquery, normalization: int) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(weights: float[], vector: tsvector, query: tsquery) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(weights: float[], vector: tsvector, query: tsquery, normalization: int) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
+ +### Fuzzy String Matching functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
levenshtein(source: string, target: string) → int

Calculates the Levenshtein distance between two strings. Maximum input length is 255 characters.

+
Immutable
levenshtein(source: string, target: string, ins_cost: int, del_cost: int, sub_cost: int) → int

Calculates the Levenshtein distance between two strings. The cost parameters specify how much to charge for each edit operation. Maximum input length is 255 characters.

+
Immutable
metaphone(source: string, max_output_length: int) → string

Converts a string to its Metaphone code. Maximum input length is 255 characters.

+
Immutable
soundex(source: string) → string

Converts a string to its Soundex code.

+
Immutable
+ +### ID generation functions + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
experimental_uuid_v4() → bytes

Returns a UUID.

+
Volatile
gen_random_ulid() → uuid

Generates a random ULID and returns it as a value of UUID type.

+
Volatile
gen_random_uuid() → uuid

Generates a random version 4 UUID, and returns it as a value of UUID type.

+
Volatile
unique_rowid() → int

Returns a unique ID used by CockroachDB to generate unique row IDs if a Primary Key isn’t defined for the table. The value is a combination of the insert timestamp and the ID of the node executing the statement, which guarantees this combination is globally unique. However, there can be gaps and the order is not completely guaranteed.

+
Volatile
unordered_unique_rowid() → int

Returns a unique ID. The value is a combination of the insert timestamp (bit-reversed) and the ID of the node executing the statement, which guarantees this combination is globally unique. The way it is generated is statistically likely to not have any ordering relative to previously generated values.

+
Volatile
uuid_generate_v1() → uuid

Generates a version 1 UUID, and returns it as a value of UUID type. To avoid exposing the server’s real MAC address, this uses a random MAC address and a timestamp. Essentially, this is an alias for uuid_generate_v1mc.

+
Volatile
uuid_generate_v1mc() → uuid

Generates a version 1 UUID, and returns it as a value of UUID type. This uses a random MAC address and a timestamp.

+
Volatile
uuid_generate_v3(namespace: uuid, name: string) → uuid

Generates a version 3 UUID in the given namespace using the specified input name, with md5 as the hashing method. The namespace should be one of the special constants produced by the uuid_ns_*() functions.

+
Immutable
uuid_generate_v4() → uuid

Generates a random version 4 UUID, and returns it as a value of UUID type.

+
Volatile
uuid_generate_v5(namespace: uuid, name: string) → uuid

Generates a version 5 UUID in the given namespace using the specified input name. This is similar to a version 3 UUID, except it uses SHA-1 for hashing.

+
Immutable
uuid_nil() → uuid

Returns a nil UUID constant.

+
Immutable
uuid_ns_dns() → uuid

Returns a constant designating the DNS namespace for UUIDs.

+
Immutable
uuid_ns_oid() → uuid

Returns a constant designating the ISO object identifier (OID) namespace for UUIDs. These are unrelated to the OID type used internally in the database.

+
Immutable
uuid_ns_url() → uuid

Returns a constant designating the URL namespace for UUIDs.

+
Immutable
uuid_ns_x500() → uuid

Returns a constant designating the X.500 distinguished name (DN) namespace for UUIDs.

+
Immutable
uuid_v4() → bytes

Returns a UUID.

+
Volatile
+ +### INET functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
abbrev(val: inet) → string

Converts the combined IP address and prefix length to an abbreviated display format as text. For INET types, this will omit the prefix length if it’s not the default (32 for IPv4, 128 for IPv6).

+

For example, abbrev('192.168.1.2/24') returns '192.168.1.2/24'

+
Immutable
broadcast(val: inet) → inet

Gets the broadcast address for the network address represented by the value.

+

For example, broadcast('192.168.1.2/24') returns '192.168.1.255/24'

+
Immutable
family(val: inet) → int

Extracts the IP family of the value; 4 for IPv4, 6 for IPv6.

+

For example, family('::1') returns 6

+
Immutable
host(val: inet) → string

Extracts the address part of the combined address/prefixlen value as text.

+

For example, host('192.168.1.2/16') returns '192.168.1.2'

+
Immutable
hostmask(val: inet) → inet

Creates an IP host mask corresponding to the prefix length in the value.

+

For example, hostmask('192.168.1.2/16') returns '0.0.255.255'

+
Immutable
masklen(val: inet) → int

Retrieves the prefix length stored in the value.

+

For example, masklen('192.168.1.2/16') returns 16

+
Immutable
netmask(val: inet) → inet

Creates an IP network mask corresponding to the prefix length in the value.

+

For example, netmask('192.168.1.2/16') returns '255.255.0.0'

+
Immutable
set_masklen(val: inet, prefixlen: int) → inet

Sets the prefix length of val to prefixlen.

+

For example, set_masklen('192.168.1.2', 16) returns '192.168.1.2/16'.

+
Immutable
+ +### INT functions + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
crc32c(bytes...) → int

Calculates the CRC-32 hash using the Castagnoli polynomial.

+
Leakproof
crc32c(string...) → int

Calculates the CRC-32 hash using the Castagnoli polynomial.

+
Leakproof
crc32ieee(bytes...) → int

Calculates the CRC-32 hash using the IEEE polynomial.

+
Leakproof
crc32ieee(string...) → int

Calculates the CRC-32 hash using the IEEE polynomial.

+
Leakproof
fnv32(bytes...) → int

Calculates the 32-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv32(string...) → int

Calculates the 32-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv32a(bytes...) → int

Calculates the 32-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv32a(string...) → int

Calculates the 32-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv64(bytes...) → int

Calculates the 64-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv64(string...) → int

Calculates the 64-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv64a(bytes...) → int

Calculates the 64-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv64a(string...) → int

Calculates the 64-bit FNV-1a hash value of a set of values.

+
Leakproof
width_bucket(operand: decimal, b1: decimal, b2: decimal, count: int) → int

Returns the bucket number to which operand would be assigned in a histogram having count equal-width buckets spanning the range b1 to b2. Returns 0 or count+1 for an input outside that range.

+
Immutable
width_bucket(operand: int, b1: int, b2: int, count: int) → int

Returns the bucket number to which operand would be assigned in a histogram having count equal-width buckets spanning the range b1 to b2.

+
Immutable
width_bucket(operand: anyelement, thresholds: anyelement[]) → int

Returns the bucket number to which operand would be assigned given an array listing the lower bounds of the buckets; returns 0 for an input less than the first lower bound. The thresholds array must be sorted, smallest first, or unexpected results will be obtained.

+
Immutable
+ +### JSONB functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_to_json(array: anyelement[]) → jsonb

Returns the array as JSON or JSONB.

+
Stable
array_to_json(array: anyelement[], pretty_bool: bool) → jsonb

Returns the array as JSON or JSONB.

+
Stable
json_array_elements(input: jsonb) → jsonb

Expands a JSON array to a set of JSON values.

+
Immutable
json_array_elements_text(input: jsonb) → string

Expands a JSON array to a set of text values.

+
Immutable
json_array_length(json: jsonb) → int

Returns the number of elements in the outermost JSON or JSONB array.

+
Immutable
json_build_array(any...) → jsonb

Builds a possibly-heterogeneously-typed JSON or JSONB array out of a variadic argument list.

+
Stable
json_build_object(any...) → jsonb

Builds a JSON object out of a variadic argument list.

+
Stable
json_each(input: jsonb) → tuple{string AS key, jsonb AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs.

+
Immutable
json_each_text(input: jsonb) → tuple{string AS key, string AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs. The returned values will be of type text.

+
Immutable
json_extract_path(jsonb, string...) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
json_extract_path_text(jsonb, string...) → string

Returns the JSON value as text pointed to by the variadic arguments.

+
Immutable
json_object(keys: string[], values: string[]) → jsonb

This form of json_object takes keys and values pairwise from two separate arrays. In all other respects it is identical to the one-argument form.

+
Immutable
json_object(texts: string[]) → jsonb

Builds a JSON or JSONB object out of a text array. The array must have exactly one dimension with an even number of members, in which case they are taken as alternating key/value pairs.

+
Immutable
json_populate_record(base: anyelement, from_json: jsonb) → anyelement

Expands the object in from_json to a row whose columns match the record type defined by base.

+
Stable
json_populate_recordset(base: anyelement, from_json: jsonb) → anyelement

Expands the outermost array of objects in from_json to a set of rows whose columns match the record type defined by base.

+
Stable
json_remove_path(val: jsonb, path: string[]) → jsonb

Remove the specified path from the JSON object.

+
Immutable
json_set(val: jsonb, path: string[], to: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
json_set(val: jsonb, path: string[], to: jsonb, create_missing: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If create_missing is false, new keys will not be inserted to objects and values will not be prepended or appended to arrays.

+
Immutable
json_strip_nulls(from_json: jsonb) → jsonb

Returns from_json with all object fields that have null values omitted. Other null values are untouched.

+
Immutable
json_typeof(val: jsonb) → string

Returns the type of the outermost JSON value as a text string.

+
Immutable
json_valid(string: string) → bool

Returns whether the given string is valid JSON.

+
Immutable
jsonb_array_elements(input: jsonb) → jsonb

Expands a JSON array to a set of JSON values.

+
Immutable
jsonb_array_elements_text(input: jsonb) → string

Expands a JSON array to a set of text values.

+
Immutable
jsonb_array_length(json: jsonb) → int

Returns the number of elements in the outermost JSON or JSONB array.

+
Immutable
jsonb_build_array(any...) → jsonb

Builds a possibly-heterogeneously-typed JSON or JSONB array out of a variadic argument list.

+
Stable
jsonb_build_object(any...) → jsonb

Builds a JSON object out of a variadic argument list.

+
Stable
jsonb_each(input: jsonb) → tuple{string AS key, jsonb AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs.

+
Immutable
jsonb_each_text(input: jsonb) → tuple{string AS key, string AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs. The returned values will be of type text.

+
Immutable
jsonb_exists_any(json: jsonb, array: string[]) → bool

Returns whether any of the strings in the text array exist as top-level keys or array elements.

+
Immutable
jsonb_extract_path(jsonb, string...) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
jsonb_extract_path_text(jsonb, string...) → string

Returns the JSON value as text pointed to by the variadic arguments.

+
Immutable
jsonb_insert(target: jsonb, path: string[], new_val: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments. new_val will be inserted before path target.

+
Immutable
jsonb_insert(target: jsonb, path: string[], new_val: jsonb, insert_after: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If insert_after is true (default is false), new_val will be inserted after path target.

+
Immutable
jsonb_object(keys: string[], values: string[]) → jsonb

This form of json_object takes keys and values pairwise from two separate arrays. In all other respects it is identical to the one-argument form.

+
Immutable
jsonb_object(texts: string[]) → jsonb

Builds a JSON or JSONB object out of a text array. The array must have exactly one dimension with an even number of members, in which case they are taken as alternating key/value pairs.

+
Immutable
jsonb_populate_record(base: anyelement, from_json: jsonb) → anyelement

Expands the object in from_json to a row whose columns match the record type defined by base.

+
Stable
jsonb_populate_recordset(base: anyelement, from_json: jsonb) → anyelement

Expands the outermost array of objects in from_json to a set of rows whose columns match the record type defined by base.

+
Stable
jsonb_pretty(val: jsonb) → string

Returns the given JSON value as a STRING indented and with newlines.

+
Immutable
jsonb_set(val: jsonb, path: string[], to: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
jsonb_set(val: jsonb, path: string[], to: jsonb, create_missing: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If create_missing is false, new keys will not be inserted to objects and values will not be prepended or appended to arrays.

+
Immutable
jsonb_strip_nulls(from_json: jsonb) → jsonb

Returns from_json with all object fields that have null values omitted. Other null values are untouched.

+
Immutable
jsonb_typeof(val: jsonb) → string

Returns the type of the outermost JSON value as a text string.

+
Immutable
row_to_json(row: tuple) → jsonb

Returns the row as a JSON object.

+
Stable
to_json(val: anyelement) → jsonb

Returns the value as JSON or JSONB.

+
Stable
to_jsonb(val: anyelement) → jsonb

Returns the value as JSON or JSONB.

+
Stable
+ +### Jsonpath functions + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
jsonb_path_exists(target: jsonb, path: jsonpath) → bool

Checks whether the JSON path returns any item for the specified JSON value.

+
Immutable
jsonb_path_exists(target: jsonb, path: jsonpath, vars: jsonb) → bool

Checks whether the JSON path returns any item for the specified JSON value. +The vars argument must be a JSON object, and its fields provide named +values to be substituted into the jsonpath expression.

+
Immutable
jsonb_path_exists(target: jsonb, path: jsonpath, vars: jsonb, silent: bool) → bool

Checks whether the JSON path returns any item for the specified JSON value. +The vars argument must be a JSON object, and its fields provide named +values to be substituted into the jsonpath expression. If the silent +argument is true, the function suppresses the following errors: +missing object field or array element, unexpected JSON item type, +datetime and numeric errors.

+
Immutable
jsonb_path_match(target: jsonb, path: jsonpath) → bool

Returns the SQL boolean result of a JSON path predicate check +for the specified JSON value. (This is useful only with predicate check +expressions, not SQL-standard JSON path expressions, since it will +either fail or return NULL if the path result is not a single boolean +value.)

+
Immutable
jsonb_path_match(target: jsonb, path: jsonpath, vars: jsonb) → bool

Returns the SQL boolean result of a JSON path predicate check +for the specified JSON value. (This is useful only with predicate check +expressions, not SQL-standard JSON path expressions, since it will +either fail or return NULL if the path result is not a single boolean +value.) The vars argument must be a JSON object, and its fields provide +named values to be substituted into the jsonpath expression.

+
Immutable
jsonb_path_match(target: jsonb, path: jsonpath, vars: jsonb, silent: bool) → bool

Returns the SQL boolean result of a JSON path predicate check +for the specified JSON value. (This is useful only with predicate check +expressions, not SQL-standard JSON path expressions, since it will +either fail or return NULL if the path result is not a single boolean +value.) The vars argument must be a JSON object, and its fields provide +named values to be substituted into the jsonpath expression. If the +silent argument is true, the function suppresses the following errors: +missing object field or array element, unexpected JSON item type, +datetime and numeric errors.

+
Immutable
jsonb_path_query(target: jsonb, path: jsonpath) → jsonb

Returns all JSON items returned by the JSON path for the specified JSON value.

+
Immutable
jsonb_path_query(target: jsonb, path: jsonpath, vars: jsonb) → jsonb

Returns all JSON items returned by the JSON path for the specified JSON value. +The vars argument must be a JSON object, and its fields provide named values +to be substituted into the jsonpath expression.

+
Immutable
jsonb_path_query(target: jsonb, path: jsonpath, vars: jsonb, silent: bool) → jsonb

Returns all JSON items returned by the JSON path for the specified JSON value. +The vars argument must be a JSON object, and its fields provide named values +to be substituted into the jsonpath expression. If the silent argument is true, +the function suppresses the following errors: missing object field or array +element, unexpected JSON item type, datetime and numeric errors.

+
Immutable
jsonb_path_query_array(target: jsonb, path: jsonpath) → jsonb

Returns all JSON items returned by the JSON path for the +specified JSON value, as a JSON array.

+
Immutable
jsonb_path_query_array(target: jsonb, path: jsonpath, vars: jsonb) → jsonb

Returns all JSON items returned by the JSON path for the +specified JSON value, as a JSON array. The vars argument must be a +JSON object, and its fields provide named values to be substituted +into the jsonpath expression.

+
Immutable
jsonb_path_query_array(target: jsonb, path: jsonpath, vars: jsonb, silent: bool) → jsonb

Returns all JSON items returned by the JSON path for the +specified JSON value, as a JSON array. The vars argument must be a +JSON object, and its fields provide named values to be substituted +into the jsonpath expression. If the silent argument is true, the +function suppresses the following errors: missing object field or +array element, unexpected JSON item type, datetime and numeric errors.

+
Immutable
jsonb_path_query_first(target: jsonb, path: jsonpath) → jsonb

Returns the first JSON item returned by the JSON path for the +specified JSON value, or NULL if there are no results.

+
Immutable
jsonb_path_query_first(target: jsonb, path: jsonpath, vars: jsonb) → jsonb

Returns the first JSON item returned by the JSON path for the +specified JSON value, or NULL if there are no results. The vars +argument must be a JSON object, and its fields provide named values +to be substituted into the jsonpath expression.

+
Immutable
jsonb_path_query_first(target: jsonb, path: jsonpath, vars: jsonb, silent: bool) → jsonb

Returns the first JSON item returned by the JSON path for the +specified JSON value, or NULL if there are no results. The vars +argument must be a JSON object, and its fields provide named values +to be substituted into the jsonpath expression. If the silent argument is true, the +function suppresses the following errors: missing object field or +array element, unexpected JSON item type, datetime and numeric errors.

+
Immutable
+ +### Multi-region functions + + + + + + + +
Function → ReturnsDescriptionVolatility
default_to_database_primary_region(val: string) → string

Returns the given region if the region has been added to the current database. +Otherwise, this will return the primary region of the current database. +This will error if the current database is not a multi-region database.

+
Stable
gateway_region() → string

Returns the region of the connection’s current node as defined by +the locality flag on node startup. Returns an error if no region is set.

+
Stable
rehome_row() → string

Returns the region of the connection’s current node as defined by +the locality flag on node startup. Returns an error if no region is set.

+
Stable
+ +### PGVector functions + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cosine_distance(v1: vector, v2: vector) → float

Returns the cosine distance between the two vectors.

+
Immutable
inner_product(v1: vector, v2: vector) → float

Returns the inner product between the two vectors.

+
Immutable
l1_distance(v1: vector, v2: vector) → float

Returns the Manhattan distance between the two vectors.

+
Immutable
l2_distance(v1: vector, v2: vector) → float

Returns the Euclidean distance between the two vectors.

+
Immutable
vector_dims(vector: vector) → int

Returns the number of the dimensions in the vector.

+
Immutable
vector_norm(vector: vector) → float

Returns the Euclidean norm of the vector.

+
Immutable
+ +### STRING[] functions + + + + + + +
Function → ReturnsDescriptionVolatility
regexp_split_to_array(string: string, pattern: string) → string[]

Split string using a POSIX regular expression as the delimiter.

+
Immutable
regexp_split_to_array(string: string, pattern: string, flags: string) → string[]

Split string using a POSIX regular expression as the delimiter with flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
+ +### Sequence functions + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
currval(sequence_name: string) → int

Returns the latest value obtained with nextval for this sequence in this session.

+
Volatile
currval(sequence_name: regclass) → int

Returns the latest value obtained with nextval for this sequence in this session.

+
Volatile
lastval() → int

Return value most recently obtained with nextval in this session.

+
Volatile
nextval(sequence_name: string) → int

Advances the given sequence and returns its new value.

+
Volatile
nextval(sequence_name: regclass) → int

Advances the given sequence and returns its new value.

+
Volatile
setval(sequence_name: string, value: int) → int

Sets the given sequence’s current value. The next call to nextval will return value + Increment.

+
Volatile
setval(sequence_name: string, value: int, is_called: bool) → int

Set the given sequence’s current value. If is_called is false, the next call to nextval will return value; otherwise value + Increment.

+
Volatile
setval(sequence_name: regclass, value: int) → int

Sets the given sequence’s current value. The next call to nextval will return value + Increment.

+
Volatile
setval(sequence_name: regclass, value: int, is_called: bool) → int

Set the given sequence’s current value. If is_called is false, the next call to nextval will return value; otherwise value + Increment.

+
Volatile
+ +### Set-returning functions + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
aclexplode(aclitems: string[]) → tuple{oid AS grantor, oid AS grantee, string AS privilege_type, bool AS is_grantable}

Produces a virtual table containing aclitem stuff (returns no rows as this feature is unsupported in CockroachDB)

+
Stable
generate_series(start: int, end: int) → int

Produces a virtual table containing the integer values from start to end, inclusive.

+
Immutable
generate_series(start: int, end: int, step: int) → int

Produces a virtual table containing the integer values from start to end, inclusive, by increment of step.

+
Immutable
generate_series(start: timestamp, end: timestamp, step: interval) → timestamp

Produces a virtual table containing the timestamp values from start to end, inclusive, by increment of step.

+
Immutable
generate_series(start: timestamptz, end: timestamptz, step: interval) → timestamptz

Produces a virtual table containing the timestampTZ values from start to end, inclusive, by increment of step.

+
Immutable
generate_subscripts(array: anyelement[]) → int

Returns a series comprising the given array’s subscripts.

+
Immutable
generate_subscripts(array: anyelement[], dim: int) → int

Returns a series comprising the given array’s subscripts.

+
Immutable
generate_subscripts(array: anyelement[], dim: int, reverse: bool) → int

Returns a series comprising the given array’s subscripts.

+

When reverse is true, the series is returned in reverse order.

+
Immutable
information_schema._pg_expandarray(input: anyelement[]) → tuple{anyelement AS x, int AS n}

Returns the input array as a set of rows with an index

+
Immutable
json_object_keys(input: jsonb) → string

Returns sorted set of keys in the outermost JSON object.

+
Immutable
json_to_record(input: jsonb) → tuple

Builds an arbitrary record from a JSON object.

+
Stable
json_to_recordset(input: jsonb) → tuple

Builds an arbitrary set of records from a JSON array of objects.

+
Stable
jsonb_object_keys(input: jsonb) → string

Returns sorted set of keys in the outermost JSON object.

+
Immutable
jsonb_to_record(input: jsonb) → tuple

Builds an arbitrary record from a JSON object.

+
Stable
jsonb_to_recordset(input: jsonb) → tuple

Builds an arbitrary set of records from a JSON array of objects.

+
Stable
pg_get_keywords() → tuple{string AS word, string AS catcode, string AS catdesc}

Produces a virtual table containing the keywords known to the SQL parser.

+
Immutable
pg_options_to_table(options: string[]) → tuple{string AS option_name, string AS option_value}

Converts the options array format to a table.

+
Stable
regexp_split_to_table(string: string, pattern: string) → string

Split string using a POSIX regular expression as the delimiter.

+
Immutable
regexp_split_to_table(string: string, pattern: string, flags: string) → string

Split string using a POSIX regular expression as the delimiter with flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
unnest(anyelement[], anyelement[], anyelement[]...) → tuple{anyelement AS unnest, anyelement AS unnest, anyelement AS unnest}

Returns the input arrays as a set of rows

+
Immutable
unnest(input: anyelement[]) → anyelement

Returns the input array as a set of rows

+
Immutable
workload_index_recs() → tuple{string AS index_rec, bytes[] AS fingerprint_ids}

Returns index recommendations and the fingerprint ids that the indexes will impact

+
Immutable
workload_index_recs(timestamptz: timestamptz) → tuple{string AS index_rec, bytes[] AS fingerprint_ids}

Returns index recommendations and the fingerprint ids that the indexes will impact

+
Immutable
+ +### Spatial functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
_st_contains(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no points of geometry_b lie in the exterior of geometry_a, and there is at least one point in the interior of geometry_b that lies in the interior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_containsproperly(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_b intersects the interior of geometry_a but not the boundary or exterior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_coveredby(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_a is outside geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_coveredby(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_covers(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_b is outside geography_a.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_covers(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_crosses(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a has some - but not all - interior points in common with geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dfullywithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, inclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than or equal to distance units.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dfullywithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, exclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than distance units.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_equals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is spatially equal to geometry_b, i.e. ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = true.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_intersects(geography_a: geography, geography_b: geography) → bool

Returns true if geography_a shares any portion of space with geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_intersects(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_overlaps(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a intersects but does not completely contain geometry_b, or vice versa. “Does not completely” implies ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = false.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_touches(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if the only points in common between geometry_a and geometry_b are on the boundary. Note points do not touch other points.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_within(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is completely inside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
addgeometrycolumn(catalog_name: string, schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(catalog_name: string, schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
geometrytype(geometry: geometry) → string

Returns the type of geometry as a string.

+

This function utilizes the GEOS module.

+
Immutable
geomfromewkb(val: bytes) → geometry

Returns the Geometry from an EWKB representation.

+
Immutable
geomfromewkt(val: string) → geometry

Returns the Geometry from an EWKT representation.

+
Immutable
postgis_addbbox(geometry: geometry) → geometry

Compatibility placeholder function with PostGIS. This does not perform any operation on the Geometry.

+
Immutable
postgis_dropbbox(geometry: geometry) → geometry

Compatibility placeholder function with PostGIS. This does not perform any operation on the Geometry.

+
Immutable
postgis_extensions_upgrade() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_full_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_geos_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_getbbox(geometry: geometry) → box2d

Returns a box2d encapsulating the given Geometry.

+
Immutable
postgis_hasbbox(geometry: geometry) → bool

Returns whether a given Geometry has a bounding box. False for points and empty geometries; always true otherwise.

+
Immutable
postgis_lib_build_date() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_lib_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_liblwgeom_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_libxml_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_proj_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_build_date() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_installed() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_released() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_wagyu_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
st_3dlength(geometry: geometry) → float

Returns the 3-dimensional or 2-dimensional length of the geometry.

+

Note ST_3DLength is only valid for LineString or MultiLineString. +For 2-D lines it will return the 2-D length (same as ST_Length and ST_Length2D)

+

This function utilizes the GEOS module.

+
Immutable
st_addmeasure(geometry: geometry, start: float, end: float) → geometry

Returns a copy of a LineString or MultiLineString with measure coordinates linearly interpolated between the specified start and end values. Any existing M coordinates will be overwritten.

+
Immutable
st_addpoint(line_string: geometry, point: geometry) → geometry

Adds a Point to the end of a LineString.

+
Immutable
st_addpoint(line_string: geometry, point: geometry, index: int) → geometry

Adds a Point to a LineString at the given 0-based index (-1 to append).

+
Immutable
st_affine(geometry: geometry, a: float, b: float, c: float, d: float, e: float, f: float, g: float, h: float, i: float, x_off: float, y_off: float, z_off: float) → geometry

Applies a 3D affine transformation to the given geometry.

+

The matrix transformation will be applied as follows for each coordinate: +/ a b c x_off \ / x
+| d e f y_off | | y | +| g h i z_off | | z | +\ 0 0 0 1 / \ 0 /

+
Immutable
st_affine(geometry: geometry, a: float, b: float, d: float, e: float, x_off: float, y_off: float) → geometry

Applies a 2D affine transformation to the given geometry.

+

The matrix transformation will be applied as follows for each coordinate: +/ a b x_off \ / x
+| d e y_off | | y | +\ 0 0 1 / \ 0 /

+
Immutable
st_angle(line1: geometry, line2: geometry) → float

Returns the clockwise angle between two LINESTRING geometries, treating them as vectors between their start- and endpoints. Returns NULL if any vectors have 0 length.

+
Immutable
st_angle(point1: geometry, point2: geometry, point3: geometry) → float

Returns the clockwise angle between the vectors formed by point2,point1 and point2,point3. The arguments must be POINT geometries. Returns NULL if any vectors have 0 length.

+
Immutable
st_angle(point1: geometry, point2: geometry, point3: geometry, point4: geometry) → float

Returns the clockwise angle between the vectors formed by point1,point2 and point3,point4. The arguments must be POINT geometries. Returns NULL if any vectors have 0 length.

+
Immutable
st_area(geography: geography) → float

Returns the area of the given geography in meters^2. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_area(geography: geography, use_spheroid: bool) → float

Returns the area of the given geography in meters^2.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_area(geometry: geometry) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_area(geometry_str: string) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_area2d(geometry: geometry) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_asbinary(geography: geography) → bytes

Returns the WKB representation of a given Geography.

+
Immutable
st_asbinary(geography: geography, xdr_or_ndr: string) → bytes

Returns the WKB representation of a given Geography. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_asbinary(geometry: geometry) → bytes

Returns the WKB representation of a given Geometry.

+
Immutable
st_asbinary(geometry: geometry, xdr_or_ndr: string) → bytes

Returns the WKB representation of a given Geometry. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_asencodedpolyline(geometry: geometry) → string

Returns the geometry as an Encoded Polyline. +This format is used by Google Maps with precision=5 and by Open Source Routing Machine with precision=5 and 6. +Preserves 5 decimal places.

+
Immutable
st_asencodedpolyline(geometry: geometry, precision: int4) → string

Returns the geometry as an Encoded Polyline. +This format is used by Google Maps with precision=5 and by Open Source Routing Machine with precision=5 and 6. +Precision specifies how many decimal places will be preserved in Encoded Polyline. Value should be the same on encoding and decoding, or coordinates will be incorrect.

+
Immutable
st_asewkb(geography: geography) → bytes

Returns the EWKB representation of a given Geography.

+
Immutable
st_asewkb(geometry: geometry) → bytes

Returns the EWKB representation of a given Geometry.

+
Immutable
st_asewkt(geography: geography) → string

Returns the EWKT representation of a given Geography. A default of 15 decimal digits is used.

+
Immutable
st_asewkt(geography: geography, max_decimal_digits: int) → string

Returns the EWKT representation of a given Geography. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_asewkt(geometry: geometry) → string

Returns the EWKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+
Immutable
st_asewkt(geometry: geometry, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_asewkt(geometry_str: string) → string

Returns the EWKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asewkt(geometry_str: string, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geography: geography) → string

Returns the GeoJSON representation of a given Geography. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(geography: geography, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geography with max_decimal_digits output for each coordinate value.

+
Immutable
st_asgeojson(geography: geography, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geography with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option (default for Geography)
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326
  • +
+
Immutable
st_asgeojson(geometry: geometry) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(geometry: geometry, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+
Immutable
st_asgeojson(geometry: geometry, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326 (default for Geometry)
  • +
+
Immutable
st_asgeojson(geometry_str: string) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geometry_str: string, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geometry_str: string, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326 (default for Geometry)
  • +
+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(row: tuple) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(row: tuple, geo_column: string) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. Coordinates have a maximum of 9 decimal digits.

+
Stable
st_asgeojson(row: tuple, geo_column: string, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. max_decimal_digits will be output for each coordinate value.

+
Stable
st_asgeojson(row: tuple, geo_column: string, max_decimal_digits: int, pretty: bool) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. max_decimal_digits will be output for each coordinate value. Output will be pretty printed in JSON if pretty is true.

+
Stable
st_ashexewkb(geography: geography) → string

Returns the EWKB representation in hex of a given Geography.

+
Immutable
st_ashexewkb(geography: geography, xdr_or_ndr: string) → string

Returns the EWKB representation in hex of a given Geography. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_ashexewkb(geometry: geometry) → string

Returns the EWKB representation in hex of a given Geometry.

+
Immutable
st_ashexewkb(geometry: geometry, xdr_or_ndr: string) → string

Returns the EWKB representation in hex of a given Geometry. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_ashexwkb(geography: geography) → string

Returns the WKB representation in hex of a given Geography.

+
Immutable
st_ashexwkb(geometry: geometry) → string

Returns the WKB representation in hex of a given Geometry.

+
Immutable
st_askml(geography: geography) → string

Returns the KML representation of a given Geography.

+
Immutable
st_askml(geometry: geometry) → string

Returns the KML representation of a given Geometry.

+
Immutable
st_askml(geometry_str: string) → string

Returns the KML representation of a given Geometry.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds. +Uses 256 as the buffer size in tile coordinate space for geometry clipping. +Uses 4096 as the tile extent size in tile coordinate space.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be transformed and clipped.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds. +Uses 256 as the buffer size in tile coordinate space for geometry clipping.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be transformed and clipped.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int, buffer: int) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be transformed and clipped.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int, buffer: int, clip: bool) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds if required.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be transformed, and clipped if required.

+
Immutable
st_astext(geography: geography) → string

Returns the WKT representation of a given Geography. A default of 15 decimal digits is used.

+
Immutable
st_astext(geography: geography, max_decimal_digits: int) → string

Returns the WKT representation of a given Geography. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_astext(geometry: geometry) → string

Returns the WKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+
Immutable
st_astext(geometry: geometry, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the decimal point. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_astext(geometry_str: string) → string

Returns the WKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_astext(geometry_str: string, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the decimal point. Use -1 to print as many digits as required to rebuild the same number.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int, precision_z: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int, precision_z: int, precision_m: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_azimuth(geography_a: geography, geography_b: geography) → float

Returns the azimuth in radians of the segment defined by the given point geographies, or NULL if the two points are coincident. It is solved using the Inverse geodesic problem.

+

The azimuth angle is referenced from north, and is positive clockwise: North = 0; East = π/2; South = π; West = 3π/2.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_azimuth(geometry_a: geometry, geometry_b: geometry) → float

Returns the azimuth in radians of the segment defined by the given point geometries, or NULL if the two points are coincident.

+

The azimuth angle is referenced from north, and is positive clockwise: North = 0; East = π/2; South = π; West = 3π/2.

+
Immutable
st_bdpolyfromtext(str: string, srid: int) → geometry

Returns a Polygon from multilinestring WKT with a SRID. If the input is not a multilinestring an error will be thrown.

+
Immutable
st_boundary(geometry: geometry) → geometry

Returns the closure of the combinatorial boundary of this Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_box2dfromgeohash(geohash: string) → box2d

Return a Box2D from a GeoHash string with max precision.

+
Immutable
st_box2dfromgeohash(geohash: string, precision: int) → box2d

Return a Box2D from a GeoHash string with supplied precision.

+
Immutable
st_buffer(geography: geography, distance: float) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geography: geography, distance: float, buffer_style_params: string) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geography: geography, distance: float, quad_segs: int) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geometry: geometry, distance: decimal) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float, buffer_style_params: string) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float, quad_segs: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry_str: string, distance: decimal) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float, buffer_style_params: string) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float, quad_segs: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_centroid(geography: geography) → geography

Returns the centroid of given geography. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_centroid(geography: geography, use_spheroid: bool) → geography

Returns the centroid of given geography.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_centroid(geometry: geometry) → geometry

Returns the centroid of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_centroid(geometry_str: string) → geometry

Returns the centroid of the given geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_clipbybox2d(geometry: geometry, box2d: box2d) → geometry

Clips the geometry to conform to the bounding box specified by box2d.

+
Immutable
st_closestpoint(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the 2-dimensional point on geometry_a that is closest to geometry_b. This is the first point of the shortest line.

+
Immutable
st_collectionextract(geometry: geometry, type: int) → geometry

Given a collection, returns a multitype consisting only of elements of the specified type. If there are no elements of the given type, an EMPTY geometry is returned. Types are specified as 1=POINT, 2=LINESTRING, 3=POLYGON - other types are not supported.

+
Immutable
st_collectionhomogenize(geometry: geometry) → geometry

Returns the “simplest” representation of a collection’s contents. Collections of a single type will be returned as an appropriate multitype, or a singleton if it only contains a single geometry.

+
Immutable
st_combinebbox(box2d: box2d, geometry: geometry) → box2d

Combines the current bounding box with the bounding box of the Geometry.

+
Immutable
st_contains(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no points of geometry_b lie in the exterior of geometry_a, and there is at least one point in the interior of geometry_b that lies in the interior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_containsproperly(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_b intersects the interior of geometry_a but not the boundary or exterior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_convexhull(geometry: geometry) → geometry

Returns a geometry that represents the Convex Hull of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_coorddim(geometry: geometry) → int

Returns the number of coordinate dimensions of a given Geometry.

+
Immutable
st_coveredby(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_a is outside geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_coveredby(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_coveredby(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_covers(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_b is outside geography_a.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_covers(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_covers(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_crosses(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a has some - but not all - interior points in common with geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dfullywithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, inclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than or equal to distance units.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dfullywithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, exclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than distance units.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_difference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the difference of two Geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_dimension(geometry: geometry) → int

Returns the number of topological dimensions of a given Geometry.

+
Immutable
st_disjoint(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a does not overlap, touch or is within geometry_b.

+

This function utilizes the GEOS module.

+
Immutable
st_distance(geography_a: geography, geography_b: geography) → float

Returns the distance in meters between geography_a and geography_b. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_distance(geography_a: geography, geography_b: geography, use_spheroid: bool) → float

Returns the distance in meters between geography_a and geography_b.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_distance(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance between the given geometries.

+
Immutable
st_distance(geometry_a_str: string, geometry_b_str: string) → float

Returns the distance between the given geometries.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_distancesphere(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance in meters between geometry_a and geometry_b assuming the coordinates represent lng/lat points on a sphere.

+

This function utilizes the S2 library for spherical calculations.

+
Immutable
st_distancespheroid(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance in meters between geometry_a and geometry_b assuming the coordinates represent lng/lat points on a spheroid.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_dwithin(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geometry_a_str: string, geometry_b_str: string, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geometry_a_str: string, geometry_b_str: string, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_endpoint(geometry: geometry) → geometry

Returns the last point of a geometry which has shape LineString. Returns NULL if the geometry is not a LineString.

+
Immutable
st_envelope(box2d: box2d) → geometry

Returns a bounding geometry for the given box.

+
Immutable
st_envelope(geometry: geometry) → geometry

Returns a bounding envelope for the given geometry.

+

For geometries which have a POINT or LINESTRING bounding box (i.e. is a single point +or a horizontal or vertical line), a POINT or LINESTRING is returned. Otherwise, the +returned POLYGON will be ordered Bottom Left, Top Left, Top Right, Bottom Right, +Bottom Left.

+
Immutable
st_equals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is spatially equal to geometry_b, i.e. ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = true.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_estimatedextent(schema_name: string, table_name: string, geocolumn_name: string) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+
Stable
st_estimatedextent(schema_name: string, table_name: string, geocolumn_name: string, parent_only: bool) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+

The parent_only boolean is always ignored.

+
Stable
st_estimatedextent(table_name: string, geocolumn_name: string) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+
Stable
st_expand(box2d: box2d, delta: float) → box2d

Extends the box2d by delta units across all dimensions.

+
Immutable
st_expand(box2d: box2d, delta_x: float, delta_y: float) → box2d

Extends the box2d by delta_x units in the x dimension and delta_y units in the y dimension.

+
Immutable
st_expand(geometry: geometry, delta: float) → geometry

Extends the bounding box represented by the geometry by delta units across all dimensions, returning a Polygon representing the new bounding box.

+
Immutable
st_expand(geometry: geometry, delta_x: float, delta_y: float) → geometry

Extends the bounding box represented by the geometry by delta_x units in the x dimension and delta_y units in the y dimension, returning a Polygon representing the new bounding box.

+
Immutable
st_exteriorring(geometry: geometry) → geometry

Returns the exterior ring of a Polygon as a LineString. Returns NULL if the shape is not a Polygon.

+
Immutable
st_flipcoordinates(geometry: geometry) → geometry

Returns a new geometry with the X and Y axes flipped.

+
Immutable
st_force2d(geometry: geometry) → geometry

Returns a Geometry that is forced into XY layout with any Z or M dimensions discarded.

+
Immutable
st_force3d(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3d(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3dm(geometry: geometry) → geometry

Returns a Geometry that is forced into XYM layout. If a M coordinate doesn’t exist, it will be set to 0. If a Z coordinate is present, it will be discarded.

+
Immutable
st_force3dm(geometry: geometry, defaultM: float) → geometry

Returns a Geometry that is forced into XYM layout. If a M coordinate doesn’t exist, it will be set to the specified default M value. If a Z coordinate is present, it will be discarded.

+
Immutable
st_force3dz(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3dz(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate is present, it will be discarded.

+
Immutable
st_force4d(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZM layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate doesn’t exist, it will be set to 0.

+
Immutable
st_force4d(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate doesn’t exist, it will be set to 0.

+
Immutable
st_force4d(geometry: geometry, defaultZ: float, defaultM: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified Z value. If a M coordinate doesn’t exist, it will be set to the specified M value.

+
Immutable
st_forcecollection(geometry: geometry) → geometry

Converts the geometry into a GeometryCollection.

+
Immutable
st_forcepolygonccw(geometry: geometry) → geometry

Returns a Geometry where all Polygon objects have exterior rings in the counter-clockwise orientation and interior rings in the clockwise orientation. Non-Polygon objects are unchanged.

+
Immutable
st_forcepolygoncw(geometry: geometry) → geometry

Returns a Geometry where all Polygon objects have exterior rings in the clockwise orientation and interior rings in the counter-clockwise orientation. Non-Polygon objects are unchanged.

+
Immutable
st_frechetdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the Frechet distance between the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_frechetdistance(geometry_a: geometry, geometry_b: geometry, densify_frac: float) → float

Returns the Frechet distance between the given geometries, with the given segment densification (range 0.0-1.0, -1 to disable).

+

Smaller densify_frac gives a more accurate Fréchet distance. However, the computation time and memory usage increases with the square of the number of subsegments.

+

This function utilizes the GEOS module.

+
Immutable
st_generatepoints(geometry: geometry, npoints: int4) → geometry

Generates pseudo-random points until the requested number are found within the input area. Uses system time as a seed. The requested number of points must not be larger than 65336.

+
Volatile
st_generatepoints(geometry: geometry, npoints: int4, seed: int4) → geometry

Generates pseudo-random points until the requested number are found within the input area. The requested number of points must not be larger than 65336.

+
Immutable
st_geogfromewkb(val: bytes) → geography

Returns the Geography from an EWKB representation.

+
Immutable
st_geogfromewkt(val: string) → geography

Returns the Geography from an EWKT representation.

+
Immutable
st_geogfromgeojson(val: string) → geography

Returns the Geography from an GeoJSON representation.

+
Immutable
st_geogfromgeojson(val: jsonb) → geography

Returns the Geography from a GeoJSON representation.

+
Immutable
st_geogfromtext(str: string, srid: int) → geography

Returns the Geography from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geogfromtext(val: string) → geography

Returns the Geography from a WKT or EWKT representation.

+
Immutable
st_geogfromwkb(bytes: bytes, srid: int) → geography

Returns the Geography from a WKB (or EWKB) representation with the given SRID set.

+
Immutable
st_geogfromwkb(val: bytes) → geography

Returns the Geography from a WKB (or EWKB) representation.

+
Immutable
st_geographyfromtext(str: string, srid: int) → geography

Returns the Geography from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geographyfromtext(val: string) → geography

Returns the Geography from a WKT or EWKT representation.

+
Immutable
st_geohash(geography: geography) → string

Returns a GeoHash representation of the geography with full precision if a point is provided, or with variable precision based on the size of the feature.

+
Immutable
st_geohash(geography: geography, precision: int) → string

Returns a GeoHash representation of the geography with the supplied precision.

+
Immutable
st_geohash(geometry: geometry) → string

Returns a GeoHash representation of the geometry with full precision if a point is provided, or with variable precision based on the size of the feature. This will error if any coordinates are outside the bounds of longitude/latitude.

+
Immutable
st_geohash(geometry: geometry, precision: int) → string

Returns a GeoHash representation of the geometry with the supplied precision. This will error if any coordinates are outside the bounds of longitude/latitude.

+
Immutable
st_geomcollfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not GeometryCollection, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geomcollfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geomcollfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geomcollfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geometryfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geometryfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_geometryn(geometry: geometry, n: int) → geometry

Returns the n-th Geometry (1-indexed). Returns NULL if out of bounds.

+
Immutable
st_geometrytype(geometry: geometry) → string

Returns the type of geometry as a string prefixed with ST_.

+

This function utilizes the GEOS module.

+
Immutable
st_geomfromewkb(val: bytes) → geometry

Returns the Geometry from an EWKB representation.

+
Immutable
st_geomfromewkt(val: string) → geometry

Returns the Geometry from an EWKT representation.

+
Immutable
st_geomfromgeohash(geohash: string) → geometry

Return a POLYGON Geometry from a GeoHash string with max precision.

+
Immutable
st_geomfromgeohash(geohash: string, precision: int) → geometry

Return a POLYGON Geometry from a GeoHash string with supplied precision.

+
Immutable
st_geomfromgeojson(val: string) → geometry

Returns the Geometry from a GeoJSON representation.

+
Immutable
st_geomfromgeojson(val: jsonb) → geometry

Returns the Geometry from a GeoJSON representation.

+
Immutable
st_geomfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geomfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_geomfromwkb(bytes: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with the given SRID set.

+
Immutable
st_geomfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation.

+
Immutable
st_hasarc(geometry: geometry) → bool

Returns whether there is a CIRCULARSTRING in the geometry.

+
Immutable
st_hausdorffdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the Hausdorff distance between the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_hausdorffdistance(geometry_a: geometry, geometry_b: geometry, densify_frac: float) → float

Returns the Hausdorff distance between the given geometries, with the given segment densification (range 0.0-1.0).

+

This function utilizes the GEOS module.

+
Immutable
st_interiorringn(geometry: geometry, n: int) → geometry

Returns the n-th (1-indexed) interior ring of a Polygon as a LineString. Returns NULL if the shape is not a Polygon, or the ring does not exist.

+
Immutable
st_intersection(geography_a: geography, geography_b: geography) → geography

Returns the point intersections of the given geographies.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+

This function utilizes the GEOS module.

+
Immutable
st_intersection(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the point intersections of the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_intersection(geometry_a_str: string, geometry_b_str: string) → geometry

Returns the point intersections of the given geometries.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_intersects(geography_a: geography, geography_b: geography) → bool

Returns true if geography_a shares any portion of space with geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_intersects(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_intersects(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_isclosed(geometry: geometry) → bool

Returns whether the geometry is closed as defined by whether the start and end points are coincident. Points are considered closed, empty geometries are not. For collections and multi-types, all members must be closed, as must all polygon rings.

+
Immutable
st_iscollection(geometry: geometry) → bool

Returns whether the geometry is of a collection type (including multi-types).

+
Immutable
st_isempty(geometry: geometry) → bool

Returns whether the geometry is empty.

+
Immutable
st_ispolygonccw(geometry: geometry) → bool

Returns whether the Polygon objects inside the Geometry have exterior rings in the counter-clockwise orientation and interior rings in the clockwise orientation. Non-Polygon objects are considered counter-clockwise.

+
Immutable
st_ispolygoncw(geometry: geometry) → bool

Returns whether the Polygon objects inside the Geometry have exterior rings in the clockwise orientation and interior rings in the counter-clockwise orientation. Non-Polygon objects are considered clockwise.

+
Immutable
st_isring(geometry: geometry) → bool

Returns whether the geometry is a single linestring that is closed and simple, as defined by ST_IsClosed and ST_IsSimple.

+

This function utilizes the GEOS module.

+
Immutable
st_issimple(geometry: geometry) → bool

Returns true if the geometry has no anomalous geometric points, e.g. that it intersects with or lies tangent to itself.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalid(geometry: geometry) → bool

Returns whether the geometry is valid as defined by the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalid(geometry: geometry, flags: int) → bool

Returns whether the geometry is valid.

+

For flags=0, validity is defined by the OGC spec.

+

For flags=1, validity considers self-intersecting rings forming holes as valid as per ESRI. This is not valid under OGC and CRDB spatial operations may not operate correctly.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidreason(geometry: geometry) → string

Returns a string containing the reason the geometry is invalid along with the point of interest, or “Valid Geometry” if it is valid. Validity is defined by the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidreason(geometry: geometry, flags: int) → string

Returns the reason the geometry is invalid or “Valid Geometry” if it is valid.

+

For flags=0, validity is defined by the OGC spec.

+

For flags=1, validity considers self-intersecting rings forming holes as valid as per ESRI. This is not valid under OGC and CRDB spatial operations may not operate correctly.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidtrajectory(geometry: geometry) → bool

Returns whether the geometry encodes a valid trajectory.

+

Note the geometry must be a LineString with M coordinates.

+
Immutable
st_length(geography: geography) → float

Returns the length of the given geography in meters. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_length(geography: geography, use_spheroid: bool) → float

Returns the length of the given geography in meters.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_length(geometry: geometry) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+
Immutable
st_length(geometry_str: string) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_length2d(geometry: geometry) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+
Immutable
st_linecrossingdirection(linestring_a: geometry, linestring_b: geometry) → int

Returns an integer value defining behavior of crossing of lines: +0: lines do not cross, +-1: linestring_b crosses linestring_a from right to left, +1: linestring_b crosses linestring_a from left to right, +-2: linestring_b crosses linestring_a multiple times from right to left, +2: linestring_b crosses linestring_a multiple times from left to right, +-3: linestring_b crosses linestring_a multiple times from left to left, +3: linestring_b crosses linestring_a multiple times from right to right.

+

Note that the top vertex of the segment touching another line does not count as a crossing, but the bottom vertex of segment touching another line is considered a crossing.

+
Immutable
st_linefromencodedpolyline(encoded_polyline: string) → geometry

Creates a LineString from an Encoded Polyline string.

+

Returns valid results only if the polyline was encoded with 5 decimal places.

+

See http://developers.google.com/maps/documentation/utilities/polylinealgorithm

+
Immutable
st_linefromencodedpolyline(encoded_polyline: string, precision: int4) → geometry

Creates a LineString from an Encoded Polyline string.

+

Precision specifies how many decimal places will be preserved in Encoded Polyline. Value should be the same on encoding and decoding, or coordinates will be incorrect.

+

See http://developers.google.com/maps/documentation/utilities/polylinealgorithm

+
Immutable
st_linefrommultipoint(geometry: geometry) → geometry

Creates a LineString from a MultiPoint geometry.

+
Immutable
st_linefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not LineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_linefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_lineinterpolatepoint(geometry: geometry, fraction: float) → geometry

Returns a point along the given LineString which is at the given fraction of the LineString’s total length.

+

This function utilizes the GEOS module.

+
Immutable
st_lineinterpolatepoints(geometry: geometry, fraction: float) → geometry

Returns one or more points along the LineString which are at integral multiples of the given fraction of the LineString’s total length.

+

Note If the result has zero or one points, it will be returned as a POINT. If it has two or more points, it will be returned as a MULTIPOINT.

+

This function utilizes the GEOS module.

+
Immutable
st_lineinterpolatepoints(geometry: geometry, fraction: float, repeat: bool) → geometry

Returns one or more points along the LineString which are at integral multiples of the given fraction of the LineString’s total length. If repeat is false (default true) then it returns the first point.

+

Note If the result has zero or one points, it will be returned as a POINT. If it has two or more points, it will be returned as a MULTIPOINT.

+

This function utilizes the GEOS module.

+
Immutable
st_linelocatepoint(line: geometry, point: geometry) → float

Returns a float between 0 and 1 representing the location of the closest point on LineString to the given Point, as a fraction of total 2d line length.

+
Immutable
st_linemerge(geometry: geometry) → geometry

Returns a LineString or MultiLineString by joining together constituents of a MultiLineString with matching endpoints. If the input is not a MultiLineString or LineString, an empty GeometryCollection is returned.

+

This function utilizes the GEOS module.

+
Immutable
st_linestringfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not LineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_linestringfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linestringfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linestringfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linesubstring(linestring: geometry, start_fraction: decimal, end_fraction: decimal) → geometry

Return a linestring being a substring of the input one starting and ending at the given fractions of total 2D length. Second and third arguments are float8 values between 0 and 1.

+
Immutable
st_linesubstring(linestring: geometry, start_fraction: float, end_fraction: float) → geometry

Return a linestring being a substring of the input one starting and ending at the given fractions of total 2D length. Second and third arguments are float8 values between 0 and 1.

+
Immutable
st_longestline(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the LineString that corresponds to the max distance across every pair of points comprising the given geometries.

+

Note if geometries are the same, it will return the LineString with the maximum distance between the geometry’s vertexes. The function will return the longest line that was discovered first when comparing maximum distances if more than one is found.

+
Immutable
st_m(geometry: geometry) → float

Returns the M coordinate of a geometry if it is a Point.

+
Immutable
st_makebox2d(geometry_a: geometry, geometry_b: geometry) → box2d

Creates a box2d from two points. Errors if arguments are not two non-empty points.

+
Immutable
st_makeenvelope(xmin: float, ymin: float, xmax: float, ymax: float) → geometry

Creates a rectangular Polygon from the minimum and maximum values for X and Y with SRID 0.

+
Immutable
st_makeenvelope(xmin: float, ymin: float, xmax: float, ymax: float, srid: int) → geometry

Creates a rectangular Polygon from the minimum and maximum values for X and Y with the given SRID.

+
Immutable
st_makepoint(x: float, y: float) → geometry

Returns a new Point with the given X and Y coordinates.

+
Immutable
st_makepoint(x: float, y: float, z: float) → geometry

Returns a new Point with the given X, Y, and Z coordinates.

+
Immutable
st_makepoint(x: float, y: float, z: float, m: float) → geometry

Returns a new Point with the given X, Y, Z, and M coordinates.

+
Immutable
st_makepointm(x: float, y: float, m: float) → geometry

Returns a new Point with the given X, Y, and M coordinates.

+
Immutable
st_makepolygon(geometry: geometry) → geometry

Returns a new Polygon with the given outer LineString.

+
Immutable
st_makepolygon(outer: geometry, interior: anyelement[]) → geometry

Returns a new Polygon with the given outer LineString and interior (hole) LineString(s).

+
Immutable
st_makevalid(geometry: geometry) → geometry

Returns a valid form of the given geometry according to the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_maxdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the maximum distance across every pair of points comprising the given geometries. Note if the geometries are the same, it will return the maximum distance between the geometry’s vertexes.

+
Immutable
st_memsize(geometry: geometry) → int

Returns the amount of memory space (in bytes) the geometry takes.

+
Immutable
st_minimumboundingcircle(geometry: geometry) → geometry

Returns the smallest circle polygon that can fully contain a geometry.

+
Immutable
st_minimumboundingcircle(geometry: geometry, num_segs: int) → geometry

Returns the smallest circle polygon that can fully contain a geometry.

+
Immutable
st_minimumboundingradius(geometry: geometry) → tuple{geometry AS center, float AS radius}

Returns a record containing the center point and radius of the smallest circle that can fully contain the given geometry.

+
Immutable
st_minimumclearance(geometry: geometry) → float

Returns the minimum distance a vertex can move before producing an invalid geometry. Returns Infinity if no minimum clearance can be found (e.g. for a single point).

+
Immutable
st_minimumclearanceline(geometry: geometry) → geometry

Returns a LINESTRING spanning the minimum distance a vertex can move before producing an invalid geometry. If no minimum clearance can be found (e.g. for a single point), an empty LINESTRING is returned.

+
Immutable
st_mlinefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mlinefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mlinefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mlinefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mpointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mpointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpolyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mpolyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_mpolyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_mpolyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multi(geometry: geometry) → geometry

Returns the geometry as a new multi-geometry, e.g. converts a POINT to a MULTIPOINT. If the input is already a multitype or collection, it is returned as is.

+
Immutable
st_multilinefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multilinefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multilinestringfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multipointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipolyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipolyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipolygonfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_ndims(geometry: geometry) → int

Returns the number of coordinate dimensions of a given Geometry.

+
Immutable
st_node(geometry: geometry) → geometry

Adds a node on a geometry for each intersection. Resulting geometry is always a MultiLineString.

+
Immutable
st_normalize(geometry: geometry) → geometry

Returns the geometry in its normalized form.

+

This function utilizes the GEOS module.

+
Immutable
st_npoints(geometry: geometry) → int

Returns the number of points in a given Geometry. Works for any shape type.

+
Immutable
st_nrings(geometry: geometry) → int

Returns the number of rings in a Polygon Geometry. Returns 0 if the shape is not a Polygon.

+
Immutable
st_numgeometries(geometry: geometry) → int

Returns the number of shapes inside a given Geometry.

+
Immutable
st_numinteriorring(geometry: geometry) → int

Returns the number of interior rings in a Polygon Geometry. Returns NULL if the shape is not a Polygon.

+
Immutable
st_numinteriorrings(geometry: geometry) → int

Returns the number of interior rings in a Polygon Geometry. Returns NULL if the shape is not a Polygon.

+
Immutable
st_numpoints(geometry: geometry) → int

Returns the number of points in a LineString. Returns NULL if the Geometry is not a LineString.

+
Immutable
st_orderingequals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is exactly equal to geometry_b, having all coordinates in the same order, as well as the same type, SRID, bounding box, and so on.

+
Immutable
st_orientedenvelope(geometry: geometry) → geometry

Returns a minimum rotated rectangle enclosing a geometry. +Note that more than one minimum rotated rectangle may exist. +May return a Point or LineString in the case of degenerate inputs.

+
Immutable
st_overlaps(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a intersects but does not completely contain geometry_b, or vice versa. “Does not completely” implies ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = false.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_perimeter(geography: geography) → float

Returns the perimeter of the given geography in meters. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_perimeter(geography: geography, use_spheroid: bool) → float

Returns the perimeter of the given geography in meters.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_perimeter(geometry: geometry) → float

Returns the perimeter of the given geometry.

+

Note ST_Perimeter is only valid for Polygon - use ST_Length for LineString.

+

This function utilizes the GEOS module.

+
Immutable
st_perimeter2d(geometry: geometry) → float

Returns the perimeter of the given geometry.

+

Note ST_Perimeter is only valid for Polygon - use ST_Length for LineString.

+

This function utilizes the GEOS module.

+
Immutable
st_point(x: float, y: float) → geometry

Returns a new Point with the given X and Y coordinates.

+
Immutable
st_pointfromgeohash(geohash: string) → geometry

Return a POINT Geometry from a GeoHash string with max precision.

+
Immutable
st_pointfromgeohash(geohash: string, precision: int) → geometry

Return a POINT Geometry from a GeoHash string with supplied precision.

+
Immutable
st_pointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Point, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_pointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointinsidecircle(geometry: geometry, x_coord: float, y_coord: float, radius: float) → bool

Returns true if the geometry is a point and is inside the circle. Returns false otherwise.

+
Immutable
st_pointn(geometry: geometry, n: int) → geometry

Returns the n-th Point of a LineString (1-indexed). Returns NULL if out of bounds or not a LineString.

+
Immutable
st_pointonsurface(geometry: geometry) → geometry

Returns a point that intersects with the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_points(geometry: geometry) → geometry

Returns all coordinates in the given Geometry as a MultiPoint, including duplicates.

+
Immutable
st_polyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Polygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_polyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygon(geometry: geometry, srid: int) → geometry

Returns a new Polygon from the given LineString and sets its SRID. It is equivalent to ST_MakePolygon with a single argument followed by ST_SetSRID.

+
Immutable
st_polygonfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Polygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_polygonfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygonfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygonfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_project(geography: geography, distance: float, azimuth: float) → geography

Returns a point projected from a start point along a geodesic using a given distance and azimuth (bearing). +This is known as the direct geodesic problem.

+

The distance is given in meters. Negative values are supported.

+

The azimuth (also known as heading or bearing) is given in radians. It is measured clockwise from true north (azimuth zero). +East is azimuth π/2 (90 degrees); south is azimuth π (180 degrees); west is azimuth 3π/2 (270 degrees). +Negative azimuth values and values greater than 2π (360 degrees) are supported.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry) → string

Returns the DE-9IM spatial relation between geometry_a and geometry_b.

+

This function utilizes the GEOS module.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry, bnr: int) → string

Returns the DE-9IM spatial relation between geometry_a and geometry_b using the given boundary node rule (1:OGC/MOD2, 2:Endpoint, 3:MultivalentEndpoint, 4:MonovalentEndpoint).

+

This function utilizes the GEOS module.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry, pattern: string) → bool

Returns whether the DE-9IM spatial relation between geometry_a and geometry_b matches the DE-9IM pattern.

+

This function utilizes the GEOS module.

+
Immutable
st_relatematch(intersection_matrix: string, pattern: string) → bool

Returns whether the given DE-9IM intersection matrix satisfies the given pattern.

+
Immutable
st_removepoint(line_string: geometry, index: int) → geometry

Removes the Point at the given 0-based index and returns the modified LineString geometry.

+
Immutable
st_removerepeatedpoints(geometry: geometry) → geometry

Returns a geometry with repeated points removed.

+
Immutable
st_removerepeatedpoints(geometry: geometry, tolerance: float) → geometry

Returns a geometry with repeated points removed, within the given distance tolerance.

+
Immutable
st_reverse(geometry: geometry) → geometry

Returns a modified geometry by reversing the order of its vertices.

+
Immutable
st_rotate(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated around the origin by a rotation angle.

+
Immutable
st_rotate(g: geometry, angle_radians: float, origin_point: geometry) → geometry

Returns a modified Geometry whose coordinates are rotated around the provided origin by a rotation angle.

+
Immutable
st_rotate(g: geometry, angle_radians: float, origin_x: float, origin_y: float) → geometry

Returns a modified Geometry whose coordinates are rotated around the provided origin by a rotation angle.

+
Immutable
st_rotatex(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the x axis by a rotation angle.

+
Immutable
st_rotatey(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the y axis by a rotation angle.

+
Immutable
st_rotatez(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the z axis by a rotation angle.

+
Immutable
st_s2covering(geography: geography) → geography

Returns a geography which represents the S2 covering used by the index using the default index configuration.

+
Immutable
st_s2covering(geography: geography, settings: string) → geography

Returns a geography which represents the S2 covering used by the index using the index configuration specified by the settings parameter.

+

The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX ... WITH (...), e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using SELECT ST_S2Covering(geography, 's2_max_level=15,s2_level_mod=3').

+
Immutable
st_s2covering(geometry: geometry) → geometry

Returns a geometry which represents the S2 covering used by the index using the default index configuration.

+
Immutable
st_s2covering(geometry: geometry, settings: string) → geometry

Returns a geometry which represents the S2 covering used by the index using the index configuration specified by the settings parameter.

+

The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX ... WITH (...), e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using SELECT ST_S2Covering(geometry, 's2_max_level=15,s2_level_mod=3').

+
Immutable
st_scale(g: geometry, factor: geometry) → geometry

Returns a modified Geometry scaled by taking in a Geometry as the factor.

+
Immutable
st_scale(g: geometry, factor: geometry, origin: geometry) → geometry

Returns a modified Geometry scaled by the Geometry factor relative to a false origin.

+
Immutable
st_scale(geometry: geometry, x_factor: float, y_factor: float) → geometry

Returns a modified Geometry scaled by the given factors.

+
Immutable
st_segmentize(geography: geography, max_segment_length_meters: float) → geography

Returns a modified Geography having no segment longer than the given max_segment_length meters.

+

The calculations are done on a sphere.

+

This function utilizes the S2 library for spherical calculations.

+
Immutable
st_segmentize(geometry: geometry, max_segment_length: float) → geometry

Returns a modified Geometry having no segment longer than the given max_segment_length. Length units are in units of spatial reference.

+
Immutable
st_setpoint(line_string: geometry, index: int, point: geometry) → geometry

Sets the Point at the given 0-based index and returns the modified LineString geometry.

+
Immutable
st_setsrid(geography: geography, srid: int) → geography

Sets a Geography to a new SRID without transforming the coordinates.

+
Immutable
st_setsrid(geometry: geometry, srid: int) → geometry

Sets a Geometry to a new SRID without transforming the coordinates.

+
Immutable
st_sharedpaths(geometry_a: geometry, geometry_b: geometry) → geometry

Returns a collection containing paths shared by the two input geometries.

+

Those going in the same direction are in the first element of the collection, +those going in the opposite direction are in the second element. +The paths themselves are given in the direction of the first geometry.

+
Immutable
st_shiftlongitude(geometry: geometry) → geometry

Returns a modified version of a geometry in which the longitude (X coordinate) of each point is incremented by 360 if it is <0 and decremented by 360 if it is >180. The result is only meaningful if the coordinates are in longitude/latitude.

+
Immutable
st_shortestline(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the LineString that corresponds to the minimum distance across every pair of points comprising the given geometries.

+

Note if geometries are the same, it will return the LineString with the minimum distance between the geometry’s vertexes. The function will return the shortest line that was discovered first when comparing minimum distances if more than one is found.

+
Immutable
st_simplify(geometry: geometry, tolerance: float) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm.

+

This function utilizes the GEOS module.

+
Immutable
st_simplify(geometry: geometry, tolerance: float, preserve_collapsed: bool) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm, retaining objects that would be too small given the tolerance if preserve_collapsed is set to true.

+
Immutable
st_simplifypreservetopology(geometry: geometry, tolerance: float) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm, avoiding the creation of invalid geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_snap(input: geometry, target: geometry, tolerance: float) → geometry

Snaps the vertices and segments of the input geometry to the target geometry’s vertices. +Tolerance is used to control where snapping is performed. The result geometry is the input geometry with the vertices snapped. +If no snapping occurs then the input geometry is returned unchanged.

+
Immutable
st_snaptogrid(geometry: geometry, origin: geometry, size_x: float, size_y: float, size_z: float, size_m: float) → geometry

Snap a geometry to a grid defined by the given origin and X, Y, Z, and M cell sizes. Any dimension with a 0 cell size will not be snapped.

+
Immutable
st_snaptogrid(geometry: geometry, origin_x: float, origin_y: float, size_x: float, size_y: float) → geometry

Snap a geometry to a grid with X coordinates snapped to size_x and Y coordinates snapped to size_y based on an origin of (origin_x, origin_y).

+
Immutable
st_snaptogrid(geometry: geometry, size: float) → geometry

Snap a geometry to a grid of the given size. The specified size is only used to snap X and Y coordinates.

+
Immutable
st_snaptogrid(geometry: geometry, size_x: float, size_y: float) → geometry

Snap a geometry to a grid with X coordinates snapped to size_x and Y coordinates snapped to size_y.

+
Immutable
st_srid(geography: geography) → int

Returns the Spatial Reference Identifier (SRID) for the ST_Geography as defined in spatial_ref_sys table.

+
Immutable
st_srid(geometry: geometry) → int

Returns the Spatial Reference Identifier (SRID) for the ST_Geometry as defined in spatial_ref_sys table.

+
Immutable
st_startpoint(geometry: geometry) → geometry

Returns the first point of a geometry which has shape LineString. Returns NULL if the geometry is not a LineString.

+
Immutable
st_subdivide(geometry: geometry) → geometry

Returns a geometry divided into parts, where each part contains no more than 256 vertices.

+
Immutable
st_subdivide(geometry: geometry, max_vertices: int4) → geometry

Returns a geometry divided into parts, where each part contains no more than the number of vertices provided.

+
Immutable
st_summary(geography: geography) → string

Returns a text summary of the contents of the geography.

+

Flags shown in square brackets after the geometry type have the following meaning:

+
    +
  • M: has M coordinate
  • +
  • Z: has Z coordinate
  • +
  • B: has a cached bounding box
  • +
  • G: is geography
  • +
  • S: has spatial reference system
  • +
+
Immutable
st_summary(geometry: geometry) → string

Returns a text summary of the contents of the geometry.

+

Flags shown in square brackets after the geometry type have the following meaning:

+
    +
  • M: has M coordinate
  • +
  • Z: has Z coordinate
  • +
  • B: has a cached bounding box
  • +
  • G: is geography
  • +
  • S: has spatial reference system
  • +
+
Immutable
st_swapordinates(geometry: geometry, swap_ordinate_string: string) → geometry

Returns a version of the given geometry with given ordinates swapped. +The swap_ordinate_string parameter is a 2-character string naming the ordinates to swap. Valid names are: x, y, z and m.

+
Immutable
st_symdifference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the symmetric difference of both geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_symmetricdifference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the symmetric difference of both geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4, bounds: geometry) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4, bounds: geometry, margin: float) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_touches(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if the only points in common between geometry_a and geometry_b are on the boundary. Note points do not touch other points.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_transform(geometry: geometry, from_proj_text: string, srid: int) → geometry

Transforms a geometry from the coordinate reference system specified by from_proj_text into the coordinate reference system referenced by the supplied SRID by projecting its coordinates. The supplied SRID is set on the new geometry.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, from_proj_text: string, to_proj_text: string) → geometry

Transforms a geometry into the coordinate reference system assuming the from_proj_text to the new to_proj_text by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, srid: int) → geometry

Transforms a geometry into the given SRID coordinate reference system by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, to_proj_text: string) → geometry

Transforms a geometry into the coordinate reference system referenced by the projection text by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_translate(g: geometry, delta_x: float, delta_y: float) → geometry

Returns a modified Geometry translated by the given deltas.

+
Immutable
st_translate(g: geometry, delta_x: float, delta_y: float, delta_z: float) → geometry

Returns a modified Geometry translated by the given deltas.

+
Immutable
st_transscale(geometry: geometry, delta_x: float, delta_y: float, x_factor: float, y_factor: float) → geometry

Translates the geometry using the deltaX and deltaY args, then scales it using the XFactor, YFactor args, working in 2D only.

+
Immutable
st_unaryunion(geometry: geometry) → geometry

Returns a union of the components for any geometry or geometry collection provided. Dissolves boundaries of a multipolygon.

+
Immutable
st_voronoilines(geometry: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoilines(geometry: geometry, tolerance: float) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoilines(geometry: geometry, tolerance: float, extend_to: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoipolygons(geometry: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_voronoipolygons(geometry: geometry, tolerance: float) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_voronoipolygons(geometry: geometry, tolerance: float, extend_to: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_within(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is completely inside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_wkbtosql(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation.

+
Immutable
st_wkttosql(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_x(geometry: geometry) → float

Returns the X coordinate of a geometry if it is a Point.

+
Immutable
st_xmax(box2d: box2d) → float

Returns the maximum X ordinate of a box2d.

+
Immutable
st_xmax(geometry: geometry) → float

Returns the maximum X ordinate of a geometry.

+
Immutable
st_xmin(box2d: box2d) → float

Returns the minimum X ordinate of a box2d.

+
Immutable
st_xmin(geometry: geometry) → float

Returns the minimum X ordinate of a geometry.

+
Immutable
st_y(geometry: geometry) → float

Returns the Y coordinate of a geometry if it is a Point.

+
Immutable
st_ymax(box2d: box2d) → float

Returns the maximum Y ordinate of a box2d.

+
Immutable
st_ymax(geometry: geometry) → float

Returns the maximum Y ordinate of a geometry.

+
Immutable
st_ymin(box2d: box2d) → float

Returns the minimum Y ordinate of a box2d.

+
Immutable
st_ymin(geometry: geometry) → float

Returns the minimum Y ordinate of a geometry.

+
Immutable
st_z(geometry: geometry) → float

Returns the Z coordinate of a geometry if it is a Point.

+
Immutable
st_zmflag(geometry: geometry) → int2

Returns a code based on the ZM coordinate dimension of a geometry (XY = 0, XYM = 1, XYZ = 2, XYZM = 3).

+
Immutable
+ +### String and byte functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
ascii(val: string) → int

Returns the character code of the first character in val. Despite the name, the function supports Unicode too.

+
Immutable
bit_count(val: bytes) → int

Calculates the number of bits set used to represent val.

+
Immutable
bit_count(val: varbit) → int

Calculates the number of bits set used to represent val.

+
Immutable
bit_length(val: bytes) → int

Calculates the number of bits used to represent val.

+
Immutable
bit_length(val: string) → int

Calculates the number of bits used to represent val.

+
Immutable
bit_length(val: varbit) → int

Calculates the number of bits used to represent val.

+
Immutable
bitmask_and(a: string, b: string) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: string, b: varbit) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: varbit, b: string) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: varbit, b: varbit) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: string, b: string) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: string, b: varbit) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: varbit, b: string) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: varbit, b: varbit) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: string, b: string) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: string, b: varbit) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: varbit, b: string) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: varbit, b: varbit) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
btrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the beginning or end of input (applies recursively).

+

For example, btrim('doggie', 'eod') returns ggi.

+
Immutable
btrim(val: string) → string

Removes all spaces from the beginning and end of val.

+
Immutable
char_length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
char_length(val: string) → int

Calculates the number of characters in val.

+
Immutable
character_length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
character_length(val: string) → int

Calculates the number of characters in val.

+
Immutable
chr(val: int) → string

Returns the character with the code given in val. Inverse function of ascii().

+
Immutable
compress(data: bytes, codec: string) → bytes

Compress data with the specified codec (‘gzip’, ‘lz4’, ‘snappy’, ‘zstd’).

+
Immutable
concat(any...) → string

Concatenates a comma-separated list of strings.

+
Immutable
concat_ws(string, any...) → string

Uses the first argument as a separator between the concatenation of the subsequent arguments.

+

For example concat_ws('!','wow','great') returns wow!great.

+
Immutable
convert_from(str: bytes, enc: string) → string

Decode the bytes in str into a string using encoding enc. Supports encodings ‘UTF8’ and ‘LATIN1’.

+
Immutable
convert_to(str: string, enc: string) → bytes

Encode the string str as a byte array using encoding enc. Supports encodings ‘UTF8’ and ‘LATIN1’.

+
Immutable
decode(text: string, format: string) → bytes

Decodes data using format (hex / escape / base64).

+
Immutable
decompress(data: bytes, codec: string) → bytes

Decompress data with the specified codec (‘gzip’, ‘lz4’, ‘snappy’, ‘zstd’).

+
Immutable
difference(source: string, target: string) → int

Convert two strings to their Soundex codes and report the number of matching code positions.

+
Immutable
encode(data: bytes, format: string) → string

Encodes data using format (hex / escape / base64).

+
Immutable
format(string, any...) → string

Interprets the first argument as a format string similar to C sprintf and interpolates the remaining arguments.

+
Stable
from_ip(val: bytes) → string

Converts the byte string representation of an IP to its character string representation.

+
Immutable
from_uuid(val: bytes) → string

Converts the byte string representation of a UUID to its character string representation.

+
Immutable
get_bit(bit_string: varbit, index: int) → int

Extracts a bit at given index in the bit array.

+
Immutable
get_bit(byte_string: bytes, index: int) → int

Extracts a bit at the given index in the byte array.

+
Immutable
get_byte(byte_string: bytes, index: int) → int

Extracts a byte at the given index in the byte array.

+
Immutable
initcap(val: string) → string

Capitalizes the first letter of val.

+
Immutable
left(input: bytes, return_set: int) → bytes

Returns the first return_set bytes from input.

+
Immutable
left(input: string, return_set: int) → string

Returns the first return_set characters from input.

+
Immutable
length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
length(val: string) → int

Calculates the number of characters in val.

+
Immutable
length(val: varbit) → int

Calculates the number of bits in val.

+
Immutable
lower(val: string) → string

Converts all characters in val to their lower-case equivalents.

+
Immutable
lpad(string: string, length: int) → string

Pads string to length by adding ’ ’ to the left of string. If string is longer than length it is truncated.

+
Immutable
lpad(string: string, length: int, fill: string) → string

Pads string by adding fill to the left of string to make it length. If string is longer than length it is truncated.

+
Immutable
ltrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the beginning (left-hand side) of input (applies recursively).

+

For example, ltrim('doggie', 'od') returns ggie.

+
Immutable
ltrim(val: string) → string

Removes all spaces from the beginning (left-hand side) of val.

+
Immutable
md5(bytes...) → string

Calculates the MD5 hash value of a set of values.

+
Leakproof
md5(string...) → string

Calculates the MD5 hash value of a set of values.

+
Leakproof
octet_length(val: bytes) → int

Calculates the number of bytes used to represent val.

+
Immutable
octet_length(val: string) → int

Calculates the number of bytes used to represent val.

+
Immutable
octet_length(val: varbit) → int

Calculates the number of bits used to represent val.

+
Immutable
overlay(input: string, overlay_val: string, start_pos: int) → string

Replaces characters in input with overlay_val starting at start_pos (begins at 1).

+

For example, overlay('doggie', 'CAT', 2) returns dCATie.

+
Immutable
overlay(input: string, overlay_val: string, start_pos: int, end_pos: int) → string

Deletes the characters in input between start_pos and end_pos (count starts at 1), and then insert overlay_val at start_pos.

+
Immutable
parse_date(string: string, datestyle: string) → date

Parses a date assuming it is in format specified by DateStyle.

+
Immutable
parse_date(val: string) → date

Parses a date assuming it is in MDY format.

+
Immutable
parse_ident(qualified_identifier: string) → string[]

Splits qualified_identifier into an array of identifiers, removing any quoting of individual identifiers. Extra characters after the last identifier are considered an error.

+
Immutable
parse_ident(qualified_identifier: string, strict: bool) → string[]

Splits qualified_identifier into an array of identifiers, removing any quoting of individual identifiers. If strict is false, then extra characters after the last identifier are ignored.

+
Immutable
parse_interval(string: string, style: string) → interval

Convert a string to an interval using the given IntervalStyle.

+
Immutable
parse_interval(val: string) → interval

Convert a string to an interval assuming the Postgres IntervalStyle.

+
Immutable
parse_time(string: string, timestyle: string) → time

Parses a time assuming the date (if any) is in format specified by DateStyle.

+
Immutable
parse_time(val: string) → time

Parses a time assuming the date (if any) is in MDY format.

+
Immutable
parse_timestamp(string: string, datestyle: string) → timestamp

Convert a string containing an absolute timestamp to the corresponding timestamp assuming dates formatted using the given DateStyle.

+
Immutable
parse_timestamp(val: string) → timestamp

Convert a string containing an absolute timestamp to the corresponding timestamp assuming dates are in MDY format.

+
Immutable
parse_timetz(string: string, timestyle: string) → timetz

Parses a timetz assuming the date (if any) is in format specified by DateStyle.

+
Immutable
parse_timetz(val: string) → timetz

Parses a timetz assuming the date (if any) is in MDY format.

+
Immutable
prettify_statement(statement: string, line_width: int, align_mode: int, case_mode: int) → string

Prettifies a statement using a user-configured pretty-printing config. +Align mode values range from 0 - 3, representing no, partial, full, and extra alignment respectively. +Case mode values range between 0 - 1, representing lower casing and upper casing respectively.

+
Immutable
prettify_statement(val: string) → string

Prettifies a statement using the default pretty-printing config.

+
Immutable
quote_ident(val: string) → string

Return val suitably quoted to serve as identifier in a SQL statement.

+
Immutable
quote_literal(val: string) → string

Return val suitably quoted to serve as string literal in a SQL statement.

+
Immutable
quote_literal(val: anyelement) → string

Coerce val to a string and then quote it as a literal.

+
Stable
quote_nullable(val: string) → string

Coerce val to a string and then quote it as a literal. If val is NULL, returns ‘NULL’.

+
Immutable
quote_nullable(val: anyelement) → string

Coerce val to a string and then quote it as a literal. If val is NULL, returns ‘NULL’.

+
Stable
regexp_extract(input: string, regex: string) → string

Returns the first match for the Regular Expression regex in input.

+
Immutable
regexp_replace(input: string, regex: string, replace: string) → string

Replaces matches for the Regular Expression regex in input with the Regular Expression replace.

+
Immutable
regexp_replace(input: string, regex: string, replace: string, flags: string) → string

Replaces matches for the regular expression regex in input with the regular expression replace using flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
repeat(input: string, repeat_counter: int) → string

Concatenates input repeat_counter number of times.

+

For example, repeat('dog', 2) returns dogdog.

+
Immutable
replace(input: string, find: string, replace: string) → string

Replaces all occurrences of find with replace in input

+
Immutable
reverse(val: string) → string

Reverses the order of the string’s characters.

+
Immutable
right(input: bytes, return_set: int) → bytes

Returns the last return_set bytes from input.

+
Immutable
right(input: string, return_set: int) → string

Returns the last return_set characters from input.

+
Immutable
rpad(string: string, length: int) → string

Pads string to length by adding ’ ’ to the right of string. If string is longer than length it is truncated.

+
Immutable
rpad(string: string, length: int, fill: string) → string

Pads string to length by adding fill to the right of string. If string is longer than length it is truncated.

+
Immutable
rtrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the end (right-hand side) of input (applies recursively).

+

For example, rtrim('doggie', 'ei') returns dogg.

+
Immutable
rtrim(val: string) → string

Removes all spaces from the end (right-hand side) of val.

+
Immutable
set_bit(bit_string: varbit, index: int, to_set: int) → varbit

Updates a bit at given index in the bit array.

+
Immutable
set_bit(byte_string: bytes, index: int, to_set: int) → bytes

Updates a bit at the given index in the byte array.

+
Immutable
set_byte(byte_string: bytes, index: int, to_set: int) → bytes

Updates a byte at the given index in the byte array.

+
Immutable
sha1(bytes...) → string

Calculates the SHA1 hash value of a set of values.

+
Leakproof
sha1(string...) → string

Calculates the SHA1 hash value of a set of values.

+
Leakproof
sha224(bytes...) → string

Calculates the SHA224 hash value of a set of values.

+
Leakproof
sha224(string...) → string

Calculates the SHA224 hash value of a set of values.

+
Leakproof
sha256(bytes...) → string

Calculates the SHA256 hash value of a set of values.

+
Leakproof
sha256(string...) → string

Calculates the SHA256 hash value of a set of values.

+
Leakproof
sha384(bytes...) → string

Calculates the SHA384 hash value of a set of values.

+
Leakproof
sha384(string...) → string

Calculates the SHA384 hash value of a set of values.

+
Leakproof
sha512(bytes...) → string

Calculates the SHA512 hash value of a set of values.

+
Leakproof
sha512(string...) → string

Calculates the SHA512 hash value of a set of values.

+
Leakproof
similar_escape(pattern: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern.

+
Immutable
similar_escape(pattern: string, escape: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern using escape as an escape token.

+
Immutable
similar_to_escape(pattern: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern.

+
Immutable
similar_to_escape(pattern: string, escape: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern using escape as an escape token.

+
Immutable
similar_to_escape(unescaped: string, pattern: string, escape: string) → bool

Matches unescaped with pattern using escape as an escape token.

+
Immutable
split_part(input: string, delimiter: string, return_index_pos: int) → string

Splits input using delimiter and returns the field at return_index_pos (starting from 1). If return_index_pos is negative, it returns the |return_index_pos|'th field from the end.

+

For example, split_part('123.456.789.0', '.', 3) returns 789.

+
Immutable
strpos(input: bytes, find: bytes) → int

Calculates the position where the byte subarray find begins in input.

+
Immutable
strpos(input: string, find: string) → int

Calculates the position where the string find begins in input.

+

For example, strpos('doggie', 'gie') returns 4.

+
Immutable
strpos(input: varbit, find: varbit) → int

Calculates the position where the bit subarray find begins in input.

+
Immutable
substr(input: bytes, start_pos: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: bytes, start_pos: int, length: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substr(input: string, regex: string) → string

Returns a substring of input that matches the regular expression regex.

+
Immutable
substr(input: string, regex: string, escape_char: string) → string

Returns a substring of input that matches the regular expression regex using escape_char as your escape character instead of \.

+
Immutable
substr(input: string, start_pos: int) → string

Returns a substring of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: string, start_pos: int, length: int) → string

Returns a substring of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substr(input: varbit, start_pos: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: varbit, start_pos: int, length: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: bytes, start_pos: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: bytes, start_pos: int, length: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: string, regex: string) → string

Returns a substring of input that matches the regular expression regex.

+
Immutable
substring(input: string, regex: string, escape_char: string) → string

Returns a substring of input that matches the regular expression regex using escape_char as your escape character instead of \.

+
Immutable
substring(input: string, start_pos: int) → string

Returns a substring of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: string, start_pos: int, length: int) → string

Returns a substring of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: varbit, start_pos: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: varbit, start_pos: int, length: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring_index(input: string, delim: string, count: int) → string

Returns a substring of input before count occurrences of delim. +If count is positive, the leftmost part is returned. If count is negative, the rightmost part is returned.

+
Immutable
to_char_with_style(date: date, datestyle: string) → string

Convert a date to a string assuming the string is formatted using the given DateStyle.

+
Immutable
to_char_with_style(interval: interval, style: string) → string

Convert an interval to a string using the given IntervalStyle.

+
Immutable
to_char_with_style(timestamp: timestamp, datestyle: string) → string

Convert a timestamp to a string assuming the string is formatted using the given DateStyle.

+
Immutable
to_english(val: int) → string

This function enunciates the value of its argument using English cardinals.

+
Immutable
to_hex(val: bytes) → string

Converts val to its hexadecimal representation.

+
Immutable
to_hex(val: int) → string

Converts val to its hexadecimal representation.

+
Immutable
to_hex(val: string) → string

Converts val to its hexadecimal representation.

+
Immutable
to_ip(val: string) → bytes

Converts the character string representation of an IP to its byte string representation.

+
Immutable
to_uuid(val: string) → bytes

Converts the character string representation of a UUID to its byte string representation.

+
Immutable
translate(input: string, find: string, replace: string) → string

In input, replaces the first character from find with the first character in replace; repeat for each character in find.

+

For example, translate('doggie', 'dog', '123'); returns 1233ie.

+
Immutable
ulid_to_uuid(val: string) → uuid

Converts a ULID string to its UUID-encoded representation.

+
Immutable
unaccent(val: string) → string

Removes accents (diacritic signs) from the text provided in val.

+
Immutable
upper(val: string) → string

Converts all characters in val to their upper-case equivalents.

+
Immutable
+ +### System info functions + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cluster_logical_timestamp() → decimal

Returns the logical time of the current transaction as +a CockroachDB HLC in decimal form.

+

Note that uses of this function disable server-side optimizations and +may increase either contention or retry errors, or both.

+

Returns an error if run in a transaction with an isolation level weaker than SERIALIZABLE.

+
Volatile
current_database() → string

Returns the current database.

+
Stable
current_schema() → string

Returns the current schema.

+
Stable
current_schemas(include_pg_catalog: bool) → string[]

Returns the valid schemas in the search path.

+
Stable
current_user() → string

Returns the current user. This function is provided for compatibility with PostgreSQL.

+
Stable
session_user() → string

Returns the session user. This function is provided for compatibility with PostgreSQL.

+
Stable
to_regclass(text: string) → regtype

Translates a textual relation name to its OID

+
Stable
to_regnamespace(text: string) → regtype

Translates a textual schema name to its OID

+
Stable
to_regproc(text: string) → regtype

Translates a textual function or procedure name to its OID

+
Stable
to_regprocedure(text: string) → regtype

Translates a textual function or procedure name (with argument types) to its OID

+
Stable
to_regrole(text: string) → regtype

Translates a textual role name to its OID

+
Stable
to_regtype(text: string) → regtype

Translates a textual type name to its OID

+
Stable
version() → string

Returns the node’s version of CockroachDB.

+
Volatile
+ +### TIMETZ functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
current_time() → time

Returns the current transaction’s time with no time zone.

+
Stable
current_time() → timetz

Returns the current transaction’s time with time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
current_time(precision: int) → time

Returns the current transaction’s time with no time zone.

+
Stable
current_time(precision: int) → timetz

Returns the current transaction’s time with time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime() → time

Returns the current transaction’s time with no time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime() → timetz

Returns the current transaction’s time with time zone.

+
Stable
localtime(precision: int) → time

Returns the current transaction’s time with no time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime(precision: int) → timetz

Returns the current transaction’s time with time zone.

+
Stable
+ +### Trigrams functions + + + + + + +
Function → ReturnsDescriptionVolatility
show_trgm(input: string) → string[]

Returns an array of all the trigrams in the given string.

+
Immutable
similarity(left: string, right: string) → float

Returns a number that indicates how similar the two arguments are. The range of the result is zero (indicating that the two strings are completely dissimilar) to one (indicating that the two strings are identical).

+
Immutable
+ +### UUID functions + + + + + +
Function → ReturnsDescriptionVolatility
uuid_to_ulid(val: uuid) → string

Converts a UUID-encoded ULID to its string representation.

+
Immutable
+ +### Compatibility functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
col_description(table_oid: oid, column_number: int) → string

Returns the comment for a table column, which is specified by the OID of its table and its column number. (obj_description cannot be used for table columns, since columns do not have OIDs of their own.)

+
Stable
current_setting(setting_name: string) → string

System info

+
Stable
current_setting(setting_name: string, missing_ok: bool) → string

System info

+
Stable
format_type(type_oid: oid, typemod: int) → string

Returns the SQL name of a data type that is identified by its type OID and possibly a type modifier. Currently, the type modifier is ignored.

+
Stable
getdatabaseencoding() → string

Returns the current encoding name used by the database.

+
Stable
has_any_column_privilege(table: string, privilege: string) → bool

Returns whether or not the current user has privileges for any column of table.

+
Stable
has_any_column_privilege(table: oid, privilege: string) → bool

Returns whether or not the current user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: string, table: string, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: string, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: oid, table: string, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: oid, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_column_privilege(table: string, column: int, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: string, column: string, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: oid, column: int, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: oid, column: string, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(user: string, table: string, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: string, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: oid, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: oid, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: string, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: string, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: oid, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: oid, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_database_privilege(database: string, privilege: string) → bool

Returns whether or not the current user has privileges for database.

+
Stable
has_database_privilege(database: oid, privilege: string) → bool

Returns whether or not the current user has privileges for database.

+
Stable
has_database_privilege(user: string, database: string, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: string, database: oid, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: oid, database: string, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: oid, database: oid, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_foreign_data_wrapper_privilege(fdw: string, privilege: string) → bool

Returns whether or not the current user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(fdw: oid, privilege: string) → bool

Returns whether or not the current user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: string, fdw: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: string, fdw: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: oid, fdw: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: oid, fdw: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_function_privilege(function: string, privilege: string) → bool

Returns whether or not the current user has privileges for function.

+
Stable
has_function_privilege(function: oid, privilege: string) → bool

Returns whether or not the current user has privileges for function.

+
Stable
has_function_privilege(user: string, function: string, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: string, function: oid, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: oid, function: string, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: oid, function: oid, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_language_privilege(language: string, privilege: string) → bool

Returns whether or not the current user has privileges for language.

+
Stable
has_language_privilege(language: oid, privilege: string) → bool

Returns whether or not the current user has privileges for language.

+
Stable
has_language_privilege(user: string, language: string, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: string, language: oid, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: oid, language: string, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: oid, language: oid, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_schema_privilege(schema: string, privilege: string) → bool

Returns whether or not the current user has privileges for schema.

+
Stable
has_schema_privilege(schema: oid, privilege: string) → bool

Returns whether or not the current user has privileges for schema.

+
Stable
has_schema_privilege(user: string, schema: string, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: string, schema: oid, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: oid, schema: string, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: oid, schema: oid, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_sequence_privilege(sequence: string, privilege: string) → bool

Returns whether or not the current user has privileges for sequence.

+
Stable
has_sequence_privilege(sequence: oid, privilege: string) → bool

Returns whether or not the current user has privileges for sequence.

+
Stable
has_sequence_privilege(user: string, sequence: string, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: string, sequence: oid, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: oid, sequence: string, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: oid, sequence: oid, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_server_privilege(server: string, privilege: string) → bool

Returns whether or not the current user has privileges for foreign server.

+
Stable
has_server_privilege(server: oid, privilege: string) → bool

Returns whether or not the current user has privileges for foreign server.

+
Stable
has_server_privilege(user: string, server: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: string, server: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: oid, server: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: oid, server: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_table_privilege(table: string, privilege: string) → bool

Returns whether or not the current user has privileges for table.

+
Stable
has_table_privilege(table: oid, privilege: string) → bool

Returns whether or not the current user has privileges for table.

+
Stable
has_table_privilege(user: string, table: string, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: string, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: oid, table: string, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: oid, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_tablespace_privilege(tablespace: string, privilege: string) → bool

Returns whether or not the current user has privileges for tablespace.

+
Stable
has_tablespace_privilege(tablespace: oid, privilege: string) → bool

Returns whether or not the current user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: string, tablespace: string, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: string, tablespace: oid, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: oid, tablespace: string, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: oid, tablespace: oid, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_type_privilege(type: string, privilege: string) → bool

Returns whether or not the current user has privileges for type.

+
Stable
has_type_privilege(type: oid, privilege: string) → bool

Returns whether or not the current user has privileges for type.

+
Stable
has_type_privilege(user: string, type: string, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: string, type: oid, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: oid, type: string, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: oid, type: oid, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
information_schema._pg_numeric_precision(typid: oid, typmod: int4) → int

Returns the precision of the given type with type modifier

+
Immutable
information_schema._pg_numeric_precision_radix(typid: oid, typmod: int4) → int

Returns the radix of the given type with type modifier

+
Immutable
information_schema._pg_numeric_scale(typid: oid, typmod: int4) → int

Returns the scale of the given type with type modifier

+
Immutable
nameconcatoid(name: string, oid: oid) → name

Used in the information_schema to produce specific_name columns, which are supposed to be unique per schema. The result is the same as ($1::text || ‘_’ || $2::text)::name except that, if it would not fit in 63 characters, we make it do so by truncating the name input (not the oid).

+
Immutable
obj_description(object_oid: oid) → string

Returns the comment for a database object specified by its OID alone. This is deprecated since there is no guarantee that OIDs are unique across different system catalogs; therefore, the wrong comment might be returned.

+
Stable
obj_description(object_oid: oid, catalog_name: string) → string

Returns the comment for a database object specified by its OID and the name of the containing system catalog. For example, obj_description(123456, ‘pg_class’) would retrieve the comment for the table with OID 123456.

+
Stable
oidvectortypes(vector: oidvector) → string

Generates a comma separated string of type names from an oidvector.

+
Stable
pg_backend_pid() → int

Returns a numerical ID attached to this session. This ID is part of the query cancellation key used by the wire protocol. This function was only added for compatibility, and unlike in Postgres, the returned value does not correspond to a real process ID.

+
Stable
pg_collation_for(str: anyelement) → string

Returns the collation of the argument

+
Stable
pg_column_is_updatable(reloid: oid, attnum: int2, include_triggers: bool) → bool

Returns whether the given column can be updated.

+
Stable
pg_column_size(any...) → int

Return size in bytes of the column provided as an argument

+
Stable
pg_function_is_visible(oid: oid) → bool

Returns whether the function with the given OID belongs to one of the schemas on the search path.

+
Stable
pg_get_function_arg_default(func_oid: oid, arg_num: int4) → string

Get textual representation of a function argument’s default value. The second argument of this function is the argument number among all arguments (i.e. proallargtypes, not proargtypes), starting with 1, because that’s how information_schema.sql uses it. Currently, this always returns NULL, since CockroachDB does not support default values.

+
Stable
pg_get_function_arguments(func_oid: oid) → string

Returns the argument list (with defaults) necessary to identify a function, in the form it would need to appear in within CREATE FUNCTION.

+
Stable
pg_get_function_identity_arguments(func_oid: oid) → string

Returns the argument list (without defaults) necessary to identify a function, in the form it would need to appear in within ALTER FUNCTION, for instance.

+
Stable
pg_get_function_result(func_oid: oid) → string

Returns the types of the result of the specified function.

+
Stable
pg_get_functiondef(func_oid: oid) → string

For user-defined functions, returns the definition of the specified function. For builtin functions, returns the name of the function.

+
Stable
pg_get_indexdef(index_oid: oid) → string

Gets the CREATE INDEX command for index

+
Stable
pg_get_indexdef(index_oid: oid, column_no: int, pretty_bool: bool) → string

Gets the CREATE INDEX command for index, or definition of just one index column when given a non-zero column number

+
Stable
pg_get_serial_sequence(table_name: string, column_name: string) → string

Returns the name of the sequence used by the given column_name in the table table_name.

+
Stable
pg_get_viewdef(view_oid: oid) → string

Returns the CREATE statement for an existing view.

+
Stable
pg_get_viewdef(view_oid: oid, pretty_bool: bool) → string

Returns the CREATE statement for an existing view.

+
Stable
pg_has_role(role: string, privilege: string) → bool

Returns whether or not the current user has privileges for role.

+
Stable
pg_has_role(role: oid, privilege: string) → bool

Returns whether or not the current user has privileges for role.

+
Stable
pg_has_role(user: string, role: string, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: string, role: oid, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: oid, role: string, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: oid, role: oid, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_is_other_temp_schema(oid: oid) → bool

Returns true if the given OID is the OID of another session’s temporary schema. (This can be useful, for example, to exclude other sessions’ temporary tables from a catalog display.)

+
Stable
pg_my_temp_schema() → oid

Returns the OID of the current session’s temporary schema, or zero if it has none (because it has not created any temporary tables).

+
Stable
pg_relation_is_updatable(reloid: oid, include_triggers: bool) → int4

Returns the update events the relation supports.

+
Stable
pg_sequence_last_value(sequence_oid: oid) → int

Returns the last value generated by a sequence, or NULL if the sequence has not been used yet.

+
Volatile
pg_sleep(seconds: float) → bool

pg_sleep makes the current session’s process sleep until seconds seconds have elapsed. seconds is a value of type double precision, so fractional-second delays can be specified.

+
Volatile
pg_table_is_visible(oid: oid) → bool

Returns whether the table with the given OID belongs to one of the schemas on the search path.

+
Stable
pg_type_is_visible(oid: oid) → bool

Returns whether the type with the given OID belongs to one of the schemas on the search path.

+
Stable
set_config(setting_name: string, new_value: string, is_local: bool) → string

System info

+
Volatile
shobj_description(object_oid: oid, catalog_name: string) → string

Returns the comment for a shared database object specified by its OID and the name of the containing system catalog. This is just like obj_description except that it is used for retrieving comments on shared objects (e.g. databases).

+
Stable
+ diff --git a/src/current/_includes/cockroach-generated/release-25.3/sql/operators.md b/src/current/_includes/cockroach-generated/release-25.3/sql/operators.md new file mode 100644 index 00000000000..3bc5433dd67 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.3/sql/operators.md @@ -0,0 +1,664 @@ + + + + + +
#Return
int # intint
varbit # varbitvarbit
+ + + + +
#>Return
jsonb #> string[]jsonb
+ + + + +
#>>Return
jsonb #>> string[]string
+ + + + + + + + + +
%Return
decimal % decimaldecimal
decimal % intdecimal
float % floatfloat
int % decimaldecimal
int % intint
string % stringbool
+ + + + + + +
&Return
inet & inetinet
int & intint
varbit & varbitvarbit
+ + + + + + + + + +
&&Return
anyelement && anyelementbool
box2d && box2dbool
box2d && geometrybool
geometry && box2dbool
geometry && geometrybool
inet && inetbool
+ + + + + + + + + + + + + + + +
*Return
decimal * decimaldecimal
decimal * intdecimal
decimal * intervalinterval
float * floatfloat
float * intervalinterval
int * decimaldecimal
int * intint
int * intervalinterval
interval * decimalinterval
interval * floatinterval
interval * intinterval
vector * vectorvector
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Return
+decimaldecimal
+floatfloat
+intint
+intervalinterval
date + intdate
date + intervaltimestamp
date + timetimestamp
date + timetztimestamptz
decimal + decimaldecimal
decimal + intdecimal
decimal + pg_lsnpg_lsn
float + floatfloat
inet + intinet
int + datedate
int + decimaldecimal
int + inetinet
int + intint
interval + datetimestamp
interval + intervalinterval
interval + timetime
interval + timestamptimestamp
interval + timestamptztimestamptz
interval + timetztimetz
pg_lsn + decimalpg_lsn
time + datetimestamp
time + intervaltime
timestamp + intervaltimestamp
timestamptz + intervaltimestamptz
timetz + datetimestamptz
timetz + intervaltimetz
vector + vectorvector
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
-Return
-decimaldecimal
-floatfloat
-intint
-intervalinterval
date - dateint
date - intdate
date - intervaltimestamp
date - timetimestamp
decimal - decimaldecimal
decimal - intdecimal
float - floatfloat
inet - inetint
inet - intinet
int - decimaldecimal
int - intint
interval - intervalinterval
jsonb - intjsonb
jsonb - stringjsonb
jsonb - string[]jsonb
pg_lsn - decimalpg_lsn
pg_lsn - pg_lsndecimal
time - intervaltime
time - timeinterval
timestamp - intervaltimestamp
timestamp - timestampinterval
timestamp - timestamptzinterval
timestamptz - intervaltimestamptz
timestamptz - timestampinterval
timestamptz - timestamptzinterval
timetz - intervaltimetz
vector - vectorvector
+ + + + + +
->Return
jsonb -> intjsonb
jsonb -> stringjsonb
+ + + + + +
->>Return
jsonb ->> intstring
jsonb ->> stringstring
+ + + + + + + + + + +
/Return
decimal / decimaldecimal
decimal / intdecimal
float / floatfloat
int / decimaldecimal
int / intdecimal
interval / floatinterval
interval / intinterval
+ + + + + + + + +
//Return
decimal // decimaldecimal
decimal // intdecimal
float // floatfloat
int // decimaldecimal
int // intint
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
<Return
anyenum < anyenumbool
bool < boolbool
bool[] < bool[]bool
box2d < box2dbool
bpchar < bpcharbool
bytes < bytesbool
bytes[] < bytes[]bool
collatedstring < collatedstringbool
collatedstring{*} < collatedstring{*}bool
date < datebool
date < timestampbool
date < timestamptzbool
date[] < date[]bool
decimal < decimalbool
decimal < floatbool
decimal < intbool
decimal[] < decimal[]bool
float < decimalbool
float < floatbool
float < intbool
float[] < float[]bool
geography < geographybool
geometry < geometrybool
inet < inetbool
inet[] < inet[]bool
int < decimalbool
int < floatbool
int < intbool
int < oidbool
int[] < int[]bool
interval < intervalbool
interval[] < interval[]bool
jsonb < jsonbbool
oid < intbool
oid < oidbool
pg_lsn < pg_lsnbool
refcursor < refcursorbool
string < stringbool
string[] < string[]bool
time < timebool
time < timetzbool
time[] < time[]bool
timestamp < datebool
timestamp < timestampbool
timestamp < timestamptzbool
timestamp[] < timestamp[]bool
timestamptz < datebool
timestamptz < timestampbool
timestamptz < timestamptzbool
timestamptz < timestamptzbool
timetz < timebool
timetz < timetzbool
tuple < tuplebool
uuid < uuidbool
uuid[] < uuid[]bool
varbit < varbitbool
vector < vectorbool
+ + + + +
<#>Return
vector <#> vectorfloat
+ + + + +
<->Return
vector <-> vectorfloat
+ + + + + + +
<<Return
inet << inetbool
int << intint
varbit << intvarbit
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
<=Return
anyenum <= anyenumbool
bool <= boolbool
bool[] <= bool[]bool
box2d <= box2dbool
bpchar <= bpcharbool
bytes <= bytesbool
bytes[] <= bytes[]bool
collatedstring <= collatedstringbool
collatedstring{*} <= collatedstring{*}bool
date <= datebool
date <= timestampbool
date <= timestamptzbool
date[] <= date[]bool
decimal <= decimalbool
decimal <= floatbool
decimal <= intbool
decimal[] <= decimal[]bool
float <= decimalbool
float <= floatbool
float <= intbool
float[] <= float[]bool
geography <= geographybool
geometry <= geometrybool
inet <= inetbool
inet[] <= inet[]bool
int <= decimalbool
int <= floatbool
int <= intbool
int <= oidbool
int[] <= int[]bool
interval <= intervalbool
interval[] <= interval[]bool
jsonb <= jsonbbool
oid <= intbool
oid <= oidbool
pg_lsn <= pg_lsnbool
refcursor <= refcursorbool
string <= stringbool
string[] <= string[]bool
time <= timebool
time <= timetzbool
time[] <= time[]bool
timestamp <= datebool
timestamp <= timestampbool
timestamp <= timestamptzbool
timestamp[] <= timestamp[]bool
timestamptz <= datebool
timestamptz <= timestampbool
timestamptz <= timestamptzbool
timestamptz <= timestamptzbool
timetz <= timebool
timetz <= timetzbool
tuple <= tuplebool
uuid <= uuidbool
uuid[] <= uuid[]bool
varbit <= varbitbool
vector <= vectorbool
+ + + + +
<=>Return
vector <=> vectorfloat
+ + + + + +
<@Return
anyelement <@ anyelementbool
jsonb <@ jsonbbool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
=Return
anyenum = anyenumbool
bool = boolbool
bool[] = bool[]bool
box2d = box2dbool
bpchar = bpcharbool
bytes = bytesbool
bytes[] = bytes[]bool
collatedstring = collatedstringbool
collatedstring{*} = collatedstring{*}bool
date = datebool
date = timestampbool
date = timestamptzbool
date[] = date[]bool
decimal = decimalbool
decimal = floatbool
decimal = intbool
decimal[] = decimal[]bool
float = decimalbool
float = floatbool
float = intbool
float[] = float[]bool
geography = geographybool
geometry = geometrybool
inet = inetbool
inet[] = inet[]bool
int = decimalbool
int = floatbool
int = intbool
int = oidbool
int[] = int[]bool
interval = intervalbool
interval[] = interval[]bool
jsonb = jsonbbool
oid = intbool
oid = oidbool
pg_lsn = pg_lsnbool
refcursor = refcursorbool
string = stringbool
string[] = string[]bool
time = timebool
time = timetzbool
time[] = time[]bool
timestamp = datebool
timestamp = timestampbool
timestamp = timestamptzbool
timestamp[] = timestamp[]bool
timestamptz = datebool
timestamptz = timestampbool
timestamptz = timestamptzbool
timestamptz = timestamptzbool
timetz = timebool
timetz = timetzbool
tsquery = tsquerybool
tsvector = tsvectorbool
tuple = tuplebool
uuid = uuidbool
uuid[] = uuid[]bool
varbit = varbitbool
vector = vectorbool
+ + + + + + +
>>Return
inet >> inetbool
int >> intint
varbit >> intvarbit
+ + + + +
?Return
jsonb ? stringbool
+ + + + +
?&Return
jsonb ?& string[]bool
+ + + + +
?|Return
jsonb ?| string[]bool
+ + + + + +
@>Return
anyelement @> anyelementbool
jsonb @> jsonbbool
+ + + + + +
@@Return
tsquery @@ tsvectorbool
tsvector @@ tsquerybool
+ + + + +
ILIKEReturn
string ILIKE stringbool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
INReturn
anyenum IN tuplebool
bool IN tuplebool
box2d IN tuplebool
bpchar IN tuplebool
bytes IN tuplebool
collatedstring IN tuplebool
date IN tuplebool
decimal IN tuplebool
float IN tuplebool
geography IN tuplebool
geometry IN tuplebool
inet IN tuplebool
int IN tuplebool
interval IN tuplebool
jsonb IN tuplebool
oid IN tuplebool
pg_lsn IN tuplebool
refcursor IN tuplebool
string IN tuplebool
time IN tuplebool
timestamp IN tuplebool
timestamptz IN tuplebool
timetz IN tuplebool
tuple IN tuplebool
uuid IN tuplebool
varbit IN tuplebool
vector IN tuplebool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
IS NOT DISTINCT FROMReturn
anyelement IS NOT DISTINCT FROM unknownbool
anyenum IS NOT DISTINCT FROM anyenumbool
bool IS NOT DISTINCT FROM boolbool
bool[] IS NOT DISTINCT FROM bool[]bool
box2d IS NOT DISTINCT FROM box2dbool
bpchar IS NOT DISTINCT FROM bpcharbool
bytes IS NOT DISTINCT FROM bytesbool
bytes[] IS NOT DISTINCT FROM bytes[]bool
collatedstring IS NOT DISTINCT FROM collatedstringbool
collatedstring{*} IS NOT DISTINCT FROM collatedstring{*}bool
date IS NOT DISTINCT FROM datebool
date IS NOT DISTINCT FROM timestampbool
date IS NOT DISTINCT FROM timestamptzbool
date[] IS NOT DISTINCT FROM date[]bool
decimal IS NOT DISTINCT FROM decimalbool
decimal IS NOT DISTINCT FROM floatbool
decimal IS NOT DISTINCT FROM intbool
decimal[] IS NOT DISTINCT FROM decimal[]bool
float IS NOT DISTINCT FROM decimalbool
float IS NOT DISTINCT FROM floatbool
float IS NOT DISTINCT FROM intbool
float[] IS NOT DISTINCT FROM float[]bool
geography IS NOT DISTINCT FROM geographybool
geometry IS NOT DISTINCT FROM geometrybool
inet IS NOT DISTINCT FROM inetbool
inet[] IS NOT DISTINCT FROM inet[]bool
int IS NOT DISTINCT FROM decimalbool
int IS NOT DISTINCT FROM floatbool
int IS NOT DISTINCT FROM intbool
int IS NOT DISTINCT FROM oidbool
int[] IS NOT DISTINCT FROM int[]bool
interval IS NOT DISTINCT FROM intervalbool
interval[] IS NOT DISTINCT FROM interval[]bool
jsonb IS NOT DISTINCT FROM jsonbbool
jsonpath IS NOT DISTINCT FROM jsonpathbool
oid IS NOT DISTINCT FROM intbool
oid IS NOT DISTINCT FROM oidbool
pg_lsn IS NOT DISTINCT FROM pg_lsnbool
refcursor IS NOT DISTINCT FROM refcursorbool
string IS NOT DISTINCT FROM stringbool
string[] IS NOT DISTINCT FROM string[]bool
time IS NOT DISTINCT FROM timebool
time IS NOT DISTINCT FROM timetzbool
time[] IS NOT DISTINCT FROM time[]bool
timestamp IS NOT DISTINCT FROM datebool
timestamp IS NOT DISTINCT FROM timestampbool
timestamp IS NOT DISTINCT FROM timestamptzbool
timestamp[] IS NOT DISTINCT FROM timestamp[]bool
timestamptz IS NOT DISTINCT FROM datebool
timestamptz IS NOT DISTINCT FROM timestampbool
timestamptz IS NOT DISTINCT FROM timestamptzbool
timestamptz IS NOT DISTINCT FROM timestamptzbool
timetz IS NOT DISTINCT FROM timebool
timetz IS NOT DISTINCT FROM timetzbool
tsquery IS NOT DISTINCT FROM tsquerybool
tsvector IS NOT DISTINCT FROM tsvectorbool
tuple IS NOT DISTINCT FROM tuplebool
unknown IS NOT DISTINCT FROM unknownbool
unknown IS NOT DISTINCT FROM voidbool
uuid IS NOT DISTINCT FROM uuidbool
uuid[] IS NOT DISTINCT FROM uuid[]bool
varbit IS NOT DISTINCT FROM varbitbool
vector IS NOT DISTINCT FROM vectorbool
void IS NOT DISTINCT FROM unknownbool
+ + + + + +
LIKEReturn
collatedstring LIKE collatedstringbool
string LIKE stringbool
+ + + + +
SIMILAR TOReturn
string SIMILAR TO stringbool
+ + + + + + + + +
^Return
decimal ^ decimaldecimal
decimal ^ intdecimal
float ^ floatfloat
int ^ decimaldecimal
int ^ intint
+ + + + + + +
|Return
inet | inetinet
int | intint
varbit | varbitvarbit
+ + + + + +
|/Return
|/decimaldecimal
|/floatfloat
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
||Return
bool || bool[]bool[]
bool || stringstring
bool[] || boolbool[]
bool[] || bool[]bool[]
box2d || box2dbox2d
box2d || stringstring
bytes || bytesbytes
bytes || bytes[]bytes[]
bytes[] || bytesbytes[]
bytes[] || bytes[]bytes[]
date || date[]date[]
date || stringstring
date[] || datedate[]
date[] || date[]date[]
decimal || decimal[]decimal[]
decimal || stringstring
decimal[] || decimaldecimal[]
decimal[] || decimal[]decimal[]
float || float[]float[]
float || stringstring
float[] || floatfloat[]
float[] || float[]float[]
geography || geographygeography
geography || stringstring
geometry || geometrygeometry
geometry || stringstring
inet || inet[]inet[]
inet || stringstring
inet[] || inetinet[]
inet[] || inet[]inet[]
int || int[]int[]
int || stringstring
int[] || intint[]
int[] || int[]int[]
interval || interval[]interval[]
interval || stringstring
interval[] || intervalinterval[]
interval[] || interval[]interval[]
jsonb || jsonbjsonb
jsonb || stringstring
oid || oidoid
oid || stringstring
pg_lsn || pg_lsnpg_lsn
pg_lsn || stringstring
refcursor || refcursorrefcursor
refcursor || stringstring
string || boolstring
string || box2dstring
string || datestring
string || decimalstring
string || floatstring
string || geographystring
string || geometrystring
string || inetstring
string || intstring
string || intervalstring
string || jsonbstring
string || oidstring
string || pg_lsnstring
string || refcursorstring
string || stringstring
string || string[]string[]
string || timestring
string || timestampstring
string || timestamptzstring
string || timetzstring
string || tuplestring
string || uuidstring
string || varbitstring
string[] || stringstring[]
string[] || string[]string[]
time || stringstring
time || time[]time[]
time[] || timetime[]
time[] || time[]time[]
timestamp || stringstring
timestamp || timestamp[]timestamp[]
timestamp[] || timestamptimestamp[]
timestamp[] || timestamp[]timestamp[]
timestamptz || stringstring
timestamptz || timestamptztimestamptz
timestamptz || timestamptztimestamptz
timestamptz || timestamptztimestamptz
timetz || stringstring
timetz || timetztimetz
tuple || stringstring
uuid || stringstring
uuid || uuid[]uuid[]
uuid[] || uuiduuid[]
uuid[] || uuid[]uuid[]
varbit || stringstring
varbit || varbitvarbit
+ + + + + +
||/Return
||/decimaldecimal
||/floatfloat
+ + + + + + + + + + + +
~Return
~inetinet
~intint
~varbitvarbit
box2d ~ box2dbool
box2d ~ geometrybool
geometry ~ box2dbool
geometry ~ geometrybool
string ~ stringbool
+ + + + +
~*Return
string ~* stringbool
diff --git a/src/current/_includes/cockroach-generated/release-25.3/sql/window_functions.md b/src/current/_includes/cockroach-generated/release-25.3/sql/window_functions.md new file mode 100644 index 00000000000..e1032ff82de --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.3/sql/window_functions.md @@ -0,0 +1,413 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cume_dist() → float

Calculates the relative rank of the current row: (number of rows preceding or peer with current row) / (total rows).

+
Immutable
dense_rank() → int

Calculates the rank of the current row without gaps; this function counts peer groups.

+
Immutable
first_value(val: bool) → bool

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: bytes) → bytes

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: date) → date

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: decimal) → decimal

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: float) → float

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: inet) → inet

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: int) → int

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: interval) → interval

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: string) → string

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: time) → time

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timestamp) → timestamp

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timestamptz) → timestamptz

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: uuid) → uuid

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: box2d) → box2d

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: geography) → geography

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: geometry) → geometry

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: jsonb) → jsonb

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: oid) → oid

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: pg_lsn) → pg_lsn

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: refcursor) → refcursor

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timetz) → timetz

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: varbit) → varbit

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
lag(val: bool) → bool

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: bool, n: int) → bool

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: bool, n: int, default: bool) → bool

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: bytes) → bytes

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: bytes, n: int) → bytes

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: bytes, n: int, default: bytes) → bytes

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: date) → date

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: date, n: int) → date

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: date, n: int, default: date) → date

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: decimal) → decimal

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: decimal, n: int) → decimal

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: decimal, n: int, default: decimal) → decimal

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: float) → float

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: float, n: int) → float

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: float, n: int, default: float) → float

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: inet) → inet

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: inet, n: int) → inet

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: inet, n: int, default: inet) → inet

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: int) → int

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: int, n: int) → int

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: int, n: int, default: int) → int

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: interval) → interval

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: interval, n: int) → interval

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: interval, n: int, default: interval) → interval

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: string) → string

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: string, n: int) → string

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: string, n: int, default: string) → string

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: time) → time

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: time, n: int) → time

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: time, n: int, default: time) → time

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timestamp) → timestamp

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timestamp, n: int, default: timestamp) → timestamp

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timestamptz) → timestamptz

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timestamptz, n: int, default: timestamptz) → timestamptz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: uuid) → uuid

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: uuid, n: int) → uuid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: uuid, n: int, default: uuid) → uuid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: box2d) → box2d

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: box2d, n: int) → box2d

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: box2d, n: int, default: box2d) → box2d

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: geography) → geography

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: geography, n: int) → geography

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: geography, n: int, default: geography) → geography

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: geometry) → geometry

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: geometry, n: int) → geometry

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: geometry, n: int, default: geometry) → geometry

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: jsonb) → jsonb

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: jsonb, n: int, default: jsonb) → jsonb

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: oid) → oid

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: oid, n: int) → oid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: oid, n: int, default: oid) → oid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: pg_lsn) → pg_lsn

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: pg_lsn, n: int, default: pg_lsn) → pg_lsn

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: refcursor) → refcursor

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: refcursor, n: int, default: refcursor) → refcursor

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timetz) → timetz

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timetz, n: int) → timetz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timetz, n: int, default: timetz) → timetz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: varbit) → varbit

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: varbit, n: int) → varbit

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: varbit, n: int, default: varbit) → varbit

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
last_value(val: bool) → bool

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: bytes) → bytes

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: date) → date

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: decimal) → decimal

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: float) → float

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: inet) → inet

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: int) → int

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: interval) → interval

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: string) → string

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: time) → time

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timestamp) → timestamp

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timestamptz) → timestamptz

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: uuid) → uuid

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: box2d) → box2d

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: geography) → geography

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: geometry) → geometry

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: jsonb) → jsonb

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: oid) → oid

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: pg_lsn) → pg_lsn

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: refcursor) → refcursor

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timetz) → timetz

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: varbit) → varbit

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
lead(val: bool) → bool

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: bool, n: int) → bool

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: bool, n: int, default: bool) → bool

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: bytes) → bytes

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: bytes, n: int) → bytes

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: bytes, n: int, default: bytes) → bytes

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: date) → date

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: date, n: int) → date

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: date, n: int, default: date) → date

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: decimal) → decimal

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: decimal, n: int) → decimal

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: decimal, n: int, default: decimal) → decimal

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: float) → float

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: float, n: int) → float

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: float, n: int, default: float) → float

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: inet) → inet

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: inet, n: int) → inet

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: inet, n: int, default: inet) → inet

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: int) → int

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: int, n: int) → int

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: int, n: int, default: int) → int

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: interval) → interval

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: interval, n: int) → interval

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: interval, n: int, default: interval) → interval

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: string) → string

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: string, n: int) → string

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: string, n: int, default: string) → string

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: time) → time

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: time, n: int) → time

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: time, n: int, default: time) → time

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timestamp) → timestamp

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timestamp, n: int, default: timestamp) → timestamp

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timestamptz) → timestamptz

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timestamptz, n: int, default: timestamptz) → timestamptz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: uuid) → uuid

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: uuid, n: int) → uuid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: uuid, n: int, default: uuid) → uuid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: box2d) → box2d

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: box2d, n: int) → box2d

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: box2d, n: int, default: box2d) → box2d

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: geography) → geography

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: geography, n: int) → geography

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: geography, n: int, default: geography) → geography

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: geometry) → geometry

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: geometry, n: int) → geometry

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: geometry, n: int, default: geometry) → geometry

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: jsonb) → jsonb

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: jsonb, n: int, default: jsonb) → jsonb

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: oid) → oid

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: oid, n: int) → oid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: oid, n: int, default: oid) → oid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: pg_lsn) → pg_lsn

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: pg_lsn, n: int, default: pg_lsn) → pg_lsn

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: refcursor) → refcursor

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: refcursor, n: int, default: refcursor) → refcursor

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timetz) → timetz

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timetz, n: int) → timetz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timetz, n: int, default: timetz) → timetz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: varbit) → varbit

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: varbit, n: int) → varbit

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: varbit, n: int, default: varbit) → varbit

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
nth_value(val: bool, n: int) → bool

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: bytes, n: int) → bytes

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: date, n: int) → date

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: decimal, n: int) → decimal

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: float, n: int) → float

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: inet, n: int) → inet

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: int, n: int) → int

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: interval, n: int) → interval

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: string, n: int) → string

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: time, n: int) → time

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: uuid, n: int) → uuid

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: box2d, n: int) → box2d

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: geography, n: int) → geography

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: geometry, n: int) → geometry

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: oid, n: int) → oid

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timetz, n: int) → timetz

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: varbit, n: int) → varbit

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
ntile(n: int) → int

Calculates an integer ranging from 1 to n, dividing the partition as equally as possible.

+
Immutable
percent_rank() → float

Calculates the relative rank of the current row: (rank - 1) / (total rows - 1).

+
Immutable
rank() → int

Calculates the rank of the current row with gaps; same as row_number of its first peer.

+
Immutable
row_number() → int

Calculates the number of the current row within its partition, counting from 1.

+
Immutable
+ diff --git a/src/current/_includes/cockroach-generated/release-25.4/eventlog.md b/src/current/_includes/cockroach-generated/release-25.4/eventlog.md new file mode 100644 index 00000000000..f543ad1fd3c --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.4/eventlog.md @@ -0,0 +1,3839 @@ +Certain notable events are reported using a structured format. +Commonly, these notable events are also copied to the table +`system.eventlog`, unless the cluster setting +`server.eventlog.enabled` is unset. + +Additionally, notable events are copied to specific external logging +channels in log messages, where they can be collected for further processing. + +The sections below document the possible notable event types +in this version of CockroachDB. For each event type, a table +documents the possible fields. A field may be omitted from +an event if its value is empty or zero. + +A field is also considered "Sensitive" if it may contain +application-specific information or personally identifiable information (PII). In that case, +the copy of the event sent to the external logging channel +will contain redaction markers in a format that is compatible +with the redaction facilities in [`cockroach debug zip`](cockroach-debug-zip.html) +and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html), +provided the `redactable` functionality is enabled on the logging sink. + +Events not documented on this page will have an unstructured format in log messages. + +## Changefeed telemetry events + +Events in this category pertain to changefeed usage and metrics. + +Events in this category are logged to the `TELEMETRY` channel. + + +### `alter_changefeed` + +An event of type `alter_changefeed` is an event for any ALTER CHANGEFEED statements that are run. + +Note: in version 26.1, these events will be moved to the `CHANGEFEED` channel. +To test compatability before this, set the cluster setting +`log.channel_compatibility_mode.enabled` to false. 
This will send the +events to `CHANGEFEED` instead of `TELEMETRY`. + + +| Field | Description | Sensitive | +|--|--|--| +| `PreviousDescription` | The description of the changefeed job before the ALTER CHANGEFEED. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. | no | + +### `changefeed_canceled` + +An event of type `changefeed_canceled` is an event for any changefeed cancellations. + +Note: in version 26.1, these events will be moved to the `CHANGEFEED` channel. +To test compatability before this, set the cluster setting +`log.channel_compatibility_mode.enabled` to false. This will send the +events to `CHANGEFEED` instead of `TELEMETRY`. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. 
| no | + +### `changefeed_emitted_bytes` + +An event of type `changefeed_emitted_bytes` is an event representing the bytes emitted by a changefeed over an interval. + +Note: in version 26.1, these events will be moved to the `CHANGEFEED` channel. +To test compatability before this, set the cluster setting +`log.channel_compatibility_mode.enabled` to false. This will send the +events to `CHANGEFEED` instead of `TELEMETRY`. + + +| Field | Description | Sensitive | +|--|--|--| +| `EmittedBytes` | The number of bytes emitted. | no | +| `EmittedMessages` | The number of messages emitted. | no | +| `LoggingInterval` | The time period in nanoseconds between emitting telemetry events of this type (per-aggregator). | no | +| `Closing` | Flag to indicate that the changefeed is closing. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. | no | + +### `changefeed_failed` + +An event of type `changefeed_failed` is an event for any changefeed failure since the plan hook +was triggered. + +Note: in version 26.1, these events will be moved to the `CHANGEFEED` channel. +To test compatability before this, set the cluster setting +`log.channel_compatibility_mode.enabled` to false. This will send the +events to `CHANGEFEED` instead of `TELEMETRY`. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `FailureType` | The reason / environment with which the changefeed failed (ex: connection_closed, changefeed_behind). | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. | no | + +### `create_changefeed` + +An event of type `create_changefeed` is an event for any CREATE CHANGEFEED query that +successfully starts running. Failed CREATE statements will show up as +ChangefeedFailed events. + +Note: in version 26.1, these events will be moved to the `CHANGEFEED` channel. +To test compatability before this, set the cluster setting +`log.channel_compatibility_mode.enabled` to false. This will send the +events to `CHANGEFEED` instead of `TELEMETRY`. + + +| Field | Description | Sensitive | +|--|--|--| +| `Transformation` | Flag representing whether the changefeed is using CDC queries. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. 
| no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. | no | + +## Cluster-level events + +Events in this category pertain to an entire cluster and are +not relative to any particular tenant. + +In a multi-tenant setup, the `system.eventlog` table for individual +tenants cannot contain a copy of cluster-level events; conversely, +the `system.eventlog` table in the system tenant cannot contain the +SQL-level events for individual tenants. + +Events in this category are logged to the `OPS` channel. + + +### `certs_reload` + +An event of type `certs_reload` is recorded when the TLS certificates are +reloaded/rotated from disk. + + +| Field | Description | Sensitive | +|--|--|--| +| `Success` | Whether the operation completed without errors. | no | +| `ErrorMessage` | If an error was encountered, the text of the error. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `disk_slowness_cleared` + +An event of type `disk_slowness_cleared` is recorded when disk slowness in a store has cleared. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeID` | The node ID where the event was originated. | no | +| `StoreID` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `disk_slowness_detected` + +An event of type `disk_slowness_detected` is recorded when a store observes disk slowness +events. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `NodeID` | The node ID where the event was originated. | no | +| `StoreID` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `low_disk_space` + +An event of type `low_disk_space` is emitted when a store is reaching capacity, as we reach +certain thresholds. It is emitted periodically while we are in a low disk +state. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeID` | The node ID where the event was originated. | no | +| `StoreID` | | no | +| `PercentThreshold` | The free space percent threshold that we went under. | no | +| `AvailableBytes` | | no | +| `TotalBytes` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `node_decommissioned` + +An event of type `node_decommissioned` is recorded when a node is marked as +decommissioned. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_decommissioning` + +An event of type `node_decommissioning` is recorded when a node is marked as +decommissioning. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. 
| no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_join` + +An event of type `node_join` is recorded when a node joins the cluster. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_recommissioned` + +An event of type `node_recommissioned` is recorded when a decommissioning node is +recommissioned. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_restart` + +An event of type `node_restart` is recorded when an existing node rejoins the cluster +after being offline. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_shutdown_connection_timeout` + +An event of type `node_shutdown_connection_timeout` is recorded when SQL connections remain open +during shutdown, after waiting for the server.shutdown.connections.timeout +to transpire. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `Detail` | The detailed message, meant to be a human-understandable explanation. | no | +| `ConnectionsRemaining` | The number of connections still open after waiting for the client to close them. | no | +| `TimeoutMillis` | The amount of time the server waited for the client to close the connections, defined by server.shutdown.connections.timeout. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_shutdown_transaction_timeout` + +An event of type `node_shutdown_transaction_timeout` is recorded when SQL transactions remain open +during shutdown, after waiting for the server.shutdown.transactions.timeout +to transpire. + + +| Field | Description | Sensitive | +|--|--|--| +| `Detail` | The detailed message, meant to be a human-understandable explanation. | no | +| `ConnectionsRemaining` | The number of connections still running SQL transactions after waiting for the client to end them. | no | +| `TimeoutMillis` | The amount of time the server waited for the client to close the connections, defined by server.shutdown.transactions.timeout. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. 
| no | + +### `tenant_shared_service_start` + +An event of type `tenant_shared_service_start` is recorded when a tenant server +is started inside the same process as the KV layer. + + +| Field | Description | Sensitive | +|--|--|--| +| `OK` | Whether the startup was successful. | no | +| `ErrorText` | If the startup failed, the text of the error. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `TenantID` | The ID of the tenant owning the service. | no | +| `InstanceID` | The ID of the server instance. | no | +| `TenantName` | The name of the tenant at the time the event was emitted. | yes | + +### `tenant_shared_service_stop` + +An event of type `tenant_shared_service_stop` is recorded when a tenant server +is shut down inside the same process as the KV layer. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `TenantID` | The ID of the tenant owning the service. | no | +| `InstanceID` | The ID of the server instance. | no | +| `TenantName` | The name of the tenant at the time the event was emitted. | yes | + +## Contention events + +Aggregated information about contention events. + +Events in this category are logged to the `SQL_EXEC` channel. + + +### `aggregated_contention_info` + +An event of type `aggregated_contention_info` is recorded periodically when contention events +are resolved. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `WaitingStmtFingerprintId` | | no | +| `WaitingTxnFingerprintId` | | no | +| `BlockingTxnFingerprintId` | | no | +| `ContendedKey` | | partially | +| `Duration` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Debugging events + +Events in this category pertain to debugging operations performed by +operators or (more commonly) Cockroach Labs employees. These operations can +e.g. directly access and mutate internal state, breaking system invariants. + +Events in this category are logged to the `OPS` channel. + + +### `debug_recover_replica` + +An event of type `debug_recover_replica` is recorded when unsafe loss of quorum recovery is performed. + + +| Field | Description | Sensitive | +|--|--|--| +| `RangeID` | | no | +| `StoreID` | | no | +| `SurvivorReplicaID` | | no | +| `UpdatedReplicaID` | | no | +| `StartKey` | | yes | +| `EndKey` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event originated. | no | +| `User` | The user which performed the operation. | yes | + +### `debug_send_kv_batch` + +An event of type `debug_send_kv_batch` is recorded when an arbitrary KV BatchRequest is submitted +to the cluster via the `debug send-kv-batch` CLI command. + + +| Field | Description | Sensitive | +|--|--|--| +| `BatchRequest` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event originated. 
| no | +| `User` | The user which performed the operation. | yes | + +## Health events + +Events in this category pertain to the health of one or more servers. + +Events in this category are logged to the `HEALTH` channel. + + +### `hot_ranges_stats` + +An event of type `hot_ranges_stats` + + +| Field | Description | Sensitive | +|--|--|--| +| `RangeID` | | no | +| `Qps` | | no | +| `SchemaName` | SchemaName is the name of the schema in which the index was created. | yes | +| `LeaseholderNodeID` | LeaseholderNodeID indicates the Node ID that is the current leaseholder for the given range. | no | +| `WritesPerSecond` | Writes per second is the recent number of keys written per second on this range. | no | +| `ReadsPerSecond` | Reads per second is the recent number of keys read per second on this range. | no | +| `WriteBytesPerSecond` | Write bytes per second is the recent number of bytes written per second on this range. | no | +| `ReadBytesPerSecond` | Read bytes per second is the recent number of bytes read per second on this range. | no | +| `CPUTimePerSecond` | CPU time per second is the recent cpu usage in nanoseconds of this range. | no | +| `Databases` | Databases for the range. | no | +| `Tables` | Tables for the range | no | +| `Indexes` | Indexes for the range | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `runtime_stats` + +An event of type `runtime_stats` is recorded every 10 seconds as server health metrics. + + +| Field | Description | Sensitive | +|--|--|--| +| `MemRSSBytes` | The process resident set size. Expressed as bytes. | no | +| `GoroutineCount` | The number of goroutines. | no | +| `MemStackSysBytes` | The stack system memory used. Expressed as bytes. | no | +| `GoAllocBytes` | The memory allocated by Go. Expressed as bytes. 
| no | +| `GoTotalBytes` | The total memory allocated by Go but not released. Expressed as bytes. | no | +| `GoStatsStaleness` | The staleness of the Go memory statistics. Expressed in seconds. | no | +| `HeapFragmentBytes` | The amount of heap fragmentation. Expressed as bytes. | no | +| `HeapReservedBytes` | The amount of heap reserved. Expressed as bytes. | no | +| `HeapReleasedBytes` | The amount of heap released. Expressed as bytes. | no | +| `CGoAllocBytes` | The memory allocated outside of Go. Expressed as bytes. | no | +| `CGoTotalBytes` | The total memory allocated outside of Go but not released. Expressed as bytes. | no | +| `CGoCallRate` | The total number of calls outside of Go over time. Expressed as operations per second. | no | +| `CPUUserPercent` | The user CPU percentage. | no | +| `CPUSysPercent` | The system CPU percentage. | no | +| `GCPausePercent` | The GC pause percentage. | no | +| `GCRunCount` | The total number of GC runs. | no | +| `NetHostRecvBytes` | The bytes received on all network interfaces since this process started. | no | +| `NetHostSendBytes` | The bytes sent on all network interfaces since this process started. | no | +| `GoLimitBytes` | The soft Go memory limit in bytes. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Job events + +Events in this category pertain to long-running jobs that are orchestrated by +a node's job registry. These system processes can create and/or modify stored +objects during the course of their execution. + +A job might choose to emit multiple events during its execution when +transitioning from one "state" to another. +Egs: IMPORT/RESTORE will emit events on job creation and successful +completion. If the job fails, events will be emitted on job creation, +failure, and successful revert. 
+
+Events in this category are logged to the `OPS` channel.
+
+
+### `import`
+
+An event of type `import` is recorded when an import job is created and on successful completion.
+If the job fails, events will be emitted on job creation, failure, and
+successful revert.
+
+
+
+
+#### Common fields
+
+| Field | Description | Sensitive |
+|--|--|--|
+| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no |
+| `EventType` | The type of the event. | no |
+| `JobID` | The ID of the job that triggered the event. | no |
+| `JobType` | The type of the job that triggered the event. | no |
+| `Description` | A description of the job that triggered the event. Some jobs populate the description with an approximate representation of the SQL statement run to create the job. | yes |
+| `User` | The user account that triggered the event. | yes |
+| `DescriptorIDs` | The object descriptors affected by the job. Set to zero for operations that don't affect descriptors. | yes |
+| `Status` | The status of the job that triggered the event. This allows the job to indicate which phase execution it is in when the event is triggered. | no |
+
+### `restore`
+
+An event of type `restore` is recorded when a restore job is created and on successful completion.
+If the job fails, events will be emitted on job creation, failure, and
+successful revert.
+
+
+
+
+#### Common fields
+
+| Field | Description | Sensitive |
+|--|--|--|
+| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no |
+| `EventType` | The type of the event. | no |
+| `JobID` | The ID of the job that triggered the event. | no |
+| `JobType` | The type of the job that triggered the event. | no |
+| `Description` | A description of the job that triggered the event. Some jobs populate the description with an approximate representation of the SQL statement run to create the job. | yes |
+| `User` | The user account that triggered the event. 
| yes | +| `DescriptorIDs` | The object descriptors affected by the job. Set to zero for operations that don't affect descriptors. | yes | +| `Status` | The status of the job that triggered the event. This allows the job to indicate which phase execution it is in when the event is triggered. | no | + +### `status_change` + +An event of type `status_change` is recorded when a job changes statuses. + + +| Field | Description | Sensitive | +|--|--|--| +| `JobID` | The ID of the job that is changing statuses. | no | +| `JobType` | The type of the job that is changing statuses. | no | +| `Description` | A human parsable description of the status change | partially | +| `PreviousStatus` | The status that the job is transitioning out of | no | +| `NewStatus` | The status that the job has transitioned into | no | +| `RunNum` | The run number of the job. | no | +| `Error` | An error that may have occurred while the job was running. | yes | +| `FinalResumeErr` | An error that occurred that requires the job to be reverted. | yes | +| `User` | User is the owner of the job. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Miscellaneous SQL events + +Events in this category report miscellaneous SQL events. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these miscellaneous events are +preserved in each tenant's own system.eventlog table. + +Events in this category are logged to the `OPS` channel. + + +### `set_cluster_setting` + +An event of type `set_cluster_setting` is recorded when a cluster setting is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SettingName` | The name of the affected cluster setting. | no | +| `Value` | The new value of the cluster setting. | yes | +| `DefaultValue` | The current default value of the cluster setting. 
| yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `set_tenant_cluster_setting` + +An event of type `set_tenant_cluster_setting` is recorded when a cluster setting override +is changed, either for another tenant or for all tenants. + + +| Field | Description | Sensitive | +|--|--|--| +| `SettingName` | The name of the affected cluster setting. | no | +| `Value` | The new value of the cluster setting. | yes | +| `TenantId` | The target Tenant ID. Empty if targeting all tenants. | no | +| `AllTenants` | Whether the override applies to all tenants. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +## SQL Access Audit Events + +Events in this category are generated when a table has been +marked as audited via `ALTER TABLE ... EXPERIMENTAL_AUDIT SET`. + +Note: These events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SENSITIVE_ACCESS` channel. + + +### `admin_query` + +An event of type `admin_query` is recorded when a user with admin privileges (the user +is directly or indirectly a member of the admin role) executes a query. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. 
| no | + +### `role_based_audit_event` + +An event of type `role_based_audit_event` is an audit event recorded when an executed query belongs to a user whose role +membership(s) correspond to any role that is enabled to emit an audit log via the sql.log.user_audit +cluster setting. + + +| Field | Description | Sensitive | +|--|--|--| +| `Role` | The configured audit role that emitted this log. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. 
| no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `sensitive_table_access` + +An event of type `sensitive_table_access` is recorded when an access is performed to +a table marked as audited. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table being audited. | yes | +| `AccessMode` | How the table was accessed (r=read / rw=read/write). | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no |
+| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no |
+| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes |
+| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no |
+| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no |
+| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no |
+| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no |
+| `ErrorText` | The text of the error if any. | partially |
+| `Age` | Age of the query in milliseconds. | no |
+| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no |
+| `FullTableScan` | Whether the query contains a full table scan. | no |
+| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no |
+| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no |
+| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no |
+| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no |
+
+### `unsafe_internals_accessed`
+
+An event of type `unsafe_internals_accessed` is recorded when a query accesses
+unsafe internals using the allow_unsafe_internals override.
+
+
+| Field | Description | Sensitive |
+|--|--|--|
+| `Query` | The query that triggered the unsafe internals access. | partially |
+
+
+#### Common fields
+
+| Field | Description | Sensitive |
+|--|--|--|
+| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no |
+| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `unsafe_internals_denied` + +An event of type `unsafe_internals_denied` is recorded when a query attempts to access unsafe internals +but lacks the appropriate session variables. + + +| Field | Description | Sensitive | +|--|--|--| +| `Query` | The query that triggered the unsafe internals access. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +## SQL Execution Log + +Events in this category report executed queries. + +Note: These events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SQL_EXEC` channel. + + +### `query_execute` + +An event of type `query_execute` is recorded when a query is executed, +and the cluster setting `sql.log.all_statements.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. 
Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `scan_row_count_misestimate` + +An event of type `scan_row_count_misestimate` is recorded when the optimizer's row count estimate +for a logical scan differs significantly from the actual number of rows read, +and cluster setting `sql.log.scan_row_count_misestimate.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The fully qualified name of the table being scanned. | no | +| `IndexName` | The name of the index being scanned. 
| no | +| `EstimatedRowCount` | The optimizer's estimated row count for the scan. | no | +| `ActualRowCount` | The actual number of rows read by all processors performing the scan. | no | +| `NanosSinceStatsCollected` | Time in nanoseconds that have passed since full stats were collected on the table. | no | +| `EstimatedStaleness` | Estimated fraction of stale rows in the table based on the time since stats were last collected. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +## SQL Logical Schema Changes + +Events in this category pertain to DDL (Data Definition Language) +operations performed by SQL statements that modify the SQL logical +schema. + +They are relative to a particular SQL tenant. 
+In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `SQL_SCHEMA` channel. + + +### `alter_database_add_region` + +An event of type `alter_database_add_region` is recorded when a region is added to a database. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `RegionName` | The region being added. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_database_drop_region` + +An event of type `alter_database_drop_region` is recorded when a region is dropped from a database. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. 
| no | +| `RegionName` | The region being dropped. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_database_placement` + +An event of type `alter_database_placement` is recorded when the database placement is modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `Placement` | The new placement policy. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_database_primary_region` + +An event of type `alter_database_primary_region` is recorded when a primary region is added/modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `PrimaryRegionName` | The new primary region. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_database_set_zone_config_extension` + +An event of type `alter_database_set_zone_config_extension` is recorded when a zone config extension is changed. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. 
| no | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + +### `alter_database_survival_goal` + +An event of type `alter_database_survival_goal` is recorded when the survival goal is modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `SurvivalGoal` | The new survival goal | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_function_options` + +An event of type `alter_function_options` is recorded when a user-defined function's options are +altered. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the affected function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_index` + +An event of type `alter_index` is recorded when an index is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_index_visible` + +An event of type `alter_index_visible` is recorded when an index's visibility is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `NotVisible` | Set true if index is not visible. NOTE: THIS FIELD IS DEPRECATED in favor of invisibility. | no | +| `Invisibility` | The new invisibility of the affected index. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_sequence` + +An event of type `alter_sequence` is recorded when a sequence is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the affected sequence. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_table` + +An event of type `alter_table` is recorded when a table is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update, if any. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. 
| no | + +### `alter_type` + +An event of type `alter_type` is recorded when a user-defined type is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `comment_on_column` + +An event of type `comment_on_column` is recorded when a column is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected column. | no | +| `ColumnName` | The affected column. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. 
Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `comment_on_constraint` + +An event of type `comment_on_constraint` is recorded when a constraint is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected constraint. | no | +| `ConstraintName` | The name of the affected constraint. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `comment_on_database` + +An event of type `comment_on_database` is recorded when a database is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. 
Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `comment_on_index` + +An event of type `comment_on_index` is recorded when an index is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `comment_on_schema` + +An event of type `comment_on_schema` is recorded when a schema is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | Name of the affected schema. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `comment_on_table` + +An event of type `comment_on_table` is recorded when a table is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. 
| no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `comment_on_type` + +An event of type `comment_on_type` is recorded when a type is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_database` + +An event of type `create_database` is recorded when a database is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the new database. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_function` + +An event of type `create_function` is recorded when a user-defined function is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the created function. | no | +| `IsReplace` | If the new function is a replace of an existing function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_index` + +An event of type `create_index` is recorded when an index is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the new index. | no | +| `IndexName` | The name of the new index. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_policy` + +An event of type `create_policy` is recorded when a policy is created. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | Name of the policy's table. | no | +| `PolicyName` | Name of the created policy. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_schema` + +An event of type `create_schema` is recorded when a schema is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the new schema. | no | +| `Owner` | The name of the owner for the new schema. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_sequence` + +An event of type `create_sequence` is recorded when a sequence is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the new sequence. | no | +| `Owner` | The name of the owner for the new sequence. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_statistics` + +An event of type `create_statistics` is recorded when statistics are collected for a +table. + +Events of this type are only collected when the cluster setting +`sql.stats.post_events.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table for which the statistics were created. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_table` + +An event of type `create_table` is recorded when a table is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the new table. | no | +| `Owner` | The name of the owner for the new table. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_trigger` + +An event of type `create_trigger` is recorded when a trigger is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | Name of the trigger's table. 
| no | +| `TriggerName` | Name of the created trigger. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_type` + +An event of type `create_type` is recorded when a user-defined type is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the new type. | no | +| `Owner` | The name of the owner for the new type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_view` + +An event of type `create_view` is recorded when a view is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `ViewName` | The name of the new view. | no | +| `Owner` | The name of the owner of the new view. | no | +| `ViewQuery` | The SQL selection clause used to define the view. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_database` + +An event of type `drop_database` is recorded when a database is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | no | +| `DroppedSchemaObjects` | The names of the schemas dropped by a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_function` + +An event of type `drop_function` is recorded when a user-defined function is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the dropped function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_index` + +An event of type `drop_index` is recorded when an index is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. 
| no | +| `IndexName` | The name of the affected index. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_policy` + +An event of type `drop_policy` is recorded when a policy is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | Name of the policy's table. | no | +| `PolicyName` | Name of the dropped policy. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_schema` + +An event of type `drop_schema` is recorded when a schema is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. 
The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_sequence` + +An event of type `drop_sequence` is recorded when a sequence is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the affected sequence. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_table` + +An event of type `drop_table` is recorded when a table is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_trigger` + +An event of type `drop_trigger` is recorded when a trigger is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | Name of the trigger's table. | no | +| `TriggerName` | Name of the dropped trigger. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_type` + +An event of type `drop_type` is recorded when a user-defined type is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_view` + +An event of type `drop_view` is recorded when a view is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `ViewName` | The name of the affected view. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `finish_schema_change` + +An event of type `finish_schema_change` is recorded when a previously initiated schema +change has completed. + + +| Field | Description | Sensitive | +|--|--|--| +| `LatencyNanos` | The amount of time the schema change job took to complete. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `finish_schema_change_rollback` + +An event of type `finish_schema_change_rollback` is recorded when a previously +initiated schema change rollback has completed. + + +| Field | Description | Sensitive | +|--|--|--| +| `LatencyNanos` | The amount of time the schema change job took to roll back. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. 
Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `force_delete_table_data_entry` + + + +| Field | Description | Sensitive | +|--|--|--| +| `DescriptorID` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `refresh_materialized_view` + +An event of type `refresh_materialized_view` is recorded when a materialized view is refreshed. + + +| Field | Description | Sensitive | +|--|--|--| +| `ViewName` | The name of the materialized view being refreshed. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `rename_database` + +An event of type `rename_database` is recorded when a database is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The old name of the affected database. | no | +| `NewDatabaseName` | The new name of the affected database. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `rename_function` + +An event of type `rename_function` is recorded when a user-defined function is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | The old name of the affected function. | no | +| `NewFunctionName` | The new name of the affected function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `rename_schema` + +An event of type `rename_schema` is recorded when a schema is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The old name of the affected schema. | no | +| `NewSchemaName` | The new name of the affected schema. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `rename_table` + +An event of type `rename_table` is recorded when a table, sequence or view is renamed. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The old name of the affected table. | no | +| `NewTableName` | The new name of the affected table. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `rename_type` + +An event of type `rename_type` is recorded when a user-defined type is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The old name of the affected type. | no | +| `NewTypeName` | The new name of the affected type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `reverse_schema_change` + +An event of type `reverse_schema_change` is recorded when an in-progress schema change +encounters a problem and is reversed. + + +| Field | Description | Sensitive | +|--|--|--| +| `Error` | The error encountered that caused the schema change to be reversed. The specific format of the error is variable and can change across releases without warning. | partially | +| `SQLSTATE` | The SQLSTATE code for the error. | no | +| `LatencyNanos` | The amount of time the schema change job took before being reverted. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. 
| no | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `set_schema` + +An event of type `set_schema` is recorded when a table, view, sequence or type's schema is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DescriptorName` | The old name of the affected descriptor. | no | +| `NewDescriptorName` | The new name of the affected descriptor. | no | +| `DescriptorType` | The descriptor type being changed (table, view, sequence, type). | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. 
| no | + +### `truncate_table` + +An event of type `truncate_table` is recorded when a table is truncated. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `unsafe_delete_descriptor` + +An event of type `unsafe_delete_descriptor` is recorded when a descriptor is written +using crdb_internal.unsafe_delete_descriptor(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | no | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `unsafe_delete_namespace_entry` + +An event of type `unsafe_delete_namespace_entry` is recorded when a namespace entry is +written using crdb_internal.unsafe_delete_namespace_entry(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | no | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `unsafe_upsert_descriptor` + +An event of type `unsafe_upsert_descriptor` is recorded when a descriptor is written +using crdb_internal.unsafe_upsert_descriptor(). + + +| Field | Description | Sensitive | +|--|--|--| +| `PreviousDescriptor` | | yes | +| `NewDescriptor` | | yes | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `unsafe_upsert_namespace_entry` + +An event of type `unsafe_upsert_namespace_entry` is recorded when a namespace entry is +written using crdb_internal.unsafe_upsert_namespace_entry(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. + + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | no | +| `PreviousID` | | no | +| `Force` | | no | +| `FailedValidation` | | no | +| `ValidationErrors` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +## SQL Privilege changes + +Events in this category pertain to DDL (Data Definition Language) +operations performed by SQL statements that modify the privilege +grants for stored objects. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `PRIVILEGES` channel. + + +### `alter_database_owner` + +An event of type `alter_database_owner` is recorded when a database's owner is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database being affected. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_default_privileges` + +An event of type `alter_default_privileges` is recorded when default privileges are changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | yes | +| `RoleName` | Either role_name should be populated or for_all_roles should be true. The role having its default privileges altered. | yes | +| `ForAllRoles` | Identifies if FOR ALL ROLES is used. | no | +| `SchemaName` | The name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `alter_function_owner` + +An event of type `alter_function_owner` is recorded when the owner of a user-defined function is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | The name of the affected user-defined function. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_schema_owner` + +An event of type `alter_schema_owner` is recorded when a schema's owner is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_table_owner` + +An event of type `alter_table_owner` is recorded when the owner of a table, view or sequence is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected object. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_type_owner` + +An event of type `alter_type_owner` is recorded when the owner of a user-defined type is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | +| `Owner` | The name of the new owner. 
| yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `change_database_privilege` + +An event of type `change_database_privilege` is recorded when privileges are +added to / removed from a user for a database object. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_function_privilege` + + + +| Field | Description | Sensitive | +|--|--|--| +| `FuncName` | The name of the affected function. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_schema_privilege` + +An event of type `change_schema_privilege` is recorded when privileges are added to / +removed from a user for a schema object. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_table_privilege` + +An event of type `change_table_privilege` is recorded when privileges are added to / removed +from a user for a table, sequence or view object. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_type_privilege` + +An event of type `change_type_privilege` is recorded when privileges are added to / +removed from a user for a type object. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +## SQL Session events + +Events in this category report SQL client connections +and sessions. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these miscellaneous events are +preserved in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `SESSIONS` channel. + + +### `client_authentication_failed` + +An event of type `client_authentication_failed` is reported when a client session +did not authenticate successfully. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Reason` | The reason for the authentication failure. See below for possible values for type `AuthFailReason`. | no | +| `Detail` | The detailed error for the authentication failure. | partially | +| `Method` | The authentication method used. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. 
| no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_authentication_info` + +An event of type `client_authentication_info` is reported for intermediate +steps during the authentication process. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Method` | The authentication method used, once known. | no | +| `Info` | The authentication progress message. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. 
This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_authentication_ok` + +An event of type `client_authentication_ok` is reported when a client session +was authenticated successfully. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Method` | The authentication method used. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_connection_end` + +An event of type `client_connection_end` is reported when a client connection +is closed. This is reported even when authentication +fails, and even for simple cancellation messages. 
+ +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_connections.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Duration` | The duration of the connection in nanoseconds. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | + +### `client_connection_start` + +An event of type `client_connection_start` is reported when a client connection +is established. This is reported even when authentication +fails, and even for simple cancellation messages. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_connections.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. 
| no | + +### `client_session_end` + +An event of type `client_session_end` is reported when a client session +is completed. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Duration` | The duration of the connection in nanoseconds. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +## SQL Slow Query Log + +Events in this category report slow query execution. + +Note: these events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +In version 26.1, these events will be moved to the `SQL_EXEC` channel. +To test compatibility before this, set the cluster setting +`log.channel_compatibility_mode.enabled` to false. 
This will send the +events to `SQL_EXEC` instead of `SQL_PERF`. + +Events in this category are logged to the `SQL_PERF` channel. + + +### `large_row` + +An event of type `large_row` is recorded when a statement tries to write a row larger than +cluster setting `sql.guardrails.max_row_size_log` to the database. Multiple +LargeRow events will be recorded for statements writing multiple large rows. +LargeRow events are recorded before the transaction commits, so in the case +of transaction abort there will not be a corresponding row in the database. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RowSize` | | no | +| `TableID` | | no | +| `FamilyID` | | no | +| `PrimaryKey` | | yes | + +### `slow_query` + +An event of type `slow_query` is recorded when a query triggers the "slow query" condition. + +As of this writing, the condition requires: +- the cluster setting `sql.log.slow_query.latency_threshold` +set to a non-zero value, AND +- EITHER of the following conditions: +- the actual age of the query exceeds the configured threshold; AND/OR +- the query performs a full table/index scan AND the cluster setting +`sql.log.slow_query.experimental_full_table_scans.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `txn_rows_read_limit` + +An event of type `txn_rows_read_limit` is recorded when a transaction tries to read more rows than +cluster setting `sql.defaults.transaction_rows_read_log`. 
There will only be +a single record for a single transaction (unless it is retried) even if there +are more statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +### `txn_rows_written_limit` + +An event of type `txn_rows_written_limit` is recorded when a transaction tries to write more rows +than cluster setting `sql.defaults.transaction_rows_written_log`. 
There will +only be a single record for a single transaction (unless it is retried) even +if there are more mutation statements within the transaction that haven't +been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +## SQL Slow Query Log (Internal) + +Events in this category report slow query execution by +internal executors, i.e., when CockroachDB internally issues +SQL statements. 
+ +Note: these events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +In version 26.1, these events will be moved to the `SQL_EXEC` channel. +To test compatibility before this, set the cluster setting +`log.channel_compatibility_mode.enabled` to false. This will send the +events to `SQL_EXEC` instead of `SQL_INTERNAL_PERF`. + +Events in this category are logged to the `SQL_INTERNAL_PERF` channel. + + +### `large_row_internal` + +An event of type `large_row_internal` is recorded when an internal query tries to write a row +larger than cluster settings `sql.guardrails.max_row_size_log` or +`sql.guardrails.max_row_size_err` to the database. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RowSize` | | no | +| `TableID` | | no | +| `FamilyID` | | no | +| `PrimaryKey` | | yes | + +### `slow_query_internal` + +An event of type `slow_query_internal` is recorded when a query triggers the "slow query" condition, +and the cluster setting `sql.log.slow_query.internal_queries.enabled` is +set. +See the documentation for the event type `slow_query` for details about +the "slow query" condition. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `txn_rows_read_limit_internal` + +An event of type `txn_rows_read_limit_internal` is recorded when an internal transaction tries to +read more rows than cluster setting `sql.defaults.transaction_rows_read_log` +or `sql.defaults.transaction_rows_read_err`. 
There will only be a single +record for a single transaction (unless it is retried) even if there are more +mutation statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. 
| no | + +### `txn_rows_written_limit_internal` + +An event of type `txn_rows_written_limit_internal` is recorded when an internal transaction tries to +write more rows than cluster setting +`sql.defaults.transaction_rows_written_log` or +`sql.defaults.transaction_rows_written_err`. There will only be a single +record for a single transaction (unless it is retried) even if there are more +mutation statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. 
| no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +## SQL User and Role operations + +Events in this category pertain to SQL statements that modify the +properties of users and roles. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `USER_ADMIN` channel. + + +### `alter_role` + +An event of type `alter_role` is recorded when a role is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the affected user/role. | yes | +| `Options` | The options set on the user/role. | no | +| `SetInfo` | Information corresponding to an ALTER ROLE SET statement. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_role` + +An event of type `create_role` is recorded when a role is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the new user/role. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_role` + +An event of type `drop_role` is recorded when a role is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the affected user/role. 
| yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `grant_role` + +An event of type `grant_role` is recorded when a role is granted. + + +| Field | Description | Sensitive | +|--|--|--| +| `GranteeRoles` | The roles being granted to. | yes | +| `Members` | The roles being granted. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `password_hash_converted` + +An event of type `password_hash_converted` is recorded when the password credentials +are automatically converted server-side. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the user/role whose credentials have been converted. | yes | +| `OldMethod` | The previous hash method. | no | +| `NewMethod` | The new hash method. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Storage telemetry events + + + +Events in this category are logged to the `TELEMETRY` channel. + + +### `level_stats` + +An event of type `level_stats` contains per-level statistics for an LSM. + + +| Field | Description | Sensitive | +|--|--|--| +| `Level` | level is the level ID in a LSM (e.g. level(L0) == 0, etc.) | no | +| `NumFiles` | num_files is the number of files in the level (gauge). | no | +| `SizeBytes` | size_bytes is the size of the level, in bytes (gauge). 
| no | +| `Score` | score is the compaction score of the level (gauge). | no | +| `BytesIn` | bytes_in is the number of bytes written to this level (counter). | no | +| `BytesIngested` | bytes_ingested is the number of bytes ingested into this level (counter). | no | +| `BytesMoved` | bytes_moved is the number of bytes moved into this level via a move-compaction (counter). | no | +| `BytesRead` | bytes_read is the number of bytes read from this level, during compactions (counter). | no | +| `BytesCompacted` | bytes_compacted is the number of bytes written to this level during compactions (counter). | no | +| `BytesFlushed` | bytes flushed is the number of bytes flushed to this level. This value is always zero for levels other than L0 (counter). | no | +| `TablesCompacted` | tables_compacted is the count of tables compacted into this level (counter). | no | +| `TablesFlushed` | tables_flushed is the count of tables flushed into this level (counter). | no | +| `TablesIngested` | tables_ingested is the count of tables ingested into this level (counter). | no | +| `TablesMoved` | tables_moved is the count of tables moved into this level via move-compactions (counter). | no | +| `NumSublevels` | num_sublevel is the count of sublevels for the level. This value is always zero for levels other than L0 (gauge). | no | + + + +### `store_stats` + +An event of type `store_stats` contains per store stats. + +Note that because stats are scoped to the lifetime of the process, counters +(and certain gauges) will be reset across node restarts. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeId` | node_id is the ID of the node. | no | +| `StoreId` | store_id is the ID of the store. | no | +| `Levels` | levels is a nested message containing per-level statistics. | yes | +| `CacheSize` | cache_size is the size of the cache for the store, in bytes (gauge). | no | +| `CacheCount` | cache_count is the number of items in the cache (gauge). 
| no | +| `CacheHits` | cache_hits is the number of cache hits (counter). | no | +| `CacheMisses` | cache_misses is the number of cache misses (counter). | no | +| `CompactionCountDefault` | compaction_count_default is the count of default compactions (counter). | no | +| `CompactionCountDeleteOnly` | compaction_count_delete_only is the count of delete-only compactions (counter). | no | +| `CompactionCountElisionOnly` | compaction_count_elision_only is the count of elision-only compactions (counter). | no | +| `CompactionCountMove` | compaction_count_move is the count of move-compactions (counter). | no | +| `CompactionCountRead` | compaction_count_read is the count of read-compactions (counter). | no | +| `CompactionCountRewrite` | compaction_count_rewrite is the count of rewrite-compactions (counter). | no | +| `CompactionNumInProgress` | compactions_num_in_progress is the number of compactions in progress (gauge). | no | +| `CompactionMarkedFiles` | compaction_marked_files is the count of files marked for compaction (gauge). | no | +| `FlushCount` | flush_count is the number of flushes (counter). | no | +| `FlushIngestCount` | | no | +| `FlushIngestTableCount` | | no | +| `FlushIngestTableBytes` | | no | +| `IngestCount` | ingest_count is the number of successful ingest operations (counter). | no | +| `MemtableSize` | memtable_size is the total size allocated to all memtables and (large) batches, in bytes (gauge). | no | +| `MemtableCount` | memtable_count is the count of memtables (gauge). | no | +| `MemtableZombieCount` | memtable_zombie_count is the count of memtables no longer referenced by the current DB state, but still in use by an iterator (gauge). | no | +| `MemtableZombieSize` | memtable_zombie_size is the size, in bytes, of all zombie memtables (gauge). | no | +| `WalLiveCount` | wal_live_count is the count of live WAL files (gauge). | no | +| `WalLiveSize` | wal_live_size is the size, in bytes, of live data in WAL files. 
With WAL recycling, this value is less than the actual on-disk size of the WAL files (gauge). | no | +| `WalObsoleteCount` | wal_obsolete_count is the count of obsolete WAL files (gauge). | no | +| `WalObsoleteSize` | wal_obsolete_size is the size of obsolete WAL files, in bytes (gauge). | no | +| `WalPhysicalSize` | wal_physical_size is the size, in bytes, of the WAL files on disk (gauge). | no | +| `WalBytesIn` | wal_bytes_in is the number of logical bytes written to the WAL (counter). | no | +| `WalBytesWritten` | wal_bytes_written is the number of bytes written to the WAL (counter). | no | +| `TableObsoleteCount` | table_obsolete_count is the number of tables which are no longer referenced by the current DB state or any open iterators (gauge). | no | +| `TableObsoleteSize` | table_obsolete_size is the size, in bytes, of obsolete tables (gauge). | no | +| `TableZombieCount` | table_zombie_count is the number of tables no longer referenced by the current DB state, but are still in use by an open iterator (gauge). | no | +| `TableZombieSize` | table_zombie_size is the size, in bytes, of zombie tables (gauge). | no | +| `RangeKeySetsCount` | range_key_sets_count is the approximate count of internal range key sets in the store. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## TELEMETRY + +Events in this file are related to bulk ingest operations performance metrics. + +Events in this category are logged to the `TELEMETRY` channel. + + +### `bulk_ingest_completed` + +An event of type `bulk_ingest_completed` is an event that is logged when a bulk ingest job +(restore, import, etc.) completes successfully. +It captures key performance metrics for the operation. + + +| Field | Description | Sensitive | +|--|--|--| +| `JobID` | JobID is the ID of the bulk ingest job. 
| no | +| `JobType` | JobType identifies the type of bulk ingest job (e.g., "restore", "import"). | no | +| `NumRows` | NumRows is the number of rows successfully ingested. | no | +| `DurationSeconds` | Duration of the ingest operation in seconds. | no | +| `DataSizeMb` | Total logical size of data ingested in megabytes. | no | +| `NodeCount` | Number of nodes that participated in the ingest operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Telemetry events + + + +Events in this category are logged to the `TELEMETRY` channel. + + +### `captured_index_usage_stats` + +An event of type `captured_index_usage_stats` + + +| Field | Description | Sensitive | +|--|--|--| +| `TotalReadCount` | TotalReadCount is the number of times the index has been read. | no | +| `LastRead` | LastRead is the timestamp at which the index was last read. | no | +| `TableID` | TableID is the ID of the table on which the index was created. This is same as descpb.TableID and is unique within the cluster. | no | +| `IndexID` | IndexID is the ID of the index within the scope of the given table. | no | +| `DatabaseName` | DatabaseName is the name of the database in which the index was created. | no | +| `TableName` | TableName is the name of the table on which the index was created. | no | +| `IndexName` | IndexName is the name of the index within the scope of the given table. | no | +| `IndexType` | IndexType is the type of the index. Index types include "primary" and "secondary". | no | +| `IsUnique` | IsUnique indicates if the index has a UNIQUE constraint. | no | +| `IsInverted` | IsInverted indicates if the index is an inverted index. | no | +| `CreatedAt` | CreatedAt is the timestamp at which the index was created. 
| no | +| `SchemaName` | SchemaName is the name of the schema in which the index was created. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `m_v_c_c_iterator_stats` + +Internal storage iteration statistics for a single execution. + + +| Field | Description | Sensitive | +|--|--|--| +| `StepCount` | StepCount collects the number of times the iterator moved forward or backward over the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `StepCountInternal` | StepCountInternal collects the number of times the iterator moved forward or backward over LSM internal keys. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `SeekCount` | SeekCount collects the number of times the iterator moved to a specific key/value pair in the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `SeekCountInternal` | SeekCountInternal collects the number of times the iterator moved to a specific LSM internal key. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `BlockBytes` | BlockBytes collects the bytes in the loaded SSTable data blocks. For details, see pebble.InternalIteratorStats. | no | +| `BlockBytesInCache` | BlockBytesInCache collects the subset of BlockBytes in the block cache. For details, see pebble.InternalIteratorStats. | no | +| `KeyBytes` | KeyBytes collects the bytes in keys that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `ValueBytes` | ValueBytes collects the bytes in values that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `PointCount` | PointCount collects the count of point keys iterated over. 
For details, see pebble.InternalIteratorStats. | no | +| `PointsCoveredByRangeTombstones` | PointsCoveredByRangeTombstones collects the count of point keys that were iterated over that were covered by range tombstones. For details, see pebble.InternalIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeyCount` | RangeKeyCount collects the count of range keys encountered during iteration. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeyContainedPoints` | RangeKeyContainedPoints collects the count of point keys encountered within the bounds of a range key. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeySkippedPoints` | RangeKeySkippedPoints collects the count of the subset of ContainedPoints point keys that were skipped during iteration due to range-key masking. For details, see pkg/storage/engine.go, pebble.RangeKeyIteratorStats, and docs/tech-notes/mvcc-range-tombstones.md. | no | + + + +### `recovery_event` + +An event of type `recovery_event` is an event that is logged on every invocation of BACKUP, +RESTORE, and on every BACKUP schedule creation, with the appropriate subset +of fields populated depending on the type of event. This event is also +logged whenever a BACKUP or RESTORE job completes or fails. + + +| Field | Description | Sensitive | +|--|--|--| +| `RecoveryType` | RecoveryType is the type of recovery described by this event, which is one of - backup - scheduled_backup - create_schedule - restore

It can also be a job event corresponding to the recovery, which is one of - backup_job - scheduled_backup_job - restore_job | no | +| `TargetScope` | TargetScope is the largest scope of the targets that the user is backing up or restoring based on the following order: table < schema < database < full cluster. | no | +| `IsMultiregionTarget` | IsMultiregionTarget is true if any of the targets contain objects with multi-region primitives. | no | +| `TargetCount` | TargetCount is the number of targets in the BACKUP/RESTORE. | no | +| `DestinationStorageTypes` | DestinationStorageTypes are the types of storage that the user is backing up to or restoring from. | no | +| `DestinationAuthTypes` | DestinationAuthTypes are the types of authentication methods that the user is using to access the destination storage. | no | +| `IsLocalityAware` | IsLocalityAware indicates if the BACKUP or RESTORE is locality aware. | no | +| `WithRevisionHistory` | WithRevisionHistory is true if the BACKUP includes revision history. | no | +| `HasEncryptionPassphrase` | HasEncryptionPassphrase is true if the user provided an encryption passphrase to encrypt/decrypt their backup. | no | +| `KMSType` | KMSType is the type of KMS the user is using to encrypt/decrypt their backup. | no | +| `KMSCount` | KMSCount is the number of KMS the user is using. | no | +| `Options` | Options contain all the names of the options specified by the user in the BACKUP or RESTORE statement. For options that are accompanied by a value, only those with non-empty values will be present.

It's important to note that there are no option values anywhere in the event payload. Future changes to telemetry should refrain from adding values to the payload unless they are properly redacted. | no | +| `JobID` | JobID is the ID of the BACKUP/RESTORE job. | no | +| `ResultStatus` | ResultStatus indicates whether the job succeeded or failed. | no | +| `ErrorText` | ErrorText is the text of the error that caused the job to fail. | partially | +| `RecurringCron` | RecurringCron is the crontab for the incremental backup. | no | +| `FullBackupCron` | FullBackupCron is the crontab for the full backup. | no | +| `CustomFirstRunTime` | CustomFirstRunTime is the timestamp for the user configured first run time. Expressed as nanoseconds since the Unix epoch. | no | +| `IgnoreExistingBackup` | IgnoreExistingBackup is true iff the BACKUP schedule should still be created even if a backup is already present in its destination. | no | +| `ApplicationName` | The application name for the session where recovery event was created. | no | +| `NumRows` | NumRows is the number of rows successfully imported, backed up or restored. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `sampled_exec_stats` + +An event of type `sampled_exec_stats` contains execution statistics that apply to both statements +and transactions. These stats as a whole are collected using a sampling approach. +These exec stats are meant to contain the same fields as ExecStats in +apps_stats.proto but are for a single execution rather than aggregated executions. +Fields in this struct should be updated in sync with apps_stats.proto. + + +| Field | Description | Sensitive | +|--|--|--| +| `NetworkBytes` | NetworkBytes collects the number of bytes sent over the network by DistSQL components. 
| no | +| `MaxMemUsage` | MaxMemUsage collects the maximum memory usage that occurred on a node. | no | +| `ContentionTime` | ContentionTime collects the time in seconds statements in the transaction spent contending. | no | +| `NetworkMessages` | NetworkMessages collects the number of messages that were sent over the network by DistSQL components. | no | +| `MaxDiskUsage` | MaxDiskUsage collects the maximum temporary disk usage that occurred. This is set in cases where a query had to spill to disk, e.g. when performing a large sort where not all of the tuples fit in memory. | no | +| `CPUSQLNanos` | CPUSQLNanos collects the CPU time spent executing SQL operations in nanoseconds. Currently, it is only collected for statements without mutations that have a vectorized plan. | no | +| `MVCCIteratorStats` | Internal storage iteration statistics. | yes | + + + +### `sampled_query` + +An event of type `sampled_query` is the SQL query event logged to the telemetry channel. It +contains common SQL event/execution details. + +Note: in version 26.1, these events will be moved to the `SQL_EXEC` channel. +To test compatibility before this, set the cluster setting +`log.channel_compatibility_mode.enabled` to false. This will send the +events to `SQL_EXEC` instead of `TELEMETRY`. + + +| Field | Description | Sensitive | +|--|--|--| +| `SkippedQueries` | skipped_queries indicates how many SQL statements were not considered for sampling prior to this one. If the field is omitted, or its value is zero, this indicates that no statement was omitted since the last event. | no | +| `CostEstimate` | Cost of the query as estimated by the optimizer. | no | +| `Distribution` | The distribution of the DistSQL query plan (local, full, or partial). | no | +| `PlanGist` | The query's plan gist bytes as a base64 encoded string. | no | +| `SessionID` | SessionID is the ID of the session that initiated the query. | no | +| `Database` | Name of the database that initiated the query.
| no | +| `StatementID` | Statement ID of the query. | no | +| `TransactionID` | Transaction ID of the query. | no | +| `MaxFullScanRowsEstimate` | Maximum number of rows scanned by a full scan, as estimated by the optimizer. | no | +| `TotalScanRowsEstimate` | Total number of rows read by all scans in the query, as estimated by the optimizer. | no | +| `OutputRowsEstimate` | The number of rows output by the query, as estimated by the optimizer. | no | +| `StatsAvailable` | Whether table statistics were available to the optimizer when planning the query. | no | +| `NanosSinceStatsCollected` | The maximum number of nanoseconds that have passed since stats were collected on any table scanned by this query. | no | +| `BytesRead` | The number of bytes read from disk. | no | +| `RowsRead` | The number of rows read from disk. | no | +| `RowsWritten` | The number of rows written. | no | +| `InnerJoinCount` | The number of inner joins in the query plan. | no | +| `LeftOuterJoinCount` | The number of left (or right) outer joins in the query plan. | no | +| `FullOuterJoinCount` | The number of full outer joins in the query plan. | no | +| `SemiJoinCount` | The number of semi joins in the query plan. | no | +| `AntiJoinCount` | The number of anti joins in the query plan. | no | +| `IntersectAllJoinCount` | The number of intersect all joins in the query plan. | no | +| `ExceptAllJoinCount` | The number of except all joins in the query plan. | no | +| `HashJoinCount` | The number of hash joins in the query plan. | no | +| `CrossJoinCount` | The number of cross joins in the query plan. | no | +| `IndexJoinCount` | The number of index joins in the query plan. | no | +| `LookupJoinCount` | The number of lookup joins in the query plan. | no | +| `MergeJoinCount` | The number of merge joins in the query plan. | no | +| `InvertedJoinCount` | The number of inverted joins in the query plan. | no | +| `ApplyJoinCount` | The number of apply joins in the query plan. 
| no | +| `ZigZagJoinCount` | The number of zig zag joins in the query plan. | no | +| `ContentionNanos` | The duration of time in nanoseconds that the query experienced contention. | no | +| `Regions` | The regions of the nodes where SQL processors ran. | no | +| `NetworkBytesSent` | The number of network bytes by DistSQL components. | no | +| `MaxMemUsage` | The maximum amount of memory usage by nodes for this query. | no | +| `MaxDiskUsage` | The maximum amount of disk usage by nodes for this query. | no | +| `KVBytesRead` | The number of bytes read at the KV layer for this query. | no | +| `KVPairsRead` | The number of key-value pairs read at the KV layer for this query. | no | +| `KVRowsRead` | The number of rows read at the KV layer for this query. | no | +| `NetworkMessages` | The number of network messages sent by nodes for this query by DistSQL components. | no | +| `IndexRecommendations` | Generated index recommendations for this query. | no | +| `ScanCount` | The number of scans in the query plan. | no | +| `ScanWithStatsCount` | The number of scans using statistics (including forecasted statistics) in the query plan. | no | +| `ScanWithStatsForecastCount` | The number of scans using forecasted statistics in the query plan. | no | +| `TotalScanRowsWithoutForecastsEstimate` | Total number of rows read by all scans in the query, as estimated by the optimizer without using forecasts. | no | +| `NanosSinceStatsForecasted` | The greatest quantity of nanoseconds that have passed since the forecast time (or until the forecast time, if it is in the future, in which case it will be negative) for any table with forecasted stats scanned by this query. | no | +| `Indexes` | The list of indexes used by this query. | no | +| `CpuTimeNanos` | Collects the cumulative CPU time spent executing SQL operations in nanoseconds. Currently, it is only collected for statements without mutations that have a vectorized plan. 
| no | +| `KvGrpcCalls` | The number of grpc calls done to get data from KV nodes. | no | +| `KvTimeNanos` | Cumulative time spent waiting for a KV request. This includes disk IO time and potentially network time (if any of the keys are not local). | no | +| `ServiceLatencyNanos` | The time to service the query, from start of parse to end of execute. | no | +| `OverheadLatencyNanos` | The difference between service latency and the sum of parse latency + plan latency + run latency. | no | +| `RunLatencyNanos` | The time to run the query and fetch or compute the result rows. | no | +| `PlanLatencyNanos` | The time to transform the AST into a logical query plan. | no | +| `IdleLatencyNanos` | The time between statement executions in a transaction. | no | +| `ParseLatencyNanos` | The time to transform the SQL string into an abstract syntax tree (AST). | no | +| `MvccStepCount` | StepCount collects the number of times the iterator moved forward or backward over the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccStepCountInternal` | StepCountInternal collects the number of times the iterator moved forward or backward over LSM internal keys. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccSeekCount` | SeekCount collects the number of times the iterator moved to a specific key/value pair in the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccSeekCountInternal` | SeekCountInternal collects the number of times the iterator moved to a specific LSM internal key. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccBlockBytes` | BlockBytes collects the bytes in the loaded SSTable data blocks. For details, see pebble.InternalIteratorStats. | no | +| `MvccBlockBytesInCache` | BlockBytesInCache collects the subset of BlockBytes in the block cache.
For details, see pebble.InternalIteratorStats. | no | +| `MvccKeyBytes` | KeyBytes collects the bytes in keys that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccValueBytes` | ValueBytes collects the bytes in values that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccPointCount` | PointCount collects the count of point keys iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccPointsCoveredByRangeTombstones` | PointsCoveredByRangeTombstones collects the count of point keys that were iterated over that were covered by range tombstones. For details, see pebble.InternalIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeyCount` | RangeKeyCount collects the count of range keys encountered during iteration. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeyContainedPoints` | RangeKeyContainedPoints collects the count of point keys encountered within the bounds of a range key. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeySkippedPoints` | RangeKeySkippedPoints collects the count of the subset of ContainedPoints point keys that were skipped during iteration due to range-key masking. For details, see pkg/storage/engine.go, pebble.RangeKeyIteratorStats, and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `SchemaChangerMode` | SchemaChangerMode is the mode that was used to execute the schema change, if any. | no | +| `SQLInstanceIDs` | SQLInstanceIDs is a list of all the SQL instances used in this statement's execution. | no | +| `KVNodeIDs` | KVNodeIDs is a list of all the KV nodes used in this statement's execution. | no | +| `StatementFingerprintID` | Statement fingerprint ID of the query. | no | +| `UsedFollowerRead` | UsedFollowerRead indicates whether at least some reads were served by the follower replicas. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc.) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was retried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan.
| no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `sampled_transaction` + +An event of type `sampled_transaction` is the event logged to telemetry at the end of transaction execution. + +Note: in version 26.1, these events will be moved to the `SQL_EXEC` channel. +To test compatibility before this, set the cluster setting +`log.channel_compatibility_mode.enabled` to false. This will send the +events to `SQL_EXEC` instead of `TELEMETRY`. + + +| Field | Description | Sensitive | +|--|--|--| +| `User` | User is the user account that triggered the transaction. The special usernames `root` and `node` are not considered sensitive. | depends | +| `ApplicationName` | ApplicationName is the application name for the session where the transaction was executed. This is included in the event to ease filtering of logging output by application. | no | +| `TxnCounter` | TxnCounter is the sequence number of the SQL transaction inside its session. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `TransactionID` | TransactionID is the id of the transaction. | no | +| `Committed` | Committed indicates if the transaction committed successfully. We want to include this value even if it is false. | no | +| `ImplicitTxn` | ImplicitTxn indicates if the transaction was an implicit one. We want to include this value even if it is false. | no | +| `StartTimeUnixNanos` | StartTimeUnixNanos is the time the transaction was started. Expressed as unix time in nanoseconds. | no | +| `EndTimeUnixNanos` | EndTimeUnixNanos is the time the transaction finished (either committed or aborted). Expressed as unix time in nanoseconds. 
| no | +| `ServiceLatNanos` | ServiceLatNanos is the time to service the whole transaction, from start to end of execution. | no | +| `SQLSTATE` | SQLSTATE is the SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | ErrorText is the text of the error if any. | partially | +| `NumRetries` | NumRetries is the number of times the txn was retried automatically by the server. | no | +| `LastAutoRetryReason` | LastAutoRetryReason is a string containing the reason for the last automatic retry. | partially | +| `NumRows` | NumRows is the total number of rows returned across all statements. | no | +| `RetryLatNanos` | RetryLatNanos is the amount of time spent retrying the transaction. | no | +| `CommitLatNanos` | CommitLatNanos is the amount of time spent committing the transaction after all statement operations. | no | +| `IdleLatNanos` | IdleLatNanos is the amount of time spent waiting for the client to send statements while the transaction is open. | no | +| `BytesRead` | BytesRead is the number of bytes read from disk. | no | +| `RowsRead` | RowsRead is the number of rows read from disk. | no | +| `RowsWritten` | RowsWritten is the number of rows written to disk. | no | +| `SampledExecStats` | SampledExecStats is a nested field containing execution statistics. This field will be omitted if the stats were not sampled. | yes | +| `SkippedTransactions` | SkippedTransactions is the number of transactions that were skipped as part of sampling prior to this one. We only count skipped transactions when telemetry logging is enabled and the sampling mode is set to "transaction". | no | +| `TransactionFingerprintID` | TransactionFingerprintID is the fingerprint ID of the transaction. This can be used to find the transaction in the console. | no | +| `StatementFingerprintIDs` | StatementFingerprintIDs is an array of statement fingerprint IDs belonging to this transaction. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `schema_descriptor` + +An event of type `schema_descriptor` is an event for schema telemetry, whose purpose is +to take periodic snapshots of the cluster's SQL schema and publish them in +the telemetry log channel. For all intents and purposes, the data in such a +snapshot can be thought of the outer join of certain system tables: +namespace, descriptor, and at some point perhaps zones, etc. + +Snapshots are too large to conveniently be published as a single log event, +so instead they're broken down into SchemaDescriptor events which +contain the data in one record of this outer join projection. These events +are prefixed by a header (a SchemaSnapshotMetadata event). + + +| Field | Description | Sensitive | +|--|--|--| +| `SnapshotID` | SnapshotID is the unique identifier of the snapshot that this event is part of. | no | +| `ParentDatabaseID` | ParentDatabaseID matches the same key column in system.namespace. | no | +| `ParentSchemaID` | ParentSchemaID matches the same key column in system.namespace. | no | +| `Name` | Name matches the same key column in system.namespace. | no | +| `DescID` | DescID matches the 'id' column in system.namespace and system.descriptor. | no | +| `Desc` | Desc matches the 'descriptor' column in system.descriptor. Some contents of the descriptor may be redacted to prevent leaking PII. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `schema_snapshot_metadata` + +An event of type `schema_snapshot_metadata` is an event describing a schema snapshot, which +is a set of SchemaDescriptor messages sharing the same SnapshotID. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `SnapshotID` | SnapshotID is the unique identifier of this snapshot. | no | +| `NumRecords` | NumRecords is how many SchemaDescriptor events are in the snapshot. | no | +| `AsOfTimestamp` | AsOfTimestamp is when the snapshot was taken. This is equivalent to the timestamp given in the AS OF SYSTEM TIME clause when querying the namespace and descriptor tables in the system database. Expressed as nanoseconds since the Unix epoch. | no | +| `Errors` | Errors records any errors encountered when post-processing this snapshot, which includes the redaction of any potential PII. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Zone config events + +Events in this category pertain to zone configuration changes on +the SQL schema or system ranges. + +When zone configs apply to individual tables or other objects in a +SQL logical schema, they are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these zone config events are preserved +in each tenant's own `system.eventlog` table. + +When they apply to cluster-level ranges (e.g., the system zone config), +they are stored in the system tenant's own `system.eventlog` table. + +Events in this category are logged to the `OPS` channel. + + +### `remove_zone_config` + +An event of type `remove_zone_config` is recorded when a zone config is removed. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + +### `set_zone_config` + +An event of type `set_zone_config` is recorded when a zone config is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `ResolvedOldConfig` | The string representation of the resolved old zone config. This is not necessarily the same as the zone config that was previously set -- as it includes the resolved values of the zone config options. In other words, a zone config that hasn't been properly "set" yet (and inherits from its parent) will have a resolved_old_config that has details of the values it inherits from its parent. This is particularly useful to get a proper diff between the old and new zone config. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + + + + +## Enumeration types + +### `AuthFailReason` + +AuthFailReason is the inventory of possible reasons for an +authentication failure. + + +| Value | Textual alias in code or documentation | Description | +|--|--|--| +| 0 | UNKNOWN | is reported when the reason is unknown. | +| 1 | USER_RETRIEVAL_ERROR | occurs when there was an internal error accessing the principals. | +| 2 | USER_NOT_FOUND | occurs when the principal is unknown. | +| 3 | LOGIN_DISABLED | occurs when the user does not have LOGIN privileges. | +| 4 | METHOD_NOT_FOUND | occurs when no HBA rule matches or the method does not exist. | +| 5 | PRE_HOOK_ERROR | occurs when the authentication handshake encountered a protocol error. 
| +| 6 | CREDENTIALS_INVALID | occurs when the client-provided credentials were invalid. | +| 7 | CREDENTIALS_EXPIRED | occurs when the credentials provided by the client are expired. | +| 8 | NO_REPLICATION_ROLEOPTION | occurs when the connection requires a replication role option, but the user does not have it. | +| 9 | AUTHORIZATION_ERROR | is used for errors during the authorization phase. For example, this would include issues with mapping LDAP groups to SQL roles and granting those roles to the user. | +| 10 | PROVISIONING_ERROR | is used for errors during the user provisioning phase. This would include errors when the transaction to provision the authenticating user failed to execute. | + + + diff --git a/src/current/_includes/cockroach-generated/release-25.4/logformats.md b/src/current/_includes/cockroach-generated/release-25.4/logformats.md new file mode 100644 index 00000000000..89580c9fc15 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.4/logformats.md @@ -0,0 +1,382 @@ + +The supported log output formats are documented below. + + +- [`crdb-v1`](#format-crdb-v1) + +- [`crdb-v1-count`](#format-crdb-v1-count) + +- [`crdb-v1-tty`](#format-crdb-v1-tty) + +- [`crdb-v1-tty-count`](#format-crdb-v1-tty-count) + +- [`crdb-v2`](#format-crdb-v2) + +- [`crdb-v2-tty`](#format-crdb-v2-tty) + +- [`json`](#format-json) + +- [`json-compact`](#format-json-compact) + +- [`json-fluent`](#format-json-fluent) + +- [`json-fluent-compact`](#format-json-fluent-compact) + + + +## Format `crdb-v1` + + +This is a legacy file format used from CockroachDB v1.0. + +Each log entry is emitted using a common prefix, described below, followed by: + +- The logging context tags enclosed between `[` and `]`, if any. It is possible + for this to be omitted if there were no context tags. +- Optionally, a counter column, if the option 'show-counter' is enabled. See below for details. +- the text of the log entry. 
+ +Beware that the text of the log entry can span multiple lines. +The following caveats apply: + +- The text of the log entry can start with text enclosed between `[` and `]`. + If there were no logging tags to start with, it is not possible to distinguish between + logging context tag information and a `[...]` string in the main text of the + log entry. This means that this format is ambiguous. + + To remove this ambiguity, you can use the option 'show-counter'. + +- The text of the log entry can embed arbitrary application-level strings, + including strings that represent log entries. In particular, an accident + of implementation can cause the common entry prefix (described below) + to also appear on a line of its own, as part of the payload of a previous + log entry. There is no automated way to recognize when this occurs. + Care must be taken by a human observer to recognize these situations. + +- The log entry parser provided by CockroachDB to read log files is faulty + and is unable to recognize the aforementioned pitfall; nor can it read + entries larger than 64KiB successfully. Generally, use of this internal + log entry parser is discouraged for entries written with this format. + +See the newer format `crdb-v2` for an alternative +without these limitations. + +### Header lines + +At the beginning of each file, a header is printed using a similar format as +regular log entries. This header reports when the file was created, +which parameters were used to start the server, the server identifiers +if known, and other metadata about the running process. + +- This header appears to be logged at severity `INFO` (with an `I` prefix + at the start of the line) even though it does not really have a severity. +- The header is printed unconditionally even when a filter is configured to + omit entries at the `INFO` level. 
+ +### Common log entry prefix + +Each line of output starts with the following prefix: + + Lyymmdd hh:mm:ss.uuuuuu goid [chan@]file:line marker tags counter + +Reminder, the tags may be omitted; and the counter is only printed if the option +'show-counter' is specified. + +| Field | Description | +|-----------------|--------------------------------------------------------------------------------------------------------------------------------------| +| L | A single character, representing the [log level](logging.html#logging-levels-severities) (e.g., `I` for `INFO`). | +| yy | The year (zero padded; i.e., 2016 is `16`). | +| mm | The month (zero padded; i.e., May is `05`). | +| dd | The day (zero padded). | +| hh:mm:ss.uuuuuu | Time in hours, minutes and fractional seconds. Timezone is UTC. | +| goid | The goroutine id (omitted if zero for use by tests). | +| chan | The channel number (omitted if zero for backward compatibility). | +| file | The file name where the entry originated. | +| line | The line number where the entry originated. | +| marker | Redactability marker ` + redactableIndicator + ` (see below for details). | +| tags | The logging tags, enclosed between `[` and `]`. May be absent. | +| counter | The entry counter. Only included if 'show-counter' is enabled. | + +The redactability marker can be empty; in this case, its position in the common prefix is +a double ASCII space character which can be used to reliably identify this situation. + +If the marker ` + redactableIndicator + ` is present, the remainder of the log entry +contains delimiters (‹...›) around +fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) when log redaction is requested. 
+ + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `show-counter` | Whether to include the counter column in the line header. Without it, the format may be ambiguous due to the optionality of tags. | +| `colors` | The color profile to use. Possible values: none, auto, ansi, 256color. Default is auto. | +| `timezone` | The timezone to use for the timestamp column. The value can be any timezone name recognized by the Go standard library. Default is `UTC` | + + + +## Format `crdb-v1-count` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: true` +- `colors: none` + + +## Format `crdb-v1-tty` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: false` +- `colors: auto` + + +## Format `crdb-v1-tty-count` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: true` +- `colors: auto` + + +## Format `crdb-v2` + +This is the main file format used from CockroachDB v21.1. + +Each log entry is emitted using a common prefix, described below, +followed by the text of the log entry. + +### Entry format + +Each line of output starts with the following prefix: + + Lyymmdd hh:mm:ss.uuuuuu goid [chan@]file:line marker [tags...] counter cont + +| Field | Description | +|-----------------|--------------------------------------------------------------------------------------------------------------------------------------| +| L | A single character, representing the [log level](logging.html#logging-levels-severities) (e.g., `I` for `INFO`). | +| yy | The year (zero padded; i.e., 2016 is `16`). | +| mm | The month (zero padded; i.e., May is `05`). | +| dd | The day (zero padded). | +| hh:mm:ss.uuuuuu | Time in hours, minutes and fractional seconds. Timezone is UTC. | +| goid | The goroutine id (zero when cannot be determined). 
| +| chan | The channel number (omitted if zero for backward compatibility). | +| file | The file name where the entry originated. Also see below. | +| line | The line number where the entry originated. | +| marker | Redactability marker "⋮" (see below for details). | +| tags | The logging tags, enclosed between `[` and `]`. See below. | +| counter | The optional entry counter (see below for details). | +| cont | Continuation mark for structured and multi-line entries. See below. | + +The `chan@` prefix before the file name indicates the logging channel, +and is omitted if the channel is `DEV`. + +The file name may be prefixed by the string `(gostd) ` to indicate +that the log entry was produced inside the Go standard library, instead +of a CockroachDB component. Entry parsers must be configured to ignore this prefix +when present. + +`marker` can be empty; in this case, its position in the common prefix is +a double ASCII space character which can be used to reliably identify this situation. +If the marker "⋮" is present, the remainder of the log entry +contains delimiters (‹...›) +around fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) +when log redaction is requested. + +The logging `tags` are enclosed between square brackets `[...]`, +and the syntax `[-]` is used when there are no logging tags +associated with the log entry. + +`counter` is numeric, and is incremented for every +log entry emitted to this sink. (There is thus one counter sequence per +sink.) For entries that do not have a counter value +associated (e.g., header entries in file sinks), the counter position +in the common prefix is empty: `tags` is then +followed by two ASCII space characters, instead of one space; the `counter`, +and another space. The presence of the two ASCII spaces indicates +reliably that no counter was present. 
+ +`cont` is a format/continuation indicator: + +| Continuation indicator | ASCII | Description | +|------------------------|-------|--| +| space | 0x32 | Start of an unstructured entry. | +| equal sign, "=" | 0x3d | Start of a structured entry. | +| exclamation mark, "!" | 0x21 | Start of an embedded stack trace. | +| plus sign, "+" | 0x2b | Continuation of a multi-line entry. The payload contains a newline character at this position. | +| vertical bar | 0x7c | Continuation of a large entry. | + +### Examples + +Example single-line unstructured entry: + +~~~ +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 started with engine type ‹2› +~~~ + +Example multi-line unstructured entry: + +~~~ +I210116 21:49:17.083093 14 1@cli/start.go:690 ⋮ [-] 40 node startup completed: +I210116 21:49:17.083093 14 1@cli/start.go:690 ⋮ [-] 40 +CockroachDB node starting at 2021-01-16 21:49 (took 0.0s) +~~~ + +Example structured entry: + +~~~ +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 ={"Timestamp":1610833757080706620,"EventType":"node_restart"} +~~~ + +Example long entries broken up into multiple lines: + +~~~ +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.... +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 |aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +~~~ + +~~~ +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 ={"Timestamp":1610833757080706620,"EventTy... +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 |pe":"node_restart"} +~~~ + +### Backward-compatibility notes + +Entries in this format can be read by most `crdb-v1` log parsers, +in particular the one included in the DB console and +also the [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) +facility. + +However, implementers of previous version parsers must +understand that the logging tags field is now always +included, and the lack of logging tags is included +by a tag string set to `[-]`. 
+ +Likewise, the entry counter is now also always included, +and there is a special character after `counter` +to indicate whether the remainder of the line is a +structured entry, or a continuation of a previous entry. + +Finally, in the previous format, structured entries +were prefixed with the string `Structured entry: `. In +the new format, they are prefixed by the `=` continuation +indicator. + + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `colors` | The color profile to use. Possible values: none, auto, ansi, 256color. Default is auto. | +| `timezone` | The timezone to use for the timestamp column. The value can be any timezone name recognized by the Go standard library. Default is `UTC` | + + + +## Format `crdb-v2-tty` + +This format name is an alias for 'crdb-v2' with +the following format option defaults: + +- `colors: auto` + + +## Format `json` + +This format emits log entries as a JSON payload. + +The JSON object is guaranteed to not contain unescaped newlines +or other special characters, and the entry as a whole is followed +by a newline character. This makes the format suitable for +processing over a stream unambiguously. + +Each entry contains at least the following fields: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `tag` | `tag` | (Only if the option `fluent-tag: true` is given.) A Fluent tag for the event, formed by the process name and the logging channel. | +| `d` | `datetime` | The pretty-printed date/time of the event timestamp, if enabled via options. | +| `f` | `file` | The name of the source file where the event was emitted. | +| `g` | `goroutine` | The identifier of the goroutine where the event was emitted. | +| `l` | `line` | The line number where the event was emitted in the source. 
| `r` | `redactable` | Whether the payload is redactable (see below for details). | +| `t` | `timestamp` | The timestamp at which the event was emitted on the logging channel. | +| `v` | `version` | The binary version with which the event was generated. | + + +After a couple of *header* entries written at the beginning of each log sink, +all subsequent log entries also contain the following fields: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `C` | `channel` | The name of the logging channel where the event was sent. | +| `sev` | `severity` | The severity of the event. | +| `c` | `channel_numeric` | The numeric identifier for the logging channel where the event was sent. | +| `n` | `entry_counter` | The entry number on this logging sink, relative to the last process restart. | +| `s` | `severity_numeric` | The numeric value of the severity of the event. | + + +Additionally, the following fields are conditionally present: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `N` | `node_id` | The node ID where the event was generated, once known. Only reported for single-tenant or KV servers. | +| `x` | `cluster_id` | The cluster ID where the event was generated, once known. Only reported for single-tenant or KV servers. | +| `q` | `instance_id` | The SQL instance ID where the event was generated, once known. | +| `T` | `tenant_id` | The SQL tenant ID where the event was generated, once known. | +| `V` | `tenant_name` | The SQL virtual cluster where the event was generated, once known. | +| `tags` | `tags` | The logging context tags for the entry, if there were context tags. | +| `message` | `message` | For unstructured events, the flat text payload. | +| `event` | `event` | The logging event, if structured (see below for details). 
| +| `stacks` | `stacks` | Goroutine stacks, for fatal events. | + +When an entry is structured, the `event` field maps to a dictionary +whose structure is one of the documented structured events. See the [reference documentation](eventlog.html) +for structured events for a list of possible payloads. + +When the entry is marked as `redactable`, the `tags`, `message`, and/or `event` payloads +contain delimiters (‹...›) around +fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) when log redaction is requested. + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `datetime-format` | The format to use for the `datetime` field. The value can be one of `none`, `iso8601`/`rfc3339` (synonyms), or `rfc1123`. Default is `none`. | +| `datetime-timezone` | The timezone to use for the `datetime` field. The value can be any timezone name recognized by the Go standard library. Default is `UTC` | +| `tag-style` | The tags to include in the envelope. The value can be `compact` (one letter tags) or `verbose` (long-form tags). Default is `verbose`. | +| `fluent-tag` | Whether to produce an additional field called `tag` for Fluent compatibility. Default is `false`. 
| + + + +## Format `json-compact` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: false` +- `tag-style: compact` + + +## Format `json-fluent` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: true` +- `tag-style: verbose` + + +## Format `json-fluent-compact` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: true` +- `tag-style: compact` + + diff --git a/src/current/_includes/cockroach-generated/release-25.4/logging.md b/src/current/_includes/cockroach-generated/release-25.4/logging.md new file mode 100644 index 00000000000..7661187987e --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.4/logging.md @@ -0,0 +1,188 @@ +## Logging levels (severities) + +### INFO + +The `INFO` severity is used for informational messages that do not +require action. + +### WARNING + +The `WARNING` severity is used for situations which may require special handling, +where normal operation is expected to resume automatically. + +### ERROR + +The `ERROR` severity is used for situations that require special handling, +where normal operation could not proceed as expected. +Other operations can continue mostly unaffected. + +### FATAL + +The `FATAL` severity is used for situations that require an immediate, hard +server shutdown. A report is also sent to telemetry if telemetry +is enabled. + + +## Logging channels + +### `DEV` + +The `DEV` channel is used during development to collect log +details useful for troubleshooting that fall outside the +scope of other channels. It is also the default logging +channel for events not associated with a channel. + +This channel is special in that there are no constraints as to +what may or may not be logged on it. 
Conversely, users in +production deployments are invited to not collect `DEV` logs in +centralized logging facilities, because they likely contain +sensitive operational data. +See [Configure logs](configure-logs.html#dev-channel). + +### `OPS` + +The `OPS` channel is used to report "point" operational events, +initiated by user operators or automation: + + - Operator or system actions on server processes: process starts, + stops, shutdowns, crashes (if they can be logged), + including each time: command-line parameters, current version being run + - Actions that impact the topology of a cluster: node additions, + removals, decommissions, etc. + - Job-related initiation or termination + - [Cluster setting](cluster-settings.html) changes + - [Zone configuration](configure-replication-zones.html) changes + +### `HEALTH` + +The `HEALTH` channel is used to report "background" operational +events, initiated by CockroachDB or reporting on automatic processes: + + - Current resource usage, including critical resource usage + - Node-node connection events, including connection errors and + gossip details + - Range and table leasing events + - Up- and down-replication, range unavailability + +### `STORAGE` + +The `STORAGE` channel is used to report low-level storage +layer events (RocksDB/Pebble). + +### `SESSIONS` + +The `SESSIONS` channel is used to report client network activity when enabled via +the `server.auth_log.sql_connections.enabled` and/or +`server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): + + - Connections opened/closed + - Authentication events: logins, failed attempts + - Session and query cancellation + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. 
+ +### `SQL_SCHEMA` + +The `SQL_SCHEMA` channel is used to report changes to the +SQL logical schema, excluding privilege and ownership changes +(which are reported separately on the `PRIVILEGES` channel) and +zone configuration changes (which go to the `OPS` channel). + +This includes: + + - Database/schema/table/sequence/view/type creation + - Adding/removing/changing table columns + - Changing sequence parameters + +`SQL_SCHEMA` events generally comprise changes to the schema that affect the +functional behavior of client apps using stored objects. + +### `USER_ADMIN` + +The `USER_ADMIN` channel is used to report changes +in users and roles, including: + + - Users added/dropped + - Changes to authentication credentials (e.g., passwords, validity, etc.) + - Role grants/revocations + - Role option grants/revocations + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. + +### `PRIVILEGES` + +The `PRIVILEGES` channel is used to report data +authorization changes, including: + + - Privilege grants/revocations on database, objects, etc. + - Object ownership changes + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. + +### `SENSITIVE_ACCESS` + +The `SENSITIVE_ACCESS` channel is used to report SQL +data access to sensitive data: + + - Data access audit events (when table audit is enabled via + [ALTER TABLE ... EXPERIMENTAL_AUDIT](alter-table.html#experimental_audit)) + - Data access audit events (when role-based audit is enabled via + [`sql.log.user_audit` cluster setting](role-based-audit-logging.html#syntax-of-audit-settings)) + - SQL statements executed by users with the admin role + - Operations that write to system tables + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. 
+ +### `SQL_EXEC` + +The `SQL_EXEC` channel is used to report SQL execution on +behalf of client connections: + + - Logical SQL statement executions (when enabled via the + `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) + - uncaught Go panic errors during the execution of a SQL statement. + +### `SQL_PERF` + +The `SQL_PERF` channel is used to report SQL executions +that are marked as "out of the ordinary" +to facilitate performance investigations. +This includes the SQL "slow query log". + +Arguably, this channel overlaps with `SQL_EXEC`. +However, we keep both channels separate for backward compatibility +with versions prior to v21.1, where the corresponding events +were redirected to separate files. + +### `SQL_INTERNAL_PERF` + +The `SQL_INTERNAL_PERF` channel is like the `SQL_PERF` channel, but is aimed at +helping developers of CockroachDB itself. It exists as a separate +channel so as to not pollute the `SQL_PERF` logging output with +internal troubleshooting details. + +### `TELEMETRY` + +The `TELEMETRY` channel reports telemetry events. Telemetry events describe +feature usage within CockroachDB and anonymizes any application- +specific data. + +### `KV_DISTRIBUTION` + +The `KV_DISTRIBUTION` channel is used to report data distribution events, such as moving +replicas between stores in the cluster, or adding (removing) replicas to +ranges. + +### `CHANGEFEED` + +The `CHANGEFEED` channel is used to report changefeed events + +### `KV_EXEC` + +The `KV_EXEC` channel is used to report KV execution events that don't fall into the +KV_DISTRIBUTION channel. 
+ diff --git a/src/current/_includes/cockroach-generated/release-25.4/settings/settings.html b/src/current/_includes/cockroach-generated/release-25.4/settings/settings.html new file mode 100644 index 00000000000..1a61f0cfbfe --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.4/settings/settings.html @@ -0,0 +1,391 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
SettingTypeDefaultDescriptionSupported Deployments
admission.disk_bandwidth_tokens.elastic.enabled
booleantruewhen true, and provisioned bandwidth for the disk corresponding to a store is configured, tokens for elastic work will be limited if disk bandwidth becomes a bottleneckAdvanced/Self-Hosted
admission.epoch_lifo.enabled
booleanfalsewhen true, epoch-LIFO behavior is enabled when there is significant delay in admissionBasic/Standard/Advanced/Self-Hosted
admission.epoch_lifo.epoch_closing_delta_duration
duration5msthe delta duration before closing an epoch, for epoch-LIFO admission control orderingBasic/Standard/Advanced/Self-Hosted
admission.epoch_lifo.epoch_duration
duration100msthe duration of an epoch, for epoch-LIFO admission control orderingBasic/Standard/Advanced/Self-Hosted
admission.epoch_lifo.queue_delay_threshold_to_switch_to_lifo
duration105msthe queue delay encountered by a (tenant,priority) for switching to epoch-LIFO orderingBasic/Standard/Advanced/Self-Hosted
admission.kv.enabled
booleantruewhen true, work performed by the KV layer is subject to admission controlAdvanced/Self-Hosted
admission.sql_kv_response.enabled
booleantruewhen true, work performed by the SQL layer when receiving a KV response is subject to admission controlBasic/Standard/Advanced/Self-Hosted
admission.sql_sql_response.enabled
booleantruewhen true, work performed by the SQL layer when receiving a DistSQL response is subject to admission controlBasic/Standard/Advanced/Self-Hosted
bulkio.backup.file_size
byte size128 MiBtarget size for individual data files produced during BACKUPBasic/Standard/Advanced/Self-Hosted
bulkio.backup.read_timeout
duration5m0samount of time after which a read attempt is considered timed out, which causes the backup to failBasic/Standard/Advanced/Self-Hosted
bulkio.backup.read_with_priority_after
duration1m0samount of time since the read-as-of time above which a BACKUP should use priority when retrying readsBasic/Standard/Advanced/Self-Hosted
physical_replication.consumer.minimum_flush_interval
(alias: bulkio.stream_ingestion.minimum_flush_interval)
duration5sthe minimum timestamp between flushes; flushes may still occur if internal buffers fill upAdvanced/Self-Hosted
changefeed.aggregator.flush_jitter
float0.1jitter aggregator flushes as a fraction of min_checkpoint_frequency. This setting has no effect if min_checkpoint_frequency is set to 0.Basic/Standard/Advanced/Self-Hosted
changefeed.backfill.concurrent_scan_requests
integer0number of concurrent scan requests per node issued during a backfillBasic/Standard/Advanced/Self-Hosted
changefeed.backfill.scan_request_size
integer524288the maximum number of bytes returned by each scan requestBasic/Standard/Advanced/Self-Hosted
changefeed.batch_reduction_retry.enabled
(alias: changefeed.batch_reduction_retry_enabled)
booleanfalseif true, kafka changefeeds upon erroring on an oversized batch will attempt to resend the messages with progressively lower batch sizesBasic/Standard/Advanced/Self-Hosted
changefeed.default_range_distribution_strategy
enumerationdefaultconfigures how work is distributed among nodes for a given changefeed. for the most balanced distribution, use `balanced_simple`. changing this setting will not override locality restrictions [default = 0, balanced_simple = 1]Basic/Standard/Advanced/Self-Hosted
changefeed.event_consumer_worker_queue_size
integer16if changefeed.event_consumer_workers is enabled, this setting sets the maximum number of events which a worker can bufferBasic/Standard/Advanced/Self-Hosted
changefeed.event_consumer_workers
integer0the number of workers to use when processing events: <0 disables, 0 assigns a reasonable default, >0 assigns the setting value. for experimental/core changefeeds and changefeeds using parquet format, this is disabledBasic/Standard/Advanced/Self-Hosted
changefeed.fast_gzip.enabled
booleantrueuse fast gzip implementationBasic/Standard/Advanced/Self-Hosted
changefeed.span_checkpoint.lag_threshold
(alias: changefeed.frontier_highwater_lag_checkpoint_threshold)
duration10m0sthe amount of time a changefeed's lagging (slowest) spans must lag behind its leading (fastest) spans before a span-level checkpoint to save leading span progress is written; if 0, span-level checkpoints due to lagging spans is disabledBasic/Standard/Advanced/Self-Hosted
changefeed.kafka.max_request_size
byte size256 MiBthe maximum number of uncompressed bytes sent in a single request to a Kafka broker; lowering this value helps avoid spurious "message too large" errors that can occur when multiple messages are combined into a single batch; this setting is overridden by the per-changefeed Flush { MaxBytes: <int> } optionBasic/Standard/Advanced/Self-Hosted
changefeed.kafka_v2_error_details.enabled
booleantrueif enabled, Kafka v2 sinks will include the message key, size, and MVCC timestamp in message too large errorsBasic/Standard/Advanced/Self-Hosted
changefeed.memory.per_changefeed_limit
byte size512 MiBcontrols amount of data that can be buffered per changefeedBasic/Standard/Advanced/Self-Hosted
changefeed.resolved_timestamp.min_update_interval
(alias: changefeed.min_highwater_advance)
duration0sminimum amount of time that must have elapsed since the last time a changefeed's resolved timestamp was updated before it is eligible to be updated again; default of 0 means no minimum interval is enforced but updating will still be limited by the average time it takes to checkpoint progressBasic/Standard/Advanced/Self-Hosted
changefeed.node_throttle_config
stringspecifies node level throttling configuration for all changefeedsBasic/Standard/Advanced/Self-Hosted
changefeed.partition_alg.enabled
booleanfalseif enabled, allows specifying the partition_alg changefeed option to choose between fnv-1a (default) and murmur2 hash functions for Kafka partitioning. Only affects changefeeds using a kafka sink with changefeed.new_kafka_sink_enabled set to true.Basic/Standard/Advanced/Self-Hosted
changefeed.progress.frontier_persistence.interval
duration30sminimum amount of time that must elapse before a changefeed will persist its entire span frontier againBasic/Standard/Advanced/Self-Hosted
changefeed.protect_timestamp.max_age
duration96h0m0sfail the changefeed if the protected timestamp age exceeds this threshold; 0 disables expirationBasic/Standard/Advanced/Self-Hosted
changefeed.protect_timestamp_interval
duration10m0scontrols how often the changefeed forwards its protected timestamp to the resolved timestampBasic/Standard/Advanced/Self-Hosted
changefeed.schema_feed.read_with_priority_after
duration1m0sretry with high priority if we were not able to read descriptors for too long; 0 disablesBasic/Standard/Advanced/Self-Hosted
changefeed.sink_io_workers
integer0the number of workers used by changefeeds when sending requests to the sink (currently the batching versions of webhook, pubsub, and kafka sinks that are enabled by changefeed.new_<sink type>_sink_enabled only): <0 disables, 0 assigns a reasonable default, >0 assigns the setting valueBasic/Standard/Advanced/Self-Hosted
cloudstorage.azure.concurrent_upload_buffers
integer1controls the number of concurrent buffers that will be used by the Azure client when uploading chunks. Each buffer can buffer up to cloudstorage.write_chunk.size of memory during an uploadBasic/Standard/Advanced/Self-Hosted
cloudstorage.azure.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.azure.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.azure.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.azure.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.gs.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.gs.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.gs.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.gs.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.http.custom_ca
stringcustom root CA (appended to system's default CAs) for verifying certificates when interacting with HTTPS storageBasic/Standard/Advanced/Self-Hosted
cloudstorage.http.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.http.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.http.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.http.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nodelocal.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nodelocal.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nodelocal.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nodelocal.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nullsink.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nullsink.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nullsink.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nullsink.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.s3.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.s3.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.s3.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.s3.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.timeout
duration10m0sthe timeout for import/export storage operationsBasic/Standard/Advanced/Self-Hosted
cloudstorage.userfile.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.userfile.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.userfile.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.userfile.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cluster.auto_upgrade.enabled
booleantruedisable automatic cluster version upgrade until resetBasic/Standard/Advanced/Self-Hosted
cluster.organization
stringorganization nameAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
cluster.preserve_downgrade_option
stringdisable (automatic or manual) cluster version upgrade from the specified version until resetBasic/Standard/Advanced/Self-Hosted
debug.zip.redact_addresses.enabled
booleanfalseenables the redaction of hostnames and ip addresses in debug zipBasic/Standard/Advanced/Self-Hosted
diagnostics.active_query_dumps.enabled
booleantrueexperimental: enable dumping of anonymized active queries to disk when node is under memory pressureAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
diagnostics.forced_sql_stat_reset.interval
duration2h0m0sinterval after which the reported SQL Stats are reset even if not collected by telemetry reporter. It has a max value of 24H.Basic/Standard/Advanced/Self-Hosted
diagnostics.memory_monitoring_dumps.enabled
booleantrueenable dumping of memory monitoring state at the same time as heap profiles are takenAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
diagnostics.reporting.enabled
booleantrueenable reporting diagnostic metrics to cockroach labs, but is ignored for Trial or Free licensesBasic/Standard/Advanced/Self-Hosted
diagnostics.reporting.interval
duration1h0m0sinterval at which diagnostics data should be reportedBasic/Standard/Advanced/Self-Hosted
enterprise.license
stringthe encoded cluster licenseAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
external.graphite.endpoint
stringif nonempty, push server metrics to the Graphite or Carbon server at the specified host:portBasic/Standard/Advanced/Self-Hosted
external.graphite.interval
duration10sthe interval at which metrics are pushed to Graphite (if enabled)Basic/Standard/Advanced/Self-Hosted
feature.backup.enabled
booleantrueset to true to enable backups, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.changefeed.enabled
booleantrueset to true to enable changefeeds, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.export.enabled
booleantrueset to true to enable exports, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.import.enabled
booleantrueset to true to enable imports, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.infer_rbr_region_col_using_constraint.enabled
booleanfalseset to true to enable looking up the region column via a foreign key constraint in a REGIONAL BY ROW table, false to disable; default is falseBasic/Standard/Advanced/Self-Hosted
feature.restore.enabled
booleantrueset to true to enable restore, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.schema_change.enabled
booleantrueset to true to enable schema changes, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.stats.enabled
booleantrueset to true to enable CREATE STATISTICS/ANALYZE, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.vector_index.enabled
booleantrueset to true to enable vector indexes, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
jobs.retention_time
duration336h0m0sthe amount of time for which records for completed jobs are retainedBasic/Standard/Advanced/Self-Hosted
kv.allocator.lease_rebalance_threshold
float0.05minimum fraction away from the mean a store's lease count can be before it is considered for lease-transfersAdvanced/Self-Hosted
kv.allocator.load_based_lease_rebalancing.enabled
booleantrueset to enable rebalancing of range leases based on load and latencyAdvanced/Self-Hosted
kv.allocator.load_based_rebalancing
enumerationleases and replicaswhether to rebalance based on the distribution of load across stores [off = 0, leases = 1, leases and replicas = 2, multi-metric only = 3, multi-metric and count = 4]Advanced/Self-Hosted
kv.allocator.load_based_rebalancing.objective
enumerationcpuwhat objective does the cluster use to rebalance; if set to `qps` the cluster will attempt to balance qps among stores, if set to `cpu` the cluster will attempt to balance cpu usage among stores [qps = 0, cpu = 1]Advanced/Self-Hosted
kv.allocator.load_based_rebalancing_interval
duration1m0sthe rough interval at which each store will check for load-based lease / replica rebalancing opportunitiesAdvanced/Self-Hosted
kv.allocator.qps_rebalance_threshold
float0.1minimum fraction away from the mean a store's QPS (such as queries per second) can be before it is considered overfull or underfullAdvanced/Self-Hosted
kv.allocator.range_rebalance_threshold
float0.05minimum fraction away from the mean a store's range count can be before it is considered overfull or underfullAdvanced/Self-Hosted
kv.allocator.store_cpu_rebalance_threshold
float0.1minimum fraction away from the mean a store's cpu usage can be before it is considered overfull or underfullAdvanced/Self-Hosted
kv.bulk_io_write.max_rate
byte size1.0 TiBthe rate limit (bytes/sec) to use for writes to disk on behalf of bulk io opsAdvanced/Self-Hosted
kv.bulk_io_write.min_capacity_remaining_fraction
float0.05remaining store capacity fraction below which bulk ingestion requests are rejectedAdvanced/Self-Hosted
kv.bulk_sst.max_allowed_overage
byte size64 MiBif positive, allowed size in excess of target size for SSTs from export requests; export requests (i.e. BACKUP) may buffer up to the sum of kv.bulk_sst.target_size and kv.bulk_sst.max_allowed_overage in memoryAdvanced/Self-Hosted
kv.bulk_sst.target_size
byte size16 MiBtarget size for SSTs emitted from export requests; export requests (i.e. BACKUP) may buffer up to the sum of kv.bulk_sst.target_size and kv.bulk_sst.max_allowed_overage in memoryAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.closed_timestamp.follower_reads.enabled
(alias: kv.closed_timestamp.follower_reads_enabled)
booleantrueallow (all) replicas to serve consistent historical reads based on closed timestamp informationAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.closed_timestamp.lead_for_global_reads_auto_tune.enabled
booleanfalseif enabled, observed network latency between leaseholders and their furthest follower will be used to adjust closed timestamp policies for ranges configured to serve global reads. kv.closed_timestamp.lead_for_global_reads_override takes precedence if set.Advanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.closed_timestamp.lead_for_global_reads_override
duration0sif nonzero, overrides the lead time that global_read ranges use to publish closed timestampsAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.closed_timestamp.side_transport_interval
duration200msthe interval at which the closed timestamp side-transport attempts to advance each range's closed timestamp; set to 0 to disable the side-transportAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.closed_timestamp.target_duration
duration3sif nonzero, attempt to provide closed timestamp notifications for timestamps trailing cluster time by approximately this durationAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.dist_sender.circuit_breaker.cancellation.enabled
booleantruewhen enabled, in-flight requests will be cancelled when the circuit breaker tripsBasic/Standard/Advanced/Self-Hosted
kv.dist_sender.circuit_breaker.cancellation.write_grace_period
duration10show long after the circuit breaker trips to cancel write requests (these can't retry internally, so should be long enough to allow quorum/lease recovery)Basic/Standard/Advanced/Self-Hosted
kv.dist_sender.circuit_breaker.probe.interval
duration3sinterval between replica probesBasic/Standard/Advanced/Self-Hosted
kv.dist_sender.circuit_breaker.probe.threshold
duration3sduration of errors or stalls after which a replica will be probedBasic/Standard/Advanced/Self-Hosted
kv.dist_sender.circuit_breaker.probe.timeout
duration3stimeout for replica probesBasic/Standard/Advanced/Self-Hosted
kv.dist_sender.circuit_breakers.mode
enumerationliveness range onlyset of ranges to trip circuit breakers for failing or stalled replicas [no ranges = 0, liveness range only = 1, all ranges = 2]Basic/Standard/Advanced/Self-Hosted
kv.lease_transfer_read_summary.global_budget
byte size0 Bcontrols the maximum number of bytes that will be used to summarize the global segment of the timestamp cache during lease transfers and range merges. A smaller budget will result in loss of precision.Advanced/Self-Hosted
kv.lease_transfer_read_summary.local_budget
byte size4.0 MiBcontrols the maximum number of bytes that will be used to summarize the local segment of the timestamp cache during lease transfers and range merges. A smaller budget will result in loss of precision.Advanced/Self-Hosted
kv.log_range_and_node_events.enabled
booleantrueset to true to transactionally log range events (e.g., split, merge, add/remove voter/non-voter) into system.rangelog and node join and restart events into system.eventlogAdvanced/Self-Hosted
kv.protectedts.reconciliation.interval
duration5m0sthe frequency for reconciling jobs with protected timestamp recordsAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.raft.leader_fortification.fraction_enabled
float1controls the fraction of ranges for which the raft leader fortification protocol is enabled. Leader fortification is needed for a range to use a Leader lease. Set to 0.0 to disable leader fortification and, by extension, Leader leases. Set to 1.0 to enable leader fortification for all ranges and, by extension, use Leader leases for all ranges which do not require expiration-based leases. Set to a value between 0.0 and 1.0 to gradually roll out Leader leases across the ranges in a cluster.Advanced/Self-Hosted
kv.range.range_size_hard_cap
byte size8.0 GiBhard cap on the maximum size a range is allowed to grow to without splitting before writes to the range are blocked. Takes precedence over all other configurationsAdvanced/Self-Hosted
kv.range_split.by_load.enabled
(alias: kv.range_split.by_load_enabled)
booleantrueallow automatic splits of ranges based on where load is concentratedAdvanced/Self-Hosted
kv.range_split.load_cpu_threshold
duration500msthe CPU use per second over which, the range becomes a candidate for load based splittingAdvanced/Self-Hosted
kv.range_split.load_qps_threshold
integer2500the QPS over which, the range becomes a candidate for load based splittingAdvanced/Self-Hosted
kv.rangefeed.client.stream_startup_rate
integer100controls the rate per second the client will initiate new rangefeed stream for a single range; 0 implies unlimitedBasic/Standard/Advanced/Self-Hosted
kv.rangefeed.closed_timestamp_refresh_interval
duration3sthe interval at which closed-timestamp updates are delivered to rangefeeds; set to 0 to use kv.closed_timestamp.side_transport_intervalAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.rangefeed.enabled
booleanfalseif set, rangefeed registration is enabledAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.replica_circuit_breaker.slow_replication_threshold
duration1m0sduration after which slow proposals trip the per-Replica circuit breaker (zero duration disables breakers)Advanced/Self-Hosted
kv.replica_raft.leaderless_unavailable_threshold
duration1m0sduration after which leaderless replicas is considered unavailable. Set to 0 to disable leaderless replica availability checksAdvanced/Self-Hosted
kv.replica_stats.addsst_request_size_factor
integer50000the divisor that is applied to addsstable request sizes, then recorded in a leaseholders QPS; 0 means all requests are treated as cost 1Advanced/Self-Hosted
kv.replication_reports.interval
duration1m0sthe frequency for generating the replication_constraint_stats, replication_stats_report and replication_critical_localities reports (set to 0 to disable)Advanced/Self-Hosted
kv.snapshot_rebalance.max_rate
byte size32 MiBthe rate limit (bytes/sec) to use for rebalance and upreplication snapshotsAdvanced/Self-Hosted
kv.transaction.max_intents_and_locks
integer0maximum count of inserts or durable locks for a single transactions, 0 to disableBasic/Standard/Advanced/Self-Hosted
kv.transaction.max_intents_bytes
integer4194304maximum number of bytes used to track locks in transactionsBasic/Standard/Advanced/Self-Hosted
kv.transaction.max_refresh_spans_bytes
integer4194304maximum number of bytes used to track refresh spans in serializable transactionsBasic/Standard/Advanced/Self-Hosted
kv.transaction.randomized_anchor_key.enabled
booleanfalsedictates whether a transaction's anchor key is randomized or notBasic/Standard/Advanced/Self-Hosted
kv.transaction.reject_over_max_intents_budget.enabled
booleanfalseif set, transactions that exceed their lock tracking budget (kv.transaction.max_intents_bytes) are rejected instead of having their lock spans imprecisely compressedBasic/Standard/Advanced/Self-Hosted
kv.transaction.write_pipelining.locking_reads.enabled
booleantrueif enabled, transactional locking reads are pipelined through Raft consensusBasic/Standard/Advanced/Self-Hosted
kv.transaction.write_pipelining.ranged_writes.enabled
booleantrueif enabled, transactional ranged writes are pipelined through Raft consensusBasic/Standard/Advanced/Self-Hosted
kv.transaction.write_pipelining.enabled
(alias: kv.transaction.write_pipelining_enabled)
booleantrueif enabled, transactional writes are pipelined through Raft consensusBasic/Standard/Advanced/Self-Hosted
kv.transaction.write_pipelining.max_batch_size
(alias: kv.transaction.write_pipelining_max_batch_size)
integer128if non-zero, defines the maximum size batch that will be pipelined through Raft consensusBasic/Standard/Advanced/Self-Hosted
kvadmission.store.provisioned_bandwidth
byte size0 Bif set to a non-zero value, this is used as the provisioned bandwidth (in bytes/s), for each store. It can be overridden on a per-store basis using the --store flag. Note that setting the provisioned bandwidth to a positive value may enable disk bandwidth based admission control, since admission.disk_bandwidth_tokens.elastic.enabled defaults to trueAdvanced/Self-Hosted
kvadmission.store.snapshot_ingest_bandwidth_control.enabled
booleantrueif set to true, snapshot ingests will be subject to disk write control in ACAdvanced/Self-Hosted
log.channel_compatibility_mode.enabled
booleantruewhen true, logs will continue to log to the expected logging channels; when false, logs will be moved to new logging channels as part of a logging channel consolidation effortBasic/Standard/Advanced/Self-Hosted
obs.tablemetadata.automatic_updates.enabled
booleanfalseenables automatic updates of the table metadata cache system.table_metadataBasic/Standard/Advanced/Self-Hosted
obs.tablemetadata.data_valid_duration
duration20m0sthe duration for which the data in system.table_metadata is considered validBasic/Standard/Advanced/Self-Hosted
schedules.backup.gc_protection.enabled
booleantrueenable chaining of GC protection across backups run as part of a scheduleBasic/Standard/Advanced/Self-Hosted
security.client_cert.subject_required.enabled
booleanfalsemandates a requirement for subject role to be set for db userAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
security.ocsp.mode
enumerationoffuse OCSP to check whether TLS certificates are revoked. If the OCSP server is unreachable, in strict mode all certificates will be rejected and in lax mode all certificates will be accepted. [off = 0, lax = 1, strict = 2]Basic/Standard/Advanced/Self-Hosted
security.ocsp.timeout
duration3stimeout before considering the OCSP server unreachableBasic/Standard/Advanced/Self-Hosted
security.provisioning.ldap.enabled
booleanfalseenables automatic creation of SQL users upon successful LDAP loginBasic/Standard/Advanced/Self-Hosted
server.auth_log.sql_connections.enabled
booleanfalseif set, log SQL client connect and disconnect events to the SESSIONS log channel (note: may hinder performance on loaded nodes)Basic/Standard/Advanced/Self-Hosted
server.auth_log.sql_sessions.enabled
booleanfalseif set, log verbose SQL session authentication events to the SESSIONS log channel (note: may hinder performance on loaded nodes). Session start and end events are always logged regardless of this setting; disable the SESSIONS log channel to suppress them.Basic/Standard/Advanced/Self-Hosted
server.authentication_cache.enabled
booleantrueenables a cache used during authentication to avoid lookups to system tables when retrieving per-user authentication-related informationBasic/Standard/Advanced/Self-Hosted
server.child_metrics.enabled
booleanfalseenables the exporting of child metrics, additional prometheus time series with extra labelsBasic/Standard/Advanced/Self-Hosted
server.child_metrics.include_aggregate.enabled
booleantrueinclude the reporting of the aggregate time series when child metrics are enabled. This cluster setting has no effect if child metrics are disabled.Basic/Standard/Advanced/Self-Hosted
server.clock.forward_jump_check.enabled
(alias: server.clock.forward_jump_check_enabled)
booleanfalseif enabled, forward clock jumps > max_offset/2 will cause a panicBasic/Standard/Advanced/Self-Hosted
server.clock.persist_upper_bound_interval
duration0sthe interval between persisting the wall time upper bound of the clock. The clock does not generate a wall time greater than the persisted timestamp and will panic if it sees a wall time greater than this value. When cockroach starts, it waits for the wall time to catch-up till this persisted timestamp. This guarantees monotonic wall time across server restarts. Not setting this or setting a value of 0 disables this feature.Basic/Standard/Advanced/Self-Hosted
server.consistency_check.max_rate
byte size8.0 MiBthe rate limit (bytes/sec) to use for consistency checks; used in conjunction with server.consistency_check.interval to control the frequency of consistency checks. Note that setting this too high can negatively impact performance.Advanced/Self-Hosted
server.eventlog.enabled
booleantrueif set, logged notable events are also stored in the table system.eventlogBasic/Standard/Advanced/Self-Hosted
server.eventlog.ttl
duration2160h0m0sif nonzero, entries in system.eventlog older than this duration are periodically purgedBasic/Standard/Advanced/Self-Hosted
server.host_based_authentication.configuration
stringhost-based authentication configuration to use during connection authenticationBasic/Standard/Advanced/Self-Hosted
server.hot_ranges_request.node.timeout
duration5m0sthe duration allowed for a single node to return hot range data before the request is cancelled; if set to 0, there is no timeoutBasic/Standard/Advanced/Self-Hosted
server.hsts.enabled
booleanfalseif true, HSTS headers will be sent along with all HTTP requests. The headers will contain a max-age setting of one year. Browsers honoring the header will always use HTTPS to access the DB Console. Ensure that TLS is correctly configured prior to enabling.Basic/Standard/Advanced/Self-Hosted
server.http.base_path
string/path to redirect the user to upon successful loginBasic/Standard/Advanced/Self-Hosted
server.identity_map.configuration
stringsystem-identity to database-username mappingsBasic/Standard/Advanced/Self-Hosted
server.jwt_authentication.audience
stringsets accepted audience values for JWT logins over the SQL interfaceBasic/Standard/Advanced/Self-Hosted
server.jwt_authentication.claim
stringsets the JWT claim that is parsed to get the usernameBasic/Standard/Advanced/Self-Hosted
server.jwt_authentication.client.timeout
duration15ssets the client timeout for external calls made during JWT authentication (e.g. fetching JWKS, etc.)Basic/Standard/Advanced/Self-Hosted
server.jwt_authentication.enabled
booleanfalseenables or disables JWT login for the SQL interfaceBasic/Standard/Advanced/Self-Hosted
server.jwt_authentication.issuers.configuration
(alias: server.jwt_authentication.issuers)
stringsets accepted issuer values for JWT logins over the SQL interface which can be a single issuer URL string or a JSON string containing an array of issuer URLs or a JSON object containing map of issuer URLS to JWKS URIsBasic/Standard/Advanced/Self-Hosted
server.jwt_authentication.issuers.custom_ca
stringsets the PEM encoded custom root CA for verifying certificates while fetching JWKSBasic/Standard/Advanced/Self-Hosted
server.jwt_authentication.jwks
string{"keys":[]}sets the public key set for JWT logins over the SQL interface (JWKS format)Basic/Standard/Advanced/Self-Hosted
server.jwt_authentication.jwks_auto_fetch.enabled
booleanfalseenables or disables automatic fetching of JWKS from the issuer's well-known endpoint or JWKS URI set in JWTAuthIssuersConfig. If this is enabled, the server.jwt_authentication.jwks will be ignored.Basic/Standard/Advanced/Self-Hosted
server.ldap_authentication.client.tls_certificate
stringsets the client certificate PEM for establishing mTLS connection with LDAP serverBasic/Standard/Advanced/Self-Hosted
server.ldap_authentication.client.tls_key
stringsets the client key PEM for establishing mTLS connection with LDAP serverBasic/Standard/Advanced/Self-Hosted
server.ldap_authentication.domain.custom_ca
stringsets the PEM encoded custom root CA for verifying domain certificates when establishing connection with LDAP serverBasic/Standard/Advanced/Self-Hosted
server.log_gc.max_deletions_per_cycle
integer1000the maximum number of entries to delete on each purge of log-like system tablesBasic/Standard/Advanced/Self-Hosted
server.log_gc.period
duration1h0m0sthe period at which log-like system tables are checked for old entriesBasic/Standard/Advanced/Self-Hosted
server.max_connections_per_gateway
integer-1the maximum number of SQL connections per gateway allowed at a given time (note: this will only limit future connection attempts and will not affect already established connections). Negative values result in unlimited number of connections. Superusers are not affected by this limit.Basic/Standard/Advanced/Self-Hosted
server.max_open_transactions_per_gateway
integer-1the maximum number of open SQL transactions per gateway allowed at a given time. Negative values result in unlimited number of connections. Superusers are not affected by this limit.Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.autologin.enabled
(alias: server.oidc_authentication.autologin)
booleanfalseif true, logged-out visitors to the DB Console will be automatically redirected to the OIDC login endpointBasic/Standard/Advanced/Self-Hosted
server.oidc_authentication.button_text
stringLog in with your OIDC providertext to show on button on DB Console login page to login with your OIDC provider (only shown if OIDC is enabled)Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.claim_json_key
stringsets JSON key of principal to extract from payload after OIDC authentication completes (usually email or sid)Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.client.timeout
duration15ssets the client timeout for external calls made during OIDC authentication (e.g. authorization code flow, etc.)Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.client_id
stringsets OIDC client idBasic/Standard/Advanced/Self-Hosted
server.oidc_authentication.client_secret
stringsets OIDC client secretBasic/Standard/Advanced/Self-Hosted
server.oidc_authentication.enabled
booleanfalseenables or disables OIDC login for the DB ConsoleBasic/Standard/Advanced/Self-Hosted
server.oidc_authentication.principal_regex
string(.+)regular expression to apply to extracted principal (see claim_json_key setting) to translate to SQL user (golang regex format, must include 1 grouping to extract)Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.provider.custom_ca
stringsets the PEM encoded custom root CA for verifying certificates while authenticating through the OIDC providerBasic/Standard/Advanced/Self-Hosted
server.oidc_authentication.provider_url
stringsets OIDC provider URL ({provider_url}/.well-known/openid-configuration must resolve)Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.redirect_url
stringhttps://localhost:8080/oidc/v1/callbacksets OIDC redirect URL via a URL string or a JSON string containing a required `redirect_urls` key with an object that maps from region keys to URL strings (URLs should point to your load balancer and must route to the path /oidc/v1/callback)Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.scopes
stringopenidsets OIDC scopes to include with authentication request (space delimited list of strings, required to start with `openid`)Basic/Standard/Advanced/Self-Hosted
server.rangelog.ttl
duration720h0m0sif nonzero, entries in system.rangelog older than this duration are periodically purgedAdvanced/Self-Hosted
server.redact_sensitive_settings.enabled
booleanfalseenables or disables the redaction of sensitive settings in the output of SHOW CLUSTER SETTINGS and SHOW ALL CLUSTER SETTINGS for users without the MODIFYCLUSTERSETTING privilegeBasic/Standard/Advanced/Self-Hosted
server.shutdown.connections.timeout
(alias: server.shutdown.connection_wait)
duration0sthe maximum amount of time a server waits for all SQL connections to be closed before proceeding with a drain. (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Basic/Standard/Advanced/Self-Hosted
server.shutdown.initial_wait
(alias: server.shutdown.drain_wait)
duration0sthe amount of time a server waits in an unready state before proceeding with a drain (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting. --drain-wait is to specify the duration of the whole draining process, while server.shutdown.initial_wait is to set the wait time for health probes to notice that the node is not ready.)Basic/Standard/Advanced/Self-Hosted
server.shutdown.lease_transfer_iteration.timeout
(alias: server.shutdown.lease_transfer_wait)
duration5sthe timeout for a single iteration of the range lease transfer phase of draining (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Advanced/Self-Hosted
server.shutdown.transactions.timeout
(alias: server.shutdown.query_wait)
duration10sthe timeout for waiting for active transactions to finish during a drain (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Basic/Standard/Advanced/Self-Hosted
server.sql_tcp_keep_alive.count
integer3maximum number of probes that will be sent out before a connection is dropped because it's unresponsive (Linux and Darwin only)Basic/Standard/Advanced/Self-Hosted
server.sql_tcp_keep_alive.interval
duration10stime between keep alive probes and idle time before probes are sent outBasic/Standard/Advanced/Self-Hosted
server.time_until_store_dead
duration5m0sthe time after which if there is no new gossiped information about a store, it is considered deadBasic/Standard/Advanced/Self-Hosted
server.user_login.cert_password_method.auto_scram_promotion.enabled
booleantruewhether to automatically promote cert-password authentication to use SCRAMBasic/Standard/Advanced/Self-Hosted
server.user_login.downgrade_scram_stored_passwords_to_bcrypt.enabled
booleantrueif server.user_login.password_encryption=crdb-bcrypt, this controls whether to automatically re-encode stored passwords using scram-sha-256 to crdb-bcryptBasic/Standard/Advanced/Self-Hosted
server.user_login.min_password_length
integer1the minimum length accepted for passwords set in cleartext via SQL. Note that a value lower than 1 is ignored: passwords cannot be empty in any case. This setting only applies when adding new users or altering an existing user's password; it will not affect existing logins.Basic/Standard/Advanced/Self-Hosted
server.user_login.password_encryption
enumerationscram-sha-256which hash method to use to encode cleartext passwords passed via ALTER/CREATE USER/ROLE WITH PASSWORD [crdb-bcrypt = 2, scram-sha-256 = 3]Basic/Standard/Advanced/Self-Hosted
server.user_login.password_hashes.default_cost.crdb_bcrypt
integer10the hashing cost to use when storing passwords supplied as cleartext by SQL clients with the hashing method crdb-bcrypt (allowed range: 4-31)Basic/Standard/Advanced/Self-Hosted
server.user_login.password_hashes.default_cost.scram_sha_256
integer10610the hashing cost to use when storing passwords supplied as cleartext by SQL clients with the hashing method scram-sha-256 (allowed range: 4096-240000000000)Basic/Standard/Advanced/Self-Hosted
server.user_login.rehash_scram_stored_passwords_on_cost_change.enabled
booleantrueif server.user_login.password_hashes.default_cost.scram_sha_256 differs from the cost in a stored hash, this controls whether to automatically re-encode stored passwords using scram-sha-256 with the new default costBasic/Standard/Advanced/Self-Hosted
server.user_login.timeout
duration10stimeout after which client authentication times out if some system range is unavailable (0 = no timeout)Basic/Standard/Advanced/Self-Hosted
server.user_login.upgrade_bcrypt_stored_passwords_to_scram.enabled
booleantrueif server.user_login.password_encryption=scram-sha-256, this controls whether to automatically re-encode stored passwords using crdb-bcrypt to scram-sha-256Basic/Standard/Advanced/Self-Hosted
server.web_session.purge.ttl
duration1h0m0sif nonzero, entries in system.web_sessions older than this duration are periodically purgedBasic/Standard/Advanced/Self-Hosted
server.web_session.timeout
(alias: server.web_session_timeout)
duration168h0m0sthe duration that a newly created web session will be validBasic/Standard/Advanced/Self-Hosted
spanconfig.bounds.enabled
booleantruedictates whether span config bounds are consulted when serving span configs for secondary tenantsAdvanced/Self-Hosted
spanconfig.range_coalescing.system.enabled
(alias: spanconfig.storage_coalesce_adjacent.enabled)
booleantruecollapse adjacent ranges with the same span configs, for the ranges specific to the system tenantAdvanced/Self-Hosted
spanconfig.range_coalescing.application.enabled
(alias: spanconfig.tenant_coalesce_adjacent.enabled)
booleantruecollapse adjacent ranges with the same span configs across all secondary tenant keyspacesAdvanced/Self-Hosted
sql.auth.change_own_password.enabled
booleanfalsecontrols whether a user is allowed to change their own password, even if they have no other privilegesBasic/Standard/Advanced/Self-Hosted
sql.auth.grant_option_for_owner.enabled
booleantruedetermines whether the GRANT OPTION for privileges is implicitly given to the owner of an objectBasic/Standard/Advanced/Self-Hosted
sql.auth.grant_option_inheritance.enabled
booleantruedetermines whether the GRANT OPTION for privileges is inherited through role membershipBasic/Standard/Advanced/Self-Hosted
sql.auth.public_schema_create_privilege.enabled
booleantruedetermines whether to grant all users the CREATE privileges on the public schema when it is createdBasic/Standard/Advanced/Self-Hosted
sql.auth.skip_underlying_view_privilege_checks.enabled
booleantruedetermines whether to skip privilege checks on tables underlying views. When enabled, users with SELECT privileges on a view can query it regardless of their privileges on the underlying tables, and row-level security policies are evaluated as the invoking user rather than the view owner. This restores pre-v26.2 behavior.Basic/Standard/Advanced/Self-Hosted
sql.catalog.allow_leased_descriptors.enabled
booleanfalseif true, catalog views (crdb_internal, information_schema, pg_catalog) can use leased descriptors for improved performanceBasic/Standard/Advanced/Self-Hosted
sql.closed_session_cache.capacity
integer1000the maximum number of sessions in the cacheBasic/Standard/Advanced/Self-Hosted
sql.closed_session_cache.time_to_live
integer3600the maximum time to live, in secondsBasic/Standard/Advanced/Self-Hosted
sql.contention.event_store.capacity
byte size64 MiBthe in-memory storage capacity per-node of contention event storeBasic/Standard/Advanced/Self-Hosted
sql.contention.event_store.duration_threshold
duration0sminimum contention duration to cause the contention events to be collected into crdb_internal.transaction_contention_eventsBasic/Standard/Advanced/Self-Hosted
sql.contention.record_serialization_conflicts.enabled
booleantrueenables recording 40001 errors with conflicting txn meta as SERIALIZATION_CONFLICT contention events into crdb_internal.transaction_contention_eventsBasic/Standard/Advanced/Self-Hosted
sql.contention.txn_id_cache.max_size
byte size64 MiBthe maximum byte size TxnID cache will use (set to 0 to disable)Basic/Standard/Advanced/Self-Hosted
sql.cross_db_fks.enabled
booleanfalseif true, creating foreign key references across databases is allowedBasic/Standard/Advanced/Self-Hosted
sql.cross_db_sequence_owners.enabled
booleanfalseif true, creating sequences owned by tables from other databases is allowedBasic/Standard/Advanced/Self-Hosted
sql.cross_db_sequence_references.enabled
booleanfalseif true, sequences referenced by tables from other databases are allowedBasic/Standard/Advanced/Self-Hosted
sql.cross_db_views.enabled
booleanfalseif true, creating views that refer to other databases is allowedBasic/Standard/Advanced/Self-Hosted
sql.defaults.cost_scans_with_default_col_size.enabled
booleanfalsesetting to true uses the same size for all columns to compute scan cost
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.datestyle
enumerationiso, mdydefault value for DateStyle session setting [iso, mdy = 0, iso, dmy = 1, iso, ymd = 2]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.default_hash_sharded_index_bucket_count
integer16used as bucket count if bucket count is not specified in hash sharded index definition
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.default_int_size
integer8the size, in bytes, of an INT type
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.disallow_full_table_scans.enabled
booleanfalsesetting to true rejects queries that have planned a full table scan; set large_full_scan_rows > 0 to allow small full table scans estimated to read fewer than large_full_scan_rows
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.distsql
enumerationautodefault distributed SQL execution mode [off = 0, auto = 1, on = 2, always = 3]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.experimental_alter_column_type.enabled
booleanfalsedefault value for experimental_alter_column_type session setting; enables the use of ALTER COLUMN TYPE for general conversions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.experimental_distsql_planning
enumerationoffdefault experimental_distsql_planning mode; enables experimental opt-driven DistSQL planning [off = 0, on = 1]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.experimental_enable_unique_without_index_constraints.enabled
booleanfalsedefault value for experimental_enable_unique_without_index_constraints session setting; disables unique without index constraints by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.experimental_implicit_column_partitioning.enabled
booleanfalsedefault value for experimental_enable_temp_tables; allows for the use of implicit column partitioning
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.experimental_temporary_tables.enabled
booleanfalsedefault value for experimental_enable_temp_tables; allows for use of temporary tables by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.foreign_key_cascades_limit
integer10000default value for foreign_key_cascades_limit session setting; limits the number of cascading operations that run as part of a single query
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.idle_in_session_timeout
duration0sdefault value for the idle_in_session_timeout session setting; controls the duration a session is permitted to idle before the session is terminated; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.idle_in_transaction_session_timeout
duration0sdefault value for the idle_in_transaction_session_timeout; controls the duration a session is permitted to idle in a transaction before the session is terminated; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.implicit_select_for_update.enabled
booleantruedefault value for enable_implicit_select_for_update session setting; enables FOR UPDATE locking during the row-fetch phase of mutation statements
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.insert_fast_path.enabled
booleantruedefault value for enable_insert_fast_path session setting; enables a specialized insert path
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.intervalstyle
enumerationpostgresdefault value for IntervalStyle session setting [postgres = 0, iso_8601 = 1, sql_standard = 2]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.large_full_scan_rows
float0default value for large_full_scan_rows session variable which determines the table size at which full scans are considered large and disallowed when disallow_full_table_scans is set to true; set to 0 to reject all full table or full index scans when disallow_full_table_scans is true
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.locality_optimized_partitioned_index_scan.enabled
booleantruedefault value for locality_optimized_partitioned_index_scan session setting; enables searching for rows in the current region before searching remote regions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.lock_timeout
duration0sdefault value for the lock_timeout session setting; controls the duration a query is permitted to wait while attempting to acquire a lock on a key or while blocking on an existing lock in order to perform a non-locking read on a key; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.on_update_rehome_row.enabled
booleantruedefault value for on_update_rehome_row; enables ON UPDATE rehome_row() expressions to trigger on updates
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.optimizer_use_histograms.enabled
booleantruedefault value for optimizer_use_histograms session setting; enables usage of histograms in the optimizer by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.optimizer_use_multicol_stats.enabled
booleantruedefault value for optimizer_use_multicol_stats session setting; enables usage of multi-column stats in the optimizer by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.override_alter_primary_region_in_super_region.enabled
booleanfalsedefault value for override_alter_primary_region_in_super_region; allows for altering the primary region even if the primary region is a member of a super region
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.override_multi_region_zone_config.enabled
booleanfalsedefault value for override_multi_region_zone_config; allows for overriding the zone configs of a multi-region table or database
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.prefer_lookup_joins_for_fks.enabled
booleanfalsedefault value for prefer_lookup_joins_for_fks session setting; causes foreign key operations to use lookup joins when possible
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.primary_region
stringif not empty, all databases created without a PRIMARY REGION will implicitly have the given PRIMARY REGION
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.reorder_joins_limit
integer8default number of joins to reorder
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.require_explicit_primary_keys.enabled
booleanfalsedefault value for requiring explicit primary keys in CREATE TABLE statements
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.results_buffer.size
byte size512 KiBdefault size of the buffer that accumulates results for a statement or a batch of statements before they are sent to the client. This can be overridden on an individual connection with the 'results_buffer_size' parameter. Note that auto-retries generally only happen while no results have been delivered to the client, so reducing this size can increase the number of retriable errors a client receives. On the other hand, increasing the buffer size can increase the delay until the client receives the first result row. Updating the setting only affects new connections. Setting to 0 disables any buffering.
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.serial_normalization
enumerationrowiddefault handling of SERIAL in table definitions [rowid = 0, virtual_sequence = 1, sql_sequence = 2, sql_sequence_cached = 3, unordered_rowid = 4, sql_sequence_cached_node = 5]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.statement_timeout
duration0sdefault value for the statement_timeout session setting; controls the duration a query is permitted to run before it is canceled; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.stub_catalog_tables.enabled
booleantruedefault value for stub_catalog_tables session setting
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.super_regions.enabled
booleanfalsedefault value for enable_super_regions; allows for the usage of super regions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.transaction_rows_read_err
integer0the limit for the number of rows read by a SQL transaction which - once exceeded - will fail the transaction (or will trigger a logging event to SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.transaction_rows_read_log
integer0the threshold for the number of rows read by a SQL transaction which - once exceeded - will trigger a logging event to SQL_PERF (or SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.transaction_rows_written_err
integer0the limit for the number of rows written by a SQL transaction which - once exceeded - will fail the transaction (or will trigger a logging event to SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.transaction_rows_written_log
integer0the threshold for the number of rows written by a SQL transaction which - once exceeded - will trigger a logging event to SQL_PERF (or SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.use_declarative_schema_changer
enumerationondefault value for use_declarative_schema_changer session setting;disables new schema changer by default [off = 0, on = 1, unsafe = 2, unsafe_always = 3]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.vectorize
enumerationondefault vectorize mode [on = 0, on = 1, on = 2, experimental_always = 3, off = 4]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.zigzag_join.enabled
booleanfalsedefault value for enable_zigzag_join session setting; disallows use of zig-zag join by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.distsql.temp_storage.workmem
byte size64 MiBmaximum amount of memory in bytes a processor can use before falling back to temp storageBasic/Standard/Advanced/Self-Hosted
sql.guardrails.max_row_size_err
byte size512 MiBmaximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an error is returned; use 0 to disableBasic/Standard/Advanced/Self-Hosted
sql.guardrails.max_row_size_log
byte size64 MiBmaximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an event is logged to SQL_PERF (or SQL_INTERNAL_PERF if the mutating statement was internal); use 0 to disableBasic/Standard/Advanced/Self-Hosted
sql.hash_sharded_range_pre_split.max
integer16max pre-split ranges to have when adding hash sharded index to an existing tableBasic/Standard/Advanced/Self-Hosted
sql.index_recommendation.drop_unused_duration
duration168h0m0sthe index unused duration at which we begin to recommend dropping the indexBasic/Standard/Advanced/Self-Hosted
sql.insights.anomaly_detection.enabled
booleantrueenable per-fingerprint latency recording and anomaly detectionBasic/Standard/Advanced/Self-Hosted
sql.insights.anomaly_detection.latency_threshold
duration50msstatements must surpass this threshold to trigger anomaly detection and identificationBasic/Standard/Advanced/Self-Hosted
sql.insights.anomaly_detection.memory_limit
byte size1.0 MiBthe maximum amount of memory allowed for tracking statement latenciesBasic/Standard/Advanced/Self-Hosted
sql.insights.execution_insights_capacity
integer1000the size of the per-node store of execution insightsBasic/Standard/Advanced/Self-Hosted
sql.insights.high_retry_count.threshold
integer10the number of retries a slow statement must have undergone for its high retry count to be highlighted as a potential problemBasic/Standard/Advanced/Self-Hosted
sql.insights.latency_threshold
duration100msamount of time after which an executing statement is considered slow. Use 0 to disable.Basic/Standard/Advanced/Self-Hosted
sql.log.redact_names.enabled
booleanfalseif set, schema object identifiers are redacted in SQL statements that appear in event logsBasic/Standard/Advanced/Self-Hosted
sql.log.scan_row_count_misestimate.enabled
booleanfalsewhen set to true, log a warning when a scan's actual row count differs significantly from the optimizer's estimateBasic/Standard/Advanced/Self-Hosted
sql.log.slow_query.experimental_full_table_scans.enabled
booleanfalsewhen set to true, statements that perform a full table/index scan will be logged to the slow query log even if they do not meet the latency threshold. Must have the slow query log enabled for this setting to have any effect.Basic/Standard/Advanced/Self-Hosted
sql.log.slow_query.internal_queries.enabled
booleanfalsewhen set to true, internal queries which exceed the slow query log threshold are logged to a separate log. Must have the slow query log enabled for this setting to have any effect.Basic/Standard/Advanced/Self-Hosted
sql.log.slow_query.latency_threshold
duration0swhen set to non-zero, log statements whose service latency exceeds the threshold to a secondary logger on each nodeBasic/Standard/Advanced/Self-Hosted
sql.log.user_audit
stringuser/role-based audit logging configuration. An enterprise license is required for this cluster setting to take effect.Basic/Standard/Advanced/Self-Hosted
sql.log.user_audit.reduced_config.enabled
booleanfalseenables logic to compute a reduced audit configuration, computing the audit configuration only once at session start instead of at each SQL event. The tradeoff with the increase in performance (~5%), is that changes to the audit configuration (user role memberships/cluster setting) are not reflected within session. Users will need to start a new session to see these changes in their auditing behaviour.Basic/Standard/Advanced/Self-Hosted
sql.metrics.application_name.enabled
booleanfalsewhen enabled, SQL metrics would export application name as an additional label as part of child metrics. The number of unique label combinations is limited to 5000 by default.Basic/Standard/Advanced/Self-Hosted
sql.metrics.database_name.enabled
booleanfalsewhen enabled, SQL metrics would export database name as an additional label as part of child metrics. The number of unique label combinations is limited to 5000 by default.Basic/Standard/Advanced/Self-Hosted
sql.metrics.index_usage_stats.enabled
booleantruecollect per index usage statisticsBasic/Standard/Advanced/Self-Hosted
sql.metrics.max_mem_reported_stmt_fingerprints
integer100000the maximum number of reported statement fingerprints stored in memoryBasic/Standard/Advanced/Self-Hosted
sql.metrics.max_mem_reported_txn_fingerprints
integer100000the maximum number of reported transaction fingerprints stored in memoryBasic/Standard/Advanced/Self-Hosted
sql.metrics.max_mem_stmt_fingerprints
integer7500the maximum number of statement fingerprints stored in memoryBasic/Standard/Advanced/Self-Hosted
sql.metrics.max_mem_txn_fingerprints
integer7500the maximum number of transaction fingerprints stored in memoryBasic/Standard/Advanced/Self-Hosted
sql.metrics.statement_details.dump_to_logs.enabled
(alias: sql.metrics.statement_details.dump_to_logs)
booleanfalsedump collected statement statistics to node logs when periodically clearedBasic/Standard/Advanced/Self-Hosted
sql.metrics.statement_details.enabled
booleantruecollect per-statement query statisticsBasic/Standard/Advanced/Self-Hosted
sql.metrics.statement_details.gateway_node.enabled
booleanfalsesave the gateway node for each statement fingerprint. If false, the value will be stored as 0.Basic/Standard/Advanced/Self-Hosted
sql.metrics.statement_details.index_recommendation_collection.enabled
booleantruegenerate an index recommendation for each fingerprint IDBasic/Standard/Advanced/Self-Hosted
sql.metrics.statement_details.max_mem_reported_idx_recommendations
integer5000the maximum number of reported index recommendation info stored in memoryBasic/Standard/Advanced/Self-Hosted
sql.metrics.statement_details.threshold
duration0sminimum execution time to cause statement statistics to be collected. If configured, no transaction stats are collected.Basic/Standard/Advanced/Self-Hosted
sql.metrics.transaction_details.enabled
booleantruecollect per-application transaction statisticsBasic/Standard/Advanced/Self-Hosted
sql.multiple_modifications_of_table.enabled
booleanfalseif true, allow statements containing multiple INSERT ON CONFLICT, UPSERT, UPDATE, or DELETE subqueries modifying the same table, at the risk of data corruption if the same row is modified multiple times by a single statement (multiple INSERT subqueries without ON CONFLICT cannot cause corruption and are always allowed)Basic/Standard/Advanced/Self-Hosted
sql.multiregion.drop_primary_region.enabled
booleantrueallows dropping the PRIMARY REGION of a database if it is the last regionBasic/Standard/Advanced/Self-Hosted
sql.notices.enabled
booleantrueenable notices in the server/client protocol being sentBasic/Standard/Advanced/Self-Hosted
sql.optimizer.uniqueness_checks_for_gen_random_uuid.enabled
booleanfalseif enabled, uniqueness checks may be planned for mutations of UUID columns updated with gen_random_uuid(); otherwise, uniqueness is assumed due to near-zero collision probabilityBasic/Standard/Advanced/Self-Hosted
sql.schema.approx_max_object_count
integer20000approximate maximum number of schema objects allowed in the cluster; the check uses cached statistics, so the actual count may slightly exceed this limit; set to 0 to disableBasic/Standard/Advanced/Self-Hosted
sql.schema.auto_unlock.enabled
booleantruecontrols whether DDL operations will attempt to automatically unlock and re-lock schema_locked tables. When this setting is false, DDL on schema_locked tables is blocked unless the user manually unlocks the table first. The schema_locked storage parameter improves changefeed performance by locking the table's schema from the perspective of the changefeed.Basic/Standard/Advanced/Self-Hosted
sql.schema.telemetry.recurrence
string@weeklycron-tab recurrence for SQL schema telemetry jobAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
sql.spatial.experimental_box2d_comparison_operators.enabled
booleanfalseenables the use of certain experimental box2d comparison operatorsBasic/Standard/Advanced/Self-Hosted
sql.sqlcommenter.enabled
booleanfalseenables support for sqlcommenter. Key value parsed from sqlcommenter comments will be included in sql insights and sql logs. See https://google.github.io/sqlcommenter/ for more details.Basic/Standard/Advanced/Self-Hosted
sql.stats.activity.persisted_rows.max
integer200000maximum number of rows of statement and transaction activity that will be persisted in the system tablesBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_collection.enabled
booleantrueautomatic statistics collection modeBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_collection.fraction_stale_rows
float0.2target fraction of stale rows per table that will trigger a statistics refreshBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_collection.min_stale_rows
integer500target minimum number of stale rows per table that will trigger a statistics refreshBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_full_collection.enabled
booleantrueautomatic full statistics collection modeBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_partial_collection.enabled
booleantrueautomatic partial statistics collection modeBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_partial_collection.fraction_stale_rows
float0.05target fraction of stale rows per table that will trigger a partial statistics refreshBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_partial_collection.min_stale_rows
integer100target minimum number of stale rows per table that will trigger a partial statistics refreshBasic/Standard/Advanced/Self-Hosted
sql.stats.cleanup.recurrence
string@hourlycron-tab recurrence for SQL Stats cleanup jobBasic/Standard/Advanced/Self-Hosted
sql.stats.detailed_latency_metrics.enabled
booleanfalselabel latency metrics with the statement fingerprint. Workloads with tens of thousands of distinct query fingerprints should leave this setting false. (experimental, affects performance for workloads with high fingerprint cardinality)Basic/Standard/Advanced/Self-Hosted
sql.stats.error_on_concurrent_create_stats.enabled
booleanfalseset to true to error on concurrent CREATE STATISTICS jobs, instead of skipping themBasic/Standard/Advanced/Self-Hosted
sql.stats.flush.enabled
booleantrueif set, SQL execution statistics are periodically flushed to diskBasic/Standard/Advanced/Self-Hosted
sql.stats.flush.interval
duration10m0sthe interval at which SQL execution statistics are flushed to disk, this value must be less than or equal to 1 hourBasic/Standard/Advanced/Self-Hosted
sql.stats.forecasts.enabled
booleantruewhen true, enables generation of statistics forecasts by default for all tablesBasic/Standard/Advanced/Self-Hosted
sql.stats.forecasts.max_decrease
float0.3333333333333333the most a prediction is allowed to decrease, expressed as the minimum ratio of the prediction to the lowest prior observationBasic/Standard/Advanced/Self-Hosted
sql.stats.forecasts.min_goodness_of_fit
float0.95the minimum R² (goodness of fit) measurement required from all predictive models to use a forecastBasic/Standard/Advanced/Self-Hosted
sql.stats.forecasts.min_observations
integer3the minimum number of observed statistics required to produce a statistics forecastBasic/Standard/Advanced/Self-Hosted
sql.stats.histogram_buckets.count
integer200maximum number of histogram buckets to build during table statistics collectionBasic/Standard/Advanced/Self-Hosted
sql.stats.histogram_buckets.include_most_common_values.enabled
booleantruewhether to include most common values as histogram bucketsBasic/Standard/Advanced/Self-Hosted
sql.stats.histogram_buckets.max_fraction_most_common_values
float0.1maximum fraction of histogram buckets to use for most common valuesBasic/Standard/Advanced/Self-Hosted
sql.stats.histogram_collection.enabled
booleantruehistogram collection modeBasic/Standard/Advanced/Self-Hosted
sql.stats.histogram_samples.count
integer0number of rows sampled for histogram construction during table statistics collection. Not setting this or setting a value of 0 means that a reasonable sample size will be automatically picked based on the table size.Basic/Standard/Advanced/Self-Hosted
sql.stats.multi_column_collection.enabled
booleantruemulti-column statistics collection modeBasic/Standard/Advanced/Self-Hosted
sql.stats.non_default_columns.min_retention_period
duration24h0m0sminimum retention period for table statistics collected on non-default columnsBasic/Standard/Advanced/Self-Hosted
sql.stats.non_indexed_json_histograms.enabled
booleanfalseset to true to collect table statistics histograms on non-indexed JSON columnsBasic/Standard/Advanced/Self-Hosted
sql.stats.persisted_rows.max
integer1000000maximum number of rows of statement and transaction statistics that will be persisted in the system tables before compaction beginsBasic/Standard/Advanced/Self-Hosted
sql.stats.post_events.enabled
booleanfalseif set, an event is logged for every successful CREATE STATISTICS jobBasic/Standard/Advanced/Self-Hosted
sql.stats.response.max
integer20000the maximum number of statements and transaction stats returned in a CombinedStatements requestBasic/Standard/Advanced/Self-Hosted
sql.stats.response.show_internal.enabled
booleanfalsecontrols if statistics for internal executions should be returned by the CombinedStatements and if internal sessions should be returned by the ListSessions endpoints. These endpoints are used to display statistics on the SQL Activity pagesBasic/Standard/Advanced/Self-Hosted
sql.stats.system_tables.enabled
booleantruewhen true, enables use of statistics on system tables by the query optimizerBasic/Standard/Advanced/Self-Hosted
sql.stats.system_tables_autostats.enabled
booleantruewhen true, enables automatic collection of statistics on system tablesBasic/Standard/Advanced/Self-Hosted
sql.stats.table_statistics_cache.capacity
integer256the maximum number of table statistics entries stored in the LRU cache. Each cache entry corresponds to a single table.Basic/Standard/Advanced/Self-Hosted
sql.stats.virtual_computed_columns.enabled
booleantrueset to true to collect table statistics on virtual computed columnsBasic/Standard/Advanced/Self-Hosted
sql.telemetry.query_sampling.enabled
booleanfalsewhen set to true, executed queries will emit an event on the telemetry logging channelBasic/Standard/Advanced/Self-Hosted
sql.telemetry.query_sampling.internal.enabled
booleanfalsewhen set to true, internal queries will be sampled in telemetry loggingBasic/Standard/Advanced/Self-Hosted
sql.telemetry.query_sampling.max_event_frequency
integer8the max event frequency (events per second) at which we sample executions for telemetry, note that it is recommended that this value shares a log-line limit of 10 logs per second on the telemetry pipeline with all other telemetry events. If sampling mode is set to 'transaction', this value is ignored.Basic/Standard/Advanced/Self-Hosted
sql.telemetry.query_sampling.mode
enumerationstatementthe execution level used for telemetry sampling. If set to 'statement', events are sampled at the statement execution level. If set to 'transaction', events are sampled at the transaction execution level, i.e. all statements for a transaction will be logged and are counted together as one sampled event (events are still emitted one per statement). [statement = 0, transaction = 1]Basic/Standard/Advanced/Self-Hosted
sql.telemetry.transaction_sampling.max_event_frequency
integer8the max event frequency (events per second) at which we sample transactions for telemetry. If sampling mode is set to 'statement', this setting is ignored. In practice, this means that we only sample a transaction if 1/max_event_frequency seconds have elapsed since the last transaction was sampled.Basic/Standard/Advanced/Self-Hosted
sql.telemetry.transaction_sampling.statement_events_per_transaction.max
integer50the maximum number of statement events to log for every sampled transaction. Note that statements that are logged by force do not adhere to this limit.Basic/Standard/Advanced/Self-Hosted
sql.temp_object_cleaner.cleanup_interval
duration30m0show often to clean up orphaned temporary objectsBasic/Standard/Advanced/Self-Hosted
sql.temp_object_cleaner.wait_interval
duration30m0show long after creation a temporary object will be cleaned upBasic/Standard/Advanced/Self-Hosted
sql.log.all_statements.enabled
(alias: sql.trace.log_statement_execute)
booleanfalseset to true to enable logging of all executed statementsBasic/Standard/Advanced/Self-Hosted
sql.trace.stmt.enable_threshold
duration0senables tracing on all statements; statements executing for longer than this duration will have their trace logged (set to 0 to disable); note that enabling this may have a negative performance impact; this setting applies to individual statements within a transaction and is therefore finer-grained than sql.trace.txn.enable_thresholdBasic/Standard/Advanced/Self-Hosted
sql.trace.txn.enable_threshold
duration0senables transaction traces for transactions exceeding this duration, used with `sql.trace.txn.sample_rate`Basic/Standard/Advanced/Self-Hosted
sql.trace.txn.include_internal.enabled
booleantrueenables tracing internal transactions as well as external workload using sample rate and threshold settingsBasic/Standard/Advanced/Self-Hosted
sql.trace.txn.jaeger_json_output.enabled
booleanfalseenables Jaeger JSON output for transaction traces in logsBasic/Standard/Advanced/Self-Hosted
sql.trace.txn.sample_rate
float1enables probabilistic transaction tracing. It should be used in conjunction with `sql.trace.txn.enable_threshold`. A percentage of transactions between 0 and 1.0 will have tracing enabled, and only those which exceed the configured threshold will be logged.Basic/Standard/Advanced/Self-Hosted
sql.ttl.changefeed_replication.disabled
booleanfalseif true, deletes issued by TTL will not be replicated via changefeeds (this setting will be ignored by changefeeds that have the ignore_disable_changefeed_replication option set; such changefeeds will continue to replicate all TTL deletes)Basic/Standard/Advanced/Self-Hosted
sql.ttl.default_delete_batch_size
integer100default amount of rows to delete in a single query during a TTL jobBasic/Standard/Advanced/Self-Hosted
sql.ttl.default_delete_rate_limit
integer100default delete rate limit (rows per second) per node for each TTL job. Use 0 to signify no rate limit.Basic/Standard/Advanced/Self-Hosted
sql.ttl.default_select_batch_size
integer500default amount of rows to select in a single query during a TTL jobBasic/Standard/Advanced/Self-Hosted
sql.ttl.default_select_rate_limit
integer0default select rate limit (rows per second) per node for each TTL job. Use 0 to signify no rate limit.Basic/Standard/Advanced/Self-Hosted
sql.ttl.job.enabled
booleantruewhether the TTL job is enabledBasic/Standard/Advanced/Self-Hosted
sql.txn.read_committed_isolation.enabled
booleantrueset to true to allow transactions to use the READ COMMITTED isolation level if specified by BEGIN/SET commandsBasic/Standard/Advanced/Self-Hosted
sql.txn.repeatable_read_isolation.enabled
(alias: sql.txn.snapshot_isolation.enabled)
booleanfalseset to true to allow transactions to use the REPEATABLE READ isolation level if specified by BEGIN/SET commandsBasic/Standard/Advanced/Self-Hosted
sql.txn_fingerprint_id_cache.capacity
integer100the maximum number of txn fingerprint IDs storedBasic/Standard/Advanced/Self-Hosted
sql.vecindex.stalled_op.timeout
duration100msamount of time before other vector index workers will assist with a stalled background fixupBasic/Standard/Advanced/Self-Hosted
storage.delete_compaction_excise.enabled
booleantrueset to false to direct Pebble to not partially excise sstables in delete-only compactionsAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
storage.ingest_split.enabled
booleantrueset to false to disable ingest-time splitting that lowers write-amplificationAdvanced/Self-Hosted
storage.ingestion.value_blocks.enabled
booleantrueset to true to enable writing of value blocks in ingestion sstablesBasic/Standard/Advanced/Self-Hosted
storage.max_sync_duration
duration20smaximum duration for disk operations; any operations that take longer than this setting trigger a warning log entry or process crashAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
storage.max_sync_duration.fatal.enabled
booleantrueif true, fatal the process when a disk operation exceeds storage.max_sync_durationBasic/Standard/Advanced/Self-Hosted
storage.sstable.compression_algorithm
enumerationfastestdetermines the compression algorithm to use for Pebble stores [snappy = 1, zstd = 2, none = 3, minlz = 4, fastest = 5, balanced = 6, good = 7, fast = 8]Advanced/Self-hosted (read-write); Basic/Standard (read-only)
storage.sstable.compression_algorithm_backup_storage
enumerationfastestdetermines the compression algorithm to use when compressing sstable data blocks for backup row data storage (fast,balanced,good are experimental); [snappy = 1, zstd = 2, none = 3, minlz = 4, fastest = 5, fast = 6, balanced = 7, good = 8]Advanced/Self-hosted (read-write); Basic/Standard (read-only)
storage.sstable.compression_algorithm_backup_transport
enumerationfastestdetermines the compression algorithm to use when compressing sstable data blocks for backup transport (fast,balanced,good are experimental); [snappy = 1, zstd = 2, none = 3, minlz = 4, fastest = 5, fast = 6, balanced = 7, good = 8]Advanced/Self-hosted (read-write); Basic/Standard (read-only)
storage.unhealthy_write_duration
duration20sduration for disk write operations, beyond which the disk will be reported as unhealthy for higher layer actionsAdvanced/Self-Hosted
storage.wal_failover.unhealthy_op_threshold
duration100msthe latency of a WAL write considered unhealthy and triggers a failover to a secondary WAL locationAdvanced/Self-Hosted
timeseries.storage.enabled
booleantrueif set, periodic timeseries data is stored within the cluster; disabling is not recommended unless you are storing the data elsewhereAdvanced/Self-Hosted
timeseries.storage.resolution_10s.ttl
duration240h0m0sthe maximum age of time series data stored at the 10 second resolution. Data older than this is subject to rollup and deletion.Advanced/Self-hosted (read-write); Basic/Standard (read-only)
timeseries.storage.resolution_30m.ttl
duration2160h0m0sthe maximum age of time series data stored at the 30 minute resolution. Data older than this is subject to deletion.Advanced/Self-hosted (read-write); Basic/Standard (read-only)
trace.debug_http_endpoint.enabled
(alias: trace.debug.enable)
booleanfalseif set, traces for recent requests can be seen at https://<ui>/debug/requestsBasic/Standard/Advanced/Self-Hosted
trace.opentelemetry.collector
stringaddress of an OpenTelemetry trace collector to receive traces using the otel gRPC protocol, as <host>:<port>. If no port is specified, 4317 will be used.Basic/Standard/Advanced/Self-Hosted
trace.snapshot.rate
duration0sif non-zero, interval at which background trace snapshots are capturedBasic/Standard/Advanced/Self-Hosted
trace.span_registry.enabled
booleanfalseif set, ongoing traces can be seen at https://<ui>/#/debug/tracezBasic/Standard/Advanced/Self-Hosted
trace.zipkin.collector
stringthe address of a Zipkin instance to receive traces, as <host>:<port>. If no port is specified, 9411 will be used.Basic/Standard/Advanced/Self-Hosted
ui.database_locality_metadata.enabled
booleantrueif enabled shows extended locality data about databases and tables in DB Console which can be expensive to computeBasic/Standard/Advanced/Self-Hosted
ui.default_timezone
stringthe default timezone used to format timestamps in the uiBasic/Standard/Advanced/Self-Hosted
ui.display_timezone
enumerationetc/utcthe timezone used to format timestamps in the ui. This setting is deprecated and will be removed in a future version. Use the 'ui.default_timezone' setting instead. 'ui.default_timezone' takes precedence over this setting. [etc/utc = 0, america/new_york = 1]Basic/Standard/Advanced/Self-Hosted
version
version25.4set the active cluster version in the format '<major>.<minor>'Basic/Standard/Advanced/Self-Hosted
diff --git a/src/current/_includes/cockroach-generated/release-25.4/sql/aggregates.md b/src/current/_includes/cockroach-generated/release-25.4/sql/aggregates.md new file mode 100644 index 00000000000..1683084408b --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.4/sql/aggregates.md @@ -0,0 +1,589 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_agg(arg1: bool) → bool[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bool[]) → bool[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bytes) → bytes[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bytes[]) → bytes[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: date) → date[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: date[]) → date[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: decimal) → decimal[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: decimal[]) → decimal[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: float) → float[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: float[]) → float[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: inet) → inet[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: inet[]) → inet[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: int) → int[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: int[]) → int[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: interval) → interval[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: interval[]) → interval[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: string) → string[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: string[]) → string[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: time) → time[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: time[]) → time[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamp) → timestamp[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamp[]) → timestamp[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamptz) → timestamptz[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamptz[]) → timestamptz[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: uuid) → uuid[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: uuid[]) → uuid[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: anyenum) → anyenum[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: anyenum[]) → anyenum[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: box2d) → box2d[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: box2d[]) → box2d[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geography) → geography[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geography[]) → geography[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geometry) → geometry[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geometry[]) → geometry[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: jsonb) → jsonb[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: jsonb[]) → jsonb[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: ltree) → ltree[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: ltree[]) → ltree[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: oid) → oid[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: oid[]) → oid[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: pg_lsn) → pg_lsn[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: pg_lsn[]) → pg_lsn[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: refcursor) → refcursor[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: refcursor[]) → refcursor[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timetz) → timetz[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timetz[]) → timetz[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: tuple) → tuple[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: tuple[]) → tuple[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: varbit) → varbit[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: varbit[]) → varbit[][]

Aggregates the selected values into an array.

+
Immutable
array_cat_agg(arg1: bool[]) → bool[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: bytes[]) → bytes[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: date[]) → date[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: decimal[]) → decimal[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: float[]) → float[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: inet[]) → inet[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: int[]) → int[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: interval[]) → interval[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: string[]) → string[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: time[]) → time[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timestamp[]) → timestamp[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timestamptz[]) → timestamptz[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: uuid[]) → uuid[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: anyenum[]) → anyenum[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: box2d[]) → box2d[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: geography[]) → geography[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: geometry[]) → geometry[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: jsonb[]) → jsonb[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: ltree[]) → ltree[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: oid[]) → oid[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: pg_lsn[]) → pg_lsn[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: refcursor[]) → refcursor[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timetz[]) → timetz[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: tuple[]) → tuple[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: varbit[]) → varbit[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
avg(arg1: decimal) → decimal

Calculates the average of the selected values.

+
Immutable
avg(arg1: float) → float

Calculates the average of the selected values.

+
Immutable
avg(arg1: int) → decimal

Calculates the average of the selected values.

+
Immutable
avg(arg1: interval) → interval

Calculates the average of the selected values.

+
Immutable
bit_and(arg1: int) → int

Calculates the bitwise AND of all non-null input values, or null if none.

+
Immutable
bit_and(arg1: varbit) → varbit

Calculates the bitwise AND of all non-null input values, or null if none.

+
Immutable
bit_or(arg1: int) → int

Calculates the bitwise OR of all non-null input values, or null if none.

+
Immutable
bit_or(arg1: varbit) → varbit

Calculates the bitwise OR of all non-null input values, or null if none.

+
Immutable
bool_and(arg1: bool) → bool

Calculates the boolean value of ANDing all selected values.

+
Immutable
bool_or(arg1: bool) → bool

Calculates the boolean value of ORing all selected values.

+
Immutable
concat_agg(arg1: bytes) → bytes

Concatenates all selected values.

+
Immutable
concat_agg(arg1: string) → string

Concatenates all selected values.

+
Immutable
corr(arg1: decimal, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: decimal, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: decimal, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
count(arg1: anyelement) → int

Calculates the number of selected elements.

+
Immutable
count_rows() → int

Calculates the number of rows.

+
Immutable
covar_pop(arg1: decimal, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: decimal, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: decimal, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
every(arg1: bool) → bool

Calculates the boolean value of ANDing all selected values.

+
Immutable
json_agg(arg1: anyelement) → jsonb

Aggregates values as a JSON or JSONB array.

+
Stable
json_object_agg(arg1: string, arg2: anyelement) → jsonb

Aggregates values as a JSON or JSONB object.

+
Stable
jsonb_agg(arg1: anyelement) → jsonb

Aggregates values as a JSON or JSONB array.

+
Stable
jsonb_object_agg(arg1: string, arg2: anyelement) → jsonb

Aggregates values as a JSON or JSONB object.

+
Stable
max(arg1: bool) → bool

Identifies the maximum selected value.

+
Immutable
max(arg1: bytes) → bytes

Identifies the maximum selected value.

+
Immutable
max(arg1: date) → date

Identifies the maximum selected value.

+
Immutable
max(arg1: decimal) → decimal

Identifies the maximum selected value.

+
Immutable
max(arg1: float) → float

Identifies the maximum selected value.

+
Immutable
max(arg1: inet) → inet

Identifies the maximum selected value.

+
Immutable
max(arg1: int) → int

Identifies the maximum selected value.

+
Immutable
max(arg1: interval) → interval

Identifies the maximum selected value.

+
Immutable
max(arg1: string) → string

Identifies the maximum selected value.

+
Immutable
max(arg1: time) → time

Identifies the maximum selected value.

+
Immutable
max(arg1: timestamp) → timestamp

Identifies the maximum selected value.

+
Immutable
max(arg1: timestamptz) → timestamptz

Identifies the maximum selected value.

+
Immutable
max(arg1: uuid) → uuid

Identifies the maximum selected value.

+
Immutable
max(arg1: anyenum) → anyenum

Identifies the maximum selected value.

+
Immutable
max(arg1: box2d) → box2d

Identifies the maximum selected value.

+
Immutable
max(arg1: collatedstring{*}) → collatedstring{*}

Identifies the maximum selected value.

+
Immutable
max(arg1: geography) → geography

Identifies the maximum selected value.

+
Immutable
max(arg1: geometry) → geometry

Identifies the maximum selected value.

+
Immutable
max(arg1: jsonb) → jsonb

Identifies the maximum selected value.

+
Immutable
max(arg1: ltree) → ltree

Identifies the maximum selected value.

+
Immutable
max(arg1: oid) → oid

Identifies the maximum selected value.

+
Immutable
max(arg1: pg_lsn) → pg_lsn

Identifies the maximum selected value.

+
Immutable
max(arg1: timetz) → timetz

Identifies the maximum selected value.

+
Immutable
max(arg1: varbit) → varbit

Identifies the maximum selected value.

+
Immutable
min(arg1: bool) → bool

Identifies the minimum selected value.

+
Immutable
min(arg1: bytes) → bytes

Identifies the minimum selected value.

+
Immutable
min(arg1: date) → date

Identifies the minimum selected value.

+
Immutable
min(arg1: decimal) → decimal

Identifies the minimum selected value.

+
Immutable
min(arg1: float) → float

Identifies the minimum selected value.

+
Immutable
min(arg1: inet) → inet

Identifies the minimum selected value.

+
Immutable
min(arg1: int) → int

Identifies the minimum selected value.

+
Immutable
min(arg1: interval) → interval

Identifies the minimum selected value.

+
Immutable
min(arg1: string) → string

Identifies the minimum selected value.

+
Immutable
min(arg1: time) → time

Identifies the minimum selected value.

+
Immutable
min(arg1: timestamp) → timestamp

Identifies the minimum selected value.

+
Immutable
min(arg1: timestamptz) → timestamptz

Identifies the minimum selected value.

+
Immutable
min(arg1: uuid) → uuid

Identifies the minimum selected value.

+
Immutable
min(arg1: anyenum) → anyenum

Identifies the minimum selected value.

+
Immutable
min(arg1: box2d) → box2d

Identifies the minimum selected value.

+
Immutable
min(arg1: collatedstring{*}) → collatedstring{*}

Identifies the minimum selected value.

+
Immutable
min(arg1: geography) → geography

Identifies the minimum selected value.

+
Immutable
min(arg1: geometry) → geometry

Identifies the minimum selected value.

+
Immutable
min(arg1: jsonb) → jsonb

Identifies the minimum selected value.

+
Immutable
min(arg1: ltree) → ltree

Identifies the minimum selected value.

+
Immutable
min(arg1: oid) → oid

Identifies the minimum selected value.

+
Immutable
min(arg1: pg_lsn) → pg_lsn

Identifies the minimum selected value.

+
Immutable
min(arg1: timetz) → timetz

Identifies the minimum selected value.

+
Immutable
min(arg1: varbit) → varbit

Identifies the minimum selected value.

+
Immutable
percentile_cont(arg1: float) → float

Continuous percentile: returns a float corresponding to the specified fraction in the ordering, interpolating between adjacent input floats if needed.

+
Immutable
percentile_cont(arg1: float) → interval

Continuous percentile: returns an interval corresponding to the specified fraction in the ordering, interpolating between adjacent input intervals if needed.

+
Immutable
percentile_cont(arg1: float[]) → float[]

Continuous percentile: returns floats corresponding to the specified fractions in the ordering, interpolating between adjacent input floats if needed.

+
Immutable
percentile_cont(arg1: float[]) → interval[]

Continuous percentile: returns intervals corresponding to the specified fractions in the ordering, interpolating between adjacent input intervals if needed.

+
Immutable
percentile_disc(arg1: float) → anyelement

Discrete percentile: returns the first input value whose position in the ordering equals or exceeds the specified fraction.

+
Immutable
percentile_disc(arg1: float[]) → anyelement

Discrete percentile: returns input values whose position in the ordering equals or exceeds the specified fractions.

+
Immutable
regr_avgx(arg1: decimal, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: decimal, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: decimal, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_count(arg1: decimal, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: decimal, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: decimal, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_intercept(arg1: decimal, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: decimal, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: decimal, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_r2(arg1: decimal, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: decimal, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: decimal, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_slope(arg1: decimal, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: decimal, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: decimal, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_sxx(arg1: decimal, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: decimal, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: decimal, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
sqrdiff(arg1: decimal) → decimal

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
sqrdiff(arg1: float) → float

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
sqrdiff(arg1: int) → decimal

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
st_collect(arg1: geometry) → geometry

Collects geometries into a GeometryCollection or multi-type as appropriate.

+
Immutable
st_extent(arg1: geometry) → box2d

Forms a Box2D that encapsulates all provided geometries.

+
Immutable
st_makeline(arg1: geometry) → geometry

Forms a LineString from Point, MultiPoint or LineStrings. Other shapes will be ignored.

+
Immutable
st_memcollect(arg1: geometry) → geometry

Collects geometries into a GeometryCollection or multi-type as appropriate.

+
Immutable
st_memunion(arg1: geometry) → geometry

Applies a spatial union to the geometries provided.

+
Immutable
st_union(arg1: geometry) → geometry

Applies a spatial union to the geometries provided.

+
Immutable
stddev(arg1: decimal) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev(arg1: float) → float

Calculates the standard deviation of the selected values.

+
Immutable
stddev(arg1: int) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: decimal) → decimal

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: float) → float

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: int) → decimal

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: decimal) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: float) → float

Calculates the standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: int) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
string_agg(arg1: bytes, arg2: bytes) → bytes

Concatenates all selected values using the provided delimiter.

+
Immutable
string_agg(arg1: string, arg2: string) → string

Concatenates all selected values using the provided delimiter.

+
Immutable
sum(arg1: decimal) → decimal

Calculates the sum of the selected values.

+
Immutable
sum(arg1: float) → float

Calculates the sum of the selected values.

+
Immutable
sum(arg1: int) → decimal

Calculates the sum of the selected values.

+
Immutable
sum(arg1: interval) → interval

Calculates the sum of the selected values.

+
Immutable
sum_int(arg1: int) → int

Calculates the sum of the selected values.

+
Immutable
var_pop(arg1: decimal) → decimal

Calculates the population variance of the selected values.

+
Immutable
var_pop(arg1: float) → float

Calculates the population variance of the selected values.

+
Immutable
var_pop(arg1: int) → decimal

Calculates the population variance of the selected values.

+
Immutable
var_samp(arg1: decimal) → decimal

Calculates the variance of the selected values.

+
Immutable
var_samp(arg1: float) → float

Calculates the variance of the selected values.

+
Immutable
var_samp(arg1: int) → decimal

Calculates the variance of the selected values.

+
Immutable
variance(arg1: decimal) → decimal

Calculates the variance of the selected values.

+
Immutable
variance(arg1: float) → float

Calculates the variance of the selected values.

+
Immutable
variance(arg1: int) → decimal

Calculates the variance of the selected values.

+
Immutable
xor_agg(arg1: bytes) → bytes

Calculates the bitwise XOR of the selected values.

+
Immutable
xor_agg(arg1: int) → int

Calculates the bitwise XOR of the selected values.

+
Immutable
+ diff --git a/src/current/_includes/cockroach-generated/release-25.4/sql/functions.md b/src/current/_includes/cockroach-generated/release-25.4/sql/functions.md new file mode 100644 index 00000000000..8be2853fd67 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.4/sql/functions.md @@ -0,0 +1,3669 @@ +### Array functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_append(array: bool[], elem: bool) → bool[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: bytes[], elem: bytes) → bytes[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: date[], elem: date) → date[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: decimal[], elem: decimal) → decimal[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: float[], elem: float) → float[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: inet[], elem: inet) → inet[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: int[], elem: int) → int[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: interval[], elem: interval) → interval[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: string[], elem: string) → string[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: time[], elem: time) → time[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timestamp[], elem: timestamp) → timestamp[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timestamptz[], elem: timestamptz) → timestamptz[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: uuid[], elem: uuid) → uuid[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: anyenum[], elem: anyenum) → anyenum[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: box2d[], elem: box2d) → box2d[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: geography[], elem: geography) → geography[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: geometry[], elem: geometry) → geometry[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: jsonb[], elem: jsonb) → jsonb[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: ltree[], elem: ltree) → ltree[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: oid[], elem: oid) → oid[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: pg_lsn[], elem: pg_lsn) → pg_lsn[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: refcursor[], elem: refcursor) → refcursor[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timetz[], elem: timetz) → timetz[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: tuple[], elem: tuple) → tuple[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: varbit[], elem: varbit) → varbit[]

Appends elem to array, returning the result.

+
Immutable
array_cat(left: bool[], right: bool[]) → bool[]

Appends two arrays.

+
Immutable
array_cat(left: bytes[], right: bytes[]) → bytes[]

Appends two arrays.

+
Immutable
array_cat(left: date[], right: date[]) → date[]

Appends two arrays.

+
Immutable
array_cat(left: decimal[], right: decimal[]) → decimal[]

Appends two arrays.

+
Immutable
array_cat(left: float[], right: float[]) → float[]

Appends two arrays.

+
Immutable
array_cat(left: inet[], right: inet[]) → inet[]

Appends two arrays.

+
Immutable
array_cat(left: int[], right: int[]) → int[]

Appends two arrays.

+
Immutable
array_cat(left: interval[], right: interval[]) → interval[]

Appends two arrays.

+
Immutable
array_cat(left: string[], right: string[]) → string[]

Appends two arrays.

+
Immutable
array_cat(left: time[], right: time[]) → time[]

Appends two arrays.

+
Immutable
array_cat(left: timestamp[], right: timestamp[]) → timestamp[]

Appends two arrays.

+
Immutable
array_cat(left: timestamptz[], right: timestamptz[]) → timestamptz[]

Appends two arrays.

+
Immutable
array_cat(left: uuid[], right: uuid[]) → uuid[]

Appends two arrays.

+
Immutable
array_cat(left: anyenum[], right: anyenum[]) → anyenum[]

Appends two arrays.

+
Immutable
array_cat(left: box2d[], right: box2d[]) → box2d[]

Appends two arrays.

+
Immutable
array_cat(left: geography[], right: geography[]) → geography[]

Appends two arrays.

+
Immutable
array_cat(left: geometry[], right: geometry[]) → geometry[]

Appends two arrays.

+
Immutable
array_cat(left: jsonb[], right: jsonb[]) → jsonb[]

Appends two arrays.

+
Immutable
array_cat(left: ltree[], right: ltree[]) → ltree[]

Appends two arrays.

+
Immutable
array_cat(left: oid[], right: oid[]) → oid[]

Appends two arrays.

+
Immutable
array_cat(left: pg_lsn[], right: pg_lsn[]) → pg_lsn[]

Appends two arrays.

+
Immutable
array_cat(left: refcursor[], right: refcursor[]) → refcursor[]

Appends two arrays.

+
Immutable
array_cat(left: timetz[], right: timetz[]) → timetz[]

Appends two arrays.

+
Immutable
array_cat(left: tuple[], right: tuple[]) → tuple[]

Appends two arrays.

+
Immutable
array_cat(left: varbit[], right: varbit[]) → varbit[]

Appends two arrays.

+
Immutable
array_length(input: anyelement[], array_dimension: int) → int

Calculates the length of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
array_lower(input: anyelement[], array_dimension: int) → int

Calculates the minimum value of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
array_position(array: bool[], elem: bool) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: bool[], elem: bool, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: bytes[], elem: bytes) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: bytes[], elem: bytes, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: date[], elem: date) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: date[], elem: date, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: decimal[], elem: decimal) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: decimal[], elem: decimal, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: float[], elem: float) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: float[], elem: float, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: inet[], elem: inet) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: inet[], elem: inet, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: int[], elem: int) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: int[], elem: int, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: interval[], elem: interval) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: interval[], elem: interval, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: string[], elem: string) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: string[], elem: string, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: time[], elem: time) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: time[], elem: time, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: timestamp[], elem: timestamp) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timestamp[], elem: timestamp, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: timestamptz[], elem: timestamptz) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timestamptz[], elem: timestamptz, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: uuid[], elem: uuid) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: uuid[], elem: uuid, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: anyenum[], elem: anyenum) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: anyenum[], elem: anyenum, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: box2d[], elem: box2d) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: box2d[], elem: box2d, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: geography[], elem: geography) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: geography[], elem: geography, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: geometry[], elem: geometry) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: geometry[], elem: geometry, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: jsonb[], elem: jsonb) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: jsonb[], elem: jsonb, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: ltree[], elem: ltree) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: ltree[], elem: ltree, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: oid[], elem: oid) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: oid[], elem: oid, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: pg_lsn[], elem: pg_lsn) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: pg_lsn[], elem: pg_lsn, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: refcursor[], elem: refcursor) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: refcursor[], elem: refcursor, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: timetz[], elem: timetz) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timetz[], elem: timetz, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: tuple[], elem: tuple) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: tuple[], elem: tuple, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: varbit[], elem: varbit) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: varbit[], elem: varbit, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_positions(array: bool[], elem: bool) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: bytes[], elem: bytes) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: date[], elem: date) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: decimal[], elem: decimal) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: float[], elem: float) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: inet[], elem: inet) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: int[], elem: int) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: interval[], elem: interval) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: string[], elem: string) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: time[], elem: time) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timestamp[], elem: timestamp) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timestamptz[], elem: timestamptz) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: uuid[], elem: uuid) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: anyenum[], elem: anyenum) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: box2d[], elem: box2d) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: geography[], elem: geography) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: geometry[], elem: geometry) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: jsonb[], elem: jsonb) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: ltree[], elem: ltree) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: oid[], elem: oid) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: pg_lsn[], elem: pg_lsn) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: refcursor[], elem: refcursor) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timetz[], elem: timetz) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: tuple[], elem: tuple) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: varbit[], elem: varbit) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_prepend(elem: bool, array: bool[]) → bool[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: bytes, array: bytes[]) → bytes[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: date, array: date[]) → date[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: decimal, array: decimal[]) → decimal[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: float, array: float[]) → float[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: inet, array: inet[]) → inet[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: int, array: int[]) → int[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: interval, array: interval[]) → interval[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: string, array: string[]) → string[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: time, array: time[]) → time[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timestamp, array: timestamp[]) → timestamp[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timestamptz, array: timestamptz[]) → timestamptz[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: uuid, array: uuid[]) → uuid[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: anyenum, array: anyenum[]) → anyenum[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: box2d, array: box2d[]) → box2d[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: geography, array: geography[]) → geography[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: geometry, array: geometry[]) → geometry[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: jsonb, array: jsonb[]) → jsonb[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: ltree, array: ltree[]) → ltree[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: oid, array: oid[]) → oid[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: pg_lsn, array: pg_lsn[]) → pg_lsn[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: refcursor, array: refcursor[]) → refcursor[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timetz, array: timetz[]) → timetz[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: tuple, array: tuple[]) → tuple[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: varbit, array: varbit[]) → varbit[]

Prepends elem to array, returning the result.

+
Immutable
array_remove(array: bool[], elem: bool) → bool[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: bytes[], elem: bytes) → bytes[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: date[], elem: date) → date[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: decimal[], elem: decimal) → decimal[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: float[], elem: float) → float[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: inet[], elem: inet) → inet[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: int[], elem: int) → int[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: interval[], elem: interval) → interval[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: string[], elem: string) → string[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: time[], elem: time) → time[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timestamp[], elem: timestamp) → timestamp[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timestamptz[], elem: timestamptz) → timestamptz[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: uuid[], elem: uuid) → uuid[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: anyenum[], elem: anyenum) → anyenum[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: box2d[], elem: box2d) → box2d[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: geography[], elem: geography) → geography[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: geometry[], elem: geometry) → geometry[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: jsonb[], elem: jsonb) → jsonb[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: ltree[], elem: ltree) → ltree[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: oid[], elem: oid) → oid[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: pg_lsn[], elem: pg_lsn) → pg_lsn[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: refcursor[], elem: refcursor) → refcursor[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timetz[], elem: timetz) → timetz[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: tuple[], elem: tuple) → tuple[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: varbit[], elem: varbit) → varbit[]

Remove from array all elements equal to elem.

+
Immutable
array_replace(array: bool[], toreplace: bool, replacewith: bool) → bool[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: bytes[], toreplace: bytes, replacewith: bytes) → bytes[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: date[], toreplace: date, replacewith: date) → date[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: decimal[], toreplace: decimal, replacewith: decimal) → decimal[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: float[], toreplace: float, replacewith: float) → float[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: inet[], toreplace: inet, replacewith: inet) → inet[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: int[], toreplace: int, replacewith: int) → int[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: interval[], toreplace: interval, replacewith: interval) → interval[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: string[], toreplace: string, replacewith: string) → string[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: time[], toreplace: time, replacewith: time) → time[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timestamp[], toreplace: timestamp, replacewith: timestamp) → timestamp[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timestamptz[], toreplace: timestamptz, replacewith: timestamptz) → timestamptz[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: uuid[], toreplace: uuid, replacewith: uuid) → uuid[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: anyenum[], toreplace: anyenum, replacewith: anyenum) → anyenum[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: box2d[], toreplace: box2d, replacewith: box2d) → box2d[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: geography[], toreplace: geography, replacewith: geography) → geography[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: geometry[], toreplace: geometry, replacewith: geometry) → geometry[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: jsonb[], toreplace: jsonb, replacewith: jsonb) → jsonb[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: ltree[], toreplace: ltree, replacewith: ltree) → ltree[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: oid[], toreplace: oid, replacewith: oid) → oid[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: pg_lsn[], toreplace: pg_lsn, replacewith: pg_lsn) → pg_lsn[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: refcursor[], toreplace: refcursor, replacewith: refcursor) → refcursor[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timetz[], toreplace: timetz, replacewith: timetz) → timetz[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: tuple[], toreplace: tuple, replacewith: tuple) → tuple[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: varbit[], toreplace: varbit, replacewith: varbit) → varbit[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_to_string(input: anyelement[], delim: string) → string

Join an array into a string with a delimiter.

+
Stable
array_to_string(input: anyelement[], delimiter: string, null: string) → string

Join an array into a string with a delimiter, replacing NULLs with a null string.

+
Stable
array_upper(input: anyelement[], array_dimension: int) → int

Calculates the maximum value of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
cardinality(input: anyelement[]) → int

Calculates the number of elements contained in input

+
Immutable
jsonb_array_to_string_array(input: jsonb) → string[]

Convert a JSONB array into a string array.

+
Immutable
string_to_array(str: string, delimiter: string) → string[]

Split a string into components on a delimiter.

+
Immutable
string_to_array(str: string, delimiter: string, null: string) → string[]

Split a string into components on a delimiter with a specified string to consider NULL.

+
Immutable
+ +### BOOL functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
ilike_escape(unescaped: string, pattern: string, escape: string) → bool

Matches case insensetively unescaped with pattern using escape as an escape token.

+
Immutable
inet_contained_by_or_equals(val: inet, container: inet) → bool

Test for subnet inclusion or equality, using only the network parts of the addresses. The host part of the addresses is ignored.

+
Immutable
inet_contains_or_equals(container: inet, val: inet) → bool

Test for subnet inclusion or equality, using only the network parts of the addresses. The host part of the addresses is ignored.

+
Immutable
inet_same_family(val: inet, val: inet) → bool

Checks if two IP addresses are of the same IP family.

+
Immutable
like_escape(unescaped: string, pattern: string, escape: string) → bool

Matches unescaped with pattern using escape as an escape token.

+
Immutable
not_ilike_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped not matches case insensetively with pattern using escape as an escape token.

+
Immutable
not_like_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped not matches with pattern using escape as an escape token.

+
Immutable
not_similar_to_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped not matches with pattern using escape as an escape token.

+
Immutable
+ +### Comparison functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
greatest(anyelement...) → anyelement

Returns the element with the greatest value.

+
Immutable
least(anyelement...) → anyelement

Returns the element with the lowest value.

+
Immutable
num_nonnulls(any...) → int

Returns the number of nonnull arguments.

+
Immutable
num_nulls(any...) → int

Returns the number of null arguments.

+
Immutable
+ +### Cryptographic functions + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
crypt(password: string, salt: string) → string

Generates a hash based on a password and salt. The hash algorithm and number of rounds if applicable are encoded in the salt.

+
Immutable
decrypt(data: bytes, key: bytes, type: string) → bytes

Decrypt data with key using the cipher method specified by type.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
decrypt_iv(data: bytes, key: bytes, iv: bytes, type: string) → bytes

Decrypt data with key using the cipher method specified by type. If the mode is CBC, the provided iv will be used. Otherwise, it will be ignored.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
digest(data: bytes, type: string) → bytes

Computes a binary hash of the given data. type is the algorithm to use (md5, sha1, sha224, sha256, sha384, or sha512).

+
Immutable
digest(data: string, type: string) → bytes

Computes a binary hash of the given data. type is the algorithm to use (md5, sha1, sha224, sha256, sha384, or sha512).

+
Immutable
encrypt(data: bytes, key: bytes, type: string) → bytes

Encrypt data with key using the cipher method specified by type.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
encrypt_iv(data: bytes, key: bytes, iv: bytes, type: string) → bytes

Encrypt data with key using the cipher method specified by type. If the mode is CBC, the provided iv will be used. Otherwise, it will be ignored.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
gen_random_bytes(count: int) → bytes

Returns count cryptographically strong random bytes. At most 1024 bytes can be extracted at a time.

+
Volatile
gen_salt(type: string) → string

Generates a salt for input into the crypt function using the default number of rounds.

+
Volatile
gen_salt(type: string, iter_count: int) → string

Generates a salt for input into the crypt function using iter_count number of rounds.

+
Volatile
hmac(data: bytes, key: bytes, type: string) → bytes

Calculates hashed MAC for data with key key. type is the same as in digest().

+
Immutable
hmac(data: string, key: string, type: string) → bytes

Calculates hashed MAC for data with key key. type is the same as in digest().

+
Immutable
+ +### DECIMAL functions + + + + + +
Function → ReturnsDescriptionVolatility
hlc_to_timestamp(hlc: decimal) → timestamptz

Returns a TimestampTZ representation of a CockroachDB HLC in decimal form.

+

Note that a TimestampTZ has less precision than a CockroachDB HLC. It is intended as +a convenience function to display HLCs in a print-friendly form. Use the decimal +value if you rely on the HLC for accuracy.

+
Immutable
+ +### Date and time functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
age(end: timestamptz, begin: timestamptz) → interval

Calculates the interval between begin and end, normalized into years, months and days.

+

Note this may not be an accurate time span since years and months are normalized +from days, and years and months are out of context. To avoid normalizing days into +months and years, use the timestamptz subtraction operator.

+
Immutable
age(val: timestamptz) → interval

Calculates the interval between val and the current time, normalized into years, months and days.

+

Note this may not be an accurate time span since years and months are normalized +from days, and years and months are out of context. To avoid normalizing days into +months and years, use now() - timestamptz.

+
Stable
clock_timestamp() → timestamp

Returns the current system time on one of the cluster nodes.

+
Volatile
clock_timestamp() → timestamptz

Returns the current system time on one of the cluster nodes.

+
Volatile
current_date() → date

Returns the date of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
current_timestamp(precision: int) → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp(precision: int) → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp(precision: int) → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
date_part(element: string, input: date) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: interval) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, +month, day, hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: time) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: timestamp) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: timestamptz) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Stable
date_part(element: string, input: timetz) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Immutable
date_trunc(element: string, input: date) → timestamptz

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: interval) → interval

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: time) → interval

Truncates input to precision element. Sets all fields that are less +significant than element to zero.

+

Compatible elements: hour, minute, second, millisecond, microsecond.

+
Immutable
date_trunc(element: string, input: timestamp) → timestamp

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Immutable
date_trunc(element: string, input: timestamptz) → timestamptz

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: timestamptz, timezone: string) → timestamptz

Truncates input to precision element in the specified timezone. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
experimental_follower_read_timestamp() → timestamptz

Same as follower_read_timestamp. This name is deprecated.

+
Volatile
experimental_strftime(input: date, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strftime(input: timestamp, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strftime(input: timestamptz, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strptime(input: string, format: string) → timestamptz

Returns input as a timestamptz using format (which uses standard strptime formatting).

+
Immutable
extract(element: string, input: date) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: interval) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, +month, day, hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: time) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: timestamp) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: timestamptz) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Stable
extract(element: string, input: timetz) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Immutable
extract_duration(element: string, input: interval) → int

Extracts element from input. +Compatible elements: hour, minute, second, millisecond, microsecond. +This is deprecated in favor of extract which supports duration.

+
Immutable
follower_read_timestamp() → timestamptz

Returns a timestamp which is very likely to be safe to perform +against a follower replica.

+

This function is intended to be used with an AS OF SYSTEM TIME clause to perform +historical reads against a time which is recent but sufficiently old for reads +to be performed against the closest replica as opposed to the current +leaseholder for a given range.

+

Note that this function requires an enterprise license on a CCL distribution to +return a result that is less likely the closest replica. It is otherwise +hardcoded as -4.8s from the statement time, which may not result in reading from the +nearest replica.

+
Volatile
localtimestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtimestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp(precision: int) → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp(precision: int) → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtimestamp(precision: int) → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
make_date(year: int, month: int, day: int) → date

Create date (formatted according to ISO 8601) from year, month, and day fields (negative years signify BC).

+
Immutable
make_timestamp(year: int, month: int, day: int, hour: int, min: int, sec: float) → timestamp

Create timestamp (formatted according to ISO 8601) from year, month, day, hour, minute, and seconds fields (negative years signify BC).

+
Immutable
make_timestamptz(year: int, month: int, day: int, hour: int, min: int, sec: float) → timestamptz

Create timestamp (formatted according to ISO 8601) with time zone from year, month, day, hour, minute and seconds fields (negative years signify BC). If timezone is not specified, the current time zone is used.

+
Stable
make_timestamptz(year: int, month: int, day: int, hour: int, min: int, sec: float, timezone: string) → timestamptz

Create timestamp (formatted according to ISO 8601) with time zone from year, month, day, hour, minute and seconds fields (negative years signify BC). If timezone is not specified, the current time zone is used.

+
Stable
now() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
now() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
now() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
overlaps(s1: date, e1: date, s2: date, e2: date) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: date, e1: interval, s2: date, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: time, e1: interval, s2: time, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: time, e1: time, s2: time, e2: time) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamp, e1: interval, s2: timestamp, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamp, e1: timestamp, s2: timestamp, e2: timestamp) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamptz, e1: interval, s2: timestamptz, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Stable
overlaps(s1: timestamptz, e1: timestamptz, s2: timestamptz, e2: timestamptz) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timetz, e1: interval, s2: timetz, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timetz, e1: timetz, s2: timetz, e2: timetz) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
statement_timestamp() → timestamp

Returns the start time of the current statement.

+
Stable
statement_timestamp() → timestamptz

Returns the start time of the current statement.

+
Stable
strftime(input: date, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strftime(input: timestamp, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strftime(input: timestamptz, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strptime(input: string, format: string) → timestamptz

Returns input as a timestamptz using format (which uses standard strptime formatting).

+
Immutable
timeofday() → string

Returns the current system time on one of the cluster nodes as a string.

+
Stable
timezone(timezone: string, time: time) → timetz

Treat given time without time zone as located in the specified time zone.

+
Stable
timezone(timezone: string, timestamp: timestamp) → timestamptz

Treat given time stamp without time zone as located in the specified time zone.

+
Immutable
timezone(timezone: string, timestamptz: timestamptz) → timestamp

Convert given time stamp with time zone to the new time zone, with no time zone designation.

+
Immutable
timezone(timezone: string, timestamptz_string: string) → timestamp

Convert given time stamp with time zone to the new time zone, with no time zone designation.

+
Stable
timezone(timezone: string, timetz: timetz) → timetz

Convert given time with time zone to the new time zone.

+
Stable
to_char(date: date) → string

Convert a date to a string assuming the ISO, MDY DateStyle.

+
Immutable
to_char(date: date, format: string) → string

Convert a date to a string using the given format.

+
Stable
to_char(interval: interval) → string

Convert an interval to a string assuming the Postgres IntervalStyle.

+
Immutable
to_char(interval: interval, format: string) → string

Convert an interval to a string using the given format.

+
Stable
to_char(timestamp: timestamp) → string

Convert a timestamp to a string assuming the ISO, MDY DateStyle.

+
Immutable
to_char(timestamp: timestamp, format: string) → string

Convert a timestamp to a string using the given format.

+
Stable
to_char(timestamptz: timestamptz, format: string) → string

Convert a timestamp with time zone to a string using the given format.

+
Stable
to_timestamp(timestamp: float) → timestamptz

Convert Unix epoch (seconds since 1970-01-01 00:00:00+00) to timestamp with time zone.

+
Immutable
transaction_timestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
transaction_timestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
transaction_timestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
with_max_staleness(max_staleness: interval) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp within the staleness +bound that allows execution of the reads at the nearest available replica without blocking.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_max_staleness(max_staleness: interval, nearest_only: bool) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp within the staleness +bound that allows execution of the reads at the nearest available replica without blocking.

+

If nearest_only is set to true, reads that cannot be served using the nearest +available replica will error.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_min_timestamp(min_timestamp: timestamptz) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp before the min_timestamp +that allows execution of the reads at the nearest available replica without blocking.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_min_timestamp(min_timestamp: timestamptz, nearest_only: bool) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp before the min_timestamp +that allows execution of the reads at the nearest available replica without blocking.

+

If nearest_only is set to true, reads that cannot be served using the nearest +available replica will error.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
+ +### Enum functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
enum_first(val: anyenum) → anyenum

Returns the first value of the input enum type.

+
Stable
enum_last(val: anyenum) → anyenum

Returns the last value of the input enum type.

+
Stable
enum_range(lower: anyenum, upper: anyenum) → anyenum[]

Returns all values of the input enum in an ordered array between the two arguments (inclusive).

+
Stable
enum_range(val: anyenum) → anyenum[]

Returns all values of the input enum in an ordered array.

+
Stable
+ +### FLOAT functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
abs(val: decimal) → decimal

Calculates the absolute value of val.

+
Immutable
abs(val: float) → float

Calculates the absolute value of val.

+
Immutable
abs(val: int) → int

Calculates the absolute value of val.

+
Immutable
acos(val: float) → float

Calculates the inverse cosine of val.

+
Immutable
acosd(val: float) → float

Calculates the inverse cosine of val with the result in degrees.

+
Immutable
acosh(val: float) → float

Calculates the inverse hyperbolic cosine of val.

+
Immutable
asin(val: float) → float

Calculates the inverse sine of val.

+
Immutable
asind(val: float) → float

Calculates the inverse sine of val with the result in degrees.

+
Immutable
asinh(val: float) → float

Calculates the inverse hyperbolic sine of val.

+
Immutable
atan(val: float) → float

Calculates the inverse tangent of val.

+
Immutable
atan2(x: float, y: float) → float

Calculates the inverse tangent of x/y.

+
Immutable
atan2d(x: float, y: float) → float

Calculates the inverse tangent of x/y with the result in degrees.

+
Immutable
atand(val: float) → float

Calculates the inverse tangent of val with the result in degrees.

+
Immutable
atanh(val: float) → float

Calculates the inverse hyperbolic tangent of val.

+
Immutable
cbrt(val: decimal) → decimal

Calculates the cube root (∛) of val.

+
Immutable
cbrt(val: float) → float

Calculates the cube root (∛) of val.

+
Immutable
ceil(val: decimal) → decimal

Calculates the smallest integer not smaller than val.

+
Immutable
ceil(val: float) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceil(val: int) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: decimal) → decimal

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: float) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: int) → float

Calculates the smallest integer not smaller than val.

+
Immutable
cos(val: float) → float

Calculates the cosine of val.

+
Immutable
cosd(val: float) → float

Calculates the cosine of val where val is in degrees.

+
Immutable
cosh(val: float) → float

Calculates the hyperbolic cosine of val.

+
Immutable
cot(val: float) → float

Calculates the cotangent of val.

+
Immutable
cotd(val: float) → float

Calculates the cotangent of val where val is in degrees.

+
Immutable
degrees(val: float) → float

Converts val as a radian value to a degree value.

+
Immutable
div(x: decimal, y: decimal) → decimal

Calculates the integer quotient of x/y.

+
Immutable
div(x: float, y: float) → float

Calculates the integer quotient of x/y.

+
Immutable
div(x: int, y: int) → int

Calculates the integer quotient of x/y.

+
Immutable
exp(val: decimal) → decimal

Calculates e ^ val.

+
Immutable
exp(val: float) → float

Calculates e ^ val.

+
Immutable
floor(val: decimal) → decimal

Calculates the largest integer not greater than val.

+
Immutable
floor(val: float) → float

Calculates the largest integer not greater than val.

+
Immutable
floor(val: int) → float

Calculates the largest integer not greater than val.

+
Immutable
isnan(val: decimal) → bool

Returns true if val is NaN, false otherwise.

+
Immutable
isnan(val: float) → bool

Returns true if val is NaN, false otherwise.

+
Immutable
ln(val: decimal) → decimal

Calculates the natural log of val.

+
Immutable
ln(val: float) → float

Calculates the natural log of val.

+
Immutable
log(b: decimal, x: decimal) → decimal

Calculates the base b log of x.

+
Immutable
log(b: float, x: float) → float

Calculates the base b log of x.

+
Immutable
log(val: decimal) → decimal

Calculates the base 10 log of val.

+
Immutable
log(val: float) → float

Calculates the base 10 log of val.

+
Immutable
mod(x: decimal, y: decimal) → decimal

Calculates x%y.

+
Immutable
mod(x: float, y: float) → float

Calculates x%y.

+
Immutable
mod(x: int, y: int) → int

Calculates x%y.

+
Immutable
pi() → float

Returns the value for pi (3.141592653589793).

+
Immutable
pow(x: decimal, y: decimal) → decimal

Calculates x^y.

+
Immutable
pow(x: float, y: float) → float

Calculates x^y.

+
Immutable
pow(x: int, y: int) → int

Calculates x^y.

+
Immutable
power(x: decimal, y: decimal) → decimal

Calculates x^y.

+
Immutable
power(x: float, y: float) → float

Calculates x^y.

+
Immutable
power(x: int, y: int) → int

Calculates x^y.

+
Immutable
radians(val: float) → float

Converts val as a degree value to a radians value.

+
Immutable
random() → float

Returns a random floating-point number between 0 (inclusive) and 1 (exclusive). Note that the value contains at most 53 bits of randomness.

+
Volatile
round(input: decimal, decimal_accuracy: int) → decimal

Keeps decimal_accuracy number of figures to the right of the zero position in input using half away from zero rounding. If decimal_accuracy is not in the range -2^31…(2^31-1), the results are undefined.

+
Immutable
round(input: float, decimal_accuracy: int) → float

Keeps decimal_accuracy number of figures to the right of the zero position in input using half to even (banker’s) rounding.

+
Immutable
round(val: decimal) → decimal

Rounds val to the nearest integer, half away from zero: round(+/-2.4) = +/-2, round(+/-2.5) = +/-3.

+
Immutable
round(val: float) → float

Rounds val to the nearest integer using half to even (banker’s) rounding.

+
Immutable
setseed(seed: float) → void

Sets the seed for subsequent random() calls in this session (value between -1.0 and 1.0, inclusive). There are no guarantees as to how this affects the seed of random() calls that appear in the same query as setseed().

+
Volatile
sign(val: decimal) → decimal

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sign(val: float) → float

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sign(val: int) → int

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sin(val: float) → float

Calculates the sine of val.

+
Immutable
sind(val: float) → float

Calculates the sine of val where val is in degrees.

+
Immutable
sinh(val: float) → float

Calculates the hyperbolic sine of val.

+
Immutable
sqrt(val: decimal) → decimal

Calculates the square root of val.

+
Immutable
sqrt(val: float) → float

Calculates the square root of val.

+
Immutable
tan(val: float) → float

Calculates the tangent of val.

+
Immutable
tand(val: float) → float

Calculates the tangent of val where val is in degrees.

+
Immutable
tanh(val: float) → float

Calculates the hyperbolic tangent of val.

+
Immutable
trunc(val: decimal) → decimal

Truncates the decimal values of val.

+
Immutable
trunc(val: decimal, scale: int) → decimal

Truncates val to scale decimal places.

+
Immutable
trunc(val: float) → float

Truncates the decimal values of val.

+
Immutable
+ +### Full Text Search functions + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
phraseto_tsquery(config: string, text: string) → tsquery

Converts text to a tsquery, normalizing words according to the specified configuration. The <-> operator is inserted between each token in the input.

+
Immutable
phraseto_tsquery(text: string) → tsquery

Converts text to a tsquery, normalizing words according to the default configuration. The <-> operator is inserted between each token in the input.

+
Stable
plainto_tsquery(config: string, text: string) → tsquery

Converts text to a tsquery, normalizing words according to the specified configuration. The & operator is inserted between each token in the input.

+
Immutable
plainto_tsquery(text: string) → tsquery

Converts text to a tsquery, normalizing words according to the default configuration. The & operator is inserted between each token in the input.

+
Stable
to_tsquery(config: string, text: string) → tsquery

Converts the input text into a tsquery by normalizing each word in the input according to the specified configuration. The input must already be formatted like a tsquery, in other words, subsequent tokens must be connected by a tsquery operator (&, |, <->, !).

+
Immutable
to_tsquery(text: string) → tsquery

Converts the input text into a tsquery by normalizing each word in the input according to the default configuration. The input must already be formatted like a tsquery, in other words, subsequent tokens must be connected by a tsquery operator (&, |, <->, !).

+
Stable
to_tsvector(config: string, text: string) → tsvector

Converts text to a tsvector, normalizing words according to the specified configuration. Position information is included in the result.

+
Immutable
to_tsvector(text: string) → tsvector

Converts text to a tsvector, normalizing words according to the default configuration. Position information is included in the result.

+
Stable
ts_parse(parser_name: string, document: string) → tuple{int AS tokid, string AS token}

ts_parse parses the given document and returns a series of records, one for each token produced by parsing. Each record includes a tokid showing the assigned token type and a token which is the text of the token.

+
Stable
ts_rank(vector: tsvector, query: tsquery) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(vector: tsvector, query: tsquery, normalization: int) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(weights: float[], vector: tsvector, query: tsquery) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(weights: float[], vector: tsvector, query: tsquery, normalization: int) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
+ +### Fuzzy String Matching functions + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
levenshtein(source: string, target: string) → int

Calculates the Levenshtein distance between two strings. Maximum input length is 255 characters.

+
Immutable
levenshtein(source: string, target: string, ins_cost: int, del_cost: int, sub_cost: int) → int

Calculates the Levenshtein distance between two strings. The cost parameters specify how much to charge for each edit operation. Maximum input length is 255 characters.

+
Immutable
levenshtein_less_equal(source: string, target: string, ins_cost: int, del_cost: int, sub_cost: int, max_d: int) → int

Calculates the Levenshtein distance between two strings. The cost parameters specify how much to charge for each edit operation. If actual distance is less or equal then max_d, then it returns the distance. Otherwise this function returns a value greater than max_d. The maximum length of the input strings is 255 characters.

+
Immutable
levenshtein_less_equal(source: string, target: string, max_d: int) → int

Calculates the Levenshtein distance between two strings. If actual distance is less or equal then max_d, then it returns the distance. Otherwise this function returns a value greater than max_d. The maximum length of the input strings is 255 characters.

+
Immutable
metaphone(source: string, max_output_length: int) → string

Convert a string to its Metaphone code. Maximum input length is 255 characters.

+
Immutable
soundex(source: string) → string

Convert a string to its Soundex code.

+
Immutable
+ +### ID generation functions + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
experimental_uuid_v4() → bytes

Returns a UUID.

+
Volatile
gen_random_ulid() → uuid

Generates a random ULID and returns it as a value of UUID type.

+
Volatile
gen_random_uuid() → uuid

Generates a random version 4 UUID, and returns it as a value of UUID type.

+
Volatile
unique_rowid() → int

Returns a unique ID used by CockroachDB to generate unique row IDs if a Primary Key isn’t defined for the table. The value is a combination of the insert timestamp and the ID of the node executing the statement, which guarantees this combination is globally unique. However, there can be gaps and the order is not completely guaranteed.

+
Volatile
unordered_unique_rowid() → int

Returns a unique ID. The value is a combination of the insert timestamp (bit-reversed) and the ID of the node executing the statement, which guarantees this combination is globally unique. The way it is generated is statistically likely to not have any ordering relative to previously generated values.

+
Volatile
uuid_generate_v1() → uuid

Generates a version 1 UUID, and returns it as a value of UUID type. To avoid exposing the server’s real MAC address, this uses a random MAC address and a timestamp. Essentially, this is an alias for uuid_generate_v1mc.

+
Volatile
uuid_generate_v1mc() → uuid

Generates a version 1 UUID, and returns it as a value of UUID type. This uses a random MAC address and a timestamp.

+
Volatile
uuid_generate_v3(namespace: uuid, name: string) → uuid

Generates a version 3 UUID in the given namespace using the specified input name, with md5 as the hashing method. The namespace should be one of the special constants produced by the uuid_ns_*() functions.

+
Immutable
uuid_generate_v4() → uuid

Generates a random version 4 UUID, and returns it as a value of UUID type.

+
Volatile
uuid_generate_v5(namespace: uuid, name: string) → uuid

Generates a version 5 UUID in the given namespace using the specified input name. This is similar to a version 3 UUID, except it uses SHA-1 for hashing.

+
Immutable
uuid_nil() → uuid

Returns a nil UUID constant.

+
Immutable
uuid_ns_dns() → uuid

Returns a constant designating the DNS namespace for UUIDs.

+
Immutable
uuid_ns_oid() → uuid

Returns a constant designating the ISO object identifier (OID) namespace for UUIDs. These are unrelated to the OID type used internally in the database.

+
Immutable
uuid_ns_url() → uuid

Returns a constant designating the URL namespace for UUIDs.

+
Immutable
uuid_ns_x500() → uuid

Returns a constant designating the X.500 distinguished name (DN) namespace for UUIDs.

+
Immutable
uuid_v4() → bytes

Returns a UUID.

+
Volatile
+ +### INET functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
abbrev(val: inet) → string

Converts the combined IP address and prefix length to an abbreviated display format as text. For INET types, this will omit the prefix length if it’s not the default (32 for IPv4, 128 for IPv6).

+

For example, abbrev('192.168.1.2/24') returns '192.168.1.2/24'

+
Immutable
broadcast(val: inet) → inet

Gets the broadcast address for the network address represented by the value.

+

For example, broadcast('192.168.1.2/24') returns '192.168.1.255/24'

+
Immutable
family(val: inet) → int

Extracts the IP family of the value; 4 for IPv4, 6 for IPv6.

+

For example, family('::1') returns 6

+
Immutable
host(val: inet) → string

Extracts the address part of the combined address/prefixlen value as text.

+

For example, host('192.168.1.2/16') returns '192.168.1.2'

+
Immutable
hostmask(val: inet) → inet

Creates an IP host mask corresponding to the prefix length in the value.

+

For example, hostmask('192.168.1.2/16') returns '0.0.255.255'

+
Immutable
masklen(val: inet) → int

Retrieves the prefix length stored in the value.

+

For example, masklen('192.168.1.2/16') returns 16

+
Immutable
netmask(val: inet) → inet

Creates an IP network mask corresponding to the prefix length in the value.

+

For example, netmask('192.168.1.2/16') returns '255.255.0.0'

+
Immutable
set_masklen(val: inet, prefixlen: int) → inet

Sets the prefix length of val to prefixlen.

+

For example, set_masklen('192.168.1.2', 16) returns '192.168.1.2/16'.

+
Immutable
+ +### INT functions + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
crc32c(bytes...) → int

Calculates the CRC-32 hash using the Castagnoli polynomial.

+
Leakproof
crc32c(string...) → int

Calculates the CRC-32 hash using the Castagnoli polynomial.

+
Leakproof
crc32ieee(bytes...) → int

Calculates the CRC-32 hash using the IEEE polynomial.

+
Leakproof
crc32ieee(string...) → int

Calculates the CRC-32 hash using the IEEE polynomial.

+
Leakproof
fnv32(bytes...) → int

Calculates the 32-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv32(string...) → int

Calculates the 32-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv32a(bytes...) → int

Calculates the 32-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv32a(string...) → int

Calculates the 32-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv64(bytes...) → int

Calculates the 64-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv64(string...) → int

Calculates the 64-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv64a(bytes...) → int

Calculates the 64-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv64a(string...) → int

Calculates the 64-bit FNV-1a hash value of a set of values.

+
Leakproof
width_bucket(operand: decimal, b1: decimal, b2: decimal, count: int) → int

Returns the bucket number to which operand would be assigned in a histogram having count equal-width buckets spanning the range b1 to b2. Returns 0 or count+1 for an input outside that range.

+
Immutable
width_bucket(operand: int, b1: int, b2: int, count: int) → int

Returns the bucket number to which operand would be assigned in a histogram having count equal-width buckets spanning the range b1 to b2.

+
Immutable
width_bucket(operand: anyelement, thresholds: anyelement[]) → int

Returns the bucket number to which operand would be assigned given an array listing the lower bounds of the buckets; returns 0 for an input less than the first lower bound; the thresholds array must be sorted, smallest first, or unexpected results will be obtained.

+
Immutable
+ +### JSONB functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_to_json(array: anyelement[]) → jsonb

Returns the array as JSON or JSONB.

+
Stable
array_to_json(array: anyelement[], pretty_bool: bool) → jsonb

Returns the array as JSON or JSONB.

+
Stable
json_array_elements(input: jsonb) → jsonb

Expands a JSON array to a set of JSON values.

+
Immutable
json_array_elements_text(input: jsonb) → string

Expands a JSON array to a set of text values.

+
Immutable
json_array_length(json: jsonb) → int

Returns the number of elements in the outermost JSON or JSONB array.

+
Immutable
json_build_array(any...) → jsonb

Builds a possibly-heterogeneously-typed JSON or JSONB array out of a variadic argument list.

+
Stable
json_build_object(any...) → jsonb

Builds a JSON object out of a variadic argument list.

+
Stable
json_each(input: jsonb) → tuple{string AS key, jsonb AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs.

+
Immutable
json_each_text(input: jsonb) → tuple{string AS key, string AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs. The returned values will be of type text.

+
Immutable
json_extract_path(jsonb, string...) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
json_extract_path_text(jsonb, string...) → string

Returns the JSON value as text pointed to by the variadic arguments.

+
Immutable
json_object(keys: string[], values: string[]) → jsonb

This form of json_object takes keys and values pairwise from two separate arrays. In all other respects it is identical to the one-argument form.

+
Immutable
json_object(texts: string[]) → jsonb

Builds a JSON or JSONB object out of a text array. The array must have exactly one dimension with an even number of members, in which case they are taken as alternating key/value pairs.

+
Immutable
json_populate_record(base: anyelement, from_json: jsonb) → anyelement

Expands the object in from_json to a row whose columns match the record type defined by base.

+
Stable
json_populate_recordset(base: anyelement, from_json: jsonb) → anyelement

Expands the outermost array of objects in from_json to a set of rows whose columns match the record type defined by base

+
Stable
json_remove_path(val: jsonb, path: string[]) → jsonb

Remove the specified path from the JSON object.

+
Immutable
json_set(val: jsonb, path: string[], to: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
json_set(val: jsonb, path: string[], to: jsonb, create_missing: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If create_missing is false, new keys will not be inserted to objects and values will not be prepended or appended to arrays.

+
Immutable
json_strip_nulls(from_json: jsonb) → jsonb

Returns from_json with all object fields that have null values omitted. Other null values are untouched.

+
Immutable
json_typeof(val: jsonb) → string

Returns the type of the outermost JSON value as a text string.

+
Immutable
json_valid(string: string) → bool

Returns whether the given string is valid JSON or not.

+
Immutable
jsonb_array_elements(input: jsonb) → jsonb

Expands a JSON array to a set of JSON values.

+
Immutable
jsonb_array_elements_text(input: jsonb) → string

Expands a JSON array to a set of text values.

+
Immutable
jsonb_array_length(json: jsonb) → int

Returns the number of elements in the outermost JSON or JSONB array.

+
Immutable
jsonb_build_array(any...) → jsonb

Builds a possibly-heterogeneously-typed JSON or JSONB array out of a variadic argument list.

+
Stable
jsonb_build_object(any...) → jsonb

Builds a JSON object out of a variadic argument list.

+
Stable
jsonb_each(input: jsonb) → tuple{string AS key, jsonb AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs.

+
Immutable
jsonb_each_text(input: jsonb) → tuple{string AS key, string AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs. The returned values will be of type text.

+
Immutable
jsonb_exists_any(json: jsonb, array: string[]) → bool

Returns whether any of the strings in the text array exist as top-level keys or array elements

+
Immutable
jsonb_extract_path(jsonb, string...) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
jsonb_extract_path_text(jsonb, string...) → string

Returns the JSON value as text pointed to by the variadic arguments.

+
Immutable
jsonb_insert(target: jsonb, path: string[], new_val: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments. new_val will be inserted before path target.

+
Immutable
jsonb_insert(target: jsonb, path: string[], new_val: jsonb, insert_after: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If insert_after is true (default is false), new_val will be inserted after path target.

+
Immutable
jsonb_object(keys: string[], values: string[]) → jsonb

This form of json_object takes keys and values pairwise from two separate arrays. In all other respects it is identical to the one-argument form.

+
Immutable
jsonb_object(texts: string[]) → jsonb

Builds a JSON or JSONB object out of a text array. The array must have exactly one dimension with an even number of members, in which case they are taken as alternating key/value pairs.

+
Immutable
jsonb_populate_record(base: anyelement, from_json: jsonb) → anyelement

Expands the object in from_json to a row whose columns match the record type defined by base.

+
Stable
jsonb_populate_recordset(base: anyelement, from_json: jsonb) → anyelement

Expands the outermost array of objects in from_json to a set of rows whose columns match the record type defined by base

+
Stable
jsonb_pretty(val: jsonb) → string

Returns the given JSON value as a STRING indented and with newlines.

+
Immutable
jsonb_set(val: jsonb, path: string[], to: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
jsonb_set(val: jsonb, path: string[], to: jsonb, create_missing: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If create_missing is false, new keys will not be inserted to objects and values will not be prepended or appended to arrays.

+
Immutable
jsonb_strip_nulls(from_json: jsonb) → jsonb

Returns from_json with all object fields that have null values omitted. Other null values are untouched.

+
Immutable
jsonb_typeof(val: jsonb) → string

Returns the type of the outermost JSON value as a text string.

+
Immutable
row_to_json(row: tuple) → jsonb

Returns the row as a JSON object.

+
Stable
to_json(val: anyelement) → jsonb

Returns the value as JSON or JSONB.

+
Stable
to_jsonb(val: anyelement) → jsonb

Returns the value as JSON or JSONB.

+
Stable
+ +### Jsonpath functions + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
jsonb_path_exists(target: jsonb, path: jsonpath) → bool

Checks whether the JSON path returns any item for the specified JSON value.

+
Immutable
jsonb_path_exists(target: jsonb, path: jsonpath, vars: jsonb) → bool

Checks whether the JSON path returns any item for the specified JSON value. +The vars argument must be a JSON object, and its fields provide named +values to be substituted into the jsonpath expression.

+
Immutable
jsonb_path_exists(target: jsonb, path: jsonpath, vars: jsonb, silent: bool) → bool

Checks whether the JSON path returns any item for the specified JSON value. +The vars argument must be a JSON object, and its fields provide named +values to be substituted into the jsonpath expression. If the silent +argument is true, the function suppresses the following errors: +missing object field or array element, unexpected JSON item type, +datetime and numeric errors.

+
Immutable
jsonb_path_match(target: jsonb, path: jsonpath) → bool

Returns the SQL boolean result of a JSON path predicate check +for the specified JSON value. (This is useful only with predicate check +expressions, not SQL-standard JSON path expressions, since it will +either fail or return NULL if the path result is not a single boolean +value.)

+
Immutable
jsonb_path_match(target: jsonb, path: jsonpath, vars: jsonb) → bool

Returns the SQL boolean result of a JSON path predicate check +for the specified JSON value. (This is useful only with predicate check +expressions, not SQL-standard JSON path expressions, since it will +either fail or return NULL if the path result is not a single boolean +value.) The vars argument must be a JSON object, and its fields provide +named values to be substituted into the jsonpath expression.

+
Immutable
jsonb_path_match(target: jsonb, path: jsonpath, vars: jsonb, silent: bool) → bool

Returns the SQL boolean result of a JSON path predicate check +for the specified JSON value. (This is useful only with predicate check +expressions, not SQL-standard JSON path expressions, since it will +either fail or return NULL if the path result is not a single boolean +value.) The vars argument must be a JSON object, and its fields provide +named values to be substituted into the jsonpath expression. If the +silent argument is true, the function suppresses the following errors: +missing object field or array element, unexpected JSON item type, +datetime and numeric errors.

+
Immutable
jsonb_path_query(target: jsonb, path: jsonpath) → jsonb

Returns all JSON items returned by the JSON path for the specified JSON value.

+
Immutable
jsonb_path_query(target: jsonb, path: jsonpath, vars: jsonb) → jsonb

Returns all JSON items returned by the JSON path for the specified JSON value. +The vars argument must be a JSON object, and its fields provide named values +to be substituted into the jsonpath expression.

+
Immutable
jsonb_path_query(target: jsonb, path: jsonpath, vars: jsonb, silent: bool) → jsonb

Returns all JSON items returned by the JSON path for the specified JSON value. +The vars argument must be a JSON object, and its fields provide named values +to be substituted into the jsonpath expression. If the silent argument is true, +the function suppresses the following errors: missing object field or array +element, unexpected JSON item type, datetime and numeric errors.

+
Immutable
jsonb_path_query_array(target: jsonb, path: jsonpath) → jsonb

Returns all JSON items returned by the JSON path for the +specified JSON value, as a JSON array.

+
Immutable
jsonb_path_query_array(target: jsonb, path: jsonpath, vars: jsonb) → jsonb

Returns all JSON items returned by the JSON path for the +specified JSON value, as a JSON array. The vars argument must be a +JSON object, and its fields provide named values to be substituted +into the jsonpath expression.

+
Immutable
jsonb_path_query_array(target: jsonb, path: jsonpath, vars: jsonb, silent: bool) → jsonb

Returns all JSON items returned by the JSON path for the +specified JSON value, as a JSON array. The vars argument must be a +JSON object, and its fields provide named values to be substituted +into the jsonpath expression. If the silent argument is true, the +function suppresses the following errors: missing object field or +array element, unexpected JSON item type, datetime and numeric errors.

+
Immutable
jsonb_path_query_first(target: jsonb, path: jsonpath) → jsonb

Returns the first JSON item returned by the JSON path for the +specified JSON value, or NULL if there are no results.

+
Immutable
jsonb_path_query_first(target: jsonb, path: jsonpath, vars: jsonb) → jsonb

Returns the first JSON item returned by the JSON path for the +specified JSON value, or NULL if there are no results. The vars +argument must be a JSON object, and its fields provide named values +to be substituted into the jsonpath expression.

+
Immutable
jsonb_path_query_first(target: jsonb, path: jsonpath, vars: jsonb, silent: bool) → jsonb

Returns the first JSON item returned by the JSON path for the +specified JSON value, or NULL if there are no results. The vars +argument must be a JSON object, and its fields provide named values +to be substituted into the jsonpath expression. If the silent argument is true, the +function suppresses the following errors: missing object field or +array element, unexpected JSON item type, datetime and numeric errors.

+
Immutable
+ +### LTree functions + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
index(a: ltree, b: ltree) → int

position of first occurrence of b in a; -1 if not found

+
Immutable
index(a: ltree, b: ltree, offset: int) → int

position of first occurrence of b in a, starting at offset; -1 if not found

+
Immutable
lca(ltree, ltree, ltree...) → ltree

lowest common ancestor, i.e., longest common prefix of paths

+
Immutable
lca(ltree[]: ltree[]) → ltree

lowest common ancestor, i.e., longest common prefix of paths

+
Immutable
ltree2text(ltree: ltree) → string

cast ltree to text

+
Immutable
nlevel(ltree: ltree) → int

number of labels in path ltree

+
Immutable
subltree(ltree: ltree, start: int, end: int) → ltree

subpath of ltree from position start to position end-1 (counting from 0)

+
Immutable
subpath(ltree: ltree, offset: int) → ltree

subpath of ltree starting at position offset, extending to end of path. If offset is negative, subpath starts that far from the end of the path.

+
Immutable
subpath(ltree: ltree, offset: int, length: int) → ltree

subpath of ltree starting at position offset, length length. If offset is negative, subpath starts that far from the end of the path. If length is negative, leaves that many labels off the end of the path.

+
Immutable
text2ltree(text: string) → ltree

cast text to ltree

+
Immutable
+ +### Multi-region functions + + + + + + + +
Function → ReturnsDescriptionVolatility
default_to_database_primary_region(val: string) → string

Returns the given region if the region has been added to the current database. +Otherwise, this will return the primary region of the current database. +This will error if the current database is not a multi-region database.

+
Stable
gateway_region() → string

Returns the region of the connection’s current node as defined by +the locality flag on node startup. Returns an error if no region is set.

+
Stable
rehome_row() → string

Returns the region of the connection’s current node as defined by +the locality flag on node startup. Returns an error if no region is set.

+
Stable
+ +### PGVector functions + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cosine_distance(v1: vector, v2: vector) → float

Returns the cosine distance between the two vectors.

+
Immutable
inner_product(v1: vector, v2: vector) → float

Returns the inner product between the two vectors.

+
Immutable
l1_distance(v1: vector, v2: vector) → float

Returns the Manhattan distance between the two vectors.

+
Immutable
l2_distance(v1: vector, v2: vector) → float

Returns the Euclidean distance between the two vectors.

+
Immutable
vector_dims(vector: vector) → int

Returns the number of the dimensions in the vector.

+
Immutable
vector_norm(vector: vector) → float

Returns the Euclidean norm of the vector.

+
Immutable
+ +### STRING[] functions + + + + + + +
Function → ReturnsDescriptionVolatility
regexp_split_to_array(string: string, pattern: string) → string[]

Split string using a POSIX regular expression as the delimiter.

+
Immutable
regexp_split_to_array(string: string, pattern: string, flags: string) → string[]

Split string using a POSIX regular expression as the delimiter with flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
+ +### Sequence functions + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
currval(sequence_name: string) → int

Returns the latest value obtained with nextval for this sequence in this session.

+
Volatile
currval(sequence_name: regclass) → int

Returns the latest value obtained with nextval for this sequence in this session.

+
Volatile
lastval() → int

Return value most recently obtained with nextval in this session.

+
Volatile
nextval(sequence_name: string) → int

Advances the given sequence and returns its new value.

+
Volatile
nextval(sequence_name: regclass) → int

Advances the given sequence and returns its new value.

+
Volatile
setval(sequence_name: string, value: int) → int

Set the given sequence’s current value. The next call to nextval will return value + Increment

+
Volatile
setval(sequence_name: string, value: int, is_called: bool) → int

Set the given sequence’s current value. If is_called is false, the next call to nextval will return value; otherwise value + Increment.

+
Volatile
setval(sequence_name: regclass, value: int) → int

Set the given sequence’s current value. The next call to nextval will return value + Increment

+
Volatile
setval(sequence_name: regclass, value: int, is_called: bool) → int

Set the given sequence’s current value. If is_called is false, the next call to nextval will return value; otherwise value + Increment.

+
Volatile
+ +### Set-returning functions + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
aclexplode(aclitems: string[]) → tuple{oid AS grantor, oid AS grantee, string AS privilege_type, bool AS is_grantable}

Produces a virtual table containing aclitem stuff (returns no rows as this feature is unsupported in CockroachDB)

+
Stable
generate_series(start: int, end: int) → int

Produces a virtual table containing the integer values from start to end, inclusive.

+
Immutable
generate_series(start: int, end: int, step: int) → int

Produces a virtual table containing the integer values from start to end, inclusive, by increment of step.

+
Immutable
generate_series(start: timestamp, end: timestamp, step: interval) → timestamp

Produces a virtual table containing the timestamp values from start to end, inclusive, by increment of step.

+
Immutable
generate_series(start: timestamptz, end: timestamptz, step: interval) → timestamptz

Produces a virtual table containing the timestampTZ values from start to end, inclusive, by increment of step.

+
Immutable
generate_subscripts(array: anyelement[]) → int

Returns a series comprising the given array’s subscripts.

+
Immutable
generate_subscripts(array: anyelement[], dim: int) → int

Returns a series comprising the given array’s subscripts.

+
Immutable
generate_subscripts(array: anyelement[], dim: int, reverse: bool) → int

Returns a series comprising the given array’s subscripts.

+

When reverse is true, the series is returned in reverse order.

+
Immutable
information_schema._pg_expandarray(input: anyelement[]) → tuple{anyelement AS x, int AS n}

Returns the input array as a set of rows with an index

+
Immutable
json_object_keys(input: jsonb) → string

Returns sorted set of keys in the outermost JSON object.

+
Immutable
json_to_record(input: jsonb) → tuple

Builds an arbitrary record from a JSON object.

+
Stable
json_to_recordset(input: jsonb) → tuple

Builds an arbitrary set of records from a JSON array of objects.

+
Stable
jsonb_object_keys(input: jsonb) → string

Returns sorted set of keys in the outermost JSON object.

+
Immutable
jsonb_to_record(input: jsonb) → tuple

Builds an arbitrary record from a JSON object.

+
Stable
jsonb_to_recordset(input: jsonb) → tuple

Builds an arbitrary set of records from a JSON array of objects.

+
Stable
pg_get_keywords() → tuple{string AS word, string AS catcode, string AS catdesc}

Produces a virtual table containing the keywords known to the SQL parser.

+
Immutable
pg_options_to_table(options: string[]) → tuple{string AS option_name, string AS option_value}

Converts the options array format to a table.

+
Stable
regexp_split_to_table(string: string, pattern: string) → string

Split string using a POSIX regular expression as the delimiter.

+
Immutable
regexp_split_to_table(string: string, pattern: string, flags: string) → string

Split string using a POSIX regular expression as the delimiter with flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
unnest(anyelement[], anyelement[], anyelement[]...) → tuple{anyelement AS unnest, anyelement AS unnest, anyelement AS unnest}

Returns the input arrays as a set of rows

+
Immutable
unnest(input: anyelement[]) → anyelement

Returns the input array as a set of rows

+
Immutable
workload_index_recs() → tuple{string AS index_rec, bytes[] AS fingerprint_ids}

Returns index recommendations and the fingerprint ids that the indexes will impact

+
Immutable
workload_index_recs(timestamptz: timestamptz) → tuple{string AS index_rec, bytes[] AS fingerprint_ids}

Returns index recommendations and the fingerprint ids that the indexes will impact

+
Immutable
+ +### Spatial functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
_st_contains(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no points of geometry_b lie in the exterior of geometry_a, and there is at least one point in the interior of geometry_b that lies in the interior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_containsproperly(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_b intersects the interior of geometry_a but not the boundary or exterior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_coveredby(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_a is outside geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_coveredby(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_covers(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_b is outside geography_a.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_covers(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_crosses(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a has some - but not all - interior points in common with geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dfullywithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, inclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than or equal to distance units.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dfullywithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, exclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than distance units.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_equals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is spatially equal to geometry_b, i.e. ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = true.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_intersects(geography_a: geography, geography_b: geography) → bool

Returns true if geography_a shares any portion of space with geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_intersects(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_overlaps(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a intersects but does not completely contain geometry_b, or vice versa. “Does not completely” implies ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = false.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_touches(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if the only points in common between geometry_a and geometry_b are on the boundary. Note points do not touch other points.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_within(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is completely inside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
addgeometrycolumn(catalog_name: string, schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(catalog_name: string, schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
geometrytype(geometry: geometry) → string

Returns the type of geometry as a string.

+

This function utilizes the GEOS module.

+
Immutable
geomfromewkb(val: bytes) → geometry

Returns the Geometry from an EWKB representation.

+
Immutable
geomfromewkt(val: string) → geometry

Returns the Geometry from an EWKT representation.

+
Immutable
postgis_addbbox(geometry: geometry) → geometry

Compatibility placeholder function with PostGIS. This does not perform any operation on the Geometry.

+
Immutable
postgis_dropbbox(geometry: geometry) → geometry

Compatibility placeholder function with PostGIS. This does not perform any operation on the Geometry.

+
Immutable
postgis_extensions_upgrade() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_full_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_geos_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_getbbox(geometry: geometry) → box2d

Returns a box2d encapsulating the given Geometry.

+
Immutable
postgis_hasbbox(geometry: geometry) → bool

Returns whether a given Geometry has a bounding box. False for points and empty geometries; always true otherwise.

+
Immutable
postgis_lib_build_date() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_lib_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_liblwgeom_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_libxml_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_proj_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_build_date() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_installed() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_released() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_wagyu_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
st_3dlength(geometry: geometry) → float

Returns the 3-dimensional or 2-dimensional length of the geometry.

+

Note ST_3DLength is only valid for LineString or MultiLineString. +For 2-D lines it will return the 2-D length (same as ST_Length and ST_Length2D)

+

This function utilizes the GEOS module.

+
Immutable
st_addmeasure(geometry: geometry, start: float, end: float) → geometry

Returns a copy of a LineString or MultiLineString with measure coordinates linearly interpolated between the specified start and end values. Any existing M coordinates will be overwritten.

+
Immutable
st_addpoint(line_string: geometry, point: geometry) → geometry

Adds a Point to the end of a LineString.

+
Immutable
st_addpoint(line_string: geometry, point: geometry, index: int) → geometry

Adds a Point to a LineString at the given 0-based index (-1 to append).

+
Immutable
st_affine(geometry: geometry, a: float, b: float, c: float, d: float, e: float, f: float, g: float, h: float, i: float, x_off: float, y_off: float, z_off: float) → geometry

Applies a 3D affine transformation to the given geometry.

+

The matrix transformation will be applied as follows for each coordinate: +/ a b c x_off \ / x
+| d e f y_off | | y | +| g h i z_off | | z | +\ 0 0 0 1 / \ 0 /

+
Immutable
st_affine(geometry: geometry, a: float, b: float, d: float, e: float, x_off: float, y_off: float) → geometry

Applies a 2D affine transformation to the given geometry.

+

The matrix transformation will be applied as follows for each coordinate: +/ a b x_off \ / x
+| d e y_off | | y | +\ 0 0 1 / \ 0 /

+
Immutable
st_angle(line1: geometry, line2: geometry) → float

Returns the clockwise angle between two LINESTRING geometries, treating them as vectors between their start- and endpoints. Returns NULL if any vectors have 0 length.

+
Immutable
st_angle(point1: geometry, point2: geometry, point3: geometry) → float

Returns the clockwise angle between the vectors formed by point2,point1 and point2,point3. The arguments must be POINT geometries. Returns NULL if any vectors have 0 length.

+
Immutable
st_angle(point1: geometry, point2: geometry, point3: geometry, point4: geometry) → float

Returns the clockwise angle between the vectors formed by point1,point2 and point3,point4. The arguments must be POINT geometries. Returns NULL if any vectors have 0 length.

+
Immutable
st_area(geography: geography) → float

Returns the area of the given geography in meters^2. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_area(geography: geography, use_spheroid: bool) → float

Returns the area of the given geography in meters^2.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_area(geometry: geometry) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_area(geometry_str: string) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_area2d(geometry: geometry) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_asbinary(geography: geography) → bytes

Returns the WKB representation of a given Geography.

+
Immutable
st_asbinary(geography: geography, xdr_or_ndr: string) → bytes

Returns the WKB representation of a given Geography. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_asbinary(geometry: geometry) → bytes

Returns the WKB representation of a given Geometry.

+
Immutable
st_asbinary(geometry: geometry, xdr_or_ndr: string) → bytes

Returns the WKB representation of a given Geometry. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_asencodedpolyline(geometry: geometry) → string

Returns the geometry as an Encoded Polyline. +This format is used by Google Maps with precision=5 and by Open Source Routing Machine with precision=5 and 6. +Preserves 5 decimal places.

+
Immutable
st_asencodedpolyline(geometry: geometry, precision: int4) → string

Returns the geometry as an Encoded Polyline. +This format is used by Google Maps with precision=5 and by Open Source Routing Machine with precision=5 and 6. +Precision specifies how many decimal places will be preserved in Encoded Polyline. Value should be the same on encoding and decoding, or coordinates will be incorrect.

+
Immutable
st_asewkb(geography: geography) → bytes

Returns the EWKB representation of a given Geography.

+
Immutable
st_asewkb(geometry: geometry) → bytes

Returns the EWKB representation of a given Geometry.

+
Immutable
st_asewkt(geography: geography) → string

Returns the EWKT representation of a given Geography. A default of 15 decimal digits is used.

+
Immutable
st_asewkt(geography: geography, max_decimal_digits: int) → string

Returns the EWKT representation of a given Geography. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_asewkt(geometry: geometry) → string

Returns the EWKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+
Immutable
st_asewkt(geometry: geometry, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_asewkt(geometry_str: string) → string

Returns the EWKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asewkt(geometry_str: string, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geography: geography) → string

Returns the GeoJSON representation of a given Geography. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(geography: geography, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geography with max_decimal_digits output for each coordinate value.

+
Immutable
st_asgeojson(geography: geography, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geography with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option (default for Geography)
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326
  • +
+
Immutable
st_asgeojson(geometry: geometry) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(geometry: geometry, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+
Immutable
st_asgeojson(geometry: geometry, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326 (default for Geometry)
  • +
+
Immutable
st_asgeojson(geometry_str: string) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geometry_str: string, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geometry_str: string, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326 (default for Geometry)
  • +
+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(row: tuple) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(row: tuple, geo_column: string) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. Coordinates have a maximum of 9 decimal digits.

+
Stable
st_asgeojson(row: tuple, geo_column: string, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. max_decimal_digits will be output for each coordinate value.

+
Stable
st_asgeojson(row: tuple, geo_column: string, max_decimal_digits: int, pretty: bool) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. max_decimal_digits will be output for each coordinate value. Output will be pretty printed in JSON if pretty is true.

+
Stable
st_ashexewkb(geography: geography) → string

Returns the EWKB representation in hex of a given Geography.

+
Immutable
st_ashexewkb(geography: geography, xdr_or_ndr: string) → string

Returns the EWKB representation in hex of a given Geography. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_ashexewkb(geometry: geometry) → string

Returns the EWKB representation in hex of a given Geometry.

+
Immutable
st_ashexewkb(geometry: geometry, xdr_or_ndr: string) → string

Returns the EWKB representation in hex of a given Geometry. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_ashexwkb(geography: geography) → string

Returns the WKB representation in hex of a given Geography.

+
Immutable
st_ashexwkb(geometry: geometry) → string

Returns the WKB representation in hex of a given Geometry.

+
Immutable
st_askml(geography: geography) → string

Returns the KML representation of a given Geography.

+
Immutable
st_askml(geometry: geometry) → string

Returns the KML representation of a given Geometry.

+
Immutable
st_askml(geometry_str: string) → string

Returns the KML representation of a given Geometry.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds. +Uses 256 as the buffer size in tile coordinate space for geometry clipping. +Uses 4096 as the tile extent size in tile coordinate space.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be clipped and transformed.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds. +Uses 256 as the buffer size in tile coordinate space for geometry clipping.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be clipped and transformed.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int, buffer: int) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be clipped and transformed.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int, buffer: int, clip: bool) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds if required.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be transformed, and clipped if required.

+
Immutable
st_astext(geography: geography) → string

Returns the WKT representation of a given Geography. A default of 15 decimal digits is used.

+
Immutable
st_astext(geography: geography, max_decimal_digits: int) → string

Returns the WKT representation of a given Geography. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_astext(geometry: geometry) → string

Returns the WKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+
Immutable
st_astext(geometry: geometry, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_astext(geometry_str: string) → string

Returns the WKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_astext(geometry_str: string, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int, precision_z: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int, precision_z: int, precision_m: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_azimuth(geography_a: geography, geography_b: geography) → float

Returns the azimuth in radians of the segment defined by the given point geographies, or NULL if the two points are coincident. It is solved using the Inverse geodesic problem.

+

The azimuth angle is referenced from north, and is positive clockwise: North = 0; East = π/2; South = π; West = 3π/2.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_azimuth(geometry_a: geometry, geometry_b: geometry) → float

Returns the azimuth in radians of the segment defined by the given point geometries, or NULL if the two points are coincident.

+

The azimuth angle is referenced from north, and is positive clockwise: North = 0; East = π/2; South = π; West = 3π/2.

+
Immutable
st_bdpolyfromtext(str: string, srid: int) → geometry

Returns a Polygon from multilinestring WKT with a SRID. If the input is not a multilinestring an error will be thrown.

+
Immutable
st_boundary(geometry: geometry) → geometry

Returns the closure of the combinatorial boundary of this Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_box2dfromgeohash(geohash: string) → box2d

Return a Box2D from a GeoHash string with max precision.

+
Immutable
st_box2dfromgeohash(geohash: string, precision: int) → box2d

Return a Box2D from a GeoHash string with supplied precision.

+
Immutable
st_buffer(geography: geography, distance: float) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geography: geography, distance: float, buffer_style_params: string) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geography: geography, distance: float, quad_segs: int) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geometry: geometry, distance: decimal) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float, buffer_style_params: string) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float, quad_segs: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry_str: string, distance: decimal) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float, buffer_style_params: string) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float, quad_segs: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_centroid(geography: geography) → geography

Returns the centroid of given geography. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_centroid(geography: geography, use_spheroid: bool) → geography

Returns the centroid of given geography.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_centroid(geometry: geometry) → geometry

Returns the centroid of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_centroid(geometry_str: string) → geometry

Returns the centroid of the given geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_clipbybox2d(geometry: geometry, box2d: box2d) → geometry

Clips the geometry to conform to the bounding box specified by box2d.

+
Immutable
st_closestpoint(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the 2-dimensional point on geometry_a that is closest to geometry_b. This is the first point of the shortest line.

+
Immutable
st_collectionextract(geometry: geometry, type: int) → geometry

Given a collection, returns a multitype consisting only of elements of the specified type. If there are no elements of the given type, an EMPTY geometry is returned. Types are specified as 1=POINT, 2=LINESTRING, 3=POLYGON - other types are not supported.

+
Immutable
st_collectionhomogenize(geometry: geometry) → geometry

Returns the “simplest” representation of a collection’s contents. Collections of a single type will be returned as an appropriate multitype, or a singleton if it only contains a single geometry.

+
Immutable
st_combinebbox(box2d: box2d, geometry: geometry) → box2d

Combines the current bounding box with the bounding box of the Geometry.

+
Immutable
st_contains(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no points of geometry_b lie in the exterior of geometry_a, and there is at least one point in the interior of geometry_b that lies in the interior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_containsproperly(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_b intersects the interior of geometry_a but not the boundary or exterior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_convexhull(geometry: geometry) → geometry

Returns a geometry that represents the Convex Hull of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_coorddim(geometry: geometry) → int

Returns the number of coordinate dimensions of a given Geometry.

+
Immutable
st_coveredby(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_a is outside geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_coveredby(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_coveredby(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_covers(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_b is outside geography_a.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_covers(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_covers(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_crosses(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a has some - but not all - interior points in common with geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dfullywithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, inclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than or equal to distance units.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dfullywithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, exclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than distance units.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_difference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the difference of two Geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_dimension(geometry: geometry) → int

Returns the number of topological dimensions of a given Geometry.

+
Immutable
st_disjoint(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a does not overlap, touch or is within geometry_b.

+

This function utilizes the GEOS module.

+
Immutable
st_distance(geography_a: geography, geography_b: geography) → float

Returns the distance in meters between geography_a and geography_b. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_distance(geography_a: geography, geography_b: geography, use_spheroid: bool) → float

Returns the distance in meters between geography_a and geography_b.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_distance(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance between the given geometries.

+
Immutable
st_distance(geometry_a_str: string, geometry_b_str: string) → float

Returns the distance between the given geometries.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_distancesphere(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance in meters between geometry_a and geometry_b assuming the coordinates represent lng/lat points on a sphere.

+

This function utilizes the S2 library for spherical calculations.

+
Immutable
st_distancespheroid(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance in meters between geometry_a and geometry_b assuming the coordinates represent lng/lat points on a spheroid.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_dwithin(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geometry_a_str: string, geometry_b_str: string, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geometry_a_str: string, geometry_b_str: string, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_endpoint(geometry: geometry) → geometry

Returns the last point of a geometry which has shape LineString. Returns NULL if the geometry is not a LineString.

+
Immutable
st_envelope(box2d: box2d) → geometry

Returns a bounding geometry for the given box.

+
Immutable
st_envelope(geometry: geometry) → geometry

Returns a bounding envelope for the given geometry.

+

For geometries which have a POINT or LINESTRING bounding box (i.e. is a single point +or a horizontal or vertical line), a POINT or LINESTRING is returned. Otherwise, the +returned POLYGON will be ordered Bottom Left, Top Left, Top Right, Bottom Right, +Bottom Left.

+
Immutable
st_equals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is spatially equal to geometry_b, i.e. ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = true.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_estimatedextent(schema_name: string, table_name: string, geocolumn_name: string) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+
Stable
st_estimatedextent(schema_name: string, table_name: string, geocolumn_name: string, parent_only: bool) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+

The parent_only boolean is always ignored.

+
Stable
st_estimatedextent(table_name: string, geocolumn_name: string) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+
Stable
st_expand(box2d: box2d, delta: float) → box2d

Extends the box2d by delta units across all dimensions.

+
Immutable
st_expand(box2d: box2d, delta_x: float, delta_y: float) → box2d

Extends the box2d by delta_x units in the x dimension and delta_y units in the y dimension.

+
Immutable
st_expand(geometry: geometry, delta: float) → geometry

Extends the bounding box represented by the geometry by delta units across all dimensions, returning a Polygon representing the new bounding box.

+
Immutable
st_expand(geometry: geometry, delta_x: float, delta_y: float) → geometry

Extends the bounding box represented by the geometry by delta_x units in the x dimension and delta_y units in the y dimension, returning a Polygon representing the new bounding box.

+
Immutable
st_exteriorring(geometry: geometry) → geometry

Returns the exterior ring of a Polygon as a LineString. Returns NULL if the shape is not a Polygon.

+
Immutable
st_flipcoordinates(geometry: geometry) → geometry

Returns a new geometry with the X and Y axes flipped.

+
Immutable
st_force2d(geometry: geometry) → geometry

Returns a Geometry that is forced into XY layout with any Z or M dimensions discarded.

+
Immutable
st_force3d(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3d(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3dm(geometry: geometry) → geometry

Returns a Geometry that is forced into XYM layout. If a M coordinate doesn’t exist, it will be set to 0. If a Z coordinate is present, it will be discarded.

+
Immutable
st_force3dm(geometry: geometry, defaultM: float) → geometry

Returns a Geometry that is forced into XYM layout. If a M coordinate doesn’t exist, it will be set to the specified default M value. If a Z coordinate is present, it will be discarded.

+
Immutable
st_force3dz(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3dz(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate is present, it will be discarded.

+
Immutable
st_force4d(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZM layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate doesn’t exist, it will be set to 0.

+
Immutable
st_force4d(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate doesn’t exist, it will be set to 0.

+
Immutable
st_force4d(geometry: geometry, defaultZ: float, defaultM: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified Z value. If a M coordinate doesn’t exist, it will be set to the specified M value.

+
Immutable
st_forcecollection(geometry: geometry) → geometry

Converts the geometry into a GeometryCollection.

+
Immutable
st_forcepolygonccw(geometry: geometry) → geometry

Returns a Geometry where all Polygon objects have exterior rings in the counter-clockwise orientation and interior rings in the clockwise orientation. Non-Polygon objects are unchanged.

+
Immutable
st_forcepolygoncw(geometry: geometry) → geometry

Returns a Geometry where all Polygon objects have exterior rings in the clockwise orientation and interior rings in the counter-clockwise orientation. Non-Polygon objects are unchanged.

+
Immutable
st_frechetdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the Frechet distance between the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_frechetdistance(geometry_a: geometry, geometry_b: geometry, densify_frac: float) → float

Returns the Frechet distance between the given geometries, with the given segment densification (range 0.0-1.0, -1 to disable).

+

Smaller densify_frac gives a more accurate Fréchet distance. However, the computation time and memory usage increases with the square of the number of subsegments.

+

This function utilizes the GEOS module.

+
Immutable
st_generatepoints(geometry: geometry, npoints: int4) → geometry

Generates pseudo-random points until the requested number are found within the input area. Uses system time as a seed. The requested number of points must not be larger than 65336.

+
Volatile
st_generatepoints(geometry: geometry, npoints: int4, seed: int4) → geometry

Generates pseudo-random points until the requested number are found within the input area. The requested number of points must not be larger than 65336.

+
Immutable
st_geogfromewkb(val: bytes) → geography

Returns the Geography from an EWKB representation.

+
Immutable
st_geogfromewkt(val: string) → geography

Returns the Geography from an EWKT representation.

+
Immutable
st_geogfromgeojson(val: string) → geography

Returns the Geography from a GeoJSON representation.

+
Immutable
st_geogfromgeojson(val: jsonb) → geography

Returns the Geography from a GeoJSON representation.

+
Immutable
st_geogfromtext(str: string, srid: int) → geography

Returns the Geography from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geogfromtext(val: string) → geography

Returns the Geography from a WKT or EWKT representation.

+
Immutable
st_geogfromwkb(bytes: bytes, srid: int) → geography

Returns the Geography from a WKB (or EWKB) representation with the given SRID set.

+
Immutable
st_geogfromwkb(val: bytes) → geography

Returns the Geography from a WKB (or EWKB) representation.

+
Immutable
st_geographyfromtext(str: string, srid: int) → geography

Returns the Geography from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geographyfromtext(val: string) → geography

Returns the Geography from a WKT or EWKT representation.

+
Immutable
st_geohash(geography: geography) → string

Returns a GeoHash representation of the geography with full precision if a point is provided, or with variable precision based on the size of the feature.

+
Immutable
st_geohash(geography: geography, precision: int) → string

Returns a GeoHash representation of the geography with the supplied precision.

+
Immutable
st_geohash(geometry: geometry) → string

Returns a GeoHash representation of the geometry with full precision if a point is provided, or with variable precision based on the size of the feature. This will error if any coordinates are outside the bounds of longitude/latitude.

+
Immutable
st_geohash(geometry: geometry, precision: int) → string

Returns a GeoHash representation of the geometry with the supplied precision. This will error if any coordinates are outside the bounds of longitude/latitude.

+
Immutable
st_geomcollfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not GeometryCollection, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geomcollfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geomcollfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geomcollfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geometryfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geometryfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_geometryn(geometry: geometry, n: int) → geometry

Returns the n-th Geometry (1-indexed). Returns NULL if out of bounds.

+
Immutable
st_geometrytype(geometry: geometry) → string

Returns the type of geometry as a string prefixed with ST_.

+

This function utilizes the GEOS module.

+
Immutable
st_geomfromewkb(val: bytes) → geometry

Returns the Geometry from an EWKB representation.

+
Immutable
st_geomfromewkt(val: string) → geometry

Returns the Geometry from an EWKT representation.

+
Immutable
st_geomfromgeohash(geohash: string) → geometry

Return a POLYGON Geometry from a GeoHash string with max precision.

+
Immutable
st_geomfromgeohash(geohash: string, precision: int) → geometry

Return a POLYGON Geometry from a GeoHash string with supplied precision.

+
Immutable
st_geomfromgeojson(val: string) → geometry

Returns the Geometry from a GeoJSON representation.

+
Immutable
st_geomfromgeojson(val: jsonb) → geometry

Returns the Geometry from a GeoJSON representation.

+
Immutable
st_geomfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geomfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_geomfromwkb(bytes: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with the given SRID set.

+
Immutable
st_geomfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation.

+
Immutable
st_hasarc(geometry: geometry) → bool

Returns whether there is a CIRCULARSTRING in the geometry.

+
Immutable
st_hausdorffdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the Hausdorff distance between the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_hausdorffdistance(geometry_a: geometry, geometry_b: geometry, densify_frac: float) → float

Returns the Hausdorff distance between the given geometries, with the given segment densification (range 0.0-1.0).

+

This function utilizes the GEOS module.

+
Immutable
st_interiorringn(geometry: geometry, n: int) → geometry

Returns the n-th (1-indexed) interior ring of a Polygon as a LineString. Returns NULL if the shape is not a Polygon, or the ring does not exist.

+
Immutable
st_intersection(geography_a: geography, geography_b: geography) → geography

Returns the point intersections of the given geographies.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+

This function utilizes the GEOS module.

+
Immutable
st_intersection(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the point intersections of the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_intersection(geometry_a_str: string, geometry_b_str: string) → geometry

Returns the point intersections of the given geometries.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_intersects(geography_a: geography, geography_b: geography) → bool

Returns true if geography_a shares any portion of space with geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_intersects(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_intersects(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_isclosed(geometry: geometry) → bool

Returns whether the geometry is closed as defined by whether the start and end points are coincident. Points are considered closed, empty geometries are not. For collections and multi-types, all members must be closed, as must all polygon rings.

+
Immutable
st_iscollection(geometry: geometry) → bool

Returns whether the geometry is of a collection type (including multi-types).

+
Immutable
st_isempty(geometry: geometry) → bool

Returns whether the geometry is empty.

+
Immutable
st_ispolygonccw(geometry: geometry) → bool

Returns whether the Polygon objects inside the Geometry have exterior rings in the counter-clockwise orientation and interior rings in the clockwise orientation. Non-Polygon objects are considered counter-clockwise.

+
Immutable
st_ispolygoncw(geometry: geometry) → bool

Returns whether the Polygon objects inside the Geometry have exterior rings in the clockwise orientation and interior rings in the counter-clockwise orientation. Non-Polygon objects are considered clockwise.

+
Immutable
st_isring(geometry: geometry) → bool

Returns whether the geometry is a single linestring that is closed and simple, as defined by ST_IsClosed and ST_IsSimple.

+

This function utilizes the GEOS module.

+
Immutable
st_issimple(geometry: geometry) → bool

Returns true if the geometry has no anomalous geometric points, e.g. that it intersects with or lies tangent to itself.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalid(geometry: geometry) → bool

Returns whether the geometry is valid as defined by the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalid(geometry: geometry, flags: int) → bool

Returns whether the geometry is valid.

+

For flags=0, validity is defined by the OGC spec.

+

For flags=1, validity considers self-intersecting rings forming holes as valid as per ESRI. This is not valid under OGC and CRDB spatial operations may not operate correctly.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidreason(geometry: geometry) → string

Returns a string containing the reason the geometry is invalid along with the point of interest, or “Valid Geometry” if it is valid. Validity is defined by the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidreason(geometry: geometry, flags: int) → string

Returns the reason the geometry is invalid or “Valid Geometry” if it is valid.

+

For flags=0, validity is defined by the OGC spec.

+

For flags=1, validity considers self-intersecting rings forming holes as valid as per ESRI. This is not valid under OGC and CRDB spatial operations may not operate correctly.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidtrajectory(geometry: geometry) → bool

Returns whether the geometry encodes a valid trajectory.

+

Note the geometry must be a LineString with M coordinates.

+
Immutable
st_length(geography: geography) → float

Returns the length of the given geography in meters. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_length(geography: geography, use_spheroid: bool) → float

Returns the length of the given geography in meters.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_length(geometry: geometry) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+
Immutable
st_length(geometry_str: string) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_length2d(geometry: geometry) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+
Immutable
st_linecrossingdirection(linestring_a: geometry, linestring_b: geometry) → int

Returns an integer value defining behavior of crossing of lines: +0: lines do not cross, +-1: linestring_b crosses linestring_a from right to left, +1: linestring_b crosses linestring_a from left to right, +-2: linestring_b crosses linestring_a multiple times from right to left, +2: linestring_b crosses linestring_a multiple times from left to right, +-3: linestring_b crosses linestring_a multiple times from left to left, +3: linestring_b crosses linestring_a multiple times from right to right.

+

Note that the top vertex of the segment touching another line does not count as a crossing, but the bottom vertex of segment touching another line is considered a crossing.

+
Immutable
st_linefromencodedpolyline(encoded_polyline: string) → geometry

Creates a LineString from an Encoded Polyline string.

+

Returns valid results only if the polyline was encoded with 5 decimal places.

+

See http://developers.google.com/maps/documentation/utilities/polylinealgorithm

+
Immutable
st_linefromencodedpolyline(encoded_polyline: string, precision: int4) → geometry

Creates a LineString from an Encoded Polyline string.

+

Precision specifies how many decimal places will be preserved in Encoded Polyline. Value should be the same on encoding and decoding, or coordinates will be incorrect.

+

See http://developers.google.com/maps/documentation/utilities/polylinealgorithm

+
Immutable
st_linefrommultipoint(geometry: geometry) → geometry

Creates a LineString from a MultiPoint geometry.

+
Immutable
st_linefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not LineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_linefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_lineinterpolatepoint(geometry: geometry, fraction: float) → geometry

Returns a point along the given LineString which is at the given fraction of the LineString’s total length.

+

This function utilizes the GEOS module.

+
Immutable
st_lineinterpolatepoints(geometry: geometry, fraction: float) → geometry

Returns one or more points along the LineString which are at integral multiples of the given fraction of the LineString’s total length.

+

Note If the result has zero or one points, it will be returned as a POINT. If it has two or more points, it will be returned as a MULTIPOINT.

+

This function utilizes the GEOS module.

+
Immutable
st_lineinterpolatepoints(geometry: geometry, fraction: float, repeat: bool) → geometry

Returns one or more points along the LineString which are at integral multiples of the given fraction of the LineString’s total length. If repeat is false (default true) then it returns the first point.

+

Note If the result has zero or one points, it will be returned as a POINT. If it has two or more points, it will be returned as a MULTIPOINT.

+

This function utilizes the GEOS module.

+
Immutable
st_linelocatepoint(line: geometry, point: geometry) → float

Returns a float between 0 and 1 representing the location of the closest point on LineString to the given Point, as a fraction of total 2d line length.

+
Immutable
st_linemerge(geometry: geometry) → geometry

Returns a LineString or MultiLineString by joining together constituents of a MultiLineString with matching endpoints. If the input is not a MultiLineString or LineString, an empty GeometryCollection is returned.

+

This function utilizes the GEOS module.

+
Immutable
st_linestringfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not LineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_linestringfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linestringfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linestringfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linesubstring(linestring: geometry, start_fraction: decimal, end_fraction: decimal) → geometry

Return a linestring being a substring of the input one starting and ending at the given fractions of total 2D length. Second and third arguments are float8 values between 0 and 1.

+
Immutable
st_linesubstring(linestring: geometry, start_fraction: float, end_fraction: float) → geometry

Return a linestring being a substring of the input one starting and ending at the given fractions of total 2D length. Second and third arguments are float8 values between 0 and 1.

+
Immutable
st_longestline(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the LineString corresponding to the max distance across every pair of points comprising the given geometries.

+

Note if geometries are the same, it will return the LineString with the maximum distance between the geometry’s vertexes. The function will return the longest line that was discovered first when comparing maximum distances if more than one is found.

+
Immutable
st_m(geometry: geometry) → float

Returns the M coordinate of a geometry if it is a Point.

+
Immutable
st_makebox2d(geometry_a: geometry, geometry_b: geometry) → box2d

Creates a box2d from two points. Errors if arguments are not two non-empty points.

+
Immutable
st_makeenvelope(xmin: float, ymin: float, xmax: float, ymax: float) → geometry

Creates a rectangular Polygon from the minimum and maximum values for X and Y with SRID 0.

+
Immutable
st_makeenvelope(xmin: float, ymin: float, xmax: float, ymax: float, srid: int) → geometry

Creates a rectangular Polygon from the minimum and maximum values for X and Y with the given SRID.

+
Immutable
st_makepoint(x: float, y: float) → geometry

Returns a new Point with the given X and Y coordinates.

+
Immutable
st_makepoint(x: float, y: float, z: float) → geometry

Returns a new Point with the given X, Y, and Z coordinates.

+
Immutable
st_makepoint(x: float, y: float, z: float, m: float) → geometry

Returns a new Point with the given X, Y, Z, and M coordinates.

+
Immutable
st_makepointm(x: float, y: float, m: float) → geometry

Returns a new Point with the given X, Y, and M coordinates.

+
Immutable
st_makepolygon(geometry: geometry) → geometry

Returns a new Polygon with the given outer LineString.

+
Immutable
st_makepolygon(outer: geometry, interior: anyelement[]) → geometry

Returns a new Polygon with the given outer LineString and interior (hole) LineString(s).

+
Immutable
st_makevalid(geometry: geometry) → geometry

Returns a valid form of the given geometry according to the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_maxdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the maximum distance across every pair of points comprising the given geometries. Note if the geometries are the same, it will return the maximum distance between the geometry’s vertexes.

+
Immutable
st_memsize(geometry: geometry) → int

Returns the amount of memory space (in bytes) the geometry takes.

+
Immutable
st_minimumboundingcircle(geometry: geometry) → geometry

Returns the smallest circle polygon that can fully contain a geometry.

+
Immutable
st_minimumboundingcircle(geometry: geometry, num_segs: int) → geometry

Returns the smallest circle polygon that can fully contain a geometry.

+
Immutable
st_minimumboundingradius(geometry: geometry) → tuple{geometry AS center, float AS radius}

Returns a record containing the center point and radius of the smallest circle that can fully contain the given geometry.

+
Immutable
st_minimumclearance(geometry: geometry) → float

Returns the minimum distance a vertex can move before producing an invalid geometry. Returns Infinity if no minimum clearance can be found (e.g. for a single point).

+
Immutable
st_minimumclearanceline(geometry: geometry) → geometry

Returns a LINESTRING spanning the minimum distance a vertex can move before producing an invalid geometry. If no minimum clearance can be found (e.g. for a single point), an empty LINESTRING is returned.

+
Immutable
st_mlinefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mlinefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mlinefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mlinefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mpointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mpointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpolyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mpolyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_mpolyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_mpolyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multi(geometry: geometry) → geometry

Returns the geometry as a new multi-geometry, e.g. converts a POINT to a MULTIPOINT. If the input is already a multitype or collection, it is returned as is.

+
Immutable
st_multilinefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multilinefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multilinestringfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multipointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipolyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipolyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipolygonfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_ndims(geometry: geometry) → int

Returns the number of coordinate dimensions of a given Geometry.

+
Immutable
st_node(geometry: geometry) → geometry

Adds a node on a geometry for each intersection. Resulting geometry is always a MultiLineString.

+
Immutable
st_normalize(geometry: geometry) → geometry

Returns the geometry in its normalized form.

+

This function utilizes the GEOS module.

+
Immutable
st_npoints(geometry: geometry) → int

Returns the number of points in a given Geometry. Works for any shape type.

+
Immutable
st_nrings(geometry: geometry) → int

Returns the number of rings in a Polygon Geometry. Returns 0 if the shape is not a Polygon.

+
Immutable
st_numgeometries(geometry: geometry) → int

Returns the number of shapes inside a given Geometry.

+
Immutable
st_numinteriorring(geometry: geometry) → int

Returns the number of interior rings in a Polygon Geometry. Returns NULL if the shape is not a Polygon.

+
Immutable
st_numinteriorrings(geometry: geometry) → int

Returns the number of interior rings in a Polygon Geometry. Returns NULL if the shape is not a Polygon.

+
Immutable
st_numpoints(geometry: geometry) → int

Returns the number of points in a LineString. Returns NULL if the Geometry is not a LineString.

+
Immutable
st_orderingequals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is exactly equal to geometry_b, having all coordinates in the same order, as well as the same type, SRID, bounding box, and so on.

+
Immutable
st_orientedenvelope(geometry: geometry) → geometry

Returns a minimum rotated rectangle enclosing a geometry. +Note that more than one minimum rotated rectangle may exist. +May return a Point or LineString in the case of degenerate inputs.

+
Immutable
st_overlaps(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a intersects but does not completely contain geometry_b, or vice versa. “Does not completely” implies ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = false.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_perimeter(geography: geography) → float

Returns the perimeter of the given geography in meters. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_perimeter(geography: geography, use_spheroid: bool) → float

Returns the perimeter of the given geography in meters.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_perimeter(geometry: geometry) → float

Returns the perimeter of the given geometry.

+

Note ST_Perimeter is only valid for Polygon - use ST_Length for LineString.

+

This function utilizes the GEOS module.

+
Immutable
st_perimeter2d(geometry: geometry) → float

Returns the perimeter of the given geometry.

+

Note ST_Perimeter is only valid for Polygon - use ST_Length for LineString.

+

This function utilizes the GEOS module.

+
Immutable
st_point(x: float, y: float) → geometry

Returns a new Point with the given X and Y coordinates.

+
Immutable
st_pointfromgeohash(geohash: string) → geometry

Return a POINT Geometry from a GeoHash string with max precision.

+
Immutable
st_pointfromgeohash(geohash: string, precision: int) → geometry

Return a POINT Geometry from a GeoHash string with supplied precision.

+
Immutable
st_pointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Point, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_pointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointinsidecircle(geometry: geometry, x_coord: float, y_coord: float, radius: float) → bool

Returns true if the geometry is a point and is inside the circle. Returns false otherwise.

+
Immutable
st_pointn(geometry: geometry, n: int) → geometry

Returns the n-th Point of a LineString (1-indexed). Returns NULL if out of bounds or not a LineString.

+
Immutable
st_pointonsurface(geometry: geometry) → geometry

Returns a point that intersects with the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_points(geometry: geometry) → geometry

Returns all coordinates in the given Geometry as a MultiPoint, including duplicates.

+
Immutable
st_polyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Polygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_polyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygon(geometry: geometry, srid: int) → geometry

Returns a new Polygon from the given LineString and sets its SRID. It is equivalent to ST_MakePolygon with a single argument followed by ST_SetSRID.

+
Immutable
st_polygonfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Polygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_polygonfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygonfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygonfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_project(geography: geography, distance: float, azimuth: float) → geography

Returns a point projected from a start point along a geodesic using a given distance and azimuth (bearing). +This is known as the direct geodesic problem.

+

The distance is given in meters. Negative values are supported.

+

The azimuth (also known as heading or bearing) is given in radians. It is measured clockwise from true north (azimuth zero). +East is azimuth π/2 (90 degrees); south is azimuth π (180 degrees); west is azimuth 3π/2 (270 degrees). +Negative azimuth values and values greater than 2π (360 degrees) are supported.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry) → string

Returns the DE-9IM spatial relation between geometry_a and geometry_b.

+

This function utilizes the GEOS module.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry, bnr: int) → string

Returns the DE-9IM spatial relation between geometry_a and geometry_b using the given boundary node rule (1:OGC/MOD2, 2:Endpoint, 3:MultivalentEndpoint, 4:MonovalentEndpoint).

+

This function utilizes the GEOS module.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry, pattern: string) → bool

Returns whether the DE-9IM spatial relation between geometry_a and geometry_b matches the DE-9IM pattern.

+

This function utilizes the GEOS module.

+
Immutable
st_relatematch(intersection_matrix: string, pattern: string) → bool

Returns whether the given DE-9IM intersection matrix satisfies the given pattern.

+
Immutable
st_removepoint(line_string: geometry, index: int) → geometry

Removes the Point at the given 0-based index and returns the modified LineString geometry.

+
Immutable
st_removerepeatedpoints(geometry: geometry) → geometry

Returns a geometry with repeated points removed.

+
Immutable
st_removerepeatedpoints(geometry: geometry, tolerance: float) → geometry

Returns a geometry with repeated points removed, within the given distance tolerance.

+
Immutable
st_reverse(geometry: geometry) → geometry

Returns a modified geometry by reversing the order of its vertices.

+
Immutable
st_rotate(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated around the origin by a rotation angle.

+
Immutable
st_rotate(g: geometry, angle_radians: float, origin_point: geometry) → geometry

Returns a modified Geometry whose coordinates are rotated around the provided origin by a rotation angle.

+
Immutable
st_rotate(g: geometry, angle_radians: float, origin_x: float, origin_y: float) → geometry

Returns a modified Geometry whose coordinates are rotated around the provided origin by a rotation angle.

+
Immutable
st_rotatex(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the x axis by a rotation angle.

+
Immutable
st_rotatey(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the y axis by a rotation angle.

+
Immutable
st_rotatez(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the z axis by a rotation angle.

+
Immutable
st_s2covering(geography: geography) → geography

Returns a geography which represents the S2 covering used by the index using the default index configuration.

+
Immutable
st_s2covering(geography: geography, settings: string) → geography

Returns a geography which represents the S2 covering used by the index using the index configuration specified by the settings parameter.

+

The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX ... WITH (...), e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using SELECT ST_S2Covering(geography, 's2_max_level=15,s2_level_mod=3').

+
Immutable
st_s2covering(geometry: geometry) → geometry

Returns a geometry which represents the S2 covering used by the index using the default index configuration.

+
Immutable
st_s2covering(geometry: geometry, settings: string) → geometry

Returns a geometry which represents the S2 covering used by the index using the index configuration specified by the settings parameter.

+

The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX ... WITH (...), e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using SELECT ST_S2Covering(geometry, 's2_max_level=15,s2_level_mod=3')

+
Immutable
st_scale(g: geometry, factor: geometry) → geometry

Returns a modified Geometry scaled by taking in a Geometry as the factor.

+
Immutable
st_scale(g: geometry, factor: geometry, origin: geometry) → geometry

Returns a modified Geometry scaled by the Geometry factor relative to a false origin.

+
Immutable
st_scale(geometry: geometry, x_factor: float, y_factor: float) → geometry

Returns a modified Geometry scaled by the given factors.

+
Immutable
st_segmentize(geography: geography, max_segment_length_meters: float) → geography

Returns a modified Geography having no segment longer than the given max_segment_length meters.

+

The calculations are done on a sphere.

+

This function utilizes the S2 library for spherical calculations.

+
Immutable
st_segmentize(geometry: geometry, max_segment_length: float) → geometry

Returns a modified Geometry having no segment longer than the given max_segment_length. Length units are in units of spatial reference.

+
Immutable
st_setpoint(line_string: geometry, index: int, point: geometry) → geometry

Sets the Point at the given 0-based index and returns the modified LineString geometry.

+
Immutable
st_setsrid(geography: geography, srid: int) → geography

Sets a Geography to a new SRID without transforming the coordinates.

+
Immutable
st_setsrid(geometry: geometry, srid: int) → geometry

Sets a Geometry to a new SRID without transforming the coordinates.

+
Immutable
st_sharedpaths(geometry_a: geometry, geometry_b: geometry) → geometry

Returns a collection containing paths shared by the two input geometries.

+

Those going in the same direction are in the first element of the collection, +those going in the opposite direction are in the second element. +The paths themselves are given in the direction of the first geometry.

+
Immutable
st_shiftlongitude(geometry: geometry) → geometry

Returns a modified version of a geometry in which the longitude (X coordinate) of each point is incremented by 360 if it is <0 and decremented by 360 if it is >180. The result is only meaningful if the coordinates are in longitude/latitude.

+
Immutable
st_shortestline(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the LineString corresponding to the minimum distance across every pair of points comprising the given geometries.

+

Note if geometries are the same, it will return the LineString with the minimum distance between the geometry’s vertexes. The function will return the shortest line that was discovered first when comparing minimum distances if more than one is found.

+
Immutable
st_simplify(geometry: geometry, tolerance: float) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm.

+

This function utilizes the GEOS module.

+
Immutable
st_simplify(geometry: geometry, tolerance: float, preserve_collapsed: bool) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm, retaining objects that would be too small given the tolerance if preserve_collapsed is set to true.

+
Immutable
st_simplifypreservetopology(geometry: geometry, tolerance: float) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm, avoiding the creation of invalid geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_snap(input: geometry, target: geometry, tolerance: float) → geometry

Snaps the vertices and segments of the input geometry to the target geometry’s vertices. +Tolerance is used to control where snapping is performed. The result geometry is the input geometry with the vertices snapped. +If no snapping occurs then the input geometry is returned unchanged.

+
Immutable
st_snaptogrid(geometry: geometry, origin: geometry, size_x: float, size_y: float, size_z: float, size_m: float) → geometry

Snap a geometry to a grid defined by the given origin and X, Y, Z, and M cell sizes. Any dimension with a 0 cell size will not be snapped.

+
Immutable
st_snaptogrid(geometry: geometry, origin_x: float, origin_y: float, size_x: float, size_y: float) → geometry

Snap a geometry to a grid with X coordinates snapped to size_x and Y coordinates snapped to size_y based on an origin of (origin_x, origin_y).

+
Immutable
st_snaptogrid(geometry: geometry, size: float) → geometry

Snap a geometry to a grid of the given size. The specified size is only used to snap X and Y coordinates.

+
Immutable
st_snaptogrid(geometry: geometry, size_x: float, size_y: float) → geometry

Snap a geometry to a grid with X coordinates snapped to size_x and Y coordinates snapped to size_y.

+
Immutable
st_srid(geography: geography) → int

Returns the Spatial Reference Identifier (SRID) for the ST_Geography as defined in spatial_ref_sys table.

+
Immutable
st_srid(geometry: geometry) → int

Returns the Spatial Reference Identifier (SRID) for the ST_Geometry as defined in spatial_ref_sys table.

+
Immutable
st_startpoint(geometry: geometry) → geometry

Returns the first point of a geometry which has shape LineString. Returns NULL if the geometry is not a LineString.

+
Immutable
st_subdivide(geometry: geometry) → geometry

Returns a geometry divided into parts, where each part contains no more than 256 vertices.

+
Immutable
st_subdivide(geometry: geometry, max_vertices: int4) → geometry

Returns a geometry divided into parts, where each part contains no more than the number of vertices provided.

+
Immutable
st_summary(geography: geography) → string

Returns a text summary of the contents of the geography.

+

Flags shown in square brackets after the geometry type have the following meaning:

+
    +
  • M: has M coordinate
  • +
  • Z: has Z coordinate
  • +
  • B: has a cached bounding box
  • +
  • G: is geography
  • +
  • S: has spatial reference system
  • +
+
Immutable
st_summary(geometry: geometry) → string

Returns a text summary of the contents of the geometry.

+

Flags shown in square brackets after the geometry type have the following meaning:

+
    +
  • M: has M coordinate
  • +
  • Z: has Z coordinate
  • +
  • B: has a cached bounding box
  • +
  • G: is geography
  • +
  • S: has spatial reference system
  • +
+
Immutable
st_swapordinates(geometry: geometry, swap_ordinate_string: string) → geometry

Returns a version of the given geometry with given ordinates swapped. +The swap_ordinate_string parameter is a 2-character string naming the ordinates to swap. Valid names are: x, y, z and m.

+
Immutable
st_symdifference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the symmetric difference of both geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_symmetricdifference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the symmetric difference of both geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4, bounds: geometry) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4, bounds: geometry, margin: float) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_touches(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if the only points in common between geometry_a and geometry_b are on the boundary. Note points do not touch other points.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_transform(geometry: geometry, from_proj_text: string, srid: int) → geometry

Transforms a geometry into the coordinate reference system assuming the from_proj_text to the new to_proj_text by projecting its coordinates. The supplied SRID is set on the new geometry.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, from_proj_text: string, to_proj_text: string) → geometry

Transforms a geometry into the coordinate reference system assuming the from_proj_text to the new to_proj_text by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, srid: int) → geometry

Transforms a geometry into the given SRID coordinate reference system by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, to_proj_text: string) → geometry

Transforms a geometry into the coordinate reference system referenced by the projection text by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_translate(g: geometry, delta_x: float, delta_y: float) → geometry

Returns a modified Geometry translated by the given deltas.

+
Immutable
st_translate(g: geometry, delta_x: float, delta_y: float, delta_z: float) → geometry

Returns a modified Geometry translated by the given deltas.

+
Immutable
st_transscale(geometry: geometry, delta_x: float, delta_y: float, x_factor: float, y_factor: float) → geometry

Translates the geometry using the deltaX and deltaY args, then scales it using the XFactor, YFactor args, working in 2D only.

+
Immutable
st_unaryunion(geometry: geometry) → geometry

Returns a union of the components for any geometry or geometry collection provided. Dissolves boundaries of a multipolygon.

+
Immutable
st_voronoilines(geometry: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoilines(geometry: geometry, tolerance: float) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoilines(geometry: geometry, tolerance: float, extend_to: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoipolygons(geometry: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_voronoipolygons(geometry: geometry, tolerance: float) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_voronoipolygons(geometry: geometry, tolerance: float, extend_to: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_within(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is completely inside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_wkbtosql(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation.

+
Immutable
st_wkttosql(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_x(geometry: geometry) → float

Returns the X coordinate of a geometry if it is a Point.

+
Immutable
st_xmax(box2d: box2d) → float

Returns the maximum X ordinate of a box2d.

+
Immutable
st_xmax(geometry: geometry) → float

Returns the maximum X ordinate of a geometry.

+
Immutable
st_xmin(box2d: box2d) → float

Returns the minimum X ordinate of a box2d.

+
Immutable
st_xmin(geometry: geometry) → float

Returns the minimum X ordinate of a geometry.

+
Immutable
st_y(geometry: geometry) → float

Returns the Y coordinate of a geometry if it is a Point.

+
Immutable
st_ymax(box2d: box2d) → float

Returns the maximum Y ordinate of a box2d.

+
Immutable
st_ymax(geometry: geometry) → float

Returns the maximum Y ordinate of a geometry.

+
Immutable
st_ymin(box2d: box2d) → float

Returns the minimum Y ordinate of a box2d.

+
Immutable
st_ymin(geometry: geometry) → float

Returns the minimum Y ordinate of a geometry.

+
Immutable
st_z(geometry: geometry) → float

Returns the Z coordinate of a geometry if it is a Point.

+
Immutable
st_zmflag(geometry: geometry) → int2

Returns a code based on the ZM coordinate dimension of a geometry (XY = 0, XYM = 1, XYZ = 2, XYZM = 3).

+
Immutable
+ +### String and byte functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
ascii(val: string) → int

Returns the character code of the first character in val. Despite the name, the function supports Unicode too.

+
Immutable
bit_count(val: bytes) → int

Calculates the number of bits set used to represent val.

+
Immutable
bit_count(val: varbit) → int

Calculates the number of bits set used to represent val.

+
Immutable
bit_length(val: bytes) → int

Calculates the number of bits used to represent val.

+
Immutable
bit_length(val: string) → int

Calculates the number of bits used to represent val.

+
Immutable
bit_length(val: varbit) → int

Calculates the number of bits used to represent val.

+
Immutable
bitmask_and(a: string, b: string) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: string, b: varbit) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: varbit, b: string) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: varbit, b: varbit) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: string, b: string) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: string, b: varbit) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: varbit, b: string) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: varbit, b: varbit) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: string, b: string) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: string, b: varbit) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: varbit, b: string) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: varbit, b: varbit) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
btrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the beginning or end of input (applies recursively).

+

For example, btrim('doggie', 'eod') returns ggi.

+
Immutable
btrim(val: string) → string

Removes all spaces from the beginning and end of val.

+
Immutable
char_length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
char_length(val: string) → int

Calculates the number of characters in val.

+
Immutable
character_length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
character_length(val: string) → int

Calculates the number of characters in val.

+
Immutable
chr(val: int) → string

Returns the character with the code given in val. Inverse function of ascii().

+
Immutable
compress(data: bytes, codec: string) → bytes

Compress data with the specified codec (‘gzip’, ‘lz4’, ‘snappy’, ‘zstd’).

+
Immutable
concat(any...) → string

Concatenates a comma-separated list of strings.

+
Immutable
concat_ws(string, any...) → string

Uses the first argument as a separator between the concatenation of the subsequent arguments.

+

For example concat_ws('!','wow','great') returns wow!great.

+
Immutable
convert_from(str: bytes, enc: string) → string

Decode the bytes in str into a string using encoding enc. Supports encodings ‘UTF8’ and ‘LATIN1’.

+
Immutable
convert_to(str: string, enc: string) → bytes

Encode the string str as a byte array using encoding enc. Supports encodings ‘UTF8’ and ‘LATIN1’.

+
Immutable
decode(text: string, format: string) → bytes

Decodes data using format (hex / escape / base64).

+
Immutable
decompress(data: bytes, codec: string) → bytes

Decompress data with the specified codec (‘gzip’, ‘lz4’, ‘snappy’, ‘zstd’).

+
Immutable
difference(source: string, target: string) → int

Convert two strings to their Soundex codes and report the number of matching code positions.

+
Immutable
encode(data: bytes, format: string) → string

Encodes data using format (hex / escape / base64).

+
Immutable
format(string, any...) → string

Interprets the first argument as a format string similar to C sprintf and interpolates the remaining arguments.

+
Stable
from_ip(val: bytes) → string

Converts the byte string representation of an IP to its character string representation.

+
Immutable
from_uuid(val: bytes) → string

Converts the byte string representation of a UUID to its character string representation.

+
Immutable
get_bit(bit_string: varbit, index: int) → int

Extracts a bit at given index in the bit array.

+
Immutable
get_bit(byte_string: bytes, index: int) → int

Extracts a bit at the given index in the byte array.

+
Immutable
get_byte(byte_string: bytes, index: int) → int

Extracts a byte at the given index in the byte array.

+
Immutable
initcap(val: string) → string

Capitalizes the first letter of val.

+
Immutable
left(input: bytes, return_set: int) → bytes

Returns the first return_set bytes from input.

+
Immutable
left(input: string, return_set: int) → string

Returns the first return_set characters from input.

+
Immutable
length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
length(val: string) → int

Calculates the number of characters in val.

+
Immutable
length(val: varbit) → int

Calculates the number of bits in val.

+
Immutable
lower(val: string) → string

Converts all characters in val to their lower-case equivalents.

+
Immutable
lpad(string: string, length: int) → string

Pads string to length by adding ’ ’ to the left of string. If string is longer than length it is truncated.

+
Immutable
lpad(string: string, length: int, fill: string) → string

Pads string by adding fill to the left of string to make it length. If string is longer than length it is truncated.

+
Immutable
ltrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the beginning (left-hand side) of input (applies recursively).

+

For example, ltrim('doggie', 'od') returns ggie.

+
Immutable
ltrim(val: string) → string

Removes all spaces from the beginning (left-hand side) of val.

+
Immutable
md5(bytes...) → string

Calculates the MD5 hash value of a set of values.

+
Leakproof
md5(string...) → string

Calculates the MD5 hash value of a set of values.

+
Leakproof
octet_length(val: bytes) → int

Calculates the number of bytes used to represent val.

+
Immutable
octet_length(val: string) → int

Calculates the number of bytes used to represent val.

+
Immutable
octet_length(val: varbit) → int

Calculates the number of bits used to represent val.

+
Immutable
overlay(input: string, overlay_val: string, start_pos: int) → string

Replaces characters in input with overlay_val starting at start_pos (begins at 1).

+

For example, overlay('doggie', 'CAT', 2) returns dCATie.

+
Immutable
overlay(input: string, overlay_val: string, start_pos: int, end_pos: int) → string

Deletes the characters in input between start_pos and end_pos (count starts at 1), and then insert overlay_val at start_pos.

+
Immutable
parse_date(string: string, datestyle: string) → date

Parses a date assuming it is in format specified by DateStyle.

+
Immutable
parse_date(val: string) → date

Parses a date assuming it is in MDY format.

+
Immutable
parse_ident(qualified_identifier: string) → string[]

Splits qualified_identifier into an array of identifiers, removing any quoting of individual identifiers. Extra characters after the last identifier are considered an error.

+
Immutable
parse_ident(qualified_identifier: string, strict: bool) → string[]

Splits qualified_identifier into an array of identifiers, removing any quoting of individual identifiers. If strict is false, then extra characters after the last identifier are ignored.

+
Immutable
parse_interval(string: string, style: string) → interval

Convert a string to an interval using the given IntervalStyle.

+
Immutable
parse_interval(val: string) → interval

Convert a string to an interval assuming the Postgres IntervalStyle.

+
Immutable
parse_time(string: string, timestyle: string) → time

Parses a time assuming the date (if any) is in format specified by DateStyle.

+
Immutable
parse_time(val: string) → time

Parses a time assuming the date (if any) is in MDY format.

+
Immutable
parse_timestamp(string: string, datestyle: string) → timestamp

Convert a string containing an absolute timestamp to the corresponding timestamp assuming dates formatted using the given DateStyle.

+
Immutable
parse_timestamp(val: string) → timestamp

Convert a string containing an absolute timestamp to the corresponding timestamp assuming dates are in MDY format.

+
Immutable
parse_timetz(string: string, timestyle: string) → timetz

Parses a timetz assuming the date (if any) is in format specified by DateStyle.

+
Immutable
parse_timetz(val: string) → timetz

Parses a timetz assuming the date (if any) is in MDY format.

+
Immutable
prettify_statement(statement: string, line_width: int, align_mode: int, case_mode: int) → string

Prettifies a statement using a user-configured pretty-printing config. +Align mode values range from 0 - 3, representing no, partial, full, and extra alignment respectively. +Case mode values range between 0 - 1, representing lower casing and upper casing respectively.

+
Immutable
prettify_statement(val: string) → string

Prettifies a statement using the default pretty-printing config.

+
Immutable
quote_ident(val: string) → string

Return val suitably quoted to serve as identifier in a SQL statement.

+
Immutable
quote_literal(val: string) → string

Return val suitably quoted to serve as string literal in a SQL statement.

+
Immutable
quote_literal(val: anyelement) → string

Coerce val to a string and then quote it as a literal.

+
Stable
quote_nullable(val: string) → string

Coerce val to a string and then quote it as a literal. If val is NULL, returns ‘NULL’.

+
Immutable
quote_nullable(val: anyelement) → string

Coerce val to a string and then quote it as a literal. If val is NULL, returns ‘NULL’.

+
Stable
regexp_extract(input: string, regex: string) → string

Returns the first match for the Regular Expression regex in input.

+
Immutable
regexp_replace(input: string, regex: string, replace: string) → string

Replaces matches for the Regular Expression regex in input with the Regular Expression replace.

+
Immutable
regexp_replace(input: string, regex: string, replace: string, flags: string) → string

Replaces matches for the regular expression regex in input with the regular expression replace using flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
repeat(input: string, repeat_counter: int) → string

Concatenates input repeat_counter number of times.

+

For example, repeat('dog', 2) returns dogdog.

+
Immutable
replace(input: string, find: string, replace: string) → string

Replaces all occurrences of find with replace in input

+
Immutable
reverse(val: string) → string

Reverses the order of the string’s characters.

+
Immutable
right(input: bytes, return_set: int) → bytes

Returns the last return_set bytes from input.

+
Immutable
right(input: string, return_set: int) → string

Returns the last return_set characters from input.

+
Immutable
rpad(string: string, length: int) → string

Pads string to length by adding ’ ’ to the right of string. If string is longer than length it is truncated.

+
Immutable
rpad(string: string, length: int, fill: string) → string

Pads string to length by adding fill to the right of string. If string is longer than length it is truncated.

+
Immutable
rtrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the end (right-hand side) of input (applies recursively).

+

For example, rtrim('doggie', 'ei') returns dogg.

+
Immutable
rtrim(val: string) → string

Removes all spaces from the end (right-hand side) of val.

+
Immutable
set_bit(bit_string: varbit, index: int, to_set: int) → varbit

Updates a bit at given index in the bit array.

+
Immutable
set_bit(byte_string: bytes, index: int, to_set: int) → bytes

Updates a bit at the given index in the byte array.

+
Immutable
set_byte(byte_string: bytes, index: int, to_set: int) → bytes

Updates a byte at the given index in the byte array.

+
Immutable
sha1(bytes...) → string

Calculates the SHA1 hash value of a set of values.

+
Leakproof
sha1(string...) → string

Calculates the SHA1 hash value of a set of values.

+
Leakproof
sha224(bytes...) → string

Calculates the SHA224 hash value of a set of values.

+
Leakproof
sha224(string...) → string

Calculates the SHA224 hash value of a set of values.

+
Leakproof
sha256(bytes...) → string

Calculates the SHA256 hash value of a set of values.

+
Leakproof
sha256(string...) → string

Calculates the SHA256 hash value of a set of values.

+
Leakproof
sha384(bytes...) → string

Calculates the SHA384 hash value of a set of values.

+
Leakproof
sha384(string...) → string

Calculates the SHA384 hash value of a set of values.

+
Leakproof
sha512(bytes...) → string

Calculates the SHA512 hash value of a set of values.

+
Leakproof
sha512(string...) → string

Calculates the SHA512 hash value of a set of values.

+
Leakproof
similar_escape(pattern: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern.

+
Immutable
similar_escape(pattern: string, escape: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern using escape as an escape token.

+
Immutable
similar_to_escape(pattern: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern.

+
Immutable
similar_to_escape(pattern: string, escape: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern using escape as an escape token.

+
Immutable
similar_to_escape(unescaped: string, pattern: string, escape: string) → bool

Matches unescaped with pattern using escape as an escape token.

+
Immutable
split_part(input: string, delimiter: string, return_index_pos: int) → string

Splits input using delimiter and returns the field at return_index_pos (starting from 1). If return_index_pos is negative, it returns the |return_index_pos|'th field from the end.

+

For example, split_part('123.456.789.0', '.', 3) returns 789.

+
Immutable
strpos(input: bytes, find: bytes) → int

Calculates the position where the byte subarray find begins in input.

+
Immutable
strpos(input: string, find: string) → int

Calculates the position where the string find begins in input.

+

For example, strpos('doggie', 'gie') returns 4.

+
Immutable
strpos(input: varbit, find: varbit) → int

Calculates the position where the bit subarray find begins in input.

+
Immutable
substr(input: bytes, start_pos: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: bytes, start_pos: int, length: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substr(input: string, regex: string) → string

Returns a substring of input that matches the regular expression regex.

+
Immutable
substr(input: string, regex: string, escape_char: string) → string

Returns a substring of input that matches the regular expression regex using escape_char as your escape character instead of \.

+
Immutable
substr(input: string, start_pos: int) → string

Returns a substring of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: string, start_pos: int, length: int) → string

Returns a substring of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substr(input: varbit, start_pos: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: varbit, start_pos: int, length: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: bytes, start_pos: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: bytes, start_pos: int, length: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: string, regex: string) → string

Returns a substring of input that matches the regular expression regex.

+
Immutable
substring(input: string, regex: string, escape_char: string) → string

Returns a substring of input that matches the regular expression regex using escape_char as your escape character instead of \.

+
Immutable
substring(input: string, start_pos: int) → string

Returns a substring of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: string, start_pos: int, length: int) → string

Returns a substring of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: varbit, start_pos: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: varbit, start_pos: int, length: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring_index(input: string, delim: string, count: int) → string

Returns a substring of input before count occurrences of delim. +If count is positive, the leftmost part is returned. If count is negative, the rightmost part is returned.

+
Immutable
to_char_with_style(date: date, datestyle: string) → string

Convert a date to a string assuming the string is formatted using the given DateStyle.

+
Immutable
to_char_with_style(interval: interval, style: string) → string

Convert an interval to a string using the given IntervalStyle.

+
Immutable
to_char_with_style(timestamp: timestamp, datestyle: string) → string

Convert a timestamp to a string assuming the string is formatted using the given DateStyle.

+
Immutable
to_english(val: int) → string

This function enunciates the value of its argument using English cardinals.

+
Immutable
to_hex(val: bytes) → string

Converts val to its hexadecimal representation.

+
Immutable
to_hex(val: int) → string

Converts val to its hexadecimal representation.

+
Immutable
to_hex(val: string) → string

Converts val to its hexadecimal representation.

+
Immutable
to_ip(val: string) → bytes

Converts the character string representation of an IP to its byte string representation.

+
Immutable
to_uuid(val: string) → bytes

Converts the character string representation of a UUID to its byte string representation.

+
Immutable
translate(input: string, find: string, replace: string) → string

In input, replaces the first character from find with the first character in replace; repeat for each character in find.

+

For example, translate('doggie', 'dog', '123'); returns 1233ie.

+
Immutable
ulid_to_uuid(val: string) → uuid

Converts a ULID string to its UUID-encoded representation.

+
Immutable
unaccent(val: string) → string

Removes accents (diacritic signs) from the text provided in val.

+
Immutable
upper(val: string) → string

Converts all characters in val to their upper-case equivalents.

+
Immutable
+ +### System info functions + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cluster_logical_timestamp() → decimal

Returns the logical time of the current transaction as +a CockroachDB HLC in decimal form.

+

Note that uses of this function disable server-side optimizations and +may increase either contention or retry errors, or both.

+

Returns an error if run in a transaction with an isolation level weaker than SERIALIZABLE.

+
Volatile
current_database() → string

Returns the current database.

+
Stable
current_schema() → string

Returns the current schema.

+
Stable
current_schemas(include_pg_catalog: bool) → string[]

Returns the valid schemas in the search path.

+
Stable
current_user() → string

Returns the current user. This function is provided for compatibility with PostgreSQL.

+
Stable
session_user() → string

Returns the session user. This function is provided for compatibility with PostgreSQL.

+
Stable
to_regclass(text: string) → regtype

Translates a textual relation name to its OID

+
Stable
to_regnamespace(text: string) → regtype

Translates a textual schema name to its OID

+
Stable
to_regproc(text: string) → regtype

Translates a textual function or procedure name to its OID

+
Stable
to_regprocedure(text: string) → regtype

Translates a textual function or procedure name (with argument types) to its OID

+
Stable
to_regrole(text: string) → regtype

Translates a textual role name to its OID

+
Stable
to_regtype(text: string) → regtype

Translates a textual type name to its OID

+
Stable
version() → string

Returns the node’s version of CockroachDB.

+
Volatile
+ +### TIMETZ functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
current_time() → time

Returns the current transaction’s time with no time zone.

+
Stable
current_time() → timetz

Returns the current transaction’s time with time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
current_time(precision: int) → time

Returns the current transaction’s time with no time zone.

+
Stable
current_time(precision: int) → timetz

Returns the current transaction’s time with time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime() → time

Returns the current transaction’s time with no time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime() → timetz

Returns the current transaction’s time with time zone.

+
Stable
localtime(precision: int) → time

Returns the current transaction’s time with no time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime(precision: int) → timetz

Returns the current transaction’s time with time zone.

+
Stable
+ +### Trigrams functions + + + + + + +
Function → ReturnsDescriptionVolatility
show_trgm(input: string) → string[]

Returns an array of all the trigrams in the given string.

+
Immutable
similarity(left: string, right: string) → float

Returns a number that indicates how similar the two arguments are. The range of the result is zero (indicating that the two strings are completely dissimilar) to one (indicating that the two strings are identical).

+
Immutable
+ +### UUID functions + + + + + +
Function → ReturnsDescriptionVolatility
uuid_to_ulid(val: uuid) → string

Converts a UUID-encoded ULID to its string representation.

+
Immutable
+ +### Compatibility functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
col_description(table_oid: oid, column_number: int) → string

Returns the comment for a table column, which is specified by the OID of its table and its column number. (obj_description cannot be used for table columns, since columns do not have OIDs of their own.)

+
Stable
current_setting(setting_name: string) → string

System info

+
Stable
current_setting(setting_name: string, missing_ok: bool) → string

System info

+
Stable
format_type(type_oid: oid, typemod: int) → string

Returns the SQL name of a data type that is identified by its type OID and possibly a type modifier. Currently, the type modifier is ignored.

+
Stable
getdatabaseencoding() → string

Returns the current encoding name used by the database.

+
Stable
has_any_column_privilege(table: string, privilege: string) → bool

Returns whether or not the current user has privileges for any column of table.

+
Stable
has_any_column_privilege(table: oid, privilege: string) → bool

Returns whether or not the current user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: string, table: string, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: string, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: oid, table: string, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: oid, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_column_privilege(table: string, column: int, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: string, column: string, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: oid, column: int, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: oid, column: string, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(user: string, table: string, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: string, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: oid, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: oid, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: string, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: string, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: oid, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: oid, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_database_privilege(database: string, privilege: string) → bool

Returns whether or not the current user has privileges for database.

+
Stable
has_database_privilege(database: oid, privilege: string) → bool

Returns whether or not the current user has privileges for database.

+
Stable
has_database_privilege(user: string, database: string, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: string, database: oid, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: oid, database: string, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: oid, database: oid, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_foreign_data_wrapper_privilege(fdw: string, privilege: string) → bool

Returns whether or not the current user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(fdw: oid, privilege: string) → bool

Returns whether or not the current user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: string, fdw: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: string, fdw: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: oid, fdw: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: oid, fdw: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_function_privilege(function: string, privilege: string) → bool

Returns whether or not the current user has privileges for function.

+
Stable
has_function_privilege(function: oid, privilege: string) → bool

Returns whether or not the current user has privileges for function.

+
Stable
has_function_privilege(user: string, function: string, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: string, function: oid, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: oid, function: string, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: oid, function: oid, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_language_privilege(language: string, privilege: string) → bool

Returns whether or not the current user has privileges for language.

+
Stable
has_language_privilege(language: oid, privilege: string) → bool

Returns whether or not the current user has privileges for language.

+
Stable
has_language_privilege(user: string, language: string, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: string, language: oid, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: oid, language: string, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: oid, language: oid, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_schema_privilege(schema: string, privilege: string) → bool

Returns whether or not the current user has privileges for schema.

+
Stable
has_schema_privilege(schema: oid, privilege: string) → bool

Returns whether or not the current user has privileges for schema.

+
Stable
has_schema_privilege(user: string, schema: string, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: string, schema: oid, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: oid, schema: string, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: oid, schema: oid, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_sequence_privilege(sequence: string, privilege: string) → bool

Returns whether or not the current user has privileges for sequence.

+
Stable
has_sequence_privilege(sequence: oid, privilege: string) → bool

Returns whether or not the current user has privileges for sequence.

+
Stable
has_sequence_privilege(user: string, sequence: string, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: string, sequence: oid, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: oid, sequence: string, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: oid, sequence: oid, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_server_privilege(server: string, privilege: string) → bool

Returns whether or not the current user has privileges for foreign server.

+
Stable
has_server_privilege(server: oid, privilege: string) → bool

Returns whether or not the current user has privileges for foreign server.

+
Stable
has_server_privilege(user: string, server: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: string, server: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: oid, server: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: oid, server: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_system_privilege(privilege: string) → bool

Returns whether or not the current user has privileges for system.

+
Stable
has_system_privilege(user: string, privilege: string) → bool

Returns whether or not the user has privileges for system.

+
Stable
has_system_privilege(user: oid, privilege: string) → bool

Returns whether or not the user has privileges for system.

+
Stable
has_table_privilege(table: string, privilege: string) → bool

Returns whether or not the current user has privileges for table.

+
Stable
has_table_privilege(table: oid, privilege: string) → bool

Returns whether or not the current user has privileges for table.

+
Stable
has_table_privilege(user: string, table: string, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: string, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: oid, table: string, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: oid, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_tablespace_privilege(tablespace: string, privilege: string) → bool

Returns whether or not the current user has privileges for tablespace.

+
Stable
has_tablespace_privilege(tablespace: oid, privilege: string) → bool

Returns whether or not the current user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: string, tablespace: string, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: string, tablespace: oid, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: oid, tablespace: string, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: oid, tablespace: oid, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_type_privilege(type: string, privilege: string) → bool

Returns whether or not the current user has privileges for type.

+
Stable
has_type_privilege(type: oid, privilege: string) → bool

Returns whether or not the current user has privileges for type.

+
Stable
has_type_privilege(user: string, type: string, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: string, type: oid, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: oid, type: string, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: oid, type: oid, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
information_schema._pg_numeric_precision(typid: oid, typmod: int4) → int

Returns the precision of the given type with type modifier

+
Immutable
information_schema._pg_numeric_precision_radix(typid: oid, typmod: int4) → int

Returns the radix of the given type with type modifier

+
Immutable
information_schema._pg_numeric_scale(typid: oid, typmod: int4) → int

Returns the scale of the given type with type modifier

+
Immutable
nameconcatoid(name: string, oid: oid) → name

Used in the information_schema to produce specific_name columns, which are supposed to be unique per schema. The result is the same as ($1::text || ‘_’ || $2::text)::name except that, if it would not fit in 63 characters, we make it do so by truncating the name input (not the oid).

+
Immutable
obj_description(object_oid: oid) → string

Returns the comment for a database object specified by its OID alone. This is deprecated since there is no guarantee that OIDs are unique across different system catalogs; therefore, the wrong comment might be returned.

+
Stable
obj_description(object_oid: oid, catalog_name: string) → string

Returns the comment for a database object specified by its OID and the name of the containing system catalog. For example, obj_description(123456, ‘pg_class’) would retrieve the comment for the table with OID 123456.

+
Stable
oidvectortypes(vector: oidvector) → string

Generates a comma separated string of type names from an oidvector.

+
Stable
pg_backend_pid() → int

Returns a numerical ID attached to this session. This ID is part of the query cancellation key used by the wire protocol. This function was only added for compatibility, and unlike in Postgres, the returned value does not correspond to a real process ID.

+
Stable
pg_collation_for(str: anyelement) → string

Returns the collation of the argument

+
Stable
pg_column_is_updatable(reloid: oid, attnum: int2, include_triggers: bool) → bool

Returns whether the given column can be updated.

+
Stable
pg_column_size(any...) → int

Return size in bytes of the column provided as an argument

+
Stable
pg_function_is_visible(oid: oid) → bool

Returns whether the function with the given OID belongs to one of the schemas on the search path.

+
Stable
pg_get_function_arg_default(func_oid: oid, arg_num: int4) → string

Get textual representation of a function argument’s default value. The second argument of this function is the argument number among all arguments (i.e. proallargtypes, not proargtypes), starting with 1, because that’s how information_schema.sql uses it.

+
Stable
pg_get_function_arguments(func_oid: oid) → string

Returns the argument list (with defaults) necessary to identify a function, in the form it would need to appear in within CREATE FUNCTION.

+
Stable
pg_get_function_identity_arguments(func_oid: oid) → string

Returns the argument list (without defaults) necessary to identify a function, in the form it would need to appear in within ALTER FUNCTION, for instance.

+
Stable
pg_get_function_result(func_oid: oid) → string

Returns the types of the result of the specified function.

+
Stable
pg_get_functiondef(func_oid: oid) → string

For user-defined functions, returns the definition of the specified function. For builtin functions, returns the name of the function.

+
Stable
pg_get_indexdef(index_oid: oid) → string

Gets the CREATE INDEX command for index

+
Stable
pg_get_indexdef(index_oid: oid, column_no: int, pretty_bool: bool) → string

Gets the CREATE INDEX command for index, or definition of just one index column when given a non-zero column number

+
Stable
pg_get_serial_sequence(table_name: string, column_name: string) → string

Returns the name of the sequence used by the given column_name in the table table_name.

+
Stable
pg_get_viewdef(view_oid: oid) → string

Returns the CREATE statement for an existing view.

+
Stable
pg_get_viewdef(view_oid: oid, pretty_bool: bool) → string

Returns the CREATE statement for an existing view.

+
Stable
pg_has_role(role: string, privilege: string) → bool

Returns whether or not the current user has privileges for role.

+
Stable
pg_has_role(role: oid, privilege: string) → bool

Returns whether or not the current user has privileges for role.

+
Stable
pg_has_role(user: string, role: string, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: string, role: oid, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: oid, role: string, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: oid, role: oid, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_is_other_temp_schema(oid: oid) → bool

Returns true if the given OID is the OID of another session’s temporary schema. (This can be useful, for example, to exclude other sessions’ temporary tables from a catalog display.)

+
Stable
pg_my_temp_schema() → oid

Returns the OID of the current session’s temporary schema, or zero if it has none (because it has not created any temporary tables).

+
Stable
pg_relation_is_updatable(reloid: oid, include_triggers: bool) → int4

Returns the update events the relation supports.

+
Stable
pg_sequence_last_value(sequence_oid: oid) → int

Returns the last value generated by a sequence, or NULL if the sequence has not been used yet.

+
Volatile
pg_sleep(seconds: float) → bool

pg_sleep makes the current session’s process sleep until seconds seconds have elapsed. seconds is a value of type double precision, so fractional-second delays can be specified.

+
Volatile
pg_table_is_visible(oid: oid) → bool

Returns whether the table with the given OID belongs to one of the schemas on the search path.

+
Stable
pg_type_is_visible(oid: oid) → bool

Returns whether the type with the given OID belongs to one of the schemas on the search path.

+
Stable
set_config(setting_name: string, new_value: string, is_local: bool) → string

System info

+
Volatile
shobj_description(object_oid: oid, catalog_name: string) → string

Returns the comment for a shared database object specified by its OID and the name of the containing system catalog. This is just like obj_description except that it is used for retrieving comments on shared objects (e.g. databases).

+
Stable
+ diff --git a/src/current/_includes/cockroach-generated/release-25.4/sql/operators.md b/src/current/_includes/cockroach-generated/release-25.4/sql/operators.md new file mode 100644 index 00000000000..e79484b0754 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.4/sql/operators.md @@ -0,0 +1,684 @@ + + + + + +
#Return
int # intint
varbit # varbitvarbit
+ + + + +
#>Return
jsonb #> string[]jsonb
+ + + + +
#>>Return
jsonb #>> string[]string
+ + + + + + + + + +
%Return
decimal % decimaldecimal
decimal % intdecimal
float % floatfloat
int % decimaldecimal
int % intint
string % stringbool
+ + + + + + +
&Return
inet & inetinet
int & intint
varbit & varbitvarbit
+ + + + + + + + + +
&&Return
anyelement && anyelementbool
box2d && box2dbool
box2d && geometrybool
geometry && box2dbool
geometry && geometrybool
inet && inetbool
+ + + + + + + + + + + + + + + +
*Return
decimal * decimaldecimal
decimal * intdecimal
decimal * intervalinterval
float * floatfloat
float * intervalinterval
int * decimaldecimal
int * intint
int * intervalinterval
interval * decimalinterval
interval * floatinterval
interval * intinterval
vector * vectorvector
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Return
+decimaldecimal
+floatfloat
+intint
+intervalinterval
date + intdate
date + intervaltimestamp
date + timetimestamp
date + timetztimestamptz
decimal + decimaldecimal
decimal + intdecimal
decimal + pg_lsnpg_lsn
float + floatfloat
inet + intinet
int + datedate
int + decimaldecimal
int + inetinet
int + intint
interval + datetimestamp
interval + intervalinterval
interval + timetime
interval + timestamptimestamp
interval + timestamptztimestamptz
interval + timetztimetz
pg_lsn + decimalpg_lsn
time + datetimestamp
time + intervaltime
timestamp + intervaltimestamp
timestamptz + intervaltimestamptz
timetz + datetimestamptz
timetz + intervaltimetz
vector + vectorvector
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
-Return
-decimaldecimal
-floatfloat
-intint
-intervalinterval
date - dateint
date - intdate
date - intervaltimestamp
date - timetimestamp
decimal - decimaldecimal
decimal - intdecimal
float - floatfloat
inet - inetint
inet - intinet
int - decimaldecimal
int - intint
interval - intervalinterval
jsonb - intjsonb
jsonb - stringjsonb
jsonb - string[]jsonb
pg_lsn - decimalpg_lsn
pg_lsn - pg_lsndecimal
time - intervaltime
time - timeinterval
timestamp - intervaltimestamp
timestamp - timestampinterval
timestamp - timestamptzinterval
timestamptz - intervaltimestamptz
timestamptz - timestampinterval
timestamptz - timestamptzinterval
timetz - intervaltimetz
vector - vectorvector
+ + + + + +
->Return
jsonb -> intjsonb
jsonb -> stringjsonb
+ + + + + +
->>Return
jsonb ->> intstring
jsonb ->> stringstring
+ + + + + + + + + + +
/Return
decimal / decimaldecimal
decimal / intdecimal
float / floatfloat
int / decimaldecimal
int / intdecimal
interval / floatinterval
interval / intinterval
+ + + + + + + + +
//Return
decimal // decimaldecimal
decimal // intdecimal
float // floatfloat
int // decimaldecimal
int // intint
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
<Return
anyenum < anyenumbool
bool < boolbool
bool[] < bool[]bool
box2d < box2dbool
bpchar < bpcharbool
bytes < bytesbool
bytes[] < bytes[]bool
collatedstring < collatedstringbool
collatedstring{*} < collatedstring{*}bool
date < datebool
date < timestampbool
date < timestamptzbool
date[] < date[]bool
decimal < decimalbool
decimal < floatbool
decimal < intbool
decimal[] < decimal[]bool
float < decimalbool
float < floatbool
float < intbool
float[] < float[]bool
geography < geographybool
geometry < geometrybool
inet < inetbool
inet[] < inet[]bool
int < decimalbool
int < floatbool
int < intbool
int < oidbool
int[] < int[]bool
interval < intervalbool
interval[] < interval[]bool
jsonb < jsonbbool
ltree < ltreebool
oid < intbool
oid < oidbool
pg_lsn < pg_lsnbool
refcursor < refcursorbool
string < stringbool
string[] < string[]bool
time < timebool
time < timetzbool
time[] < time[]bool
timestamp < datebool
timestamp < timestampbool
timestamp < timestamptzbool
timestamp[] < timestamp[]bool
timestamptz < datebool
timestamptz < timestampbool
timestamptz < timestamptzbool
timestamptz < timestamptzbool
timetz < timebool
timetz < timetzbool
tuple < tuplebool
uuid < uuidbool
uuid[] < uuid[]bool
varbit < varbitbool
vector < vectorbool
+ + + + +
<#>Return
vector <#> vectorfloat
+ + + + +
<->Return
vector <-> vectorfloat
+ + + + + + +
<<Return
inet << inetbool
int << intint
varbit << intvarbit
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
<=Return
anyenum <= anyenumbool
bool <= boolbool
bool[] <= bool[]bool
box2d <= box2dbool
bpchar <= bpcharbool
bytes <= bytesbool
bytes[] <= bytes[]bool
collatedstring <= collatedstringbool
collatedstring{*} <= collatedstring{*}bool
date <= datebool
date <= timestampbool
date <= timestamptzbool
date[] <= date[]bool
decimal <= decimalbool
decimal <= floatbool
decimal <= intbool
decimal[] <= decimal[]bool
float <= decimalbool
float <= floatbool
float <= intbool
float[] <= float[]bool
geography <= geographybool
geometry <= geometrybool
inet <= inetbool
inet[] <= inet[]bool
int <= decimalbool
int <= floatbool
int <= intbool
int <= oidbool
int[] <= int[]bool
interval <= intervalbool
interval[] <= interval[]bool
jsonb <= jsonbbool
ltree <= ltreebool
oid <= intbool
oid <= oidbool
pg_lsn <= pg_lsnbool
refcursor <= refcursorbool
string <= stringbool
string[] <= string[]bool
time <= timebool
time <= timetzbool
time[] <= time[]bool
timestamp <= datebool
timestamp <= timestampbool
timestamp <= timestamptzbool
timestamp[] <= timestamp[]bool
timestamptz <= datebool
timestamptz <= timestampbool
timestamptz <= timestamptzbool
timestamptz <= timestamptzbool
timetz <= timebool
timetz <= timetzbool
tuple <= tuplebool
uuid <= uuidbool
uuid[] <= uuid[]bool
varbit <= varbitbool
vector <= vectorbool
+ + + + +
<=>Return
vector <=> vectorfloat
+ + + + + + +
<@Return
anyelement <@ anyelementbool
jsonb <@ jsonbbool
ltree <@ ltreebool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
=Return
anyenum = anyenumbool
bool = boolbool
bool[] = bool[]bool
box2d = box2dbool
bpchar = bpcharbool
bytes = bytesbool
bytes[] = bytes[]bool
collatedstring = collatedstringbool
collatedstring{*} = collatedstring{*}bool
date = datebool
date = timestampbool
date = timestamptzbool
date[] = date[]bool
decimal = decimalbool
decimal = floatbool
decimal = intbool
decimal[] = decimal[]bool
float = decimalbool
float = floatbool
float = intbool
float[] = float[]bool
geography = geographybool
geometry = geometrybool
inet = inetbool
inet[] = inet[]bool
int = decimalbool
int = floatbool
int = intbool
int = oidbool
int[] = int[]bool
interval = intervalbool
interval[] = interval[]bool
jsonb = jsonbbool
ltree = ltreebool
oid = intbool
oid = oidbool
pg_lsn = pg_lsnbool
refcursor = refcursorbool
string = stringbool
string[] = string[]bool
time = timebool
time = timetzbool
time[] = time[]bool
timestamp = datebool
timestamp = timestampbool
timestamp = timestamptzbool
timestamp[] = timestamp[]bool
timestamptz = datebool
timestamptz = timestampbool
timestamptz = timestamptzbool
timestamptz = timestamptzbool
timetz = timebool
timetz = timetzbool
tsquery = tsquerybool
tsvector = tsvectorbool
tuple = tuplebool
uuid = uuidbool
uuid[] = uuid[]bool
varbit = varbitbool
vector = vectorbool
+ + + + + + +
>>Return
inet >> inetbool
int >> intint
varbit >> intvarbit
+ + + + +
?Return
jsonb ? stringbool
+ + + + +
?&Return
jsonb ?& string[]bool
+ + + + +
?<@Return
ltree ?<@ ltreeltree
+ + + + +
?@>Return
ltree ?@> ltreeltree
+ + + + +
?|Return
jsonb ?| string[]bool
+ + + + + + +
@>Return
anyelement @> anyelementbool
jsonb @> jsonbbool
ltree @> ltreebool
+ + + + + +
@@Return
tsquery @@ tsvectorbool
tsvector @@ tsquerybool
+ + + + +
ILIKEReturn
string ILIKE stringbool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
INReturn
anyenum IN tuplebool
bool IN tuplebool
box2d IN tuplebool
bpchar IN tuplebool
bytes IN tuplebool
collatedstring IN tuplebool
date IN tuplebool
decimal IN tuplebool
float IN tuplebool
geography IN tuplebool
geometry IN tuplebool
inet IN tuplebool
int IN tuplebool
interval IN tuplebool
jsonb IN tuplebool
ltree IN tuplebool
oid IN tuplebool
pg_lsn IN tuplebool
refcursor IN tuplebool
string IN tuplebool
time IN tuplebool
timestamp IN tuplebool
timestamptz IN tuplebool
timetz IN tuplebool
tuple IN tuplebool
uuid IN tuplebool
varbit IN tuplebool
vector IN tuplebool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
IS NOT DISTINCT FROMReturn
anyelement IS NOT DISTINCT FROM unknownbool
anyenum IS NOT DISTINCT FROM anyenumbool
bool IS NOT DISTINCT FROM boolbool
bool[] IS NOT DISTINCT FROM bool[]bool
box2d IS NOT DISTINCT FROM box2dbool
bpchar IS NOT DISTINCT FROM bpcharbool
bytes IS NOT DISTINCT FROM bytesbool
bytes[] IS NOT DISTINCT FROM bytes[]bool
collatedstring IS NOT DISTINCT FROM collatedstringbool
collatedstring{*} IS NOT DISTINCT FROM collatedstring{*}bool
date IS NOT DISTINCT FROM datebool
date IS NOT DISTINCT FROM timestampbool
date IS NOT DISTINCT FROM timestamptzbool
date[] IS NOT DISTINCT FROM date[]bool
decimal IS NOT DISTINCT FROM decimalbool
decimal IS NOT DISTINCT FROM floatbool
decimal IS NOT DISTINCT FROM intbool
decimal[] IS NOT DISTINCT FROM decimal[]bool
float IS NOT DISTINCT FROM decimalbool
float IS NOT DISTINCT FROM floatbool
float IS NOT DISTINCT FROM intbool
float[] IS NOT DISTINCT FROM float[]bool
geography IS NOT DISTINCT FROM geographybool
geometry IS NOT DISTINCT FROM geometrybool
inet IS NOT DISTINCT FROM inetbool
inet[] IS NOT DISTINCT FROM inet[]bool
int IS NOT DISTINCT FROM decimalbool
int IS NOT DISTINCT FROM floatbool
int IS NOT DISTINCT FROM intbool
int IS NOT DISTINCT FROM oidbool
int[] IS NOT DISTINCT FROM int[]bool
interval IS NOT DISTINCT FROM intervalbool
interval[] IS NOT DISTINCT FROM interval[]bool
jsonb IS NOT DISTINCT FROM jsonbbool
jsonpath IS NOT DISTINCT FROM jsonpathbool
ltree IS NOT DISTINCT FROM ltreebool
oid IS NOT DISTINCT FROM intbool
oid IS NOT DISTINCT FROM oidbool
pg_lsn IS NOT DISTINCT FROM pg_lsnbool
refcursor IS NOT DISTINCT FROM refcursorbool
string IS NOT DISTINCT FROM stringbool
string[] IS NOT DISTINCT FROM string[]bool
time IS NOT DISTINCT FROM timebool
time IS NOT DISTINCT FROM timetzbool
time[] IS NOT DISTINCT FROM time[]bool
timestamp IS NOT DISTINCT FROM datebool
timestamp IS NOT DISTINCT FROM timestampbool
timestamp IS NOT DISTINCT FROM timestamptzbool
timestamp[] IS NOT DISTINCT FROM timestamp[]bool
timestamptz IS NOT DISTINCT FROM datebool
timestamptz IS NOT DISTINCT FROM timestampbool
timestamptz IS NOT DISTINCT FROM timestamptzbool
timestamptz IS NOT DISTINCT FROM timestamptzbool
timetz IS NOT DISTINCT FROM timebool
timetz IS NOT DISTINCT FROM timetzbool
tsquery IS NOT DISTINCT FROM tsquerybool
tsvector IS NOT DISTINCT FROM tsvectorbool
tuple IS NOT DISTINCT FROM tuplebool
unknown IS NOT DISTINCT FROM unknownbool
unknown IS NOT DISTINCT FROM voidbool
uuid IS NOT DISTINCT FROM uuidbool
uuid[] IS NOT DISTINCT FROM uuid[]bool
varbit IS NOT DISTINCT FROM varbitbool
vector IS NOT DISTINCT FROM vectorbool
void IS NOT DISTINCT FROM unknownbool
+ + + + + +
LIKEReturn
collatedstring LIKE collatedstringbool
string LIKE stringbool
+ + + + +
SIMILAR TOReturn
string SIMILAR TO stringbool
+ + + + + + + + +
^Return
decimal ^ decimaldecimal
decimal ^ intdecimal
float ^ floatfloat
int ^ decimaldecimal
int ^ intint
+ + + + + + +
|Return
inet | inetinet
int | intint
varbit | varbitvarbit
+ + + + + +
|/Return
|/decimaldecimal
|/floatfloat
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
||Return
bool || bool[]bool[]
bool || stringstring
bool[] || boolbool[]
bool[] || bool[]bool[]
box2d || box2dbox2d
box2d || stringstring
bytes || bytesbytes
bytes || bytes[]bytes[]
bytes[] || bytesbytes[]
bytes[] || bytes[]bytes[]
date || date[]date[]
date || stringstring
date[] || datedate[]
date[] || date[]date[]
decimal || decimal[]decimal[]
decimal || stringstring
decimal[] || decimaldecimal[]
decimal[] || decimal[]decimal[]
float || float[]float[]
float || stringstring
float[] || floatfloat[]
float[] || float[]float[]
geography || geographygeography
geography || stringstring
geometry || geometrygeometry
geometry || stringstring
inet || inet[]inet[]
inet || stringstring
inet[] || inetinet[]
inet[] || inet[]inet[]
int || int[]int[]
int || stringstring
int[] || intint[]
int[] || int[]int[]
interval || interval[]interval[]
interval || stringstring
interval[] || intervalinterval[]
interval[] || interval[]interval[]
jsonb || jsonbjsonb
jsonb || stringstring
ltree || ltreeltree
ltree || stringstring
oid || oidoid
oid || stringstring
pg_lsn || pg_lsnpg_lsn
pg_lsn || stringstring
refcursor || refcursorrefcursor
refcursor || stringstring
string || boolstring
string || box2dstring
string || datestring
string || decimalstring
string || floatstring
string || geographystring
string || geometrystring
string || inetstring
string || intstring
string || intervalstring
string || jsonbstring
string || ltreestring
string || oidstring
string || pg_lsnstring
string || refcursorstring
string || stringstring
string || string[]string[]
string || timestring
string || timestampstring
string || timestamptzstring
string || timetzstring
string || tuplestring
string || uuidstring
string || varbitstring
string[] || stringstring[]
string[] || string[]string[]
time || stringstring
time || time[]time[]
time[] || timetime[]
time[] || time[]time[]
timestamp || stringstring
timestamp || timestamp[]timestamp[]
timestamp[] || timestamptimestamp[]
timestamp[] || timestamp[]timestamp[]
timestamptz || stringstring
timestamptz || timestamptz[]timestamptz[]
timestamptz[] || timestamptztimestamptz[]
timestamptz[] || timestamptz[]timestamptz[]
timetz || stringstring
timetz || timetztimetz
tuple || stringstring
uuid || stringstring
uuid || uuid[]uuid[]
uuid[] || uuiduuid[]
uuid[] || uuid[]uuid[]
varbit || stringstring
varbit || varbitvarbit
+ + + + + +
||/Return
||/decimaldecimal
||/floatfloat
+ + + + + + + + + + + +
~Return
~inetinet
~intint
~varbitvarbit
box2d ~ box2dbool
box2d ~ geometrybool
geometry ~ box2dbool
geometry ~ geometrybool
string ~ stringbool
+ + + + +
~*Return
string ~* stringbool
diff --git a/src/current/_includes/cockroach-generated/release-25.4/sql/window_functions.md b/src/current/_includes/cockroach-generated/release-25.4/sql/window_functions.md new file mode 100644 index 00000000000..321cc02ccf9 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-25.4/sql/window_functions.md @@ -0,0 +1,431 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cume_dist() → float

Calculates the relative rank of the current row: (number of rows preceding or peer with current row) / (total rows).

+
Immutable
dense_rank() → int

Calculates the rank of the current row without gaps; this function counts peer groups.

+
Immutable
first_value(val: bool) → bool

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: bytes) → bytes

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: date) → date

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: decimal) → decimal

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: float) → float

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: inet) → inet

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: int) → int

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: interval) → interval

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: string) → string

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: time) → time

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timestamp) → timestamp

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timestamptz) → timestamptz

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: uuid) → uuid

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: box2d) → box2d

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: geography) → geography

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: geometry) → geometry

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: jsonb) → jsonb

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: ltree) → ltree

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: oid) → oid

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: pg_lsn) → pg_lsn

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: refcursor) → refcursor

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timetz) → timetz

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: varbit) → varbit

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
lag(val: bool) → bool

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: bool, n: int) → bool

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: bool, n: int, default: bool) → bool

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: bytes) → bytes

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: bytes, n: int) → bytes

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: bytes, n: int, default: bytes) → bytes

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: date) → date

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: date, n: int) → date

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: date, n: int, default: date) → date

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: decimal) → decimal

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: decimal, n: int) → decimal

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: decimal, n: int, default: decimal) → decimal

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: float) → float

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: float, n: int) → float

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: float, n: int, default: float) → float

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: inet) → inet

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: inet, n: int) → inet

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: inet, n: int, default: inet) → inet

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: int) → int

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: int, n: int) → int

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: int, n: int, default: int) → int

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: interval) → interval

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: interval, n: int) → interval

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: interval, n: int, default: interval) → interval

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: string) → string

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: string, n: int) → string

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: string, n: int, default: string) → string

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: time) → time

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: time, n: int) → time

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: time, n: int, default: time) → time

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timestamp) → timestamp

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timestamp, n: int, default: timestamp) → timestamp

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timestamptz) → timestamptz

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timestamptz, n: int, default: timestamptz) → timestamptz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: uuid) → uuid

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: uuid, n: int) → uuid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: uuid, n: int, default: uuid) → uuid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: box2d) → box2d

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: box2d, n: int) → box2d

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: box2d, n: int, default: box2d) → box2d

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: geography) → geography

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: geography, n: int) → geography

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: geography, n: int, default: geography) → geography

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: geometry) → geometry

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: geometry, n: int) → geometry

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: geometry, n: int, default: geometry) → geometry

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: jsonb) → jsonb

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: jsonb, n: int, default: jsonb) → jsonb

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: ltree) → ltree

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: ltree, n: int) → ltree

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: ltree, n: int, default: ltree) → ltree

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: oid) → oid

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: oid, n: int) → oid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: oid, n: int, default: oid) → oid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: pg_lsn) → pg_lsn

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: pg_lsn, n: int, default: pg_lsn) → pg_lsn

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: refcursor) → refcursor

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: refcursor, n: int, default: refcursor) → refcursor

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timetz) → timetz

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timetz, n: int) → timetz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timetz, n: int, default: timetz) → timetz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: varbit) → varbit

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: varbit, n: int) → varbit

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: varbit, n: int, default: varbit) → varbit

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
last_value(val: bool) → bool

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: bytes) → bytes

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: date) → date

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: decimal) → decimal

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: float) → float

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: inet) → inet

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: int) → int

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: interval) → interval

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: string) → string

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: time) → time

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timestamp) → timestamp

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timestamptz) → timestamptz

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: uuid) → uuid

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: box2d) → box2d

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: geography) → geography

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: geometry) → geometry

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: jsonb) → jsonb

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: ltree) → ltree

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: oid) → oid

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: pg_lsn) → pg_lsn

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: refcursor) → refcursor

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timetz) → timetz

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: varbit) → varbit

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
lead(val: bool) → bool

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: bool, n: int) → bool

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: bool, n: int, default: bool) → bool

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: bytes) → bytes

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: bytes, n: int) → bytes

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: bytes, n: int, default: bytes) → bytes

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: date) → date

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: date, n: int) → date

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: date, n: int, default: date) → date

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: decimal) → decimal

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: decimal, n: int) → decimal

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: decimal, n: int, default: decimal) → decimal

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: float) → float

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: float, n: int) → float

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: float, n: int, default: float) → float

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: inet) → inet

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: inet, n: int) → inet

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: inet, n: int, default: inet) → inet

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: int) → int

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: int, n: int) → int

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: int, n: int, default: int) → int

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: interval) → interval

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: interval, n: int) → interval

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: interval, n: int, default: interval) → interval

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: string) → string

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: string, n: int) → string

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: string, n: int, default: string) → string

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: time) → time

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: time, n: int) → time

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: time, n: int, default: time) → time

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timestamp) → timestamp

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timestamp, n: int, default: timestamp) → timestamp

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timestamptz) → timestamptz

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timestamptz, n: int, default: timestamptz) → timestamptz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: uuid) → uuid

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: uuid, n: int) → uuid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: uuid, n: int, default: uuid) → uuid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: box2d) → box2d

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: box2d, n: int) → box2d

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: box2d, n: int, default: box2d) → box2d

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: geography) → geography

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: geography, n: int) → geography

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: geography, n: int, default: geography) → geography

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: geometry) → geometry

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: geometry, n: int) → geometry

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: geometry, n: int, default: geometry) → geometry

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: jsonb) → jsonb

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: jsonb, n: int, default: jsonb) → jsonb

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: ltree) → ltree

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: ltree, n: int) → ltree

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: ltree, n: int, default: ltree) → ltree

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: oid) → oid

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: oid, n: int) → oid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: oid, n: int, default: oid) → oid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: pg_lsn) → pg_lsn

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: pg_lsn, n: int, default: pg_lsn) → pg_lsn

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: refcursor) → refcursor

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: refcursor, n: int, default: refcursor) → refcursor

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timetz) → timetz

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timetz, n: int) → timetz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timetz, n: int, default: timetz) → timetz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: varbit) → varbit

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: varbit, n: int) → varbit

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: varbit, n: int, default: varbit) → varbit

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
nth_value(val: bool, n: int) → bool

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: bytes, n: int) → bytes

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: date, n: int) → date

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: decimal, n: int) → decimal

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: float, n: int) → float

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: inet, n: int) → inet

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: int, n: int) → int

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: interval, n: int) → interval

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: string, n: int) → string

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: time, n: int) → time

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: uuid, n: int) → uuid

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: box2d, n: int) → box2d

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: geography, n: int) → geography

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: geometry, n: int) → geometry

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: ltree, n: int) → ltree

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: oid, n: int) → oid

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timetz, n: int) → timetz

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: varbit, n: int) → varbit

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
ntile(n: int) → int

Calculates an integer ranging from 1 to n, dividing the partition as equally as possible.

+
Immutable
percent_rank() → float

Calculates the relative rank of the current row: (rank - 1) / (total rows - 1).

+
Immutable
rank() → int

Calculates the rank of the current row with gaps; same as row_number of its first peer.

+
Immutable
row_number() → int

Calculates the number of the current row within its partition, counting from 1.

+
Immutable
+ diff --git a/src/current/_includes/cockroach-generated/release-26.1/eventlog.md b/src/current/_includes/cockroach-generated/release-26.1/eventlog.md new file mode 100644 index 00000000000..2edf5ed39f4 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-26.1/eventlog.md @@ -0,0 +1,3874 @@ +Certain notable events are reported using a structured format. +Commonly, these notable events are also copied to the table +`system.eventlog`, unless the cluster setting +`server.eventlog.enabled` is unset. + +Additionally, notable events are copied to specific external logging +channels in log messages, where they can be collected for further processing. + +The sections below document the possible notable event types +in this version of CockroachDB. For each event type, a table +documents the possible fields. A field may be omitted from +an event if its value is empty or zero. + +A field is also considered "Sensitive" if it may contain +application-specific information or personally identifiable information (PII). In that case, +the copy of the event sent to the external logging channel +will contain redaction markers in a format that is compatible +with the redaction facilities in [`cockroach debug zip`](cockroach-debug-zip.html) +and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html), +provided the `redactable` functionality is enabled on the logging sink. + +Events not documented on this page will have an unstructured format in log messages. + +## Changefeed telemetry events + +Events in this category pertain to changefeed usage and metrics. + +Events in this category are logged to the `CHANGEFEED` channel. + + +### `alter_changefeed` + +An event of type `alter_changefeed` is an event for any ALTER CHANGEFEED statements that are run. + +Note: in version 26.1, these events will be moved to the `CHANGEFEED` channel. +To test compatability before this, set the cluster setting +`log.channel_compatibility_mode.enabled` to false. 
This will send the +events to `CHANGEFEED` instead of `TELEMETRY`. + + +| Field | Description | Sensitive | +|--|--|--| +| `PreviousDescription` | The description of the changefeed job before the ALTER CHANGEFEED. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. | no | + +### `changefeed_canceled` + +An event of type `changefeed_canceled` is an event for any changefeed cancellations. + +Note: in version 26.1, these events will be moved to the `CHANGEFEED` channel. +To test compatability before this, set the cluster setting +`log.channel_compatibility_mode.enabled` to false. This will send the +events to `CHANGEFEED` instead of `TELEMETRY`. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. 
| no | + +### `changefeed_emitted_bytes` + +An event of type `changefeed_emitted_bytes` is an event representing the bytes emitted by a changefeed over an interval. + +Note: in version 26.1, these events will be moved to the `CHANGEFEED` channel. +To test compatability before this, set the cluster setting +`log.channel_compatibility_mode.enabled` to false. This will send the +events to `CHANGEFEED` instead of `TELEMETRY`. + + +| Field | Description | Sensitive | +|--|--|--| +| `EmittedBytes` | The number of bytes emitted. | no | +| `EmittedMessages` | The number of messages emitted. | no | +| `LoggingInterval` | The time period in nanoseconds between emitting telemetry events of this type (per-aggregator). | no | +| `Closing` | Flag to indicate that the changefeed is closing. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. | no | + +### `changefeed_failed` + +An event of type `changefeed_failed` is an event for any changefeed failure since the plan hook +was triggered. + +Note: in version 26.1, these events will be moved to the `CHANGEFEED` channel. +To test compatability before this, set the cluster setting +`log.channel_compatibility_mode.enabled` to false. This will send the +events to `CHANGEFEED` instead of `TELEMETRY`. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `FailureType` | The reason / environment with which the changefeed failed (ex: connection_closed, changefeed_behind). | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. | no | + +### `create_changefeed` + +An event of type `create_changefeed` is an event for any CREATE CHANGEFEED query that +successfully starts running. Failed CREATE statements will show up as +ChangefeedFailed events. + +Note: in version 26.1, these events moved to the `CHANGEFEED` channel. +To test compatability prior to this, set the cluster setting +`log.channel_compatibility_mode.enabled` to true. This will send the +events to `TELEMETRY` instead of `CHANGEFEED`. + + +| Field | Description | Sensitive | +|--|--|--| +| `Transformation` | Flag representing whether the changefeed is using CDC queries. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. 
| no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. | no | + +## Cluster-level events + +Events in this category pertain to an entire cluster and are +not relative to any particular tenant. + +In a multi-tenant setup, the `system.eventlog` table for individual +tenants cannot contain a copy of cluster-level events; conversely, +the `system.eventlog` table in the system tenant cannot contain the +SQL-level events for individual tenants. + +Events in this category are logged to the `OPS` channel. + + +### `certs_reload` + +An event of type `certs_reload` is recorded when the TLS certificates are +reloaded/rotated from disk. + + +| Field | Description | Sensitive | +|--|--|--| +| `Success` | Whether the operation completed without errors. | no | +| `ErrorMessage` | If an error was encountered, the text of the error. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `disk_slowness_cleared` + +An event of type `disk_slowness_cleared` is recorded when disk slowness in a store has cleared. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeID` | The node ID where the event was originated. | no | +| `StoreID` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `disk_slowness_detected` + +An event of type `disk_slowness_detected` is recorded when a store observes disk slowness +events. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `NodeID` | The node ID where the event was originated. | no | +| `StoreID` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `low_disk_space` + +An event of type `low_disk_space` is emitted when a store is reaching capacity, as we reach +certain thresholds. It is emitted periodically while we are in a low disk +state. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeID` | The node ID where the event was originated. | no | +| `StoreID` | | no | +| `PercentThreshold` | The free space percent threshold that we went under. | no | +| `AvailableBytes` | | no | +| `TotalBytes` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `node_decommissioned` + +An event of type `node_decommissioned` is recorded when a node is marked as +decommissioned. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_decommissioning` + +An event of type `node_decommissioning` is recorded when a node is marked as +decommissioning. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. 
| no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_join` + +An event of type `node_join` is recorded when a node joins the cluster. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_recommissioned` + +An event of type `node_recommissioned` is recorded when a decommissioning node is +recommissioned. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_restart` + +An event of type `node_restart` is recorded when an existing node rejoins the cluster +after being offline. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_shutdown_connection_timeout` + +An event of type `node_shutdown_connection_timeout` is recorded when SQL connections remain open +during shutdown, after waiting for the server.shutdown.connections.timeout +to transpire. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `Detail` | The detailed message, meant to be a human-understandable explanation. | no | +| `ConnectionsRemaining` | The number of connections still open after waiting for the client to close them. | no | +| `TimeoutMillis` | The amount of time the server waited for the client to close the connections, defined by server.shutdown.connections.timeout. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_shutdown_transaction_timeout` + +An event of type `node_shutdown_transaction_timeout` is recorded when SQL transactions remain open +during shutdown, after waiting for the server.shutdown.transactions.timeout +to transpire. + + +| Field | Description | Sensitive | +|--|--|--| +| `Detail` | The detailed message, meant to be a human-understandable explanation. | no | +| `ConnectionsRemaining` | The number of connections still running SQL transactions after waiting for the client to end them. | no | +| `TimeoutMillis` | The amount of time the server waited for the client to close the connections, defined by server.shutdown.transactions.timeout. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. 
| no | + +### `tenant_shared_service_start` + +An event of type `tenant_shared_service_start` is recorded when a tenant server +is started inside the same process as the KV layer. + + +| Field | Description | Sensitive | +|--|--|--| +| `OK` | Whether the startup was successful. | no | +| `ErrorText` | If the startup failed, the text of the error. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `TenantID` | The ID of the tenant owning the service. | no | +| `InstanceID` | The ID of the server instance. | no | +| `TenantName` | The name of the tenant at the time the event was emitted. | yes | + +### `tenant_shared_service_stop` + +An event of type `tenant_shared_service_stop` is recorded when a tenant server +is shut down inside the same process as the KV layer. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `TenantID` | The ID of the tenant owning the service. | no | +| `InstanceID` | The ID of the server instance. | no | +| `TenantName` | The name of the tenant at the time the event was emitted. | yes | + +## Contention events + +Aggregated information about contention events. + +Events in this category are logged to the `SQL_EXEC` channel. + + +### `aggregated_contention_info` + +An event of type `aggregated_contention_info` is recorded periodically when contention events +are resolved. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `WaitingStmtFingerprintId` | | no | +| `WaitingTxnFingerprintId` | | no | +| `BlockingTxnFingerprintId` | | no | +| `ContendedKey` | | partially | +| `Duration` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Debugging events + +Events in this category pertain to debugging operations performed by +operators or (more commonly) Cockroach Labs employees. These operations can +e.g. directly access and mutate internal state, breaking system invariants. + +Events in this category are logged to the `OPS` channel. + + +### `debug_recover_replica` + +An event of type `debug_recover_replica` is recorded when unsafe loss of quorum recovery is performed. + + +| Field | Description | Sensitive | +|--|--|--| +| `RangeID` | | no | +| `StoreID` | | no | +| `SurvivorReplicaID` | | no | +| `UpdatedReplicaID` | | no | +| `StartKey` | | yes | +| `EndKey` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event originated. | no | +| `User` | The user which performed the operation. | yes | + +### `debug_send_kv_batch` + +An event of type `debug_send_kv_batch` is recorded when an arbitrary KV BatchRequest is submitted +to the cluster via the `debug send-kv-batch` CLI command. + + +| Field | Description | Sensitive | +|--|--|--| +| `BatchRequest` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event originated. 
| no | +| `User` | The user which performed the operation. | yes | + +## Health events + +Events in this category pertain to the health of one or more servers. + +Events in this category are logged to the `HEALTH` channel. + + +### `hot_ranges_stats` + +An event of type `hot_ranges_stats` + + +| Field | Description | Sensitive | +|--|--|--| +| `RangeID` | | no | +| `Qps` | | no | +| `SchemaName` | SchemaName is the name of the schema in which the index was created. | yes | +| `LeaseholderNodeID` | LeaseholderNodeID indicates the Node ID that is the current leaseholder for the given range. | no | +| `WritesPerSecond` | Writes per second is the recent number of keys written per second on this range. | no | +| `ReadsPerSecond` | Reads per second is the recent number of keys read per second on this range. | no | +| `WriteBytesPerSecond` | Write bytes per second is the recent number of bytes written per second on this range. | no | +| `ReadBytesPerSecond` | Read bytes per second is the recent number of bytes read per second on this range. | no | +| `CPUTimePerSecond` | CPU time per second is the recent cpu usage in nanoseconds of this range. | no | +| `Databases` | Databases for the range. | no | +| `Tables` | Tables for the range | no | +| `Indexes` | Indexes for the range | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `runtime_stats` + +An event of type `runtime_stats` is recorded every 10 seconds as server health metrics. + + +| Field | Description | Sensitive | +|--|--|--| +| `MemRSSBytes` | The process resident set size. Expressed as bytes. | no | +| `GoroutineCount` | The number of goroutines. | no | +| `MemStackSysBytes` | The stack system memory used. Expressed as bytes. | no | +| `GoAllocBytes` | The memory allocated by Go. Expressed as bytes. 
| no | +| `GoTotalBytes` | The total memory allocated by Go but not released. Expressed as bytes. | no | +| `GoStatsStaleness` | The staleness of the Go memory statistics. Expressed in seconds. | no | +| `HeapFragmentBytes` | The amount of heap fragmentation. Expressed as bytes. | no | +| `HeapReservedBytes` | The amount of heap reserved. Expressed as bytes. | no | +| `HeapReleasedBytes` | The amount of heap released. Expressed as bytes. | no | +| `CGoAllocBytes` | The memory allocated outside of Go. Expressed as bytes. | no | +| `CGoTotalBytes` | The total memory allocated outside of Go but not released. Expressed as bytes. | no | +| `CGoCallRate` | The total number of calls outside of Go over time. Expressed as operations per second. | no | +| `CPUUserPercent` | The user CPU percentage. | no | +| `CPUSysPercent` | The system CPU percentage. | no | +| `GCPausePercent` | The GC pause percentage. | no | +| `GCRunCount` | The total number of GC runs. | no | +| `NetHostRecvBytes` | The bytes received on all network interfaces since this process started. | no | +| `NetHostSendBytes` | The bytes sent on all network interfaces since this process started. | no | +| `GoLimitBytes` | The soft Go memory limit in bytes. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Job events + +Events in this category pertain to long-running jobs that are orchestrated by +a node's job registry. These system processes can create and/or modify stored +objects during the course of their execution. + +A job might choose to emit multiple events during its execution when +transitioning from one "state" to another. +Egs: IMPORT/RESTORE will emit events on job creation and successful +completion. If the job fails, events will be emitted on job creation, +failure, and successful revert. 
+ +Events in this category are logged to the `OPS` channel. + + +### `import` + +An event of type `import` is recorded when an import job is created and on successful completion. +If the job fails, events will be emitted on job creation, failure, and +successful revert. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `JobID` | The ID of the job that triggered the event. | no | +| `JobType` | The type of the job that triggered the event. | no | +| `Description` | A description of the job that triggered the event. Some jobs populate the description with an approximate representation of the SQL statement run to create the job. | yes | +| `User` | The user account that triggered the event. | yes | +| `DescriptorIDs` | The object descriptors affected by the job. Set to zero for operations that don't affect descriptors. | yes | +| `Status` | The status of the job that triggered the event. This allows the job to indicate which phase execution it is in when the event is triggered. | no | + +### `restore` + +An event of type `restore` is recorded when a restore job is created and on successful completion. +If the job fails, events will be emitted on job creation, failure, and +successful revert. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `JobID` | The ID of the job that triggered the event. | no | +| `JobType` | The type of the job that triggered the event. | no | +| `Description` | A description of the job that triggered the event. Some jobs populate the description with an approximate representation of the SQL statement run to create the job. | yes | +| `User` | The user account that triggered the event. 
| yes | +| `DescriptorIDs` | The object descriptors affected by the job. Set to zero for operations that don't affect descriptors. | yes | +| `Status` | The status of the job that triggered the event. This allows the job to indicate which phase execution it is in when the event is triggered. | no | + +### `status_change` + +An event of type `status_change` is recorded when a job changes statuses. + + +| Field | Description | Sensitive | +|--|--|--| +| `JobID` | The ID of the job that is changing statuses. | no | +| `JobType` | The type of the job that is changing statuses. | no | +| `Description` | A human parsable description of the status change | partially | +| `PreviousStatus` | The status that the job is transitioning out of | no | +| `NewStatus` | The status that the job has transitioned into | no | +| `RunNum` | The run number of the job. | no | +| `Error` | An error that may have occurred while the job was running. | yes | +| `FinalResumeErr` | An error that occurred that requires the job to be reverted. | yes | +| `User` | User is the owner of the job. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Miscellaneous SQL events + +Events in this category report miscellaneous SQL events. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these miscellaneous events are +preserved in each tenant's own system.eventlog table. + +Events in this category are logged to the `OPS` channel. + + +### `rewrite_inline_hints` + +An event of type `rewrite_inline_hints` is recorded when a new inline-hints rewrite rule is added +via information_schema.crdb_rewrite_inline_hints. + + +| Field | Description | Sensitive | +|--|--|--| +| `StatementFingerprint` | The target statement fingerprint for which inline hints are being rewritten. 
| no | +| `DonorSQL` | The donor statement providing the inline hints. | no | +| `HintID` | The hint ID of the newly created statement hint. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `set_cluster_setting` + +An event of type `set_cluster_setting` is recorded when a cluster setting is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SettingName` | The name of the affected cluster setting. | no | +| `Value` | The new value of the cluster setting. | yes | +| `DefaultValue` | The current default value of the cluster setting. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `set_tenant_cluster_setting` + +An event of type `set_tenant_cluster_setting` is recorded when a cluster setting override +is changed, either for another tenant or for all tenants. + + +| Field | Description | Sensitive | +|--|--|--| +| `SettingName` | The name of the affected cluster setting. | no | +| `Value` | The new value of the cluster setting. | yes | +| `TenantId` | The target Tenant ID. Empty if targeting all tenants. | no | +| `AllTenants` | Whether the override applies to all tenants. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +## SQL Access Audit Events + +Events in this category are generated when a table has been +marked as audited via `ALTER TABLE ... EXPERIMENTAL_AUDIT SET`. + +Note: These events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SENSITIVE_ACCESS` channel. + + +### `admin_query` + +An event of type `admin_query` is recorded when a user with admin privileges (the user +is directly or indirectly a member of the admin role) executes a query. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. 
| no | + +### `role_based_audit_event` + +An event of type `role_based_audit_event` is an audit event recorded when an executed query belongs to a user whose role +membership(s) correspond to any role that is enabled to emit an audit log via the sql.log.user_audit +cluster setting. + + +| Field | Description | Sensitive | +|--|--|--| +| `Role` | The configured audit role that emitted this log. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. 
| no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `sensitive_table_access` + +An event of type `sensitive_table_access` is recorded when an access is performed to +a table marked as audited. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table being audited. | yes | +| `AccessMode` | How the table was accessed (r=read / rw=read/write). | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `unsafe_internals_accessed` + +UnsafeInternalsAccess is recorded when a query accesses unsafe internals +using the allow_unsafe_internals override. + + +| Field | Description | Sensitive | +|--|--|--| +| `Query` | The query that triggered the unsafe internals access. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `unsafe_internals_denied` + +An event of type `unsafe_internals_denied` is recorded when a query attempts to access unsafe internals +but lacks the appropriate session variables. + + +| Field | Description | Sensitive | +|--|--|--| +| `Query` | The query that triggered the unsafe internals access. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +## SQL Execution Log + +Events in this category report executed queries. + +Note: These events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SQL_EXEC` channel. + + +### `query_execute` + +An event of type `query_execute` is recorded when a query is executed, +and the cluster setting `sql.log.all_statements.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. 
Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +## SQL Logical Schema Changes + +Events in this category pertain to DDL (Data Definition Language) +operations performed by SQL statements that modify the SQL logical +schema. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `SQL_SCHEMA` channel. 
+ + +### `alter_database_add_region` + +An event of type `alter_database_add_region` is recorded when a region is added to a database. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `RegionName` | The region being added. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_database_drop_region` + +An event of type `alter_database_drop_region` is recorded when a region is dropped from a database. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `RegionName` | The region being dropped. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_database_placement` + +An event of type `alter_database_placement` is recorded when the database placement is modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `Placement` | The new placement policy. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_database_primary_region` + +An event of type `alter_database_primary_region` is recorded when a primary region is added/modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `PrimaryRegionName` | The new primary region. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_database_set_zone_config_extension` + +An event of type `alter_database_set_zone_config_extension` is recorded when a zone config extension is changed. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. 
| yes | + +### `alter_database_survival_goal` + +An event of type `alter_database_survival_goal` is recorded when the survival goal is modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `SurvivalGoal` | The new survival goal | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_function_options` + +An event of type `alter_function_options` is recorded when a user-defined function's options are +altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the affected function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_index` + +An event of type `alter_index` is recorded when an index is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_index_visible` + +AlterIndex is recorded when an index visibility is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `NotVisible` | Set true if index is not visible. NOTE: THIS FIELD IS DEPRECATED in favor of invisibility. | no | +| `Invisibility` | The new invisibility of the affected index. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. 
The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_sequence` + +An event of type `alter_sequence` is recorded when a sequence is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the affected sequence. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_table` + +An event of type `alter_table` is recorded when a table is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update, if any. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_type` + +EventAlterType is recorded when a user-defined type is altered. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `comment_on_column` + +An event of type `comment_on_column` is recorded when a column is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected column. | no | +| `ColumnName` | The affected column. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `comment_on_constraint` + +An event of type `comment_on_constraint` is recorded when an constraint is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected constraint. | no | +| `ConstraintName` | The name of the affected constraint. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `comment_on_database` + +CommentOnTable is recorded when a database is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. 
Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `comment_on_index` + +An event of type `comment_on_index` is recorded when an index is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `comment_on_schema` + + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | Name of the affected schema. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `comment_on_table` + +An event of type `comment_on_table` is recorded when a table is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. 
| no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `comment_on_type` + +An event of type `comment_on_type` is recorded when a type is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_database` + +An event of type `create_database` is recorded when a database is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the new database. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_function` + +An event of type `create_function` is recorded when a user-defined function is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the created function. | no | +| `IsReplace` | If the new function is a replace of an existing function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_index` + +An event of type `create_index` is recorded when an index is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the new index. | no | +| `IndexName` | The name of the new index. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_policy` + +An event of type `create_policy` is recorded when a policy is created. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | Name of the policy's table. | no | +| `PolicyName` | Name of the created policy. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_schema` + +An event of type `create_schema` is recorded when a schema is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the new schema. | no | +| `Owner` | The name of the owner for the new schema. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_sequence` + +An event of type `create_sequence` is recorded when a sequence is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the new sequence. | no | +| `Owner` | The name of the owner for the new sequence. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_statistics` + +An event of type `create_statistics` is recorded when statistics are collected for a +table. + +Events of this type are only collected when the cluster setting +`sql.stats.post_events.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table for which the statistics were created. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_table` + +An event of type `create_table` is recorded when a table is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the new table. | no | +| `Owner` | The name of the owner for the new table. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_trigger` + +An event of type `create_trigger` is recorded when a trigger is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | Name of the trigger's table. 
| no | +| `TriggerName` | Name of the created trigger. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_type` + +An event of type `create_type` is recorded when a user-defined type is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the new type. | no | +| `Owner` | The name of the owner for the new type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_view` + +An event of type `create_view` is recorded when a view is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `ViewName` | The name of the new view. | no | +| `Owner` | The name of the owner of the new view. | no | +| `ViewQuery` | The SQL selection clause used to define the view. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_database` + +An event of type `drop_database` is recorded when a database is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | no | +| `DroppedSchemaObjects` | The names of the schemas dropped by a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_function` + +An event of type `drop_function` is recorded when a user-defined function is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the dropped function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_index` + +An event of type `drop_index` is recorded when an index is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. 
| no | +| `IndexName` | The name of the affected index. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_policy` + +An event of type `drop_policy` is recorded when a policy is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | Name of the policy's table. | no | +| `PolicyName` | Name of the dropped policy. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_schema` + +An event of type `drop_schema` is recorded when a schema is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. 
The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_sequence` + +An event of type `drop_sequence` is recorded when a sequence is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the affected sequence. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_table` + +An event of type `drop_table` is recorded when a table is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_trigger` + +An event of type `drop_trigger` is recorded when a trigger is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | Name of the trigger's table. | no | +| `TriggerName` | Name of the dropped trigger. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_type` + +An event of type `drop_type` is recorded when a user-defined type is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_view` + +An event of type `drop_view` is recorded when a view is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `ViewName` | The name of the affected view. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `finish_schema_change` + +An event of type `finish_schema_change` is recorded when a previously initiated schema +change has completed. + + +| Field | Description | Sensitive | +|--|--|--| +| `LatencyNanos` | The amount of time the schema change job took to complete. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `finish_schema_change_rollback` + +An event of type `finish_schema_change_rollback` is recorded when a previously +initiated schema change rollback has completed. + + +| Field | Description | Sensitive | +|--|--|--| +| `LatencyNanos` | The amount of time the schema change job took to rollback. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. 
Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `force_delete_table_data_entry` + + + +| Field | Description | Sensitive | +|--|--|--| +| `DescriptorID` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `refresh_materialized_view` + +An event of type `refresh_materialized_view` is recorded when a materialized view is refreshed. + + +| Field | Description | Sensitive | +|--|--|--| +| `ViewName` | The name of the materialized view being refreshed. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `rename_database` + +An event of type `rename_database` is recorded when a database is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The old name of the affected database. | no | +| `NewDatabaseName` | The new name of the affected database. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `rename_function` + +An event of type `rename_function` is recorded when a user-defined function is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | The old name of the affected function. | no | +| `NewFunctionName` | The new name of the affected function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `rename_schema` + +An event of type `rename_schema` is recorded when a schema is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The old name of the affected schema. | no | +| `NewSchemaName` | The new name of the affected schema. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `rename_table` + +An event of type `rename_table` is recorded when a table, sequence or view is renamed. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The old name of the affected table. | no | +| `NewTableName` | The new name of the affected table. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `rename_type` + +An event of type `rename_type` is recorded when a user-defined type is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The old name of the affected type. | no | +| `NewTypeName` | The new name of the affected type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `reverse_schema_change` + +An event of type `reverse_schema_change` is recorded when an in-progress schema change +encounters a problem and is reversed. + + +| Field | Description | Sensitive | +|--|--|--| +| `Error` | The error encountered that caused the schema change to be reversed. The specific format of the error is variable and can change across releases without warning. | partially | +| `SQLSTATE` | The SQLSTATE code for the error. | no | +| `LatencyNanos` | The amount of time the schema change job took before being reverted. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. 
| no | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `set_schema` + +An event of type `set_schema` is recorded when a table, view, sequence or type's schema is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DescriptorName` | The old name of the affected descriptor. | no | +| `NewDescriptorName` | The new name of the affected descriptor. | no | +| `DescriptorType` | The descriptor type being changed (table, view, sequence, type). | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. 
| no | + +### `truncate_table` + +An event of type `truncate_table` is recorded when a table is truncated. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `unsafe_delete_descriptor` + +An event of type `unsafe_delete_descriptor` is recorded when a descriptor is written +using crdb_internal.unsafe_delete_descriptor(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | no | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `unsafe_delete_namespace_entry` + +An event of type `unsafe_delete_namespace_entry` is recorded when a namespace entry is +written using crdb_internal.unsafe_delete_namespace_entry(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | no | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `unsafe_upsert_descriptor` + +An event of type `unsafe_upsert_descriptor` is recorded when a descriptor is written +using crdb_internal.unsafe_upsert_descriptor(). + + +| Field | Description | Sensitive | +|--|--|--| +| `PreviousDescriptor` | | yes | +| `NewDescriptor` | | yes | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `unsafe_upsert_namespace_entry` + +An event of type `unsafe_upsert_namespace_entry` is recorded when a namespace entry is +written using crdb_internal.unsafe_upsert_namespace_entry(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. + + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | no | +| `PreviousID` | | no | +| `Force` | | no | +| `FailedValidation` | | no | +| `ValidationErrors` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +## SQL Privilege changes + +Events in this category pertain to DDL (Data Definition Language) +operations performed by SQL statements that modify the privilege +grants for stored objects. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `PRIVILEGES` channel. + + +### `alter_database_owner` + +An event of type `alter_database_owner` is recorded when a database's owner is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database being affected. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_default_privileges` + +An event of type `alter_default_privileges` is recorded when default privileges are changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | yes | +| `RoleName` | Either role_name should be populated or for_all_roles should be true. The role having its default privileges altered. | yes | +| `ForAllRoles` | Identifies if FOR ALL ROLES is used. | no | +| `SchemaName` | The name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
 | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `alter_function_owner` + +An event of type `alter_function_owner` is recorded when the owner of a user-defined function is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | The name of the affected user-defined function. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_schema_owner` + +An event of type `alter_schema_owner` is recorded when a schema's owner is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
 | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_table_owner` + +An event of type `alter_table_owner` is recorded when the owner of a table, view or sequence is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected object. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_type_owner` + +An event of type `alter_type_owner` is recorded when the owner of a user-defined type is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | +| `Owner` | The name of the new owner. 
| yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `change_database_privilege` + +An event of type `change_database_privilege` is recorded when privileges are +added to / removed from a user for a database object. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_function_privilege` + + + +| Field | Description | Sensitive | +|--|--|--| +| `FuncName` | The name of the affected function. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_schema_privilege` + +An event of type `change_schema_privilege` is recorded when privileges are added to / +removed from a user for a schema object. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_table_privilege` + +An event of type `change_table_privilege` is recorded when privileges are added to / removed +from a user for a table, sequence or view object. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_type_privilege` + +An event of type `change_type_privilege` is recorded when privileges are added to / +removed from a user for a type object. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +## SQL Session events + +Events in this category report SQL client connections +and sessions. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these miscellaneous events are +preserved in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `SESSIONS` channel. + + +### `client_authentication_failed` + +An event of type `client_authentication_failed` is reported when a client session +did not authenticate successfully. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Reason` | The reason for the authentication failure. See below for possible values for type `AuthFailReason`. | no | +| `Detail` | The detailed error for the authentication failure. | partially | +| `Method` | The authentication method used. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. 
| no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_authentication_info` + +An event of type `client_authentication_info` is reported for intermediate +steps during the authentication process. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Method` | The authentication method used, once known. | no | +| `Info` | The authentication progress message. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. 
This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_authentication_ok` + +An event of type `client_authentication_ok` is reported when a client session +was authenticated successfully. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Method` | The authentication method used. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +### `client_connection_end` + +An event of type `client_connection_end` is reported when a client connection +is closed. This is reported even when authentication +fails, and even for simple cancellation messages. 
+ +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_connections.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Duration` | The duration of the connection in nanoseconds. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | + +### `client_connection_start` + +An event of type `client_connection_start` is reported when a client connection +is established. This is reported even when authentication +fails, and even for simple cancellation messages. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_connections.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. 
| no | + +### `client_session_end` + +An event of type `client_session_end` is reported when a client session +is completed. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Duration` | The duration of the connection in nanoseconds. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | yes | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | yes | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | yes | + +## SQL Slow Query Log + +Events in this category report slow query execution. + +Note: these events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +In version 26.1, these events moved to the `SQL_EXEC` channel. +To test compatibility prior to this, set the cluster setting +`log.channel_compatibility_mode.enabled` to true.
This will send the +events to `SQL_PERF` instead of `SQL_EXEC`. + +Events in this category are logged to the `SQL_EXEC` channel. + + +### `large_row` + +An event of type `large_row` is recorded when a statement tries to write a row larger than +cluster setting `sql.guardrails.max_row_size_log` to the database. Multiple +LargeRow events will be recorded for statements writing multiple large rows. +LargeRow events are recorded before the transaction commits, so in the case +of transaction abort there will not be a corresponding row in the database. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RowSize` | | no | +| `TableID` | | no | +| `FamilyID` | | no | +| `PrimaryKey` | | yes | + +### `scan_row_count_misestimate` + +An event of type `scan_row_count_misestimate` is recorded when the optimizer's row count estimate +for a logical scan differs significantly from the actual number of rows read, +and cluster setting `sql.log.scan_row_count_misestimate.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The fully qualified name of the table being scanned. | no | +| `IndexName` | The name of the index being scanned. | no | +| `EstimatedRowCount` | The optimizer's estimated row count for the scan. | no | +| `ActualRowCount` | The actual number of rows read by all processors performing the scan. | no | +| `NanosSinceStatsCollected` | Time in nanoseconds that have passed since full stats were collected on the table. | no | +| `EstimatedStaleness` | Estimated fraction of stale rows in the table based on the time since stats were last collected. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `slow_query` + +An event of type `slow_query` is recorded when a query triggers the "slow query" condition. + +As of this writing, the condition requires: +- the cluster setting `sql.log.slow_query.latency_threshold` +set to a non-zero value, AND +- EITHER of the following conditions: +- the actual age of the query exceeds the configured threshold; AND/OR +- the query performs a full table/index scan AND the cluster setting +`sql.log.slow_query.experimental_full_table_scans.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. 
| no | + +### `txn_rows_read_limit` + +An event of type `txn_rows_read_limit` is recorded when a transaction tries to read more rows than +cluster setting `sql.defaults.transaction_rows_read_log`. There will only be +a single record for a single transaction (unless it is retried) even if there +are more statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail.
| no | + +### `txn_rows_written_limit` + +An event of type `txn_rows_written_limit` is recorded when a transaction tries to write more rows +than cluster setting `sql.defaults.transaction_rows_written_log`. There will +only be a single record for a single transaction (unless it is retried) even +if there are more mutation statements within the transaction that haven't +been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. 
| no | + +## SQL Slow Query Log (Internal) + +Events in this category report slow query execution by +internal executors, i.e., when CockroachDB internally issues +SQL statements. + +Note: these events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +In version 26.1, these events moved to the `SQL_EXEC` channel. +To test compatibility prior to this, set the cluster setting +`log.channel_compatibility_mode.enabled` to true. This will send the +events to `SQL_INTERNAL_PERF` instead of `SQL_EXEC`. + +Events in this category are logged to the `SQL_EXEC` channel. + + +### `large_row_internal` + +An event of type `large_row_internal` is recorded when an internal query tries to write a row +larger than cluster settings `sql.guardrails.max_row_size_log` or +`sql.guardrails.max_row_size_err` to the database. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RowSize` | | no | +| `TableID` | | no | +| `FamilyID` | | no | +| `PrimaryKey` | | yes | + +### `slow_query_internal` + +An event of type `slow_query_internal` is recorded when a query triggers the "slow query" condition, +and the cluster setting `sql.log.slow_query.internal_queries.enabled` is +set. +See the documentation for the event type `slow_query` for details about +the "slow query" condition. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag.
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. 
| no | + +### `txn_rows_read_limit_internal` + +An event of type `txn_rows_read_limit_internal` is recorded when an internal transaction tries to +read more rows than cluster setting `sql.defaults.transaction_rows_read_log` +or `sql.defaults.transaction_rows_read_err`. There will only be a single +record for a single transaction (unless it is retried) even if there are more +mutation statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. 
| no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +### `txn_rows_written_limit_internal` + +An event of type `txn_rows_written_limit_internal` is recorded when an internal transaction tries to +write more rows than cluster setting +`sql.defaults.transaction_rows_written_log` or +`sql.defaults.transaction_rows_written_err`. There will only be a single +record for a single transaction (unless it is retried) even if there are more +mutation statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. 
| no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +## SQL User and Role operations + +Events in this category pertain to SQL statements that modify the +properties of users and roles. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `USER_ADMIN` channel. + + +### `alter_role` + +An event of type `alter_role` is recorded when a role is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the affected user/role. | yes | +| `Options` | The options set on the user/role. | no | +| `SetInfo` | Information corresponding to an ALTER ROLE SET statement. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_role` + +An event of type `create_role` is recorded when a role is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the new user/role. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_role` + +An event of type `drop_role` is recorded when a role is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the affected user/role. 
| yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `grant_role` + +An event of type `grant_role` is recorded when a role is granted. + + +| Field | Description | Sensitive | +|--|--|--| +| `GranteeRoles` | The roles being granted to. | yes | +| `Members` | The roles being granted. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `password_hash_converted` + +An event of type `password_hash_converted` is recorded when the password credentials +are automatically converted server-side. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the user/role whose credentials have been converted. | yes | +| `OldMethod` | The previous hash method. | no | +| `NewMethod` | The new hash method. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Sampled SQL Events + +Events in this category report sample of SQL events. + +Events in this category are logged to the `SQL_EXEC` channel. + + +### `m_v_c_c_iterator_stats` + +Internal storage iteration statistics for a single execution. + + +| Field | Description | Sensitive | +|--|--|--| +| `StepCount` | StepCount collects the number of times the iterator moved forward or backward over the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. 
| no | +| `StepCountInternal` | StepCountInternal collects the number of times the iterator moved forward or backward over LSM internal keys. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `SeekCount` | SeekCount collects the number of times the iterator moved to a specific key/value pair in the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `SeekCountInternal` | SeekCountInternal collects the number of times the iterator moved to a specific LSM internal key. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `BlockBytes` | BlockBytes collects the bytes in the loaded SSTable data blocks. For details, see pebble.InternalIteratorStats. | no | +| `BlockBytesInCache` | BlockBytesInCache collects the subset of BlockBytes in the block cache. For details, see pebble.InternalIteratorStats. | no | +| `KeyBytes` | KeyBytes collects the bytes in keys that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `ValueBytes` | ValueBytes collects the bytes in values that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `PointCount` | PointCount collects the count of point keys iterated over. For details, see pebble.InternalIteratorStats. | no | +| `PointsCoveredByRangeTombstones` | PointsCoveredByRangeTombstones collects the count of point keys that were iterated over that were covered by range tombstones. For details, see pebble.InternalIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeyCount` | RangeKeyCount collects the count of range keys encountered during iteration. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeyContainedPoints` | RangeKeyContainedPoints collects the count of point keys encountered within the bounds of a range key. 
For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeySkippedPoints` | RangeKeySkippedPoints collects the count of the subset of ContainedPoints point keys that were skipped during iteration due to range-key masking. For details, see pkg/storage/engine.go, pebble.RangeKeyIteratorStats, and docs/tech-notes/mvcc-range-tombstones.md. | no | + + + +### `sampled_exec_stats` + +An event of type `sampled_exec_stats` contains execution statistics that apply to both statements +and transactions. These stats as a whole are collected using a sampling approach. +These exec stats are meant to contain the same fields as ExecStats in +apps_stats.proto but are for a single execution rather than aggregated executions. +Fields in this struct should be updated in sync with apps_stats.proto. + + +| Field | Description | Sensitive | +|--|--|--| +| `NetworkBytes` | NetworkBytes collects the number of bytes sent over the network by DistSQL components. | no | +| `MaxMemUsage` | MaxMemUsage collects the maximum memory usage that occurred on a node. | no | +| `ContentionTime` | ContentionTime collects the time in seconds statements in the transaction spent contending. | no | +| `NetworkMessages` | NetworkMessages collects the number of messages that were sent over the network by DistSQL components. | no | +| `MaxDiskUsage` | MaxDiskUsage collects the maximum temporary disk usage that occurred. This is set in cases where a query had to spill to disk, e.g. when performing a large sort where not all of the tuples fit in memory. | no | +| `CPUSQLNanos` | CPUSQLNanos collects the CPU time spent executing SQL operations in nanoseconds. Currently, it is only collected for statements without mutations that have a vectorized plan. | no | +| `MVCCIteratorStats` | Internal storage iteration statistics. | yes | +| `AdmissionWaitTime` | AdmissionWaitTime is the cumulative time spent in admission control queues. 
| no | + + + +### `sampled_query` + +An event of type `sampled_query` is the SQL query event logged to the telemetry channel. It +contains common SQL event/execution details. + +Note: in version 26.1, these events moved to the `SQL_EXEC` channel. +To test compatibility prior to this, set the cluster setting +`log.channel_compatibility_mode.enabled` to true. This will send the +events to `TELEMETRY` instead of `SQL_EXEC`. + + +| Field | Description | Sensitive | +|--|--|--| +| `SkippedQueries` | skipped_queries indicates how many SQL statements were not considered for sampling prior to this one. If the field is omitted, or its value is zero, this indicates that no statement was omitted since the last event. | no | +| `CostEstimate` | Cost of the query as estimated by the optimizer. | no | +| `Distribution` | The distribution of the DistSQL query plan (local, full, or partial). | no | +| `PlanGist` | The query's plan gist bytes as a base64 encoded string. | no | +| `SessionID` | SessionID is the ID of the session that initiated the query. | no | +| `Database` | Name of the database that initiated the query. | no | +| `StatementID` | Statement ID of the query. | no | +| `TransactionID` | Transaction ID of the query. | no | +| `MaxFullScanRowsEstimate` | Maximum number of rows scanned by a full scan, as estimated by the optimizer. | no | +| `TotalScanRowsEstimate` | Total number of rows read by all scans in the query, as estimated by the optimizer. | no | +| `OutputRowsEstimate` | The number of rows output by the query, as estimated by the optimizer. | no | +| `StatsAvailable` | Whether table statistics were available to the optimizer when planning the query. | no | +| `NanosSinceStatsCollected` | The maximum number of nanoseconds that have passed since stats were collected on any table scanned by this query. | no | +| `BytesRead` | The number of bytes read from disk. | no | +| `RowsRead` | The number of rows read from disk. 
| no | +| `RowsWritten` | The number of rows written. | no | +| `InnerJoinCount` | The number of inner joins in the query plan. | no | +| `LeftOuterJoinCount` | The number of left (or right) outer joins in the query plan. | no | +| `FullOuterJoinCount` | The number of full outer joins in the query plan. | no | +| `SemiJoinCount` | The number of semi joins in the query plan. | no | +| `AntiJoinCount` | The number of anti joins in the query plan. | no | +| `IntersectAllJoinCount` | The number of intersect all joins in the query plan. | no | +| `ExceptAllJoinCount` | The number of except all joins in the query plan. | no | +| `HashJoinCount` | The number of hash joins in the query plan. | no | +| `CrossJoinCount` | The number of cross joins in the query plan. | no | +| `IndexJoinCount` | The number of index joins in the query plan. | no | +| `LookupJoinCount` | The number of lookup joins in the query plan. | no | +| `MergeJoinCount` | The number of merge joins in the query plan. | no | +| `InvertedJoinCount` | The number of inverted joins in the query plan. | no | +| `ApplyJoinCount` | The number of apply joins in the query plan. | no | +| `ZigZagJoinCount` | The number of zig zag joins in the query plan. | no | +| `ContentionNanos` | The duration of time in nanoseconds that the query experienced contention. | no | +| `Regions` | The regions of the nodes where SQL processors ran. | no | +| `NetworkBytesSent` | The number of network bytes by DistSQL components. | no | +| `MaxMemUsage` | The maximum amount of memory usage by nodes for this query. | no | +| `MaxDiskUsage` | The maximum amount of disk usage by nodes for this query. | no | +| `KVBytesRead` | The number of bytes read at the KV layer for this query. | no | +| `KVPairsRead` | The number of key-value pairs read at the KV layer for this query. | no | +| `KVRowsRead` | The number of rows read at the KV layer for this query. 
| no | +| `NetworkMessages` | The number of network messages sent by nodes for this query by DistSQL components. | no | +| `IndexRecommendations` | Generated index recommendations for this query. | no | +| `ScanCount` | The number of scans in the query plan. | no | +| `ScanWithStatsCount` | The number of scans using statistics (including forecasted statistics) in the query plan. | no | +| `ScanWithStatsForecastCount` | The number of scans using forecasted statistics in the query plan. | no | +| `TotalScanRowsWithoutForecastsEstimate` | Total number of rows read by all scans in the query, as estimated by the optimizer without using forecasts. | no | +| `NanosSinceStatsForecasted` | The greatest quantity of nanoseconds that have passed since the forecast time (or until the forecast time, if it is in the future, in which case it will be negative) for any table with forecasted stats scanned by this query. | no | +| `Indexes` | The list of indexes used by this query. | no | +| `CpuTimeNanos` | Collects the cumulative CPU time spent executing SQL operations in nanoseconds. Currently, it is only collected for statements without mutations that have a vectorized plan. | no | +| `KvGrpcCalls` | The number of grpc calls done to get data from KV nodes. | no | +| `KvTimeNanos` | Cumulative time spent waiting for a KV request. This includes disk IO time and potentially network time (if any of the keys are not local). | no | +| `ServiceLatencyNanos` | The time to service the query, from start of parse to end of execute. | no | +| `OverheadLatencyNanos` | The difference between service latency and the sum of parse latency + plan latency + run latency. | no | +| `RunLatencyNanos` | The time to run the query and fetch or compute the result rows. | no | +| `PlanLatencyNanos` | The time to transform the AST into a logical query plan. 
| no | +| `IdleLatencyNanos` | The time between statement executions in a transaction | no | +| `ParseLatencyNanos` | The time to transform the SQL string into an abstract syntax tree (AST). | no | +| `MvccStepCount` | StepCount collects the number of times the iterator moved forward or backward over the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccStepCountInternal` | StepCountInternal collects the number of times the iterator moved forward or backward over LSM internal keys. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccSeekCount` | SeekCount collects the number of times the iterator moved to a specific key/value pair in the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccSeekCountInternal` | SeekCountInternal collects the number of times the iterator moved to a specific LSM internal key. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccBlockBytes` | BlockBytes collects the bytes in the loaded SSTable data blocks. For details, see pebble.InternalIteratorStats. | no | +| `MvccBlockBytesInCache` | BlockBytesInCache collects the subset of BlockBytes in the block cache. For details, see pebble.InternalIteratorStats. | no | +| `MvccKeyBytes` | KeyBytes collects the bytes in keys that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccValueBytes` | ValueBytes collects the bytes in values that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccPointCount` | PointCount collects the count of point keys iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccPointsCoveredByRangeTombstones` | PointsCoveredByRangeTombstones collects the count of point keys that were iterated over that were covered by range tombstones. 
For details, see pebble.InternalIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeyCount` | RangeKeyCount collects the count of range keys encountered during iteration. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeyContainedPoints` | RangeKeyContainedPoints collects the count of point keys encountered within the bounds of a range key. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeySkippedPoints` | RangeKeySkippedPoints collects the count of the subset of ContainedPoints point keys that were skipped during iteration due to range-key masking. For details, see pkg/storage/engine.go, pebble.RangeKeyIteratorStats, and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `SchemaChangerMode` | SchemaChangerMode is the mode that was used to execute the schema change, if any. | no | +| `SQLInstanceIDs` | SQLInstanceIDs is a list of all the SQL instances used in this statement's execution. | no | +| `KVNodeIDs` | KVNodeIDs is a list of all the KV nodes used in this statement's execution. | no | +| `StatementFingerprintID` | Statement fingerprint ID of the query. | no | +| `UsedFollowerRead` | UsedFollowerRead indicates whether at least some reads were served by the follower replicas. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `sampled_transaction` + +An event of type `sampled_transaction` is the event logged to telemetry at the end of transaction execution. + +Note: in version 26.1, these events moved to the `SQL_EXEC` channel. 
+To test compatibility prior to this, set the cluster setting +`log.channel_compatibility_mode.enabled` to true. This will send the +events to `TELEMETRY` instead of `SQL_EXEC`. + + +| Field | Description | Sensitive | +|--|--|--| +| `User` | User is the user account that triggered the transaction. The special usernames `root` and `node` are not considered sensitive. | depends | +| `ApplicationName` | ApplicationName is the application name for the session where the transaction was executed. This is included in the event to ease filtering of logging output by application. | no | +| `TxnCounter` | TxnCounter is the sequence number of the SQL transaction inside its session. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `TransactionID` | TransactionID is the id of the transaction. | no | +| `Committed` | Committed indicates if the transaction committed successfully. We want to include this value even if it is false. | no | +| `ImplicitTxn` | ImplicitTxn indicates if the transaction was an implicit one. We want to include this value even if it is false. | no | +| `StartTimeUnixNanos` | StartTimeUnixNanos is the time the transaction was started. Expressed as unix time in nanoseconds. | no | +| `EndTimeUnixNanos` | EndTimeUnixNanos is the time the transaction finished (either committed or aborted). Expressed as unix time in nanoseconds. | no | +| `ServiceLatNanos` | ServiceLatNanos is the time to service the whole transaction, from start to end of execution. | no | +| `SQLSTATE` | SQLSTATE is the SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | ErrorText is the text of the error if any. | partially | +| `NumRetries` | NumRetries is the number of times the txn was retried automatically by the server. | no | +| `LastAutoRetryReason` | LastAutoRetryReason is a string containing the reason for the last automatic retry. 
| partially | +| `NumRows` | NumRows is the total number of rows returned across all statements. | no | +| `RetryLatNanos` | RetryLatNanos is the amount of time spent retrying the transaction. | no | +| `CommitLatNanos` | CommitLatNanos is the amount of time spent committing the transaction after all statement operations. | no | +| `IdleLatNanos` | IdleLatNanos is the amount of time spent waiting for the client to send statements while the transaction is open. | no | +| `BytesRead` | BytesRead is the number of bytes read from disk. | no | +| `RowsRead` | RowsRead is the number of rows read from disk. | no | +| `RowsWritten` | RowsWritten is the number of rows written to disk. | no | +| `SampledExecStats` | SampledExecStats is a nested field containing execution statistics. This field will be omitted if the stats were not sampled. | yes | +| `SkippedTransactions` | SkippedTransactions is the number of transactions that were skipped as part of sampling prior to this one. We only count skipped transactions when telemetry logging is enabled and the sampling mode is set to "transaction". | no | +| `TransactionFingerprintID` | TransactionFingerprintID is the fingerprint ID of the transaction. This can be used to find the transaction in the console. | no | +| `StatementFingerprintIDs` | StatementFingerprintIDs is an array of statement fingerprint IDs belonging to this transaction. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Storage telemetry events + + + +Events in this category are logged to the `TELEMETRY` channel. + + +### `level_stats` + +An event of type `level_stats` contains per-level statistics for an LSM. + + +| Field | Description | Sensitive | +|--|--|--| +| `Level` | level is the level ID in a LSM (e.g. level(L0) == 0, etc.) 
| no | +| `NumFiles` | num_files is the number of files in the level (gauge). | no | +| `SizeBytes` | size_bytes is the size of the level, in bytes (gauge). | no | +| `Score` | score is the compaction score of the level (gauge). | no | +| `BytesIn` | bytes_in is the number of bytes written to this level (counter). | no | +| `BytesIngested` | bytes_ingested is the number of bytes ingested into this level (counter). | no | +| `BytesMoved` | bytes_moved is the number of bytes moved into this level via a move-compaction (counter). | no | +| `BytesRead` | bytes_read is the number of bytes read from this level, during compactions (counter). | no | +| `BytesCompacted` | bytes_compacted is the number of bytes written to this level during compactions (counter). | no | +| `BytesFlushed` | bytes flushed is the number of bytes flushed to this level. This value is always zero for levels other than L0 (counter). | no | +| `TablesCompacted` | tables_compacted is the count of tables compacted into this level (counter). | no | +| `TablesFlushed` | tables_flushed is the count of tables flushed into this level (counter). | no | +| `TablesIngested` | tables_ingested is the count of tables ingested into this level (counter). | no | +| `TablesMoved` | tables_moved is the count of tables moved into this level via move-compactions (counter). | no | +| `NumSublevels` | num_sublevel is the count of sublevels for the level. This value is always zero for levels other than L0 (gauge). | no | + + + +### `store_stats` + +An event of type `store_stats` contains per store stats. + +Note that because stats are scoped to the lifetime of the process, counters +(and certain gauges) will be reset across node restarts. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeId` | node_id is the ID of the node. | no | +| `StoreId` | store_id is the ID of the store. | no | +| `Levels` | levels is a nested message containing per-level statistics. 
| yes | +| `CacheSize` | cache_size is the size of the cache for the store, in bytes (gauge). | no | +| `CacheCount` | cache_count is the number of items in the cache (gauge). | no | +| `CacheHits` | cache_hits is the number of cache hits (counter). | no | +| `CacheMisses` | cache_misses is the number of cache misses (counter). | no | +| `CompactionCountDefault` | compaction_count_default is the count of default compactions (counter). | no | +| `CompactionCountDeleteOnly` | compaction_count_delete_only is the count of delete-only compactions (counter). | no | +| `CompactionCountElisionOnly` | compaction_count_elision_only is the count of elision-only compactions (counter). | no | +| `CompactionCountMove` | compaction_count_move is the count of move-compactions (counter). | no | +| `CompactionCountRead` | compaction_count_read is the count of read-compactions (counter). | no | +| `CompactionCountRewrite` | compaction_count_rewrite is the count of rewrite-compactions (counter). | no | +| `CompactionNumInProgress` | compactions_num_in_progress is the number of compactions in progress (gauge). | no | +| `CompactionMarkedFiles` | compaction_marked_files is the count of files marked for compaction (gauge). | no | +| `FlushCount` | flush_count is the number of flushes (counter). | no | +| `FlushIngestCount` | | no | +| `FlushIngestTableCount` | | no | +| `FlushIngestTableBytes` | | no | +| `IngestCount` | ingest_count is the number of successful ingest operations (counter). | no | +| `MemtableSize` | memtable_size is the total size allocated to all memtables and (large) batches, in bytes (gauge). | no | +| `MemtableCount` | memtable_count is the count of memtables (gauge). | no | +| `MemtableZombieCount` | memtable_zombie_count is the count of memtables no longer referenced by the current DB state, but still in use by an iterator (gauge). | no | +| `MemtableZombieSize` | memtable_zombie_size is the size, in bytes, of all zombie memtables (gauge). 
| no | +| `WalLiveCount` | wal_live_count is the count of live WAL files (gauge). | no | +| `WalLiveSize` | wal_live_size is the size, in bytes, of live data in WAL files. With WAL recycling, this value is less than the actual on-disk size of the WAL files (gauge). | no | +| `WalObsoleteCount` | wal_obsolete_count is the count of obsolete WAL files (gauge). | no | +| `WalObsoleteSize` | wal_obsolete_size is the size of obsolete WAL files, in bytes (gauge). | no | +| `WalPhysicalSize` | wal_physical_size is the size, in bytes, of the WAL files on disk (gauge). | no | +| `WalBytesIn` | wal_bytes_in is the number of logical bytes written to the WAL (counter). | no | +| `WalBytesWritten` | wal_bytes_written is the number of bytes written to the WAL (counter). | no | +| `TableObsoleteCount` | table_obsolete_count is the number of tables which are no longer referenced by the current DB state or any open iterators (gauge). | no | +| `TableObsoleteSize` | table_obsolete_size is the size, in bytes, of obsolete tables (gauge). | no | +| `TableZombieCount` | table_zombie_count is the number of tables no longer referenced by the current DB state, but are still in use by an open iterator (gauge). | no | +| `TableZombieSize` | table_zombie_size is the size, in bytes, of zombie tables (gauge). | no | +| `RangeKeySetsCount` | range_key_sets_count is the approximate count of internal range key sets in the store. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## TELEMETRY + +Events in this file are related to bulk ingest operations performance metrics. + +Events in this category are logged to the `TELEMETRY` channel. + + +### `bulk_ingest_completed` + +An event of type `bulk_ingest_completed` is an event that is logged when a bulk ingest job +(restore, import, etc.) completes successfully. 
+It captures key performance metrics for the operation. + + +| Field | Description | Sensitive | +|--|--|--| +| `JobID` | JobID is the ID of the bulk ingest job. | no | +| `JobType` | JobType identifies the type of bulk ingest job (e.g., "restore", "import"). | no | +| `NumRows` | NumRows is the number of rows successfully ingested. | no | +| `DurationSeconds` | Duration of the ingest operation in seconds. | no | +| `DataSizeMb` | Total logical size of data ingested in megabytes. | no | +| `NodeCount` | Number of nodes that participated in the ingest operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Telemetry events + + + +Events in this category are logged to the `TELEMETRY` channel. + + +### `captured_index_usage_stats` + +An event of type `captured_index_usage_stats` + + +| Field | Description | Sensitive | +|--|--|--| +| `TotalReadCount` | TotalReadCount is the number of times the index has been read. | no | +| `LastRead` | LastRead is the timestamp at which the index was last read. | no | +| `TableID` | TableID is the ID of the table on which the index was created. This is same as descpb.TableID and is unique within the cluster. | no | +| `IndexID` | IndexID is the ID of the index within the scope of the given table. | no | +| `DatabaseName` | DatabaseName is the name of the database in which the index was created. | no | +| `TableName` | TableName is the name of the table on which the index was created. | no | +| `IndexName` | IndexName is the name of the index within the scope of the given table. | no | +| `IndexType` | IndexType is the type of the index. Index types include "primary" and "secondary". | no | +| `IsUnique` | IsUnique indicates if the index has a UNIQUE constraint. | no | +| `IsInverted` | IsInverted indicates if the index is an inverted index. 
| no | +| `CreatedAt` | CreatedAt is the timestamp at which the index was created. | no | +| `SchemaName` | SchemaName is the name of the schema in which the index was created. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `recovery_event` + +An event of type `recovery_event` is an event that is logged on every invocation of BACKUP, +RESTORE, and on every BACKUP schedule creation, with the appropriate subset +of fields populated depending on the type of event. This event is also +logged whenever a BACKUP and RESTORE job completes or fails. + + +| Field | Description | Sensitive | +|--|--|--| +| `RecoveryType` | RecoveryType is the type of recovery described by this event, which is one of - backup - scheduled_backup - create_schedule - restore

It can also be a job event corresponding to the recovery, which is one of - backup_job - scheduled_backup_job - restore_job | no | +| `TargetScope` | TargetScope is the largest scope of the targets that the user is backing up or restoring based on the following order: table < schema < database < full cluster. | no | +| `IsMultiregionTarget` | IsMultiregionTarget is true if any of the targets contain objects with multi-region primitives. | no | +| `TargetCount` | TargetCount is the number of targets the in the BACKUP/RESTORE. | no | +| `DestinationStorageTypes` | DestinationStorageTypes are the types of storage that the user is backing up to or restoring from. | no | +| `DestinationAuthTypes` | DestinationAuthTypes are the types of authentication methods that the user is using to access the destination storage. | no | +| `IsLocalityAware` | IsLocalityAware indicates if the BACKUP or RESTORE is locality aware. | no | +| `WithRevisionHistory` | WithRevisionHistory is true if the BACKUP includes revision history. | no | +| `HasEncryptionPassphrase` | HasEncryptionPassphrase is true if the user provided an encryption passphrase to encrypt/decrypt their backup. | no | +| `KMSType` | KMSType is the type of KMS the user is using to encrypt/decrypt their backup. | no | +| `KMSCount` | KMSCount is the number of KMS the user is using. | no | +| `Options` | Options contain all the names of the options specified by the user in the BACKUP or RESTORE statement. For options that are accompanied by a value, only those with non-empty values will be present.

It's important to note that there are no option values anywhere in the event payload. Future changes to telemetry should refrain from adding values to the payload unless they are properly redacted. | no | +| `JobID` | JobID is the ID of the BACKUP/RESTORE job. | no | +| `ResultStatus` | ResultStatus indicates whether the job succeeded or failed. | no | +| `ErrorText` | ErrorText is the text of the error that caused the job to fail. | partially | +| `RecurringCron` | RecurringCron is the crontab for the incremental backup. | no | +| `FullBackupCron` | FullBackupCron is the crontab for the full backup. | no | +| `CustomFirstRunTime` | CustomFirstRunTime is the timestamp for the user configured first run time. Expressed as nanoseconds since the Unix epoch. | no | +| `IgnoreExistingBackup` | IgnoreExistingBackup is true iff the BACKUP schedule should still be created even if a backup is already present in its destination. | no | +| `ApplicationName` | The application name for the session where recovery event was created. | no | +| `NumRows` | NumRows is the number of rows successfully imported, backed up or restored. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `schema_descriptor` + +An event of type `schema_descriptor` is an event for schema telemetry, whose purpose is +to take periodic snapshots of the cluster's SQL schema and publish them in +the telemetry log channel. For all intents and purposes, the data in such a +snapshot can be thought of the outer join of certain system tables: +namespace, descriptor, and at some point perhaps zones, etc. + +Snapshots are too large to conveniently be published as a single log event, +so instead they're broken down into SchemaDescriptor events which +contain the data in one record of this outer join projection. 
These events +are prefixed by a header (a SchemaSnapshotMetadata event). + + +| Field | Description | Sensitive | +|--|--|--| +| `SnapshotID` | SnapshotID is the unique identifier of the snapshot that this event is part of. | no | +| `ParentDatabaseID` | ParentDatabaseID matches the same key column in system.namespace. | no | +| `ParentSchemaID` | ParentSchemaID matches the same key column in system.namespace. | no | +| `Name` | Name matches the same key column in system.namespace. | no | +| `DescID` | DescID matches the 'id' column in system.namespace and system.descriptor. | no | +| `Desc` | Desc matches the 'descriptor' column in system.descriptor. Some contents of the descriptor may be redacted to prevent leaking PII. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `schema_snapshot_metadata` + +An event of type `schema_snapshot_metadata` is an event describing a schema snapshot, which +is a set of SchemaDescriptor messages sharing the same SnapshotID. + + +| Field | Description | Sensitive | +|--|--|--| +| `SnapshotID` | SnapshotID is the unique identifier of this snapshot. | no | +| `NumRecords` | NumRecords is how many SchemaDescriptor events are in the snapshot. | no | +| `AsOfTimestamp` | AsOfTimestamp is when the snapshot was taken. This is equivalent to the timestamp given in the AS OF SYSTEM TIME clause when querying the namespace and descriptor tables in the system database. Expressed as nanoseconds since the Unix epoch. | no | +| `Errors` | Errors records any errors encountered when post-processing this snapshot, which includes the redaction of any potential PII. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | + +## Zone config events + +Events in this category pertain to zone configuration changes on +the SQL schema or system ranges. + +When zone configs apply to individual tables or other objects in a +SQL logical schema, they are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these zone config events are preserved +in each tenant's own `system.eventlog` table. + +When they apply to cluster-level ranges (e.g., the system zone config), +they are stored in the system tenant's own `system.eventlog` table. + +Events in this category are logged to the `OPS` channel. + + +### `remove_zone_config` + +An event of type `remove_zone_config` is recorded when a zone config is removed. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + +### `set_zone_config` + +An event of type `set_zone_config` is recorded when a zone config is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `ResolvedOldConfig` | The string representation of the resolved old zone config. This is not necessarily the same as the zone config that was previously set -- as it includes the resolved values of the zone config options. In other words, a zone config that hasn't been properly "set" yet (and inherits from its parent) will have a resolved_old_config that has details of the values it inherits from its parent. This is particularly useful to get a proper diff between the old and new zone config. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + + + + +## Enumeration types + +### `AuthFailReason` + +AuthFailReason is the inventory of possible reasons for an +authentication failure. + + +| Value | Textual alias in code or documentation | Description | +|--|--|--| +| 0 | UNKNOWN | is reported when the reason is unknown. | +| 1 | USER_RETRIEVAL_ERROR | occurs when there was an internal error accessing the principals. | +| 2 | USER_NOT_FOUND | occurs when the principal is unknown. | +| 3 | LOGIN_DISABLED | occurs when the user does not have LOGIN privileges. | +| 4 | METHOD_NOT_FOUND | occurs when no HBA rule matches or the method does not exist. | +| 5 | PRE_HOOK_ERROR | occurs when the authentication handshake encountered a protocol error. | +| 6 | CREDENTIALS_INVALID | occurs when the client-provided credentials were invalid. | +| 7 | CREDENTIALS_EXPIRED | occur when the credentials provided by the client are expired. | +| 8 | NO_REPLICATION_ROLEOPTION | occurs when the connection requires a replication role option, but the user does not have it. | +| 9 | AUTHORIZATION_ERROR | is used for errors during the authorization phase. For example, this would include issues with mapping LDAP groups to SQL roles and granting those roles to the user. | +| 10 | PROVISIONING_ERROR | is used for errors during the user provisioning phase. This would include errors when the transaction to provision the authenticating user failed to execute. 
| + + + diff --git a/src/current/_includes/cockroach-generated/release-26.1/logformats.md b/src/current/_includes/cockroach-generated/release-26.1/logformats.md new file mode 100644 index 00000000000..89580c9fc15 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-26.1/logformats.md @@ -0,0 +1,382 @@ + +The supported log output formats are documented below. + + +- [`crdb-v1`](#format-crdb-v1) + +- [`crdb-v1-count`](#format-crdb-v1-count) + +- [`crdb-v1-tty`](#format-crdb-v1-tty) + +- [`crdb-v1-tty-count`](#format-crdb-v1-tty-count) + +- [`crdb-v2`](#format-crdb-v2) + +- [`crdb-v2-tty`](#format-crdb-v2-tty) + +- [`json`](#format-json) + +- [`json-compact`](#format-json-compact) + +- [`json-fluent`](#format-json-fluent) + +- [`json-fluent-compact`](#format-json-fluent-compact) + + + +## Format `crdb-v1` + + +This is a legacy file format used from CockroachDB v1.0. + +Each log entry is emitted using a common prefix, described below, followed by: + +- The logging context tags enclosed between `[` and `]`, if any. It is possible + for this to be omitted if there were no context tags. +- Optionally, a counter column, if the option 'show-counter' is enabled. See below for details. +- the text of the log entry. + +Beware that the text of the log entry can span multiple lines. +The following caveats apply: + +- The text of the log entry can start with text enclosed between `[` and `]`. + If there were no logging tags to start with, it is not possible to distinguish between + logging context tag information and a `[...]` string in the main text of the + log entry. This means that this format is ambiguous. + + To remove this ambiguity, you can use the option 'show-counter'. + +- The text of the log entry can embed arbitrary application-level strings, + including strings that represent log entries. 
In particular, an accident + of implementation can cause the common entry prefix (described below) + to also appear on a line of its own, as part of the payload of a previous + log entry. There is no automated way to recognize when this occurs. + Care must be taken by a human observer to recognize these situations. + +- The log entry parser provided by CockroachDB to read log files is faulty + and is unable to recognize the aforementioned pitfall; nor can it read + entries larger than 64KiB successfully. Generally, use of this internal + log entry parser is discouraged for entries written with this format. + +See the newer format `crdb-v2` for an alternative +without these limitations. + +### Header lines + +At the beginning of each file, a header is printed using a similar format as +regular log entries. This header reports when the file was created, +which parameters were used to start the server, the server identifiers +if known, and other metadata about the running process. + +- This header appears to be logged at severity `INFO` (with an `I` prefix + at the start of the line) even though it does not really have a severity. +- The header is printed unconditionally even when a filter is configured to + omit entries at the `INFO` level. + +### Common log entry prefix + +Each line of output starts with the following prefix: + + Lyymmdd hh:mm:ss.uuuuuu goid [chan@]file:line marker tags counter + +Reminder, the tags may be omitted; and the counter is only printed if the option +'show-counter' is specified. + +| Field | Description | +|-----------------|--------------------------------------------------------------------------------------------------------------------------------------| +| L | A single character, representing the [log level](logging.html#logging-levels-severities) (e.g., `I` for `INFO`). | +| yy | The year (zero padded; i.e., 2016 is `16`). | +| mm | The month (zero padded; i.e., May is `05`). | +| dd | The day (zero padded). 
| +| hh:mm:ss.uuuuuu | Time in hours, minutes and fractional seconds. Timezone is UTC. | +| goid | The goroutine id (omitted if zero for use by tests). | +| chan | The channel number (omitted if zero for backward compatibility). | +| file | The file name where the entry originated. | +| line | The line number where the entry originated. | +| marker | Redactability marker ` + redactableIndicator + ` (see below for details). | +| tags | The logging tags, enclosed between `[` and `]`. May be absent. | +| counter | The entry counter. Only included if 'show-counter' is enabled. | + +The redactability marker can be empty; in this case, its position in the common prefix is +a double ASCII space character which can be used to reliably identify this situation. + +If the marker ` + redactableIndicator + ` is present, the remainder of the log entry +contains delimiters (‹...›) around +fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) when log redaction is requested. + + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `show-counter` | Whether to include the counter column in the line header. Without it, the format may be ambiguous due to the optionality of tags. | +| `colors` | The color profile to use. Possible values: none, auto, ansi, 256color. Default is auto. | +| `timezone` | The timezone to use for the timestamp column. The value can be any timezone name recognized by the Go standard library. 
Default is `UTC` | + + + +## Format `crdb-v1-count` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: true` +- `colors: none` + + +## Format `crdb-v1-tty` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: false` +- `colors: auto` + + +## Format `crdb-v1-tty-count` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: true` +- `colors: auto` + + +## Format `crdb-v2` + +This is the main file format used from CockroachDB v21.1. + +Each log entry is emitted using a common prefix, described below, +followed by the text of the log entry. + +### Entry format + +Each line of output starts with the following prefix: + + Lyymmdd hh:mm:ss.uuuuuu goid [chan@]file:line marker [tags...] counter cont + +| Field | Description | +|-----------------|--------------------------------------------------------------------------------------------------------------------------------------| +| L | A single character, representing the [log level](logging.html#logging-levels-severities) (e.g., `I` for `INFO`). | +| yy | The year (zero padded; i.e., 2016 is `16`). | +| mm | The month (zero padded; i.e., May is `05`). | +| dd | The day (zero padded). | +| hh:mm:ss.uuuuuu | Time in hours, minutes and fractional seconds. Timezone is UTC. | +| goid | The goroutine id (zero when cannot be determined). | +| chan | The channel number (omitted if zero for backward compatibility). | +| file | The file name where the entry originated. Also see below. | +| line | The line number where the entry originated. | +| marker | Redactability marker "⋮" (see below for details). | +| tags | The logging tags, enclosed between `[` and `]`. See below. | +| counter | The optional entry counter (see below for details). | +| cont | Continuation mark for structured and multi-line entries. See below. 
| + +The `chan@` prefix before the file name indicates the logging channel, +and is omitted if the channel is `DEV`. + +The file name may be prefixed by the string `(gostd) ` to indicate +that the log entry was produced inside the Go standard library, instead +of a CockroachDB component. Entry parsers must be configured to ignore this prefix +when present. + +`marker` can be empty; in this case, its position in the common prefix is +a double ASCII space character which can be used to reliably identify this situation. +If the marker "⋮" is present, the remainder of the log entry +contains delimiters (‹...›) +around fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) +when log redaction is requested. + +The logging `tags` are enclosed between square brackets `[...]`, +and the syntax `[-]` is used when there are no logging tags +associated with the log entry. + +`counter` is numeric, and is incremented for every +log entry emitted to this sink. (There is thus one counter sequence per +sink.) For entries that do not have a counter value +associated (e.g., header entries in file sinks), the counter position +in the common prefix is empty: `tags` is then +followed by two ASCII space characters, instead of one space; the `counter`, +and another space. The presence of the two ASCII spaces indicates +reliably that no counter was present. + +`cont` is a format/continuation indicator: + +| Continuation indicator | ASCII | Description | +|------------------------|-------|--| +| space | 0x32 | Start of an unstructured entry. | +| equal sign, "=" | 0x3d | Start of a structured entry. | +| exclamation mark, "!" | 0x21 | Start of an embedded stack trace. | +| plus sign, "+" | 0x2b | Continuation of a multi-line entry. The payload contains a newline character at this position. | +| vertical bar | 0x7c | Continuation of a large entry. 
| + +### Examples + +Example single-line unstructured entry: + +~~~ +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 started with engine type ‹2› +~~~ + +Example multi-line unstructured entry: + +~~~ +I210116 21:49:17.083093 14 1@cli/start.go:690 ⋮ [-] 40 node startup completed: +I210116 21:49:17.083093 14 1@cli/start.go:690 ⋮ [-] 40 +CockroachDB node starting at 2021-01-16 21:49 (took 0.0s) +~~~ + +Example structured entry: + +~~~ +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 ={"Timestamp":1610833757080706620,"EventType":"node_restart"} +~~~ + +Example long entries broken up into multiple lines: + +~~~ +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.... +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 |aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +~~~ + +~~~ +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 ={"Timestamp":1610833757080706620,"EventTy... +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 |pe":"node_restart"} +~~~ + +### Backward-compatibility notes + +Entries in this format can be read by most `crdb-v1` log parsers, +in particular the one included in the DB console and +also the [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) +facility. + +However, implementers of previous version parsers must +understand that the logging tags field is now always +included, and the lack of logging tags is included +by a tag string set to `[-]`. + +Likewise, the entry counter is now also always included, +and there is a special character after `counter` +to indicate whether the remainder of the line is a +structured entry, or a continuation of a previous entry. + +Finally, in the previous format, structured entries +were prefixed with the string `Structured entry: `. In +the new format, they are prefixed by the `=` continuation +indicator. 
+ + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `colors` | The color profile to use. Possible values: none, auto, ansi, 256color. Default is auto. | +| `timezone` | The timezone to use for the timestamp column. The value can be any timezone name recognized by the Go standard library. Default is `UTC` | + + + +## Format `crdb-v2-tty` + +This format name is an alias for 'crdb-v2' with +the following format option defaults: + +- `colors: auto` + + +## Format `json` + +This format emits log entries as a JSON payload. + +The JSON object is guaranteed to not contain unescaped newlines +or other special characters, and the entry as a whole is followed +by a newline character. This makes the format suitable for +processing over a stream unambiguously. + +Each entry contains at least the following fields: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `tag` | `tag` | (Only if the option `fluent-tag: true` is given.) A Fluent tag for the event, formed by the process name and the logging channel. | +| `d` | `datetime` | The pretty-printed date/time of the event timestamp, if enabled via options. | +| `f` | `file` | The name of the source file where the event was emitted. | +| `g` | `goroutine` | The identifier of the goroutine where the event was emitted. | +| `l` | `line` | The line number where the event was emitted in the source. | +| `r` | `redactable` | Whether the payload is redactable (see below for details). | +| `t` | `timestamp` | The timestamp at which the event was emitted on the logging channel. | +| `v` | `version` | The binary version with which the event was generated. 
| + + +After a couple of *header* entries written at the beginning of each log sink, +all subsequent log entries also contain the following fields: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `C` | `channel` | The name of the logging channel where the event was sent. | +| `sev` | `severity` | The severity of the event. | +| `c` | `channel_numeric` | The numeric identifier for the logging channel where the event was sent. | +| `n` | `entry_counter` | The entry number on this logging sink, relative to the last process restart. | +| `s` | `severity_numeric` | The numeric value of the severity of the event. | + + +Additionally, the following fields are conditionally present: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `N` | `node_id` | The node ID where the event was generated, once known. Only reported for single-tenant or KV servers. | +| `x` | `cluster_id` | The cluster ID where the event was generated, once known. Only reported for single-tenant of KV servers. | +| `q` | `instance_id` | The SQL instance ID where the event was generated, once known. | +| `T` | `tenant_id` | The SQL tenant ID where the event was generated, once known. | +| `V` | `tenant_name` | The SQL virtual cluster where the event was generated, once known. | +| `tags` | `tags` | The logging context tags for the entry, if there were context tags. | +| `message` | `message` | For unstructured events, the flat text payload. | +| `event` | `event` | The logging event, if structured (see below for details). | +| `stacks` | `stacks` | Goroutine stacks, for fatal events. | + +When an entry is structured, the `event` field maps to a dictionary +whose structure is one of the documented structured events. 
See the [reference documentation](eventlog.html) +for structured events for a list of possible payloads. + +When the entry is marked as `redactable`, the `tags`, `message`, and/or `event` payloads +contain delimiters (‹...›) around +fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) when log redaction is requested. + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `datetime-format` | The format to use for the `datetime` field. The value can be one of `none`, `iso8601`/`rfc3339` (synonyms), or `rfc1123`. Default is `none`. | +| `datetime-timezone` | The timezone to use for the `datetime` field. The value can be any timezone name recognized by the Go standard library. Default is `UTC` | +| `tag-style` | The tags to include in the envelope. The value can be `compact` (one letter tags) or `verbose` (long-form tags). Default is `verbose`. | +| `fluent-tag` | Whether to produce an additional field called `tag` for Fluent compatibility. Default is `false`. 
| + + + +## Format `json-compact` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: false` +- `tag-style: compact` + + +## Format `json-fluent` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: true` +- `tag-style: verbose` + + +## Format `json-fluent-compact` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: true` +- `tag-style: compact` + + diff --git a/src/current/_includes/cockroach-generated/release-26.1/logging.md b/src/current/_includes/cockroach-generated/release-26.1/logging.md new file mode 100644 index 00000000000..7661187987e --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-26.1/logging.md @@ -0,0 +1,188 @@ +## Logging levels (severities) + +### INFO + +The `INFO` severity is used for informational messages that do not +require action. + +### WARNING + +The `WARNING` severity is used for situations which may require special handling, +where normal operation is expected to resume automatically. + +### ERROR + +The `ERROR` severity is used for situations that require special handling, +where normal operation could not proceed as expected. +Other operations can continue mostly unaffected. + +### FATAL + +The `FATAL` severity is used for situations that require an immedate, hard +server shutdown. A report is also sent to telemetry if telemetry +is enabled. + + +## Logging channels + +### `DEV` + +The `DEV` channel is used during development to collect log +details useful for troubleshooting that fall outside the +scope of other channels. It is also the default logging +channel for events not associated with a channel. + +This channel is special in that there are no constraints as to +what may or may not be logged on it. 
Conversely, users in +production deployments are invited to not collect `DEV` logs in +centralized logging facilities, because they likely contain +sensitive operational data. +See [Configure logs](configure-logs.html#dev-channel). + +### `OPS` + +The `OPS` channel is used to report "point" operational events, +initiated by user operators or automation: + + - Operator or system actions on server processes: process starts, + stops, shutdowns, crashes (if they can be logged), + including each time: command-line parameters, current version being run + - Actions that impact the topology of a cluster: node additions, + removals, decommissions, etc. + - Job-related initiation or termination + - [Cluster setting](cluster-settings.html) changes + - [Zone configuration](configure-replication-zones.html) changes + +### `HEALTH` + +The `HEALTH` channel is used to report "background" operational +events, initiated by CockroachDB or reporting on automatic processes: + + - Current resource usage, including critical resource usage + - Node-node connection events, including connection errors and + gossip details + - Range and table leasing events + - Up- and down-replication, range unavailability + +### `STORAGE` + +The `STORAGE` channel is used to report low-level storage +layer events (RocksDB/Pebble). + +### `SESSIONS` + +The `SESSIONS` channel is used to report client network activity when enabled via +the `server.auth_log.sql_connections.enabled` and/or +`server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): + + - Connections opened/closed + - Authentication events: logins, failed attempts + - Session and query cancellation + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. 
+ +### `SQL_SCHEMA` + +The `SQL_SCHEMA` channel is used to report changes to the +SQL logical schema, excluding privilege and ownership changes +(which are reported separately on the `PRIVILEGES` channel) and +zone configuration changes (which go to the `OPS` channel). + +This includes: + + - Database/schema/table/sequence/view/type creation + - Adding/removing/changing table columns + - Changing sequence parameters + +`SQL_SCHEMA` events generally comprise changes to the schema that affect the +functional behavior of client apps using stored objects. + +### `USER_ADMIN` + +The `USER_ADMIN` channel is used to report changes +in users and roles, including: + + - Users added/dropped + - Changes to authentication credentials (e.g., passwords, validity, etc.) + - Role grants/revocations + - Role option grants/revocations + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. + +### `PRIVILEGES` + +The `PRIVILEGES` channel is used to report data +authorization changes, including: + + - Privilege grants/revocations on database, objects, etc. + - Object ownership changes + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. + +### `SENSITIVE_ACCESS` + +The `SENSITIVE_ACCESS` channel is used to report SQL +data access to sensitive data: + + - Data access audit events (when table audit is enabled via + [ALTER TABLE ... EXPERIMENTAL_AUDIT](alter-table.html#experimental_audit)) + - Data access audit events (when role-based audit is enabled via + [`sql.log.user_audit` cluster setting](role-based-audit-logging.html#syntax-of-audit-settings)) + - SQL statements executed by users with the admin role + - Operations that write to system tables + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. 
+ +### `SQL_EXEC` + +The `SQL_EXEC` channel is used to report SQL execution on +behalf of client connections: + + - Logical SQL statement executions (when enabled via the + `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) + - uncaught Go panic errors during the execution of a SQL statement. + +### `SQL_PERF` + +The `SQL_PERF` channel is used to report SQL executions +that are marked as "out of the ordinary" +to facilitate performance investigations. +This includes the SQL "slow query log". + +Arguably, this channel overlaps with `SQL_EXEC`. +However, we keep both channels separate for backward compatibility +with versions prior to v21.1, where the corresponding events +were redirected to separate files. + +### `SQL_INTERNAL_PERF` + +The `SQL_INTERNAL_PERF` channel is like the `SQL_PERF` channel, but is aimed at +helping developers of CockroachDB itself. It exists as a separate +channel so as to not pollute the `SQL_PERF` logging output with +internal troubleshooting details. + +### `TELEMETRY` + +The `TELEMETRY` channel reports telemetry events. Telemetry events describe +feature usage within CockroachDB and anonymizes any application- +specific data. + +### `KV_DISTRIBUTION` + +The `KV_DISTRIBUTION` channel is used to report data distribution events, such as moving +replicas between stores in the cluster, or adding (removing) replicas to +ranges. + +### `CHANGEFEED` + +The `CHANGEFEED` channel is used to report changefeed events + +### `KV_EXEC` + +The `KV_EXEC` channel is used to report KV execution events that don't fall into the +KV_DISTRIBUTION channel. 
+ diff --git a/src/current/_includes/cockroach-generated/release-26.1/settings/settings.html b/src/current/_includes/cockroach-generated/release-26.1/settings/settings.html new file mode 100644 index 00000000000..a08f0e0ad85 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-26.1/settings/settings.html @@ -0,0 +1,396 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
SettingTypeDefaultDescriptionSupported Deployments
admission.disk_bandwidth_tokens.elastic.enabled
booleantruewhen true, and provisioned bandwidth for the disk corresponding to a store is configured, tokens for elastic work will be limited if disk bandwidth becomes a bottleneckAdvanced/Self-Hosted
admission.epoch_lifo.enabled
booleanfalsewhen true, epoch-LIFO behavior is enabled when there is significant delay in admissionBasic/Standard/Advanced/Self-Hosted
admission.epoch_lifo.epoch_closing_delta_duration
duration5msthe delta duration before closing an epoch, for epoch-LIFO admission control orderingBasic/Standard/Advanced/Self-Hosted
admission.epoch_lifo.epoch_duration
duration100msthe duration of an epoch, for epoch-LIFO admission control orderingBasic/Standard/Advanced/Self-Hosted
admission.epoch_lifo.queue_delay_threshold_to_switch_to_lifo
duration105msthe queue delay encountered by a (tenant,priority) for switching to epoch-LIFO orderingBasic/Standard/Advanced/Self-Hosted
admission.kv.enabled
booleantruewhen true, work performed by the KV layer is subject to admission controlAdvanced/Self-Hosted
admission.sql_kv_response.enabled
booleantruewhen true, work performed by the SQL layer when receiving a KV response is subject to admission controlBasic/Standard/Advanced/Self-Hosted
admission.sql_sql_response.enabled
booleantruewhen true, work performed by the SQL layer when receiving a DistSQL response is subject to admission controlBasic/Standard/Advanced/Self-Hosted
bulkio.backup.file_size
byte size128 MiBtarget size for individual data files produced during BACKUPBasic/Standard/Advanced/Self-Hosted
bulkio.backup.read_timeout
duration5m0samount of time after which a read attempt is considered timed out, which causes the backup to failBasic/Standard/Advanced/Self-Hosted
bulkio.backup.read_with_priority_after
duration1m0samount of time since the read-as-of time above which a BACKUP should use priority when retrying readsBasic/Standard/Advanced/Self-Hosted
physical_replication.consumer.minimum_flush_interval
(alias: bulkio.stream_ingestion.minimum_flush_interval)
duration5sthe minimum timestamp between flushes; flushes may still occur if internal buffers fill upAdvanced/Self-Hosted
changefeed.aggregator.flush_jitter
float0.1jitter aggregator flushes as a fraction of min_checkpoint_frequency. This setting has no effect if min_checkpoint_frequency is set to 0.Basic/Standard/Advanced/Self-Hosted
changefeed.backfill.concurrent_scan_requests
integer0number of concurrent scan requests per node issued during a backfillBasic/Standard/Advanced/Self-Hosted
changefeed.backfill.scan_request_size
integer524288the maximum number of bytes returned by each scan requestBasic/Standard/Advanced/Self-Hosted
changefeed.batch_reduction_retry.enabled
(alias: changefeed.batch_reduction_retry_enabled)
booleanfalseif true, kafka changefeeds upon erroring on an oversized batch will attempt to resend the messages with progressively lower batch sizesBasic/Standard/Advanced/Self-Hosted
changefeed.default_range_distribution_strategy
enumerationdefaultcontrols how changefeed work is distributed across nodes. 'default' defers to DistSQL for node selection and work distribution. 'balanced_simple' uses DistSQL for node selection but then attempts to evenly distribute ranges across those selected nodes for better load balancing. this setting does not override locality restrictions and can be overridden per-changefeed using the 'range_distribution_strategy' option. [default = 0, balanced_simple = 1]Basic/Standard/Advanced/Self-Hosted
changefeed.event_consumer_worker_queue_size
integer16if changefeed.event_consumer_workers is enabled, this setting sets the maximum number of events which a worker can bufferBasic/Standard/Advanced/Self-Hosted
changefeed.event_consumer_workers
integer0the number of workers to use when processing events: <0 disables, 0 assigns a reasonable default, >0 assigns the setting value. for experimental/core changefeeds and changefeeds using parquet format, this is disabledBasic/Standard/Advanced/Self-Hosted
changefeed.fast_gzip.enabled
booleantrueuse fast gzip implementationBasic/Standard/Advanced/Self-Hosted
changefeed.span_checkpoint.lag_threshold
(alias: changefeed.frontier_highwater_lag_checkpoint_threshold)
duration10m0sthe amount of time a changefeed's lagging (slowest) spans must lag behind its leading (fastest) spans before a span-level checkpoint to save leading span progress is written; if 0, span-level checkpoints due to lagging spans is disabledBasic/Standard/Advanced/Self-Hosted
changefeed.kafka.max_request_size
byte size256 MiBthe maximum number of uncompressed bytes sent in a single request to a Kafka broker; lowering this value helps avoid spurious "message too large" errors that can occur when multiple messages are combined into a single batch; this setting is overridden by the per-changefeed Flush { MaxBytes: <int> } optionBasic/Standard/Advanced/Self-Hosted
changefeed.kafka_v2_error_details.enabled
booleantrueif enabled, Kafka v2 sinks will include the message key, size, and MVCC timestamp in message too large errorsBasic/Standard/Advanced/Self-Hosted
changefeed.memory.per_changefeed_limit
byte size512 MiBcontrols amount of data that can be buffered per changefeedBasic/Standard/Advanced/Self-Hosted
changefeed.resolved_timestamp.min_update_interval
(alias: changefeed.min_highwater_advance)
duration0sminimum amount of time that must have elapsed since the last time a changefeed's resolved timestamp was updated before it is eligible to be updated again; default of 0 means no minimum interval is enforced but updating will still be limited by the average time it takes to checkpoint progressBasic/Standard/Advanced/Self-Hosted
changefeed.node_throttle_config
stringspecifies node level throttling configuration for all changefeedsBasic/Standard/Advanced/Self-Hosted
changefeed.partition_alg.enabled
booleanfalseif enabled, allows specifying the partition_alg changefeed option to choose between fnv-1a (default) and murmur2 hash functions for Kafka partitioning. Only affects changefeeds using a kafka sink with changefeed.new_kafka_sink_enabled set to true.Basic/Standard/Advanced/Self-Hosted
changefeed.progress.frontier_persistence.interval
duration30sminimum amount of time that must elapse before a changefeed will persist its entire span frontier againBasic/Standard/Advanced/Self-Hosted
changefeed.protect_timestamp.max_age
duration96h0m0sfail the changefeed if the protected timestamp age exceeds this threshold; 0 disables expirationBasic/Standard/Advanced/Self-Hosted
changefeed.protect_timestamp_interval
duration10m0scontrols how often the changefeed forwards its protected timestamp to the resolved timestampBasic/Standard/Advanced/Self-Hosted
changefeed.schema_feed.read_with_priority_after
duration1m0sretry with high priority if we were not able to read descriptors for too long; 0 disablesBasic/Standard/Advanced/Self-Hosted
changefeed.sink_io_workers
integer0the number of workers used by changefeeds when sending requests to the sink (currently the batching versions of webhook, pubsub, and kafka sinks that are enabled by changefeed.new_<sink type>_sink_enabled only): <0 disables, 0 assigns a reasonable default, >0 assigns the setting valueBasic/Standard/Advanced/Self-Hosted
cloudstorage.azure.concurrent_upload_buffers
integer1controls the number of concurrent buffers that will be used by the Azure client when uploading chunks. Each buffer can buffer up to cloudstorage.write_chunk.size of memory during an uploadBasic/Standard/Advanced/Self-Hosted
cloudstorage.azure.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.azure.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.azure.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.azure.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.gs.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.gs.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.gs.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.gs.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.http.custom_ca
stringcustom root CA (appended to system's default CAs) for verifying certificates when interacting with HTTPS storageBasic/Standard/Advanced/Self-Hosted
cloudstorage.http.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.http.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.http.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.http.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nodelocal.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nodelocal.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nodelocal.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nodelocal.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nullsink.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nullsink.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nullsink.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nullsink.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.s3.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.s3.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.s3.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.s3.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.timeout
duration10m0sthe timeout for import/export storage operationsBasic/Standard/Advanced/Self-Hosted
cloudstorage.userfile.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.userfile.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.userfile.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.userfile.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cluster.auto_upgrade.enabled
booleantruedisable automatic cluster version upgrade until resetBasic/Standard/Advanced/Self-Hosted
cluster.organization
stringorganization nameAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
cluster.preserve_downgrade_option
stringdisable (automatic or manual) cluster version upgrade from the specified version until resetBasic/Standard/Advanced/Self-Hosted
debug.zip.redact_addresses.enabled
booleanfalseenables the redaction of hostnames and ip addresses in debug zipBasic/Standard/Advanced/Self-Hosted
diagnostics.active_query_dumps.enabled
booleantrueexperimental: enable dumping of anonymized active queries to disk when node is under memory pressureAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
diagnostics.forced_sql_stat_reset.interval
duration2h0m0sinterval after which the reported SQL Stats are reset even if not collected by telemetry reporter. It has a max value of 24H.Basic/Standard/Advanced/Self-Hosted
diagnostics.memory_monitoring_dumps.enabled
booleantrueenable dumping of memory monitoring state at the same time as heap profiles are takenAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
diagnostics.reporting.enabled
booleantrueenable reporting diagnostic metrics to cockroach labs, but is ignored for Trial or Free licensesBasic/Standard/Advanced/Self-Hosted
diagnostics.reporting.interval
duration1h0m0sinterval at which diagnostics data should be reportedBasic/Standard/Advanced/Self-Hosted
enterprise.license
stringthe encoded cluster licenseAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
external.graphite.endpoint
stringif nonempty, push server metrics to the Graphite or Carbon server at the specified host:portBasic/Standard/Advanced/Self-Hosted
external.graphite.interval
duration10sthe interval at which metrics are pushed to Graphite (if enabled)Basic/Standard/Advanced/Self-Hosted
feature.backup.enabled
booleantrueset to true to enable backups, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.changefeed.enabled
booleantrueset to true to enable changefeeds, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.export.enabled
booleantrueset to true to enable exports, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.import.enabled
booleantrueset to true to enable imports, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.infer_rbr_region_col_using_constraint.enabled
booleanfalseset to true to enable looking up the region column via a foreign key constraint in a REGIONAL BY ROW table, false to disable; default is falseBasic/Standard/Advanced/Self-Hosted
feature.restore.enabled
booleantrueset to true to enable restore, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.schema_change.enabled
booleantrueset to true to enable schema changes, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.stats.enabled
booleantrueset to true to enable CREATE STATISTICS/ANALYZE, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.vector_index.enabled
booleantrueset to true to enable vector indexes, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
jobs.retention_time
duration336h0m0sthe amount of time for which records for completed jobs are retainedBasic/Standard/Advanced/Self-Hosted
kv.allocator.lease_rebalance_threshold
float0.05minimum fraction away from the mean a store's lease count can be before it is considered for lease-transfersAdvanced/Self-Hosted
kv.allocator.load_based_lease_rebalancing.enabled
booleantrueset to enable rebalancing of range leases based on load and latencyAdvanced/Self-Hosted
kv.allocator.load_based_rebalancing
enumerationleases and replicaswhether to rebalance based on the distribution of load across stores [off = 0, leases = 1, leases and replicas = 2, multi-metric only = 3, multi-metric and count = 4]Advanced/Self-Hosted
kv.allocator.load_based_rebalancing.objective
enumerationcpuwhat objective does the cluster use to rebalance; if set to `qps` the cluster will attempt to balance qps among stores, if set to `cpu` the cluster will attempt to balance cpu usage among stores [qps = 0, cpu = 1]Advanced/Self-Hosted
kv.allocator.load_based_rebalancing_interval
duration1m0sthe rough interval at which each store will check for load-based lease / replica rebalancing opportunitiesAdvanced/Self-Hosted
kv.allocator.qps_rebalance_threshold
float0.1minimum fraction away from the mean a store's QPS (such as queries per second) can be before it is considered overfull or underfullAdvanced/Self-Hosted
kv.allocator.range_rebalance_threshold
float0.05minimum fraction away from the mean a store's range count can be before it is considered overfull or underfullAdvanced/Self-Hosted
kv.allocator.store_cpu_rebalance_threshold
float0.1minimum fraction away from the mean a store's cpu usage can be before it is considered overfull or underfullAdvanced/Self-Hosted
kv.bulk_io_write.max_rate
byte size1.0 TiBthe rate limit (bytes/sec) to use for writes to disk on behalf of bulk io opsAdvanced/Self-Hosted
kv.bulk_io_write.min_capacity_remaining_fraction
float0.05remaining store capacity fraction below which bulk ingestion requests are rejectedAdvanced/Self-Hosted
kv.bulk_sst.max_allowed_overage
byte size64 MiBif positive, allowed size in excess of target size for SSTs from export requests; export requests (i.e. BACKUP) may buffer up to the sum of kv.bulk_sst.target_size and kv.bulk_sst.max_allowed_overage in memoryAdvanced/Self-Hosted
kv.bulk_sst.target_size
byte size16 MiBtarget size for SSTs emitted from export requests; export requests (i.e. BACKUP) may buffer up to the sum of kv.bulk_sst.target_size and kv.bulk_sst.max_allowed_overage in memoryAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.closed_timestamp.follower_reads.enabled
(alias: kv.closed_timestamp.follower_reads_enabled)
booleantrueallow (all) replicas to serve consistent historical reads based on closed timestamp informationAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.closed_timestamp.lead_for_global_reads_auto_tune.enabled
booleanfalseif enabled, observed network latency between leaseholders and their furthest follower will be used to adjust closed timestamp policies for ranges configured to serve global reads. kv.closed_timestamp.lead_for_global_reads_override takes precedence if set.Advanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.closed_timestamp.lead_for_global_reads_override
duration0sif nonzero, overrides the lead time that global_read ranges use to publish closed timestampsAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.closed_timestamp.side_transport_interval
duration200msthe interval at which the closed timestamp side-transport attempts to advance each range's closed timestamp; set to 0 to disable the side-transportAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.closed_timestamp.side_transport_pacing_refresh_interval
duration10msthe refresh interval for the task pacer that controls pacing of sending sidetransport updates to avoid overloading the system when many connections are waitingAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.closed_timestamp.side_transport_pacing_smear_interval
duration1msthe smear interval for the task pacer that controls the amount of time each paced batch is going to take when broadcasting sidetransport updatesAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.closed_timestamp.target_duration
duration3sif nonzero, attempt to provide closed timestamp notifications for timestamps trailing cluster time by approximately this durationAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.dist_sender.circuit_breaker.cancellation.enabled
booleantruewhen enabled, in-flight requests will be cancelled when the circuit breaker tripsBasic/Standard/Advanced/Self-Hosted
kv.dist_sender.circuit_breaker.cancellation.write_grace_period
duration10show long after the circuit breaker trips to cancel write requests (these can't retry internally, so should be long enough to allow quorum/lease recovery)Basic/Standard/Advanced/Self-Hosted
kv.dist_sender.circuit_breaker.probe.interval
duration3sinterval between replica probesBasic/Standard/Advanced/Self-Hosted
kv.dist_sender.circuit_breaker.probe.threshold
duration3sduration of errors or stalls after which a replica will be probedBasic/Standard/Advanced/Self-Hosted
kv.dist_sender.circuit_breaker.probe.timeout
duration3stimeout for replica probesBasic/Standard/Advanced/Self-Hosted
kv.dist_sender.circuit_breakers.mode
enumerationliveness range onlyset of ranges to trip circuit breakers for failing or stalled replicas [no ranges = 0, liveness range only = 1, all ranges = 2]Basic/Standard/Advanced/Self-Hosted
kv.lease_transfer_read_summary.global_budget
byte size0 Bcontrols the maximum number of bytes that will be used to summarize the global segment of the timestamp cache during lease transfers and range merges. A smaller budget will result in loss of precision.Advanced/Self-Hosted
kv.lease_transfer_read_summary.local_budget
byte size4.0 MiBcontrols the maximum number of bytes that will be used to summarize the local segment of the timestamp cache during lease transfers and range merges. A smaller budget will result in loss of precision.Advanced/Self-Hosted
kv.log_range_and_node_events.enabled
booleantrueset to true to transactionally log range events (e.g., split, merge, add/remove voter/non-voter) into system.rangelog and node join and restart events into system.eventlogAdvanced/Self-Hosted
kv.protectedts.reconciliation.interval
duration5m0sthe frequency for reconciling jobs with protected timestamp recordsAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.raft.leader_fortification.fraction_enabled
float1controls the fraction of ranges for which the raft leader fortification protocol is enabled. Leader fortification is needed for a range to use a Leader lease. Set to 0.0 to disable leader fortification and, by extension, Leader leases. Set to 1.0 to enable leader fortification for all ranges and, by extension, use Leader leases for all ranges which do not require expiration-based leases. Set to a value between 0.0 and 1.0 to gradually roll out Leader leases across the ranges in a cluster.Advanced/Self-Hosted
kv.range.range_size_hard_cap
byte size8.0 GiBhard cap on the maximum size a range is allowed to grow to without splitting before writes to the range are blocked. Takes precedence over all other configurationsAdvanced/Self-Hosted
kv.range_split.by_load.enabled
(alias: kv.range_split.by_load_enabled)
booleantrueallow automatic splits of ranges based on where load is concentratedAdvanced/Self-Hosted
kv.range_split.load_cpu_threshold
duration500msthe CPU use per second over which, the range becomes a candidate for load based splittingAdvanced/Self-Hosted
kv.range_split.load_qps_threshold
integer2500the QPS over which, the range becomes a candidate for load based splittingAdvanced/Self-Hosted
kv.rangefeed.client.stream_startup_rate
integer100controls the rate per second the client will initiate new rangefeed stream for a single range; 0 implies unlimitedBasic/Standard/Advanced/Self-Hosted
kv.rangefeed.closed_timestamp_refresh_interval
duration3sthe interval at which closed-timestamp updates are delivered to rangefeeds; set to 0 to use kv.closed_timestamp.side_transport_intervalAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.rangefeed.enabled
booleanfalseif set, rangefeed registration is enabledAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.replica_circuit_breaker.slow_replication_threshold
duration1m0sduration after which slow proposals trip the per-Replica circuit breaker (zero duration disables breakers)Advanced/Self-Hosted
kv.replica_raft.leaderless_unavailable_threshold
duration1m0sduration after which leaderless replicas is considered unavailable. Set to 0 to disable leaderless replica availability checksAdvanced/Self-Hosted
kv.replica_stats.addsst_request_size_factor
integer50000the divisor that is applied to addsstable request sizes, then recorded in a leaseholders QPS; 0 means all requests are treated as cost 1Advanced/Self-Hosted
kv.replication_reports.interval
duration1m0sthe frequency for generating the replication_constraint_stats, replication_stats_report and replication_critical_localities reports (set to 0 to disable)Advanced/Self-Hosted
kv.snapshot_rebalance.max_rate
byte size32 MiBthe rate limit (bytes/sec) to use for rebalance and upreplication snapshotsAdvanced/Self-Hosted
kv.transaction.max_intents_and_locks
integer0maximum count of inserts or durable locks for a single transaction, 0 to disableBasic/Standard/Advanced/Self-Hosted
kv.transaction.max_intents_bytes
integer4194304maximum number of bytes used to track locks in transactionsBasic/Standard/Advanced/Self-Hosted
kv.transaction.max_refresh_spans_bytes
integer4194304maximum number of bytes used to track refresh spans in serializable transactionsBasic/Standard/Advanced/Self-Hosted
kv.transaction.randomized_anchor_key.enabled
booleanfalsedictates whether a transaction's anchor key is randomized or notBasic/Standard/Advanced/Self-Hosted
kv.transaction.reject_over_max_intents_budget.enabled
booleanfalseif set, transactions that exceed their lock tracking budget (kv.transaction.max_intents_bytes) are rejected instead of having their lock spans imprecisely compressedBasic/Standard/Advanced/Self-Hosted
kv.transaction.write_pipelining.locking_reads.enabled
booleantrueif enabled, transactional locking reads are pipelined through Raft consensusBasic/Standard/Advanced/Self-Hosted
kv.transaction.write_pipelining.ranged_writes.enabled
booleantrueif enabled, transactional ranged writes are pipelined through Raft consensusBasic/Standard/Advanced/Self-Hosted
kv.transaction.write_pipelining.enabled
(alias: kv.transaction.write_pipelining_enabled)
booleantrueif enabled, transactional writes are pipelined through Raft consensusBasic/Standard/Advanced/Self-Hosted
kv.transaction.write_pipelining.max_batch_size
(alias: kv.transaction.write_pipelining_max_batch_size)
integer128if non-zero, defines the maximum size batch that will be pipelined through Raft consensusBasic/Standard/Advanced/Self-Hosted
kvadmission.store.provisioned_bandwidth
byte size0 Bif set to a non-zero value, this is used as the provisioned bandwidth (in bytes/s), for each store. It can be overridden on a per-store basis using the --store flag. Note that setting the provisioned bandwidth to a positive value may enable disk bandwidth based admission control, since admission.disk_bandwidth_tokens.elastic.enabled defaults to trueAdvanced/Self-Hosted
kvadmission.store.snapshot_ingest_bandwidth_control.enabled
booleantrueif set to true, snapshot ingests will be subject to disk write control in ACAdvanced/Self-Hosted
log.channel_compatibility_mode.enabled
booleanfalsewhen true, logs will log to their legacy (pre 26.1) logging channels; when false, logs will be logged to new logging channelsBasic/Standard/Advanced/Self-Hosted
obs.tablemetadata.automatic_updates.enabled
booleanfalseenables automatic updates of the table metadata cache system.table_metadataBasic/Standard/Advanced/Self-Hosted
obs.tablemetadata.data_valid_duration
duration20m0sthe duration for which the data in system.table_metadata is considered validBasic/Standard/Advanced/Self-Hosted
schedules.backup.gc_protection.enabled
booleantrueenable chaining of GC protection across backups run as part of a scheduleBasic/Standard/Advanced/Self-Hosted
security.client_cert.subject_required.enabled
booleanfalsemandates a requirement for subject role to be set for db userAdvanced/Self-Hosted (read-write); Basic/Standard (read-only)
security.ocsp.mode
enumerationoffuse OCSP to check whether TLS certificates are revoked. If the OCSP server is unreachable, in strict mode all certificates will be rejected and in lax mode all certificates will be accepted. [off = 0, lax = 1, strict = 2]Basic/Standard/Advanced/Self-Hosted
security.ocsp.timeout
duration3stimeout before considering the OCSP server unreachableBasic/Standard/Advanced/Self-Hosted
security.provisioning.ldap.enabled
booleanfalseenables automatic creation of SQL users upon successful LDAP loginBasic/Standard/Advanced/Self-Hosted
server.auth_log.sql_connections.enabled
booleanfalseif set, log SQL client connect and disconnect events to the SESSIONS log channel (note: may hinder performance on loaded nodes)Basic/Standard/Advanced/Self-Hosted
server.auth_log.sql_sessions.enabled
booleanfalseif set, log verbose SQL session authentication events to the SESSIONS log channel (note: may hinder performance on loaded nodes). Session start and end events are always logged regardless of this setting; disable the SESSIONS log channel to suppress them.Basic/Standard/Advanced/Self-Hosted
server.authentication_cache.enabled
booleantrueenables a cache used during authentication to avoid lookups to system tables when retrieving per-user authentication-related informationBasic/Standard/Advanced/Self-Hosted
server.child_metrics.enabled
booleanfalseenables the exporting of child metrics, additional prometheus time series with extra labelsBasic/Standard/Advanced/Self-Hosted
server.child_metrics.include_aggregate.enabled
booleantrueinclude the reporting of the aggregate time series when child metrics are enabled. This cluster setting has no effect if child metrics are disabled.Basic/Standard/Advanced/Self-Hosted
server.clock.forward_jump_check.enabled
(alias: server.clock.forward_jump_check_enabled)
booleanfalseif enabled, forward clock jumps > max_offset/2 will cause a panicBasic/Standard/Advanced/Self-Hosted
server.clock.persist_upper_bound_interval
duration0sthe interval between persisting the wall time upper bound of the clock. The clock does not generate a wall time greater than the persisted timestamp and will panic if it sees a wall time greater than this value. When cockroach starts, it waits for the wall time to catch-up till this persisted timestamp. This guarantees monotonic wall time across server restarts. Not setting this or setting a value of 0 disables this feature.Basic/Standard/Advanced/Self-Hosted
server.consistency_check.max_rate
byte size8.0 MiBthe rate limit (bytes/sec) to use for consistency checks; used in conjunction with server.consistency_check.interval to control the frequency of consistency checks. Note that setting this too high can negatively impact performance.Advanced/Self-Hosted
server.eventlog.enabled
booleantrueif set, logged notable events are also stored in the table system.eventlogBasic/Standard/Advanced/Self-Hosted
server.eventlog.ttl
duration2160h0m0sif nonzero, entries in system.eventlog older than this duration are periodically purgedBasic/Standard/Advanced/Self-Hosted
server.host_based_authentication.configuration
stringhost-based authentication configuration to use during connection authenticationBasic/Standard/Advanced/Self-Hosted
server.hot_ranges_request.node.timeout
duration5m0sthe duration allowed for a single node to return hot range data before the request is cancelled; if set to 0, there is no timeoutBasic/Standard/Advanced/Self-Hosted
server.hsts.enabled
booleanfalseif true, HSTS headers will be sent along with all HTTP requests. The headers will contain a max-age setting of one year. Browsers honoring the header will always use HTTPS to access the DB Console. Ensure that TLS is correctly configured prior to enabling.Basic/Standard/Advanced/Self-Hosted
server.http.base_path
string/path to redirect the user to upon successful loginBasic/Standard/Advanced/Self-Hosted
server.identity_map.configuration
stringsystem-identity to database-username mappingsBasic/Standard/Advanced/Self-Hosted
server.jwt_authentication.audience
stringsets accepted audience values for JWT logins over the SQL interfaceBasic/Standard/Advanced/Self-Hosted
server.jwt_authentication.claim
stringsets the JWT claim that is parsed to get the usernameBasic/Standard/Advanced/Self-Hosted
server.jwt_authentication.client.timeout
duration15ssets the client timeout for external calls made during JWT authentication (e.g. fetching JWKS, etc.)Basic/Standard/Advanced/Self-Hosted
server.jwt_authentication.enabled
booleanfalseenables or disables JWT login for the SQL interfaceBasic/Standard/Advanced/Self-Hosted
server.jwt_authentication.issuers.configuration
(alias: server.jwt_authentication.issuers)
stringsets accepted issuer values for JWT logins over the SQL interface which can be a single issuer URL string or a JSON string containing an array of issuer URLs or a JSON object containing map of issuer URLS to JWKS URIsBasic/Standard/Advanced/Self-Hosted
server.jwt_authentication.issuers.custom_ca
stringsets the PEM encoded custom root CA for verifying certificates while fetching JWKSBasic/Standard/Advanced/Self-Hosted
server.jwt_authentication.jwks
string{"keys":[]}sets the public key set for JWT logins over the SQL interface (JWKS format)Basic/Standard/Advanced/Self-Hosted
server.jwt_authentication.jwks_auto_fetch.enabled
booleanfalseenables or disables automatic fetching of JWKS from the issuer's well-known endpoint or JWKS URI set in JWTAuthIssuersConfig. If this is enabled, the server.jwt_authentication.jwks will be ignored.Basic/Standard/Advanced/Self-Hosted
server.ldap_authentication.client.tls_certificate
stringsets the client certificate PEM for establishing mTLS connection with LDAP serverBasic/Standard/Advanced/Self-Hosted
server.ldap_authentication.client.tls_key
stringsets the client key PEM for establishing mTLS connection with LDAP serverBasic/Standard/Advanced/Self-Hosted
server.ldap_authentication.domain.custom_ca
stringsets the PEM encoded custom root CA for verifying domain certificates when establishing connection with LDAP serverBasic/Standard/Advanced/Self-Hosted
server.log_gc.max_deletions_per_cycle
integer1000the maximum number of entries to delete on each purge of log-like system tablesBasic/Standard/Advanced/Self-Hosted
server.log_gc.period
duration1h0m0sthe period at which log-like system tables are checked for old entriesBasic/Standard/Advanced/Self-Hosted
server.max_connections_per_gateway
integer-1the maximum number of SQL connections per gateway allowed at a given time (note: this will only limit future connection attempts and will not affect already established connections). Negative values result in unlimited number of connections. Superusers are not affected by this limit.Basic/Standard/Advanced/Self-Hosted
server.max_open_transactions_per_gateway
integer-1the maximum number of open SQL transactions per gateway allowed at a given time. Negative values result in unlimited number of connections. Superusers are not affected by this limit.Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.autologin.enabled
(alias: server.oidc_authentication.autologin)
booleanfalseif true, logged-out visitors to the DB Console will be automatically redirected to the OIDC login endpointBasic/Standard/Advanced/Self-Hosted
server.oidc_authentication.button_text
stringLog in with your OIDC providertext to show on button on DB Console login page to login with your OIDC provider (only shown if OIDC is enabled)Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.claim_json_key
stringsets JSON key of principal to extract from payload after OIDC authentication completes (usually email or sid)Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.client.timeout
duration15ssets the client timeout for external calls made during OIDC authentication (e.g. authorization code flow, etc.)Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.client_id
stringsets OIDC client idBasic/Standard/Advanced/Self-Hosted
server.oidc_authentication.client_secret
stringsets OIDC client secretBasic/Standard/Advanced/Self-Hosted
server.oidc_authentication.enabled
booleanfalseenables or disables OIDC login for the DB ConsoleBasic/Standard/Advanced/Self-Hosted
server.oidc_authentication.principal_regex
string(.+)regular expression to apply to extracted principal (see claim_json_key setting) to translate to SQL user (golang regex format, must include 1 grouping to extract)Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.provider.custom_ca
stringsets the PEM encoded custom root CA for verifying certificates while authenticating through the OIDC providerBasic/Standard/Advanced/Self-Hosted
server.oidc_authentication.provider_url
stringsets OIDC provider URL ({provider_url}/.well-known/openid-configuration must resolve)Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.redirect_url
stringhttps://localhost:8080/oidc/v1/callbacksets OIDC redirect URL via a URL string or a JSON string containing a required `redirect_urls` key with an object that maps from region keys to URL strings (URLs should point to your load balancer and must route to the path /oidc/v1/callback)Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.scopes
stringopenidsets OIDC scopes to include with authentication request (space delimited list of strings, required to start with `openid`)Basic/Standard/Advanced/Self-Hosted
server.rangelog.ttl
duration720h0m0sif nonzero, entries in system.rangelog older than this duration are periodically purgedAdvanced/Self-Hosted
server.redact_sensitive_settings.enabled
booleanfalseenables or disables the redaction of sensitive settings in the output of SHOW CLUSTER SETTINGS and SHOW ALL CLUSTER SETTINGS for users without the MODIFYCLUSTERSETTING privilegeBasic/Standard/Advanced/Self-Hosted
server.shutdown.connections.timeout
(alias: server.shutdown.connection_wait)
duration0sthe maximum amount of time a server waits for all SQL connections to be closed before proceeding with a drain. (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Basic/Standard/Advanced/Self-Hosted
server.shutdown.initial_wait
(alias: server.shutdown.drain_wait)
duration0sthe amount of time a server waits in an unready state before proceeding with a drain (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting. --drain-wait is to specify the duration of the whole draining process, while server.shutdown.initial_wait is to set the wait time for health probes to notice that the node is not ready.)Basic/Standard/Advanced/Self-Hosted
server.shutdown.lease_transfer_iteration.timeout
(alias: server.shutdown.lease_transfer_wait)
duration5sthe timeout for a single iteration of the range lease transfer phase of draining (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Advanced/Self-Hosted
server.shutdown.transactions.timeout
(alias: server.shutdown.query_wait)
duration10sthe timeout for waiting for active transactions to finish during a drain (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Basic/Standard/Advanced/Self-Hosted
server.sql_tcp_keep_alive.count
integer3maximum number of probes that will be sent out before a connection is dropped because it's unresponsive (Linux and Darwin only)Basic/Standard/Advanced/Self-Hosted
server.sql_tcp_keep_alive.interval
duration10stime between keep alive probes and idle time before probes are sent outBasic/Standard/Advanced/Self-Hosted
server.time_until_store_dead
duration5m0sthe time after which if there is no new gossiped information about a store, it is considered deadBasic/Standard/Advanced/Self-Hosted
server.user_login.cert_password_method.auto_scram_promotion.enabled
booleantruewhether to automatically promote cert-password authentication to use SCRAMBasic/Standard/Advanced/Self-Hosted
server.user_login.downgrade_scram_stored_passwords_to_bcrypt.enabled
booleantrueif server.user_login.password_encryption=crdb-bcrypt, this controls whether to automatically re-encode stored passwords using scram-sha-256 to crdb-bcryptBasic/Standard/Advanced/Self-Hosted
server.user_login.min_password_length
integer1the minimum length accepted for passwords set in cleartext via SQL. Note that a value lower than 1 is ignored: passwords cannot be empty in any case. This setting only applies when adding new users or altering an existing user's password; it will not affect existing logins.Basic/Standard/Advanced/Self-Hosted
server.user_login.password_encryption
enumerationscram-sha-256which hash method to use to encode cleartext passwords passed via ALTER/CREATE USER/ROLE WITH PASSWORD [crdb-bcrypt = 2, scram-sha-256 = 3]Basic/Standard/Advanced/Self-Hosted
server.user_login.password_hashes.default_cost.crdb_bcrypt
integer10the hashing cost to use when storing passwords supplied as cleartext by SQL clients with the hashing method crdb-bcrypt (allowed range: 4-31)Basic/Standard/Advanced/Self-Hosted
server.user_login.password_hashes.default_cost.scram_sha_256
integer10610the hashing cost to use when storing passwords supplied as cleartext by SQL clients with the hashing method scram-sha-256 (allowed range: 4096-240000000000)Basic/Standard/Advanced/Self-Hosted
server.user_login.rehash_scram_stored_passwords_on_cost_change.enabled
booleantrueif server.user_login.password_hashes.default_cost.scram_sha_256 differs from the cost in a stored hash, this controls whether to automatically re-encode stored passwords using scram-sha-256 with the new default costBasic/Standard/Advanced/Self-Hosted
server.user_login.timeout
duration10stimeout after which client authentication times out if some system range is unavailable (0 = no timeout)Basic/Standard/Advanced/Self-Hosted
server.user_login.upgrade_bcrypt_stored_passwords_to_scram.enabled
booleantrueif server.user_login.password_encryption=scram-sha-256, this controls whether to automatically re-encode stored passwords using crdb-bcrypt to scram-sha-256Basic/Standard/Advanced/Self-Hosted
server.web_session.purge.ttl
duration1h0m0sif nonzero, entries in system.web_sessions older than this duration are periodically purgedBasic/Standard/Advanced/Self-Hosted
server.web_session.timeout
(alias: server.web_session_timeout)
duration168h0m0sthe duration that a newly created web session will be validBasic/Standard/Advanced/Self-Hosted
spanconfig.bounds.enabled
booleantruedictates whether span config bounds are consulted when serving span configs for secondary tenantsAdvanced/Self-Hosted
spanconfig.range_coalescing.system.enabled
(alias: spanconfig.storage_coalesce_adjacent.enabled)
booleantruecollapse adjacent ranges with the same span configs, for the ranges specific to the system tenantAdvanced/Self-Hosted
spanconfig.range_coalescing.application.enabled
(alias: spanconfig.tenant_coalesce_adjacent.enabled)
booleantruecollapse adjacent ranges with the same span configs across all secondary tenant keyspacesAdvanced/Self-Hosted
sql.auth.change_own_password.enabled
booleanfalsecontrols whether a user is allowed to change their own password, even if they have no other privilegesBasic/Standard/Advanced/Self-Hosted
sql.auth.grant_option_for_owner.enabled
booleantruedetermines whether the GRANT OPTION for privileges is implicitly given to the owner of an objectBasic/Standard/Advanced/Self-Hosted
sql.auth.grant_option_inheritance.enabled
booleantruedetermines whether the GRANT OPTION for privileges is inherited through role membershipBasic/Standard/Advanced/Self-Hosted
sql.auth.public_schema_create_privilege.enabled
booleantruedetermines whether to grant all users the CREATE privileges on the public schema when it is createdBasic/Standard/Advanced/Self-Hosted
sql.auth.skip_underlying_view_privilege_checks.enabled
booleantruedetermines whether to skip privilege checks on tables underlying views. When enabled, users with SELECT privileges on a view can query it regardless of their privileges on the underlying tables, and row-level security policies are evaluated as the invoking user rather than the view owner. This restores pre-v26.2 behavior.Basic/Standard/Advanced/Self-Hosted
sql.catalog.allow_leased_descriptors.enabled
booleantrueif true, catalog views (crdb_internal, information_schema, pg_catalog) can use leased descriptors for improved performanceBasic/Standard/Advanced/Self-Hosted
sql.closed_session_cache.capacity
integer1000the maximum number of sessions in the cacheBasic/Standard/Advanced/Self-Hosted
sql.closed_session_cache.time_to_live
integer3600the maximum time to live, in secondsBasic/Standard/Advanced/Self-Hosted
sql.contention.event_store.capacity
byte size64 MiBthe in-memory storage capacity per-node of contention event storeBasic/Standard/Advanced/Self-Hosted
sql.contention.event_store.duration_threshold
duration0sminimum contention duration to cause the contention events to be collected into crdb_internal.transaction_contention_eventsBasic/Standard/Advanced/Self-Hosted
sql.contention.record_serialization_conflicts.enabled
booleantrueenables recording 40001 errors with conflicting txn meta as SERIALIZATION_CONFLICT contention events into crdb_internal.transaction_contention_eventsBasic/Standard/Advanced/Self-Hosted
sql.contention.txn_id_cache.max_size
byte size64 MiBthe maximum byte size TxnID cache will use (set to 0 to disable)Basic/Standard/Advanced/Self-Hosted
sql.cross_db_fks.enabled
booleanfalseif true, creating foreign key references across databases is allowedBasic/Standard/Advanced/Self-Hosted
sql.cross_db_sequence_owners.enabled
booleanfalseif true, creating sequences owned by tables from other databases is allowedBasic/Standard/Advanced/Self-Hosted
sql.cross_db_sequence_references.enabled
booleanfalseif true, sequences referenced by tables from other databases are allowedBasic/Standard/Advanced/Self-Hosted
sql.cross_db_views.enabled
booleanfalseif true, creating views that refer to other databases is allowedBasic/Standard/Advanced/Self-Hosted
sql.defaults.cost_scans_with_default_col_size.enabled
booleanfalsesetting to true uses the same size for all columns to compute scan cost
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.datestyle
enumerationiso, mdydefault value for DateStyle session setting [iso, mdy = 0, iso, dmy = 1, iso, ymd = 2]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.default_hash_sharded_index_bucket_count
integer16used as bucket count if bucket count is not specified in hash sharded index definition
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.default_int_size
integer8the size, in bytes, of an INT type
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.disallow_full_table_scans.enabled
booleanfalsesetting to true rejects queries that have planned a full table scan; set large_full_scan_rows > 0 to allow small full table scans estimated to read fewer than large_full_scan_rows
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.distsql
enumerationautodefault distributed SQL execution mode [off = 0, auto = 1, on = 2, always = 3]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.experimental_alter_column_type.enabled
booleanfalsedefault value for experimental_alter_column_type session setting; enables the use of ALTER COLUMN TYPE for general conversions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.experimental_distsql_planning
enumerationoffdefault experimental_distsql_planning mode; enables experimental opt-driven DistSQL planning [off = 0, on = 1]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.experimental_enable_unique_without_index_constraints.enabled
booleanfalsedefault value for experimental_enable_unique_without_index_constraints session setting; disables unique without index constraints by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.experimental_implicit_column_partitioning.enabled
booleanfalsedefault value for experimental_enable_temp_tables; allows for the use of implicit column partitioning
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.experimental_temporary_tables.enabled
booleanfalsedefault value for experimental_enable_temp_tables; allows for use of temporary tables by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.foreign_key_cascades_limit
integer10000default value for foreign_key_cascades_limit session setting; limits the number of cascading operations that run as part of a single query
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.idle_in_session_timeout
duration0sdefault value for the idle_in_session_timeout; default value for the idle_in_session_timeout session setting; controls the duration a session is permitted to idle before the session is terminated; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.idle_in_transaction_session_timeout
duration0sdefault value for the idle_in_transaction_session_timeout; controls the duration a session is permitted to idle in a transaction before the session is terminated; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.implicit_select_for_update.enabled
booleantruedefault value for enable_implicit_select_for_update session setting; enables FOR UPDATE locking during the row-fetch phase of mutation statements
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.insert_fast_path.enabled
booleantruedefault value for enable_insert_fast_path session setting; enables a specialized insert path
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.intervalstyle
enumerationpostgresdefault value for IntervalStyle session setting [postgres = 0, iso_8601 = 1, sql_standard = 2]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.large_full_scan_rows
float0default value for large_full_scan_rows session variable which determines the table size at which full scans are considered large and disallowed when disallow_full_table_scans is set to true; set to 0 to reject all full table or full index scans when disallow_full_table_scans is true
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.locality_optimized_partitioned_index_scan.enabled
booleantruedefault value for locality_optimized_partitioned_index_scan session setting; enables searching for rows in the current region before searching remote regions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.lock_timeout
duration0sdefault value for the lock_timeout; default value for the lock_timeout session setting; controls the duration a query is permitted to wait while attempting to acquire a lock on a key or while blocking on an existing lock in order to perform a non-locking read on a key; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.on_update_rehome_row.enabled
booleantruedefault value for on_update_rehome_row; enables ON UPDATE rehome_row() expressions to trigger on updates
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.optimizer_use_histograms.enabled
booleantruedefault value for optimizer_use_histograms session setting; enables usage of histograms in the optimizer by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.optimizer_use_multicol_stats.enabled
booleantruedefault value for optimizer_use_multicol_stats session setting; enables usage of multi-column stats in the optimizer by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.override_alter_primary_region_in_super_region.enabled
booleanfalsedefault value for override_alter_primary_region_in_super_region; allows for altering the primary region even if the primary region is a member of a super region
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.override_multi_region_zone_config.enabled
booleanfalsedefault value for override_multi_region_zone_config; allows for overriding the zone configs of a multi-region table or database
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.prefer_lookup_joins_for_fks.enabled
booleanfalsedefault value for prefer_lookup_joins_for_fks session setting; causes foreign key operations to use lookup joins when possible
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.primary_region
stringif not empty, all databases created without a PRIMARY REGION will implicitly have the given PRIMARY REGION
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.reorder_joins_limit
integer8default number of joins to reorder
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.require_explicit_primary_keys.enabled
booleanfalsedefault value for requiring explicit primary keys in CREATE TABLE statements
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.results_buffer.size
byte size512 KiBdefault size of the buffer that accumulates results for a statement or a batch of statements before they are sent to the client. This can be overridden on an individual connection with the 'results_buffer_size' parameter. Note that auto-retries generally only happen while no results have been delivered to the client, so reducing this size can increase the number of retriable errors a client receives. On the other hand, increasing the buffer size can increase the delay until the client receives the first result row. Updating the setting only affects new connections. Setting to 0 disables any buffering.
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.serial_normalization
enumerationrowiddefault handling of SERIAL in table definitions [rowid = 0, virtual_sequence = 1, sql_sequence = 2, sql_sequence_cached = 3, unordered_rowid = 4, sql_sequence_cached_node = 5]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.statement_timeout
duration0sdefault value for the statement_timeout; default value for the statement_timeout session setting; controls the duration a query is permitted to run before it is canceled; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.stub_catalog_tables.enabled
booleantruedefault value for stub_catalog_tables session setting
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.super_regions.enabled
booleanfalsedefault value for enable_super_regions; allows for the usage of super regions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.transaction_rows_read_err
integer0the limit for the number of rows read by a SQL transaction which - once exceeded - will fail the transaction (or will trigger a logging event to SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.transaction_rows_read_log
integer0the threshold for the number of rows read by a SQL transaction which - once exceeded - will trigger a logging event to SQL_PERF (or SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.transaction_rows_written_err
integer0the limit for the number of rows written by a SQL transaction which - once exceeded - will fail the transaction (or will trigger a logging event to SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.transaction_rows_written_log
integer0the threshold for the number of rows written by a SQL transaction which - once exceeded - will trigger a logging event to SQL_PERF (or SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.use_declarative_schema_changer
enumerationondefault value for use_declarative_schema_changer session setting; disables new schema changer by default [off = 0, on = 1, unsafe = 2, unsafe_always = 3]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.vectorize
enumerationondefault vectorize mode [on = 0, on = 1, on = 2, experimental_always = 3, off = 4]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.zigzag_join.enabled
booleanfalsedefault value for enable_zigzag_join session setting; disallows use of zig-zag join by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.distsql.temp_storage.workmem
byte size64 MiBmaximum amount of memory in bytes a processor can use before falling back to temp storageBasic/Standard/Advanced/Self-Hosted
sql.guardrails.max_row_size_err
byte size512 MiBmaximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an error is returned; use 0 to disableBasic/Standard/Advanced/Self-Hosted
sql.guardrails.max_row_size_log
byte size64 MiBmaximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an event is logged to SQL_PERF (or SQL_INTERNAL_PERF if the mutating statement was internal); use 0 to disableBasic/Standard/Advanced/Self-Hosted
sql.hash_sharded_range_pre_split.max
integer16max pre-split ranges to have when adding hash sharded index to an existing tableBasic/Standard/Advanced/Self-Hosted
sql.index_recommendation.drop_unused_duration
duration168h0m0sthe index unused duration at which we begin to recommend dropping the indexBasic/Standard/Advanced/Self-Hosted
sql.insights.anomaly_detection.enabled
booleantrueenable per-fingerprint latency recording and anomaly detectionBasic/Standard/Advanced/Self-Hosted
sql.insights.anomaly_detection.latency_threshold
duration50msstatements must surpass this threshold to trigger anomaly detection and identificationBasic/Standard/Advanced/Self-Hosted
sql.insights.anomaly_detection.memory_limit
byte size1.0 MiBthe maximum amount of memory allowed for tracking statement latenciesBasic/Standard/Advanced/Self-Hosted
sql.insights.execution_insights_capacity
integer1000the size of the per-node store of execution insightsBasic/Standard/Advanced/Self-Hosted
sql.insights.high_retry_count.threshold
integer10the number of retries a slow statement must have undergone for its high retry count to be highlighted as a potential problemBasic/Standard/Advanced/Self-Hosted
sql.insights.latency_threshold
duration100msamount of time after which an executing statement is considered slow. Use 0 to disable.Basic/Standard/Advanced/Self-Hosted
sql.log.redact_names.enabled
booleanfalseif set, schema object identifiers are redacted in SQL statements that appear in event logsBasic/Standard/Advanced/Self-Hosted
sql.log.scan_row_count_misestimate.enabled
booleanfalsewhen set to true, log a warning when a scan's actual row count differs significantly from the optimizer's estimateBasic/Standard/Advanced/Self-Hosted
sql.log.slow_query.experimental_full_table_scans.enabled
booleanfalsewhen set to true, statements that perform a full table/index scan will be logged to the slow query log even if they do not meet the latency threshold. Must have the slow query log enabled for this setting to have any effect.Basic/Standard/Advanced/Self-Hosted
sql.log.slow_query.internal_queries.enabled
booleanfalsewhen set to true, internal queries which exceed the slow query log threshold are logged to a separate log. Must have the slow query log enabled for this setting to have any effect.Basic/Standard/Advanced/Self-Hosted
sql.log.slow_query.latency_threshold
duration0swhen set to non-zero, log statements whose service latency exceeds the threshold to a secondary logger on each nodeBasic/Standard/Advanced/Self-Hosted
sql.log.user_audit
stringuser/role-based audit logging configuration. An enterprise license is required for this cluster setting to take effect.Basic/Standard/Advanced/Self-Hosted
sql.log.user_audit.reduced_config.enabled
booleanfalseenables logic to compute a reduced audit configuration, computing the audit configuration only once at session start instead of at each SQL event. The tradeoff with the increase in performance (~5%), is that changes to the audit configuration (user role memberships/cluster setting) are not reflected within session. Users will need to start a new session to see these changes in their auditing behaviour.Basic/Standard/Advanced/Self-Hosted
sql.metrics.application_name.enabled
booleanfalsewhen enabled, SQL metrics would export application name as an additional label as part of child metrics. The number of unique label combinations is limited to 5000 by default.Basic/Standard/Advanced/Self-Hosted
sql.metrics.database_name.enabled
booleanfalsewhen enabled, SQL metrics would export database name as an additional label as part of child metrics. The number of unique label combinations is limited to 5000 by default.Basic/Standard/Advanced/Self-Hosted
sql.metrics.index_usage_stats.enabled
booleantruecollect per index usage statisticsBasic/Standard/Advanced/Self-Hosted
sql.metrics.max_mem_reported_stmt_fingerprints
integer100000the maximum number of reported statement fingerprints stored in memoryBasic/Standard/Advanced/Self-Hosted
sql.metrics.max_mem_reported_txn_fingerprints
integer100000the maximum number of reported transaction fingerprints stored in memoryBasic/Standard/Advanced/Self-Hosted
sql.metrics.max_mem_stmt_fingerprints
integer7500the maximum number of statement fingerprints stored in memoryBasic/Standard/Advanced/Self-Hosted
sql.metrics.max_mem_txn_fingerprints
integer7500the maximum number of transaction fingerprints stored in memoryBasic/Standard/Advanced/Self-Hosted
sql.metrics.statement_details.dump_to_logs.enabled
(alias: sql.metrics.statement_details.dump_to_logs)
booleanfalsedump collected statement statistics to node logs when periodically clearedBasic/Standard/Advanced/Self-Hosted
sql.metrics.statement_details.enabled
booleantruecollect per-statement query statisticsBasic/Standard/Advanced/Self-Hosted
sql.metrics.statement_details.gateway_node.enabled
booleanfalsesave the gateway node for each statement fingerprint. If false, the value will be stored as 0.Basic/Standard/Advanced/Self-Hosted
sql.metrics.statement_details.index_recommendation_collection.enabled
booleantruegenerate an index recommendation for each fingerprint IDBasic/Standard/Advanced/Self-Hosted
sql.metrics.statement_details.max_mem_reported_idx_recommendations
integer5000the maximum number of reported index recommendation info stored in memoryBasic/Standard/Advanced/Self-Hosted
sql.metrics.statement_details.threshold
duration0sminimum execution time to cause statement statistics to be collected. If configured, no transaction stats are collected.Basic/Standard/Advanced/Self-Hosted
sql.metrics.transaction_details.enabled
booleantruecollect per-application transaction statisticsBasic/Standard/Advanced/Self-Hosted
sql.multiple_modifications_of_table.enabled
booleanfalseif true, allow statements containing multiple INSERT ON CONFLICT, UPSERT, UPDATE, or DELETE subqueries modifying the same table, at the risk of data corruption if the same row is modified multiple times by a single statement (multiple INSERT subqueries without ON CONFLICT cannot cause corruption and are always allowed)Basic/Standard/Advanced/Self-Hosted
sql.multiregion.drop_primary_region.enabled
booleantrueallows dropping the PRIMARY REGION of a database if it is the last regionBasic/Standard/Advanced/Self-Hosted
sql.notices.enabled
booleantrueenable notices in the server/client protocol being sentBasic/Standard/Advanced/Self-Hosted
sql.optimizer.uniqueness_checks_for_gen_random_uuid.enabled
booleanfalseif enabled, uniqueness checks may be planned for mutations of UUID columns updated with gen_random_uuid(); otherwise, uniqueness is assumed due to near-zero collision probabilityBasic/Standard/Advanced/Self-Hosted
sql.schema.approx_max_object_count
integer20000approximate maximum number of schema objects allowed in the cluster; the check uses cached statistics, so the actual count may slightly exceed this limit; set to 0 to disableBasic/Standard/Advanced/Self-Hosted
sql.schema.auto_unlock.enabled
booleantruecontrols whether DDL operations will attempt to automatically unlock and re-lock schema_locked tables. When this setting is false, DDL on schema_locked tables is blocked unless the user manually unlocks the table first. The schema_locked storage parameter improves changefeed performance by locking the table's schema from the perspective of the changefeed.Basic/Standard/Advanced/Self-Hosted
sql.schema.telemetry.recurrence
string@weeklycron-tab recurrence for SQL schema telemetry jobAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
sql.spatial.experimental_box2d_comparison_operators.enabled
booleanfalseenables the use of certain experimental box2d comparison operatorsBasic/Standard/Advanced/Self-Hosted
sql.sqlcommenter.enabled
booleanfalseenables support for sqlcommenter. Key value parsed from sqlcommenter comments will be included in sql insights and sql logs. See https://google.github.io/sqlcommenter/ for more details.Basic/Standard/Advanced/Self-Hosted
sql.stats.activity.persisted_rows.max
integer200000maximum number of rows of statement and transaction activity that will be persisted in the system tablesBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_collection.enabled
booleantrueautomatic statistics collection modeBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_collection.fraction_stale_rows
float0.2target fraction of stale rows per table that will trigger a statistics refreshBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_collection.min_stale_rows
integer500target minimum number of stale rows per table that will trigger a statistics refreshBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_extremes_concurrency_limit
integer128determines the maximum number of concurrent automatic partial USING EXTREMES table statistics collection jobsBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_full_collection.enabled
booleantrueautomatic full statistics collection modeBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_full_concurrency_limit
integer1determines the maximum number of concurrent automatic full table statistics collection jobsBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_partial_collection.enabled
booleantrueautomatic partial statistics collection modeBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_partial_collection.fraction_stale_rows
float0.05target fraction of stale rows per table that will trigger a partial statistics refreshBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_partial_collection.min_stale_rows
integer100target minimum number of stale rows per table that will trigger a partial statistics refreshBasic/Standard/Advanced/Self-Hosted
sql.stats.cleanup.recurrence
string@hourlycron-tab recurrence for SQL Stats cleanup jobBasic/Standard/Advanced/Self-Hosted
sql.stats.detailed_latency_metrics.enabled
booleanfalselabel latency metrics with the statement fingerprint. Workloads with tens of thousands of distinct query fingerprints should leave this setting false. (experimental, affects performance for workloads with high fingerprint cardinality)Basic/Standard/Advanced/Self-Hosted
sql.stats.error_on_concurrent_create_stats.enabled
booleanfalseset to true to error on concurrent CREATE STATISTICS jobs, instead of skipping themBasic/Standard/Advanced/Self-Hosted
sql.stats.flush.enabled
booleantrueif set, SQL execution statistics are periodically flushed to diskBasic/Standard/Advanced/Self-Hosted
sql.stats.flush.interval
duration10m0sthe interval at which SQL execution statistics are flushed to disk, this value must be less than or equal to 1 hourBasic/Standard/Advanced/Self-Hosted
sql.stats.forecasts.enabled
booleantruewhen true, enables generation of statistics forecasts by default for all tablesBasic/Standard/Advanced/Self-Hosted
sql.stats.forecasts.max_decrease
float0.3333333333333333the most a prediction is allowed to decrease, expressed as the minimum ratio of the prediction to the lowest prior observationBasic/Standard/Advanced/Self-Hosted
sql.stats.forecasts.min_goodness_of_fit
float0.95the minimum R² (goodness of fit) measurement required from all predictive models to use a forecastBasic/Standard/Advanced/Self-Hosted
sql.stats.forecasts.min_observations
integer3the minimum number of observed statistics required to produce a statistics forecastBasic/Standard/Advanced/Self-Hosted
sql.stats.histogram_buckets.count
integer200maximum number of histogram buckets to build during table statistics collectionBasic/Standard/Advanced/Self-Hosted
sql.stats.histogram_buckets.include_most_common_values.enabled
booleantruewhether to include most common values as histogram bucketsBasic/Standard/Advanced/Self-Hosted
sql.stats.histogram_buckets.max_fraction_most_common_values
float0.1maximum fraction of histogram buckets to use for most common valuesBasic/Standard/Advanced/Self-Hosted
sql.stats.histogram_collection.enabled
booleantruehistogram collection modeBasic/Standard/Advanced/Self-Hosted
sql.stats.histogram_samples.count
integer0number of rows sampled for histogram construction during table statistics collection. Not setting this or setting a value of 0 means that a reasonable sample size will be automatically picked based on the table size.Basic/Standard/Advanced/Self-Hosted
sql.stats.multi_column_collection.enabled
booleantruemulti-column statistics collection modeBasic/Standard/Advanced/Self-Hosted
sql.stats.non_default_columns.min_retention_period
duration24h0m0sminimum retention period for table statistics collected on non-default columnsBasic/Standard/Advanced/Self-Hosted
sql.stats.non_indexed_json_histograms.enabled
booleanfalseset to true to collect table statistics histograms on non-indexed JSON columnsBasic/Standard/Advanced/Self-Hosted
sql.stats.persisted_rows.max
integer1000000maximum number of rows of statement and transaction statistics that will be persisted in the system tables before compaction beginsBasic/Standard/Advanced/Self-Hosted
sql.stats.post_events.enabled
booleanfalseif set, an event is logged for every successful CREATE STATISTICS jobBasic/Standard/Advanced/Self-Hosted
sql.stats.response.max
integer20000the maximum number of statements and transaction stats returned in a CombinedStatements requestBasic/Standard/Advanced/Self-Hosted
sql.stats.response.show_internal.enabled
booleanfalsecontrols if statistics for internal executions should be returned by the CombinedStatements and if internal sessions should be returned by the ListSessions endpoints. These endpoints are used to display statistics on the SQL Activity pagesBasic/Standard/Advanced/Self-Hosted
sql.stats.system_tables.enabled
booleantruewhen true, enables use of statistics on system tables by the query optimizerBasic/Standard/Advanced/Self-Hosted
sql.stats.system_tables_autostats.enabled
booleantruewhen true, enables automatic collection of statistics on system tablesBasic/Standard/Advanced/Self-Hosted
sql.stats.table_statistics_cache.capacity
integer256the maximum number of table statistics entries stored in the LRU cache. Each cache entry corresponds to a single table.Basic/Standard/Advanced/Self-Hosted
sql.stats.virtual_computed_columns.enabled
booleantrueset to true to collect table statistics on virtual computed columnsBasic/Standard/Advanced/Self-Hosted
sql.telemetry.query_sampling.enabled
booleanfalsewhen set to true, executed queries will emit an event on the telemetry logging channelBasic/Standard/Advanced/Self-Hosted
sql.telemetry.query_sampling.internal.enabled
booleanfalsewhen set to true, internal queries will be sampled in telemetry loggingBasic/Standard/Advanced/Self-Hosted
sql.telemetry.query_sampling.max_event_frequency
integer8the max event frequency (events per second) at which we sample executions for telemetry, note that it is recommended that this value shares a log-line limit of 10 logs per second on the telemetry pipeline with all other telemetry events. If sampling mode is set to 'transaction', this value is ignored.Basic/Standard/Advanced/Self-Hosted
sql.telemetry.query_sampling.mode
enumerationstatementthe execution level used for telemetry sampling. If set to 'statement', events are sampled at the statement execution level. If set to 'transaction', events are sampled at the transaction execution level, i.e. all statements for a transaction will be logged and are counted together as one sampled event (events are still emitted one per statement). [statement = 0, transaction = 1]Basic/Standard/Advanced/Self-Hosted
sql.telemetry.transaction_sampling.max_event_frequency
integer8the max event frequency (events per second) at which we sample transactions for telemetry. If sampling mode is set to 'statement', this setting is ignored. In practice, this means that we only sample a transaction if 1/max_event_frequency seconds have elapsed since the last transaction was sampled.Basic/Standard/Advanced/Self-Hosted
sql.telemetry.transaction_sampling.statement_events_per_transaction.max
integer50the maximum number of statement events to log for every sampled transaction. Note that statements that are logged by force do not adhere to this limit.Basic/Standard/Advanced/Self-Hosted
sql.temp_object_cleaner.cleanup_interval
duration30m0show often to clean up orphaned temporary objectsBasic/Standard/Advanced/Self-Hosted
sql.temp_object_cleaner.wait_interval
duration30m0show long after creation a temporary object will be cleaned upBasic/Standard/Advanced/Self-Hosted
sql.log.all_statements.enabled
(alias: sql.trace.log_statement_execute)
booleanfalseset to true to enable logging of all executed statementsBasic/Standard/Advanced/Self-Hosted
sql.trace.stmt.enable_threshold
duration0senables tracing on all statements; statements executing for longer than this duration will have their trace logged (set to 0 to disable); note that enabling this may have a negative performance impact; this setting applies to individual statements within a transaction and is therefore finer-grained than sql.trace.txn.enable_thresholdBasic/Standard/Advanced/Self-Hosted
sql.trace.txn.enable_threshold
duration0senables transaction traces for transactions exceeding this duration, used with `sql.trace.txn.sample_rate`Basic/Standard/Advanced/Self-Hosted
sql.trace.txn.include_internal.enabled
booleantrueenables tracing internal transactions as well as external workload using sample rate and threshold settingsBasic/Standard/Advanced/Self-Hosted
sql.trace.txn.jaeger_json_output.enabled
booleanfalseenables Jaeger JSON output for transaction traces in logsBasic/Standard/Advanced/Self-Hosted
sql.trace.txn.sample_rate
float1enables probabilistic transaction tracing. It should be used in conjunction with `sql.trace.txn.enable_threshold`. A percentage of transactions between 0 and 1.0 will have tracing enabled, and only those which exceed the configured threshold will be logged.Basic/Standard/Advanced/Self-Hosted
sql.ttl.changefeed_replication.disabled
booleanfalseif true, deletes issued by TTL will not be replicated via changefeeds (this setting will be ignored by changefeeds that have the ignore_disable_changefeed_replication option set; such changefeeds will continue to replicate all TTL deletes)Basic/Standard/Advanced/Self-Hosted
sql.ttl.default_delete_batch_size
integer100default amount of rows to delete in a single query during a TTL jobBasic/Standard/Advanced/Self-Hosted
sql.ttl.default_delete_rate_limit
integer100default delete rate limit (rows per second) per node for each TTL job. Use 0 to signify no rate limit.Basic/Standard/Advanced/Self-Hosted
sql.ttl.default_select_batch_size
integer500default amount of rows to select in a single query during a TTL jobBasic/Standard/Advanced/Self-Hosted
sql.ttl.default_select_rate_limit
integer0default select rate limit (rows per second) per node for each TTL job. Use 0 to signify no rate limit.Basic/Standard/Advanced/Self-Hosted
sql.ttl.job.enabled
booleantruewhether the TTL job is enabledBasic/Standard/Advanced/Self-Hosted
sql.txn.read_committed_isolation.enabled
booleantrueset to true to allow transactions to use the READ COMMITTED isolation level if specified by BEGIN/SET commandsBasic/Standard/Advanced/Self-Hosted
sql.txn.repeatable_read_isolation.enabled
(alias: sql.txn.snapshot_isolation.enabled)
booleanfalseset to true to allow transactions to use the REPEATABLE READ isolation level if specified by BEGIN/SET commandsBasic/Standard/Advanced/Self-Hosted
sql.txn_fingerprint_id_cache.capacity
integer100the maximum number of txn fingerprint IDs storedBasic/Standard/Advanced/Self-Hosted
sql.vecindex.stalled_op.timeout
duration100msamount of time before other vector index workers will assist with a stalled background fixupBasic/Standard/Advanced/Self-Hosted
storage.delete_compaction_excise.enabled
booleantrueset to false to direct Pebble to not partially excise sstables in delete-only compactionsAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
storage.ingest_split.enabled
booleantrueset to false to disable ingest-time splitting that lowers write-amplificationAdvanced/Self-Hosted
storage.ingestion.value_blocks.enabled
booleantrueset to true to enable writing of value blocks in ingestion sstablesBasic/Standard/Advanced/Self-Hosted
storage.max_sync_duration
duration20smaximum duration for disk operations; any operations that take longer than this setting trigger a warning log entry or process crashAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
storage.max_sync_duration.fatal.enabled
booleantrueif true, fatal the process when a disk operation exceeds storage.max_sync_durationBasic/Standard/Advanced/Self-Hosted
storage.sstable.compression_algorithm
enumerationfastestdetermines the compression algorithm to use for Pebble stores [snappy = 1, zstd = 2, none = 3, minlz = 4, fastest = 5, balanced = 6, good = 7, fast = 8]Advanced/Self-hosted (read-write); Basic/Standard (read-only)
storage.sstable.compression_algorithm_backup_storage
enumerationfastestdetermines the compression algorithm to use when compressing sstable data blocks for backup row data storage (fast,balanced,good are experimental); [snappy = 1, zstd = 2, none = 3, minlz = 4, fastest = 5, fast = 6, balanced = 7, good = 8]Advanced/Self-hosted (read-write); Basic/Standard (read-only)
storage.sstable.compression_algorithm_backup_transport
enumerationfastestdetermines the compression algorithm to use when compressing sstable data blocks for backup transport (fast,balanced,good are experimental); [snappy = 1, zstd = 2, none = 3, minlz = 4, fastest = 5, fast = 6, balanced = 7, good = 8]Advanced/Self-hosted (read-write); Basic/Standard (read-only)
storage.unhealthy_write_duration
duration20sduration for disk write operations, beyond which the disk will be reported as unhealthy for higher layer actionsAdvanced/Self-Hosted
storage.wal_failover.unhealthy_op_threshold
duration100msthe latency of a WAL write considered unhealthy and triggers a failover to a secondary WAL locationAdvanced/Self-Hosted
timeseries.storage.enabled
booleantrueif set, periodic timeseries data is stored within the cluster; disabling is not recommended unless you are storing the data elsewhereAdvanced/Self-Hosted
timeseries.storage.resolution_10s.ttl
duration240h0m0sthe maximum age of time series data stored at the 10 second resolution. Data older than this is subject to rollup and deletion.Advanced/Self-hosted (read-write); Basic/Standard (read-only)
timeseries.storage.resolution_1m.ttl
duration240h0m0sthe maximum age of time series data stored at the 1 minute resolution. Data older than this is subject to rollup and deletion.Advanced/Self-hosted (read-write); Basic/Standard (read-only)
timeseries.storage.resolution_30m.ttl
duration2160h0m0sthe maximum age of time series data stored at the 30 minute resolution. Data older than this is subject to deletion.Advanced/Self-hosted (read-write); Basic/Standard (read-only)
trace.debug_http_endpoint.enabled
(alias: trace.debug.enable)
booleanfalseif set, traces for recent requests can be seen at https://<ui>/debug/requestsBasic/Standard/Advanced/Self-Hosted
trace.opentelemetry.collector
stringaddress of an OpenTelemetry trace collector to receive traces using the otel gRPC protocol, as <host>:<port>. If no port is specified, 4317 will be used.Basic/Standard/Advanced/Self-Hosted
trace.snapshot.rate
duration0sif non-zero, interval at which background trace snapshots are capturedBasic/Standard/Advanced/Self-Hosted
trace.span_registry.enabled
booleanfalseif set, ongoing traces can be seen at https://<ui>/#/debug/tracezBasic/Standard/Advanced/Self-Hosted
trace.zipkin.collector
stringthe address of a Zipkin instance to receive traces, as <host>:<port>. If no port is specified, 9411 will be used.Basic/Standard/Advanced/Self-Hosted
ui.database_locality_metadata.enabled
booleantrueif enabled shows extended locality data about databases and tables in DB Console which can be expensive to computeBasic/Standard/Advanced/Self-Hosted
ui.default_timezone
stringthe default timezone used to format timestamps in the uiBasic/Standard/Advanced/Self-Hosted
ui.display_timezone
enumerationetc/utcthe timezone used to format timestamps in the ui. This setting is deprecated and will be removed in a future version. Use the 'ui.default_timezone' setting instead. 'ui.default_timezone' takes precedence over this setting. [etc/utc = 0, america/new_york = 1]Basic/Standard/Advanced/Self-Hosted
version
version26.1set the active cluster version in the format '<major>.<minor>'Basic/Standard/Advanced/Self-Hosted
diff --git a/src/current/_includes/cockroach-generated/release-26.1/sql/aggregates.md b/src/current/_includes/cockroach-generated/release-26.1/sql/aggregates.md new file mode 100644 index 00000000000..1683084408b --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-26.1/sql/aggregates.md @@ -0,0 +1,589 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_agg(arg1: bool) → bool[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bool[]) → bool[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bytes) → bytes[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bytes[]) → bytes[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: date) → date[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: date[]) → date[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: decimal) → decimal[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: decimal[]) → decimal[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: float) → float[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: float[]) → float[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: inet) → inet[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: inet[]) → inet[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: int) → int[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: int[]) → int[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: interval) → interval[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: interval[]) → interval[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: string) → string[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: string[]) → string[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: time) → time[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: time[]) → time[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamp) → timestamp[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamp[]) → timestamp[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamptz) → timestamptz[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamptz[]) → timestamptz[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: uuid) → uuid[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: uuid[]) → uuid[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: anyenum) → anyenum[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: anyenum[]) → anyenum[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: box2d) → box2d[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: box2d[]) → box2d[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geography) → geography[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geography[]) → geography[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geometry) → geometry[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geometry[]) → geometry[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: jsonb) → jsonb[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: jsonb[]) → jsonb[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: ltree) → ltree[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: ltree[]) → ltree[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: oid) → oid[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: oid[]) → oid[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: pg_lsn) → pg_lsn[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: pg_lsn[]) → pg_lsn[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: refcursor) → refcursor[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: refcursor[]) → refcursor[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timetz) → timetz[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timetz[]) → timetz[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: tuple) → tuple[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: tuple[]) → tuple[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: varbit) → varbit[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: varbit[]) → varbit[][]

Aggregates the selected values into an array.

+
Immutable
array_cat_agg(arg1: bool[]) → bool[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: bytes[]) → bytes[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: date[]) → date[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: decimal[]) → decimal[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: float[]) → float[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: inet[]) → inet[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: int[]) → int[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: interval[]) → interval[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: string[]) → string[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: time[]) → time[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timestamp[]) → timestamp[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timestamptz[]) → timestamptz[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: uuid[]) → uuid[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: anyenum[]) → anyenum[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: box2d[]) → box2d[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: geography[]) → geography[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: geometry[]) → geometry[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: jsonb[]) → jsonb[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: ltree[]) → ltree[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: oid[]) → oid[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: pg_lsn[]) → pg_lsn[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: refcursor[]) → refcursor[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timetz[]) → timetz[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: tuple[]) → tuple[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: varbit[]) → varbit[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
avg(arg1: decimal) → decimal

Calculates the average of the selected values.

+
Immutable
avg(arg1: float) → float

Calculates the average of the selected values.

+
Immutable
avg(arg1: int) → decimal

Calculates the average of the selected values.

+
Immutable
avg(arg1: interval) → interval

Calculates the average of the selected values.

+
Immutable
bit_and(arg1: int) → int

Calculates the bitwise AND of all non-null input values, or null if none.

+
Immutable
bit_and(arg1: varbit) → varbit

Calculates the bitwise AND of all non-null input values, or null if none.

+
Immutable
bit_or(arg1: int) → int

Calculates the bitwise OR of all non-null input values, or null if none.

+
Immutable
bit_or(arg1: varbit) → varbit

Calculates the bitwise OR of all non-null input values, or null if none.

+
Immutable
bool_and(arg1: bool) → bool

Calculates the boolean value of ANDing all selected values.

+
Immutable
bool_or(arg1: bool) → bool

Calculates the boolean value of ORing all selected values.

+
Immutable
concat_agg(arg1: bytes) → bytes

Concatenates all selected values.

+
Immutable
concat_agg(arg1: string) → string

Concatenates all selected values.

+
Immutable
corr(arg1: decimal, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: decimal, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: decimal, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
count(arg1: anyelement) → int

Calculates the number of selected elements.

+
Immutable
count_rows() → int

Calculates the number of rows.

+
Immutable
covar_pop(arg1: decimal, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: decimal, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: decimal, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
every(arg1: bool) → bool

Calculates the boolean value of ANDing all selected values.

+
Immutable
json_agg(arg1: anyelement) → jsonb

Aggregates values as a JSON or JSONB array.

+
Stable
json_object_agg(arg1: string, arg2: anyelement) → jsonb

Aggregates values as a JSON or JSONB object.

+
Stable
jsonb_agg(arg1: anyelement) → jsonb

Aggregates values as a JSON or JSONB array.

+
Stable
jsonb_object_agg(arg1: string, arg2: anyelement) → jsonb

Aggregates values as a JSON or JSONB object.

+
Stable
max(arg1: bool) → bool

Identifies the maximum selected value.

+
Immutable
max(arg1: bytes) → bytes

Identifies the maximum selected value.

+
Immutable
max(arg1: date) → date

Identifies the maximum selected value.

+
Immutable
max(arg1: decimal) → decimal

Identifies the maximum selected value.

+
Immutable
max(arg1: float) → float

Identifies the maximum selected value.

+
Immutable
max(arg1: inet) → inet

Identifies the maximum selected value.

+
Immutable
max(arg1: int) → int

Identifies the maximum selected value.

+
Immutable
max(arg1: interval) → interval

Identifies the maximum selected value.

+
Immutable
max(arg1: string) → string

Identifies the maximum selected value.

+
Immutable
max(arg1: time) → time

Identifies the maximum selected value.

+
Immutable
max(arg1: timestamp) → timestamp

Identifies the maximum selected value.

+
Immutable
max(arg1: timestamptz) → timestamptz

Identifies the maximum selected value.

+
Immutable
max(arg1: uuid) → uuid

Identifies the maximum selected value.

+
Immutable
max(arg1: anyenum) → anyenum

Identifies the maximum selected value.

+
Immutable
max(arg1: box2d) → box2d

Identifies the maximum selected value.

+
Immutable
max(arg1: collatedstring{*}) → collatedstring{*}

Identifies the maximum selected value.

+
Immutable
max(arg1: geography) → geography

Identifies the maximum selected value.

+
Immutable
max(arg1: geometry) → geometry

Identifies the maximum selected value.

+
Immutable
max(arg1: jsonb) → jsonb

Identifies the maximum selected value.

+
Immutable
max(arg1: ltree) → ltree

Identifies the maximum selected value.

+
Immutable
max(arg1: oid) → oid

Identifies the maximum selected value.

+
Immutable
max(arg1: pg_lsn) → pg_lsn

Identifies the maximum selected value.

+
Immutable
max(arg1: timetz) → timetz

Identifies the maximum selected value.

+
Immutable
max(arg1: varbit) → varbit

Identifies the maximum selected value.

+
Immutable
min(arg1: bool) → bool

Identifies the minimum selected value.

+
Immutable
min(arg1: bytes) → bytes

Identifies the minimum selected value.

+
Immutable
min(arg1: date) → date

Identifies the minimum selected value.

+
Immutable
min(arg1: decimal) → decimal

Identifies the minimum selected value.

+
Immutable
min(arg1: float) → float

Identifies the minimum selected value.

+
Immutable
min(arg1: inet) → inet

Identifies the minimum selected value.

+
Immutable
min(arg1: int) → int

Identifies the minimum selected value.

+
Immutable
min(arg1: interval) → interval

Identifies the minimum selected value.

+
Immutable
min(arg1: string) → string

Identifies the minimum selected value.

+
Immutable
min(arg1: time) → time

Identifies the minimum selected value.

+
Immutable
min(arg1: timestamp) → timestamp

Identifies the minimum selected value.

+
Immutable
min(arg1: timestamptz) → timestamptz

Identifies the minimum selected value.

+
Immutable
min(arg1: uuid) → uuid

Identifies the minimum selected value.

+
Immutable
min(arg1: anyenum) → anyenum

Identifies the minimum selected value.

+
Immutable
min(arg1: box2d) → box2d

Identifies the minimum selected value.

+
Immutable
min(arg1: collatedstring{*}) → collatedstring{*}

Identifies the minimum selected value.

+
Immutable
min(arg1: geography) → geography

Identifies the minimum selected value.

+
Immutable
min(arg1: geometry) → geometry

Identifies the minimum selected value.

+
Immutable
min(arg1: jsonb) → jsonb

Identifies the minimum selected value.

+
Immutable
min(arg1: ltree) → ltree

Identifies the minimum selected value.

+
Immutable
min(arg1: oid) → oid

Identifies the minimum selected value.

+
Immutable
min(arg1: pg_lsn) → pg_lsn

Identifies the minimum selected value.

+
Immutable
min(arg1: timetz) → timetz

Identifies the minimum selected value.

+
Immutable
min(arg1: varbit) → varbit

Identifies the minimum selected value.

+
Immutable
percentile_cont(arg1: float) → float

Continuous percentile: returns a float corresponding to the specified fraction in the ordering, interpolating between adjacent input floats if needed.

+
Immutable
percentile_cont(arg1: float) → interval

Continuous percentile: returns an interval corresponding to the specified fraction in the ordering, interpolating between adjacent input intervals if needed.

+
Immutable
percentile_cont(arg1: float[]) → float[]

Continuous percentile: returns floats corresponding to the specified fractions in the ordering, interpolating between adjacent input floats if needed.

+
Immutable
percentile_cont(arg1: float[]) → interval[]

Continuous percentile: returns intervals corresponding to the specified fractions in the ordering, interpolating between adjacent input intervals if needed.

+
Immutable
percentile_disc(arg1: float) → anyelement

Discrete percentile: returns the first input value whose position in the ordering equals or exceeds the specified fraction.

+
Immutable
percentile_disc(arg1: float[]) → anyelement

Discrete percentile: returns input values whose position in the ordering equals or exceeds the specified fractions.

+
Immutable
regr_avgx(arg1: decimal, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: decimal, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: decimal, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_count(arg1: decimal, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: decimal, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: decimal, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_intercept(arg1: decimal, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: decimal, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: decimal, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_r2(arg1: decimal, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: decimal, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: decimal, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_slope(arg1: decimal, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: decimal, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: decimal, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_sxx(arg1: decimal, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: decimal, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: decimal, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
sqrdiff(arg1: decimal) → decimal

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
sqrdiff(arg1: float) → float

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
sqrdiff(arg1: int) → decimal

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
st_collect(arg1: geometry) → geometry

Collects geometries into a GeometryCollection or multi-type as appropriate.

+
Immutable
st_extent(arg1: geometry) → box2d

Forms a Box2D that encapsulates all provided geometries.

+
Immutable
st_makeline(arg1: geometry) → geometry

Forms a LineString from Point, MultiPoint or LineStrings. Other shapes will be ignored.

+
Immutable
st_memcollect(arg1: geometry) → geometry

Collects geometries into a GeometryCollection or multi-type as appropriate.

+
Immutable
st_memunion(arg1: geometry) → geometry

Applies a spatial union to the geometries provided.

+
Immutable
st_union(arg1: geometry) → geometry

Applies a spatial union to the geometries provided.

+
Immutable
stddev(arg1: decimal) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev(arg1: float) → float

Calculates the standard deviation of the selected values.

+
Immutable
stddev(arg1: int) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: decimal) → decimal

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: float) → float

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: int) → decimal

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: decimal) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: float) → float

Calculates the standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: int) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
string_agg(arg1: bytes, arg2: bytes) → bytes

Concatenates all selected values using the provided delimiter.

+
Immutable
string_agg(arg1: string, arg2: string) → string

Concatenates all selected values using the provided delimiter.

+
Immutable
sum(arg1: decimal) → decimal

Calculates the sum of the selected values.

+
Immutable
sum(arg1: float) → float

Calculates the sum of the selected values.

+
Immutable
sum(arg1: int) → decimal

Calculates the sum of the selected values.

+
Immutable
sum(arg1: interval) → interval

Calculates the sum of the selected values.

+
Immutable
sum_int(arg1: int) → int

Calculates the sum of the selected values.

+
Immutable
var_pop(arg1: decimal) → decimal

Calculates the population variance of the selected values.

+
Immutable
var_pop(arg1: float) → float

Calculates the population variance of the selected values.

+
Immutable
var_pop(arg1: int) → decimal

Calculates the population variance of the selected values.

+
Immutable
var_samp(arg1: decimal) → decimal

Calculates the variance of the selected values.

+
Immutable
var_samp(arg1: float) → float

Calculates the variance of the selected values.

+
Immutable
var_samp(arg1: int) → decimal

Calculates the variance of the selected values.

+
Immutable
variance(arg1: decimal) → decimal

Calculates the variance of the selected values.

+
Immutable
variance(arg1: float) → float

Calculates the variance of the selected values.

+
Immutable
variance(arg1: int) → decimal

Calculates the variance of the selected values.

+
Immutable
xor_agg(arg1: bytes) → bytes

Calculates the bitwise XOR of the selected values.

+
Immutable
xor_agg(arg1: int) → int

Calculates the bitwise XOR of the selected values.

+
Immutable
+ diff --git a/src/current/_includes/cockroach-generated/release-26.1/sql/functions.md b/src/current/_includes/cockroach-generated/release-26.1/sql/functions.md new file mode 100644 index 00000000000..a06f3f3ae09 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-26.1/sql/functions.md @@ -0,0 +1,3680 @@ +### Array functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_append(array: bool[], elem: bool) → bool[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: bytes[], elem: bytes) → bytes[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: date[], elem: date) → date[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: decimal[], elem: decimal) → decimal[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: float[], elem: float) → float[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: inet[], elem: inet) → inet[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: int[], elem: int) → int[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: interval[], elem: interval) → interval[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: string[], elem: string) → string[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: time[], elem: time) → time[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timestamp[], elem: timestamp) → timestamp[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timestamptz[], elem: timestamptz) → timestamptz[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: uuid[], elem: uuid) → uuid[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: anyenum[], elem: anyenum) → anyenum[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: box2d[], elem: box2d) → box2d[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: geography[], elem: geography) → geography[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: geometry[], elem: geometry) → geometry[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: jsonb[], elem: jsonb) → jsonb[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: ltree[], elem: ltree) → ltree[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: oid[], elem: oid) → oid[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: pg_lsn[], elem: pg_lsn) → pg_lsn[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: refcursor[], elem: refcursor) → refcursor[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timetz[], elem: timetz) → timetz[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: tuple[], elem: tuple) → tuple[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: varbit[], elem: varbit) → varbit[]

Appends elem to array, returning the result.

+
Immutable
array_cat(left: bool[], right: bool[]) → bool[]

Appends two arrays.

+
Immutable
array_cat(left: bytes[], right: bytes[]) → bytes[]

Appends two arrays.

+
Immutable
array_cat(left: date[], right: date[]) → date[]

Appends two arrays.

+
Immutable
array_cat(left: decimal[], right: decimal[]) → decimal[]

Appends two arrays.

+
Immutable
array_cat(left: float[], right: float[]) → float[]

Appends two arrays.

+
Immutable
array_cat(left: inet[], right: inet[]) → inet[]

Appends two arrays.

+
Immutable
array_cat(left: int[], right: int[]) → int[]

Appends two arrays.

+
Immutable
array_cat(left: interval[], right: interval[]) → interval[]

Appends two arrays.

+
Immutable
array_cat(left: string[], right: string[]) → string[]

Appends two arrays.

+
Immutable
array_cat(left: time[], right: time[]) → time[]

Appends two arrays.

+
Immutable
array_cat(left: timestamp[], right: timestamp[]) → timestamp[]

Appends two arrays.

+
Immutable
array_cat(left: timestamptz[], right: timestamptz[]) → timestamptz[]

Appends two arrays.

+
Immutable
array_cat(left: uuid[], right: uuid[]) → uuid[]

Appends two arrays.

+
Immutable
array_cat(left: anyenum[], right: anyenum[]) → anyenum[]

Appends two arrays.

+
Immutable
array_cat(left: box2d[], right: box2d[]) → box2d[]

Appends two arrays.

+
Immutable
array_cat(left: geography[], right: geography[]) → geography[]

Appends two arrays.

+
Immutable
array_cat(left: geometry[], right: geometry[]) → geometry[]

Appends two arrays.

+
Immutable
array_cat(left: jsonb[], right: jsonb[]) → jsonb[]

Appends two arrays.

+
Immutable
array_cat(left: ltree[], right: ltree[]) → ltree[]

Appends two arrays.

+
Immutable
array_cat(left: oid[], right: oid[]) → oid[]

Appends two arrays.

+
Immutable
array_cat(left: pg_lsn[], right: pg_lsn[]) → pg_lsn[]

Appends two arrays.

+
Immutable
array_cat(left: refcursor[], right: refcursor[]) → refcursor[]

Appends two arrays.

+
Immutable
array_cat(left: timetz[], right: timetz[]) → timetz[]

Appends two arrays.

+
Immutable
array_cat(left: tuple[], right: tuple[]) → tuple[]

Appends two arrays.

+
Immutable
array_cat(left: varbit[], right: varbit[]) → varbit[]

Appends two arrays.

+
Immutable
array_length(input: anyelement[], array_dimension: int) → int

Calculates the length of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
array_lower(input: anyelement[], array_dimension: int) → int

Calculates the minimum value of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
array_position(array: bool[], elem: bool) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: bool[], elem: bool, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: bytes[], elem: bytes) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: bytes[], elem: bytes, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: date[], elem: date) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: date[], elem: date, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: decimal[], elem: decimal) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: decimal[], elem: decimal, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: float[], elem: float) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: float[], elem: float, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: inet[], elem: inet) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: inet[], elem: inet, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: int[], elem: int) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: int[], elem: int, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: interval[], elem: interval) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: interval[], elem: interval, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: string[], elem: string) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: string[], elem: string, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: time[], elem: time) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: time[], elem: time, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: timestamp[], elem: timestamp) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timestamp[], elem: timestamp, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: timestamptz[], elem: timestamptz) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timestamptz[], elem: timestamptz, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: uuid[], elem: uuid) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: uuid[], elem: uuid, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: anyenum[], elem: anyenum) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: anyenum[], elem: anyenum, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: box2d[], elem: box2d) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: box2d[], elem: box2d, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: geography[], elem: geography) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: geography[], elem: geography, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: geometry[], elem: geometry) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: geometry[], elem: geometry, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: jsonb[], elem: jsonb) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: jsonb[], elem: jsonb, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: ltree[], elem: ltree) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: ltree[], elem: ltree, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: oid[], elem: oid) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: oid[], elem: oid, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: pg_lsn[], elem: pg_lsn) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: pg_lsn[], elem: pg_lsn, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: refcursor[], elem: refcursor) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: refcursor[], elem: refcursor, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: timetz[], elem: timetz) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timetz[], elem: timetz, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: tuple[], elem: tuple) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: tuple[], elem: tuple, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_position(array: varbit[], elem: varbit) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: varbit[], elem: varbit, start: int) → int

Return the index of the first occurrence of elem in array, with the search beginning at start index.

+
Immutable
array_positions(array: bool[], elem: bool) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: bytes[], elem: bytes) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: date[], elem: date) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: decimal[], elem: decimal) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: float[], elem: float) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: inet[], elem: inet) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: int[], elem: int) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: interval[], elem: interval) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: string[], elem: string) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: time[], elem: time) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timestamp[], elem: timestamp) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timestamptz[], elem: timestamptz) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: uuid[], elem: uuid) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: anyenum[], elem: anyenum) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: box2d[], elem: box2d) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: geography[], elem: geography) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: geometry[], elem: geometry) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: jsonb[], elem: jsonb) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: ltree[], elem: ltree) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: oid[], elem: oid) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: pg_lsn[], elem: pg_lsn) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: refcursor[], elem: refcursor) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timetz[], elem: timetz) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: tuple[], elem: tuple) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: varbit[], elem: varbit) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_prepend(elem: bool, array: bool[]) → bool[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: bytes, array: bytes[]) → bytes[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: date, array: date[]) → date[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: decimal, array: decimal[]) → decimal[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: float, array: float[]) → float[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: inet, array: inet[]) → inet[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: int, array: int[]) → int[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: interval, array: interval[]) → interval[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: string, array: string[]) → string[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: time, array: time[]) → time[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timestamp, array: timestamp[]) → timestamp[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timestamptz, array: timestamptz[]) → timestamptz[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: uuid, array: uuid[]) → uuid[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: anyenum, array: anyenum[]) → anyenum[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: box2d, array: box2d[]) → box2d[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: geography, array: geography[]) → geography[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: geometry, array: geometry[]) → geometry[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: jsonb, array: jsonb[]) → jsonb[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: ltree, array: ltree[]) → ltree[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: oid, array: oid[]) → oid[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: pg_lsn, array: pg_lsn[]) → pg_lsn[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: refcursor, array: refcursor[]) → refcursor[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timetz, array: timetz[]) → timetz[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: tuple, array: tuple[]) → tuple[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: varbit, array: varbit[]) → varbit[]

Prepends elem to array, returning the result.

+
Immutable
array_remove(array: bool[], elem: bool) → bool[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: bytes[], elem: bytes) → bytes[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: date[], elem: date) → date[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: decimal[], elem: decimal) → decimal[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: float[], elem: float) → float[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: inet[], elem: inet) → inet[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: int[], elem: int) → int[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: interval[], elem: interval) → interval[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: string[], elem: string) → string[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: time[], elem: time) → time[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timestamp[], elem: timestamp) → timestamp[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timestamptz[], elem: timestamptz) → timestamptz[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: uuid[], elem: uuid) → uuid[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: anyenum[], elem: anyenum) → anyenum[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: box2d[], elem: box2d) → box2d[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: geography[], elem: geography) → geography[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: geometry[], elem: geometry) → geometry[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: jsonb[], elem: jsonb) → jsonb[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: ltree[], elem: ltree) → ltree[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: oid[], elem: oid) → oid[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: pg_lsn[], elem: pg_lsn) → pg_lsn[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: refcursor[], elem: refcursor) → refcursor[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timetz[], elem: timetz) → timetz[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: tuple[], elem: tuple) → tuple[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: varbit[], elem: varbit) → varbit[]

Remove from array all elements equal to elem.

+
Immutable
array_replace(array: bool[], toreplace: bool, replacewith: bool) → bool[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: bytes[], toreplace: bytes, replacewith: bytes) → bytes[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: date[], toreplace: date, replacewith: date) → date[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: decimal[], toreplace: decimal, replacewith: decimal) → decimal[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: float[], toreplace: float, replacewith: float) → float[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: inet[], toreplace: inet, replacewith: inet) → inet[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: int[], toreplace: int, replacewith: int) → int[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: interval[], toreplace: interval, replacewith: interval) → interval[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: string[], toreplace: string, replacewith: string) → string[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: time[], toreplace: time, replacewith: time) → time[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timestamp[], toreplace: timestamp, replacewith: timestamp) → timestamp[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timestamptz[], toreplace: timestamptz, replacewith: timestamptz) → timestamptz[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: uuid[], toreplace: uuid, replacewith: uuid) → uuid[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: anyenum[], toreplace: anyenum, replacewith: anyenum) → anyenum[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: box2d[], toreplace: box2d, replacewith: box2d) → box2d[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: geography[], toreplace: geography, replacewith: geography) → geography[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: geometry[], toreplace: geometry, replacewith: geometry) → geometry[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: jsonb[], toreplace: jsonb, replacewith: jsonb) → jsonb[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: ltree[], toreplace: ltree, replacewith: ltree) → ltree[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: oid[], toreplace: oid, replacewith: oid) → oid[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: pg_lsn[], toreplace: pg_lsn, replacewith: pg_lsn) → pg_lsn[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: refcursor[], toreplace: refcursor, replacewith: refcursor) → refcursor[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timetz[], toreplace: timetz, replacewith: timetz) → timetz[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: tuple[], toreplace: tuple, replacewith: tuple) → tuple[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: varbit[], toreplace: varbit, replacewith: varbit) → varbit[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_to_string(input: anyelement[], delim: string) → string

Join an array into a string with a delimiter.

+
Stable
array_to_string(input: anyelement[], delimiter: string, null: string) → string

Join an array into a string with a delimiter, replacing NULLs with a null string.

+
Stable
array_upper(input: anyelement[], array_dimension: int) → int

Calculates the maximum value of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
cardinality(input: anyelement[]) → int

Calculates the number of elements contained in input

+
Immutable
jsonb_array_to_string_array(input: jsonb) → string[]

Convert a JSONB array into a string array.

+
Immutable
string_to_array(str: string, delimiter: string) → string[]

Split a string into components on a delimiter.

+
Immutable
string_to_array(str: string, delimiter: string, null: string) → string[]

Split a string into components on a delimiter with a specified string to consider NULL.

+
Immutable
+ +### BOOL functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
ilike_escape(unescaped: string, pattern: string, escape: string) → bool

Matches case insensitively unescaped with pattern using escape as an escape token.

+
Immutable
inet_contained_by_or_equals(val: inet, container: inet) → bool

Test for subnet inclusion or equality, using only the network parts of the addresses. The host part of the addresses is ignored.

+
Immutable
inet_contains_or_equals(container: inet, val: inet) → bool

Test for subnet inclusion or equality, using only the network parts of the addresses. The host part of the addresses is ignored.

+
Immutable
inet_same_family(val: inet, val: inet) → bool

Checks if two IP addresses are of the same IP family.

+
Immutable
like_escape(unescaped: string, pattern: string, escape: string) → bool

Matches unescaped with pattern using escape as an escape token.

+
Immutable
not_ilike_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped not matches case insensitively with pattern using escape as an escape token.

+
Immutable
not_like_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped not matches with pattern using escape as an escape token.

+
Immutable
not_similar_to_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped not matches with pattern using escape as an escape token.

+
Immutable
+ +### Comparison functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
greatest(anyelement...) → anyelement

Returns the element with the greatest value.

+
Immutable
least(anyelement...) → anyelement

Returns the element with the lowest value.

+
Immutable
num_nonnulls(any...) → int

Returns the number of nonnull arguments.

+
Immutable
num_nulls(any...) → int

Returns the number of null arguments.

+
Immutable
+ +### Cryptographic functions + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
crypt(password: string, salt: string) → string

Generates a hash based on a password and salt. The hash algorithm and number of rounds if applicable are encoded in the salt.

+
Immutable
decrypt(data: bytes, key: bytes, type: string) → bytes

Decrypt data with key using the cipher method specified by type.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
decrypt_iv(data: bytes, key: bytes, iv: bytes, type: string) → bytes

Decrypt data with key using the cipher method specified by type. If the mode is CBC, the provided iv will be used. Otherwise, it will be ignored.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
digest(data: bytes, type: string) → bytes

Computes a binary hash of the given data. type is the algorithm to use (md5, sha1, sha224, sha256, sha384, or sha512).

+
Immutable
digest(data: string, type: string) → bytes

Computes a binary hash of the given data. type is the algorithm to use (md5, sha1, sha224, sha256, sha384, or sha512).

+
Immutable
encrypt(data: bytes, key: bytes, type: string) → bytes

Encrypt data with key using the cipher method specified by type.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
encrypt_iv(data: bytes, key: bytes, iv: bytes, type: string) → bytes

Encrypt data with key using the cipher method specified by type. If the mode is CBC, the provided iv will be used. Otherwise, it will be ignored.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
gen_random_bytes(count: int) → bytes

Returns count cryptographically strong random bytes. At most 1024 bytes can be extracted at a time.

+
Volatile
gen_salt(type: string) → string

Generates a salt for input into the crypt function using the default number of rounds.

+
Volatile
gen_salt(type: string, iter_count: int) → string

Generates a salt for input into the crypt function using iter_count number of rounds.

+
Volatile
hmac(data: bytes, key: bytes, type: string) → bytes

Calculates hashed MAC for data with key key. type is the same as in digest().

+
Immutable
hmac(data: string, key: string, type: string) → bytes

Calculates hashed MAC for data with key key. type is the same as in digest().

+
Immutable
+ +### DECIMAL functions + + + + + +
Function → ReturnsDescriptionVolatility
hlc_to_timestamp(hlc: decimal) → timestamptz

Returns a TimestampTZ representation of a CockroachDB HLC in decimal form.

+

Note that a TimestampTZ has less precision than a CockroachDB HLC. It is intended as +a convenience function to display HLCs in a print-friendly form. Use the decimal +value if you rely on the HLC for accuracy.

+
Immutable
+ +### Date and time functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
age(end: timestamptz, begin: timestamptz) → interval

Calculates the interval between begin and end, normalized into years, months and days.

+

Note this may not be an accurate time span since years and months are normalized +from days, and years and months are out of context. To avoid normalizing days into +months and years, use the timestamptz subtraction operator.

+
Immutable
age(val: timestamptz) → interval

Calculates the interval between val and the current time, normalized into years, months and days.

+

Note this may not be an accurate time span since years and months are normalized +from days, and years and months are out of context. To avoid normalizing days into +months and years, use now() - timestamptz.

+
Stable
clock_timestamp() → timestamp

Returns the current system time on one of the cluster nodes.

+
Volatile
clock_timestamp() → timestamptz

Returns the current system time on one of the cluster nodes.

+
Volatile
current_date() → date

Returns the date of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
current_timestamp(precision: int) → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp(precision: int) → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp(precision: int) → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
date_part(element: string, input: date) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: interval) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, +month, day, hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: time) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: timestamp) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: timestamptz) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Stable
date_part(element: string, input: timetz) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Immutable
date_trunc(element: string, input: date) → timestamptz

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: interval) → interval

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: time) → interval

Truncates input to precision element. Sets all fields that are less +significant than element to zero.

+

Compatible elements: hour, minute, second, millisecond, microsecond.

+
Immutable
date_trunc(element: string, input: timestamp) → timestamp

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Immutable
date_trunc(element: string, input: timestamptz) → timestamptz

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: timestamptz, timezone: string) → timestamptz

Truncates input to precision element in the specified timezone. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
experimental_follower_read_timestamp() → timestamptz

Same as follower_read_timestamp. This name is deprecated.

+
Volatile
experimental_strftime(input: date, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strftime(input: timestamp, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strftime(input: timestamptz, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strptime(input: string, format: string) → timestamptz

Returns input as a timestamptz using format (which uses standard strptime formatting).

+
Immutable
extract(element: string, input: date) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: interval) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, +month, day, hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: time) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: timestamp) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: timestamptz) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Stable
extract(element: string, input: timetz) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Immutable
extract_duration(element: string, input: interval) → int

Extracts element from input. +Compatible elements: hour, minute, second, millisecond, microsecond. +This is deprecated in favor of extract which supports duration.

+
Immutable
follower_read_timestamp() → timestamptz

Returns a timestamp which is very likely to be safe to perform +against a follower replica.

+

This function is intended to be used with an AS OF SYSTEM TIME clause to perform +historical reads against a time which is recent but sufficiently old for reads +to be performed against the closest replica as opposed to the currently +leaseholder for a given range.

+

Note that this function requires an enterprise license on a CCL distribution to +return a result that is less likely the closest replica. It is otherwise +hardcoded as -4.8s from the statement time, which may not result in reading from the +nearest replica.

+
Volatile
localtimestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtimestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp(precision: int) → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp(precision: int) → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtimestamp(precision: int) → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
make_date(year: int, month: int, day: int) → date

Create date (formatted according to ISO 8601) from year, month, and day fields (negative years signify BC).

+
Immutable
make_timestamp(year: int, month: int, day: int, hour: int, min: int, sec: float) → timestamp

Create timestamp (formatted according to ISO 8601) from year, month, day, hour, minute, and seconds fields (negative years signify BC).

+
Immutable
make_timestamptz(year: int, month: int, day: int, hour: int, min: int, sec: float) → timestamptz

Create timestamp (formatted according to ISO 8601) with time zone from year, month, day, hour, minute and seconds fields (negative years signify BC). If timezone is not specified, the current time zone is used.

+
Stable
make_timestamptz(year: int, month: int, day: int, hour: int, min: int, sec: float, timezone: string) → timestamptz

Create timestamp (formatted according to ISO 8601) with time zone from year, month, day, hour, minute and seconds fields (negative years signify BC). If timezone is not specified, the current time zone is used.

+
Stable
now() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
now() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
now() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
overlaps(s1: date, e1: date, s1: date, e2: date) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: date, e1: interval, s1: date, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: time, e1: interval, s1: time, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: time, e1: time, s1: time, e2: time) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamp, e1: interval, s1: timestamp, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamp, e1: timestamp, s1: timestamp, e2: timestamp) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamptz, e1: interval, s1: timestamptz, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Stable
overlaps(s1: timestamptz, e1: timestamptz, s1: timestamptz, e2: timestamptz) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timetz, e1: interval, s1: timetz, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timetz, e1: timetz, s1: timetz, e2: timetz) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
statement_timestamp() → timestamp

Returns the start time of the current statement.

+
Stable
statement_timestamp() → timestamptz

Returns the start time of the current statement.

+
Stable
strftime(input: date, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strftime(input: timestamp, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strftime(input: timestamptz, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strptime(input: string, format: string) → timestamptz

Returns input as a timestamptz using format (which uses standard strptime formatting).

+
Immutable
timeofday() → string

Returns the current system time on one of the cluster nodes as a string.

+
Stable
timezone(timezone: string, time: time) → timetz

Treat given time without time zone as located in the specified time zone.

+
Stable
timezone(timezone: string, timestamp: timestamp) → timestamptz

Treat given time stamp without time zone as located in the specified time zone.

+
Immutable
timezone(timezone: string, timestamptz: timestamptz) → timestamp

Convert given time stamp with time zone to the new time zone, with no time zone designation.

+
Immutable
timezone(timezone: string, timestamptz_string: string) → timestamp

Convert given time stamp with time zone to the new time zone, with no time zone designation.

+
Stable
timezone(timezone: string, timetz: timetz) → timetz

Convert given time with time zone to the new time zone.

+
Stable
to_char(date: date) → string

Convert a date to a string assuming the ISO, MDY DateStyle.

+
Immutable
to_char(date: date, format: string) → string

Convert a date to a string using the given format.

+
Stable
to_char(interval: interval) → string

Convert an interval to a string assuming the Postgres IntervalStyle.

+
Immutable
to_char(interval: interval, format: string) → string

Convert an interval to a string using the given format.

+
Stable
to_char(timestamp: timestamp) → string

Convert a timestamp to a string assuming the ISO, MDY DateStyle.

+
Immutable
to_char(timestamp: timestamp, format: string) → string

Convert a timestamp to a string using the given format.

+
Stable
to_char(timestamptz: timestamptz, format: string) → string

Convert a timestamp with time zone to a string using the given format.

+
Stable
to_timestamp(timestamp: float) → timestamptz

Convert Unix epoch (seconds since 1970-01-01 00:00:00+00) to timestamp with time zone.

+
Immutable
transaction_timestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
transaction_timestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
transaction_timestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
with_max_staleness(max_staleness: interval) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp within the staleness +bound that allows execution of the reads at the nearest available replica without blocking.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_max_staleness(max_staleness: interval, nearest_only: bool) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp within the staleness +bound that allows execution of the reads at the nearest available replica without blocking.

+

If nearest_only is set to true, reads that cannot be served using the nearest +available replica will error.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_min_timestamp(min_timestamp: timestamptz) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp before the min_timestamp +that allows execution of the reads at the nearest available replica without blocking.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_min_timestamp(min_timestamp: timestamptz, nearest_only: bool) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp before the min_timestamp +that allows execution of the reads at the nearest available replica without blocking.

+

If nearest_only is set to true, reads that cannot be served using the nearest +available replica will error.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
+ +### Enum functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
enum_first(val: anyenum) → anyenum

Returns the first value of the input enum type.

+
Stable
enum_last(val: anyenum) → anyenum

Returns the last value of the input enum type.

+
Stable
enum_range(lower: anyenum, upper: anyenum) → anyenum[]

Returns all values of the input enum in an ordered array between the two arguments (inclusive).

+
Stable
enum_range(val: anyenum) → anyenum[]

Returns all values of the input enum in an ordered array.

+
Stable
+ +### FLOAT functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
abs(val: decimal) → decimal

Calculates the absolute value of val.

+
Immutable
abs(val: float) → float

Calculates the absolute value of val.

+
Immutable
abs(val: int) → int

Calculates the absolute value of val.

+
Immutable
acos(val: float) → float

Calculates the inverse cosine of val.

+
Immutable
acosd(val: float) → float

Calculates the inverse cosine of val with the result in degrees

+
Immutable
acosh(val: float) → float

Calculates the inverse hyperbolic cosine of val.

+
Immutable
asin(val: float) → float

Calculates the inverse sine of val.

+
Immutable
asind(val: float) → float

Calculates the inverse sine of val with the result in degrees.

+
Immutable
asinh(val: float) → float

Calculates the inverse hyperbolic sine of val.

+
Immutable
atan(val: float) → float

Calculates the inverse tangent of val.

+
Immutable
atan2(x: float, y: float) → float

Calculates the inverse tangent of x/y.

+
Immutable
atan2d(x: float, y: float) → float

Calculates the inverse tangent of x/y with the result in degrees

+
Immutable
atand(val: float) → float

Calculates the inverse tangent of val with the result in degrees.

+
Immutable
atanh(val: float) → float

Calculates the inverse hyperbolic tangent of val.

+
Immutable
cbrt(val: decimal) → decimal

Calculates the cube root (∛) of val.

+
Immutable
cbrt(val: float) → float

Calculates the cube root (∛) of val.

+
Immutable
ceil(val: decimal) → decimal

Calculates the smallest integer not smaller than val.

+
Immutable
ceil(val: float) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceil(val: int) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: decimal) → decimal

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: float) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: int) → float

Calculates the smallest integer not smaller than val.

+
Immutable
cos(val: float) → float

Calculates the cosine of val.

+
Immutable
cosd(val: float) → float

Calculates the cosine of val where val is in degrees.

+
Immutable
cosh(val: float) → float

Calculates the hyperbolic cosine of val.

+
Immutable
cot(val: float) → float

Calculates the cotangent of val.

+
Immutable
cotd(val: float) → float

Calculates the cotangent of val where val is in degrees.

+
Immutable
degrees(val: float) → float

Converts val as a radian value to a degree value.

+
Immutable
div(x: decimal, y: decimal) → decimal

Calculates the integer quotient of x/y.

+
Immutable
div(x: float, y: float) → float

Calculates the integer quotient of x/y.

+
Immutable
div(x: int, y: int) → int

Calculates the integer quotient of x/y.

+
Immutable
exp(val: decimal) → decimal

Calculates e ^ val.

+
Immutable
exp(val: float) → float

Calculates e ^ val.

+
Immutable
floor(val: decimal) → decimal

Calculates the largest integer not greater than val.

+
Immutable
floor(val: float) → float

Calculates the largest integer not greater than val.

+
Immutable
floor(val: int) → float

Calculates the largest integer not greater than val.

+
Immutable
isnan(val: decimal) → bool

Returns true if val is NaN, false otherwise.

+
Immutable
isnan(val: float) → bool

Returns true if val is NaN, false otherwise.

+
Immutable
ln(val: decimal) → decimal

Calculates the natural log of val.

+
Immutable
ln(val: float) → float

Calculates the natural log of val.

+
Immutable
log(b: decimal, x: decimal) → decimal

Calculates the base b log of x.

+
Immutable
log(b: float, x: float) → float

Calculates the base b log of x.

+
Immutable
log(val: decimal) → decimal

Calculates the base 10 log of val.

+
Immutable
log(val: float) → float

Calculates the base 10 log of val.

+
Immutable
mod(x: decimal, y: decimal) → decimal

Calculates x%y.

+
Immutable
mod(x: float, y: float) → float

Calculates x%y.

+
Immutable
mod(x: int, y: int) → int

Calculates x%y.

+
Immutable
pi() → float

Returns the value for pi (3.141592653589793).

+
Immutable
pow(x: decimal, y: decimal) → decimal

Calculates x^y.

+
Immutable
pow(x: float, y: float) → float

Calculates x^y.

+
Immutable
pow(x: int, y: int) → int

Calculates x^y.

+
Immutable
power(x: decimal, y: decimal) → decimal

Calculates x^y.

+
Immutable
power(x: float, y: float) → float

Calculates x^y.

+
Immutable
power(x: int, y: int) → int

Calculates x^y.

+
Immutable
radians(val: float) → float

Converts val as a degree value to a radians value.

+
Immutable
random() → float

Returns a random floating-point number between 0 (inclusive) and 1 (exclusive). Note that the value contains at most 53 bits of randomness.

+
Volatile
round(input: decimal, decimal_accuracy: int) → decimal

Keeps decimal_accuracy number of figures to the right of the zero position in input using half away from zero rounding. If decimal_accuracy is not in the range -2^31…(2^31-1), the results are undefined.

+
Immutable
round(input: float, decimal_accuracy: int) → float

Keeps decimal_accuracy number of figures to the right of the zero position in input using half to even (banker’s) rounding.

+
Immutable
round(val: decimal) → decimal

Rounds val to the nearest integer, half away from zero: round(+/-2.4) = +/-2, round(+/-2.5) = +/-3.

+
Immutable
round(val: float) → float

Rounds val to the nearest integer using half to even (banker’s) rounding.

+
Immutable
setseed(seed: float) → void

Sets the seed for subsequent random() calls in this session (value between -1.0 and 1.0, inclusive). There are no guarantees as to how this affects the seed of random() calls that appear in the same query as setseed().

+
Volatile
sign(val: decimal) → decimal

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sign(val: float) → float

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sign(val: int) → int

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sin(val: float) → float

Calculates the sine of val.

+
Immutable
sind(val: float) → float

Calculates the sine of val where val is in degrees.

+
Immutable
sinh(val: float) → float

Calculates the hyperbolic sine of val.

+
Immutable
sqrt(val: decimal) → decimal

Calculates the square root of val.

+
Immutable
sqrt(val: float) → float

Calculates the square root of val.

+
Immutable
tan(val: float) → float

Calculates the tangent of val.

+
Immutable
tand(val: float) → float

Calculates the tangent of val where val is in degrees.

+
Immutable
tanh(val: float) → float

Calculates the hyperbolic tangent of val.

+
Immutable
trunc(val: decimal) → decimal

Truncates the decimal values of val.

+
Immutable
trunc(val: decimal, scale: int) → decimal

Truncate val to scale decimal places

+
Immutable
trunc(val: float) → float

Truncates the decimal values of val.

+
Immutable
+ +### Full Text Search functions + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
phraseto_tsquery(config: string, text: string) → tsquery

Converts text to a tsquery, normalizing words according to the specified configuration. The <-> operator is inserted between each token in the input.

+
Immutable
phraseto_tsquery(text: string) → tsquery

Converts text to a tsquery, normalizing words according to the default configuration. The <-> operator is inserted between each token in the input.

+
Stable
plainto_tsquery(config: string, text: string) → tsquery

Converts text to a tsquery, normalizing words according to the specified configuration. The & operator is inserted between each token in the input.

+
Immutable
plainto_tsquery(text: string) → tsquery

Converts text to a tsquery, normalizing words according to the default configuration. The & operator is inserted between each token in the input.

+
Stable
to_tsquery(config: string, text: string) → tsquery

Converts the input text into a tsquery by normalizing each word in the input according to the specified configuration. The input must already be formatted like a tsquery, in other words, subsequent tokens must be connected by a tsquery operator (&, |, <->, !).

+
Immutable
to_tsquery(text: string) → tsquery

Converts the input text into a tsquery by normalizing each word in the input according to the default configuration. The input must already be formatted like a tsquery, in other words, subsequent tokens must be connected by a tsquery operator (&, |, <->, !).

+
Stable
to_tsvector(config: string, text: string) → tsvector

Converts text to a tsvector, normalizing words according to the specified configuration. Position information is included in the result.

+
Immutable
to_tsvector(text: string) → tsvector

Converts text to a tsvector, normalizing words according to the default configuration. Position information is included in the result.

+
Stable
ts_parse(parser_name: string, document: string) → tuple{int AS tokid, string AS token}

ts_parse parses the given document and returns a series of records, one for each token produced by parsing. Each record includes a tokid showing the assigned token type and a token which is the text of the token.

+
Stable
ts_rank(vector: tsvector, query: tsquery) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(vector: tsvector, query: tsquery, normalization: int) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(weights: float[], vector: tsvector, query: tsquery) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(weights: float[], vector: tsvector, query: tsquery, normalization: int) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
+ +### Fuzzy String Matching functions + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
levenshtein(source: string, target: string) → int

Calculates the Levenshtein distance between two strings. Maximum input length is 255 characters.

+
Immutable
levenshtein(source: string, target: string, ins_cost: int, del_cost: int, sub_cost: int) → int

Calculates the Levenshtein distance between two strings. The cost parameters specify how much to charge for each edit operation. Maximum input length is 255 characters.

+
Immutable
levenshtein_less_equal(source: string, target: string, ins_cost: int, del_cost: int, sub_cost: int, max_d: int) → int

Calculates the Levenshtein distance between two strings. The cost parameters specify how much to charge for each edit operation. If actual distance is less or equal then max_d, then it returns the distance. Otherwise this function returns a value greater than max_d. The maximum length of the input strings is 255 characters.

+
Immutable
levenshtein_less_equal(source: string, target: string, max_d: int) → int

Calculates the Levenshtein distance between two strings. If actual distance is less or equal then max_d, then it returns the distance. Otherwise this function returns a value greater than max_d. The maximum length of the input strings is 255 characters.

+
Immutable
metaphone(source: string, max_output_length: int) → string

Convert a string to its Metaphone code. Maximum input length is 255 characters

+
Immutable
soundex(source: string) → string

Convert a string to its Soundex code.

+
Immutable
+ +### ID generation functions + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
experimental_uuid_v4() → bytes

Returns a UUID.

+
Volatile
gen_random_ulid() → uuid

Generates a random ULID and returns it as a value of UUID type.

+
Volatile
gen_random_uuid() → uuid

Generates a random version 4 UUID, and returns it as a value of UUID type.

+
Volatile
unique_rowid() → int

Returns a unique ID used by CockroachDB to generate unique row IDs if a Primary Key isn’t defined for the table. The value is a combination of the insert timestamp and the ID of the node executing the statement, which guarantees this combination is globally unique. However, there can be gaps and the order is not completely guaranteed.

+
Volatile
unordered_unique_rowid() → int

Returns a unique ID. The value is a combination of the insert timestamp (bit-reversed) and the ID of the node executing the statement, which guarantees this combination is globally unique. The way it is generated is statistically likely to not have any ordering relative to previously generated values.

+
Volatile
uuid_generate_v1() → uuid

Generates a version 1 UUID, and returns it as a value of UUID type. To avoid exposing the server’s real MAC address, this uses a random MAC address and a timestamp. Essentially, this is an alias for uuid_generate_v1mc.

+
Volatile
uuid_generate_v1mc() → uuid

Generates a version 1 UUID, and returns it as a value of UUID type. This uses a random MAC address and a timestamp.

+
Volatile
uuid_generate_v3(namespace: uuid, name: string) → uuid

Generates a version 3 UUID in the given namespace using the specified input name, with md5 as the hashing method. The namespace should be one of the special constants produced by the uuid_ns_*() functions.

+
Immutable
uuid_generate_v4() → uuid

Generates a random version 4 UUID, and returns it as a value of UUID type.

+
Volatile
uuid_generate_v5(namespace: uuid, name: string) → uuid

Generates a version 5 UUID in the given namespace using the specified input name. This is similar to a version 3 UUID, except it uses SHA-1 for hashing.

+
Immutable
uuid_nil() → uuid

Returns a nil UUID constant.

+
Immutable
uuid_ns_dns() → uuid

Returns a constant designating the DNS namespace for UUIDs.

+
Immutable
uuid_ns_oid() → uuid

Returns a constant designating the ISO object identifier (OID) namespace for UUIDs. These are unrelated to the OID type used internally in the database.

+
Immutable
uuid_ns_url() → uuid

Returns a constant designating the URL namespace for UUIDs.

+
Immutable
uuid_ns_x500() → uuid

Returns a constant designating the X.500 distinguished name (DN) namespace for UUIDs.

+
Immutable
uuid_v4() → bytes

Returns a UUID.

+
Volatile
+ +### INET functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
abbrev(val: inet) → string

Converts the combined IP address and prefix length to an abbreviated display format as text. For INET types, this will omit the prefix length if it’s not the default (32 for IPv4, 128 for IPv6).

+

For example, abbrev('192.168.1.2/24') returns '192.168.1.2/24'

+
Immutable
broadcast(val: inet) → inet

Gets the broadcast address for the network address represented by the value.

+

For example, broadcast('192.168.1.2/24') returns '192.168.1.255/24'

+
Immutable
family(val: inet) → int

Extracts the IP family of the value; 4 for IPv4, 6 for IPv6.

+

For example, family('::1') returns 6

+
Immutable
host(val: inet) → string

Extracts the address part of the combined address/prefixlen value as text.

+

For example, host('192.168.1.2/16') returns '192.168.1.2'

+
Immutable
hostmask(val: inet) → inet

Creates an IP host mask corresponding to the prefix length in the value.

+

For example, hostmask('192.168.1.2/16') returns '0.0.255.255'

+
Immutable
masklen(val: inet) → int

Retrieves the prefix length stored in the value.

+

For example, masklen('192.168.1.2/16') returns 16

+
Immutable
netmask(val: inet) → inet

Creates an IP network mask corresponding to the prefix length in the value.

+

For example, netmask('192.168.1.2/16') returns '255.255.0.0'

+
Immutable
set_masklen(val: inet, prefixlen: int) → inet

Sets the prefix length of val to prefixlen.

+

For example, set_masklen('192.168.1.2', 16) returns '192.168.1.2/16'.

+
Immutable
+ +### INT functions + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
crc32c(bytes...) → int

Calculates the CRC-32 hash using the Castagnoli polynomial.

+
Leakproof
crc32c(string...) → int

Calculates the CRC-32 hash using the Castagnoli polynomial.

+
Leakproof
crc32ieee(bytes...) → int

Calculates the CRC-32 hash using the IEEE polynomial.

+
Leakproof
crc32ieee(string...) → int

Calculates the CRC-32 hash using the IEEE polynomial.

+
Leakproof
fnv32(bytes...) → int

Calculates the 32-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv32(string...) → int

Calculates the 32-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv32a(bytes...) → int

Calculates the 32-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv32a(string...) → int

Calculates the 32-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv64(bytes...) → int

Calculates the 64-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv64(string...) → int

Calculates the 64-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv64a(bytes...) → int

Calculates the 64-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv64a(string...) → int

Calculates the 64-bit FNV-1a hash value of a set of values.

+
Leakproof
width_bucket(operand: decimal, b1: decimal, b2: decimal, count: int) → int

return the bucket number to which operand would be assigned in a histogram having count equal-width buckets spanning the range b1 to b2. Returns 0 or count+1 for an input outside that range.

+
Immutable
width_bucket(operand: int, b1: int, b2: int, count: int) → int

return the bucket number to which operand would be assigned in a histogram having count equal-width buckets spanning the range b1 to b2.

+
Immutable
width_bucket(operand: anyelement, thresholds: anyelement[]) → int

return the bucket number to which operand would be assigned given an array listing the lower bounds of the buckets; returns 0 for an input less than the first lower bound; the thresholds array must be sorted, smallest first, or unexpected results will be obtained

+
Immutable
+ +### JSONB functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_to_json(array: anyelement[]) → jsonb

Returns the array as JSON or JSONB.

+
Stable
array_to_json(array: anyelement[], pretty_bool: bool) → jsonb

Returns the array as JSON or JSONB.

+
Stable
json_array_elements(input: jsonb) → jsonb

Expands a JSON array to a set of JSON values.

+
Immutable
json_array_elements_text(input: jsonb) → string

Expands a JSON array to a set of text values.

+
Immutable
json_array_length(json: jsonb) → int

Returns the number of elements in the outermost JSON or JSONB array.

+
Immutable
json_build_array(any...) → jsonb

Builds a possibly-heterogeneously-typed JSON or JSONB array out of a variadic argument list.

+
Stable
json_build_object(any...) → jsonb

Builds a JSON object out of a variadic argument list.

+
Stable
json_each(input: jsonb) → tuple{string AS key, jsonb AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs.

+
Immutable
json_each_text(input: jsonb) → tuple{string AS key, string AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs. The returned values will be of type text.

+
Immutable
json_extract_path(jsonb, string...) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
json_extract_path_text(jsonb, string...) → string

Returns the JSON value as text pointed to by the variadic arguments.

+
Immutable
json_object(keys: string[], values: string[]) → jsonb

This form of json_object takes keys and values pairwise from two separate arrays. In all other respects it is identical to the one-argument form.

+
Immutable
json_object(texts: string[]) → jsonb

Builds a JSON or JSONB object out of a text array. The array must have exactly one dimension with an even number of members, in which case they are taken as alternating key/value pairs.

+
Immutable
json_populate_record(base: anyelement, from_json: jsonb) → anyelement

Expands the object in from_json to a row whose columns match the record type defined by base.

+
Stable
json_populate_recordset(base: anyelement, from_json: jsonb) → anyelement

Expands the outermost array of objects in from_json to a set of rows whose columns match the record type defined by base

+
Stable
json_remove_path(val: jsonb, path: string[]) → jsonb

Remove the specified path from the JSON object.

+
Immutable
json_set(val: jsonb, path: string[], to: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
json_set(val: jsonb, path: string[], to: jsonb, create_missing: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If create_missing is false, new keys will not be inserted to objects and values will not be prepended or appended to arrays.

+
Immutable
json_strip_nulls(from_json: jsonb) → jsonb

Returns from_json with all object fields that have null values omitted. Other null values are untouched.

+
Immutable
json_typeof(val: jsonb) → string

Returns the type of the outermost JSON value as a text string.

+
Immutable
json_valid(string: string) → bool

Returns whether the given string is valid JSON or not.

+
Immutable
jsonb_array_elements(input: jsonb) → jsonb

Expands a JSON array to a set of JSON values.

+
Immutable
jsonb_array_elements_text(input: jsonb) → string

Expands a JSON array to a set of text values.

+
Immutable
jsonb_array_length(json: jsonb) → int

Returns the number of elements in the outermost JSON or JSONB array.

+
Immutable
jsonb_build_array(any...) → jsonb

Builds a possibly-heterogeneously-typed JSON or JSONB array out of a variadic argument list.

+
Stable
jsonb_build_object(any...) → jsonb

Builds a JSON object out of a variadic argument list.

+
Stable
jsonb_each(input: jsonb) → tuple{string AS key, jsonb AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs.

+
Immutable
jsonb_each_text(input: jsonb) → tuple{string AS key, string AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs. The returned values will be of type text.

+
Immutable
jsonb_exists_any(json: jsonb, array: string[]) → bool

Returns whether any of the strings in the text array exist as top-level keys or array elements

+
Immutable
jsonb_extract_path(jsonb, string...) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
jsonb_extract_path_text(jsonb, string...) → string

Returns the JSON value as text pointed to by the variadic arguments.

+
Immutable
jsonb_insert(target: jsonb, path: string[], new_val: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments. new_val will be inserted before path target.

+
Immutable
jsonb_insert(target: jsonb, path: string[], new_val: jsonb, insert_after: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If insert_after is true (default is false), new_val will be inserted after path target.

+
Immutable
jsonb_object(keys: string[], values: string[]) → jsonb

This form of json_object takes keys and values pairwise from two separate arrays. In all other respects it is identical to the one-argument form.

+
Immutable
jsonb_object(texts: string[]) → jsonb

Builds a JSON or JSONB object out of a text array. The array must have exactly one dimension with an even number of members, in which case they are taken as alternating key/value pairs.

+
Immutable
jsonb_populate_record(base: anyelement, from_json: jsonb) → anyelement

Expands the object in from_json to a row whose columns match the record type defined by base.

+
Stable
jsonb_populate_recordset(base: anyelement, from_json: jsonb) → anyelement

Expands the outermost array of objects in from_json to a set of rows whose columns match the record type defined by base

+
Stable
jsonb_pretty(val: jsonb) → string

Returns the given JSON value as a STRING indented and with newlines.

+
Immutable
jsonb_set(val: jsonb, path: string[], to: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
jsonb_set(val: jsonb, path: string[], to: jsonb, create_missing: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If create_missing is false, new keys will not be inserted to objects and values will not be prepended or appended to arrays.

+
Immutable
jsonb_strip_nulls(from_json: jsonb) → jsonb

Returns from_json with all object fields that have null values omitted. Other null values are untouched.

+
Immutable
jsonb_typeof(val: jsonb) → string

Returns the type of the outermost JSON value as a text string.

+
Immutable
row_to_json(row: tuple) → jsonb

Returns the row as a JSON object.

+
Stable
to_json(val: anyelement) → jsonb

Returns the value as JSON or JSONB.

+
Stable
to_jsonb(val: anyelement) → jsonb

Returns the value as JSON or JSONB.

+
Stable
+ +### Jsonpath functions + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
jsonb_path_exists(target: jsonb, path: jsonpath) → bool

Checks whether the JSON path returns any item for the specified JSON value.

+
Immutable
jsonb_path_exists(target: jsonb, path: jsonpath, vars: jsonb) → bool

Checks whether the JSON path returns any item for the specified JSON value. +The vars argument must be a JSON object, and its fields provide named +values to be substituted into the jsonpath expression.

+
Immutable
jsonb_path_exists(target: jsonb, path: jsonpath, vars: jsonb, silent: bool) → bool

Checks whether the JSON path returns any item for the specified JSON value. +The vars argument must be a JSON object, and its fields provide named +values to be substituted into the jsonpath expression. If the silent +argument is true, the function suppresses the following errors: +missing object field or array element, unexpected JSON item type, +datetime and numeric errors.

+
Immutable
jsonb_path_match(target: jsonb, path: jsonpath) → bool

Returns the SQL boolean result of a JSON path predicate check +for the specified JSON value. (This is useful only with predicate check +expressions, not SQL-standard JSON path expressions, since it will +either fail or return NULL if the path result is not a single boolean +value.)

+
Immutable
jsonb_path_match(target: jsonb, path: jsonpath, vars: jsonb) → bool

Returns the SQL boolean result of a JSON path predicate check +for the specified JSON value. (This is useful only with predicate check +expressions, not SQL-standard JSON path expressions, since it will +either fail or return NULL if the path result is not a single boolean +value.) The vars argument must be a JSON object, and its fields provide +named values to be substituted into the jsonpath expression.

+
Immutable
jsonb_path_match(target: jsonb, path: jsonpath, vars: jsonb, silent: bool) → bool

Returns the SQL boolean result of a JSON path predicate check +for the specified JSON value. (This is useful only with predicate check +expressions, not SQL-standard JSON path expressions, since it will +either fail or return NULL if the path result is not a single boolean +value.) The vars argument must be a JSON object, and its fields provide +named values to be substituted into the jsonpath expression. If the +silent argument is true, the function suppresses the following errors: +missing object field or array element, unexpected JSON item type, +datetime and numeric errors.

+
Immutable
jsonb_path_query(target: jsonb, path: jsonpath) → jsonb

Returns all JSON items returned by the JSON path for the specified JSON value.

+
Immutable
jsonb_path_query(target: jsonb, path: jsonpath, vars: jsonb) → jsonb

Returns all JSON items returned by the JSON path for the specified JSON value. +The vars argument must be a JSON object, and its fields provide named values +to be substituted into the jsonpath expression.

+
Immutable
jsonb_path_query(target: jsonb, path: jsonpath, vars: jsonb, silent: bool) → jsonb

Returns all JSON items returned by the JSON path for the specified JSON value. +The vars argument must be a JSON object, and its fields provide named values +to be substituted into the jsonpath expression. If the silent argument is true, +the function suppresses the following errors: missing object field or array +element, unexpected JSON item type, datetime and numeric errors.

+
Immutable
jsonb_path_query_array(target: jsonb, path: jsonpath) → jsonb

Returns all JSON items returned by the JSON path for the +specified JSON value, as a JSON array.

+
Immutable
jsonb_path_query_array(target: jsonb, path: jsonpath, vars: jsonb) → jsonb

Returns all JSON items returned by the JSON path for the +specified JSON value, as a JSON array. The vars argument must be a +JSON object, and its fields provide named values to be substituted +into the jsonpath expression.

+
Immutable
jsonb_path_query_array(target: jsonb, path: jsonpath, vars: jsonb, silent: bool) → jsonb

Returns all JSON items returned by the JSON path for the +specified JSON value, as a JSON array. The vars argument must be a +JSON object, and its fields provide named values to be substituted +into the jsonpath expression. If the silent argument is true, the +function suppresses the following errors: missing object field or +array element, unexpected JSON item type, datetime and numeric errors.

+
Immutable
jsonb_path_query_first(target: jsonb, path: jsonpath) → jsonb

Returns the first JSON item returned by the JSON path for the +specified JSON value, or NULL if there are no results.

+
Immutable
jsonb_path_query_first(target: jsonb, path: jsonpath, vars: jsonb) → jsonb

Returns the first JSON item returned by the JSON path for the +specified JSON value, or NULL if there are no results. The vars +argument must be a JSON object, and its fields provide named values +to be substituted into the jsonpath expression.

+
Immutable
jsonb_path_query_first(target: jsonb, path: jsonpath, vars: jsonb, silent: bool) → jsonb

Returns the first JSON item returned by the JSON path for the +specified JSON value, or NULL if there are no results. The vars +argument must be a JSON object, and its fields provide named values +to be substituted into the jsonpath expression. If the silent argument is true, the +function suppresses the following errors: missing object field or +array element, unexpected JSON item type, datetime and numeric errors.

+
Immutable
+ +### LTree functions + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
index(a: ltree, b: ltree) → int

position of first occurrence of b in a; -1 if not found

+
Immutable
index(a: ltree, b: ltree, offset: int) → int

position of first occurrence of b in a, starting at offset; -1 if not found

+
Immutable
lca(ltree, ltree, ltree...) → ltree

lowest common ancestor, i.e., longest common prefix of paths

+
Immutable
lca(ltree[]: ltree[]) → ltree

lowest common ancestor, i.e., longest common prefix of paths

+
Immutable
ltree2text(ltree: ltree) → string

cast ltree to text

+
Immutable
nlevel(ltree: ltree) → int

number of labels in path ltree

+
Immutable
subltree(ltree: ltree, start: int, end: int) → ltree

subpath of ltree from position start to position end-1 (counting from 0)

+
Immutable
subpath(ltree: ltree, offset: int) → ltree

subpath of ltree starting at position offset, extending to end of path. If offset is negative, subpath starts that far from the end of the path.

+
Immutable
subpath(ltree: ltree, offset: int, length: int) → ltree

subpath of ltree starting at position offset, length length. If offset is negative, subpath starts that far from the end of the path. If length is negative, leaves that many labels off the end of the path.

+
Immutable
text2ltree(text: string) → ltree

cast text to ltree

+
Immutable
+ +### Multi-region functions + + + + + + + +
Function → ReturnsDescriptionVolatility
default_to_database_primary_region(val: string) → string

Returns the given region if the region has been added to the current database. +Otherwise, this will return the primary region of the current database. +This will error if the current database is not a multi-region database.

+
Stable
gateway_region() → string

Returns the region of the connection’s current node as defined by +the locality flag on node startup. Returns an error if no region is set.

+
Stable
rehome_row() → string

Returns the region of the connection’s current node as defined by +the locality flag on node startup. Returns an error if no region is set.

+
Stable
+ +### PGVector functions + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cosine_distance(v1: vector, v2: vector) → float

Returns the cosine distance between the two vectors.

+
Immutable
inner_product(v1: vector, v2: vector) → float

Returns the inner product between the two vectors.

+
Immutable
l1_distance(v1: vector, v2: vector) → float

Returns the Manhattan distance between the two vectors.

+
Immutable
l2_distance(v1: vector, v2: vector) → float

Returns the Euclidean distance between the two vectors.

+
Immutable
vector_dims(vector: vector) → int

Returns the number of the dimensions in the vector.

+
Immutable
vector_norm(vector: vector) → float

Returns the Euclidean norm of the vector.

+
Immutable
+ +### STRING[] functions + + + + + + +
Function → ReturnsDescriptionVolatility
regexp_split_to_array(string: string, pattern: string) → string[]

Split string using a POSIX regular expression as the delimiter.

+
Immutable
regexp_split_to_array(string: string, pattern: string, flags: string) → string[]

Split string using a POSIX regular expression as the delimiter with flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
+ +### Sequence functions + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
currval(sequence_name: string) → int

Returns the latest value obtained with nextval for this sequence in this session.

+
Volatile
currval(sequence_name: regclass) → int

Returns the latest value obtained with nextval for this sequence in this session.

+
Volatile
lastval() → int

Return value most recently obtained with nextval in this session.

+
Volatile
nextval(sequence_name: string) → int

Advances the given sequence and returns its new value.

+
Volatile
nextval(sequence_name: regclass) → int

Advances the given sequence and returns its new value.

+
Volatile
setval(sequence_name: string, value: int) → int

Set the given sequence’s current value. The next call to nextval will return value + Increment

+
Volatile
setval(sequence_name: string, value: int, is_called: bool) → int

Set the given sequence’s current value. If is_called is false, the next call to nextval will return value; otherwise value + Increment.

+
Volatile
setval(sequence_name: regclass, value: int) → int

Set the given sequence’s current value. The next call to nextval will return value + Increment

+
Volatile
setval(sequence_name: regclass, value: int, is_called: bool) → int

Set the given sequence’s current value. If is_called is false, the next call to nextval will return value; otherwise value + Increment.

+
Volatile
+ +### Set-returning functions + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
aclexplode(aclitems: string[]) → tuple{oid AS grantor, oid AS grantee, string AS privilege_type, bool AS is_grantable}

Produces a virtual table containing aclitem stuff (returns no rows as this feature is unsupported in CockroachDB)

+
Stable
generate_series(start: int, end: int) → int

Produces a virtual table containing the integer values from start to end, inclusive.

+
Immutable
generate_series(start: int, end: int, step: int) → int

Produces a virtual table containing the integer values from start to end, inclusive, by increment of step.

+
Immutable
generate_series(start: timestamp, end: timestamp, step: interval) → timestamp

Produces a virtual table containing the timestamp values from start to end, inclusive, by increment of step.

+
Immutable
generate_series(start: timestamptz, end: timestamptz, step: interval) → timestamptz

Produces a virtual table containing the timestampTZ values from start to end, inclusive, by increment of step.

+
Immutable
generate_subscripts(array: anyelement[]) → int

Returns a series comprising the given array’s subscripts.

+
Immutable
generate_subscripts(array: anyelement[], dim: int) → int

Returns a series comprising the given array’s subscripts.

+
Immutable
generate_subscripts(array: anyelement[], dim: int, reverse: bool) → int

Returns a series comprising the given array’s subscripts.

+

When reverse is true, the series is returned in reverse order.

+
Immutable
information_schema._pg_expandarray(input: anyelement[]) → tuple{anyelement AS x, int AS n}

Returns the input array as a set of rows with an index

+
Immutable
json_object_keys(input: jsonb) → string

Returns sorted set of keys in the outermost JSON object.

+
Immutable
json_to_record(input: jsonb) → tuple

Builds an arbitrary record from a JSON object.

+
Stable
json_to_recordset(input: jsonb) → tuple

Builds an arbitrary set of records from a JSON array of objects.

+
Stable
jsonb_object_keys(input: jsonb) → string

Returns sorted set of keys in the outermost JSON object.

+
Immutable
jsonb_to_record(input: jsonb) → tuple

Builds an arbitrary record from a JSON object.

+
Stable
jsonb_to_recordset(input: jsonb) → tuple

Builds an arbitrary set of records from a JSON array of objects.

+
Stable
pg_get_keywords() → tuple{string AS word, string AS catcode, string AS catdesc}

Produces a virtual table containing the keywords known to the SQL parser.

+
Immutable
pg_options_to_table(options: string[]) → tuple{string AS option_name, string AS option_value}

Converts the options array format to a table.

+
Stable
regexp_split_to_table(string: string, pattern: string) → string

Split string using a POSIX regular expression as the delimiter.

+
Immutable
regexp_split_to_table(string: string, pattern: string, flags: string) → string

Split string using a POSIX regular expression as the delimiter with flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
unnest(anyelement[], anyelement[], anyelement[]...) → tuple{anyelement AS unnest, anyelement AS unnest, anyelement AS unnest}

Returns the input arrays as a set of rows

+
Immutable
unnest(input: anyelement[]) → anyelement

Returns the input array as a set of rows

+
Immutable
workload_index_recs() → tuple{string AS index_rec, bytes[] AS fingerprint_ids}

Returns index recommendations and the fingerprint ids that the indexes will impact

+
Immutable
workload_index_recs(timestamptz: timestamptz) → tuple{string AS index_rec, bytes[] AS fingerprint_ids}

Returns index recommendations and the fingerprint ids that the indexes will impact

+
Immutable
+ +### Spatial functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
_st_contains(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no points of geometry_b lie in the exterior of geometry_a, and there is at least one point in the interior of geometry_b that lies in the interior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_containsproperly(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_b intersects the interior of geometry_a but not the boundary or exterior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_coveredby(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_a is outside geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_coveredby(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_covers(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_b is outside geography_a.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_covers(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_crosses(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a has some - but not all - interior points in common with geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dfullywithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, inclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than or equal to distance units.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dfullywithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, exclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than distance units.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_equals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is spatially equal to geometry_b, i.e. ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = true.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_intersects(geography_a: geography, geography_b: geography) → bool

Returns true if geography_a shares any portion of space with geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_intersects(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_overlaps(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a intersects but does not completely contain geometry_b, or vice versa. “Does not completely” implies ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = false.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_touches(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if the only points in common between geometry_a and geometry_b are on the boundary. Note points do not touch other points.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_within(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is completely inside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
addgeometrycolumn(catalog_name: string, schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(catalog_name: string, schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
geometrytype(geometry: geometry) → string

Returns the type of geometry as a string.

+

This function utilizes the GEOS module.

+
Immutable
geomfromewkb(val: bytes) → geometry

Returns the Geometry from an EWKB representation.

+
Immutable
geomfromewkt(val: string) → geometry

Returns the Geometry from an EWKT representation.

+
Immutable
postgis_addbbox(geometry: geometry) → geometry

Compatibility placeholder function with PostGIS. This does not perform any operation on the Geometry.

+
Immutable
postgis_dropbbox(geometry: geometry) → geometry

Compatibility placeholder function with PostGIS. This does not perform any operation on the Geometry.

+
Immutable
postgis_extensions_upgrade() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_full_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_geos_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_getbbox(geometry: geometry) → box2d

Returns a box2d encapsulating the given Geometry.

+
Immutable
postgis_hasbbox(geometry: geometry) → bool

Returns whether a given Geometry has a bounding box. False for points and empty geometries; always true otherwise.

+
Immutable
postgis_lib_build_date() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_lib_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_liblwgeom_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_libxml_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_proj_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_build_date() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_installed() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_released() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_wagyu_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
st_3dlength(geometry: geometry) → float

Returns the 3-dimensional or 2-dimensional length of the geometry.

+

Note ST_3DLength is only valid for LineString or MultiLineString. +For 2-D lines it will return the 2-D length (same as ST_Length and ST_Length2D)

+

This function utilizes the GEOS module.

+
Immutable
st_addmeasure(geometry: geometry, start: float, end: float) → geometry

Returns a copy of a LineString or MultiLineString with measure coordinates linearly interpolated between the specified start and end values. Any existing M coordinates will be overwritten.

+
Immutable
st_addpoint(line_string: geometry, point: geometry) → geometry

Adds a Point to the end of a LineString.

+
Immutable
st_addpoint(line_string: geometry, point: geometry, index: int) → geometry

Adds a Point to a LineString at the given 0-based index (-1 to append).

+
Immutable
st_affine(geometry: geometry, a: float, b: float, c: float, d: float, e: float, f: float, g: float, h: float, i: float, x_off: float, y_off: float, z_off: float) → geometry

Applies a 3D affine transformation to the given geometry.

+

The matrix transformation will be applied as follows for each coordinate: +/ a b c x_off \ / x
+| d e f y_off | | y | +| g h i z_off | | z | +\ 0 0 0 1 / \ 0 /

+
Immutable
st_affine(geometry: geometry, a: float, b: float, d: float, e: float, x_off: float, y_off: float) → geometry

Applies a 2D affine transformation to the given geometry.

+

The matrix transformation will be applied as follows for each coordinate: +/ a b x_off \ / x
+| d e y_off | | y | +\ 0 0 1 / \ 0 /

+
Immutable
st_angle(line1: geometry, line2: geometry) → float

Returns the clockwise angle between two LINESTRING geometries, treating them as vectors between their start- and endpoints. Returns NULL if any vectors have 0 length.

+
Immutable
st_angle(point1: geometry, point2: geometry, point3: geometry) → float

Returns the clockwise angle between the vectors formed by point2,point1 and point2,point3. The arguments must be POINT geometries. Returns NULL if any vectors have 0 length.

+
Immutable
st_angle(point1: geometry, point2: geometry, point3: geometry, point4: geometry) → float

Returns the clockwise angle between the vectors formed by point1,point2 and point3,point4. The arguments must be POINT geometries. Returns NULL if any vectors have 0 length.

+
Immutable
st_area(geography: geography) → float

Returns the area of the given geography in meters^2. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_area(geography: geography, use_spheroid: bool) → float

Returns the area of the given geography in meters^2.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_area(geometry: geometry) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_area(geometry_str: string) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_area2d(geometry: geometry) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_asbinary(geography: geography) → bytes

Returns the WKB representation of a given Geography.

+
Immutable
st_asbinary(geography: geography, xdr_or_ndr: string) → bytes

Returns the WKB representation of a given Geography. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_asbinary(geometry: geometry) → bytes

Returns the WKB representation of a given Geometry.

+
Immutable
st_asbinary(geometry: geometry, xdr_or_ndr: string) → bytes

Returns the WKB representation of a given Geometry. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_asencodedpolyline(geometry: geometry) → string

Returns the geometry as an Encoded Polyline. +This format is used by Google Maps with precision=5 and by Open Source Routing Machine with precision=5 and 6. +Preserves 5 decimal places.

+
Immutable
st_asencodedpolyline(geometry: geometry, precision: int4) → string

Returns the geometry as an Encoded Polyline. +This format is used by Google Maps with precision=5 and by Open Source Routing Machine with precision=5 and 6. +Precision specifies how many decimal places will be preserved in Encoded Polyline. Value should be the same on encoding and decoding, or coordinates will be incorrect.

+
Immutable
st_asewkb(geography: geography) → bytes

Returns the EWKB representation of a given Geography.

+
Immutable
st_asewkb(geometry: geometry) → bytes

Returns the EWKB representation of a given Geometry.

+
Immutable
st_asewkt(geography: geography) → string

Returns the EWKT representation of a given Geography. A default of 15 decimal digits is used.

+
Immutable
st_asewkt(geography: geography, max_decimal_digits: int) → string

Returns the EWKT representation of a given Geography. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_asewkt(geometry: geometry) → string

Returns the EWKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+
Immutable
st_asewkt(geometry: geometry, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_asewkt(geometry_str: string) → string

Returns the EWKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asewkt(geometry_str: string, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geography: geography) → string

Returns the GeoJSON representation of a given Geography. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(geography: geography, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geography with max_decimal_digits output for each coordinate value.

+
Immutable
st_asgeojson(geography: geography, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geography with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option (default for Geography)
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326
  • +
+
Immutable
st_asgeojson(geometry: geometry) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(geometry: geometry, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+
Immutable
st_asgeojson(geometry: geometry, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326 (default for Geometry)
  • +
+
Immutable
st_asgeojson(geometry_str: string) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geometry_str: string, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geometry_str: string, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326 (default for Geometry)
  • +
+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(row: tuple) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(row: tuple, geo_column: string) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. Coordinates have a maximum of 9 decimal digits.

+
Stable
st_asgeojson(row: tuple, geo_column: string, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. max_decimal_digits will be output for each coordinate value.

+
Stable
st_asgeojson(row: tuple, geo_column: string, max_decimal_digits: int, pretty: bool) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. max_decimal_digits will be output for each coordinate value. Output will be pretty printed in JSON if pretty is true.

+
Stable
st_ashexewkb(geography: geography) → string

Returns the EWKB representation in hex of a given Geography.

+
Immutable
st_ashexewkb(geography: geography, xdr_or_ndr: string) → string

Returns the EWKB representation in hex of a given Geography. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_ashexewkb(geometry: geometry) → string

Returns the EWKB representation in hex of a given Geometry.

+
Immutable
st_ashexewkb(geometry: geometry, xdr_or_ndr: string) → string

Returns the EWKB representation in hex of a given Geometry. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_ashexwkb(geography: geography) → string

Returns the WKB representation in hex of a given Geography.

+
Immutable
st_ashexwkb(geometry: geometry) → string

Returns the WKB representation in hex of a given Geometry.

+
Immutable
st_askml(geography: geography) → string

Returns the KML representation of a given Geography.

+
Immutable
st_askml(geometry: geometry) → string

Returns the KML representation of a given Geometry.

+
Immutable
st_askml(geometry_str: string) → string

Returns the KML representation of a given Geometry.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds. +Uses 256 as the buffer size in tile coordinate space for geometry clipping. +Uses 4096 as the tile extent size in tile coordinate space.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be clipped and transformed.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds. +Uses 256 as the buffer size in tile coordinate space for geometry clipping.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be clipped and transformed.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int, buffer: int) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be clipped and transformed.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int, buffer: int, clip: bool) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds if required.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be transformed, and clipped if required.

+
Immutable
st_astext(geography: geography) → string

Returns the WKT representation of a given Geography. A default of 15 decimal digits is used.

+
Immutable
st_astext(geography: geography, max_decimal_digits: int) → string

Returns the WKT representation of a given Geography. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_astext(geometry: geometry) → string

Returns the WKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+
Immutable
st_astext(geometry: geometry, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_astext(geometry_str: string) → string

Returns the WKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_astext(geometry_str: string, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int, precision_z: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int, precision_z: int, precision_m: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_azimuth(geography_a: geography, geography_b: geography) → float

Returns the azimuth in radians of the segment defined by the given point geographies, or NULL if the two points are coincident. It is solved using the Inverse geodesic problem.

+

The azimuth angle is referenced from north, and is positive clockwise: North = 0; East = π/2; South = π; West = 3π/2.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_azimuth(geometry_a: geometry, geometry_b: geometry) → float

Returns the azimuth in radians of the segment defined by the given point geometries, or NULL if the two points are coincident.

+

The azimuth angle is referenced from north, and is positive clockwise: North = 0; East = π/2; South = π; West = 3π/2.

+
Immutable
st_bdpolyfromtext(str: string, srid: int) → geometry

Returns a Polygon from multilinestring WKT with a SRID. If the input is not a multilinestring an error will be thrown.

+
Immutable
st_boundary(geometry: geometry) → geometry

Returns the closure of the combinatorial boundary of this Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_box2dfromgeohash(geohash: string) → box2d

Return a Box2D from a GeoHash string with max precision.

+
Immutable
st_box2dfromgeohash(geohash: string, precision: int) → box2d

Return a Box2D from a GeoHash string with supplied precision.

+
Immutable
st_buffer(geography: geography, distance: float) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geography: geography, distance: float, buffer_style_params: string) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geography: geography, distance: float, quad_segs: int) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geometry: geometry, distance: decimal) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float, buffer_style_params: string) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float, quad_segs: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry_str: string, distance: decimal) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float, buffer_style_params: string) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float, quad_segs: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_centroid(geography: geography) → geography

Returns the centroid of given geography. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_centroid(geography: geography, use_spheroid: bool) → geography

Returns the centroid of given geography.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_centroid(geometry: geometry) → geometry

Returns the centroid of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_centroid(geometry_str: string) → geometry

Returns the centroid of the given geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_clipbybox2d(geometry: geometry, box2d: box2d) → geometry

Clips the geometry to conform to the bounding box specified by box2d.

+
Immutable
st_closestpoint(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the 2-dimensional point on geometry_a that is closest to geometry_b. This is the first point of the shortest line.

+
Immutable
st_collectionextract(geometry: geometry, type: int) → geometry

Given a collection, returns a multitype consisting only of elements of the specified type. If there are no elements of the given type, an EMPTY geometry is returned. Types are specified as 1=POINT, 2=LINESTRING, 3=POLYGON - other types are not supported.

+
Immutable
st_collectionhomogenize(geometry: geometry) → geometry

Returns the “simplest” representation of a collection’s contents. Collections of a single type will be returned as an appropriate multitype, or a singleton if it only contains a single geometry.

+
Immutable
st_combinebbox(box2d: box2d, geometry: geometry) → box2d

Combines the current bounding box with the bounding box of the Geometry.

+
Immutable
st_contains(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no points of geometry_b lie in the exterior of geometry_a, and there is at least one point in the interior of geometry_b that lies in the interior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_containsproperly(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_b intersects the interior of geometry_a but not the boundary or exterior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_convexhull(geometry: geometry) → geometry

Returns a geometry that represents the Convex Hull of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_coorddim(geometry: geometry) → int

Returns the number of coordinate dimensions of a given Geometry.

+
Immutable
st_coveredby(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_a is outside geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_coveredby(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_coveredby(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_covers(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_b is outside geography_a.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_covers(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_covers(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_crosses(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a has some - but not all - interior points in common with geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dfullywithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, inclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than or equal to distance units.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dfullywithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, exclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than distance units.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_difference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the difference of two Geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_dimension(geometry: geometry) → int

Returns the number of topological dimensions of a given Geometry.

+
Immutable
st_disjoint(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a does not overlap, touch or is within geometry_b.

+

This function utilizes the GEOS module.

+
Immutable
st_distance(geography_a: geography, geography_b: geography) → float

Returns the distance in meters between geography_a and geography_b. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_distance(geography_a: geography, geography_b: geography, use_spheroid: bool) → float

Returns the distance in meters between geography_a and geography_b.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_distance(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance between the given geometries.

+
Immutable
st_distance(geometry_a_str: string, geometry_b_str: string) → float

Returns the distance between the given geometries.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_distancesphere(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance in meters between geometry_a and geometry_b assuming the coordinates represent lng/lat points on a sphere.

+

This function utilizes the S2 library for spherical calculations.

+
Immutable
st_distancespheroid(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance in meters between geometry_a and geometry_b assuming the coordinates represent lng/lat points on a spheroid.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_dwithin(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geometry_a_str: string, geometry_b_str: string, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geometry_a_str: string, geometry_b_str: string, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_endpoint(geometry: geometry) → geometry

Returns the last point of a geometry which has shape LineString. Returns NULL if the geometry is not a LineString.

+
Immutable
st_envelope(box2d: box2d) → geometry

Returns a bounding geometry for the given box.

+
Immutable
st_envelope(geometry: geometry) → geometry

Returns a bounding envelope for the given geometry.

+

For geometries which have a POINT or LINESTRING bounding box (i.e. is a single point +or a horizontal or vertical line), a POINT or LINESTRING is returned. Otherwise, the +returned POLYGON will be ordered Bottom Left, Top Left, Top Right, Bottom Right, +Bottom Left.

+
Immutable
st_equals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is spatially equal to geometry_b, i.e. ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = true.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_estimatedextent(schema_name: string, table_name: string, geocolumn_name: string) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+
Stable
st_estimatedextent(schema_name: string, table_name: string, geocolumn_name: string, parent_only: bool) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+

The parent_only boolean is always ignored.

+
Stable
st_estimatedextent(table_name: string, geocolumn_name: string) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+
Stable
st_expand(box2d: box2d, delta: float) → box2d

Extends the box2d by delta units across all dimensions.

+
Immutable
st_expand(box2d: box2d, delta_x: float, delta_y: float) → box2d

Extends the box2d by delta_x units in the x dimension and delta_y units in the y dimension.

+
Immutable
st_expand(geometry: geometry, delta: float) → geometry

Extends the bounding box represented by the geometry by delta units across all dimensions, returning a Polygon representing the new bounding box.

+
Immutable
st_expand(geometry: geometry, delta_x: float, delta_y: float) → geometry

Extends the bounding box represented by the geometry by delta_x units in the x dimension and delta_y units in the y dimension, returning a Polygon representing the new bounding box.

+
Immutable
st_exteriorring(geometry: geometry) → geometry

Returns the exterior ring of a Polygon as a LineString. Returns NULL if the shape is not a Polygon.

+
Immutable
st_flipcoordinates(geometry: geometry) → geometry

Returns a new geometry with the X and Y axes flipped.

+
Immutable
st_force2d(geometry: geometry) → geometry

Returns a Geometry that is forced into XY layout with any Z or M dimensions discarded.

+
Immutable
st_force3d(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3d(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3dm(geometry: geometry) → geometry

Returns a Geometry that is forced into XYM layout. If a M coordinate doesn’t exist, it will be set to 0. If a Z coordinate is present, it will be discarded.

+
Immutable
st_force3dm(geometry: geometry, defaultM: float) → geometry

Returns a Geometry that is forced into XYM layout. If a M coordinate doesn’t exist, it will be set to the specified default M value. If a Z coordinate is present, it will be discarded.

+
Immutable
st_force3dz(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3dz(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate is present, it will be discarded.

+
Immutable
st_force4d(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZM layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate doesn’t exist, it will be set to 0.

+
Immutable
st_force4d(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate doesn’t exist, it will be set to 0.

+
Immutable
st_force4d(geometry: geometry, defaultZ: float, defaultM: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified Z value. If a M coordinate doesn’t exist, it will be set to the specified M value.

+
Immutable
st_forcecollection(geometry: geometry) → geometry

Converts the geometry into a GeometryCollection.

+
Immutable
st_forcepolygonccw(geometry: geometry) → geometry

Returns a Geometry where all Polygon objects have exterior rings in the counter-clockwise orientation and interior rings in the clockwise orientation. Non-Polygon objects are unchanged.

+
Immutable
st_forcepolygoncw(geometry: geometry) → geometry

Returns a Geometry where all Polygon objects have exterior rings in the clockwise orientation and interior rings in the counter-clockwise orientation. Non-Polygon objects are unchanged.

+
Immutable
st_frechetdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the Frechet distance between the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_frechetdistance(geometry_a: geometry, geometry_b: geometry, densify_frac: float) → float

Returns the Frechet distance between the given geometries, with the given segment densification (range 0.0-1.0, -1 to disable).

+

Smaller densify_frac gives a more accurate Fréchet distance. However, the computation time and memory usage increases with the square of the number of subsegments.

+

This function utilizes the GEOS module.

+
Immutable
st_generatepoints(geometry: geometry, npoints: int4) → geometry

Generates pseudo-random points until the requested number are found within the input area. Uses system time as a seed. +The requested number of points must not be larger than 65336.

+
Volatile
st_generatepoints(geometry: geometry, npoints: int4, seed: int4) → geometry

Generates pseudo-random points until the requested number are found within the input area. +The requested number of points must not be larger than 65336.

+
Immutable
st_geogfromewkb(val: bytes) → geography

Returns the Geography from an EWKB representation.

+
Immutable
st_geogfromewkt(val: string) → geography

Returns the Geography from an EWKT representation.

+
Immutable
st_geogfromgeojson(val: string) → geography

Returns the Geography from a GeoJSON representation.

+
Immutable
st_geogfromgeojson(val: jsonb) → geography

Returns the Geography from a GeoJSON representation.

+
Immutable
st_geogfromtext(str: string, srid: int) → geography

Returns the Geography from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geogfromtext(val: string) → geography

Returns the Geography from a WKT or EWKT representation.

+
Immutable
st_geogfromwkb(bytes: bytes, srid: int) → geography

Returns the Geography from a WKB (or EWKB) representation with the given SRID set.

+
Immutable
st_geogfromwkb(val: bytes) → geography

Returns the Geography from a WKB (or EWKB) representation.

+
Immutable
st_geographyfromtext(str: string, srid: int) → geography

Returns the Geography from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geographyfromtext(val: string) → geography

Returns the Geography from a WKT or EWKT representation.

+
Immutable
st_geohash(geography: geography) → string

Returns a GeoHash representation of the geography with full precision if a point is provided, or with variable precision based on the size of the feature.

+
Immutable
st_geohash(geography: geography, precision: int) → string

Returns a GeoHash representation of the geography with the supplied precision.

+
Immutable
st_geohash(geometry: geometry) → string

Returns a GeoHash representation of the geometry with full precision if a point is provided, or with variable precision based on the size of the feature. This will error if any coordinates are outside the bounds of longitude/latitude.

+
Immutable
st_geohash(geometry: geometry, precision: int) → string

Returns a GeoHash representation of the geometry with the supplied precision. This will error if any coordinates are outside the bounds of longitude/latitude.

+
Immutable
st_geomcollfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not GeometryCollection, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geomcollfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geomcollfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geomcollfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geometryfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geometryfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_geometryn(geometry: geometry, n: int) → geometry

Returns the n-th Geometry (1-indexed). Returns NULL if out of bounds.

+
Immutable
st_geometrytype(geometry: geometry) → string

Returns the type of geometry as a string prefixed with ST_.

+

This function utilizes the GEOS module.

+
Immutable
st_geomfromewkb(val: bytes) → geometry

Returns the Geometry from an EWKB representation.

+
Immutable
st_geomfromewkt(val: string) → geometry

Returns the Geometry from an EWKT representation.

+
Immutable
st_geomfromgeohash(geohash: string) → geometry

Return a POLYGON Geometry from a GeoHash string with max precision.

+
Immutable
st_geomfromgeohash(geohash: string, precision: int) → geometry

Return a POLYGON Geometry from a GeoHash string with supplied precision.

+
Immutable
st_geomfromgeojson(val: string) → geometry

Returns the Geometry from a GeoJSON representation.

+
Immutable
st_geomfromgeojson(val: jsonb) → geometry

Returns the Geometry from a GeoJSON representation.

+
Immutable
st_geomfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geomfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_geomfromwkb(bytes: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with the given SRID set.

+
Immutable
st_geomfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation.

+
Immutable
st_hasarc(geometry: geometry) → bool

Returns whether there is a CIRCULARSTRING in the geometry.

+
Immutable
st_hausdorffdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the Hausdorff distance between the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_hausdorffdistance(geometry_a: geometry, geometry_b: geometry, densify_frac: float) → float

Returns the Hausdorff distance between the given geometries, with the given segment densification (range 0.0-1.0).

+

This function utilizes the GEOS module.

+
Immutable
st_interiorringn(geometry: geometry, n: int) → geometry

Returns the n-th (1-indexed) interior ring of a Polygon as a LineString. Returns NULL if the shape is not a Polygon, or the ring does not exist.

+
Immutable
st_intersection(geography_a: geography, geography_b: geography) → geography

Returns the point intersections of the given geographies.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+

This function utilizes the GEOS module.

+
Immutable
st_intersection(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the point intersections of the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_intersection(geometry_a_str: string, geometry_b_str: string) → geometry

Returns the point intersections of the given geometries.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_intersects(geography_a: geography, geography_b: geography) → bool

Returns true if geography_a shares any portion of space with geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_intersects(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_intersects(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_isclosed(geometry: geometry) → bool

Returns whether the geometry is closed as defined by whether the start and end points are coincident. Points are considered closed, empty geometries are not. For collections and multi-types, all members must be closed, as must all polygon rings.

+
Immutable
st_iscollection(geometry: geometry) → bool

Returns whether the geometry is of a collection type (including multi-types).

+
Immutable
st_isempty(geometry: geometry) → bool

Returns whether the geometry is empty.

+
Immutable
st_ispolygonccw(geometry: geometry) → bool

Returns whether the Polygon objects inside the Geometry have exterior rings in the counter-clockwise orientation and interior rings in the clockwise orientation. Non-Polygon objects are considered counter-clockwise.

+
Immutable
st_ispolygoncw(geometry: geometry) → bool

Returns whether the Polygon objects inside the Geometry have exterior rings in the clockwise orientation and interior rings in the counter-clockwise orientation. Non-Polygon objects are considered clockwise.

+
Immutable
st_isring(geometry: geometry) → bool

Returns whether the geometry is a single linestring that is closed and simple, as defined by ST_IsClosed and ST_IsSimple.

+

This function utilizes the GEOS module.

+
Immutable
st_issimple(geometry: geometry) → bool

Returns true if the geometry has no anomalous geometric points, e.g. that it intersects with or lies tangent to itself.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalid(geometry: geometry) → bool

Returns whether the geometry is valid as defined by the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalid(geometry: geometry, flags: int) → bool

Returns whether the geometry is valid.

+

For flags=0, validity is defined by the OGC spec.

+

For flags=1, validity considers self-intersecting rings forming holes as valid as per ESRI. This is not valid under OGC and CRDB spatial operations may not operate correctly.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidreason(geometry: geometry) → string

Returns a string containing the reason the geometry is invalid along with the point of interest, or “Valid Geometry” if it is valid. Validity is defined by the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidreason(geometry: geometry, flags: int) → string

Returns the reason the geometry is invalid or “Valid Geometry” if it is valid.

+

For flags=0, validity is defined by the OGC spec.

+

For flags=1, validity considers self-intersecting rings forming holes as valid as per ESRI. This is not valid under OGC and CRDB spatial operations may not operate correctly.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidtrajectory(geometry: geometry) → bool

Returns whether the geometry encodes a valid trajectory.

+

Note the geometry must be a LineString with M coordinates.

+
Immutable
st_length(geography: geography) → float

Returns the length of the given geography in meters. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_length(geography: geography, use_spheroid: bool) → float

Returns the length of the given geography in meters.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_length(geometry: geometry) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+
Immutable
st_length(geometry_str: string) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_length2d(geometry: geometry) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+
Immutable
st_linecrossingdirection(linestring_a: geometry, linestring_b: geometry) → int

Returns an integer value defining the behavior of the crossing of lines: +0: lines do not cross, +-1: linestring_b crosses linestring_a from right to left, +1: linestring_b crosses linestring_a from left to right, +-2: linestring_b crosses linestring_a multiple times from right to left, +2: linestring_b crosses linestring_a multiple times from left to right, +-3: linestring_b crosses linestring_a multiple times from left to left, +3: linestring_b crosses linestring_a multiple times from right to right.

+

Note that the top vertex of the segment touching another line does not count as a crossing, but the bottom vertex of segment touching another line is considered a crossing.

+
Immutable
st_linefromencodedpolyline(encoded_polyline: string) → geometry

Creates a LineString from an Encoded Polyline string.

+

Returns valid results only if the polyline was encoded with 5 decimal places.

+

See http://developers.google.com/maps/documentation/utilities/polylinealgorithm

+
Immutable
st_linefromencodedpolyline(encoded_polyline: string, precision: int4) → geometry

Creates a LineString from an Encoded Polyline string.

+

Precision specifies how many decimal places will be preserved in Encoded Polyline. Value should be the same on encoding and decoding, or coordinates will be incorrect.

+

See http://developers.google.com/maps/documentation/utilities/polylinealgorithm

+
Immutable
st_linefrommultipoint(geometry: geometry) → geometry

Creates a LineString from a MultiPoint geometry.

+
Immutable
st_linefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not LineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_linefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_lineinterpolatepoint(geometry: geometry, fraction: float) → geometry

Returns a point along the given LineString which is at given fraction of LineString’s total length.

+

This function utilizes the GEOS module.

+
Immutable
st_lineinterpolatepoints(geometry: geometry, fraction: float) → geometry

Returns one or more points along the LineString which are at integral multiples of the given fraction of the LineString’s total length.

+

Note If the result has zero or one points, it will be returned as a POINT. If it has two or more points, it will be returned as a MULTIPOINT.

+

This function utilizes the GEOS module.

+
Immutable
st_lineinterpolatepoints(geometry: geometry, fraction: float, repeat: bool) → geometry

Returns one or more points along the LineString which are at integral multiples of the given fraction of the LineString’s total length. If repeat is false (default true) then it returns the first point.

+

Note If the result has zero or one points, it will be returned as a POINT. If it has two or more points, it will be returned as a MULTIPOINT.

+

This function utilizes the GEOS module.

+
Immutable
st_linelocatepoint(line: geometry, point: geometry) → float

Returns a float between 0 and 1 representing the location of the closest point on LineString to the given Point, as a fraction of total 2d line length.

+
Immutable
st_linemerge(geometry: geometry) → geometry

Returns a LineString or MultiLineString by joining together constituents of a MultiLineString with matching endpoints. If the input is not a MultiLineString or LineString, an empty GeometryCollection is returned.

+

This function utilizes the GEOS module.

+
Immutable
st_linestringfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not LineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_linestringfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linestringfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linestringfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linesubstring(linestring: geometry, start_fraction: decimal, end_fraction: decimal) → geometry

Return a linestring being a substring of the input one starting and ending at the given fractions of total 2D length. Second and third arguments are float8 values between 0 and 1.

+
Immutable
st_linesubstring(linestring: geometry, start_fraction: float, end_fraction: float) → geometry

Return a linestring being a substring of the input one starting and ending at the given fractions of total 2D length. Second and third arguments are float8 values between 0 and 1.

+
Immutable
st_longestline(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the LineString corresponding to the max distance across every pair of points comprising the given geometries.

+

Note if geometries are the same, it will return the LineString with the maximum distance between the geometry’s vertexes. The function will return the longest line that was discovered first when comparing maximum distances if more than one is found.

+
Immutable
st_m(geometry: geometry) → float

Returns the M coordinate of a geometry if it is a Point.

+
Immutable
st_makebox2d(geometry_a: geometry, geometry_b: geometry) → box2d

Creates a box2d from two points. Errors if arguments are not two non-empty points.

+
Immutable
st_makeenvelope(xmin: float, ymin: float, xmax: float, ymax: float) → geometry

Creates a rectangular Polygon from the minimum and maximum values for X and Y with SRID 0.

+
Immutable
st_makeenvelope(xmin: float, ymin: float, xmax: float, ymax: float, srid: int) → geometry

Creates a rectangular Polygon from the minimum and maximum values for X and Y with the given SRID.

+
Immutable
st_makepoint(x: float, y: float) → geometry

Returns a new Point with the given X and Y coordinates.

+
Immutable
st_makepoint(x: float, y: float, z: float) → geometry

Returns a new Point with the given X, Y, and Z coordinates.

+
Immutable
st_makepoint(x: float, y: float, z: float, m: float) → geometry

Returns a new Point with the given X, Y, Z, and M coordinates.

+
Immutable
st_makepointm(x: float, y: float, m: float) → geometry

Returns a new Point with the given X, Y, and M coordinates.

+
Immutable
st_makepolygon(geometry: geometry) → geometry

Returns a new Polygon with the given outer LineString.

+
Immutable
st_makepolygon(outer: geometry, interior: anyelement[]) → geometry

Returns a new Polygon with the given outer LineString and interior (hole) LineString(s).

+
Immutable
st_makevalid(geometry: geometry) → geometry

Returns a valid form of the given geometry according to the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_maxdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the maximum distance across every pair of points comprising the given geometries. Note if the geometries are the same, it will return the maximum distance between the geometry’s vertexes.

+
Immutable
st_memsize(geometry: geometry) → int

Returns the amount of memory space (in bytes) the geometry takes.

+
Immutable
st_minimumboundingcircle(geometry: geometry) → geometry

Returns the smallest circle polygon that can fully contain a geometry.

+
Immutable
st_minimumboundingcircle(geometry: geometry, num_segs: int) → geometry

Returns the smallest circle polygon that can fully contain a geometry.

+
Immutable
st_minimumboundingradius(geometry: geometry) → tuple{geometry AS center, float AS radius}

Returns a record containing the center point and radius of the smallest circle that can fully contain the given geometry.

+
Immutable
st_minimumclearance(geometry: geometry) → float

Returns the minimum distance a vertex can move before producing an invalid geometry. Returns Infinity if no minimum clearance can be found (e.g. for a single point).

+
Immutable
st_minimumclearanceline(geometry: geometry) → geometry

Returns a LINESTRING spanning the minimum distance a vertex can move before producing an invalid geometry. If no minimum clearance can be found (e.g. for a single point), an empty LINESTRING is returned.

+
Immutable
st_mlinefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mlinefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mlinefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mlinefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mpointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mpointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpolyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mpolyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_mpolyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_mpolyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multi(geometry: geometry) → geometry

Returns the geometry as a new multi-geometry, e.g. converts a POINT to a MULTIPOINT. If the input is already a multitype or collection, it is returned as is.

+
Immutable
st_multilinefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multilinefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multilinestringfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multipointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipolyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipolyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipolygonfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_ndims(geometry: geometry) → int

Returns the number of coordinate dimensions of a given Geometry.

+
Immutable
st_node(geometry: geometry) → geometry

Adds a node on a geometry for each intersection. Resulting geometry is always a MultiLineString.

+
Immutable
st_normalize(geometry: geometry) → geometry

Returns the geometry in its normalized form.

+

This function utilizes the GEOS module.

+
Immutable
st_npoints(geometry: geometry) → int

Returns the number of points in a given Geometry. Works for any shape type.

+
Immutable
st_nrings(geometry: geometry) → int

Returns the number of rings in a Polygon Geometry. Returns 0 if the shape is not a Polygon.

+
Immutable
st_numgeometries(geometry: geometry) → int

Returns the number of shapes inside a given Geometry.

+
Immutable
st_numinteriorring(geometry: geometry) → int

Returns the number of interior rings in a Polygon Geometry. Returns NULL if the shape is not a Polygon.

+
Immutable
st_numinteriorrings(geometry: geometry) → int

Returns the number of interior rings in a Polygon Geometry. Returns NULL if the shape is not a Polygon.

+
Immutable
st_numpoints(geometry: geometry) → int

Returns the number of points in a LineString. Returns NULL if the Geometry is not a LineString.

+
Immutable
st_orderingequals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is exactly equal to geometry_b, having all coordinates in the same order, as well as the same type, SRID, bounding box, and so on.

+
Immutable
st_orientedenvelope(geometry: geometry) → geometry

Returns a minimum rotated rectangle enclosing a geometry. +Note that more than one minimum rotated rectangle may exist. +May return a Point or LineString in the case of degenerate inputs.

+
Immutable
st_overlaps(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a intersects but does not completely contain geometry_b, or vice versa. “Does not completely” implies ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = false.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_perimeter(geography: geography) → float

Returns the perimeter of the given geography in meters. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_perimeter(geography: geography, use_spheroid: bool) → float

Returns the perimeter of the given geography in meters.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_perimeter(geometry: geometry) → float

Returns the perimeter of the given geometry.

+

Note ST_Perimeter is only valid for Polygon - use ST_Length for LineString.

+

This function utilizes the GEOS module.

+
Immutable
st_perimeter2d(geometry: geometry) → float

Returns the perimeter of the given geometry.

+

Note ST_Perimeter is only valid for Polygon - use ST_Length for LineString.

+

This function utilizes the GEOS module.

+
Immutable
st_point(x: float, y: float) → geometry

Returns a new Point with the given X and Y coordinates.

+
Immutable
st_pointfromgeohash(geohash: string) → geometry

Return a POINT Geometry from a GeoHash string with max precision.

+
Immutable
st_pointfromgeohash(geohash: string, precision: int) → geometry

Return a POINT Geometry from a GeoHash string with supplied precision.

+
Immutable
st_pointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Point, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_pointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointinsidecircle(geometry: geometry, x_coord: float, y_coord: float, radius: float) → bool

Returns true if the geometry is a point and is inside the circle. Returns false otherwise.

+
Immutable
st_pointn(geometry: geometry, n: int) → geometry

Returns the n-th Point of a LineString (1-indexed). Returns NULL if out of bounds or not a LineString.

+
Immutable
st_pointonsurface(geometry: geometry) → geometry

Returns a point that intersects with the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_points(geometry: geometry) → geometry

Returns all coordinates in the given Geometry as a MultiPoint, including duplicates.

+
Immutable
st_polyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Polygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_polyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygon(geometry: geometry, srid: int) → geometry

Returns a new Polygon from the given LineString and sets its SRID. It is equivalent to ST_MakePolygon with a single argument followed by ST_SetSRID.

+
Immutable
st_polygonfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Polygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_polygonfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygonfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygonfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_project(geography: geography, distance: float, azimuth: float) → geography

Returns a point projected from a start point along a geodesic using a given distance and azimuth (bearing). +This is known as the direct geodesic problem.

+

The distance is given in meters. Negative values are supported.

+

The azimuth (also known as heading or bearing) is given in radians. It is measured clockwise from true north (azimuth zero). +East is azimuth π/2 (90 degrees); south is azimuth π (180 degrees); west is azimuth 3π/2 (270 degrees). +Negative azimuth values and values greater than 2π (360 degrees) are supported.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry) → string

Returns the DE-9IM spatial relation between geometry_a and geometry_b.

+

This function utilizes the GEOS module.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry, bnr: int) → string

Returns the DE-9IM spatial relation between geometry_a and geometry_b using the given boundary node rule (1:OGC/MOD2, 2:Endpoint, 3:MultivalentEndpoint, 4:MonovalentEndpoint).

+

This function utilizes the GEOS module.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry, pattern: string) → bool

Returns whether the DE-9IM spatial relation between geometry_a and geometry_b matches the DE-9IM pattern.

+

This function utilizes the GEOS module.

+
Immutable
st_relatematch(intersection_matrix: string, pattern: string) → bool

Returns whether the given DE-9IM intersection matrix satisfies the given pattern.

+
Immutable
st_removepoint(line_string: geometry, index: int) → geometry

Removes the Point at the given 0-based index and returns the modified LineString geometry.

+
Immutable
st_removerepeatedpoints(geometry: geometry) → geometry

Returns a geometry with repeated points removed.

+
Immutable
st_removerepeatedpoints(geometry: geometry, tolerance: float) → geometry

Returns a geometry with repeated points removed, within the given distance tolerance.

+
Immutable
st_reverse(geometry: geometry) → geometry

Returns a modified geometry by reversing the order of its vertices.

+
Immutable
st_rotate(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated around the origin by a rotation angle.

+
Immutable
st_rotate(g: geometry, angle_radians: float, origin_point: geometry) → geometry

Returns a modified Geometry whose coordinates are rotated around the provided origin by a rotation angle.

+
Immutable
st_rotate(g: geometry, angle_radians: float, origin_x: float, origin_y: float) → geometry

Returns a modified Geometry whose coordinates are rotated around the provided origin by a rotation angle.

+
Immutable
st_rotatex(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the x axis by a rotation angle.

+
Immutable
st_rotatey(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the y axis by a rotation angle.

+
Immutable
st_rotatez(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the z axis by a rotation angle.

+
Immutable
st_s2covering(geography: geography) → geography

Returns a geography which represents the S2 covering used by the index using the default index configuration.

+
Immutable
st_s2covering(geography: geography, settings: string) → geography

Returns a geography which represents the S2 covering used by the index using the index configuration specified by the settings parameter.

+

The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX ... WITH (...), e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using SELECT ST_S2Covering(geography, 's2_max_level=15,s2_level_mod=3').

+
Immutable
st_s2covering(geometry: geometry) → geometry

Returns a geometry which represents the S2 covering used by the index using the default index configuration.

+
Immutable
st_s2covering(geometry: geometry, settings: string) → geometry

Returns a geometry which represents the S2 covering used by the index using the index configuration specified by the settings parameter.

+

The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX ... WITH (...), e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using SELECT ST_S2Covering(geometry, 's2_max_level=15,s2_level_mod=3')

+
Immutable
st_scale(g: geometry, factor: geometry) → geometry

Returns a modified Geometry scaled by taking in a Geometry as the factor.

+
Immutable
st_scale(g: geometry, factor: geometry, origin: geometry) → geometry

Returns a modified Geometry scaled by the Geometry factor relative to a false origin.

+
Immutable
st_scale(geometry: geometry, x_factor: float, y_factor: float) → geometry

Returns a modified Geometry scaled by the given factors.

+
Immutable
st_segmentize(geography: geography, max_segment_length_meters: float) → geography

Returns a modified Geography having no segment longer than the given max_segment_length meters.

+

The calculations are done on a sphere.

+

This function utilizes the S2 library for spherical calculations.

+
Immutable
st_segmentize(geometry: geometry, max_segment_length: float) → geometry

Returns a modified Geometry having no segment longer than the given max_segment_length. Length units are in units of spatial reference.

+
Immutable
st_setpoint(line_string: geometry, index: int, point: geometry) → geometry

Sets the Point at the given 0-based index and returns the modified LineString geometry.

+
Immutable
st_setsrid(geography: geography, srid: int) → geography

Sets a Geography to a new SRID without transforming the coordinates.

+
Immutable
st_setsrid(geometry: geometry, srid: int) → geometry

Sets a Geometry to a new SRID without transforming the coordinates.

+
Immutable
st_sharedpaths(geometry_a: geometry, geometry_b: geometry) → geometry

Returns a collection containing paths shared by the two input geometries.

+

Those going in the same direction are in the first element of the collection, +those going in the opposite direction are in the second element. +The paths themselves are given in the direction of the first geometry.

+
Immutable
st_shiftlongitude(geometry: geometry) → geometry

Returns a modified version of a geometry in which the longitude (X coordinate) of each point is incremented by 360 if it is <0 and decremented by 360 if it is >180. The result is only meaningful if the coordinates are in longitude/latitude.

+
Immutable
st_shortestline(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the LineString corresponding to the minimum distance across every pair of points comprising the given geometries.

+

Note if geometries are the same, it will return the LineString with the minimum distance between the geometry’s vertexes. The function will return the shortest line that was discovered first when comparing minimum distances if more than one is found.

+
Immutable
st_simplify(geometry: geometry, tolerance: float) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm.

+

This function utilizes the GEOS module.

+
Immutable
st_simplify(geometry: geometry, tolerance: float, preserve_collapsed: bool) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm, retaining objects that would be too small given the tolerance if preserve_collapsed is set to true.

+
Immutable
st_simplifypreservetopology(geometry: geometry, tolerance: float) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm, avoiding the creation of invalid geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_snap(input: geometry, target: geometry, tolerance: float) → geometry

Snaps the vertices and segments of the input geometry to the target geometry’s vertices. +Tolerance is used to control where snapping is performed. The result geometry is the input geometry with the vertices snapped. +If no snapping occurs then the input geometry is returned unchanged.

+
Immutable
st_snaptogrid(geometry: geometry, origin: geometry, size_x: float, size_y: float, size_z: float, size_m: float) → geometry

Snap a geometry to a grid defined by the given origin and X, Y, Z, and M cell sizes. Any dimension with a 0 cell size will not be snapped.

+
Immutable
st_snaptogrid(geometry: geometry, origin_x: float, origin_y: float, size_x: float, size_y: float) → geometry

Snap a geometry to a grid with X coordinates snapped to size_x and Y coordinates snapped to size_y based on an origin of (origin_x, origin_y).

+
Immutable
st_snaptogrid(geometry: geometry, size: float) → geometry

Snap a geometry to a grid of the given size. The specified size is only used to snap X and Y coordinates.

+
Immutable
st_snaptogrid(geometry: geometry, size_x: float, size_y: float) → geometry

Snap a geometry to a grid with X coordinates snapped to size_x and Y coordinates snapped to size_y.

+
Immutable
st_srid(geography: geography) → int

Returns the Spatial Reference Identifier (SRID) for the ST_Geography as defined in spatial_ref_sys table.

+
Immutable
st_srid(geometry: geometry) → int

Returns the Spatial Reference Identifier (SRID) for the ST_Geometry as defined in spatial_ref_sys table.

+
Immutable
st_startpoint(geometry: geometry) → geometry

Returns the first point of a geometry which has shape LineString. Returns NULL if the geometry is not a LineString.

+
Immutable
st_subdivide(geometry: geometry) → geometry

Returns a geometry divided into parts, where each part contains no more than 256 vertices.

+
Immutable
st_subdivide(geometry: geometry, max_vertices: int4) → geometry

Returns a geometry divided into parts, where each part contains no more than the number of vertices provided.

+
Immutable
st_summary(geography: geography) → string

Returns a text summary of the contents of the geography.

+

Flags shown in square brackets after the geometry type have the following meaning:

+
    +
  • M: has M coordinate
  • +
  • Z: has Z coordinate
  • +
  • B: has a cached bounding box
  • +
  • G: is geography
  • +
  • S: has spatial reference system
  • +
+
Immutable
st_summary(geometry: geometry) → string

Returns a text summary of the contents of the geometry.

+

Flags shown in square brackets after the geometry type have the following meaning:

+
    +
  • M: has M coordinate
  • +
  • Z: has Z coordinate
  • +
  • B: has a cached bounding box
  • +
  • G: is geography
  • +
  • S: has spatial reference system
  • +
+
Immutable
st_swapordinates(geometry: geometry, swap_ordinate_string: string) → geometry

Returns a version of the given geometry with given ordinates swapped. +The swap_ordinate_string parameter is a 2-character string naming the ordinates to swap. Valid names are: x, y, z and m.

+
Immutable
st_symdifference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the symmetric difference of both geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_symmetricdifference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the symmetric difference of both geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4, bounds: geometry) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4, bounds: geometry, margin: float) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_touches(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if the only points in common between geometry_a and geometry_b are on the boundary. Note points do not touch other points.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_transform(geometry: geometry, from_proj_text: string, srid: int) → geometry

Transforms a geometry into the coordinate reference system assuming the from_proj_text to the new to_proj_text by projecting its coordinates. The supplied SRID is set on the new geometry.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, from_proj_text: string, to_proj_text: string) → geometry

Transforms a geometry into the coordinate reference system assuming the from_proj_text to the new to_proj_text by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, srid: int) → geometry

Transforms a geometry into the given SRID coordinate reference system by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, to_proj_text: string) → geometry

Transforms a geometry into the coordinate reference system referenced by the projection text by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_translate(g: geometry, delta_x: float, delta_y: float) → geometry

Returns a modified Geometry translated by the given deltas.

+
Immutable
st_translate(g: geometry, delta_x: float, delta_y: float, delta_z: float) → geometry

Returns a modified Geometry translated by the given deltas.

+
Immutable
st_transscale(geometry: geometry, delta_x: float, delta_y: float, x_factor: float, y_factor: float) → geometry

Translates the geometry using the deltaX and deltaY args, then scales it using the XFactor, YFactor args, working in 2D only.

+
Immutable
st_unaryunion(geometry: geometry) → geometry

Returns a union of the components for any geometry or geometry collection provided. Dissolves boundaries of a multipolygon.

+
Immutable
st_voronoilines(geometry: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoilines(geometry: geometry, tolerance: float) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoilines(geometry: geometry, tolerance: float, extend_to: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoipolygons(geometry: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_voronoipolygons(geometry: geometry, tolerance: float) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_voronoipolygons(geometry: geometry, tolerance: float, extend_to: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_within(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is completely inside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_wkbtosql(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation.

+
Immutable
st_wkttosql(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_x(geometry: geometry) → float

Returns the X coordinate of a geometry if it is a Point.

+
Immutable
st_xmax(box2d: box2d) → float

Returns the maximum X ordinate of a box2d.

+
Immutable
st_xmax(geometry: geometry) → float

Returns the maximum X ordinate of a geometry.

+
Immutable
st_xmin(box2d: box2d) → float

Returns the minimum X ordinate of a box2d.

+
Immutable
st_xmin(geometry: geometry) → float

Returns the minimum X ordinate of a geometry.

+
Immutable
st_y(geometry: geometry) → float

Returns the Y coordinate of a geometry if it is a Point.

+
Immutable
st_ymax(box2d: box2d) → float

Returns the maximum Y ordinate of a box2d.

+
Immutable
st_ymax(geometry: geometry) → float

Returns the maximum Y ordinate of a geometry.

+
Immutable
st_ymin(box2d: box2d) → float

Returns the minimum Y ordinate of a box2d.

+
Immutable
st_ymin(geometry: geometry) → float

Returns the minimum Y ordinate of a geometry.

+
Immutable
st_z(geometry: geometry) → float

Returns the Z coordinate of a geometry if it is a Point.

+
Immutable
st_zmflag(geometry: geometry) → int2

Returns a code based on the ZM coordinate dimension of a geometry (XY = 0, XYM = 1, XYZ = 2, XYZM = 3).

+
Immutable
+ +### String and byte functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
ascii(val: string) → int

Returns the character code of the first character in val. Despite the name, the function supports Unicode too.

+
Immutable
bit_count(val: bytes) → int

Calculates the number of bits set used to represent val.

+
Immutable
bit_count(val: varbit) → int

Calculates the number of bits set used to represent val.

+
Immutable
bit_length(val: bytes) → int

Calculates the number of bits used to represent val.

+
Immutable
bit_length(val: string) → int

Calculates the number of bits used to represent val.

+
Immutable
bit_length(val: varbit) → int

Calculates the number of bits used to represent val.

+
Immutable
bitmask_and(a: string, b: string) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: string, b: varbit) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: varbit, b: string) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: varbit, b: varbit) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: string, b: string) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: string, b: varbit) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: varbit, b: string) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: varbit, b: varbit) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: string, b: string) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: string, b: varbit) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: varbit, b: string) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: varbit, b: varbit) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
btrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the beginning or end of input (applies recursively).

+

For example, btrim('doggie', 'eod') returns ggi.

+
Immutable
btrim(val: string) → string

Removes all spaces from the beginning and end of val.

+
Immutable
char_length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
char_length(val: string) → int

Calculates the number of characters in val.

+
Immutable
character_length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
character_length(val: string) → int

Calculates the number of characters in val.

+
Immutable
chr(val: int) → string

Returns the character with the code given in val. Inverse function of ascii().

+
Immutable
compress(data: bytes, codec: string) → bytes

Compress data with the specified codec (‘gzip’, ‘lz4’, ‘snappy’, ‘zstd’).

+
Immutable
concat(any...) → string

Concatenates a comma-separated list of strings.

+
Immutable
concat_ws(string, any...) → string

Uses the first argument as a separator between the concatenation of the subsequent arguments.

+

For example concat_ws('!','wow','great') returns wow!great.

+
Immutable
convert_from(str: bytes, enc: string) → string

Decode the bytes in str into a string using encoding enc. Supports encodings ‘UTF8’ and ‘LATIN1’.

+
Immutable
convert_to(str: string, enc: string) → bytes

Encode the string str as a byte array using encoding enc. Supports encodings ‘UTF8’ and ‘LATIN1’.

+
Immutable
decode(text: string, format: string) → bytes

Decodes data using format (hex / escape / base64).

+
Immutable
decompress(data: bytes, codec: string) → bytes

Decompress data with the specified codec (‘gzip’, ‘lz4’, ‘snappy’, ‘zstd’).

+
Immutable
difference(source: string, target: string) → int

Convert two strings to their Soundex codes and report the number of matching code positions.

+
Immutable
encode(data: bytes, format: string) → string

Encodes data using format (hex / escape / base64).

+
Immutable
format(string, any...) → string

Interprets the first argument as a format string similar to C sprintf and interpolates the remaining arguments.

+
Stable
from_ip(val: bytes) → string

Converts the byte string representation of an IP to its character string representation.

+
Immutable
from_uuid(val: bytes) → string

Converts the byte string representation of a UUID to its character string representation.

+
Immutable
get_bit(bit_string: varbit, index: int) → int

Extracts a bit at given index in the bit array.

+
Immutable
get_bit(byte_string: bytes, index: int) → int

Extracts a bit at the given index in the byte array.

+
Immutable
get_byte(byte_string: bytes, index: int) → int

Extracts a byte at the given index in the byte array.

+
Immutable
initcap(val: string) → string

Capitalizes the first letter of val.

+
Immutable
left(input: bytes, return_set: int) → bytes

Returns the first return_set bytes from input.

+
Immutable
left(input: string, return_set: int) → string

Returns the first return_set characters from input.

+
Immutable
length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
length(val: string) → int

Calculates the number of characters in val.

+
Immutable
length(val: varbit) → int

Calculates the number of bits in val.

+
Immutable
lower(val: string) → string

Converts all characters in val to their lower-case equivalents.

+
Immutable
lpad(string: string, length: int) → string

Pads string to length by adding ’ ’ to the left of string. If string is longer than length it is truncated.

+
Immutable
lpad(string: string, length: int, fill: string) → string

Pads string by adding fill to the left of string to make it length. If string is longer than length it is truncated.

+
Immutable
ltrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the beginning (left-hand side) of input (applies recursively).

+

For example, ltrim('doggie', 'od') returns ggie.

+
Immutable
ltrim(val: string) → string

Removes all spaces from the beginning (left-hand side) of val.

+
Immutable
md5(bytes...) → string

Calculates the MD5 hash value of a set of values.

+
Leakproof
md5(string...) → string

Calculates the MD5 hash value of a set of values.

+
Leakproof
octet_length(val: bytes) → int

Calculates the number of bytes used to represent val.

+
Immutable
octet_length(val: string) → int

Calculates the number of bytes used to represent val.

+
Immutable
octet_length(val: varbit) → int

Calculates the number of bits used to represent val.

+
Immutable
overlay(input: string, overlay_val: string, start_pos: int) → string

Replaces characters in input with overlay_val starting at start_pos (begins at 1).

+

For example, overlay('doggie', 'CAT', 2) returns dCATie.

+
Immutable
overlay(input: string, overlay_val: string, start_pos: int, end_pos: int) → string

Deletes the characters in input between start_pos and end_pos (count starts at 1), and then insert overlay_val at start_pos.

+
Immutable
parse_date(string: string, datestyle: string) → date

Parses a date assuming it is in format specified by DateStyle.

+
Immutable
parse_date(val: string) → date

Parses a date assuming it is in MDY format.

+
Immutable
parse_ident(qualified_identifier: string) → string[]

Splits qualified_identifier into an array of identifiers, removing any quoting of individual identifiers. Extra characters after the last identifier are considered an error

+
Immutable
parse_ident(qualified_identifier: string, strict: bool) → string[]

Splits qualified_identifier into an array of identifiers, removing any quoting of individual identifiers. If strict is false, then extra characters after the last identifier are ignored.

+
Immutable
parse_interval(string: string, style: string) → interval

Convert a string to an interval using the given IntervalStyle.

+
Immutable
parse_interval(val: string) → interval

Convert a string to an interval assuming the Postgres IntervalStyle.

+
Immutable
parse_time(string: string, timestyle: string) → time

Parses a time assuming the date (if any) is in format specified by DateStyle.

+
Immutable
parse_time(val: string) → time

Parses a time assuming the date (if any) is in MDY format.

+
Immutable
parse_timestamp(string: string, datestyle: string) → timestamp

Convert a string containing an absolute timestamp to the corresponding timestamp assuming dates formatted using the given DateStyle.

+
Immutable
parse_timestamp(val: string) → timestamp

Convert a string containing an absolute timestamp to the corresponding timestamp assuming dates are in MDY format.

+
Immutable
parse_timetz(string: string, timestyle: string) → timetz

Parses a timetz assuming the date (if any) is in format specified by DateStyle.

+
Immutable
parse_timetz(val: string) → timetz

Parses a timetz assuming the date (if any) is in MDY format.

+
Immutable
prettify_statement(statement: string, line_width: int, align_mode: int, case_mode: int) → string

Prettifies a statement using a user-configured pretty-printing config. +Align mode values range from 0 - 3, representing no, partial, full, and extra alignment respectively. +Case mode values range between 0 - 1, representing lower casing and upper casing respectively.

+
Immutable
prettify_statement(val: string) → string

Prettifies a statement using the default pretty-printing config.

+
Immutable
quote_ident(val: string) → string

Return val suitably quoted to serve as identifier in a SQL statement.

+
Immutable
quote_literal(val: string) → string

Return val suitably quoted to serve as string literal in a SQL statement.

+
Immutable
quote_literal(val: anyelement) → string

Coerce val to a string and then quote it as a literal.

+
Stable
quote_nullable(val: string) → string

Coerce val to a string and then quote it as a literal. If val is NULL, returns ‘NULL’.

+
Immutable
quote_nullable(val: anyelement) → string

Coerce val to a string and then quote it as a literal. If val is NULL, returns ‘NULL’.

+
Stable
regexp_extract(input: string, regex: string) → string

Returns the first match for the Regular Expression regex in input.

+
Immutable
regexp_replace(input: string, regex: string, replace: string) → string

Replaces matches for the Regular Expression regex in input with the Regular Expression replace.

+
Immutable
regexp_replace(input: string, regex: string, replace: string, flags: string) → string

Replaces matches for the regular expression regex in input with the regular expression replace using flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
repeat(input: string, repeat_counter: int) → string

Concatenates input repeat_counter number of times.

+

For example, repeat('dog', 2) returns dogdog.

+
Immutable
replace(input: string, find: string, replace: string) → string

Replaces all occurrences of find with replace in input

+
Immutable
reverse(val: string) → string

Reverses the order of the string’s characters.

+
Immutable
right(input: bytes, return_set: int) → bytes

Returns the last return_set bytes from input.

+
Immutable
right(input: string, return_set: int) → string

Returns the last return_set characters from input.

+
Immutable
rpad(string: string, length: int) → string

Pads string to length by adding ’ ’ to the right of string. If string is longer than length it is truncated.

+
Immutable
rpad(string: string, length: int, fill: string) → string

Pads string to length by adding fill to the right of string. If string is longer than length it is truncated.

+
Immutable
rtrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the end (right-hand side) of input (applies recursively).

+

For example, rtrim('doggie', 'ei') returns dogg.

+
Immutable
rtrim(val: string) → string

Removes all spaces from the end (right-hand side) of val.

+
Immutable
set_bit(bit_string: varbit, index: int, to_set: int) → varbit

Updates a bit at given index in the bit array.

+
Immutable
set_bit(byte_string: bytes, index: int, to_set: int) → bytes

Updates a bit at the given index in the byte array.

+
Immutable
set_byte(byte_string: bytes, index: int, to_set: int) → bytes

Updates a byte at the given index in the byte array.

+
Immutable
sha1(bytes...) → string

Calculates the SHA1 hash value of a set of values.

+
Leakproof
sha1(string...) → string

Calculates the SHA1 hash value of a set of values.

+
Leakproof
sha224(bytes...) → string

Calculates the SHA224 hash value of a set of values.

+
Leakproof
sha224(string...) → string

Calculates the SHA224 hash value of a set of values.

+
Leakproof
sha256(bytes...) → string

Calculates the SHA256 hash value of a set of values.

+
Leakproof
sha256(string...) → string

Calculates the SHA256 hash value of a set of values.

+
Leakproof
sha384(bytes...) → string

Calculates the SHA384 hash value of a set of values.

+
Leakproof
sha384(string...) → string

Calculates the SHA384 hash value of a set of values.

+
Leakproof
sha512(bytes...) → string

Calculates the SHA512 hash value of a set of values.

+
Leakproof
sha512(string...) → string

Calculates the SHA512 hash value of a set of values.

+
Leakproof
similar_escape(pattern: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern.

+
Immutable
similar_escape(pattern: string, escape: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern using escape as an escape token.

+
Immutable
similar_to_escape(pattern: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern.

+
Immutable
similar_to_escape(pattern: string, escape: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern using escape as an escape token.

+
Immutable
similar_to_escape(unescaped: string, pattern: string, escape: string) → bool

Matches unescaped with pattern using escape as an escape token.

+
Immutable
split_part(input: string, delimiter: string, return_index_pos: int) → string

Splits input using delimiter and returns the field at return_index_pos (starting from 1). If return_index_pos is negative, it returns the |return_index_pos|'th field from the end.

+

For example, split_part('123.456.789.0', '.', 3) returns 789.

+
Immutable
strpos(input: bytes, find: bytes) → int

Calculates the position where the byte subarray find begins in input.

+
Immutable
strpos(input: string, find: string) → int

Calculates the position where the string find begins in input.

+

For example, strpos('doggie', 'gie') returns 4.

+
Immutable
strpos(input: varbit, find: varbit) → int

Calculates the position where the bit subarray find begins in input.

+
Immutable
substr(input: bytes, start_pos: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: bytes, start_pos: int, length: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substr(input: string, regex: string) → string

Returns a substring of input that matches the regular expression regex.

+
Immutable
substr(input: string, regex: string, escape_char: string) → string

Returns a substring of input that matches the regular expression regex using escape_char as your escape character instead of \.

+
Immutable
substr(input: string, start_pos: int) → string

Returns a substring of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: string, start_pos: int, length: int) → string

Returns a substring of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substr(input: varbit, start_pos: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: varbit, start_pos: int, length: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: bytes, start_pos: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: bytes, start_pos: int, length: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: string, regex: string) → string

Returns a substring of input that matches the regular expression regex.

+
Immutable
substring(input: string, regex: string, escape_char: string) → string

Returns a substring of input that matches the regular expression regex using escape_char as your escape character instead of \.

+
Immutable
substring(input: string, start_pos: int) → string

Returns a substring of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: string, start_pos: int, length: int) → string

Returns a substring of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: varbit, start_pos: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: varbit, start_pos: int, length: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring_index(input: string, delim: string, count: int) → string

Returns a substring of input before count occurrences of delim. +If count is positive, the leftmost part is returned. If count is negative, the rightmost part is returned.

+
Immutable
to_char_with_style(date: date, datestyle: string) → string

Convert a date to a string assuming the string is formatted using the given DateStyle.

+
Immutable
to_char_with_style(interval: interval, style: string) → string

Convert an interval to a string using the given IntervalStyle.

+
Immutable
to_char_with_style(timestamp: timestamp, datestyle: string) → string

Convert a timestamp to a string assuming the string is formatted using the given DateStyle.

+
Immutable
to_english(val: int) → string

This function enunciates the value of its argument using English cardinals.

+
Immutable
to_hex(val: bytes) → string

Converts val to its hexadecimal representation.

+
Immutable
to_hex(val: int) → string

Converts val to its hexadecimal representation.

+
Immutable
to_hex(val: string) → string

Converts val to its hexadecimal representation.

+
Immutable
to_ip(val: string) → bytes

Converts the character string representation of an IP to its byte string representation.

+
Immutable
to_uuid(val: string) → bytes

Converts the character string representation of a UUID to its byte string representation.

+
Immutable
translate(input: string, find: string, replace: string) → string

In input, replaces the first character from find with the first character in replace; repeat for each character in find.

+

For example, translate('doggie', 'dog', '123'); returns 1233ie.

+
Immutable
ulid_to_uuid(val: string) → uuid

Converts a ULID string to its UUID-encoded representation.

+
Immutable
unaccent(val: string) → string

Removes accents (diacritic signs) from the text provided in val.

+
Immutable
upper(val: string) → string

Converts all characters in val to their upper-case equivalents.

+
Immutable
+ +### System info functions + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cluster_logical_timestamp() → decimal

Returns the logical time of the current transaction as +a CockroachDB HLC in decimal form.

+

Note that uses of this function disable server-side optimizations and +may increase either contention or retry errors, or both.

+

Returns an error if run in a transaction with an isolation level weaker than SERIALIZABLE.

+
Volatile
current_database() → string

Returns the current database.

+
Stable
current_schema() → string

Returns the current schema.

+
Stable
current_schemas(include_pg_catalog: bool) → string[]

Returns the valid schemas in the search path.

+
Stable
current_user() → string

Returns the current user. This function is provided for compatibility with PostgreSQL.

+
Stable
information_schema.crdb_datums_to_bytes(any...) → bytes

Converts datums into key-encoded bytes. Supports NULLs and all data types which may be used in index keys

+
Immutable
session_user() → string

Returns the session user. This function is provided for compatibility with PostgreSQL.

+
Stable
to_regclass(text: string) → regtype

Translates a textual relation name to its OID

+
Stable
to_regnamespace(text: string) → regtype

Translates a textual schema name to its OID

+
Stable
to_regproc(text: string) → regtype

Translates a textual function or procedure name to its OID

+
Stable
to_regprocedure(text: string) → regtype

Translates a textual function or procedure name (with argument types) to its OID

+
Stable
to_regrole(text: string) → regtype

Translates a textual role name to its OID

+
Stable
to_regtype(text: string) → regtype

Translates a textual type name to its OID

+
Stable
version() → string

Returns the node’s version of CockroachDB.

+
Volatile
+ +### System repair functions + + + + + +
Function → ReturnsDescriptionVolatility
information_schema.crdb_rewrite_inline_hints(statement_fingerprint: string, donor_sql: string) → int

This function adds an inline-hints rewrite rule for a statement fingerprint. It returns the hint ID of the newly created rewrite rule. The rewrite rule only applies to matching statement fingerprints. It first removes all inline hints from the target statement, and then copies inline hints from the donor statement.

+
Volatile
+ +### TIMETZ functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
current_time() → time

Returns the current transaction’s time with no time zone.

+
Stable
current_time() → timetz

Returns the current transaction’s time with time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
current_time(precision: int) → time

Returns the current transaction’s time with no time zone.

+
Stable
current_time(precision: int) → timetz

Returns the current transaction’s time with time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime() → time

Returns the current transaction’s time with no time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime() → timetz

Returns the current transaction’s time with time zone.

+
Stable
localtime(precision: int) → time

Returns the current transaction’s time with no time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime(precision: int) → timetz

Returns the current transaction’s time with time zone.

+
Stable
+ +### Trigrams functions + + + + + + +
Function → ReturnsDescriptionVolatility
show_trgm(input: string) → string[]

Returns an array of all the trigrams in the given string.

+
Immutable
similarity(left: string, right: string) → float

Returns a number that indicates how similar the two arguments are. The range of the result is zero (indicating that the two strings are completely dissimilar) to one (indicating that the two strings are identical).

+
Immutable
+ +### UUID functions + + + + + +
Function → ReturnsDescriptionVolatility
uuid_to_ulid(val: uuid) → string

Converts a UUID-encoded ULID to its string representation.

+
Immutable
+ +### Compatibility functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
col_description(table_oid: oid, column_number: int) → string

Returns the comment for a table column, which is specified by the OID of its table and its column number. (obj_description cannot be used for table columns, since columns do not have OIDs of their own.)

+
Stable
current_setting(setting_name: string) → string

System info

+
Stable
current_setting(setting_name: string, missing_ok: bool) → string

System info

+
Stable
format_type(type_oid: oid, typemod: int) → string

Returns the SQL name of a data type that is identified by its type OID and possibly a type modifier. Currently, the type modifier is ignored.

+
Stable
getdatabaseencoding() → string

Returns the current encoding name used by the database.

+
Stable
has_any_column_privilege(table: string, privilege: string) → bool

Returns whether or not the current user has privileges for any column of table.

+
Stable
has_any_column_privilege(table: oid, privilege: string) → bool

Returns whether or not the current user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: string, table: string, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: string, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: oid, table: string, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: oid, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_column_privilege(table: string, column: int, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: string, column: string, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: oid, column: int, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: oid, column: string, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(user: string, table: string, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: string, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: oid, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: oid, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: string, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: string, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: oid, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: oid, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_database_privilege(database: string, privilege: string) → bool

Returns whether or not the current user has privileges for database.

+
Stable
has_database_privilege(database: oid, privilege: string) → bool

Returns whether or not the current user has privileges for database.

+
Stable
has_database_privilege(user: string, database: string, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: string, database: oid, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: oid, database: string, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: oid, database: oid, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_foreign_data_wrapper_privilege(fdw: string, privilege: string) → bool

Returns whether or not the current user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(fdw: oid, privilege: string) → bool

Returns whether or not the current user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: string, fdw: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: string, fdw: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: oid, fdw: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: oid, fdw: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_function_privilege(function: string, privilege: string) → bool

Returns whether or not the current user has privileges for function.

+
Stable
has_function_privilege(function: oid, privilege: string) → bool

Returns whether or not the current user has privileges for function.

+
Stable
has_function_privilege(user: string, function: string, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: string, function: oid, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: oid, function: string, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: oid, function: oid, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_language_privilege(language: string, privilege: string) → bool

Returns whether or not the current user has privileges for language.

+
Stable
has_language_privilege(language: oid, privilege: string) → bool

Returns whether or not the current user has privileges for language.

+
Stable
has_language_privilege(user: string, language: string, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: string, language: oid, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: oid, language: string, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: oid, language: oid, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_schema_privilege(schema: string, privilege: string) → bool

Returns whether or not the current user has privileges for schema.

+
Stable
has_schema_privilege(schema: oid, privilege: string) → bool

Returns whether or not the current user has privileges for schema.

+
Stable
has_schema_privilege(user: string, schema: string, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: string, schema: oid, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: oid, schema: string, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: oid, schema: oid, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_sequence_privilege(sequence: string, privilege: string) → bool

Returns whether or not the current user has privileges for sequence.

+
Stable
has_sequence_privilege(sequence: oid, privilege: string) → bool

Returns whether or not the current user has privileges for sequence.

+
Stable
has_sequence_privilege(user: string, sequence: string, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: string, sequence: oid, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: oid, sequence: string, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: oid, sequence: oid, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_server_privilege(server: string, privilege: string) → bool

Returns whether or not the current user has privileges for foreign server.

+
Stable
has_server_privilege(server: oid, privilege: string) → bool

Returns whether or not the current user has privileges for foreign server.

+
Stable
has_server_privilege(user: string, server: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: string, server: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: oid, server: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: oid, server: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_system_privilege(privilege: string) → bool

Returns whether or not the current user has privileges for system.

+
Stable
has_system_privilege(user: string, privilege: string) → bool

Returns whether or not the user has privileges for system.

+
Stable
has_system_privilege(user: oid, privilege: string) → bool

Returns whether or not the user has privileges for system.

+
Stable
has_table_privilege(table: string, privilege: string) → bool

Returns whether or not the current user has privileges for table.

+
Stable
has_table_privilege(table: oid, privilege: string) → bool

Returns whether or not the current user has privileges for table.

+
Stable
has_table_privilege(user: string, table: string, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: string, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: oid, table: string, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: oid, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_tablespace_privilege(tablespace: string, privilege: string) → bool

Returns whether or not the current user has privileges for tablespace.

+
Stable
has_tablespace_privilege(tablespace: oid, privilege: string) → bool

Returns whether or not the current user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: string, tablespace: string, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: string, tablespace: oid, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: oid, tablespace: string, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: oid, tablespace: oid, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_type_privilege(type: string, privilege: string) → bool

Returns whether or not the current user has privileges for type.

+
Stable
has_type_privilege(type: oid, privilege: string) → bool

Returns whether or not the current user has privileges for type.

+
Stable
has_type_privilege(user: string, type: string, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: string, type: oid, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: oid, type: string, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: oid, type: oid, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
information_schema._pg_numeric_precision(typid: oid, typmod: int4) → int

Returns the precision of the given type with type modifier

+
Immutable
information_schema._pg_numeric_precision_radix(typid: oid, typmod: int4) → int

Returns the radix of the given type with type modifier

+
Immutable
information_schema._pg_numeric_scale(typid: oid, typmod: int4) → int

Returns the scale of the given type with type modifier

+
Immutable
nameconcatoid(name: string, oid: oid) → name

Used in the information_schema to produce specific_name columns, which are supposed to be unique per schema. The result is the same as ($1::text || ‘_’ || $2::text)::name except that, if it would not fit in 63 characters, we make it do so by truncating the name input (not the oid).

+
Immutable
obj_description(object_oid: oid) → string

Returns the comment for a database object specified by its OID alone. This is deprecated since there is no guarantee that OIDs are unique across different system catalogs; therefore, the wrong comment might be returned.

+
Stable
obj_description(object_oid: oid, catalog_name: string) → string

Returns the comment for a database object specified by its OID and the name of the containing system catalog. For example, obj_description(123456, ‘pg_class’) would retrieve the comment for the table with OID 123456.

+
Stable
oidvectortypes(vector: oidvector) → string

Generates a comma-separated string of type names from an oidvector.

+
Stable
pg_backend_pid() → int

Returns a numerical ID attached to this session. This ID is part of the query cancellation key used by the wire protocol. This function was only added for compatibility, and unlike in Postgres, the returned value does not correspond to a real process ID.

+
Stable
pg_collation_for(str: anyelement) → string

Returns the collation of the argument

+
Stable
pg_column_is_updatable(reloid: oid, attnum: int2, include_triggers: bool) → bool

Returns whether the given column can be updated.

+
Stable
pg_column_size(any...) → int

Return size in bytes of the column provided as an argument

+
Stable
pg_function_is_visible(oid: oid) → bool

Returns whether the function with the given OID belongs to one of the schemas on the search path.

+
Stable
pg_get_function_arg_default(func_oid: oid, arg_num: int4) → string

Get textual representation of a function argument’s default value. The second argument of this function is the argument number among all arguments (i.e. proallargtypes, not proargtypes), starting with 1, because that’s how information_schema.sql uses it.

+
Stable
pg_get_function_arguments(func_oid: oid) → string

Returns the argument list (with defaults) necessary to identify a function, in the form it would need to appear in within CREATE FUNCTION.

+
Stable
pg_get_function_identity_arguments(func_oid: oid) → string

Returns the argument list (without defaults) necessary to identify a function, in the form it would need to appear in within ALTER FUNCTION, for instance.

+
Stable
pg_get_function_result(func_oid: oid) → string

Returns the types of the result of the specified function.

+
Stable
pg_get_functiondef(func_oid: oid) → string

For user-defined functions, returns the definition of the specified function. For builtin functions, returns the name of the function.

+
Stable
pg_get_indexdef(index_oid: oid) → string

Gets the CREATE INDEX command for index

+
Stable
pg_get_indexdef(index_oid: oid, column_no: int, pretty_bool: bool) → string

Gets the CREATE INDEX command for index, or definition of just one index column when given a non-zero column number

+
Stable
pg_get_serial_sequence(table_name: string, column_name: string) → string

Returns the name of the sequence used by the given column_name in the table table_name.

+
Stable
pg_get_viewdef(view_oid: oid) → string

Returns the CREATE statement for an existing view.

+
Stable
pg_get_viewdef(view_oid: oid, pretty_bool: bool) → string

Returns the CREATE statement for an existing view.

+
Stable
pg_has_role(role: string, privilege: string) → bool

Returns whether or not the current user has privileges for role.

+
Stable
pg_has_role(role: oid, privilege: string) → bool

Returns whether or not the current user has privileges for role.

+
Stable
pg_has_role(user: string, role: string, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: string, role: oid, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: oid, role: string, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: oid, role: oid, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_is_other_temp_schema(oid: oid) → bool

Returns true if the given OID is the OID of another session’s temporary schema. (This can be useful, for example, to exclude other sessions’ temporary tables from a catalog display.)

+
Stable
pg_my_temp_schema() → oid

Returns the OID of the current session’s temporary schema, or zero if it has none (because it has not created any temporary tables).

+
Stable
pg_relation_is_updatable(reloid: oid, include_triggers: bool) → int4

Returns the update events the relation supports.

+
Stable
pg_sequence_last_value(sequence_oid: oid) → int

Returns the last value generated by a sequence, or NULL if the sequence has not been used yet.

+
Volatile
pg_sleep(seconds: float) → bool

pg_sleep makes the current session’s process sleep until seconds seconds have elapsed. seconds is a value of type double precision, so fractional-second delays can be specified.

+
Volatile
pg_table_is_visible(oid: oid) → bool

Returns whether the table with the given OID belongs to one of the schemas on the search path.

+
Stable
pg_type_is_visible(oid: oid) → bool

Returns whether the type with the given OID belongs to one of the schemas on the search path.

+
Stable
set_config(setting_name: string, new_value: string, is_local: bool) → string

System info

+
Volatile
shobj_description(object_oid: oid, catalog_name: string) → string

Returns the comment for a shared database object specified by its OID and the name of the containing system catalog. This is just like obj_description except that it is used for retrieving comments on shared objects (e.g. databases).

+
Stable
+ diff --git a/src/current/_includes/cockroach-generated/release-26.1/sql/operators.md b/src/current/_includes/cockroach-generated/release-26.1/sql/operators.md new file mode 100644 index 00000000000..e79484b0754 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-26.1/sql/operators.md @@ -0,0 +1,684 @@ + + + + + +
#Return
int # intint
varbit # varbitvarbit
+ + + + +
#>Return
jsonb #> string[]jsonb
+ + + + +
#>>Return
jsonb #>> string[]string
+ + + + + + + + + +
%Return
decimal % decimaldecimal
decimal % intdecimal
float % floatfloat
int % decimaldecimal
int % intint
string % stringbool
+ + + + + + +
&Return
inet & inetinet
int & intint
varbit & varbitvarbit
+ + + + + + + + + +
&&Return
anyelement && anyelementbool
box2d && box2dbool
box2d && geometrybool
geometry && box2dbool
geometry && geometrybool
inet && inetbool
+ + + + + + + + + + + + + + + +
*Return
decimal * decimaldecimal
decimal * intdecimal
decimal * intervalinterval
float * floatfloat
float * intervalinterval
int * decimaldecimal
int * intint
int * intervalinterval
interval * decimalinterval
interval * floatinterval
interval * intinterval
vector * vectorvector
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Return
+decimaldecimal
+floatfloat
+intint
+intervalinterval
date + intdate
date + intervaltimestamp
date + timetimestamp
date + timetztimestamptz
decimal + decimaldecimal
decimal + intdecimal
decimal + pg_lsnpg_lsn
float + floatfloat
inet + intinet
int + datedate
int + decimaldecimal
int + inetinet
int + intint
interval + datetimestamp
interval + intervalinterval
interval + timetime
interval + timestamptimestamp
interval + timestamptztimestamptz
interval + timetztimetz
pg_lsn + decimalpg_lsn
time + datetimestamp
time + intervaltime
timestamp + intervaltimestamp
timestamptz + intervaltimestamptz
timetz + datetimestamptz
timetz + intervaltimetz
vector + vectorvector
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
-Return
-decimaldecimal
-floatfloat
-intint
-intervalinterval
date - dateint
date - intdate
date - intervaltimestamp
date - timetimestamp
decimal - decimaldecimal
decimal - intdecimal
float - floatfloat
inet - inetint
inet - intinet
int - decimaldecimal
int - intint
interval - intervalinterval
jsonb - intjsonb
jsonb - stringjsonb
jsonb - string[]jsonb
pg_lsn - decimalpg_lsn
pg_lsn - pg_lsndecimal
time - intervaltime
time - timeinterval
timestamp - intervaltimestamp
timestamp - timestampinterval
timestamp - timestamptzinterval
timestamptz - intervaltimestamptz
timestamptz - timestampinterval
timestamptz - timestamptzinterval
timetz - intervaltimetz
vector - vectorvector
+ + + + + +
->Return
jsonb -> intjsonb
jsonb -> stringjsonb
+ + + + + +
->>Return
jsonb ->> intstring
jsonb ->> stringstring
+ + + + + + + + + + +
/Return
decimal / decimaldecimal
decimal / intdecimal
float / floatfloat
int / decimaldecimal
int / intdecimal
interval / floatinterval
interval / intinterval
+ + + + + + + + +
//Return
decimal // decimaldecimal
decimal // intdecimal
float // floatfloat
int // decimaldecimal
int // intint
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
<Return
anyenum < anyenumbool
bool < boolbool
bool[] < bool[]bool
box2d < box2dbool
bpchar < bpcharbool
bytes < bytesbool
bytes[] < bytes[]bool
collatedstring < collatedstringbool
collatedstring{*} < collatedstring{*}bool
date < datebool
date < timestampbool
date < timestamptzbool
date[] < date[]bool
decimal < decimalbool
decimal < floatbool
decimal < intbool
decimal[] < decimal[]bool
float < decimalbool
float < floatbool
float < intbool
float[] < float[]bool
geography < geographybool
geometry < geometrybool
inet < inetbool
inet[] < inet[]bool
int < decimalbool
int < floatbool
int < intbool
int < oidbool
int[] < int[]bool
interval < intervalbool
interval[] < interval[]bool
jsonb < jsonbbool
ltree < ltreebool
oid < intbool
oid < oidbool
pg_lsn < pg_lsnbool
refcursor < refcursorbool
string < stringbool
string[] < string[]bool
time < timebool
time < timetzbool
time[] < time[]bool
timestamp < datebool
timestamp < timestampbool
timestamp < timestamptzbool
timestamp[] < timestamp[]bool
timestamptz < datebool
timestamptz < timestampbool
timestamptz < timestamptzbool
timestamptz < timestamptzbool
timetz < timebool
timetz < timetzbool
tuple < tuplebool
uuid < uuidbool
uuid[] < uuid[]bool
varbit < varbitbool
vector < vectorbool
+ + + + +
<#>Return
vector <#> vectorfloat
+ + + + +
<->Return
vector <-> vectorfloat
+ + + + + + +
<<Return
inet << inetbool
int << intint
varbit << intvarbit
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
<=Return
anyenum <= anyenumbool
bool <= boolbool
bool[] <= bool[]bool
box2d <= box2dbool
bpchar <= bpcharbool
bytes <= bytesbool
bytes[] <= bytes[]bool
collatedstring <= collatedstringbool
collatedstring{*} <= collatedstring{*}bool
date <= datebool
date <= timestampbool
date <= timestamptzbool
date[] <= date[]bool
decimal <= decimalbool
decimal <= floatbool
decimal <= intbool
decimal[] <= decimal[]bool
float <= decimalbool
float <= floatbool
float <= intbool
float[] <= float[]bool
geography <= geographybool
geometry <= geometrybool
inet <= inetbool
inet[] <= inet[]bool
int <= decimalbool
int <= floatbool
int <= intbool
int <= oidbool
int[] <= int[]bool
interval <= intervalbool
interval[] <= interval[]bool
jsonb <= jsonbbool
ltree <= ltreebool
oid <= intbool
oid <= oidbool
pg_lsn <= pg_lsnbool
refcursor <= refcursorbool
string <= stringbool
string[] <= string[]bool
time <= timebool
time <= timetzbool
time[] <= time[]bool
timestamp <= datebool
timestamp <= timestampbool
timestamp <= timestamptzbool
timestamp[] <= timestamp[]bool
timestamptz <= datebool
timestamptz <= timestampbool
timestamptz <= timestamptzbool
timestamptz <= timestamptzbool
timetz <= timebool
timetz <= timetzbool
tuple <= tuplebool
uuid <= uuidbool
uuid[] <= uuid[]bool
varbit <= varbitbool
vector <= vectorbool
+ + + + +
<=>Return
vector <=> vectorfloat
+ + + + + + +
<@Return
anyelement <@ anyelementbool
jsonb <@ jsonbbool
ltree <@ ltreebool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
=Return
anyenum = anyenumbool
bool = boolbool
bool[] = bool[]bool
box2d = box2dbool
bpchar = bpcharbool
bytes = bytesbool
bytes[] = bytes[]bool
collatedstring = collatedstringbool
collatedstring{*} = collatedstring{*}bool
date = datebool
date = timestampbool
date = timestamptzbool
date[] = date[]bool
decimal = decimalbool
decimal = floatbool
decimal = intbool
decimal[] = decimal[]bool
float = decimalbool
float = floatbool
float = intbool
float[] = float[]bool
geography = geographybool
geometry = geometrybool
inet = inetbool
inet[] = inet[]bool
int = decimalbool
int = floatbool
int = intbool
int = oidbool
int[] = int[]bool
interval = intervalbool
interval[] = interval[]bool
jsonb = jsonbbool
ltree = ltreebool
oid = intbool
oid = oidbool
pg_lsn = pg_lsnbool
refcursor = refcursorbool
string = stringbool
string[] = string[]bool
time = timebool
time = timetzbool
time[] = time[]bool
timestamp = datebool
timestamp = timestampbool
timestamp = timestamptzbool
timestamp[] = timestamp[]bool
timestamptz = datebool
timestamptz = timestampbool
timestamptz = timestamptzbool
timestamptz = timestamptzbool
timetz = timebool
timetz = timetzbool
tsquery = tsquerybool
tsvector = tsvectorbool
tuple = tuplebool
uuid = uuidbool
uuid[] = uuid[]bool
varbit = varbitbool
vector = vectorbool
+ + + + + + +
>>Return
inet >> inetbool
int >> intint
varbit >> intvarbit
+ + + + +
?Return
jsonb ? stringbool
+ + + + +
?&Return
jsonb ?& string[]bool
+ + + + +
?<@Return
ltree ?<@ ltreeltree
+ + + + +
?@>Return
ltree ?@> ltreeltree
+ + + + +
?|Return
jsonb ?| string[]bool
+ + + + + + +
@>Return
anyelement @> anyelementbool
jsonb @> jsonbbool
ltree @> ltreebool
+ + + + + +
@@Return
tsquery @@ tsvectorbool
tsvector @@ tsquerybool
+ + + + +
ILIKEReturn
string ILIKE stringbool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
INReturn
anyenum IN tuplebool
bool IN tuplebool
box2d IN tuplebool
bpchar IN tuplebool
bytes IN tuplebool
collatedstring IN tuplebool
date IN tuplebool
decimal IN tuplebool
float IN tuplebool
geography IN tuplebool
geometry IN tuplebool
inet IN tuplebool
int IN tuplebool
interval IN tuplebool
jsonb IN tuplebool
ltree IN tuplebool
oid IN tuplebool
pg_lsn IN tuplebool
refcursor IN tuplebool
string IN tuplebool
time IN tuplebool
timestamp IN tuplebool
timestamptz IN tuplebool
timetz IN tuplebool
tuple IN tuplebool
uuid IN tuplebool
varbit IN tuplebool
vector IN tuplebool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
IS NOT DISTINCT FROMReturn
anyelement IS NOT DISTINCT FROM unknownbool
anyenum IS NOT DISTINCT FROM anyenumbool
bool IS NOT DISTINCT FROM boolbool
bool[] IS NOT DISTINCT FROM bool[]bool
box2d IS NOT DISTINCT FROM box2dbool
bpchar IS NOT DISTINCT FROM bpcharbool
bytes IS NOT DISTINCT FROM bytesbool
bytes[] IS NOT DISTINCT FROM bytes[]bool
collatedstring IS NOT DISTINCT FROM collatedstringbool
collatedstring{*} IS NOT DISTINCT FROM collatedstring{*}bool
date IS NOT DISTINCT FROM datebool
date IS NOT DISTINCT FROM timestampbool
date IS NOT DISTINCT FROM timestamptzbool
date[] IS NOT DISTINCT FROM date[]bool
decimal IS NOT DISTINCT FROM decimalbool
decimal IS NOT DISTINCT FROM floatbool
decimal IS NOT DISTINCT FROM intbool
decimal[] IS NOT DISTINCT FROM decimal[]bool
float IS NOT DISTINCT FROM decimalbool
float IS NOT DISTINCT FROM floatbool
float IS NOT DISTINCT FROM intbool
float[] IS NOT DISTINCT FROM float[]bool
geography IS NOT DISTINCT FROM geographybool
geometry IS NOT DISTINCT FROM geometrybool
inet IS NOT DISTINCT FROM inetbool
inet[] IS NOT DISTINCT FROM inet[]bool
int IS NOT DISTINCT FROM decimalbool
int IS NOT DISTINCT FROM floatbool
int IS NOT DISTINCT FROM intbool
int IS NOT DISTINCT FROM oidbool
int[] IS NOT DISTINCT FROM int[]bool
interval IS NOT DISTINCT FROM intervalbool
interval[] IS NOT DISTINCT FROM interval[]bool
jsonb IS NOT DISTINCT FROM jsonbbool
jsonpath IS NOT DISTINCT FROM jsonpathbool
ltree IS NOT DISTINCT FROM ltreebool
oid IS NOT DISTINCT FROM intbool
oid IS NOT DISTINCT FROM oidbool
pg_lsn IS NOT DISTINCT FROM pg_lsnbool
refcursor IS NOT DISTINCT FROM refcursorbool
string IS NOT DISTINCT FROM stringbool
string[] IS NOT DISTINCT FROM string[]bool
time IS NOT DISTINCT FROM timebool
time IS NOT DISTINCT FROM timetzbool
time[] IS NOT DISTINCT FROM time[]bool
timestamp IS NOT DISTINCT FROM datebool
timestamp IS NOT DISTINCT FROM timestampbool
timestamp IS NOT DISTINCT FROM timestamptzbool
timestamp[] IS NOT DISTINCT FROM timestamp[]bool
timestamptz IS NOT DISTINCT FROM datebool
timestamptz IS NOT DISTINCT FROM timestampbool
timestamptz IS NOT DISTINCT FROM timestamptzbool
timestamptz IS NOT DISTINCT FROM timestamptzbool
timetz IS NOT DISTINCT FROM timebool
timetz IS NOT DISTINCT FROM timetzbool
tsquery IS NOT DISTINCT FROM tsquerybool
tsvector IS NOT DISTINCT FROM tsvectorbool
tuple IS NOT DISTINCT FROM tuplebool
unknown IS NOT DISTINCT FROM unknownbool
unknown IS NOT DISTINCT FROM voidbool
uuid IS NOT DISTINCT FROM uuidbool
uuid[] IS NOT DISTINCT FROM uuid[]bool
varbit IS NOT DISTINCT FROM varbitbool
vector IS NOT DISTINCT FROM vectorbool
void IS NOT DISTINCT FROM unknownbool
+ + + + + +
LIKEReturn
collatedstring LIKE collatedstringbool
string LIKE stringbool
+ + + + +
SIMILAR TOReturn
string SIMILAR TO stringbool
+ + + + + + + + +
^Return
decimal ^ decimaldecimal
decimal ^ intdecimal
float ^ floatfloat
int ^ decimaldecimal
int ^ intint
+ + + + + + +
|Return
inet | inetinet
int | intint
varbit | varbitvarbit
+ + + + + +
|/Return
|/decimaldecimal
|/floatfloat
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
||Return
bool || bool[]bool[]
bool || stringstring
bool[] || boolbool[]
bool[] || bool[]bool[]
box2d || box2dbox2d
box2d || stringstring
bytes || bytesbytes
bytes || bytes[]bytes[]
bytes[] || bytesbytes[]
bytes[] || bytes[]bytes[]
date || date[]date[]
date || stringstring
date[] || datedate[]
date[] || date[]date[]
decimal || decimal[]decimal[]
decimal || stringstring
decimal[] || decimaldecimal[]
decimal[] || decimal[]decimal[]
float || float[]float[]
float || stringstring
float[] || floatfloat[]
float[] || float[]float[]
geography || geographygeography
geography || stringstring
geometry || geometrygeometry
geometry || stringstring
inet || inet[]inet[]
inet || stringstring
inet[] || inetinet[]
inet[] || inet[]inet[]
int || int[]int[]
int || stringstring
int[] || intint[]
int[] || int[]int[]
interval || interval[]interval[]
interval || stringstring
interval[] || intervalinterval[]
interval[] || interval[]interval[]
jsonb || jsonbjsonb
jsonb || stringstring
ltree || ltreeltree
ltree || stringstring
oid || oidoid
oid || stringstring
pg_lsn || pg_lsnpg_lsn
pg_lsn || stringstring
refcursor || refcursorrefcursor
refcursor || stringstring
string || boolstring
string || box2dstring
string || datestring
string || decimalstring
string || floatstring
string || geographystring
string || geometrystring
string || inetstring
string || intstring
string || intervalstring
string || jsonbstring
string || ltreestring
string || oidstring
string || pg_lsnstring
string || refcursorstring
string || stringstring
string || string[]string[]
string || timestring
string || timestampstring
string || timestamptzstring
string || timetzstring
string || tuplestring
string || uuidstring
string || varbitstring
string[] || stringstring[]
string[] || string[]string[]
time || stringstring
time || time[]time[]
time[] || timetime[]
time[] || time[]time[]
timestamp || stringstring
timestamp || timestamp[]timestamp[]
timestamp[] || timestamptimestamp[]
timestamp[] || timestamp[]timestamp[]
timestamptz || stringstring
timestamptz || timestamptztimestamptz
timestamptz || timestamptztimestamptz
timestamptz || timestamptztimestamptz
timetz || stringstring
timetz || timetztimetz
tuple || stringstring
uuid || stringstring
uuid || uuid[]uuid[]
uuid[] || uuiduuid[]
uuid[] || uuid[]uuid[]
varbit || stringstring
varbit || varbitvarbit
+ + + + + +
||/Return
||/decimaldecimal
||/floatfloat
+ + + + + + + + + + + +
~Return
~inetinet
~intint
~varbitvarbit
box2d ~ box2dbool
box2d ~ geometrybool
geometry ~ box2dbool
geometry ~ geometrybool
string ~ stringbool
+ + + + +
~*Return
string ~* stringbool
diff --git a/src/current/_includes/cockroach-generated/release-26.1/sql/window_functions.md b/src/current/_includes/cockroach-generated/release-26.1/sql/window_functions.md new file mode 100644 index 00000000000..321cc02ccf9 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-26.1/sql/window_functions.md @@ -0,0 +1,431 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cume_dist() → float

Calculates the relative rank of the current row: (number of rows preceding or peer with current row) / (total rows).

+
Immutable
dense_rank() → int

Calculates the rank of the current row without gaps; this function counts peer groups.

+
Immutable
first_value(val: bool) → bool

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: bytes) → bytes

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: date) → date

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: decimal) → decimal

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: float) → float

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: inet) → inet

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: int) → int

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: interval) → interval

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: string) → string

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: time) → time

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timestamp) → timestamp

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timestamptz) → timestamptz

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: uuid) → uuid

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: box2d) → box2d

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: geography) → geography

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: geometry) → geometry

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: jsonb) → jsonb

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: ltree) → ltree

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: oid) → oid

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: pg_lsn) → pg_lsn

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: refcursor) → refcursor

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timetz) → timetz

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: varbit) → varbit

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
lag(val: bool) → bool

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: bool, n: int) → bool

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: bool, n: int, default: bool) → bool

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: bytes) → bytes

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: bytes, n: int) → bytes

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: bytes, n: int, default: bytes) → bytes

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: date) → date

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: date, n: int) → date

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: date, n: int, default: date) → date

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: decimal) → decimal

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: decimal, n: int) → decimal

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: decimal, n: int, default: decimal) → decimal

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: float) → float

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: float, n: int) → float

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: float, n: int, default: float) → float

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: inet) → inet

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: inet, n: int) → inet

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: inet, n: int, default: inet) → inet

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: int) → int

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: int, n: int) → int

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: int, n: int, default: int) → int

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: interval) → interval

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: interval, n: int) → interval

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: interval, n: int, default: interval) → interval

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: string) → string

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: string, n: int) → string

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: string, n: int, default: string) → string

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: time) → time

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: time, n: int) → time

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: time, n: int, default: time) → time

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timestamp) → timestamp

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timestamp, n: int, default: timestamp) → timestamp

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timestamptz) → timestamptz

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timestamptz, n: int, default: timestamptz) → timestamptz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: uuid) → uuid

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: uuid, n: int) → uuid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: uuid, n: int, default: uuid) → uuid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: box2d) → box2d

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: box2d, n: int) → box2d

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: box2d, n: int, default: box2d) → box2d

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: geography) → geography

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: geography, n: int) → geography

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: geography, n: int, default: geography) → geography

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: geometry) → geometry

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: geometry, n: int) → geometry

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: geometry, n: int, default: geometry) → geometry

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: jsonb) → jsonb

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: jsonb, n: int, default: jsonb) → jsonb

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: ltree) → ltree

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: ltree, n: int) → ltree

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: ltree, n: int, default: ltree) → ltree

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: oid) → oid

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: oid, n: int) → oid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: oid, n: int, default: oid) → oid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: pg_lsn) → pg_lsn

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: pg_lsn, n: int, default: pg_lsn) → pg_lsn

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: refcursor) → refcursor

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: refcursor, n: int, default: refcursor) → refcursor

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timetz) → timetz

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timetz, n: int) → timetz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timetz, n: int, default: timetz) → timetz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: varbit) → varbit

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: varbit, n: int) → varbit

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: varbit, n: int, default: varbit) → varbit

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
last_value(val: bool) → bool

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: bytes) → bytes

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: date) → date

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: decimal) → decimal

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: float) → float

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: inet) → inet

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: int) → int

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: interval) → interval

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: string) → string

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: time) → time

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timestamp) → timestamp

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timestamptz) → timestamptz

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: uuid) → uuid

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: box2d) → box2d

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: geography) → geography

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: geometry) → geometry

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: jsonb) → jsonb

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: ltree) → ltree

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: oid) → oid

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: pg_lsn) → pg_lsn

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: refcursor) → refcursor

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timetz) → timetz

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: varbit) → varbit

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
lead(val: bool) → bool

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: bool, n: int) → bool

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: bool, n: int, default: bool) → bool

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: bytes) → bytes

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: bytes, n: int) → bytes

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: bytes, n: int, default: bytes) → bytes

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: date) → date

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: date, n: int) → date

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: date, n: int, default: date) → date

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: decimal) → decimal

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: decimal, n: int) → decimal

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: decimal, n: int, default: decimal) → decimal

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: float) → float

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: float, n: int) → float

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: float, n: int, default: float) → float

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: inet) → inet

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: inet, n: int) → inet

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: inet, n: int, default: inet) → inet

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such, row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: int) → int

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: int, n: int) → int

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: int, n: int, default: int) → int

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: interval) → interval

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: interval, n: int) → interval

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: interval, n: int, default: interval) → interval

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: string) → string

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: string, n: int) → string

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: string, n: int, default: string) → string

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: time) → time

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: time, n: int) → time

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: time, n: int, default: time) → time

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timestamp) → timestamp

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timestamp, n: int, default: timestamp) → timestamp

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timestamptz) → timestamptz

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timestamptz, n: int, default: timestamptz) → timestamptz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: uuid) → uuid

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: uuid, n: int) → uuid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: uuid, n: int, default: uuid) → uuid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: box2d) → box2d

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: box2d, n: int) → box2d

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: box2d, n: int, default: box2d) → box2d

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: geography) → geography

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: geography, n: int) → geography

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: geography, n: int, default: geography) → geography

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: geometry) → geometry

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: geometry, n: int) → geometry

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: geometry, n: int, default: geometry) → geometry

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: jsonb) → jsonb

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: jsonb, n: int, default: jsonb) → jsonb

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: ltree) → ltree

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: ltree, n: int) → ltree

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: ltree, n: int, default: ltree) → ltree

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: oid) → oid

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: oid, n: int) → oid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: oid, n: int, default: oid) → oid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: pg_lsn) → pg_lsn

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: pg_lsn, n: int, default: pg_lsn) → pg_lsn

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: refcursor) → refcursor

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: refcursor, n: int, default: refcursor) → refcursor

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timetz) → timetz

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timetz, n: int) → timetz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timetz, n: int, default: timetz) → timetz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: varbit) → varbit

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: varbit, n: int) → varbit

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: varbit, n: int, default: varbit) → varbit

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
nth_value(val: bool, n: int) → bool

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: bytes, n: int) → bytes

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: date, n: int) → date

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: decimal, n: int) → decimal

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: float, n: int) → float

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: inet, n: int) → inet

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: int, n: int) → int

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: interval, n: int) → interval

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: string, n: int) → string

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: time, n: int) → time

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: uuid, n: int) → uuid

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: box2d, n: int) → box2d

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: geography, n: int) → geography

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: geometry, n: int) → geometry

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: ltree, n: int) → ltree

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: oid, n: int) → oid

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timetz, n: int) → timetz

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: varbit, n: int) → varbit

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
ntile(n: int) → int

Calculates an integer ranging from 1 to n, dividing the partition as equally as possible.

+
Immutable
percent_rank() → float

Calculates the relative rank of the current row: (rank - 1) / (total rows - 1).

+
Immutable
rank() → int

Calculates the rank of the current row with gaps; same as row_number of its first peer.

+
Immutable
row_number() → int

Calculates the number of the current row within its partition, counting from 1.

+
Immutable
+ diff --git a/src/current/_includes/cockroach-generated/release-26.2/eventlog.md b/src/current/_includes/cockroach-generated/release-26.2/eventlog.md new file mode 100644 index 00000000000..c891924297e --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-26.2/eventlog.md @@ -0,0 +1,4001 @@ +Certain notable events are reported using a structured format. +Commonly, these notable events are also copied to the table +`system.eventlog`, unless the cluster setting +`server.eventlog.enabled` is unset. + +Additionally, notable events are copied to specific external logging +channels in log messages, where they can be collected for further processing. + +The sections below document the possible notable event types +in this version of CockroachDB. For each event type, a table +documents the possible fields. A field may be omitted from +an event if its value is empty or zero. + +A field is also considered "Sensitive" if it may contain +application-specific information or personally identifiable information (PII). In that case, +the copy of the event sent to the external logging channel +will contain redaction markers in a format that is compatible +with the redaction facilities in [`cockroach debug zip`](cockroach-debug-zip.html) +and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html), +provided the `redactable` functionality is enabled on the logging sink. + +Events not documented on this page will have an unstructured format in log messages. + +## ASH events + +Events in this category pertain to Active Session History (ASH) +sampling diagnostics. + +Events in this category are logged to the `OPS` channel. + + +### `ash_workload_summary` + +An event of type `ash_workload_summary` is emitted periodically with a top-N summary of +the most frequently sampled workloads in the Active Session History +(ASH) buffer. One event is emitted per top-N entry. + +Reserved and subject to change without notice. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `WindowDurationMillis` | The duration of the reporting window in milliseconds. | no | +| `WorkEventType` | The work event type (e.g. CPU, IO, LOCK). | no | +| `WorkEvent` | The specific event name within the event type. | no | +| `WorkloadID` | The workload identifier (ex/ statement fingerprint). | no | +| `SampleCount` | The number of samples for this workload entry in the reporting window. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Changefeed telemetry events + +Events in this category pertain to changefeed usage and metrics. + +Events in this category are logged to the `CHANGEFEED` channel. + + +### `alter_changefeed` + +An event of type `alter_changefeed` is an event for any ALTER CHANGEFEED statements that are run. + +Note: in version 26.1, these events will be moved to the `CHANGEFEED` channel. +To test compatability before this, set the cluster setting +`log.channel_compatibility_mode.enabled` to false. This will send the +events to `CHANGEFEED` instead of `TELEMETRY`. + + +| Field | Description | Sensitive | +|--|--|--| +| `PreviousDescription` | The description of the changefeed job before the ALTER CHANGEFEED. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). 
| no | +| `JobId` | The job id for enterprise changefeeds. | no | + +### `changefeed_canceled` + +An event of type `changefeed_canceled` is an event for any changefeed cancellations. + +Note: in version 26.1, these events will be moved to the `CHANGEFEED` channel. +To test compatability before this, set the cluster setting +`log.channel_compatibility_mode.enabled` to false. This will send the +events to `CHANGEFEED` instead of `TELEMETRY`. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. | no | + +### `changefeed_emitted_bytes` + +An event of type `changefeed_emitted_bytes` is an event representing the bytes emitted by a changefeed over an interval. + +Note: in version 26.1, these events will be moved to the `CHANGEFEED` channel. +To test compatability before this, set the cluster setting +`log.channel_compatibility_mode.enabled` to false. This will send the +events to `CHANGEFEED` instead of `TELEMETRY`. + + +| Field | Description | Sensitive | +|--|--|--| +| `EmittedBytes` | The number of bytes emitted. | no | +| `EmittedMessages` | The number of messages emitted. | no | +| `LoggingInterval` | The time period in nanoseconds between emitting telemetry events of this type (per-aggregator). | no | +| `Closing` | Flag to indicate that the changefeed is closing. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. | no | + +### `changefeed_failed` + +An event of type `changefeed_failed` is an event for any changefeed failure since the plan hook +was triggered. + +Note: in version 26.1, these events will be moved to the `CHANGEFEED` channel. +To test compatability before this, set the cluster setting +`log.channel_compatibility_mode.enabled` to false. This will send the +events to `CHANGEFEED` instead of `TELEMETRY`. + + +| Field | Description | Sensitive | +|--|--|--| +| `FailureType` | The reason / environment with which the changefeed failed (ex: connection_closed, changefeed_behind). | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. 
| no | + +### `create_changefeed` + +An event of type `create_changefeed` is an event for any CREATE CHANGEFEED query that +successfully starts running. Failed CREATE statements will show up as +ChangefeedFailed events. + +Note: in version 26.1, these events moved to the `CHANGEFEED` channel. +To test compatability prior to this, set the cluster setting +`log.channel_compatibility_mode.enabled` to true. This will send the +events to `TELEMETRY` instead of `CHANGEFEED`. + + +| Field | Description | Sensitive | +|--|--|--| +| `Transformation` | Flag representing whether the changefeed is using CDC queries. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Description` | The description of that would show up in the job's description field, redacted | yes | +| `SinkType` | The type of sink being emitted to (ex: kafka, nodelocal, webhook-https). | no | +| `NumTables` | The number of tables listed in the query that the changefeed is to run on. | no | +| `Resolved` | The behavior of emitted resolved spans (ex: yes, no, 10s) | no | +| `InitialScan` | The desired behavior of initial scans (ex: yes, no, only) | no | +| `Format` | The data format being emitted (ex: JSON, Avro). | no | +| `JobId` | The job id for enterprise changefeeds. | no | + +## Cluster-level events + +Events in this category pertain to an entire cluster and are +not relative to any particular tenant. + +In a multi-tenant setup, the `system.eventlog` table for individual +tenants cannot contain a copy of cluster-level events; conversely, +the `system.eventlog` table in the system tenant cannot contain the +SQL-level events for individual tenants. + +Events in this category are logged to the `OPS` channel. + + +### `certs_reload` + +An event of type `certs_reload` is recorded when the TLS certificates are +reloaded/rotated from disk. + + +| Field | Description | Sensitive | +|--|--|--| +| `Success` | Whether the operation completed without errors. 
| no | +| `ErrorMessage` | If an error was encountered, the text of the error. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `disk_slowness_cleared` + +An event of type `disk_slowness_cleared` is recorded when disk slowness in a store has cleared. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeID` | The node ID where the event was originated. | no | +| `StoreID` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `disk_slowness_detected` + +An event of type `disk_slowness_detected` is recorded when a store observes disk slowness +events. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeID` | The node ID where the event was originated. | no | +| `StoreID` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `low_disk_space` + +An event of type `low_disk_space` is emitted when a store is reaching capacity, as we reach +certain thresholds. It is emitted periodically while we are in a low disk +state. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeID` | The node ID where the event was originated. | no | +| `StoreID` | | no | +| `PercentThreshold` | The free space percent threshold that we went under. | no | +| `AvailableBytes` | | no | +| `TotalBytes` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | + +### `node_decommissioned` + +An event of type `node_decommissioned` is recorded when a node is marked as +decommissioned. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_decommissioning` + +An event of type `node_decommissioning` is recorded when a node is marked as +decommissioning. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. | no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_join` + +An event of type `node_join` is recorded when a node joins the cluster. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_recommissioned` + +An event of type `node_recommissioned` is recorded when a decommissioning node is +recommissioned. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RequestingNodeID` | The node ID where the event was originated. 
| no | +| `TargetNodeID` | The node ID affected by the operation. | no | + +### `node_restart` + +An event of type `node_restart` is recorded when an existing node rejoins the cluster +after being offline. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_shutdown_connection_timeout` + +An event of type `node_shutdown_connection_timeout` is recorded when SQL connections remain open +during shutdown, after waiting for the server.shutdown.connections.timeout +to transpire. + + +| Field | Description | Sensitive | +|--|--|--| +| `Detail` | The detailed message, meant to be a human-understandable explanation. | no | +| `ConnectionsRemaining` | The number of connections still open after waiting for the client to close them. | no | +| `TimeoutMillis` | The amount of time the server waited for the client to close the connections, defined by server.shutdown.connections.timeout. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `node_shutdown_transaction_timeout` + +An event of type `node_shutdown_transaction_timeout` is recorded when SQL transactions remain open +during shutdown, after waiting for the server.shutdown.transactions.timeout +to transpire. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `Detail` | The detailed message, meant to be a human-understandable explanation. | no | +| `ConnectionsRemaining` | The number of connections still running SQL transactions after waiting for the client to end them. | no | +| `TimeoutMillis` | The amount of time the server waited for the client to close the connections, defined by server.shutdown.transactions.timeout. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `StartedAt` | The time when this node was last started. | no | +| `LastUp` | The approximate last time the node was up before the last restart. | no | + +### `tenant_shared_service_start` + +An event of type `tenant_shared_service_start` is recorded when a tenant server +is started inside the same process as the KV layer. + + +| Field | Description | Sensitive | +|--|--|--| +| `OK` | Whether the startup was successful. | no | +| `ErrorText` | If the startup failed, the text of the error. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `TenantID` | The ID of the tenant owning the service. | no | +| `InstanceID` | The ID of the server instance. | no | +| `TenantName` | The name of the tenant at the time the event was emitted. | yes | + +### `tenant_shared_service_stop` + +An event of type `tenant_shared_service_stop` is recorded when a tenant server +is shut down inside the same process as the KV layer. 
+ + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event was originated. | no | +| `TenantID` | The ID of the tenant owning the service. | no | +| `InstanceID` | The ID of the server instance. | no | +| `TenantName` | The name of the tenant at the time the event was emitted. | yes | + +## Contention events + +Aggregated information about contention events. + +Events in this category are logged to the `SQL_EXEC` channel. + + +### `aggregated_contention_info` + +An event of type `aggregated_contention_info` is recorded periodically when contention events +are resolved. + + +| Field | Description | Sensitive | +|--|--|--| +| `WaitingStmtFingerprintId` | | no | +| `WaitingTxnFingerprintId` | | no | +| `BlockingTxnFingerprintId` | | no | +| `ContendedKey` | | partially | +| `Duration` | | no | +| `TableId` | Decoded key information (populated when key decoding is available). | no | +| `IndexId` | | no | +| `DatabaseName` | | no | +| `SchemaName` | | no | +| `TableName` | | no | +| `IndexName` | | no | +| `KeyColumnNames` | Decoded key column information. Arrays are parallel (same index = same column). Column names (schema metadata, safe). | no | +| `KeyColumnTypes` | Column types (schema metadata, safe). | no | +| `KeyColumnValues` | Column values (potentially sensitive user data). | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Debugging events + +Events in this category pertain to debugging operations performed by +operators or (more commonly) Cockroach Labs employees. These operations can +e.g. directly access and mutate internal state, breaking system invariants. 
+ +Events in this category are logged to the `OPS` channel. + + +### `debug_recover_replica` + +An event of type `debug_recover_replica` is recorded when unsafe loss of quorum recovery is performed. + + +| Field | Description | Sensitive | +|--|--|--| +| `RangeID` | | no | +| `StoreID` | | no | +| `SurvivorReplicaID` | | no | +| `UpdatedReplicaID` | | no | +| `StartKey` | | yes | +| `EndKey` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event originated. | no | +| `User` | The user which performed the operation. | yes | + +### `debug_send_kv_batch` + +An event of type `debug_send_kv_batch` is recorded when an arbitrary KV BatchRequest is submitted +to the cluster via the `debug send-kv-batch` CLI command. + + +| Field | Description | Sensitive | +|--|--|--| +| `BatchRequest` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `NodeID` | The node ID where the event originated. | no | +| `User` | The user which performed the operation. | yes | + +## Health events + +Events in this category pertain to the health of one or more servers. + +Events in this category are logged to the `HEALTH` channel. + + +### `hot_ranges_stats` + +An event of type `hot_ranges_stats` + + +| Field | Description | Sensitive | +|--|--|--| +| `RangeID` | | no | +| `Qps` | | no | +| `SchemaName` | SchemaName is the name of the schema in which the index was created. | yes | +| `LeaseholderNodeID` | LeaseholderNodeID indicates the Node ID that is the current leaseholder for the given range. | no | +| `WritesPerSecond` | Writes per second is the recent number of keys written per second on this range. 
| no | +| `ReadsPerSecond` | Reads per second is the recent number of keys read per second on this range. | no | +| `WriteBytesPerSecond` | Write bytes per second is the recent number of bytes written per second on this range. | no | +| `ReadBytesPerSecond` | Read bytes per second is the recent number of bytes read per second on this range. | no | +| `CPUTimePerSecond` | CPU time per second is the recent cpu usage in nanoseconds of this range. | no | +| `Databases` | Databases for the range. | no | +| `Tables` | Tables for the range | no | +| `Indexes` | Indexes for the range | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `runtime_stats` + +An event of type `runtime_stats` is recorded every 10 seconds as server health metrics. + + +| Field | Description | Sensitive | +|--|--|--| +| `MemRSSBytes` | The process resident set size. Expressed as bytes. | no | +| `GoroutineCount` | The number of goroutines. | no | +| `MemStackSysBytes` | The stack system memory used. Expressed as bytes. | no | +| `GoAllocBytes` | The memory allocated by Go. Expressed as bytes. | no | +| `GoTotalBytes` | The total memory allocated by Go but not released. Expressed as bytes. | no | +| `GoStatsStaleness` | The staleness of the Go memory statistics. Expressed in seconds. | no | +| `HeapFragmentBytes` | The amount of heap fragmentation. Expressed as bytes. | no | +| `HeapReservedBytes` | The amount of heap reserved. Expressed as bytes. | no | +| `HeapReleasedBytes` | The amount of heap released. Expressed as bytes. | no | +| `CGoAllocBytes` | The memory allocated outside of Go. Expressed as bytes. | no | +| `CGoTotalBytes` | The total memory allocated outside of Go but not released. Expressed as bytes. | no | +| `CGoCallRate` | The total number of calls outside of Go over time. 
Expressed as operations per second. | no | +| `CPUUserPercent` | The user CPU percentage. | no | +| `CPUSysPercent` | The system CPU percentage. | no | +| `GCPausePercent` | The GC pause percentage. | no | +| `GCRunCount` | The total number of GC runs. | no | +| `NetHostRecvBytes` | The bytes received on all network interfaces since this process started. | no | +| `NetHostSendBytes` | The bytes sent on all network interfaces since this process started. | no | +| `GoLimitBytes` | The soft Go memory limit in bytes. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Job events + +Events in this category pertain to long-running jobs that are orchestrated by +a node's job registry. These system processes can create and/or modify stored +objects during the course of their execution. + +A job might choose to emit multiple events during its execution when +transitioning from one "state" to another. +E.g., IMPORT/RESTORE will emit events on job creation and successful +completion. If the job fails, events will be emitted on job creation, +failure, and successful revert. + +Events in this category are logged to the `OPS` channel. + + +### `import` + +An event of type `import` is recorded when an import job is created and on successful completion. +If the job fails, events will be emitted on job creation, failure, and +successful revert. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `JobID` | The ID of the job that triggered the event. | no | +| `JobType` | The type of the job that triggered the event. | no | +| `Description` | A description of the job that triggered the event. 
Some jobs populate the description with an approximate representation of the SQL statement run to create the job. | yes | +| `User` | The user account that triggered the event. | yes | +| `DescriptorIDs` | The object descriptors affected by the job. Set to zero for operations that don't affect descriptors. | yes | +| `Status` | The status of the job that triggered the event. This allows the job to indicate which phase of execution it is in when the event is triggered. | no | + +### `restore` + +An event of type `restore` is recorded when a restore job is created and on successful completion. +If the job fails, events will be emitted on job creation, failure, and +successful revert. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `JobID` | The ID of the job that triggered the event. | no | +| `JobType` | The type of the job that triggered the event. | no | +| `Description` | A description of the job that triggered the event. Some jobs populate the description with an approximate representation of the SQL statement run to create the job. | yes | +| `User` | The user account that triggered the event. | yes | +| `DescriptorIDs` | The object descriptors affected by the job. Set to zero for operations that don't affect descriptors. | yes | +| `Status` | The status of the job that triggered the event. This allows the job to indicate which phase of execution it is in when the event is triggered. | no | + +### `status_change` + +An event of type `status_change` is recorded when a job changes statuses. + + +| Field | Description | Sensitive | +|--|--|--| +| `JobID` | The ID of the job that is changing statuses. | no | +| `JobType` | The type of the job that is changing statuses. 
| no | +| `Description` | A human parsable description of the status change | partially | +| `PreviousStatus` | The status that the job is transitioning out of | no | +| `NewStatus` | The status that the job has transitioned into | no | +| `RunNum` | The run number of the job. | no | +| `Error` | An error that may have occurred while the job was running. | yes | +| `FinalResumeErr` | An error that occurred that requires the job to be reverted. | yes | +| `User` | User is the owner of the job. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Miscellaneous SQL events + +Events in this category report miscellaneous SQL events. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these miscellaneous events are +preserved in each tenant's own system.eventlog table. + +Events in this category are logged to the `OPS` channel. + + +### `delete_rewrite_inline_hints` + +An event of type `delete_rewrite_inline_hints` is recorded when a rewrite inline hint is +deleted via information_schema.crdb_delete_statement_hints. + + +| Field | Description | Sensitive | +|--|--|--| +| `StatementFingerprint` | The target statement fingerprint for which inline hints are being deleted. | no | +| `HintID` | The hint ID of the to-delete statement hint. | no | +| `DonorSql` | The donor sql of the deleted inline hint. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `delete_session_variable_hint` + +An event of type `delete_session_variable_hint` is recorded when a session variable hint is +deleted via information_schema.crdb_delete_statement_hints. + + +| Field | Description | Sensitive | +|--|--|--| +| `StatementFingerprint` | The target statement fingerprint for which the session variable hint is being deleted. | no | +| `HintID` | The hint ID of the deleted statement hint. | no | +| `VariableName` | The name of the session variable that was overridden. | no | +| `VariableValue` | The value of the session variable override. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `rewrite_inline_hints` + +An event of type `rewrite_inline_hints` is recorded when a new inline-hints rewrite rule is added +via information_schema.crdb_rewrite_inline_hints or crdb_internal.inject_hint. + + +| Field | Description | Sensitive | +|--|--|--| +| `StatementFingerprint` | The target statement fingerprint for which inline hints are being rewritten. | no | +| `DonorSQL` | The donor statement providing the inline hints. | no | +| `HintID` | The hint ID of the newly created statement hint. | no | +| `Database` | The database to which the hint is scoped, if any. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `set_cluster_setting` + +An event of type `set_cluster_setting` is recorded when a cluster setting is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SettingName` | The name of the affected cluster setting. | no | +| `Value` | The new value of the cluster setting. | yes | +| `DefaultValue` | The current default value of the cluster setting. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `set_session_variable_hint` + +An event of type `set_session_variable_hint` is recorded when a new session variable hint is +added via information_schema.crdb_set_session_variable_hint. + + +| Field | Description | Sensitive | +|--|--|--| +| `StatementFingerprint` | The target statement fingerprint for which the session variable is being overridden. | no | +| `VariableName` | The name of the session variable being overridden. | no | +| `VariableValue` | The value of the session variable override. | yes | +| `HintID` | The hint ID of the newly created statement hint. | no | +| `Database` | The database to which the hint is scoped, if any. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `set_tenant_cluster_setting` + +An event of type `set_tenant_cluster_setting` is recorded when a cluster setting override +is changed, either for another tenant or for all tenants. + + +| Field | Description | Sensitive | +|--|--|--| +| `SettingName` | The name of the affected cluster setting. | no | +| `Value` | The new value of the cluster setting. | yes | +| `TenantId` | The target Tenant ID. Empty if targeting all tenants. | no | +| `AllTenants` | Whether the override applies to all tenants. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +## SQL Access Audit Events + +Events in this category are generated when a table has been +marked as audited via `ALTER TABLE ... EXPERIMENTAL_AUDIT SET`. + +Note: These events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SENSITIVE_ACCESS` channel. + + +### `admin_query` + +An event of type `admin_query` is recorded when a user with admin privileges (the user +is directly or indirectly a member of the admin role) executes a query. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. 
| no | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `role_based_audit_event` + +An event of type `role_based_audit_event` is an audit event recorded when an executed query belongs to a user whose role +membership(s) correspond to any role that is enabled to emit an audit log via the sql.log.user_audit +cluster setting. + + +| Field | Description | Sensitive | +|--|--|--| +| `Role` | The configured audit role that emitted this log. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. 
| no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `sensitive_table_access` + +An event of type `sensitive_table_access` is recorded when an access is performed to +a table marked as audited. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table being audited. 
| yes | +| `AccessMode` | How the table was accessed (r=read / rw=read/write). | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. 
| no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `unsafe_internals_accessed` + +UnsafeInternalsAccess is recorded when a query accesses unsafe internals +using the allow_unsafe_internals override. + + +| Field | Description | Sensitive | +|--|--|--| +| `Query` | The query that triggered the unsafe internals access. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. 
| no | + +### `unsafe_internals_denied` + +An event of type `unsafe_internals_denied` is recorded when a query attempts to access unsafe internals +but lacks the appropriate session variables. + + +| Field | Description | Sensitive | +|--|--|--| +| `Query` | The query that triggered the unsafe internals access. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +## SQL Execution Log + +Events in this category report executed queries. + +Note: These events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +Events in this category are logged to the `SQL_EXEC` channel. 
+ + +### `query_execute` + +An event of type `query_execute` is recorded when a query is executed, +and the cluster setting `sql.log.all_statements.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. 
| no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +## SQL Logical Schema Changes + +Events in this category pertain to DDL (Data Definition Language) +operations performed by SQL statements that modify the SQL logical +schema. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `SQL_SCHEMA` channel. + + +### `alter_database_add_region` + +An event of type `alter_database_add_region` is recorded when a region is added to a database. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `RegionName` | The region being added. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_database_drop_region` + +An event of type `alter_database_drop_region` is recorded when a region is dropped from a database. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `RegionName` | The region being dropped. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_database_placement` + +An event of type `alter_database_placement` is recorded when the database placement is modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `Placement` | The new placement policy. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_database_primary_region` + +An event of type `alter_database_primary_region` is recorded when a primary region is added/modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `PrimaryRegionName` | The new primary region. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_database_set_zone_config_extension` + +An event of type `alter_database_set_zone_config_extension` is recorded when a zone config extension is changed. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. 
The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + +### `alter_database_survival_goal` + +An event of type `alter_database_survival_goal` is recorded when the survival goal is modified. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database. | no | +| `SurvivalGoal` | The new survival goal | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_function_options` + +An event of type `alter_function_options` is recorded when a user-defined function's options are +altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the affected function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_index` + +An event of type `alter_index` is recorded when an index is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_index_visible` + +An event of type `alter_index_visible` is recorded when an index's visibility is altered. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `NotVisible` | Set true if index is not visible. NOTE: THIS FIELD IS DEPRECATED in favor of invisibility. | no | +| `Invisibility` | The new invisibility of the affected index. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_sequence` + +An event of type `alter_sequence` is recorded when a sequence is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the affected sequence. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. 
Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_table` + +An event of type `alter_table` is recorded when a table is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update, if any. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_type` + +An event of type `alter_type` is recorded when a user-defined type is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `comment_on_column` + +An event of type `comment_on_column` is recorded when a column is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected column. | no | +| `ColumnName` | The affected column. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `comment_on_constraint` + +An event of type `comment_on_constraint` is recorded when a constraint is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected constraint. | no | +| `ConstraintName` | The name of the affected constraint. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `comment_on_database` + +An event of type `comment_on_database` is recorded when a database is commented. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `comment_on_index` + +An event of type `comment_on_index` is recorded when an index is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `comment_on_schema` + + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | Name of the affected schema. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `comment_on_table` + +An event of type `comment_on_table` is recorded when a table is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. 
Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `comment_on_type` + +An event of type `comment_on_type` is recorded when a type is commented. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | no | +| `Comment` | The new comment. | yes | +| `NullComment` | Set to true if the comment was removed entirely. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_database` + +An event of type `create_database` is recorded when a database is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the new database. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_function` + +An event of type `create_function` is recorded when a user-defined function is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the created function. | no | +| `IsReplace` | If the new function is a replace of an existing function. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_index` + +An event of type `create_index` is recorded when an index is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the new index. | no | +| `IndexName` | The name of the new index. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_policy` + +An event of type `create_policy` is recorded when a policy is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | Name of the policy's table. | no | +| `PolicyName` | Name of the created policy. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_schema` + +An event of type `create_schema` is recorded when a schema is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the new schema. | no | +| `Owner` | The name of the owner for the new schema. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_sequence` + +An event of type `create_sequence` is recorded when a sequence is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the new sequence. | no | +| `Owner` | The name of the owner for the new sequence. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_statistics` + +An event of type `create_statistics` is recorded when statistics are collected for a +table. + +Events of this type are only collected when the cluster setting +`sql.stats.post_events.enabled` is set. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table for which the statistics were created. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_table` + +An event of type `create_table` is recorded when a table is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the new table. | no | +| `Owner` | The name of the owner for the new table. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_trigger` + +An event of type `create_trigger` is recorded when a trigger is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | Name of the trigger's table. | no | +| `TriggerName` | Name of the created trigger. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_type` + +An event of type `create_type` is recorded when a user-defined type is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the new type. | no | +| `Owner` | The name of the owner for the new type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_view` + +An event of type `create_view` is recorded when a view is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `ViewName` | The name of the new view. | no | +| `Owner` | The name of the owner of the new view. | no | +| `ViewQuery` | The SQL selection clause used to define the view. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_database` + +An event of type `drop_database` is recorded when a database is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. 
| no | +| `DroppedSchemaObjects` | The names of the schemas dropped by a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_function` + +An event of type `drop_function` is recorded when a user-defined function is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | Name of the dropped function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_index` + +An event of type `drop_index` is recorded when an index is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the table containing the affected index. | no | +| `IndexName` | The name of the affected index. | no | +| `MutationID` | The mutation ID for the asynchronous job that is processing the index update. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. 
The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_policy` + +An event of type `drop_policy` is recorded when a policy is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | Name of the policy's table. | no | +| `PolicyName` | Name of the dropped policy. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_schema` + +An event of type `drop_schema` is recorded when a schema is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_sequence` + +An event of type `drop_sequence` is recorded when a sequence is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `SequenceName` | The name of the affected sequence. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_table` + +An event of type `drop_table` is recorded when a table is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). 
| partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_trigger` + +An event of type `drop_trigger` is recorded when a trigger is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | Name of the trigger's table. | no | +| `TriggerName` | Name of the dropped trigger. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_type` + +An event of type `drop_type` is recorded when a user-defined type is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_view` + +An event of type `drop_view` is recorded when a view is dropped. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `ViewName` | The name of the affected view. | no | +| `CascadeDroppedViews` | The names of the views dropped as a result of a cascade operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `finish_schema_change` + +An event of type `finish_schema_change` is recorded when a previously initiated schema +change has completed. + + +| Field | Description | Sensitive | +|--|--|--| +| `LatencyNanos` | The amount of time the schema change job took to complete. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `finish_schema_change_rollback` + +An event of type `finish_schema_change_rollback` is recorded when a previously +initiated schema change rollback has completed. + + +| Field | Description | Sensitive | +|--|--|--| +| `LatencyNanos` | The amount of time the schema change job took to rollback. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `force_delete_table_data_entry` + + + +| Field | Description | Sensitive | +|--|--|--| +| `DescriptorID` | | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. 
The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `refresh_materialized_view` + +An event of type `refresh_materialized_view` is recorded when a materialized view is refreshed. + + +| Field | Description | Sensitive | +|--|--|--| +| `ViewName` | The name of the materialized view being refreshed. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `rename_database` + +An event of type `rename_database` is recorded when a database is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The old name of the affected database. | no | +| `NewDatabaseName` | The new name of the affected database. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `rename_function` + +An event of type `rename_function` is recorded when a user-defined function is renamed. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | The old name of the affected function. | no | +| `NewFunctionName` | The new name of the affected function. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `rename_schema` + +An event of type `rename_schema` is recorded when a schema is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The old name of the affected schema. | no | +| `NewSchemaName` | The new name of the affected schema. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `rename_table` + +An event of type `rename_table` is recorded when a table, sequence or view is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The old name of the affected table. | no | +| `NewTableName` | The new name of the affected table. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. 
The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `rename_type` + +An event of type `rename_type` is recorded when a user-defined type is renamed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The old name of the affected type. | no | +| `NewTypeName` | The new name of the affected type. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. 
| no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `reverse_schema_change` + +An event of type `reverse_schema_change` is recorded when an in-progress schema change +encounters a problem and is reversed. + + +| Field | Description | Sensitive | +|--|--|--| +| `Error` | The error encountered that caused the schema change to be reversed. The specific format of the error is variable and can change across releases without warning. | partially | +| `SQLSTATE` | The SQLSTATE code for the error. | no | +| `LatencyNanos` | The amount of time the schema change job took before being reverted. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `MutationID` | The descriptor mutation that this schema change was processing. | no | + +### `set_schema` + +An event of type `set_schema` is recorded when a table, view, sequence or type's schema is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DescriptorName` | The old name of the affected descriptor. | no | +| `NewDescriptorName` | The new name of the affected descriptor. | no | +| `DescriptorType` | The descriptor type being changed (table, view, sequence, type). | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. 
| no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `truncate_table` + +An event of type `truncate_table` is recorded when a table is truncated. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `unsafe_delete_descriptor` + +An event of type `unsafe_delete_descriptor` is recorded when a descriptor is written +using crdb_internal.unsafe_delete_descriptor(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. + + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | no | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `unsafe_delete_namespace_entry` + +An event of type `unsafe_delete_namespace_entry` is recorded when a namespace entry is +written using crdb_internal.unsafe_delete_namespace_entry(). + +The fields of this event type are reserved and can change across +patch releases without advance notice. + + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | no | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `unsafe_upsert_descriptor` + +An event of type `unsafe_upsert_descriptor` is recorded when a descriptor is written +using crdb_internal.unsafe_upsert_descriptor(). + + +| Field | Description | Sensitive | +|--|--|--| +| `PreviousDescriptor` | | yes | +| `NewDescriptor` | | yes | +| `Force` | | no | +| `ForceNotice` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `unsafe_upsert_namespace_entry` + +An event of type `unsafe_upsert_namespace_entry` is recorded when a namespace entry is +written using crdb_internal.unsafe_upsert_namespace_entry(). 
+ +The fields of this event type are reserved and can change across +patch releases without advance notice. + + +| Field | Description | Sensitive | +|--|--|--| +| `ParentID` | | no | +| `ParentSchemaID` | | no | +| `Name` | | no | +| `PreviousID` | | no | +| `Force` | | no | +| `FailedValidation` | | no | +| `ValidationErrors` | | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +## SQL Privilege changes + +Events in this category pertain to DDL (Data Definition Language) +operations performed by SQL statements that modify the privilege +grants for stored objects. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. 
+ +Events in this category are logged to the `PRIVILEGES` channel. + + +### `alter_database_owner` + +An event of type `alter_database_owner` is recorded when a database's owner is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the database being affected. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_default_privileges` + +An event of type `alter_default_privileges` is recorded when default privileges are changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | yes | +| `RoleName` | Either role_name should be populated or for_all_roles should be true. 
The role having its default privileges altered. | yes | +| `ForAllRoles` | Identifies if FOR ALL ROLES is used. | no | +| `SchemaName` | The name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `alter_function_owner` + +An event of type `alter_function_owner` is recorded when the owner of a user-defined function is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `FunctionName` | The name of the affected user-defined function. | yes | +| `Owner` | The name of the new owner. 
| yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_schema_owner` + +An event of type `alter_schema_owner` is recorded when a schema's owner is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_table_owner` + +An event of type `alter_table_owner` is recorded when the owner of a table, view or sequence is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected object. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. 
| no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `alter_type_owner` + +An event of type `alter_type_owner` is recorded when the owner of a user-defined type is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | +| `Owner` | The name of the new owner. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. 
| no | + +### `change_database_privilege` + +An event of type `change_database_privilege` is recorded when privileges are +added to / removed from a user for a database object. + + +| Field | Description | Sensitive | +|--|--|--| +| `DatabaseName` | The name of the affected database. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_function_privilege` + + + +| Field | Description | Sensitive | +|--|--|--| +| `FuncName` | The name of the affected function. 
| yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_schema_privilege` + +An event of type `change_schema_privilege` is recorded when privileges are added to / +removed from a user for a schema object. + + +| Field | Description | Sensitive | +|--|--|--| +| `SchemaName` | The name of the affected schema. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_table_privilege` + +An event of type `change_table_privilege` is recorded when privileges are added to / removed +from a user for a table, sequence or view object. + + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The name of the affected table. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +### `change_type_privilege` + +An event of type `change_type_privilege` is recorded when privileges are added to / +removed from a user for a type object. + + +| Field | Description | Sensitive | +|--|--|--| +| `TypeName` | The name of the affected type. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. 
This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `Grantee` | The user/role affected by the grant or revoke operation. | yes | +| `GrantedPrivileges` | The privileges being granted to the grantee. | no | +| `RevokedPrivileges` | The privileges being revoked from the grantee. | no | + +## SQL Session events + +Events in this category report SQL client connections +and sessions. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these miscellaneous events are +preserved in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `SESSIONS` channel. + + +### `client_authentication_failed` + +An event of type `client_authentication_failed` is reported when a client session +did not authenticate successfully. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Reason` | The reason for the authentication failure. See below for possible values for type `AuthFailReason`. | no | +| `Detail` | The detailed error for the authentication failure. | partially | +| `Method` | The authentication method used. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | partially | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | partially | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | partially | + +### `client_authentication_info` + +An event of type `client_authentication_info` is reported for intermediate +steps during the authentication process. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Method` | The authentication method used, once known. | no | +| `Info` | The authentication progress message. | partially | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. 
| no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | partially | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | partially | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | partially | + +### `client_authentication_ok` + +An event of type `client_authentication_ok` is reported when a client session +was authenticated successfully. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Method` | The authentication method used. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | partially | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. 
| no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. | partially | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | partially | + +### `client_connection_end` + +An event of type `client_connection_end` is reported when a client connection +is closed. This is reported even when authentication +fails, and even for simple cancellation messages. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_connections.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Duration` | The duration of the connection in nanoseconds. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | partially | +| `SessionID` | The connection's hex encoded session id. | no | + +### `client_connection_start` + +An event of type `client_connection_start` is reported when a client connection +is established. This is reported even when authentication +fails, and even for simple cancellation messages. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_connections.enabled` is set. 
+ + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | partially | +| `SessionID` | The connection's hex encoded session id. | no | + +### `client_session_end` + +An event of type `client_session_end` is reported when a client session +is completed. + +Events of this type are only emitted when the cluster setting +`server.auth_log.sql_sessions.enabled` is set. + + +| Field | Description | Sensitive | +|--|--|--| +| `Duration` | The duration of the connection in nanoseconds. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `InstanceID` | The instance ID (not tenant ID) of the SQL server where the event was originated. | no | +| `Network` | The network protocol for this connection: tcp4, tcp6, unix, etc. | no | +| `RemoteAddress` | The remote address of the SQL client. Note that when using a proxy or other intermediate server, this field will contain the address of the intermediate server. | partially | +| `SessionID` | The connection's hex encoded session id. | no | +| `Transport` | The connection type after transport negotiation. | no | +| `User` | The database username the session is for. This username will have undergone case-folding and Unicode normalization. 
| partially | +| `SystemIdentity` | The original system identity provided by the client, if an identity mapping was used per Host-Based Authentication rules. This may be a GSSAPI or X.509 principal or any other external value, so no specific assumptions should be made about the contents of this field. | partially | + +## SQL Slow Query Log + +Events in this category report slow query execution. + +Note: these events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +In version 26.1, these events moved to the `SQL_EXEC` channel. +To test compatibility prior to this, set the cluster setting +`log.channel_compatibility_mode.enabled` to true. This will send the +events to `SQL_PERF` instead of `SQL_EXEC`. + +Events in this category are logged to the `SQL_EXEC` channel. + + +### `large_row` + +An event of type `large_row` is recorded when a statement tries to write a row larger than +cluster setting `sql.guardrails.max_row_size_log` to the database. Multiple +LargeRow events will be recorded for statements writing multiple large rows. +LargeRow events are recorded before the transaction commits, so in the case +of transaction abort there will not be a corresponding row in the database. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `RowSize` | | no | +| `TableID` | | no | +| `FamilyID` | | no | +| `PrimaryKey` | | yes | + +### `scan_row_count_misestimate` + +An event of type `scan_row_count_misestimate` is recorded when the optimizer's row count estimate +for a logical scan differs significantly from the actual number of rows read, +and cluster setting `sql.log.scan_row_count_misestimate.enabled` is set. 
+ + +| Field | Description | Sensitive | +|--|--|--| +| `TableName` | The fully qualified name of the table being scanned. | no | +| `IndexName` | The name of the index being scanned. | no | +| `EstimatedRowCount` | The optimizer's estimated row count for the scan. | no | +| `ActualRowCount` | The actual number of rows read by all processors performing the scan. | no | +| `NanosSinceStatsCollected` | Time in nanoseconds that have passed since full stats were collected on the table. | no | +| `EstimatedStaleness` | Estimated fraction of stale rows in the table based on the time since stats were last collected. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. 
| no | + +### `slow_query` + +An event of type `slow_query` is recorded when a query triggers the "slow query" condition. + +As of this writing, the condition requires: +- the cluster setting `sql.log.slow_query.latency_threshold` +set to a non-zero value, AND +- EITHER of the following conditions: +- the actual age of the query exceeds the configured threshold; AND/OR +- the query performs a full table/index scan AND the cluster setting +`sql.log.slow_query.experimental_full_table_scans.enabled` is set. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. 
For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was retried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `txn_rows_read_limit` + +An event of type `txn_rows_read_limit` is recorded when a transaction tries to read more rows than +cluster setting `sql.defaults.transaction_rows_read_log`. There will only be +a single record for a single transaction (unless it is retried) even if there +are more statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +### `txn_rows_written_limit` + +An event of type `txn_rows_written_limit` is recorded when a transaction tries to write more rows +than cluster setting `sql.defaults.transaction_rows_written_log`. There will +only be a single record for a single transaction (unless it is retried) even +if there are more mutation statements within the transaction that haven't +been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +## SQL Slow Query Log (Internal) + +Events in this category report slow query execution by +internal executors, i.e., when CockroachDB internally issues +SQL statements. + +Note: these events are not written to `system.eventlog`, even +when the cluster setting `system.eventlog.enabled` is set. They +are only emitted via external logging. + +In version 26.1, these events moved to the `SQL_EXEC` channel. +To test compatibility prior to this, set the cluster setting +`log.channel_compatibility_mode.enabled` to true. This will send the +events to `SQL_INTERNAL_PERF` instead of `SQL_EXEC`. + +Events in this category are logged to the `SQL_EXEC` channel. + + +### `large_row_internal` + +An event of type `large_row_internal` is recorded when an internal query tries to write a row +larger than cluster settings `sql.guardrails.max_row_size_log` or +`sql.guardrails.max_row_size_err` to the database. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | +| `RowSize` | | no | +| `TableID` | | no | +| `FamilyID` | | no | +| `PrimaryKey` | | yes | + +### `slow_query_internal` + +An event of type `slow_query_internal` is recorded when a query triggers the "slow query" condition, +and the cluster setting `sql.log.slow_query.internal_queries.enabled` is +set. +See the documentation for the event type `slow_query` for details about +the "slow query" condition. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. 
| no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was retried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. | no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `txn_rows_read_limit_internal` + +An event of type `txn_rows_read_limit_internal` is recorded when an internal transaction tries to +read more rows than cluster setting `sql.defaults.transaction_rows_read_log` +or `sql.defaults.transaction_rows_read_err`. There will only be a single +record for a single transaction (unless it is retried) even if there are more +mutation statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. 
| depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +### `txn_rows_written_limit_internal` + +An event of type `txn_rows_written_limit_internal` is recorded when an internal transaction tries to +write more rows than cluster setting +`sql.defaults.transaction_rows_written_log` or +`sql.defaults.transaction_rows_written_err`. There will only be a single +record for a single transaction (unless it is retried) even if there are more +mutation statements within the transaction that haven't been executed yet. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. 
The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `TxnID` | TxnID is the ID of the transaction that hit the row count limit. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `NumRows` | NumRows is the number of rows written/read (depending on the event type) by the transaction that reached the corresponding guardrail. | no | + +## SQL User and Role operations + +Events in this category pertain to SQL statements that modify the +properties of users and roles. + +They are relative to a particular SQL tenant. +In a multi-tenant setup, copies of DDL-related events are preserved +in each tenant's own `system.eventlog` table. + +Events in this category are logged to the `USER_ADMIN` channel. + + +### `alter_role` + +An event of type `alter_role` is recorded when a role is altered. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the affected user/role. | yes | +| `Options` | The options set on the user/role. | no | +| `SetInfo` | Information corresponding to an ALTER ROLE SET statement. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. 
The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `create_role` + +An event of type `create_role` is recorded when a role is created. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the new user/role. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. 
Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `drop_role` + +An event of type `drop_role` is recorded when a role is dropped. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the affected user/role. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. 
| no | + +### `grant_role` + +An event of type `grant_role` is recorded when a role is granted. + + +| Field | Description | Sensitive | +|--|--|--| +| `GranteeRoles` | The roles being granted to. | yes | +| `Members` | The roles being granted. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | + +### `password_hash_converted` + +An event of type `password_hash_converted` is recorded when the password credentials +are automatically converted server-side. + + +| Field | Description | Sensitive | +|--|--|--| +| `RoleName` | The name of the user/role whose credentials have been converted. | yes | +| `OldMethod` | The previous hash method. | no | +| `NewMethod` | The new hash method. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Sampled SQL Events + +Events in this category report sample of SQL events. + +Events in this category are logged to the `SQL_EXEC` channel. + + +### `m_v_c_c_iterator_stats` + +Internal storage iteration statistics for a single execution. + + +| Field | Description | Sensitive | +|--|--|--| +| `StepCount` | StepCount collects the number of times the iterator moved forward or backward over the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `StepCountInternal` | StepCountInternal collects the number of times the iterator moved forward or backward over LSM internal keys. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `SeekCount` | SeekCount collects the number of times the iterator moved to a specific key/value pair in the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `SeekCountInternal` | SeekCountInternal collects the number of times the iterator moved to a specific LSM internal key. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `BlockBytes` | BlockBytes collects the bytes in the loaded SSTable data blocks. For details, see pebble.InternalIteratorStats. | no | +| `BlockBytesInCache` | BlockBytesInCache collects the subset of BlockBytes in the block cache. For details, see pebble.InternalIteratorStats. | no | +| `KeyBytes` | KeyBytes collects the bytes in keys that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `ValueBytes` | ValueBytes collects the bytes in values that were iterated over. For details, see pebble.InternalIteratorStats. 
| no | +| `PointCount` | PointCount collects the count of point keys iterated over. For details, see pebble.InternalIteratorStats. | no | +| `PointsCoveredByRangeTombstones` | PointsCoveredByRangeTombstones collects the count of point keys that were iterated over that were covered by range tombstones. For details, see pebble.InternalIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeyCount` | RangeKeyCount collects the count of range keys encountered during iteration. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeyContainedPoints` | RangeKeyContainedPoints collects the count of point keys encountered within the bounds of a range key. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `RangeKeySkippedPoints` | RangeKeySkippedPoints collects the count of the subset of ContainedPoints point keys that were skipped during iteration due to range-key masking. For details, see pkg/storage/engine.go, pebble.RangeKeyIteratorStats, and docs/tech-notes/mvcc-range-tombstones.md. | no | + + + +### `sampled_exec_stats` + +An event of type `sampled_exec_stats` contains execution statistics that apply to both statements +and transactions. These stats as a whole are collected using a sampling approach. +These exec stats are meant to contain the same fields as ExecStats in +apps_stats.proto but are for a single execution rather than aggregated executions. +Fields in this struct should be updated in sync with apps_stats.proto. + + +| Field | Description | Sensitive | +|--|--|--| +| `NetworkBytes` | NetworkBytes collects the number of bytes sent over the network by DistSQL components. | no | +| `MaxMemUsage` | MaxMemUsage collects the maximum memory usage that occurred on a node. | no | +| `ContentionTime` | ContentionTime collects the time in seconds statements in the transaction spent contending. 
| no | +| `NetworkMessages` | NetworkMessages collects the number of messages that were sent over the network by DistSQL components. | no | +| `MaxDiskUsage` | MaxDiskUsage collects the maximum temporary disk usage that occurred. This is set in cases where a query had to spill to disk, e.g. when performing a large sort where not all of the tuples fit in memory. | no | +| `CPUSQLNanos` | CPUSQLNanos collects the CPU time spent executing SQL operations in nanoseconds. Currently, it is only collected for statements without mutations that have a vectorized plan. | no | +| `MVCCIteratorStats` | Internal storage iteration statistics. | yes | +| `AdmissionWaitTime` | AdmissionWaitTime is the cumulative time spent in admission control queues. | no | + + + +### `sampled_query` + +An event of type `sampled_query` is the SQL query event logged to the telemetry channel. It +contains common SQL event/execution details. + +Note: in version 26.1, these events moved to the `SQL_EXEC` channel. +To test compatibility prior to this, set the cluster setting +`log.channel_compatibility_mode.enabled` to true. This will send the +events to `TELEMETRY` instead of `SQL_EXEC`. + + +| Field | Description | Sensitive | +|--|--|--| +| `SkippedQueries` | skipped_queries indicate how many SQL statements were not considered for sampling prior to this one. If the field is omitted, or its value is zero, this indicates that no statement was omitted since the last event. | no | +| `CostEstimate` | Cost of the query as estimated by the optimizer. | no | +| `Distribution` | The distribution of the DistSQL query plan (local, full, or partial). | no | +| `PlanGist` | The query's plan gist bytes as a base64 encoded string. | no | +| `SessionID` | SessionID is the ID of the session that initiated the query. | no | +| `Database` | Name of the database that initiated the query. | no | +| `StatementID` | Statement ID of the query. | no | +| `TransactionID` | Transaction ID of the query. 
| no | +| `MaxFullScanRowsEstimate` | Maximum number of rows scanned by a full scan, as estimated by the optimizer. | no | +| `TotalScanRowsEstimate` | Total number of rows read by all scans in the query, as estimated by the optimizer. | no | +| `OutputRowsEstimate` | The number of rows output by the query, as estimated by the optimizer. | no | +| `StatsAvailable` | Whether table statistics were available to the optimizer when planning the query. | no | +| `NanosSinceStatsCollected` | The maximum number of nanoseconds that have passed since stats were collected on any table scanned by this query. | no | +| `BytesRead` | The number of bytes read from disk. | no | +| `RowsRead` | The number of rows read from disk. | no | +| `RowsWritten` | The number of rows written. | no | +| `InnerJoinCount` | The number of inner joins in the query plan. | no | +| `LeftOuterJoinCount` | The number of left (or right) outer joins in the query plan. | no | +| `FullOuterJoinCount` | The number of full outer joins in the query plan. | no | +| `SemiJoinCount` | The number of semi joins in the query plan. | no | +| `AntiJoinCount` | The number of anti joins in the query plan. | no | +| `IntersectAllJoinCount` | The number of intersect all joins in the query plan. | no | +| `ExceptAllJoinCount` | The number of except all joins in the query plan. | no | +| `HashJoinCount` | The number of hash joins in the query plan. | no | +| `CrossJoinCount` | The number of cross joins in the query plan. | no | +| `IndexJoinCount` | The number of index joins in the query plan. | no | +| `LookupJoinCount` | The number of lookup joins in the query plan. | no | +| `MergeJoinCount` | The number of merge joins in the query plan. | no | +| `InvertedJoinCount` | The number of inverted joins in the query plan. | no | +| `ApplyJoinCount` | The number of apply joins in the query plan. | no | +| `ZigZagJoinCount` | The number of zig zag joins in the query plan. 
| no | +| `ContentionNanos` | The duration of time in nanoseconds that the query experienced contention. | no | +| `Regions` | The regions of the nodes where SQL processors ran. | no | +| `NetworkBytesSent` | The number of network bytes by DistSQL components. | no | +| `MaxMemUsage` | The maximum amount of memory usage by nodes for this query. | no | +| `MaxDiskUsage` | The maximum amount of disk usage by nodes for this query. | no | +| `KVBytesRead` | The number of bytes read at the KV layer for this query. | no | +| `KVPairsRead` | The number of key-value pairs read at the KV layer for this query. | no | +| `KVRowsRead` | The number of rows read at the KV layer for this query. | no | +| `NetworkMessages` | The number of network messages sent by nodes for this query by DistSQL components. | no | +| `IndexRecommendations` | Generated index recommendations for this query. | no | +| `ScanCount` | The number of scans in the query plan. | no | +| `ScanWithStatsCount` | The number of scans using statistics (including forecasted statistics) in the query plan. | no | +| `ScanWithStatsForecastCount` | The number of scans using forecasted statistics in the query plan. | no | +| `TotalScanRowsWithoutForecastsEstimate` | Total number of rows read by all scans in the query, as estimated by the optimizer without using forecasts. | no | +| `NanosSinceStatsForecasted` | The greatest quantity of nanoseconds that have passed since the forecast time (or until the forecast time, if it is in the future, in which case it will be negative) for any table with forecasted stats scanned by this query. | no | +| `Indexes` | The list of indexes used by this query. | no | +| `CpuTimeNanos` | Collects the cumulative CPU time spent executing SQL operations in nanoseconds. Currently, it is only collected for statements without mutations that have a vectorized plan. 
| no | +| `KvGrpcCalls` | The number of grpc calls done to get data from KV nodes. | no | +| `KvTimeNanos` | Cumulated time spent waiting for a KV request. This includes disk IO time and potentially network time (if any of the keys are not local). | no | +| `ServiceLatencyNanos` | The time to service the query, from start of parse to end of execute. | no | +| `OverheadLatencyNanos` | The difference between service latency and the sum of parse latency + plan latency + run latency. | no | +| `RunLatencyNanos` | The time to run the query and fetch or compute the result rows. | no | +| `PlanLatencyNanos` | The time to transform the AST into a logical query plan. | no | +| `IdleLatencyNanos` | The time between statement executions in a transaction. | no | +| `ParseLatencyNanos` | The time to transform the SQL string into an abstract syntax tree (AST). | no | +| `MvccStepCount` | StepCount collects the number of times the iterator moved forward or backward over the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccStepCountInternal` | StepCountInternal collects the number of times the iterator moved forward or backward over LSM internal keys. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccSeekCount` | SeekCount collects the number of times the iterator moved to a specific key/value pair in the DB's underlying storage keyspace. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccSeekCountInternal` | SeekCountInternal collects the number of times the iterator moved to a specific LSM internal key. For details, see pkg/storage/engine.go and pkg/sql/opt/exec/factory.go. | no | +| `MvccBlockBytes` | BlockBytes collects the bytes in the loaded SSTable data blocks. For details, see pebble.InternalIteratorStats. | no | +| `MvccBlockBytesInCache` | BlockBytesInCache collects the subset of BlockBytes in the block cache. 
For details, see pebble.InternalIteratorStats. | no | +| `MvccKeyBytes` | KeyBytes collects the bytes in keys that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccValueBytes` | ValueBytes collects the bytes in values that were iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccPointCount` | PointCount collects the count of point keys iterated over. For details, see pebble.InternalIteratorStats. | no | +| `MvccPointsCoveredByRangeTombstones` | PointsCoveredByRangeTombstones collects the count of point keys that were iterated over that were covered by range tombstones. For details, see pebble.InternalIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeyCount` | RangeKeyCount collects the count of range keys encountered during iteration. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeyContainedPoints` | RangeKeyContainedPoints collects the count of point keys encountered within the bounds of a range key. For details, see pebble.RangeKeyIteratorStats and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `MvccRangeKeySkippedPoints` | RangeKeySkippedPoints collects the count of the subset of ContainedPoints point keys that were skipped during iteration due to range-key masking. For details, see pkg/storage/engine.go, pebble.RangeKeyIteratorStats, and docs/tech-notes/mvcc-range-tombstones.md. | no | +| `SchemaChangerMode` | SchemaChangerMode is the mode that was used to execute the schema change, if any. | no | +| `SQLInstanceIDs` | SQLInstanceIDs is a list of all the SQL instances used in this statement's execution. | no | +| `KVNodeIDs` | KVNodeIDs is a list of all the KV nodes used in this statement's execution. | no | +| `StatementFingerprintID` | Statement fingerprint ID of the query. | no | +| `UsedFollowerRead` | UsedFollowerRead indicates whether at least some reads were served by the follower replicas. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `ExecMode` | How the statement was being executed (exec/prepare, etc.) | no | +| `NumRows` | Number of rows returned. For mutation statements (INSERT, etc) that do not produce result rows, this field reports the number of rows affected. | no | +| `SQLSTATE` | The SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | The text of the error if any. | partially | +| `Age` | Age of the query in milliseconds. | no | +| `NumRetries` | Number of retries, when the txn was reretried automatically by the server. | no | +| `FullTableScan` | Whether the query contains a full table scan. 
| no | +| `FullIndexScan` | Whether the query contains a full secondary index scan of a non-partial index. | no | +| `TxnCounter` | The sequence number of the SQL transaction inside its session. | no | +| `BulkJobId` | The job id for bulk job (IMPORT/BACKUP/RESTORE). | no | +| `StmtPosInTxn` | The statement's index in the transaction, starting at 1. | no | + +### `sampled_transaction` + +An event of type `sampled_transaction` is the event logged to telemetry at the end of transaction execution. + +Note: in version 26.1, these events moved to the `SQL_EXEC` channel. +To test compatibility prior to this, set the cluster setting +`log.channel_compatibility_mode.enabled` to true. This will send the +events to `TELEMETRY` instead of `SQL_EXEC`. + + +| Field | Description | Sensitive | +|--|--|--| +| `User` | User is the user account that triggered the transaction. The special usernames `root` and `node` are not considered sensitive. | depends | +| `ApplicationName` | ApplicationName is the application name for the session where the transaction was executed. This is included in the event to ease filtering of logging output by application. | no | +| `TxnCounter` | TxnCounter is the sequence number of the SQL transaction inside its session. | no | +| `SessionID` | SessionID is the ID of the session that initiated the transaction. | no | +| `TransactionID` | TransactionID is the id of the transaction. | no | +| `Committed` | Committed indicates if the transaction committed successfully. We want to include this value even if it is false. | no | +| `ImplicitTxn` | ImplicitTxn indicates if the transaction was an implicit one. We want to include this value even if it is false. | no | +| `StartTimeUnixNanos` | StartTimeUnixNanos is the time the transaction was started. Expressed as unix time in nanoseconds. | no | +| `EndTimeUnixNanos` | EndTimeUnixNanos is the time the transaction finished (either committed or aborted). Expressed as unix time in nanoseconds. 
| no | +| `ServiceLatNanos` | ServiceLatNanos is the time to service the whole transaction, from start to end of execution. | no | +| `SQLSTATE` | SQLSTATE is the SQLSTATE code for the error, if an error was encountered. Empty/omitted if no error. | no | +| `ErrorText` | ErrorText is the text of the error if any. | partially | +| `NumRetries` | NumRetries is the number of times when the txn was retried automatically by the server. | no | +| `LastAutoRetryReason` | LastAutoRetryReason is a string containing the reason for the last automatic retry. | partially | +| `NumRows` | NumRows is the total number of rows returned across all statements. | no | +| `RetryLatNanos` | RetryLatNanos is the amount of time spent retrying the transaction. | no | +| `CommitLatNanos` | CommitLatNanos is the amount of time spent committing the transaction after all statement operations. | no | +| `IdleLatNanos` | IdleLatNanos is the amount of time spent waiting for the client to send statements while the transaction is open. | no | +| `BytesRead` | BytesRead is the number of bytes read from disk. | no | +| `RowsRead` | RowsRead is the number of rows read from disk. | no | +| `RowsWritten` | RowsWritten is the number of rows written to disk. | no | +| `SampledExecStats` | SampledExecStats is a nested field containing execution statistics. This field will be omitted if the stats were not sampled. | yes | +| `SkippedTransactions` | SkippedTransactions is the number of transactions that were skipped as part of sampling prior to this one. We only count skipped transactions when telemetry logging is enabled and the sampling mode is set to "transaction". | no | +| `TransactionFingerprintID` | TransactionFingerprintID is the fingerprint ID of the transaction. This can be used to find the transaction in the console. | no | +| `StatementFingerprintIDs` | StatementFingerprintIDs is an array of statement fingerprint IDs belonging to this transaction. 
| no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Storage telemetry events + + + +Events in this category are logged to the `TELEMETRY` channel. + + +### `level_stats` + +An event of type `level_stats` contains per-level statistics for an LSM. + + +| Field | Description | Sensitive | +|--|--|--| +| `Level` | level is the level ID in a LSM (e.g. level(L0) == 0, etc.) | no | +| `NumFiles` | num_files is the number of files in the level (gauge). | no | +| `SizeBytes` | size_bytes is the size of the level, in bytes (gauge). | no | +| `Score` | score is the compaction score of the level (gauge). | no | +| `BytesIn` | bytes_in is the number of bytes written to this level (counter). | no | +| `BytesIngested` | bytes_ingested is the number of bytes ingested into this level (counter). | no | +| `BytesMoved` | bytes_moved is the number of bytes moved into this level via a move-compaction (counter). | no | +| `BytesRead` | bytes_read is the number of bytes read from this level, during compactions (counter). | no | +| `BytesCompacted` | bytes_compacted is the number of bytes written to this level during compactions (counter). | no | +| `BytesFlushed` | bytes flushed is the number of bytes flushed to this level. This value is always zero for levels other than L0 (counter). | no | +| `TablesCompacted` | tables_compacted is the count of tables compacted into this level (counter). | no | +| `TablesFlushed` | tables_flushed is the count of tables flushed into this level (counter). | no | +| `TablesIngested` | tables_ingested is the count of tables ingested into this level (counter). | no | +| `TablesMoved` | tables_moved is the count of tables moved into this level via move-compactions (counter). | no | +| `NumSublevels` | num_sublevel is the count of sublevels for the level. 
This value is always zero for levels other than L0 (gauge). | no | + + + +### `store_stats` + +An event of type `store_stats` contains per store stats. + +Note that because stats are scoped to the lifetime of the process, counters +(and certain gauges) will be reset across node restarts. + + +| Field | Description | Sensitive | +|--|--|--| +| `NodeId` | node_id is the ID of the node. | no | +| `StoreId` | store_id is the ID of the store. | no | +| `Levels` | levels is a nested message containing per-level statistics. | yes | +| `CacheSize` | cache_size is the size of the cache for the store, in bytes (gauge). | no | +| `CacheCount` | cache_count is the number of items in the cache (gauge). | no | +| `CacheHits` | cache_hits is the number of cache hits (counter). | no | +| `CacheMisses` | cache_misses is the number of cache misses (counter). | no | +| `CompactionCountDefault` | compaction_count_default is the count of default compactions (counter). | no | +| `CompactionCountDeleteOnly` | compaction_count_delete_only is the count of delete-only compactions (counter). | no | +| `CompactionCountElisionOnly` | compaction_count_elision_only is the count of elision-only compactions (counter). | no | +| `CompactionCountMove` | compaction_count_move is the count of move-compactions (counter). | no | +| `CompactionCountRead` | compaction_count_read is the count of read-compactions (counter). | no | +| `CompactionCountRewrite` | compaction_count_rewrite is the count of rewrite-compactions (counter). | no | +| `CompactionNumInProgress` | compactions_num_in_progress is the number of compactions in progress (gauge). | no | +| `CompactionMarkedFiles` | compaction_marked_files is the count of files marked for compaction (gauge). | no | +| `FlushCount` | flush_count is the number of flushes (counter). 
| no | +| `FlushIngestCount` | | no | +| `FlushIngestTableCount` | | no | +| `FlushIngestTableBytes` | | no | +| `IngestCount` | ingest_count is the number of successful ingest operations (counter). | no | +| `MemtableSize` | memtable_size is the total size allocated to all memtables and (large) batches, in bytes (gauge). | no | +| `MemtableCount` | memtable_count is the count of memtables (gauge). | no | +| `MemtableZombieCount` | memtable_zombie_count is the count of memtables no longer referenced by the current DB state, but still in use by an iterator (gauge). | no | +| `MemtableZombieSize` | memtable_zombie_size is the size, in bytes, of all zombie memtables (gauge). | no | +| `WalLiveCount` | wal_live_count is the count of live WAL files (gauge). | no | +| `WalLiveSize` | wal_live_size is the size, in bytes, of live data in WAL files. With WAL recycling, this value is less than the actual on-disk size of the WAL files (gauge). | no | +| `WalObsoleteCount` | wal_obsolete_count is the count of obsolete WAL files (gauge). | no | +| `WalObsoleteSize` | wal_obsolete_size is the size of obsolete WAL files, in bytes (gauge). | no | +| `WalPhysicalSize` | wal_physical_size is the size, in bytes, of the WAL files on disk (gauge). | no | +| `WalBytesIn` | wal_bytes_in is the number of logical bytes written to the WAL (counter). | no | +| `WalBytesWritten` | wal_bytes_written is the number of bytes written to the WAL (counter). | no | +| `TableObsoleteCount` | table_obsolete_count is the number of tables which are no longer referenced by the current DB state or any open iterators (gauge). | no | +| `TableObsoleteSize` | table_obsolete_size is the size, in bytes, of obsolete tables (gauge). | no | +| `TableZombieCount` | table_zombie_count is the number of tables no longer referenced by the current DB state, but are still in use by an open iterator (gauge). | no | +| `TableZombieSize` | table_zombie_size is the size, in bytes, of zombie tables (gauge). 
| no | +| `RangeKeySetsCount` | range_key_sets_count is the approximate count of internal range key sets in the store. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## TELEMETRY + +Events in this file are related to bulk ingest operations performance metrics. + +Events in this category are logged to the `TELEMETRY` channel. + + +### `bulk_ingest_completed` + +An event of type `bulk_ingest_completed` is an event that is logged when a bulk ingest job +(restore, import, etc.) completes successfully. +It captures key performance metrics for the operation. + + +| Field | Description | Sensitive | +|--|--|--| +| `JobID` | JobID is the ID of the bulk ingest job. | no | +| `JobType` | JobType identifies the type of bulk ingest job (e.g., "restore", "import"). | no | +| `NumRows` | NumRows is the number of rows successfully ingested. | no | +| `DurationSeconds` | Duration of the ingest operation in seconds. | no | +| `DataSizeMb` | Total logical size of data ingested in megabytes. | no | +| `NodeCount` | Number of nodes that participated in the ingest operation. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +## Telemetry events + + + +Events in this category are logged to the `TELEMETRY` channel. + + +### `captured_index_usage_stats` + +An event of type `captured_index_usage_stats` + + +| Field | Description | Sensitive | +|--|--|--| +| `TotalReadCount` | TotalReadCount is the number of times the index has been read. | no | +| `LastRead` | LastRead is the timestamp at which the index was last read. | no | +| `TableID` | TableID is the ID of the table on which the index was created. 
This is same as descpb.TableID and is unique within the cluster. | no | +| `IndexID` | IndexID is the ID of the index within the scope of the given table. | no | +| `DatabaseName` | DatabaseName is the name of the database in which the index was created. | no | +| `TableName` | TableName is the name of the table on which the index was created. | no | +| `IndexName` | IndexName is the name of the index within the scope of the given table. | no | +| `IndexType` | IndexType is the type of the index. Index types include "primary" and "secondary". | no | +| `IsUnique` | IsUnique indicates if the index has a UNIQUE constraint. | no | +| `IsInverted` | IsInverted indicates if the index is an inverted index. | no | +| `CreatedAt` | CreatedAt is the timestamp at which the index was created. | no | +| `SchemaName` | SchemaName is the name of the schema in which the index was created. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `recovery_event` + +An event of type `recovery_event` is an event that is logged on every invocation of BACKUP, +RESTORE, and on every BACKUP schedule creation, with the appropriate subset +of fields populated depending on the type of event. This event is also +logged whenever a BACKUP and RESTORE job completes or fails. + + +| Field | Description | Sensitive | +|--|--|--| +| `RecoveryType` | RecoveryType is the type of recovery described by this event, which is one of - backup - scheduled_backup - create_schedule - restore

It can also be a job event corresponding to the recovery, which is one of - backup_job - scheduled_backup_job - restore_job | no | +| `TargetScope` | TargetScope is the largest scope of the targets that the user is backing up or restoring based on the following order: table < schema < database < full cluster. | no | +| `IsMultiregionTarget` | IsMultiregionTarget is true if any of the targets contain objects with multi-region primitives. | no | +| `TargetCount` | TargetCount is the number of targets in the BACKUP/RESTORE. | no | +| `DestinationStorageTypes` | DestinationStorageTypes are the types of storage that the user is backing up to or restoring from. | no | +| `DestinationAuthTypes` | DestinationAuthTypes are the types of authentication methods that the user is using to access the destination storage. | no | +| `IsLocalityAware` | IsLocalityAware indicates if the BACKUP or RESTORE is locality aware. | no | +| `WithRevisionHistory` | WithRevisionHistory is true if the BACKUP includes revision history. | no | +| `HasEncryptionPassphrase` | HasEncryptionPassphrase is true if the user provided an encryption passphrase to encrypt/decrypt their backup. | no | +| `KMSType` | KMSType is the type of KMS the user is using to encrypt/decrypt their backup. | no | +| `KMSCount` | KMSCount is the number of KMS the user is using. | no | +| `Options` | Options contain all the names of the options specified by the user in the BACKUP or RESTORE statement. For options that are accompanied by a value, only those with non-empty values will be present.

It's important to note that there are no option values anywhere in the event payload. Future changes to telemetry should refrain from adding values to the payload unless they are properly redacted. | no | +| `JobID` | JobID is the ID of the BACKUP/RESTORE job. | no | +| `ResultStatus` | ResultStatus indicates whether the job succeeded or failed. | no | +| `ErrorText` | ErrorText is the text of the error that caused the job to fail. | partially | +| `RecurringCron` | RecurringCron is the crontab for the incremental backup. | no | +| `FullBackupCron` | FullBackupCron is the crontab for the full backup. | no | +| `CustomFirstRunTime` | CustomFirstRunTime is the timestamp for the user configured first run time. Expressed as nanoseconds since the Unix epoch. | no | +| `IgnoreExistingBackup` | IgnoreExistingBackup is true iff the BACKUP schedule should still be created even if a backup is already present in its destination. | no | +| `ApplicationName` | The application name for the session where recovery event was created. | no | +| `NumRows` | NumRows is the number of rows successfully imported, backed up or restored. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `schema_descriptor` + +An event of type `schema_descriptor` is an event for schema telemetry, whose purpose is +to take periodic snapshots of the cluster's SQL schema and publish them in +the telemetry log channel. For all intents and purposes, the data in such a +snapshot can be thought of as the outer join of certain system tables: +namespace, descriptor, and at some point perhaps zones, etc. + +Snapshots are too large to conveniently be published as a single log event, +so instead they're broken down into SchemaDescriptor events which +contain the data in one record of this outer join projection.
These events +are prefixed by a header (a SchemaSnapshotMetadata event). + + +| Field | Description | Sensitive | +|--|--|--| +| `SnapshotID` | SnapshotID is the unique identifier of the snapshot that this event is part of. | no | +| `ParentDatabaseID` | ParentDatabaseID matches the same key column in system.namespace. | no | +| `ParentSchemaID` | ParentSchemaID matches the same key column in system.namespace. | no | +| `Name` | Name matches the same key column in system.namespace. | no | +| `DescID` | DescID matches the 'id' column in system.namespace and system.descriptor. | no | +| `Desc` | Desc matches the 'descriptor' column in system.descriptor. Some contents of the descriptor may be redacted to prevent leaking PII. | no | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | + +### `schema_snapshot_metadata` + +An event of type `schema_snapshot_metadata` is an event describing a schema snapshot, which +is a set of SchemaDescriptor messages sharing the same SnapshotID. + + +| Field | Description | Sensitive | +|--|--|--| +| `SnapshotID` | SnapshotID is the unique identifier of this snapshot. | no | +| `NumRecords` | NumRecords is how many SchemaDescriptor events are in the snapshot. | no | +| `AsOfTimestamp` | AsOfTimestamp is when the snapshot was taken. This is equivalent to the timestamp given in the AS OF SYSTEM TIME clause when querying the namespace and descriptor tables in the system database. Expressed as nanoseconds since the Unix epoch. | no | +| `Errors` | Errors records any errors encountered when post-processing this snapshot, which includes the redaction of any potential PII. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. 
| no | +| `EventType` | The type of the event. | no | + +## Zone config events + +Events in this category pertain to zone configuration changes on +the SQL schema or system ranges. + +When zone configs apply to individual tables or other objects in a +SQL logical schema, they are relative to a particular SQL tenant. +In a multi-tenant setup, copies of these zone config events are preserved +in each tenant's own `system.eventlog` table. + +When they apply to cluster-level ranges (e.g., the system zone config), +they are stored in the system tenant's own `system.eventlog` table. + +Events in this category are logged to the `OPS` channel. + + +### `remove_zone_config` + +An event of type `remove_zone_config` is recorded when a zone config is removed. + + + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. 
| yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + +### `set_zone_config` + +An event of type `set_zone_config` is recorded when a zone config is changed. + + +| Field | Description | Sensitive | +|--|--|--| +| `ResolvedOldConfig` | The string representation of the resolved old zone config. This is not necessarily the same as the zone config that was previously set -- as it includes the resolved values of the zone config options. In other words, a zone config that hasn't been properly "set" yet (and inherits from its parent) will have a resolved_old_config that has details of the values it inherits from its parent. This is particularly useful to get a proper diff between the old and new zone config. | yes | + + +#### Common fields + +| Field | Description | Sensitive | +|--|--|--| +| `Timestamp` | The timestamp of the event. Expressed as nanoseconds since the Unix epoch. | no | +| `EventType` | The type of the event. | no | +| `Statement` | A normalized copy of the SQL statement that triggered the event. The statement string contains a mix of sensitive and non-sensitive details (it is redactable). | partially | +| `Tag` | The statement tag. This is separate from the statement string, since the statement string can contain sensitive information. The tag is guaranteed not to. | no | +| `User` | The user account that triggered the event. The special usernames `root` and `node` are not considered sensitive. | depends | +| `DescriptorID` | The primary object descriptor affected by the operation. Set to zero for operations that don't affect descriptors. | no | +| `ApplicationName` | The application name for the session where the event was emitted. 
This is included in the event to ease filtering of logging output by application. | no | +| `PlaceholderValues` | The mapping of SQL placeholders to their values, for prepared statements. | yes | +| `TxnReadTimestamp` | The current read timestamp of the transaction that triggered the event, if in a transaction. | no | +| `Target` | The target object of the zone config change. | yes | +| `Config` | The applied zone config in YAML format. | yes | +| `Options` | The SQL representation of the applied zone config options. | yes | + + + + +## Enumeration types + +### `AuthFailReason` + +AuthFailReason is the inventory of possible reasons for an +authentication failure. + + +| Value | Textual alias in code or documentation | Description | +|--|--|--| +| 0 | UNKNOWN | is reported when the reason is unknown. | +| 1 | USER_RETRIEVAL_ERROR | occurs when there was an internal error accessing the principals. | +| 2 | USER_NOT_FOUND | occurs when the principal is unknown. | +| 3 | LOGIN_DISABLED | occurs when the user does not have LOGIN privileges. | +| 4 | METHOD_NOT_FOUND | occurs when no HBA rule matches or the method does not exist. | +| 5 | PRE_HOOK_ERROR | occurs when the authentication handshake encountered a protocol error. | +| 6 | CREDENTIALS_INVALID | occurs when the client-provided credentials were invalid. | +| 7 | CREDENTIALS_EXPIRED | occurs when the credentials provided by the client are expired. | +| 8 | NO_REPLICATION_ROLEOPTION | occurs when the connection requires a replication role option, but the user does not have it. | +| 9 | AUTHORIZATION_ERROR | is used for errors during the authorization phase. For example, this would include issues with mapping LDAP groups to SQL roles and granting those roles to the user. | +| 10 | PROVISIONING_ERROR | is used for errors during the user provisioning phase. This would include errors when the transaction to provision the authenticating user failed to execute.
| + + + diff --git a/src/current/_includes/cockroach-generated/release-26.2/logformats.md b/src/current/_includes/cockroach-generated/release-26.2/logformats.md new file mode 100644 index 00000000000..89580c9fc15 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-26.2/logformats.md @@ -0,0 +1,382 @@ + +The supported log output formats are documented below. + + +- [`crdb-v1`](#format-crdb-v1) + +- [`crdb-v1-count`](#format-crdb-v1-count) + +- [`crdb-v1-tty`](#format-crdb-v1-tty) + +- [`crdb-v1-tty-count`](#format-crdb-v1-tty-count) + +- [`crdb-v2`](#format-crdb-v2) + +- [`crdb-v2-tty`](#format-crdb-v2-tty) + +- [`json`](#format-json) + +- [`json-compact`](#format-json-compact) + +- [`json-fluent`](#format-json-fluent) + +- [`json-fluent-compact`](#format-json-fluent-compact) + + + +## Format `crdb-v1` + + +This is a legacy file format used from CockroachDB v1.0. + +Each log entry is emitted using a common prefix, described below, followed by: + +- The logging context tags enclosed between `[` and `]`, if any. It is possible + for this to be omitted if there were no context tags. +- Optionally, a counter column, if the option 'show-counter' is enabled. See below for details. +- the text of the log entry. + +Beware that the text of the log entry can span multiple lines. +The following caveats apply: + +- The text of the log entry can start with text enclosed between `[` and `]`. + If there were no logging tags to start with, it is not possible to distinguish between + logging context tag information and a `[...]` string in the main text of the + log entry. This means that this format is ambiguous. + + To remove this ambiguity, you can use the option 'show-counter'. + +- The text of the log entry can embed arbitrary application-level strings, + including strings that represent log entries. 
In particular, an accident + of implementation can cause the common entry prefix (described below) + to also appear on a line of its own, as part of the payload of a previous + log entry. There is no automated way to recognize when this occurs. + Care must be taken by a human observer to recognize these situations. + +- The log entry parser provided by CockroachDB to read log files is faulty + and is unable to recognize the aforementioned pitfall; nor can it read + entries larger than 64KiB successfully. Generally, use of this internal + log entry parser is discouraged for entries written with this format. + +See the newer format `crdb-v2` for an alternative +without these limitations. + +### Header lines + +At the beginning of each file, a header is printed using a similar format as +regular log entries. This header reports when the file was created, +which parameters were used to start the server, the server identifiers +if known, and other metadata about the running process. + +- This header appears to be logged at severity `INFO` (with an `I` prefix + at the start of the line) even though it does not really have a severity. +- The header is printed unconditionally even when a filter is configured to + omit entries at the `INFO` level. + +### Common log entry prefix + +Each line of output starts with the following prefix: + + Lyymmdd hh:mm:ss.uuuuuu goid [chan@]file:line marker tags counter + +Reminder, the tags may be omitted; and the counter is only printed if the option +'show-counter' is specified. + +| Field | Description | +|-----------------|--------------------------------------------------------------------------------------------------------------------------------------| +| L | A single character, representing the [log level](logging.html#logging-levels-severities) (e.g., `I` for `INFO`). | +| yy | The year (zero padded; i.e., 2016 is `16`). | +| mm | The month (zero padded; i.e., May is `05`). | +| dd | The day (zero padded). 
| +| hh:mm:ss.uuuuuu | Time in hours, minutes and fractional seconds. Timezone is UTC. | +| goid | The goroutine id (omitted if zero for use by tests). | +| chan | The channel number (omitted if zero for backward compatibility). | +| file | The file name where the entry originated. | +| line | The line number where the entry originated. | +| marker | Redactability marker "⋮" (see below for details). | +| tags | The logging tags, enclosed between `[` and `]`. May be absent. | +| counter | The entry counter. Only included if 'show-counter' is enabled. | + +The redactability marker can be empty; in this case, its position in the common prefix is +a double ASCII space character which can be used to reliably identify this situation. + +If the marker "⋮" is present, the remainder of the log entry +contains delimiters (‹...›) around +fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) when log redaction is requested. + + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `show-counter` | Whether to include the counter column in the line header. Without it, the format may be ambiguous due to the optionality of tags. | +| `colors` | The color profile to use. Possible values: none, auto, ansi, 256color. Default is auto. | +| `timezone` | The timezone to use for the timestamp column. The value can be any timezone name recognized by the Go standard library. 
Default is `UTC` | + + + +## Format `crdb-v1-count` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: true` +- `colors: none` + + +## Format `crdb-v1-tty` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: false` +- `colors: auto` + + +## Format `crdb-v1-tty-count` + +This format name is an alias for 'crdb-v1' with +the following format option defaults: + +- `show-counter: true` +- `colors: auto` + + +## Format `crdb-v2` + +This is the main file format used from CockroachDB v21.1. + +Each log entry is emitted using a common prefix, described below, +followed by the text of the log entry. + +### Entry format + +Each line of output starts with the following prefix: + + Lyymmdd hh:mm:ss.uuuuuu goid [chan@]file:line marker [tags...] counter cont + +| Field | Description | +|-----------------|--------------------------------------------------------------------------------------------------------------------------------------| +| L | A single character, representing the [log level](logging.html#logging-levels-severities) (e.g., `I` for `INFO`). | +| yy | The year (zero padded; i.e., 2016 is `16`). | +| mm | The month (zero padded; i.e., May is `05`). | +| dd | The day (zero padded). | +| hh:mm:ss.uuuuuu | Time in hours, minutes and fractional seconds. Timezone is UTC. | +| goid | The goroutine id (zero when cannot be determined). | +| chan | The channel number (omitted if zero for backward compatibility). | +| file | The file name where the entry originated. Also see below. | +| line | The line number where the entry originated. | +| marker | Redactability marker "⋮" (see below for details). | +| tags | The logging tags, enclosed between `[` and `]`. See below. | +| counter | The optional entry counter (see below for details). | +| cont | Continuation mark for structured and multi-line entries. See below. 
| + +The `chan@` prefix before the file name indicates the logging channel, +and is omitted if the channel is `DEV`. + +The file name may be prefixed by the string `(gostd) ` to indicate +that the log entry was produced inside the Go standard library, instead +of a CockroachDB component. Entry parsers must be configured to ignore this prefix +when present. + +`marker` can be empty; in this case, its position in the common prefix is +a double ASCII space character which can be used to reliably identify this situation. +If the marker "⋮" is present, the remainder of the log entry +contains delimiters (‹...›) +around fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) +when log redaction is requested. + +The logging `tags` are enclosed between square brackets `[...]`, +and the syntax `[-]` is used when there are no logging tags +associated with the log entry. + +`counter` is numeric, and is incremented for every +log entry emitted to this sink. (There is thus one counter sequence per +sink.) For entries that do not have a counter value +associated (e.g., header entries in file sinks), the counter position +in the common prefix is empty: `tags` is then +followed by two ASCII space characters, instead of one space; the `counter`, +and another space. The presence of the two ASCII spaces indicates +reliably that no counter was present. + +`cont` is a format/continuation indicator: + +| Continuation indicator | ASCII | Description | +|------------------------|-------|--| +| space | 0x20 | Start of an unstructured entry. | +| equal sign, "=" | 0x3d | Start of a structured entry. | +| exclamation mark, "!" | 0x21 | Start of an embedded stack trace. | +| plus sign, "+" | 0x2b | Continuation of a multi-line entry. The payload contains a newline character at this position. | +| vertical bar | 0x7c | Continuation of a large entry. 
| + +### Examples + +Example single-line unstructured entry: + +~~~ +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 started with engine type ‹2› +~~~ + +Example multi-line unstructured entry: + +~~~ +I210116 21:49:17.083093 14 1@cli/start.go:690 ⋮ [-] 40 node startup completed: +I210116 21:49:17.083093 14 1@cli/start.go:690 ⋮ [-] 40 +CockroachDB node starting at 2021-01-16 21:49 (took 0.0s) +~~~ + +Example structured entry: + +~~~ +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 ={"Timestamp":1610833757080706620,"EventType":"node_restart"} +~~~ + +Example long entries broken up into multiple lines: + +~~~ +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.... +I210116 21:49:17.073282 14 server/node.go:464 ⋮ [-] 23 |aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +~~~ + +~~~ +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 ={"Timestamp":1610833757080706620,"EventTy... +I210116 21:49:17.080713 14 1@util/log/event_log.go:32 ⋮ [-] 32 |pe":"node_restart"} +~~~ + +### Backward-compatibility notes + +Entries in this format can be read by most `crdb-v1` log parsers, +in particular the one included in the DB console and +also the [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) +facility. + +However, implementers of previous version parsers must +understand that the logging tags field is now always +included, and the lack of logging tags is included +by a tag string set to `[-]`. + +Likewise, the entry counter is now also always included, +and there is a special character after `counter` +to indicate whether the remainder of the line is a +structured entry, or a continuation of a previous entry. + +Finally, in the previous format, structured entries +were prefixed with the string `Structured entry: `. In +the new format, they are prefixed by the `=` continuation +indicator. 
+ + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `colors` | The color profile to use. Possible values: none, auto, ansi, 256color. Default is auto. | +| `timezone` | The timezone to use for the timestamp column. The value can be any timezone name recognized by the Go standard library. Default is `UTC` | + + + +## Format `crdb-v2-tty` + +This format name is an alias for 'crdb-v2' with +the following format option defaults: + +- `colors: auto` + + +## Format `json` + +This format emits log entries as a JSON payload. + +The JSON object is guaranteed to not contain unescaped newlines +or other special characters, and the entry as a whole is followed +by a newline character. This makes the format suitable for +processing over a stream unambiguously. + +Each entry contains at least the following fields: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `tag` | `tag` | (Only if the option `fluent-tag: true` is given.) A Fluent tag for the event, formed by the process name and the logging channel. | +| `d` | `datetime` | The pretty-printed date/time of the event timestamp, if enabled via options. | +| `f` | `file` | The name of the source file where the event was emitted. | +| `g` | `goroutine` | The identifier of the goroutine where the event was emitted. | +| `l` | `line` | The line number where the event was emitted in the source. | +| `r` | `redactable` | Whether the payload is redactable (see below for details). | +| `t` | `timestamp` | The timestamp at which the event was emitted on the logging channel. | +| `v` | `version` | The binary version with which the event was generated. 
| + +After a couple of *header* entries written at the beginning of each log sink, +all subsequent log entries also contain the following fields: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `C` | `channel` | The name of the logging channel where the event was sent. | +| `sev` | `severity` | The severity of the event. | +| `c` | `channel_numeric` | The numeric identifier for the logging channel where the event was sent. | +| `n` | `entry_counter` | The entry number on this logging sink, relative to the last process restart. | +| `s` | `severity_numeric` | The numeric value of the severity of the event. | + + +Additionally, the following fields are conditionally present: + +| Field name if `tag-style: compact` is specified | Field name if `tag-style: verbose` is specified | Description | +|-------|-------|-------------| +| `N` | `node_id` | The node ID where the event was generated, once known. Only reported for single-tenant or KV servers. | +| `x` | `cluster_id` | The cluster ID where the event was generated, once known. Only reported for single-tenant or KV servers. | +| `q` | `instance_id` | The SQL instance ID where the event was generated, once known. | +| `T` | `tenant_id` | The SQL tenant ID where the event was generated, once known. | +| `V` | `tenant_name` | The SQL virtual cluster where the event was generated, once known. | +| `tags` | `tags` | The logging context tags for the entry, if there were context tags. | +| `message` | `message` | For unstructured events, the flat text payload. | +| `event` | `event` | The logging event, if structured (see below for details). | +| `stacks` | `stacks` | Goroutine stacks, for fatal events. | + +When an entry is structured, the `event` field maps to a dictionary +whose structure is one of the documented structured events. 
See the [reference documentation](eventlog.html) +for structured events for a list of possible payloads. + +When the entry is marked as `redactable`, the `tags`, `message`, and/or `event` payloads +contain delimiters (‹...›) around +fields that are considered sensitive. These markers are automatically recognized +by [`cockroach debug zip`](cockroach-debug-zip.html) and [`cockroach debug merge-logs`](cockroach-debug-merge-logs.html) when log redaction is requested. + +Additional options recognized via `format-options`: + +| Option | Description | +|--------|-------------| +| `datetime-format` | The format to use for the `datetime` field. The value can be one of `none`, `iso8601`/`rfc3339` (synonyms), or `rfc1123`. Default is `none`. | +| `datetime-timezone` | The timezone to use for the `datetime` field. The value can be any timezone name recognized by the Go standard library. Default is `UTC` | +| `tag-style` | The tags to include in the envelope. The value can be `compact` (one letter tags) or `verbose` (long-form tags). Default is `verbose`. | +| `fluent-tag` | Whether to produce an additional field called `tag` for Fluent compatibility. Default is `false`. 
| + + + +## Format `json-compact` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: false` +- `tag-style: compact` + + +## Format `json-fluent` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: true` +- `tag-style: verbose` + + +## Format `json-fluent-compact` + +This format name is an alias for 'json' with +the following format option defaults: + +- `fluent-tag: true` +- `tag-style: compact` + + diff --git a/src/current/_includes/cockroach-generated/release-26.2/logging.md b/src/current/_includes/cockroach-generated/release-26.2/logging.md new file mode 100644 index 00000000000..7661187987e --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-26.2/logging.md @@ -0,0 +1,188 @@ +## Logging levels (severities) + +### INFO + +The `INFO` severity is used for informational messages that do not +require action. + +### WARNING + +The `WARNING` severity is used for situations which may require special handling, +where normal operation is expected to resume automatically. + +### ERROR + +The `ERROR` severity is used for situations that require special handling, +where normal operation could not proceed as expected. +Other operations can continue mostly unaffected. + +### FATAL + +The `FATAL` severity is used for situations that require an immedate, hard +server shutdown. A report is also sent to telemetry if telemetry +is enabled. + + +## Logging channels + +### `DEV` + +The `DEV` channel is used during development to collect log +details useful for troubleshooting that fall outside the +scope of other channels. It is also the default logging +channel for events not associated with a channel. + +This channel is special in that there are no constraints as to +what may or may not be logged on it. 
Conversely, users in +production deployments are invited to not collect `DEV` logs in +centralized logging facilities, because they likely contain +sensitive operational data. +See [Configure logs](configure-logs.html#dev-channel). + +### `OPS` + +The `OPS` channel is used to report "point" operational events, +initiated by user operators or automation: + + - Operator or system actions on server processes: process starts, + stops, shutdowns, crashes (if they can be logged), + including each time: command-line parameters, current version being run + - Actions that impact the topology of a cluster: node additions, + removals, decommissions, etc. + - Job-related initiation or termination + - [Cluster setting](cluster-settings.html) changes + - [Zone configuration](configure-replication-zones.html) changes + +### `HEALTH` + +The `HEALTH` channel is used to report "background" operational +events, initiated by CockroachDB or reporting on automatic processes: + + - Current resource usage, including critical resource usage + - Node-node connection events, including connection errors and + gossip details + - Range and table leasing events + - Up- and down-replication, range unavailability + +### `STORAGE` + +The `STORAGE` channel is used to report low-level storage +layer events (RocksDB/Pebble). + +### `SESSIONS` + +The `SESSIONS` channel is used to report client network activity when enabled via +the `server.auth_log.sql_connections.enabled` and/or +`server.auth_log.sql_sessions.enabled` [cluster setting](cluster-settings.html): + + - Connections opened/closed + - Authentication events: logins, failed attempts + - Session and query cancellation + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. 
+ +### `SQL_SCHEMA` + +The `SQL_SCHEMA` channel is used to report changes to the +SQL logical schema, excluding privilege and ownership changes +(which are reported separately on the `PRIVILEGES` channel) and +zone configuration changes (which go to the `OPS` channel). + +This includes: + + - Database/schema/table/sequence/view/type creation + - Adding/removing/changing table columns + - Changing sequence parameters + +`SQL_SCHEMA` events generally comprise changes to the schema that affect the +functional behavior of client apps using stored objects. + +### `USER_ADMIN` + +The `USER_ADMIN` channel is used to report changes +in users and roles, including: + + - Users added/dropped + - Changes to authentication credentials (e.g., passwords, validity, etc.) + - Role grants/revocations + - Role option grants/revocations + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. + +### `PRIVILEGES` + +The `PRIVILEGES` channel is used to report data +authorization changes, including: + + - Privilege grants/revocations on database, objects, etc. + - Object ownership changes + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. + +### `SENSITIVE_ACCESS` + +The `SENSITIVE_ACCESS` channel is used to report SQL +data access to sensitive data: + + - Data access audit events (when table audit is enabled via + [ALTER TABLE ... EXPERIMENTAL_AUDIT](alter-table.html#experimental_audit)) + - Data access audit events (when role-based audit is enabled via + [`sql.log.user_audit` cluster setting](role-based-audit-logging.html#syntax-of-audit-settings)) + - SQL statements executed by users with the admin role + - Operations that write to system tables + +This is typically configured in "audit" mode, with event +numbering and synchronous writes. 
+ +### `SQL_EXEC` + +The `SQL_EXEC` channel is used to report SQL execution on +behalf of client connections: + + - Logical SQL statement executions (when enabled via the + `sql.log.all_statements.enabled` [cluster setting](cluster-settings.html)) + - uncaught Go panic errors during the execution of a SQL statement. + +### `SQL_PERF` + +The `SQL_PERF` channel is used to report SQL executions +that are marked as "out of the ordinary" +to facilitate performance investigations. +This includes the SQL "slow query log". + +Arguably, this channel overlaps with `SQL_EXEC`. +However, we keep both channels separate for backward compatibility +with versions prior to v21.1, where the corresponding events +were redirected to separate files. + +### `SQL_INTERNAL_PERF` + +The `SQL_INTERNAL_PERF` channel is like the `SQL_PERF` channel, but is aimed at +helping developers of CockroachDB itself. It exists as a separate +channel so as to not pollute the `SQL_PERF` logging output with +internal troubleshooting details. + +### `TELEMETRY` + +The `TELEMETRY` channel reports telemetry events. Telemetry events describe +feature usage within CockroachDB and anonymizes any application- +specific data. + +### `KV_DISTRIBUTION` + +The `KV_DISTRIBUTION` channel is used to report data distribution events, such as moving +replicas between stores in the cluster, or adding (removing) replicas to +ranges. + +### `CHANGEFEED` + +The `CHANGEFEED` channel is used to report changefeed events + +### `KV_EXEC` + +The `KV_EXEC` channel is used to report KV execution events that don't fall into the +KV_DISTRIBUTION channel. 
+ diff --git a/src/current/_includes/cockroach-generated/release-26.2/settings/settings.html b/src/current/_includes/cockroach-generated/release-26.2/settings/settings.html new file mode 100644 index 00000000000..9a6fb50965d --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-26.2/settings/settings.html @@ -0,0 +1,411 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
SettingTypeDefaultDescriptionSupported Deployments
admission.disk_bandwidth_tokens.elastic.enabled
booleantruewhen true, and provisioned bandwidth for the disk corresponding to a store is configured, tokens for elastic work will be limited if disk bandwidth becomes a bottleneckAdvanced/Self-Hosted
admission.epoch_lifo.enabled
booleanfalsewhen true, epoch-LIFO behavior is enabled when there is significant delay in admissionBasic/Standard/Advanced/Self-Hosted
admission.epoch_lifo.epoch_closing_delta_duration
duration5msthe delta duration before closing an epoch, for epoch-LIFO admission control orderingBasic/Standard/Advanced/Self-Hosted
admission.epoch_lifo.epoch_duration
duration100msthe duration of an epoch, for epoch-LIFO admission control orderingBasic/Standard/Advanced/Self-Hosted
admission.epoch_lifo.queue_delay_threshold_to_switch_to_lifo
duration105msthe queue delay encountered by a (tenant,priority) for switching to epoch-LIFO orderingBasic/Standard/Advanced/Self-Hosted
admission.kv.enabled
booleantruewhen true, work performed by the KV layer is subject to admission controlAdvanced/Self-Hosted
admission.sql_kv_response.enabled
booleantruewhen true, work performed by the SQL layer when receiving a KV response is subject to admission controlBasic/Standard/Advanced/Self-Hosted
admission.sql_sql_response.enabled
booleantruewhen true, work performed by the SQL layer when receiving a DistSQL response is subject to admission controlBasic/Standard/Advanced/Self-Hosted
bulkio.backup.file_size
byte size128 MiBtarget size for individual data files produced during BACKUPBasic/Standard/Advanced/Self-Hosted
bulkio.backup.read_timeout
duration5m0samount of time after which a read attempt is considered timed out, which causes the backup to failBasic/Standard/Advanced/Self-Hosted
bulkio.backup.read_with_priority_after
duration1m0samount of time since the read-as-of time above which a BACKUP should use priority when retrying readsBasic/Standard/Advanced/Self-Hosted
bulkio.import.row_count_validation.mode
(alias: bulkio.import.row_count_validation.unsafe.mode)
enumerationoffcontrols validation of imported data via INSPECT jobs. Options: 'off' (no validation), 'async' (background validation), 'sync' (blocking validation). If disabled, IMPORT will not perform a post-import row count check. [off = 0, async = 1, sync = 2]Basic/Standard/Advanced/Self-Hosted
bulkio.merge.file_size
byte size1.0 GiBtarget size for individual data files produced during local only merge phasesBasic/Standard/Advanced/Self-Hosted
physical_replication.consumer.minimum_flush_interval
(alias: bulkio.stream_ingestion.minimum_flush_interval)
duration5sthe minimum timestamp between flushes; flushes may still occur if internal buffers fill upAdvanced/Self-Hosted
changefeed.aggregator.flush_jitter
float0.1jitter aggregator flushes as a fraction of min_checkpoint_frequency. This setting has no effect if min_checkpoint_frequency is set to 0.Basic/Standard/Advanced/Self-Hosted
changefeed.backfill.concurrent_scan_requests
integer0number of concurrent scan requests per node issued during a backfillBasic/Standard/Advanced/Self-Hosted
changefeed.backfill.scan_request_size
integer524288the maximum number of bytes returned by each scan requestBasic/Standard/Advanced/Self-Hosted
changefeed.batch_reduction_retry.enabled
(alias: changefeed.batch_reduction_retry_enabled)
booleanfalseif true, kafka changefeeds upon erroring on an oversized batch will attempt to resend the messages with progressively lower batch sizesBasic/Standard/Advanced/Self-Hosted
changefeed.default_range_distribution_strategy
enumerationdefaultcontrols how changefeed work is distributed across nodes. 'default' defers to DistSQL for node selection and work distribution. 'balanced_simple' uses DistSQL for node selection but then attempts to evenly distribute ranges across those selected nodes for better load balancing. this setting does not override locality restrictions and can be overridden per-changefeed using the 'range_distribution_strategy' option. [default = 0, balanced_simple = 1]Basic/Standard/Advanced/Self-Hosted
changefeed.event_consumer_worker_queue_size
integer16if changefeed.event_consumer_workers is enabled, this setting sets the maximum number of events which a worker can bufferBasic/Standard/Advanced/Self-Hosted
changefeed.event_consumer_workers
integer0the number of workers to use when processing events: <0 disables, 0 assigns a reasonable default, >0 assigns the setting value. for experimental/core changefeeds and changefeeds using parquet format, this is disabledBasic/Standard/Advanced/Self-Hosted
changefeed.fast_gzip.enabled
booleantrueuse fast gzip implementationBasic/Standard/Advanced/Self-Hosted
changefeed.span_checkpoint.lag_threshold
(alias: changefeed.frontier_highwater_lag_checkpoint_threshold)
duration10m0sthe amount of time a changefeed's lagging (slowest) spans must lag behind its leading (fastest) spans before a span-level checkpoint to save leading span progress is written; if 0, span-level checkpoints due to lagging spans is disabledBasic/Standard/Advanced/Self-Hosted
changefeed.kafka.max_request_size
byte size256 MiBthe maximum number of uncompressed bytes sent in a single request to a Kafka broker; lowering this value helps avoid spurious "message too large" errors that can occur when multiple messages are combined into a single batch; this setting is overridden by the per-changefeed Flush { MaxBytes: <int> } optionBasic/Standard/Advanced/Self-Hosted
changefeed.kafka_v2_error_details.enabled
booleantrueif enabled, Kafka v2 sinks will include the message key, size, and MVCC timestamp in message too large errorsBasic/Standard/Advanced/Self-Hosted
changefeed.memory.per_changefeed_limit
byte size512 MiBcontrols amount of data that can be buffered per changefeedBasic/Standard/Advanced/Self-Hosted
changefeed.resolved_timestamp.min_update_interval
(alias: changefeed.min_highwater_advance)
duration0sminimum amount of time that must have elapsed since the last time a changefeed's resolved timestamp was updated before it is eligible to be updated again; default of 0 means no minimum interval is enforced but updating will still be limited by the average time it takes to checkpoint progressBasic/Standard/Advanced/Self-Hosted
changefeed.node_throttle_config
stringspecifies node level throttling configuration for all changefeedsBasic/Standard/Advanced/Self-Hosted
changefeed.partition_alg.enabled
booleanfalseif enabled, allows specifying the partition_alg changefeed option to choose between fnv-1a (default) and murmur2 hash functions for Kafka partitioning. Only affects changefeeds using a kafka sink with changefeed.new_kafka_sink_enabled set to true.Basic/Standard/Advanced/Self-Hosted
changefeed.progress.frontier_persistence.interval
duration30sminimum amount of time that must elapse before a changefeed will persist its entire span frontier againBasic/Standard/Advanced/Self-Hosted
changefeed.protect_timestamp.max_age
duration96h0m0sfail the changefeed if the protected timestamp age exceeds this threshold; 0 disables expirationBasic/Standard/Advanced/Self-Hosted
changefeed.protect_timestamp_interval
duration10m0scontrols how often the changefeed forwards its protected timestamp to the resolved timestampBasic/Standard/Advanced/Self-Hosted
changefeed.schema_feed.read_with_priority_after
duration1m0sretry with high priority if we were not able to read descriptors for too long; 0 disablesBasic/Standard/Advanced/Self-Hosted
changefeed.sink_io_workers
integer0the number of workers used by changefeeds when sending requests to the sink (currently the batching versions of webhook, pubsub, and kafka sinks that are enabled by changefeed.new_<sink type>_sink_enabled only): <0 disables, 0 assigns a reasonable default, >0 assigns the setting valueBasic/Standard/Advanced/Self-Hosted
cloudstorage.azure.concurrent_upload_buffers
integer1controls the number of concurrent buffers that will be used by the Azure client when uploading chunks. Each buffer can buffer up to cloudstorage.write_chunk.size of memory during an uploadBasic/Standard/Advanced/Self-Hosted
cloudstorage.azure.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.azure.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.azure.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.azure.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.gs.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.gs.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.gs.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.gs.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.http.custom_ca
stringcustom root CA (appended to system's default CAs) for verifying certificates when interacting with HTTPS storageBasic/Standard/Advanced/Self-Hosted
cloudstorage.http.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.http.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.http.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.http.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nodelocal.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nodelocal.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nodelocal.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nodelocal.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nullsink.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nullsink.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nullsink.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.nullsink.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.s3.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.s3.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.s3.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.s3.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.timeout
duration10m0sthe timeout for import/export storage operationsBasic/Standard/Advanced/Self-Hosted
cloudstorage.userfile.read.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.userfile.read.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.userfile.write.node_burst_limit
byte size0 Bburst limit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cloudstorage.userfile.write.node_rate_limit
byte size0 Blimit on number of bytes per second per node across operations writing to the designated cloud storage provider if non-zeroBasic/Standard/Advanced/Self-Hosted
cluster.auto_upgrade.enabled
booleantruedisable automatic cluster version upgrade until resetBasic/Standard/Advanced/Self-Hosted
cluster.organization
stringorganization nameAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
cluster.preserve_downgrade_option
stringdisable (automatic or manual) cluster version upgrade from the specified version until resetBasic/Standard/Advanced/Self-Hosted
debug.zip.redact_addresses.enabled
booleanfalseenables the redaction of hostnames and ip addresses in debug zipBasic/Standard/Advanced/Self-Hosted
diagnostics.active_query_dumps.enabled
booleantrueexperimental: enable dumping of anonymized active queries to disk when node is under memory pressureAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
diagnostics.forced_sql_stat_reset.interval
duration2h0m0sinterval after which the reported SQL Stats are reset even if not collected by telemetry reporter. It has a max value of 24H.Basic/Standard/Advanced/Self-Hosted
diagnostics.memory_monitoring_dumps.enabled
booleantrueenable dumping of memory monitoring state at the same time as heap profiles are takenAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
diagnostics.reporting.enabled
booleantrueenable reporting diagnostic metrics to cockroach labs, but is ignored for Trial or Free licensesBasic/Standard/Advanced/Self-Hosted
diagnostics.reporting.interval
duration1h0m0sinterval at which diagnostics data should be reportedBasic/Standard/Advanced/Self-Hosted
enterprise.license
stringthe encoded cluster licenseAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
external.graphite.endpoint
stringif nonempty, push server metrics to the Graphite or Carbon server at the specified host:portBasic/Standard/Advanced/Self-Hosted
external.graphite.interval
duration10sthe interval at which metrics are pushed to Graphite (if enabled)Basic/Standard/Advanced/Self-Hosted
feature.backup.enabled
booleantrueset to true to enable backups, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.changefeed.enabled
booleantrueset to true to enable changefeeds, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.export.enabled
booleantrueset to true to enable exports, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.import.enabled
booleantrueset to true to enable imports, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.infer_rbr_region_col_using_constraint.enabled
booleanfalseset to true to enable looking up the region column via a foreign key constraint in a REGIONAL BY ROW table, false to disable; default is falseBasic/Standard/Advanced/Self-Hosted
feature.restore.enabled
booleantrueset to true to enable restore, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.schema_change.enabled
booleantrueset to true to enable schema changes, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.stats.enabled
booleantrueset to true to enable CREATE STATISTICS/ANALYZE, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
feature.vector_index.enabled
booleantrueset to true to enable vector indexes, false to disable; default is trueBasic/Standard/Advanced/Self-Hosted
jobs.retention_time
duration336h0m0sthe amount of time for which records for completed jobs are retainedBasic/Standard/Advanced/Self-Hosted
kv.allocator.lease_rebalance_threshold
float0.05minimum fraction away from the mean a store's lease count can be before it is considered for lease-transfersAdvanced/Self-Hosted
kv.allocator.load_based_lease_rebalancing.enabled
booleantrueset to enable rebalancing of range leases based on load and latency; has no effect when kv.allocator.load_based_rebalancing is set to 'multi-metric only' or 'multi-metric and count'Advanced/Self-Hosted
kv.allocator.load_based_rebalancing
enumerationleases and replicaswhether to rebalance based on the distribution of load across stores [off = 0, leases = 1, leases and replicas = 2, multi-metric only = 3, multi-metric and count = 4]Advanced/Self-Hosted
kv.allocator.load_based_rebalancing.objective
enumerationcpuwhat objective does the cluster use to rebalance; if set to `qps` the cluster will attempt to balance qps among stores, if set to `cpu` the cluster will attempt to balance cpu usage among stores [qps = 0, cpu = 1]Advanced/Self-Hosted
kv.allocator.load_based_rebalancing_interval
duration1m0sthe rough interval at which each store will check for load-based lease / replica rebalancing opportunitiesAdvanced/Self-Hosted
kv.allocator.qps_rebalance_threshold
float0.1minimum fraction away from the mean a store's QPS (such as queries per second) can be before it is considered overfull or underfullAdvanced/Self-Hosted
kv.allocator.range_rebalance_threshold
float0.05minimum fraction away from the mean a store's range count can be before it is considered overfull or underfullAdvanced/Self-Hosted
kv.allocator.store_cpu_rebalance_threshold
float0.1minimum fraction away from the mean a store's cpu usage can be before it is considered overfull or underfullAdvanced/Self-Hosted
kv.bulk_io_write.max_rate
byte size1.0 TiBthe rate limit (bytes/sec) to use for writes to disk on behalf of bulk io opsAdvanced/Self-Hosted
kv.bulk_io_write.min_capacity_remaining_fraction
float0.05remaining store capacity fraction below which bulk ingestion requests are rejectedAdvanced/Self-Hosted
kv.bulk_sst.max_allowed_overage
byte size64 MiBif positive, allowed size in excess of target size for SSTs from export requests; export requests (i.e. BACKUP) may buffer up to the sum of kv.bulk_sst.target_size and kv.bulk_sst.max_allowed_overage in memoryAdvanced/Self-Hosted
kv.bulk_sst.target_size
byte size16 MiBtarget size for SSTs emitted from export requests; export requests (i.e. BACKUP) may buffer up to the sum of kv.bulk_sst.target_size and kv.bulk_sst.max_allowed_overage in memoryAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.closed_timestamp.follower_reads.enabled
(alias: kv.closed_timestamp.follower_reads_enabled)
booleantrueallow (all) replicas to serve consistent historical reads based on closed timestamp informationAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.closed_timestamp.lead_for_global_reads_auto_tune.enabled
booleanfalseif enabled, observed network latency between leaseholders and their furthest follower will be used to adjust closed timestamp policies for ranges configured to serve global reads. kv.closed_timestamp.lead_for_global_reads_override takes precedence if set.Advanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.closed_timestamp.lead_for_global_reads_override
duration0sif nonzero, overrides the lead time that global_read ranges use to publish closed timestampsAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.closed_timestamp.side_transport_interval
duration200msthe interval at which the closed timestamp side-transport attempts to advance each range's closed timestamp; set to 0 to disable the side-transportAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.closed_timestamp.side_transport_pacing_refresh_interval
duration10msthe refresh interval for the task pacer that controls pacing of sending sidetransport updates to avoid overloading the system when many connections are waitingAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.closed_timestamp.side_transport_pacing_smear_interval
duration1msthe smear interval for the task pacer that controls the amount of time each paced batch is going to take when broadcasting sidetransport updatesAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.closed_timestamp.target_duration
duration3sif nonzero, attempt to provide closed timestamp notifications for timestamps trailing cluster time by approximately this durationAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.dist_sender.circuit_breaker.cancellation.enabled
booleantruewhen enabled, in-flight requests will be cancelled when the circuit breaker tripsBasic/Standard/Advanced/Self-Hosted
kv.dist_sender.circuit_breaker.cancellation.write_grace_period
duration10show long after the circuit breaker trips to cancel write requests (these can't retry internally, so should be long enough to allow quorum/lease recovery)Basic/Standard/Advanced/Self-Hosted
kv.dist_sender.circuit_breaker.probe.interval
duration3sinterval between replica probesBasic/Standard/Advanced/Self-Hosted
kv.dist_sender.circuit_breaker.probe.threshold
duration3sduration of errors or stalls after which a replica will be probedBasic/Standard/Advanced/Self-Hosted
kv.dist_sender.circuit_breaker.probe.timeout
duration3stimeout for replica probesBasic/Standard/Advanced/Self-Hosted
kv.dist_sender.circuit_breakers.mode
enumerationliveness range onlyset of ranges to trip circuit breakers for failing or stalled replicas [no ranges = 0, liveness range only = 1, all ranges = 2]Basic/Standard/Advanced/Self-Hosted
kv.lease_transfer_read_summary.global_budget
byte size0 Bcontrols the maximum number of bytes that will be used to summarize the global segment of the timestamp cache during lease transfers and range merges. A smaller budget will result in loss of precision.Advanced/Self-Hosted
kv.lease_transfer_read_summary.local_budget
byte size4.0 MiBcontrols the maximum number of bytes that will be used to summarize the local segment of the timestamp cache during lease transfers and range merges. A smaller budget will result in loss of precision.Advanced/Self-Hosted
kv.log_range_and_node_events.enabled
booleantrueset to true to transactionally log range events (e.g., split, merge, add/remove voter/non-voter) into system.rangelog and node join and restart events into system.eventlogAdvanced/Self-Hosted
kv.protectedts.reconciliation.interval
duration5m0sthe frequency for reconciling jobs with protected timestamp recordsAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.raft.leader_fortification.fraction_enabled
float1controls the fraction of ranges for which the raft leader fortification protocol is enabled. Leader fortification is needed for a range to use a Leader lease. Set to 0.0 to disable leader fortification and, by extension, Leader leases. Set to 1.0 to enable leader fortification for all ranges and, by extension, use Leader leases for all ranges which do not require expiration-based leases. Set to a value between 0.0 and 1.0 to gradually roll out Leader leases across the ranges in a cluster.Advanced/Self-Hosted
kv.range.range_size_hard_cap
byte size8.0 GiBhard cap on the maximum size a range is allowed to grow to without splitting before writes to the range are blocked. Takes precedence over all other configurationsAdvanced/Self-Hosted
kv.range_split.by_load.enabled
(alias: kv.range_split.by_load_enabled)
booleantrueallow automatic splits of ranges based on where load is concentratedAdvanced/Self-Hosted
kv.range_split.load_cpu_threshold
duration500msthe CPU use per second over which the range becomes a candidate for load based splittingAdvanced/Self-Hosted
kv.range_split.load_qps_threshold
integer2500the QPS over which the range becomes a candidate for load based splittingAdvanced/Self-Hosted
kv.rangefeed.client.stream_startup_rate
integer100controls the rate per second the client will initiate new rangefeed stream for a single range; 0 implies unlimitedBasic/Standard/Advanced/Self-Hosted
kv.rangefeed.closed_timestamp_refresh_interval
duration3sthe interval at which closed-timestamp updates are delivered to rangefeeds; set to 0 to use kv.closed_timestamp.side_transport_intervalAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.rangefeed.enabled
booleanfalseif set, rangefeed registration is enabledAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
kv.replica_circuit_breaker.slow_replication_threshold
duration1m0sduration after which slow proposals trip the per-Replica circuit breaker (zero duration disables breakers)Advanced/Self-Hosted
kv.replica_raft.leaderless_unavailable_threshold
duration1m0sduration after which leaderless replicas are considered unavailable. Set to 0 to disable leaderless replica availability checksAdvanced/Self-Hosted
kv.replica_stats.addsst_request_size_factor
integer50000the divisor that is applied to addsstable request sizes, then recorded in a leaseholders QPS; 0 means all requests are treated as cost 1Advanced/Self-Hosted
kv.replication_reports.interval
duration1m0sthe frequency for generating the replication_constraint_stats, replication_stats_report and replication_critical_localities reports (set to 0 to disable)Advanced/Self-Hosted
kv.snapshot_rebalance.max_rate
byte size32 MiBthe rate limit (bytes/sec) to use for rebalance and upreplication snapshotsAdvanced/Self-Hosted
kv.transaction.max_intents_and_locks
integer0maximum count of inserts or durable locks for a single transaction, 0 to disableBasic/Standard/Advanced/Self-Hosted
kv.transaction.max_intents_bytes
integer4194304maximum number of bytes used to track locks in transactionsBasic/Standard/Advanced/Self-Hosted
kv.transaction.max_refresh_spans_bytes
integer4194304maximum number of bytes used to track refresh spans in serializable transactionsBasic/Standard/Advanced/Self-Hosted
kv.transaction.randomized_anchor_key.enabled
booleanfalsedictates whether a transaction's anchor key is randomized or notBasic/Standard/Advanced/Self-Hosted
kv.transaction.reject_over_max_intents_budget.enabled
booleanfalseif set, transactions that exceed their lock tracking budget (kv.transaction.max_intents_bytes) are rejected instead of having their lock spans imprecisely compressedBasic/Standard/Advanced/Self-Hosted
kv.transaction.write_buffering.enabled
booleantrueif enabled, transactional writes are buffered on the clientBasic/Standard/Advanced/Self-Hosted
kv.transaction.write_buffering.max_buffer_size
byte size4.0 MiBif non-zero, defines the maximum size of the buffer that will be used to buffer transactional writes per-transactionBasic/Standard/Advanced/Self-Hosted
kv.transaction.write_pipelining.locking_reads.enabled
booleantrueif enabled, transactional locking reads are pipelined through Raft consensusBasic/Standard/Advanced/Self-Hosted
kv.transaction.write_pipelining.ranged_writes.enabled
booleantrueif enabled, transactional ranged writes are pipelined through Raft consensusBasic/Standard/Advanced/Self-Hosted
kv.transaction.write_pipelining.enabled
(alias: kv.transaction.write_pipelining_enabled)
booleantrueif enabled, transactional writes are pipelined through Raft consensusBasic/Standard/Advanced/Self-Hosted
kv.transaction.write_pipelining.max_batch_size
(alias: kv.transaction.write_pipelining_max_batch_size)
integer128if non-zero, defines the maximum size batch that will be pipelined through Raft consensusBasic/Standard/Advanced/Self-Hosted
kvadmission.store.provisioned_bandwidth
byte size0 Bif set to a non-zero value, this is used as the provisioned bandwidth (in bytes/s), for each store. It can be overridden on a per-store basis using the --store flag. Note that setting the provisioned bandwidth to a positive value may enable disk bandwidth based admission control, since admission.disk_bandwidth_tokens.elastic.enabled defaults to trueAdvanced/Self-Hosted
kvadmission.store.snapshot_ingest_bandwidth_control.enabled
booleantrueif set to true, snapshot ingests will be subject to disk write control in ACAdvanced/Self-Hosted
kvadmission.store.snapshot_ingest_bandwidth_control.min_rate.enabled
booleantrueif set to true, snapshot ingests will be admitted at a minimum rate when kvadmission.store.provisioned_bandwidth is set to a non-zero value. Disabling this setting can lead to snapshots being starved out by foreground traffic.Advanced/Self-Hosted
log.channel_compatibility_mode.enabled
booleanfalsewhen true, logs will log to their legacy (pre 26.1) logging channels; when false, logs will be logged to new logging channelsBasic/Standard/Advanced/Self-Hosted
obs.ash.buffer_size
integer1000000number of ASH samples to retain in memoryAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
obs.ash.enabled
booleanfalseenable active session history samplingAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
obs.ash.log_interval
duration10m0sinterval between periodic ASH top-N workload summary logs; also used as the lookback window for ASH reports written by the env sampler profilerAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
obs.ash.log_top_n
integer10maximum number of entries in periodic ASH workload summary, ranked by sample count descendingAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
obs.ash.response_limit
integer10000maximum number of ASH samples returned per node in fan-out responsesBasic/Standard/Advanced/Self-Hosted
obs.ash.sample_interval
duration1sinterval between ASH samplesAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
obs.tablemetadata.automatic_updates.enabled
booleanfalseenables automatic updates of the table metadata cache system.table_metadataBasic/Standard/Advanced/Self-Hosted
obs.tablemetadata.data_valid_duration
duration20m0sthe duration for which the data in system.table_metadata is considered validBasic/Standard/Advanced/Self-Hosted
schedules.backup.gc_protection.enabled
booleantrueenable chaining of GC protection across backups run as part of a scheduleBasic/Standard/Advanced/Self-Hosted
security.client_cert.san_required.enabled
booleanfalsemandates a requirement for client certs to contain SANAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
security.client_cert.subject_required.enabled
booleanfalsemandates a requirement for subject role to be set for db userAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
security.ocsp.mode
enumerationoffuse OCSP to check whether TLS certificates are revoked. If the OCSP server is unreachable, in strict mode all certificates will be rejected and in lax mode all certificates will be accepted. [off = 0, lax = 1, strict = 2]Basic/Standard/Advanced/Self-Hosted
security.ocsp.timeout
duration3stimeout before considering the OCSP server unreachableBasic/Standard/Advanced/Self-Hosted
security.provisioning.ldap.enabled
booleanfalseenables automatic creation of SQL users upon successful LDAP loginBasic/Standard/Advanced/Self-Hosted
server.auth_log.sql_connections.enabled
booleanfalseif set, log SQL client connect and disconnect events to the SESSIONS log channel (note: may hinder performance on loaded nodes)Basic/Standard/Advanced/Self-Hosted
server.auth_log.sql_sessions.enabled
booleanfalseif set, log verbose SQL session authentication events to the SESSIONS log channel (note: may hinder performance on loaded nodes). Session start and end events are always logged regardless of this setting; disable the SESSIONS log channel to suppress them.Basic/Standard/Advanced/Self-Hosted
server.authentication_cache.enabled
booleantrueenables a cache used during authentication to avoid lookups to system tables when retrieving per-user authentication-related informationBasic/Standard/Advanced/Self-Hosted
server.child_metrics.enabled
booleanfalseenables the exporting of child metrics, additional prometheus time series with extra labelsBasic/Standard/Advanced/Self-Hosted
server.child_metrics.include_aggregate.enabled
booleantrueinclude the reporting of the aggregate time series when child metrics are enabled. This cluster setting has no effect if child metrics are disabled.Basic/Standard/Advanced/Self-Hosted
server.clock.forward_jump_check.enabled
(alias: server.clock.forward_jump_check_enabled)
booleanfalseif enabled, forward clock jumps > max_offset/2 will cause a panicBasic/Standard/Advanced/Self-Hosted
server.clock.persist_upper_bound_interval
duration0sthe interval between persisting the wall time upper bound of the clock. The clock does not generate a wall time greater than the persisted timestamp and will panic if it sees a wall time greater than this value. When cockroach starts, it waits for the wall time to catch-up till this persisted timestamp. This guarantees monotonic wall time across server restarts. Not setting this or setting a value of 0 disables this feature.Basic/Standard/Advanced/Self-Hosted
server.consistency_check.max_rate
byte size8.0 MiBthe rate limit (bytes/sec) to use for consistency checks; used in conjunction with server.consistency_check.interval to control the frequency of consistency checks. Note that setting this too high can negatively impact performance.Advanced/Self-Hosted
server.eventlog.enabled
booleantrueif set, logged notable events are also stored in the table system.eventlogBasic/Standard/Advanced/Self-Hosted
server.eventlog.ttl
duration2160h0m0sif nonzero, entries in system.eventlog older than this duration are periodically purgedBasic/Standard/Advanced/Self-Hosted
server.host_based_authentication.configuration
stringhost-based authentication configuration to use during connection authenticationBasic/Standard/Advanced/Self-Hosted
server.hot_ranges_request.node.timeout
duration5m0sthe duration allowed for a single node to return hot range data before the request is cancelled; if set to 0, there is no timeoutBasic/Standard/Advanced/Self-Hosted
server.hsts.enabled
booleanfalseif true, HSTS headers will be sent along with all HTTP requests. The headers will contain a max-age setting of one year. Browsers honoring the header will always use HTTPS to access the DB Console. Ensure that TLS is correctly configured prior to enabling.Basic/Standard/Advanced/Self-Hosted
server.http.base_path
string/path to redirect the user to upon successful loginBasic/Standard/Advanced/Self-Hosted
server.identity_map.configuration
stringsystem-identity to database-username mappingsBasic/Standard/Advanced/Self-Hosted
server.jwt_authentication.audience
stringsets accepted audience values for JWT logins over the SQL interfaceBasic/Standard/Advanced/Self-Hosted
server.jwt_authentication.claim
stringsets the JWT claim that is parsed to get the usernameBasic/Standard/Advanced/Self-Hosted
server.jwt_authentication.client.timeout
duration15ssets the client timeout for external calls made during JWT authentication (e.g. fetching JWKS, etc.)Basic/Standard/Advanced/Self-Hosted
server.jwt_authentication.enabled
booleanfalseenables or disables JWT login for the SQL interfaceBasic/Standard/Advanced/Self-Hosted
server.jwt_authentication.issuers.configuration
(alias: server.jwt_authentication.issuers)
stringsets accepted issuer values for JWT logins over the SQL interface which can be a single issuer URL string or a JSON string containing an array of issuer URLs or a JSON object containing map of issuer URLS to JWKS URIsBasic/Standard/Advanced/Self-Hosted
server.jwt_authentication.issuers.custom_ca
stringsets the PEM encoded custom root CA for verifying certificates while fetching JWKSBasic/Standard/Advanced/Self-Hosted
server.jwt_authentication.jwks
string{"keys":[]}sets the public key set for JWT logins over the SQL interface (JWKS format)Basic/Standard/Advanced/Self-Hosted
server.jwt_authentication.jwks_auto_fetch.enabled
booleanfalseenables or disables automatic fetching of JWKS from the issuer's well-known endpoint or JWKS URI set in JWTAuthIssuersConfig. If this is enabled, the server.jwt_authentication.jwks will be ignored.Basic/Standard/Advanced/Self-Hosted
server.ldap_authentication.client.tls_certificate
stringsets the client certificate PEM for establishing mTLS connection with LDAP serverBasic/Standard/Advanced/Self-Hosted
server.ldap_authentication.client.tls_key
stringsets the client key PEM for establishing mTLS connection with LDAP serverBasic/Standard/Advanced/Self-Hosted
server.ldap_authentication.domain.custom_ca
stringsets the PEM encoded custom root CA for verifying domain certificates when establishing connection with LDAP serverBasic/Standard/Advanced/Self-Hosted
server.log_gc.max_deletions_per_cycle
integer1000the maximum number of entries to delete on each purge of log-like system tablesBasic/Standard/Advanced/Self-Hosted
server.log_gc.period
duration1h0m0sthe period at which log-like system tables are checked for old entriesBasic/Standard/Advanced/Self-Hosted
server.max_connections_per_gateway
integer-1the maximum number of SQL connections per gateway allowed at a given time (note: this will only limit future connection attempts and will not affect already established connections). Negative values result in unlimited number of connections. Superusers are not affected by this limit.Basic/Standard/Advanced/Self-Hosted
server.max_open_transactions_per_gateway
integer-1the maximum number of open SQL transactions per gateway allowed at a given time. Negative values result in unlimited number of connections. Superusers are not affected by this limit.Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.autologin.enabled
(alias: server.oidc_authentication.autologin)
booleanfalseif true, logged-out visitors to the DB Console will be automatically redirected to the OIDC login endpointBasic/Standard/Advanced/Self-Hosted
server.oidc_authentication.button_text
stringLog in with your OIDC providertext to show on button on DB Console login page to login with your OIDC provider (only shown if OIDC is enabled)Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.claim_json_key
stringsets JSON key of principal to extract from payload after OIDC authentication completes (usually email or sid)Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.client.timeout
duration15ssets the client timeout for external calls made during OIDC authentication (e.g. authorization code flow, etc.)Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.client_id
stringsets OIDC client idBasic/Standard/Advanced/Self-Hosted
server.oidc_authentication.client_secret
stringsets OIDC client secretBasic/Standard/Advanced/Self-Hosted
server.oidc_authentication.enabled
booleanfalseenables or disables OIDC login for the DB ConsoleBasic/Standard/Advanced/Self-Hosted
server.oidc_authentication.principal_regex
string(.+)regular expression to apply to extracted principal (see claim_json_key setting) to translate to SQL user (golang regex format, must include 1 grouping to extract)Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.provider.custom_ca
stringsets the PEM encoded custom root CA for verifying certificates while authenticating through the OIDC providerBasic/Standard/Advanced/Self-Hosted
server.oidc_authentication.provider_url
stringsets OIDC provider URL ({provider_url}/.well-known/openid-configuration must resolve)Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.redirect_url
stringhttps://localhost:8080/oidc/v1/callbacksets OIDC redirect URL via a URL string or a JSON string containing a required `redirect_urls` key with an object that maps from region keys to URL strings (URLs should point to your load balancer and must route to the path /oidc/v1/callback)Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.scopes
stringopenidsets OIDC scopes to include with authentication request (space delimited list of strings, required to start with `openid`)Basic/Standard/Advanced/Self-Hosted
server.oidc_authentication.tls_insecure_skip_verify.enabled
booleanfalseif true, TLS certificate verification is skipped for connections to the OIDC provider (insecure)Basic/Standard/Advanced/Self-Hosted
server.rangelog.ttl
duration720h0m0sif nonzero, entries in system.rangelog older than this duration are periodically purgedAdvanced/Self-Hosted
server.redact_sensitive_settings.enabled
booleanfalseenables or disables the redaction of sensitive settings in the output of SHOW CLUSTER SETTINGS and SHOW ALL CLUSTER SETTINGS for users without the MODIFYCLUSTERSETTING privilegeBasic/Standard/Advanced/Self-Hosted
server.shutdown.connections.timeout
(alias: server.shutdown.connection_wait)
duration0sthe maximum amount of time a server waits for all SQL connections to be closed before proceeding with a drain. (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Basic/Standard/Advanced/Self-Hosted
server.shutdown.initial_wait
(alias: server.shutdown.drain_wait)
duration0sthe amount of time a server waits in an unready state before proceeding with a drain (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting. --drain-wait is to specify the duration of the whole draining process, while server.shutdown.initial_wait is to set the wait time for health probes to notice that the node is not ready.)Basic/Standard/Advanced/Self-Hosted
server.shutdown.lease_transfer_iteration.timeout
(alias: server.shutdown.lease_transfer_wait)
duration5sthe timeout for a single iteration of the range lease transfer phase of draining (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Advanced/Self-Hosted
server.shutdown.transactions.timeout
(alias: server.shutdown.query_wait)
duration10sthe timeout for waiting for active transactions to finish during a drain (note that the --drain-wait parameter for cockroach node drain may need adjustment after changing this setting)Basic/Standard/Advanced/Self-Hosted
server.sql_tcp_keep_alive.count
integer3maximum number of probes that will be sent out before a connection is dropped because it's unresponsive (Linux and Darwin only). The value 0 is the operating system default.Basic/Standard/Advanced/Self-Hosted
server.sql_tcp_keep_alive.idle
duration0stime with no network activity before sending a TCP keepalive probe (Linux and Darwin only). If 0, the value of server.sql_tcp_keep_alive.interval is used. The value 0 is the operating system default.Basic/Standard/Advanced/Self-Hosted
server.sql_tcp_keep_alive.interval
duration10stime between keep alive probes and idle time before probes are sent out. The value 0 is the operating system default.Basic/Standard/Advanced/Self-Hosted
server.sql_tcp_user.timeout
duration0sspecifies the maximum amount of time that transmitted data can remain unacknowledged before the underlying TCP connection is forcefully closed. (Linux and Darwin only). The value 0 is the operating system default.Basic/Standard/Advanced/Self-Hosted
server.time_until_store_dead
duration5m0sthe time after which if there is no new gossiped information about a store, it is considered deadBasic/Standard/Advanced/Self-Hosted
server.user_login.cert_password_method.auto_scram_promotion.enabled
booleantruewhether to automatically promote cert-password authentication to use SCRAMBasic/Standard/Advanced/Self-Hosted
server.user_login.downgrade_scram_stored_passwords_to_bcrypt.enabled
booleantrueif server.user_login.password_encryption=crdb-bcrypt, this controls whether to automatically re-encode stored passwords from scram-sha-256 to crdb-bcryptBasic/Standard/Advanced/Self-Hosted
server.user_login.min_password_length
integer1the minimum length accepted for passwords set in cleartext via SQL. Note that a value lower than 1 is ignored: passwords cannot be empty in any case. This setting only applies when adding new users or altering an existing user's password; it will not affect existing logins.Basic/Standard/Advanced/Self-Hosted
server.user_login.password_encryption
enumerationscram-sha-256which hash method to use to encode cleartext passwords passed via ALTER/CREATE USER/ROLE WITH PASSWORD [crdb-bcrypt = 2, scram-sha-256 = 3]Basic/Standard/Advanced/Self-Hosted
server.user_login.password_hashes.default_cost.crdb_bcrypt
integer10the hashing cost to use when storing passwords supplied as cleartext by SQL clients with the hashing method crdb-bcrypt (allowed range: 4-31)Basic/Standard/Advanced/Self-Hosted
server.user_login.password_hashes.default_cost.scram_sha_256
integer10610the hashing cost to use when storing passwords supplied as cleartext by SQL clients with the hashing method scram-sha-256 (allowed range: 4096-240000000000)Basic/Standard/Advanced/Self-Hosted
server.user_login.rehash_scram_stored_passwords_on_cost_change.enabled
booleantrueif server.user_login.password_hashes.default_cost.scram_sha_256 differs from the cost in a stored hash, this controls whether to automatically re-encode stored passwords using scram-sha-256 with the new default costBasic/Standard/Advanced/Self-Hosted
server.user_login.timeout
duration10stimeout after which client authentication times out if some system range is unavailable (0 = no timeout)Basic/Standard/Advanced/Self-Hosted
server.user_login.upgrade_bcrypt_stored_passwords_to_scram.enabled
booleantrueif server.user_login.password_encryption=scram-sha-256, this controls whether to automatically re-encode stored passwords from crdb-bcrypt to scram-sha-256Basic/Standard/Advanced/Self-Hosted
server.web_session.purge.ttl
duration1h0m0sif nonzero, entries in system.web_sessions older than this duration are periodically purgedBasic/Standard/Advanced/Self-Hosted
server.web_session.timeout
(alias: server.web_session_timeout)
duration168h0m0sthe duration that a newly created web session will be validBasic/Standard/Advanced/Self-Hosted
spanconfig.bounds.enabled
booleantruedictates whether span config bounds are consulted when serving span configs for secondary tenantsAdvanced/Self-Hosted
spanconfig.range_coalescing.system.enabled
(alias: spanconfig.storage_coalesce_adjacent.enabled)
booleantruecollapse adjacent ranges with the same span configs, for the ranges specific to the system tenantAdvanced/Self-Hosted
spanconfig.range_coalescing.application.enabled
(alias: spanconfig.tenant_coalesce_adjacent.enabled)
booleantruecollapse adjacent ranges with the same span configs across all secondary tenant keyspacesAdvanced/Self-Hosted
sql.auth.change_own_password.enabled
booleanfalsecontrols whether a user is allowed to change their own password, even if they have no other privilegesBasic/Standard/Advanced/Self-Hosted
sql.auth.grant_option_for_owner.enabled
booleantruedetermines whether the GRANT OPTION for privileges is implicitly given to the owner of an objectBasic/Standard/Advanced/Self-Hosted
sql.auth.grant_option_inheritance.enabled
booleantruedetermines whether the GRANT OPTION for privileges is inherited through role membershipBasic/Standard/Advanced/Self-Hosted
sql.auth.public_schema_create_privilege.enabled
booleantruedetermines whether to grant all users the CREATE privileges on the public schema when it is createdBasic/Standard/Advanced/Self-Hosted
sql.auth.skip_underlying_view_privilege_checks.enabled
booleanfalsedetermines whether to skip privilege checks on tables underlying views. When enabled, users with SELECT privileges on a view can query it regardless of their privileges on the underlying tables, and row-level security policies are evaluated as the invoking user rather than the view owner. This restores pre-v26.2 behavior.Basic/Standard/Advanced/Self-Hosted
sql.catalog.allow_leased_descriptors.enabled
booleantrueif true, catalog views (crdb_internal, information_schema, pg_catalog) can use leased descriptors for improved performanceBasic/Standard/Advanced/Self-Hosted
sql.closed_session_cache.capacity
integer1000the maximum number of sessions in the cacheBasic/Standard/Advanced/Self-Hosted
sql.closed_session_cache.time_to_live
integer3600the maximum time to live, in secondsBasic/Standard/Advanced/Self-Hosted
sql.contention.event_store.capacity
byte size64 MiBthe in-memory storage capacity per-node of contention event storeBasic/Standard/Advanced/Self-Hosted
sql.contention.event_store.duration_threshold
duration0sminimum contention duration to cause the contention events to be collected into crdb_internal.transaction_contention_eventsBasic/Standard/Advanced/Self-Hosted
sql.contention.record_serialization_conflicts.enabled
booleantrueenables recording 40001 errors with conflicting txn meta as SERIALIZATION_CONFLICT contention events into crdb_internal.transaction_contention_eventsBasic/Standard/Advanced/Self-Hosted
sql.contention.txn_id_cache.max_size
byte size64 MiBthe maximum byte size TxnID cache will use (set to 0 to disable)Basic/Standard/Advanced/Self-Hosted
sql.cross_db_fks.enabled
booleanfalseif true, creating foreign key references across databases is allowedBasic/Standard/Advanced/Self-Hosted
sql.cross_db_sequence_owners.enabled
booleanfalseif true, creating sequences owned by tables from other databases is allowedBasic/Standard/Advanced/Self-Hosted
sql.cross_db_sequence_references.enabled
booleanfalseif true, sequences referenced by tables from other databases are allowedBasic/Standard/Advanced/Self-Hosted
sql.cross_db_views.enabled
booleanfalseif true, creating views that refer to other databases is allowedBasic/Standard/Advanced/Self-Hosted
sql.defaults.cost_scans_with_default_col_size.enabled
booleanfalsesetting to true uses the same size for all columns to compute scan cost
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.datestyle
enumerationiso, mdydefault value for DateStyle session setting [iso, mdy = 0, iso, dmy = 1, iso, ymd = 2]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.default_hash_sharded_index_bucket_count
integer16used as bucket count if bucket count is not specified in hash sharded index definition
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.default_int_size
integer8the size, in bytes, of an INT type
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.disallow_full_table_scans.enabled
booleanfalsesetting to true rejects queries that have planned a full table scan; set large_full_scan_rows > 0 to allow small full table scans estimated to read fewer than large_full_scan_rows
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.distsql
enumerationautodefault distributed SQL execution mode [off = 0, auto = 1, on = 2, always = 3]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.experimental_alter_column_type.enabled
booleanfalsedefault value for experimental_alter_column_type session setting; enables the use of ALTER COLUMN TYPE for general conversions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.experimental_distsql_planning
enumerationoffdefault experimental_distsql_planning mode; enables experimental opt-driven DistSQL planning [off = 0, on = 1]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.experimental_enable_unique_without_index_constraints.enabled
booleanfalsedefault value for experimental_enable_unique_without_index_constraints session setting; disables unique without index constraints by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.experimental_implicit_column_partitioning.enabled
booleanfalsedefault value for experimental_enable_temp_tables; allows for the use of implicit column partitioning
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.experimental_temporary_tables.enabled
booleanfalsedefault value for experimental_enable_temp_tables; allows for use of temporary tables by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.foreign_key_cascades_limit
integer10000default value for foreign_key_cascades_limit session setting; limits the number of cascading operations that run as part of a single query
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.idle_in_session_timeout
duration0sdefault value for the idle_in_session_timeout; default value for the idle_in_session_timeout session setting; controls the duration a session is permitted to idle before the session is terminated; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.idle_in_transaction_session_timeout
duration0sdefault value for the idle_in_transaction_session_timeout; controls the duration a session is permitted to idle in a transaction before the session is terminated; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.implicit_select_for_update.enabled
booleantruedefault value for enable_implicit_select_for_update session setting; enables FOR UPDATE locking during the row-fetch phase of mutation statements
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.insert_fast_path.enabled
booleantruedefault value for enable_insert_fast_path session setting; enables a specialized insert path
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.intervalstyle
enumerationpostgresdefault value for IntervalStyle session setting [postgres = 0, iso_8601 = 1, sql_standard = 2]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.large_full_scan_rows
float0default value for large_full_scan_rows session variable which determines the table size at which full scans are considered large and disallowed when disallow_full_table_scans is set to true; set to 0 to reject all full table or full index scans when disallow_full_table_scans is true
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.locality_optimized_partitioned_index_scan.enabled
booleantruedefault value for locality_optimized_partitioned_index_scan session setting; enables searching for rows in the current region before searching remote regions
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.lock_timeout
duration0sdefault value for the lock_timeout session setting; controls the duration a query is permitted to wait while attempting to acquire a lock on a key or while blocking on an existing lock in order to perform a non-locking read on a key; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.on_update_rehome_row.enabled
booleantruedefault value for on_update_rehome_row; enables ON UPDATE rehome_row() expressions to trigger on updates
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.optimizer_use_histograms.enabled
booleantruedefault value for optimizer_use_histograms session setting; enables usage of histograms in the optimizer by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.optimizer_use_multicol_stats.enabled
booleantruedefault value for optimizer_use_multicol_stats session setting; enables usage of multi-column stats in the optimizer by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.override_alter_primary_region_in_super_region.enabled
booleanfalsedefault value for override_alter_primary_region_in_super_region; allows for altering the primary region even if the primary region is a member of a super region
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.override_multi_region_zone_config.enabled
booleanfalsedefault value for override_multi_region_zone_config; allows for overriding the zone configs of a multi-region table or database
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.prefer_lookup_joins_for_fks.enabled
booleanfalsedefault value for prefer_lookup_joins_for_fks session setting; causes foreign key operations to use lookup joins when possible
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.primary_region
stringif not empty, all databases created without a PRIMARY REGION will implicitly have the given PRIMARY REGION
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.reorder_joins_limit
integer8default number of joins to reorder
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.require_explicit_primary_keys.enabled
booleanfalsedefault value for requiring explicit primary keys in CREATE TABLE statements
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.results_buffer.size
byte size512 KiBdefault size of the buffer that accumulates results for a statement or a batch of statements before they are sent to the client. This can be overridden on an individual connection with the 'results_buffer_size' parameter. Note that auto-retries generally only happen while no results have been delivered to the client, so reducing this size can increase the number of retriable errors a client receives. On the other hand, increasing the buffer size can increase the delay until the client receives the first result row. Updating the setting only affects new connections. Setting to 0 disables any buffering.
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.serial_normalization
enumerationrowiddefault handling of SERIAL in table definitions [rowid = 0, virtual_sequence = 1, sql_sequence = 2, sql_sequence_cached = 3, unordered_rowid = 4, sql_sequence_cached_node = 5]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.statement_timeout
duration0sdefault value for the statement_timeout session setting; controls the duration a query is permitted to run before it is canceled; if set to 0, there is no timeout
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.stub_catalog_tables.enabled
booleantruedefault value for stub_catalog_tables session setting
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.transaction_rows_read_err
integer0the limit for the number of rows read by a SQL transaction which - once exceeded - will fail the transaction (or will trigger a logging event to SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.transaction_rows_read_log
integer0the threshold for the number of rows read by a SQL transaction which - once exceeded - will trigger a logging event to SQL_PERF (or SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.transaction_rows_written_err
integer0the limit for the number of rows written by a SQL transaction which - once exceeded - will fail the transaction (or will trigger a logging event to SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.transaction_rows_written_log
integer0the threshold for the number of rows written by a SQL transaction which - once exceeded - will trigger a logging event to SQL_PERF (or SQL_INTERNAL_PERF for internal transactions); use 0 to disable
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.use_declarative_schema_changer
enumerationondefault value for use_declarative_schema_changer session setting;disables new schema changer by default [off = 0, on = 1, unsafe = 2, unsafe_always = 3]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.vectorize
enumerationondefault vectorize mode [on = 0, on = 1, on = 2, experimental_always = 3, off = 4]
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.defaults.zigzag_join.enabled
booleanfalsedefault value for enable_zigzag_join session setting; disallows use of zig-zag join by default
This cluster setting is being kept to preserve backwards-compatibility.
This session variable default should now be configured using ALTER ROLE... SET
Basic/Standard/Advanced/Self-Hosted
sql.distsql.temp_storage.workmem
byte size64 MiBmaximum amount of memory in bytes a processor can use before falling back to temp storageBasic/Standard/Advanced/Self-Hosted
sql.guardrails.max_row_size_err
byte size80 MiBmaximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an error is returned; use 0 to disableBasic/Standard/Advanced/Self-Hosted
sql.guardrails.max_row_size_log
byte size16 MiBmaximum size of row (or column family if multiple column families are in use) that SQL can write to the database, above which an event is logged to SQL_PERF (or SQL_INTERNAL_PERF if the mutating statement was internal); use 0 to disableBasic/Standard/Advanced/Self-Hosted
sql.hash_sharded_range_pre_split.max
integer16max pre-split ranges to have when adding hash sharded index to an existing tableBasic/Standard/Advanced/Self-Hosted
sql.index_recommendation.drop_unused_duration
duration168h0m0sthe index unused duration at which we begin to recommend dropping the indexBasic/Standard/Advanced/Self-Hosted
sql.insights.anomaly_detection.enabled
booleantrueenable per-fingerprint latency recording and anomaly detectionBasic/Standard/Advanced/Self-Hosted
sql.insights.anomaly_detection.latency_threshold
duration50msstatements must surpass this threshold to trigger anomaly detection and identificationBasic/Standard/Advanced/Self-Hosted
sql.insights.anomaly_detection.memory_limit
byte size1.0 MiBthe maximum amount of memory allowed for tracking statement latenciesBasic/Standard/Advanced/Self-Hosted
sql.insights.execution_insights_capacity
integer1000the size of the per-node store of execution insightsBasic/Standard/Advanced/Self-Hosted
sql.insights.high_retry_count.threshold
integer10the number of retries a slow statement must have undergone for its high retry count to be highlighted as a potential problemBasic/Standard/Advanced/Self-Hosted
sql.insights.latency_threshold
duration100msamount of time after which an executing statement is considered slow. Use 0 to disable.Basic/Standard/Advanced/Self-Hosted
sql.log.redact_names.enabled
booleanfalseif set, schema object identifiers are redacted in SQL statements that appear in event logsBasic/Standard/Advanced/Self-Hosted
sql.log.scan_row_count_misestimate.enabled
booleanfalsewhen set to true, log a warning when a scan's actual row count differs significantly from the optimizer's estimateBasic/Standard/Advanced/Self-Hosted
sql.log.slow_query.experimental_full_table_scans.enabled
booleanfalsewhen set to true, statements that perform a full table/index scan will be logged to the slow query log even if they do not meet the latency threshold. Must have the slow query log enabled for this setting to have any effect.Basic/Standard/Advanced/Self-Hosted
sql.log.slow_query.internal_queries.enabled
booleanfalsewhen set to true, internal queries which exceed the slow query log threshold are logged to a separate log. Must have the slow query log enabled for this setting to have any effect.Basic/Standard/Advanced/Self-Hosted
sql.log.slow_query.latency_threshold
duration0swhen set to non-zero, log statements whose service latency exceeds the threshold to a secondary logger on each nodeBasic/Standard/Advanced/Self-Hosted
sql.log.user_audit
stringuser/role-based audit logging configurationBasic/Standard/Advanced/Self-Hosted
sql.log.user_audit.reduced_config.enabled
booleanfalseenables logic to compute a reduced audit configuration, computing the audit configuration only once at session start instead of at each SQL event. The tradeoff with the increase in performance (~5%), is that changes to the audit configuration (user role memberships/cluster setting) are not reflected within session. Users will need to start a new session to see these changes in their auditing behaviour.Basic/Standard/Advanced/Self-Hosted
sql.metrics.application_name.enabled
booleanfalsewhen enabled, SQL metrics would export application name as and additional label as part of child metrics. The number of unique label combinations is limited to 5000 by default.Basic/Standard/Advanced/Self-Hosted
sql.metrics.database_name.enabled
booleanfalsewhen enabled, SQL metrics would export database name as and additional label as part of child metrics. The number of unique label combinations is limited to 5000 by default.Basic/Standard/Advanced/Self-Hosted
sql.metrics.index_usage_stats.enabled
booleantruecollect per index usage statisticsBasic/Standard/Advanced/Self-Hosted
sql.metrics.max_mem_reported_stmt_fingerprints
integer100000the maximum number of reported statement fingerprints stored in memoryBasic/Standard/Advanced/Self-Hosted
sql.metrics.max_mem_reported_txn_fingerprints
integer100000the maximum number of reported transaction fingerprints stored in memoryBasic/Standard/Advanced/Self-Hosted
sql.metrics.max_mem_stmt_fingerprints
integer7500the maximum number of statement fingerprints stored in memoryBasic/Standard/Advanced/Self-Hosted
sql.metrics.max_mem_txn_fingerprints
integer7500the maximum number of transaction fingerprints stored in memoryBasic/Standard/Advanced/Self-Hosted
sql.metrics.statement_details.dump_to_logs.enabled
(alias: sql.metrics.statement_details.dump_to_logs)
booleanfalsedump collected statement statistics to node logs when periodically clearedBasic/Standard/Advanced/Self-Hosted
sql.metrics.statement_details.enabled
booleantruecollect per-statement query statisticsBasic/Standard/Advanced/Self-Hosted
sql.metrics.statement_details.gateway_node.enabled
booleanfalsesave the gateway node for each statement fingerprint. If false, the value will be stored as 0.Basic/Standard/Advanced/Self-Hosted
sql.metrics.statement_details.index_recommendation_collection.enabled
booleantruegenerate an index recommendation for each fingerprint IDBasic/Standard/Advanced/Self-Hosted
sql.metrics.statement_details.max_mem_reported_idx_recommendations
integer5000the maximum number of reported index recommendation info stored in memoryBasic/Standard/Advanced/Self-Hosted
sql.metrics.statement_details.threshold
duration0sminimum execution time to cause statement statistics to be collected. If configured, no transaction stats are collected.Basic/Standard/Advanced/Self-Hosted
sql.metrics.transaction_details.enabled
booleantruecollect per-application transaction statisticsBasic/Standard/Advanced/Self-Hosted
sql.multiple_modifications_of_table.enabled
booleanfalseif true, allow statements containing multiple INSERT ON CONFLICT, UPSERT, UPDATE, or DELETE subqueries modifying the same table, at the risk of data corruption if the same row is modified multiple times by a single statement (multiple INSERT subqueries without ON CONFLICT cannot cause corruption and are always allowed)Basic/Standard/Advanced/Self-Hosted
sql.multiregion.drop_primary_region.enabled
booleantrueallows dropping the PRIMARY REGION of a database if it is the last regionBasic/Standard/Advanced/Self-Hosted
sql.notices.enabled
booleantrueenable notices in the server/client protocol being sentBasic/Standard/Advanced/Self-Hosted
sql.optimizer.uniqueness_checks_for_gen_random_uuid.enabled
booleanfalseif enabled, uniqueness checks may be planned for mutations of UUID columns updated with gen_random_uuid(); otherwise, uniqueness is assumed due to near-zero collision probabilityBasic/Standard/Advanced/Self-Hosted
sql.schema.approx_max_object_count
integer20000approximate maximum number of schema objects allowed in the cluster; the check uses cached statistics, so the actual count may slightly exceed this limit; set to 0 to disableBasic/Standard/Advanced/Self-Hosted
sql.schema.auto_unlock.enabled
booleantruecontrols whether DDL operations will attempt to automatically unlock and re-lock schema_locked tables. When this setting is false, DDL on schema_locked tables is blocked unless the user manually unlocks the table first. The schema_locked storage parameter improves changefeed performance by locking the table's schema from the perspective of the changefeed.Basic/Standard/Advanced/Self-Hosted
sql.schema.telemetry.recurrence
string@weeklycron-tab recurrence for SQL schema telemetry jobAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
sql.spatial.experimental_box2d_comparison_operators.enabled
booleanfalseenables the use of certain experimental box2d comparison operatorsBasic/Standard/Advanced/Self-Hosted
sql.sqlcommenter.enabled
booleanfalseenables support for sqlcommenter. Key value parsed from sqlcommenter comments will be included in sql insights and sql logs. See https://google.github.io/sqlcommenter/ for more details.Basic/Standard/Advanced/Self-Hosted
sql.stats.activity.persisted_rows.max
integer200000maximum number of rows of statement and transaction activity that will be persisted in the system tablesBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_collection.enabled
booleantrueautomatic statistics collection modeBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_collection.fraction_stale_rows
float0.2target fraction of stale rows per table that will trigger a statistics refreshBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_collection.min_stale_rows
integer500target minimum number of stale rows per table that will trigger a statistics refreshBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_extremes_concurrency_limit
integer128determines the maximum number of concurrent automatic partial USING EXTREMES table statistics collection jobsBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_full_collection.enabled
booleantrueautomatic full statistics collection modeBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_full_concurrency_limit
integerSee description.determines the maximum number of concurrent automatic full table statistics collection jobs. The default value is computed as the number of vCPUs in a node divided by 2.Basic/Standard/Advanced/Self-Hosted
sql.stats.automatic_partial_collection.enabled
booleantrueautomatic partial statistics collection modeBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_partial_collection.fraction_stale_rows
float0.05target fraction of stale rows per table that will trigger a partial statistics refreshBasic/Standard/Advanced/Self-Hosted
sql.stats.automatic_partial_collection.min_stale_rows
integer100target minimum number of stale rows per table that will trigger a partial statistics refreshBasic/Standard/Advanced/Self-Hosted
sql.stats.canary_fraction
float0probability that table statistics will use canary mode instead of stable mode for query planning [0.0-1.0]Basic/Standard/Advanced/Self-Hosted
sql.stats.cleanup.recurrence
string@hourlycron-tab recurrence for SQL Stats cleanup jobBasic/Standard/Advanced/Self-Hosted
sql.stats.detailed_latency_metrics.enabled
booleanfalselabel latency metrics with the statement fingerprint. Workloads with tens of thousands of distinct query fingerprints should leave this setting false. (experimental, affects performance for workloads with high fingerprint cardinality)Basic/Standard/Advanced/Self-Hosted
sql.stats.error_on_concurrent_create_stats.enabled
booleanfalseset to true to error on concurrent CREATE STATISTICS jobs, instead of skipping themBasic/Standard/Advanced/Self-Hosted
sql.stats.flush.enabled
booleantrueif set, SQL execution statistics are periodically flushed to diskBasic/Standard/Advanced/Self-Hosted
sql.stats.flush.interval
duration10m0sthe interval at which SQL execution statistics are flushed to disk, this value must be less than or equal to 1 hourBasic/Standard/Advanced/Self-Hosted
sql.stats.forecasts.enabled
booleantruewhen true, enables generation of statistics forecasts by default for all tablesBasic/Standard/Advanced/Self-Hosted
sql.stats.forecasts.max_decrease
float0.3333333333333333the most a prediction is allowed to decrease, expressed as the minimum ratio of the prediction to the lowest prior observationBasic/Standard/Advanced/Self-Hosted
sql.stats.forecasts.min_goodness_of_fit
float0.95the minimum R² (goodness of fit) measurement required from all predictive models to use a forecastBasic/Standard/Advanced/Self-Hosted
sql.stats.forecasts.min_observations
integer3the minimum number of observed statistics required to produce a statistics forecastBasic/Standard/Advanced/Self-Hosted
sql.stats.histogram_buckets.count
integer200maximum number of histogram buckets to build during table statistics collectionBasic/Standard/Advanced/Self-Hosted
sql.stats.histogram_buckets.include_most_common_values.enabled
booleantruewhether to include most common values as histogram bucketsBasic/Standard/Advanced/Self-Hosted
sql.stats.histogram_buckets.max_fraction_most_common_values
float0.1maximum fraction of histogram buckets to use for most common valuesBasic/Standard/Advanced/Self-Hosted
sql.stats.histogram_collection.enabled
booleantruehistogram collection modeBasic/Standard/Advanced/Self-Hosted
sql.stats.histogram_samples.count
integer0number of rows sampled for histogram construction during table statistics collection. Not setting this or setting a value of 0 means that a reasonable sample size will be automatically picked based on the table size.Basic/Standard/Advanced/Self-Hosted
sql.stats.multi_column_collection.enabled
booleantruemulti-column statistics collection modeBasic/Standard/Advanced/Self-Hosted
sql.stats.non_default_columns.min_retention_period
duration24h0m0sminimum retention period for table statistics collected on non-default columnsBasic/Standard/Advanced/Self-Hosted
sql.stats.non_indexed_json_histograms.enabled
booleanfalseset to true to collect table statistics histograms on non-indexed JSON columnsBasic/Standard/Advanced/Self-Hosted
sql.stats.persisted_rows.max
integer1000000maximum number of rows of statement and transaction statistics that will be persisted in the system tables before compaction beginsBasic/Standard/Advanced/Self-Hosted
sql.stats.post_events.enabled
booleanfalseif set, an event is logged for every successful CREATE STATISTICS jobBasic/Standard/Advanced/Self-Hosted
sql.stats.response.max
integer20000the maximum number of statements and transaction stats returned in a CombinedStatements requestBasic/Standard/Advanced/Self-Hosted
sql.stats.response.show_internal.enabled
booleanfalsecontrols if statistics for internal executions should be returned by the CombinedStatements and if internal sessions should be returned by the ListSessions endpoints. These endpoints are used to display statistics on the SQL Activity pagesBasic/Standard/Advanced/Self-Hosted
sql.stats.system_tables.enabled
booleantruewhen true, enables use of statistics on system tables by the query optimizerBasic/Standard/Advanced/Self-Hosted
sql.stats.system_tables_autostats.enabled
booleantruewhen true, enables automatic collection of statistics on system tablesBasic/Standard/Advanced/Self-Hosted
sql.stats.table_statistics_cache.capacity
integer256the maximum number of table statistics entries stored in the LRU cache. Each cache entry corresponds to a single table.Basic/Standard/Advanced/Self-Hosted
sql.stats.virtual_computed_columns.enabled
booleantrueset to true to collect table statistics on virtual computed columnsBasic/Standard/Advanced/Self-Hosted
sql.telemetry.query_sampling.enabled
booleanfalsewhen set to true, executed queries will emit an event on the telemetry logging channelBasic/Standard/Advanced/Self-Hosted
sql.telemetry.query_sampling.internal.enabled
booleanfalsewhen set to true, internal queries will be sampled in telemetry loggingBasic/Standard/Advanced/Self-Hosted
sql.telemetry.query_sampling.max_event_frequency
integer8the max event frequency (events per second) at which we sample executions for telemetry, note that it is recommended that this value shares a log-line limit of 10 logs per second on the telemetry pipeline with all other telemetry events. If sampling mode is set to 'transaction', this value is ignored.Basic/Standard/Advanced/Self-Hosted
sql.telemetry.query_sampling.mode
enumerationstatementthe execution level used for telemetry sampling. If set to 'statement', events are sampled at the statement execution level. If set to 'transaction', events are sampled at the transaction execution level, i.e. all statements for a transaction will be logged and are counted together as one sampled event (events are still emitted one per statement). [statement = 0, transaction = 1]Basic/Standard/Advanced/Self-Hosted
sql.telemetry.transaction_sampling.max_event_frequency
integer8the max event frequency (events per second) at which we sample transactions for telemetry. If sampling mode is set to 'statement', this setting is ignored. In practice, this means that we only sample a transaction if 1/max_event_frequency seconds have elapsed since the last transaction was sampled.Basic/Standard/Advanced/Self-Hosted
sql.telemetry.transaction_sampling.statement_events_per_transaction.max
integer50the maximum number of statement events to log for every sampled transaction. Note that statements that are logged by force do not adhere to this limit.Basic/Standard/Advanced/Self-Hosted
sql.temp_object_cleaner.cleanup_interval
duration30m0show often to clean up orphaned temporary objectsBasic/Standard/Advanced/Self-Hosted
sql.temp_object_cleaner.wait_interval
duration30m0show long after creation a temporary object will be cleaned upBasic/Standard/Advanced/Self-Hosted
sql.log.all_statements.enabled
(alias: sql.trace.log_statement_execute)
booleanfalseset to true to enable logging of all executed statementsBasic/Standard/Advanced/Self-Hosted
sql.trace.stmt.enable_threshold
duration0senables tracing on all statements; statements executing for longer than this duration will have their trace logged (set to 0 to disable); note that enabling this may have a negative performance impact; this setting applies to individual statements within a transaction and is therefore finer-grained than sql.trace.txn.enable_thresholdBasic/Standard/Advanced/Self-Hosted
sql.trace.txn.enable_threshold
duration0senables transaction traces for transactions exceeding this duration, used with `sql.trace.txn.sample_rate`Basic/Standard/Advanced/Self-Hosted
sql.trace.txn.include_internal.enabled
booleantrueenables tracing internal transactions as well as external workload using sample rate and threshold settingsBasic/Standard/Advanced/Self-Hosted
sql.trace.txn.jaeger_json_output.enabled
booleanfalseenables Jaeger JSON output for transaction traces in logsBasic/Standard/Advanced/Self-Hosted
sql.trace.txn.sample_rate
float1enables probabilistic transaction tracing. It should be used in conjunction with `sql.trace.txn.enable_threshold`. A percentage of transactions between 0 and 1.0 will have tracing enabled, and only those which exceed the configured threshold will be logged.Basic/Standard/Advanced/Self-Hosted
sql.ttl.changefeed_replication.disabled
booleanfalseif true, deletes issued by TTL will not be replicated via changefeeds (this setting will be ignored by changefeeds that have the ignore_disable_changefeed_replication option set; such changefeeds will continue to replicate all TTL deletes)Basic/Standard/Advanced/Self-Hosted
sql.ttl.default_delete_batch_size
integer100default amount of rows to delete in a single query during a TTL jobBasic/Standard/Advanced/Self-Hosted
sql.ttl.default_delete_rate_limit
integer100default delete rate limit (rows per second) per node for each TTL job. Use 0 to signify no rate limit.Basic/Standard/Advanced/Self-Hosted
sql.ttl.default_select_batch_size
integer500default amount of rows to select in a single query during a TTL jobBasic/Standard/Advanced/Self-Hosted
sql.ttl.default_select_rate_limit
integer0default select rate limit (rows per second) per node for each TTL job. Use 0 to signify no rate limit.Basic/Standard/Advanced/Self-Hosted
sql.ttl.job.enabled
booleantruewhether the TTL job is enabledBasic/Standard/Advanced/Self-Hosted
sql.txn.read_committed_isolation.enabled
booleantrueset to true to allow transactions to use the READ COMMITTED isolation level if specified by BEGIN/SET commandsBasic/Standard/Advanced/Self-Hosted
sql.txn.repeatable_read_isolation.enabled
(alias: sql.txn.snapshot_isolation.enabled)
booleanfalseset to true to allow transactions to use the REPEATABLE READ isolation level if specified by BEGIN/SET commandsBasic/Standard/Advanced/Self-Hosted
sql.txn_fingerprint_id_cache.capacity
integer100the maximum number of txn fingerprint IDs storedBasic/Standard/Advanced/Self-Hosted
sql.vecindex.stalled_op.timeout
duration100msamount of time before other vector index workers will assist with a stalled background fixupBasic/Standard/Advanced/Self-Hosted
storage.delete_compaction_excise.enabled
booleantrueset to false to direct Pebble to not partially excise sstables in delete-only compactionsAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
storage.ingest_split.enabled
booleantrueset to false to disable ingest-time splitting that lowers write-amplificationAdvanced/Self-Hosted
storage.ingestion.value_blocks.enabled
booleantrueset to true to enable writing of value blocks in ingestion sstablesBasic/Standard/Advanced/Self-Hosted
storage.max_sync_duration
duration20smaximum duration for disk operations; any operations that take longer than this setting trigger a warning log entry or process crashAdvanced/Self-hosted (read-write); Basic/Standard (read-only)
storage.max_sync_duration.fatal.enabled
booleantrueif true, fatal the process when a disk operation exceeds storage.max_sync_durationBasic/Standard/Advanced/Self-Hosted
storage.sstable.compression_algorithm
enumerationfastestdetermines the compression algorithm to use for Pebble stores [snappy = 1, zstd = 2, none = 3, minlz = 4, fastest = 5, balanced = 6, good = 7, fast = 8]Advanced/Self-hosted (read-write); Basic/Standard (read-only)
storage.sstable.compression_algorithm_backup_storage
enumerationfastestdetermines the compression algorithm to use when compressing sstable data blocks for backup row data storage (fast,balanced,good are experimental); [snappy = 1, zstd = 2, none = 3, minlz = 4, fastest = 5, fast = 6, balanced = 7, good = 8]Advanced/Self-hosted (read-write); Basic/Standard (read-only)
storage.sstable.compression_algorithm_backup_transport
enumerationfastestdetermines the compression algorithm to use when compressing sstable data blocks for backup transport (fast,balanced,good are experimental); [snappy = 1, zstd = 2, none = 3, minlz = 4, fastest = 5, fast = 6, balanced = 7, good = 8]Advanced/Self-hosted (read-write); Basic/Standard (read-only)
storage.unhealthy_write_duration
duration20sduration for disk write operations, beyond which the disk will be reported as unhealthy for higher layer actionsAdvanced/Self-Hosted
storage.wal_failover.unhealthy_op_threshold
duration100msthe latency of a WAL write considered unhealthy and triggers a failover to a secondary WAL locationAdvanced/Self-Hosted
timeseries.storage.enabled
booleantrueif set, periodic timeseries data is stored within the cluster; disabling is not recommended unless you are storing the data elsewhereAdvanced/Self-Hosted
timeseries.storage.resolution_10s.ttl
duration240h0m0sthe maximum age of time series data stored at the 10 second resolution. Data older than this is subject to rollup and deletion.Advanced/Self-hosted (read-write); Basic/Standard (read-only)
timeseries.storage.resolution_1m.ttl
duration240h0m0sthe maximum age of time series data stored at the 1 minute resolution. Data older than this is subject to rollup and deletion.Advanced/Self-hosted (read-write); Basic/Standard (read-only)
timeseries.storage.resolution_30m.ttl
duration2160h0m0sthe maximum age of time series data stored at the 30 minute resolution. Data older than this is subject to deletion.Advanced/Self-hosted (read-write); Basic/Standard (read-only)
trace.debug_http_endpoint.enabled
(alias: trace.debug.enable)
booleanfalseif set, traces for recent requests can be seen at https://<ui>/debug/requestsBasic/Standard/Advanced/Self-Hosted
trace.opentelemetry.collector
stringaddress of an OpenTelemetry trace collector to receive traces using the otel gRPC protocol, as <host>:<port>. If no port is specified, 4317 will be used.Basic/Standard/Advanced/Self-Hosted
trace.snapshot.rate
duration0sif non-zero, interval at which background trace snapshots are capturedBasic/Standard/Advanced/Self-Hosted
trace.span_registry.enabled
booleanfalseif set, ongoing traces can be seen at https://<ui>/#/debug/tracezBasic/Standard/Advanced/Self-Hosted
trace.zipkin.collector
stringthe address of a Zipkin instance to receive traces, as <host>:<port>. If no port is specified, 9411 will be used.Basic/Standard/Advanced/Self-Hosted
ui.database_locality_metadata.enabled
booleantrueif enabled shows extended locality data about databases and tables in DB Console which can be expensive to computeBasic/Standard/Advanced/Self-Hosted
ui.default_timezone
stringthe default timezone used to format timestamps in the uiBasic/Standard/Advanced/Self-Hosted
ui.display_timezone
enumerationetc/utcthe timezone used to format timestamps in the ui. This setting is deprecated and will be removed in a future version. Use the 'ui.default_timezone' setting instead. 'ui.default_timezone' takes precedence over this setting. [etc/utc = 0, america/new_york = 1]Basic/Standard/Advanced/Self-Hosted
version
version26.2set the active cluster version in the format '<major>.<minor>'Basic/Standard/Advanced/Self-Hosted
diff --git a/src/current/_includes/cockroach-generated/release-26.2/sql/aggregates.md b/src/current/_includes/cockroach-generated/release-26.2/sql/aggregates.md new file mode 100644 index 00000000000..5bf74149140 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-26.2/sql/aggregates.md @@ -0,0 +1,599 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_agg(arg1: bool) → bool[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bool[]) → bool[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bytes) → bytes[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: bytes[]) → bytes[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: date) → date[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: date[]) → date[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: decimal) → decimal[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: decimal[]) → decimal[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: float) → float[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: float[]) → float[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: inet) → inet[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: inet[]) → inet[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: int) → int[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: int[]) → int[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: interval) → interval[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: interval[]) → interval[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: string) → string[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: string[]) → string[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: time) → time[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: time[]) → time[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamp) → timestamp[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamp[]) → timestamp[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamptz) → timestamptz[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timestamptz[]) → timestamptz[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: uuid) → uuid[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: uuid[]) → uuid[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: anyenum) → anyenum[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: anyenum[]) → anyenum[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: box2d) → box2d[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: box2d[]) → box2d[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geography) → geography[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geography[]) → geography[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geometry) → geometry[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: geometry[]) → geometry[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: jsonb) → jsonb[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: jsonb[]) → jsonb[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: ltree) → ltree[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: ltree[]) → ltree[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: oid) → oid[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: oid[]) → oid[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: pg_lsn) → pg_lsn[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: pg_lsn[]) → pg_lsn[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: refcursor) → refcursor[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: refcursor[]) → refcursor[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timetz) → timetz[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: timetz[]) → timetz[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: tuple) → tuple[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: tuple[]) → tuple[][]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: varbit) → varbit[]

Aggregates the selected values into an array.

+
Immutable
array_agg(arg1: varbit[]) → varbit[][]

Aggregates the selected values into an array.

+
Immutable
array_cat_agg(arg1: bool[]) → bool[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: bytes[]) → bytes[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: date[]) → date[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: decimal[]) → decimal[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: float[]) → float[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: inet[]) → inet[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: int[]) → int[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: interval[]) → interval[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: string[]) → string[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: time[]) → time[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timestamp[]) → timestamp[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timestamptz[]) → timestamptz[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: uuid[]) → uuid[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: anyenum[]) → anyenum[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: box2d[]) → box2d[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: geography[]) → geography[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: geometry[]) → geometry[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: jsonb[]) → jsonb[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: ltree[]) → ltree[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: oid[]) → oid[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: pg_lsn[]) → pg_lsn[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: refcursor[]) → refcursor[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: timetz[]) → timetz[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: tuple[]) → tuple[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
array_cat_agg(arg1: varbit[]) → varbit[]

Unnests the selected arrays into elements that are then aggregated into a single array.

+
Immutable
avg(arg1: decimal) → decimal

Calculates the average of the selected values.

+
Immutable
avg(arg1: float) → float

Calculates the average of the selected values.

+
Immutable
avg(arg1: int) → decimal

Calculates the average of the selected values.

+
Immutable
avg(arg1: interval) → interval

Calculates the average of the selected values.

+
Immutable
bit_and(arg1: int) → int

Calculates the bitwise AND of all non-null input values, or null if none.

+
Immutable
bit_and(arg1: varbit) → varbit

Calculates the bitwise AND of all non-null input values, or null if none.

+
Immutable
bit_or(arg1: int) → int

Calculates the bitwise OR of all non-null input values, or null if none.

+
Immutable
bit_or(arg1: varbit) → varbit

Calculates the bitwise OR of all non-null input values, or null if none.

+
Immutable
bool_and(arg1: bool) → bool

Calculates the boolean value of ANDing all selected values.

+
Immutable
bool_or(arg1: bool) → bool

Calculates the boolean value of ORing all selected values.

+
Immutable
concat_agg(arg1: bytes) → bytes

Concatenates all selected values.

+
Immutable
concat_agg(arg1: string) → string

Concatenates all selected values.

+
Immutable
corr(arg1: decimal, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: decimal, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: decimal, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: float, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: decimal) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: float) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
corr(arg1: int, arg2: int) → float

Calculates the correlation coefficient of the selected values.

+
Immutable
count(arg1: anyelement) → int

Calculates the number of selected elements.

+
Immutable
count_rows() → int

Calculates the number of rows.

+
Immutable
covar_pop(arg1: decimal, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: decimal, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: decimal, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: float, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: decimal) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: float) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_pop(arg1: int, arg2: int) → float

Calculates the population covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: decimal, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: float, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: decimal) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: float) → float

Calculates the sample covariance of the selected values.

+
Immutable
covar_samp(arg1: int, arg2: int) → float

Calculates the sample covariance of the selected values.

+
Immutable
every(arg1: bool) → bool

Calculates the boolean value of ANDing all selected values.

+
Immutable
json_agg(arg1: anyelement) → jsonb

Aggregates values as a JSON or JSONB array.

+
Stable
json_object_agg(arg1: string, arg2: anyelement) → jsonb

Aggregates values as a JSON or JSONB object.

+
Stable
jsonb_agg(arg1: anyelement) → jsonb

Aggregates values as a JSON or JSONB array.

+
Stable
jsonb_object_agg(arg1: string, arg2: anyelement) → jsonb

Aggregates values as a JSON or JSONB object.

+
Stable
max(arg1: bool) → bool

Identifies the maximum selected value.

+
Immutable
max(arg1: bytes) → bytes

Identifies the maximum selected value.

+
Immutable
max(arg1: date) → date

Identifies the maximum selected value.

+
Immutable
max(arg1: decimal) → decimal

Identifies the maximum selected value.

+
Immutable
max(arg1: float) → float

Identifies the maximum selected value.

+
Immutable
max(arg1: inet) → inet

Identifies the maximum selected value.

+
Immutable
max(arg1: int) → int

Identifies the maximum selected value.

+
Immutable
max(arg1: interval) → interval

Identifies the maximum selected value.

+
Immutable
max(arg1: string) → string

Identifies the maximum selected value.

+
Immutable
max(arg1: time) → time

Identifies the maximum selected value.

+
Immutable
max(arg1: timestamp) → timestamp

Identifies the maximum selected value.

+
Immutable
max(arg1: timestamptz) → timestamptz

Identifies the maximum selected value.

+
Immutable
max(arg1: uuid) → uuid

Identifies the maximum selected value.

+
Immutable
max(arg1: anyenum) → anyenum

Identifies the maximum selected value.

+
Immutable
max(arg1: box2d) → box2d

Identifies the maximum selected value.

+
Immutable
max(arg1: collatedstring{*}) → collatedstring{*}

Identifies the maximum selected value.

+
Immutable
max(arg1: geography) → geography

Identifies the maximum selected value.

+
Immutable
max(arg1: geometry) → geometry

Identifies the maximum selected value.

+
Immutable
max(arg1: jsonb) → jsonb

Identifies the maximum selected value.

+
Immutable
max(arg1: ltree) → ltree

Identifies the maximum selected value.

+
Immutable
max(arg1: oid) → oid

Identifies the maximum selected value.

+
Immutable
max(arg1: pg_lsn) → pg_lsn

Identifies the maximum selected value.

+
Immutable
max(arg1: timetz) → timetz

Identifies the maximum selected value.

+
Immutable
max(arg1: varbit) → varbit

Identifies the maximum selected value.

+
Immutable
min(arg1: bool) → bool

Identifies the minimum selected value.

+
Immutable
min(arg1: bytes) → bytes

Identifies the minimum selected value.

+
Immutable
min(arg1: date) → date

Identifies the minimum selected value.

+
Immutable
min(arg1: decimal) → decimal

Identifies the minimum selected value.

+
Immutable
min(arg1: float) → float

Identifies the minimum selected value.

+
Immutable
min(arg1: inet) → inet

Identifies the minimum selected value.

+
Immutable
min(arg1: int) → int

Identifies the minimum selected value.

+
Immutable
min(arg1: interval) → interval

Identifies the minimum selected value.

+
Immutable
min(arg1: string) → string

Identifies the minimum selected value.

+
Immutable
min(arg1: time) → time

Identifies the minimum selected value.

+
Immutable
min(arg1: timestamp) → timestamp

Identifies the minimum selected value.

+
Immutable
min(arg1: timestamptz) → timestamptz

Identifies the minimum selected value.

+
Immutable
min(arg1: uuid) → uuid

Identifies the minimum selected value.

+
Immutable
min(arg1: anyenum) → anyenum

Identifies the minimum selected value.

+
Immutable
min(arg1: box2d) → box2d

Identifies the minimum selected value.

+
Immutable
min(arg1: collatedstring{*}) → collatedstring{*}

Identifies the minimum selected value.

+
Immutable
min(arg1: geography) → geography

Identifies the minimum selected value.

+
Immutable
min(arg1: geometry) → geometry

Identifies the minimum selected value.

+
Immutable
min(arg1: jsonb) → jsonb

Identifies the minimum selected value.

+
Immutable
min(arg1: ltree) → ltree

Identifies the minimum selected value.

+
Immutable
min(arg1: oid) → oid

Identifies the minimum selected value.

+
Immutable
min(arg1: pg_lsn) → pg_lsn

Identifies the minimum selected value.

+
Immutable
min(arg1: timetz) → timetz

Identifies the minimum selected value.

+
Immutable
min(arg1: varbit) → varbit

Identifies the minimum selected value.

+
Immutable
percentile_cont(arg1: float) → float

Continuous percentile: returns a float corresponding to the specified fraction in the ordering, interpolating between adjacent input floats if needed.

+
Immutable
percentile_cont(arg1: float) → interval

Continuous percentile: returns an interval corresponding to the specified fraction in the ordering, interpolating between adjacent input intervals if needed.

+
Immutable
percentile_cont(arg1: float[]) → float[]

Continuous percentile: returns floats corresponding to the specified fractions in the ordering, interpolating between adjacent input floats if needed.

+
Immutable
percentile_cont(arg1: float[]) → interval[]

Continuous percentile: returns intervals corresponding to the specified fractions in the ordering, interpolating between adjacent input intervals if needed.

+
Immutable
percentile_disc(arg1: float) → anyelement

Discrete percentile: returns the first input value whose position in the ordering equals or exceeds the specified fraction.

+
Immutable
percentile_disc(arg1: float[]) → anyelement

Discrete percentile: returns input values whose position in the ordering equals or exceeds the specified fractions.

+
Immutable
regr_avgx(arg1: decimal, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: decimal, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: decimal, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: float, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: decimal) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: float) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgx(arg1: int, arg2: int) → float

Calculates the average of the independent variable (sum(X)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: decimal, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: float, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: decimal) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: float) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_avgy(arg1: int, arg2: int) → float

Calculates the average of the dependent variable (sum(Y)/N).

+
Immutable
regr_count(arg1: decimal, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: decimal, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: decimal, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: float, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: decimal) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: float) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_count(arg1: int, arg2: int) → int

Calculates number of input rows in which both expressions are nonnull.

+
Immutable
regr_intercept(arg1: decimal, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: decimal, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: decimal, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: float, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: decimal) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: float) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_intercept(arg1: int, arg2: int) → float

Calculates y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_r2(arg1: decimal, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: decimal, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: decimal, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: float, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: decimal) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: float) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_r2(arg1: int, arg2: int) → float

Calculates square of the correlation coefficient.

+
Immutable
regr_slope(arg1: decimal, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: decimal, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: decimal, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: float, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: decimal) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: float) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_slope(arg1: int, arg2: int) → float

Calculates slope of the least-squares-fit linear equation determined by the (X, Y) pairs.

+
Immutable
regr_sxx(arg1: decimal, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: decimal, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: decimal, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: float, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: decimal) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: float) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxx(arg1: int, arg2: int) → float

Calculates sum of squares of the independent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: decimal, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: float, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: decimal) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: float) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_sxy(arg1: int, arg2: int) → float

Calculates sum of products of independent times dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: decimal, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: float, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: decimal) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: float) → float

Calculates sum of squares of the dependent variable.

+
Immutable
regr_syy(arg1: int, arg2: int) → float

Calculates sum of squares of the dependent variable.

+
Immutable
sqrdiff(arg1: decimal) → decimal

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
sqrdiff(arg1: float) → float

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
sqrdiff(arg1: int) → decimal

Calculates the sum of squared differences from the mean of the selected values.

+
Immutable
st_asmvt(arg1: tuple) → bytes

Generates a Mapbox Vector Tile (MVT) representation of a set of rows. Uses default layer name ‘default’ and extent 4096. Expects a geometry column named ‘geom’ in the input rows.

+
Immutable
st_asmvt(arg1: tuple, arg2: string) → bytes

Generates a Mapbox Vector Tile (MVT) representation of a set of rows with the specified layer name. Uses extent 4096 and expects a geometry column named ‘geom’ in the input rows.

+
Immutable
st_asmvt(arg1: tuple, arg2: string, arg3: int) → bytes

Generates a Mapbox Vector Tile (MVT) representation of a set of rows with the specified layer name and extent. Expects a geometry column named ‘geom’ in the input rows.

+
Immutable
st_asmvt(arg1: tuple, arg2: string, arg3: int, arg4: string) → bytes

Generates a Mapbox Vector Tile (MVT) representation of a set of rows with the specified layer name, extent, and geometry column name.

+
Immutable
st_asmvt(arg1: tuple, arg2: string, arg3: int, arg4: string, arg5: string) → bytes

Generates a Mapbox Vector Tile (MVT) representation of a set of rows with the specified layer name, extent, geometry column name, and feature ID column name.

+
Immutable
st_collect(arg1: geometry) → geometry

Collects geometries into a GeometryCollection or multi-type as appropriate.

+
Immutable
st_extent(arg1: geometry) → box2d

Forms a Box2D that encapsulates all provided geometries.

+
Immutable
st_makeline(arg1: geometry) → geometry

Forms a LineString from Point, MultiPoint or LineStrings. Other shapes will be ignored.

+
Immutable
st_memcollect(arg1: geometry) → geometry

Collects geometries into a GeometryCollection or multi-type as appropriate.

+
Immutable
st_memunion(arg1: geometry) → geometry

Applies a spatial union to the geometries provided.

+
Immutable
st_union(arg1: geometry) → geometry

Applies a spatial union to the geometries provided.

+
Immutable
stddev(arg1: decimal) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev(arg1: float) → float

Calculates the standard deviation of the selected values.

+
Immutable
stddev(arg1: int) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: decimal) → decimal

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: float) → float

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_pop(arg1: int) → decimal

Calculates the population standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: decimal) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: float) → float

Calculates the standard deviation of the selected values.

+
Immutable
stddev_samp(arg1: int) → decimal

Calculates the standard deviation of the selected values.

+
Immutable
string_agg(arg1: bytes, arg2: bytes) → bytes

Concatenates all selected values using the provided delimiter.

+
Immutable
string_agg(arg1: string, arg2: string) → string

Concatenates all selected values using the provided delimiter.

+
Immutable
sum(arg1: decimal) → decimal

Calculates the sum of the selected values.

+
Immutable
sum(arg1: float) → float

Calculates the sum of the selected values.

+
Immutable
sum(arg1: int) → decimal

Calculates the sum of the selected values.

+
Immutable
sum(arg1: interval) → interval

Calculates the sum of the selected values.

+
Immutable
sum_int(arg1: int) → int

Calculates the sum of the selected values.

+
Immutable
var_pop(arg1: decimal) → decimal

Calculates the population variance of the selected values.

+
Immutable
var_pop(arg1: float) → float

Calculates the population variance of the selected values.

+
Immutable
var_pop(arg1: int) → decimal

Calculates the population variance of the selected values.

+
Immutable
var_samp(arg1: decimal) → decimal

Calculates the variance of the selected values.

+
Immutable
var_samp(arg1: float) → float

Calculates the variance of the selected values.

+
Immutable
var_samp(arg1: int) → decimal

Calculates the variance of the selected values.

+
Immutable
variance(arg1: decimal) → decimal

Calculates the variance of the selected values.

+
Immutable
variance(arg1: float) → float

Calculates the variance of the selected values.

+
Immutable
variance(arg1: int) → decimal

Calculates the variance of the selected values.

+
Immutable
xor_agg(arg1: bytes) → bytes

Calculates the bitwise XOR of the selected values.

+
Immutable
xor_agg(arg1: int) → int

Calculates the bitwise XOR of the selected values.

+
Immutable
+ diff --git a/src/current/_includes/cockroach-generated/release-26.2/sql/functions.md b/src/current/_includes/cockroach-generated/release-26.2/sql/functions.md new file mode 100644 index 00000000000..93e7b8480e2 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-26.2/sql/functions.md @@ -0,0 +1,3732 @@ +### Array functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_append(array: bool[], elem: bool) → bool[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: bytes[], elem: bytes) → bytes[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: date[], elem: date) → date[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: decimal[], elem: decimal) → decimal[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: float[], elem: float) → float[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: inet[], elem: inet) → inet[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: int[], elem: int) → int[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: interval[], elem: interval) → interval[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: string[], elem: string) → string[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: time[], elem: time) → time[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timestamp[], elem: timestamp) → timestamp[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timestamptz[], elem: timestamptz) → timestamptz[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: uuid[], elem: uuid) → uuid[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: anyenum[], elem: anyenum) → anyenum[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: box2d[], elem: box2d) → box2d[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: geography[], elem: geography) → geography[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: geometry[], elem: geometry) → geometry[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: jsonb[], elem: jsonb) → jsonb[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: ltree[], elem: ltree) → ltree[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: oid[], elem: oid) → oid[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: pg_lsn[], elem: pg_lsn) → pg_lsn[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: refcursor[], elem: refcursor) → refcursor[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: timetz[], elem: timetz) → timetz[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: tuple[], elem: tuple) → tuple[]

Appends elem to array, returning the result.

+
Immutable
array_append(array: varbit[], elem: varbit) → varbit[]

Appends elem to array, returning the result.

+
Immutable
array_cat(left: bool[], right: bool[]) → bool[]

Appends two arrays.

+
Immutable
array_cat(left: bytes[], right: bytes[]) → bytes[]

Appends two arrays.

+
Immutable
array_cat(left: date[], right: date[]) → date[]

Appends two arrays.

+
Immutable
array_cat(left: decimal[], right: decimal[]) → decimal[]

Appends two arrays.

+
Immutable
array_cat(left: float[], right: float[]) → float[]

Appends two arrays.

+
Immutable
array_cat(left: inet[], right: inet[]) → inet[]

Appends two arrays.

+
Immutable
array_cat(left: int[], right: int[]) → int[]

Appends two arrays.

+
Immutable
array_cat(left: interval[], right: interval[]) → interval[]

Appends two arrays.

+
Immutable
array_cat(left: string[], right: string[]) → string[]

Appends two arrays.

+
Immutable
array_cat(left: time[], right: time[]) → time[]

Appends two arrays.

+
Immutable
array_cat(left: timestamp[], right: timestamp[]) → timestamp[]

Appends two arrays.

+
Immutable
array_cat(left: timestamptz[], right: timestamptz[]) → timestamptz[]

Appends two arrays.

+
Immutable
array_cat(left: uuid[], right: uuid[]) → uuid[]

Appends two arrays.

+
Immutable
array_cat(left: anyenum[], right: anyenum[]) → anyenum[]

Appends two arrays.

+
Immutable
array_cat(left: box2d[], right: box2d[]) → box2d[]

Appends two arrays.

+
Immutable
array_cat(left: geography[], right: geography[]) → geography[]

Appends two arrays.

+
Immutable
array_cat(left: geometry[], right: geometry[]) → geometry[]

Appends two arrays.

+
Immutable
array_cat(left: jsonb[], right: jsonb[]) → jsonb[]

Appends two arrays.

+
Immutable
array_cat(left: ltree[], right: ltree[]) → ltree[]

Appends two arrays.

+
Immutable
array_cat(left: oid[], right: oid[]) → oid[]

Appends two arrays.

+
Immutable
array_cat(left: pg_lsn[], right: pg_lsn[]) → pg_lsn[]

Appends two arrays.

+
Immutable
array_cat(left: refcursor[], right: refcursor[]) → refcursor[]

Appends two arrays.

+
Immutable
array_cat(left: timetz[], right: timetz[]) → timetz[]

Appends two arrays.

+
Immutable
array_cat(left: tuple[], right: tuple[]) → tuple[]

Appends two arrays.

+
Immutable
array_cat(left: varbit[], right: varbit[]) → varbit[]

Appends two arrays.

+
Immutable
array_length(input: anyelement[], array_dimension: int) → int

Calculates the length of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
array_lower(input: anyelement[], array_dimension: int) → int

Calculates the minimum value of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
array_position(array: bool[], elem: bool) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: bool[], elem: bool, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: bytes[], elem: bytes) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: bytes[], elem: bytes, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: date[], elem: date) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: date[], elem: date, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: decimal[], elem: decimal) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: decimal[], elem: decimal, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: float[], elem: float) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: float[], elem: float, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: inet[], elem: inet) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: inet[], elem: inet, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: int[], elem: int) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: int[], elem: int, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: interval[], elem: interval) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: interval[], elem: interval, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: string[], elem: string) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: string[], elem: string, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: time[], elem: time) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: time[], elem: time, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: timestamp[], elem: timestamp) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timestamp[], elem: timestamp, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: timestamptz[], elem: timestamptz) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timestamptz[], elem: timestamptz, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: uuid[], elem: uuid) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: uuid[], elem: uuid, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: anyenum[], elem: anyenum) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: anyenum[], elem: anyenum, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: box2d[], elem: box2d) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: box2d[], elem: box2d, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: geography[], elem: geography) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: geography[], elem: geography, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: geometry[], elem: geometry) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: geometry[], elem: geometry, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: jsonb[], elem: jsonb) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: jsonb[], elem: jsonb, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: ltree[], elem: ltree) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: ltree[], elem: ltree, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: oid[], elem: oid) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: oid[], elem: oid, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: pg_lsn[], elem: pg_lsn) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: pg_lsn[], elem: pg_lsn, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: refcursor[], elem: refcursor) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: refcursor[], elem: refcursor, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: timetz[], elem: timetz) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: timetz[], elem: timetz, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: tuple[], elem: tuple) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: tuple[], elem: tuple, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_position(array: varbit[], elem: varbit) → int

Return the index of the first occurrence of elem in array.

+
Immutable
array_position(array: varbit[], elem: varbit, start: int) → int

Return the index of the first occurrence of elem in array, with the search begins at start index.

+
Immutable
array_positions(array: bool[], elem: bool) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: bytes[], elem: bytes) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: date[], elem: date) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: decimal[], elem: decimal) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: float[], elem: float) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: inet[], elem: inet) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: int[], elem: int) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: interval[], elem: interval) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: string[], elem: string) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: time[], elem: time) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timestamp[], elem: timestamp) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timestamptz[], elem: timestamptz) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: uuid[], elem: uuid) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: anyenum[], elem: anyenum) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: box2d[], elem: box2d) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: geography[], elem: geography) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: geometry[], elem: geometry) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: jsonb[], elem: jsonb) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: ltree[], elem: ltree) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: oid[], elem: oid) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: pg_lsn[], elem: pg_lsn) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: refcursor[], elem: refcursor) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: timetz[], elem: timetz) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: tuple[], elem: tuple) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_positions(array: varbit[], elem: varbit) → int[]

Returns an array of indexes of all occurrences of elem in array.

+
Immutable
array_prepend(elem: bool, array: bool[]) → bool[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: bytes, array: bytes[]) → bytes[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: date, array: date[]) → date[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: decimal, array: decimal[]) → decimal[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: float, array: float[]) → float[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: inet, array: inet[]) → inet[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: int, array: int[]) → int[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: interval, array: interval[]) → interval[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: string, array: string[]) → string[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: time, array: time[]) → time[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timestamp, array: timestamp[]) → timestamp[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timestamptz, array: timestamptz[]) → timestamptz[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: uuid, array: uuid[]) → uuid[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: anyenum, array: anyenum[]) → anyenum[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: box2d, array: box2d[]) → box2d[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: geography, array: geography[]) → geography[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: geometry, array: geometry[]) → geometry[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: jsonb, array: jsonb[]) → jsonb[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: ltree, array: ltree[]) → ltree[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: oid, array: oid[]) → oid[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: pg_lsn, array: pg_lsn[]) → pg_lsn[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: refcursor, array: refcursor[]) → refcursor[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: timetz, array: timetz[]) → timetz[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: tuple, array: tuple[]) → tuple[]

Prepends elem to array, returning the result.

+
Immutable
array_prepend(elem: varbit, array: varbit[]) → varbit[]

Prepends elem to array, returning the result.

+
Immutable
array_remove(array: bool[], elem: bool) → bool[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: bytes[], elem: bytes) → bytes[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: date[], elem: date) → date[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: decimal[], elem: decimal) → decimal[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: float[], elem: float) → float[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: inet[], elem: inet) → inet[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: int[], elem: int) → int[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: interval[], elem: interval) → interval[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: string[], elem: string) → string[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: time[], elem: time) → time[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timestamp[], elem: timestamp) → timestamp[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timestamptz[], elem: timestamptz) → timestamptz[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: uuid[], elem: uuid) → uuid[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: anyenum[], elem: anyenum) → anyenum[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: box2d[], elem: box2d) → box2d[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: geography[], elem: geography) → geography[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: geometry[], elem: geometry) → geometry[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: jsonb[], elem: jsonb) → jsonb[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: ltree[], elem: ltree) → ltree[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: oid[], elem: oid) → oid[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: pg_lsn[], elem: pg_lsn) → pg_lsn[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: refcursor[], elem: refcursor) → refcursor[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: timetz[], elem: timetz) → timetz[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: tuple[], elem: tuple) → tuple[]

Remove from array all elements equal to elem.

+
Immutable
array_remove(array: varbit[], elem: varbit) → varbit[]

Remove from array all elements equal to elem.

+
Immutable
array_replace(array: bool[], toreplace: bool, replacewith: bool) → bool[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: bytes[], toreplace: bytes, replacewith: bytes) → bytes[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: date[], toreplace: date, replacewith: date) → date[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: decimal[], toreplace: decimal, replacewith: decimal) → decimal[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: float[], toreplace: float, replacewith: float) → float[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: inet[], toreplace: inet, replacewith: inet) → inet[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: int[], toreplace: int, replacewith: int) → int[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: interval[], toreplace: interval, replacewith: interval) → interval[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: string[], toreplace: string, replacewith: string) → string[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: time[], toreplace: time, replacewith: time) → time[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timestamp[], toreplace: timestamp, replacewith: timestamp) → timestamp[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timestamptz[], toreplace: timestamptz, replacewith: timestamptz) → timestamptz[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: uuid[], toreplace: uuid, replacewith: uuid) → uuid[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: anyenum[], toreplace: anyenum, replacewith: anyenum) → anyenum[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: box2d[], toreplace: box2d, replacewith: box2d) → box2d[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: geography[], toreplace: geography, replacewith: geography) → geography[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: geometry[], toreplace: geometry, replacewith: geometry) → geometry[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: jsonb[], toreplace: jsonb, replacewith: jsonb) → jsonb[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: ltree[], toreplace: ltree, replacewith: ltree) → ltree[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: oid[], toreplace: oid, replacewith: oid) → oid[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: pg_lsn[], toreplace: pg_lsn, replacewith: pg_lsn) → pg_lsn[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: refcursor[], toreplace: refcursor, replacewith: refcursor) → refcursor[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: timetz[], toreplace: timetz, replacewith: timetz) → timetz[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: tuple[], toreplace: tuple, replacewith: tuple) → tuple[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_replace(array: varbit[], toreplace: varbit, replacewith: varbit) → varbit[]

Replace all occurrences of toreplace in array with replacewith.

+
Immutable
array_to_string(input: anyelement[], delim: string) → string

Join an array into a string with a delimiter.

+
Stable
array_to_string(input: anyelement[], delimiter: string, null: string) → string

Join an array into a string with a delimiter, replacing NULLs with a null string.

+
Stable
array_upper(input: anyelement[], array_dimension: int) → int

Calculates the maximum value of input on the provided array_dimension. However, because CockroachDB doesn’t yet support multi-dimensional arrays, the only supported array_dimension is 1.

+
Immutable
cardinality(input: anyelement[]) → int

Calculates the number of elements contained in input

+
Immutable
jsonb_array_to_string_array(input: jsonb) → string[]

Convert a JSONB array into a string array.

+
Immutable
string_to_array(str: string, delimiter: string) → string[]

Split a string into components on a delimiter.

+
Immutable
string_to_array(str: string, delimiter: string, null: string) → string[]

Split a string into components on a delimiter with a specified string to consider NULL.

+
Immutable
+ +### BOOL functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
ilike_escape(unescaped: string, pattern: string, escape: string) → bool

Matches case insensitively unescaped with pattern using escape as an escape token.

+
Immutable
inet_contained_by_or_equals(val: inet, container: inet) → bool

Test for subnet inclusion or equality, using only the network parts of the addresses. The host part of the addresses is ignored.

+
Immutable
inet_contains_or_equals(container: inet, val: inet) → bool

Test for subnet inclusion or equality, using only the network parts of the addresses. The host part of the addresses is ignored.

+
Immutable
inet_same_family(val: inet, val: inet) → bool

Checks if two IP addresses are of the same IP family.

+
Immutable
like_escape(unescaped: string, pattern: string, escape: string) → bool

Matches unescaped with pattern using escape as an escape token.

+
Immutable
not_ilike_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped not matches case insensitively with pattern using escape as an escape token.

+
Immutable
not_like_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped not matches with pattern using escape as an escape token.

+
Immutable
not_similar_to_escape(unescaped: string, pattern: string, escape: string) → bool

Checks whether unescaped not matches with pattern using escape as an escape token.

+
Immutable
+ +### Comparison functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
greatest(anyelement...) → anyelement

Returns the element with the greatest value.

+
Immutable
least(anyelement...) → anyelement

Returns the element with the lowest value.

+
Immutable
num_nonnulls(any...) → int

Returns the number of nonnull arguments.

+
Immutable
num_nulls(any...) → int

Returns the number of null arguments.

+
Immutable
+ +### Cryptographic functions + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
crypt(password: string, salt: string) → string

Generates a hash based on a password and salt. The hash algorithm and number of rounds if applicable are encoded in the salt.

+
Immutable
decrypt(data: bytes, key: bytes, type: string) → bytes

Decrypt data with key using the cipher method specified by type.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
decrypt_iv(data: bytes, key: bytes, iv: bytes, type: string) → bytes

Decrypt data with key using the cipher method specified by type. If the mode is CBC, the provided iv will be used. Otherwise, it will be ignored.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
digest(data: bytes, type: string) → bytes

Computes a binary hash of the given data. type is the algorithm to use (md5, sha1, sha224, sha256, sha384, or sha512).

+
Immutable
digest(data: string, type: string) → bytes

Computes a binary hash of the given data. type is the algorithm to use (md5, sha1, sha224, sha256, sha384, or sha512).

+
Immutable
encrypt(data: bytes, key: bytes, type: string) → bytes

Encrypt data with key using the cipher method specified by type.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
encrypt_iv(data: bytes, key: bytes, iv: bytes, type: string) → bytes

Encrypt data with key using the cipher method specified by type. If the mode is CBC, the provided iv will be used. Otherwise, it will be ignored.

+

The cipher type must have the format <algorithm>[-<mode>][/pad:<padding>] where:

+
    +
  • <algorithm> is aes
  • +
  • <mode> is cbc (default)
  • +
  • <padding> is pkcs (default) or none
  • +
+

This function requires an enterprise license on a CCL distribution.

+
Immutable
gen_random_bytes(count: int) → bytes

Returns count cryptographically strong random bytes. At most 1024 bytes can be extracted at a time.

+
Volatile
gen_salt(type: string) → string

Generates a salt for input into the crypt function using the default number of rounds.

+
Volatile
gen_salt(type: string, iter_count: int) → string

Generates a salt for input into the crypt function using iter_count number of rounds.

+
Volatile
hmac(data: bytes, key: bytes, type: string) → bytes

Calculates hashed MAC for data with key key. type is the same as in digest().

+
Immutable
hmac(data: string, key: string, type: string) → bytes

Calculates hashed MAC for data with key key. type is the same as in digest().

+
Immutable
+ +### DECIMAL functions + + + + + + +
Function → ReturnsDescriptionVolatility
hlc_to_timestamp(hlc: decimal) → timestamptz

Returns a TimestampTZ representation of a CockroachDB HLC in decimal form.

+

Note that a TimestampTZ has less precision than a CockroachDB HLC. It is intended as +a convenience function to display HLCs in a print-friendly form. Use the decimal +value if you rely on the HLC for accuracy.

+
Immutable
to_number(value: string, format: string) → decimal

Convert a string to a numeric using the given format.

+
Stable
+ +### Date and time functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
age(end: timestamptz, begin: timestamptz) → interval

Calculates the interval between begin and end, normalized into years, months and days.

+

Note this may not be an accurate time span since years and months are normalized +from days, and years and months are out of context. To avoid normalizing days into +months and years, use the timestamptz subtraction operator.

+
Immutable
age(val: timestamptz) → interval

Calculates the interval between val and the current time, normalized into years, months and days.

+

Note this may not be an accurate time span since years and months are normalized +from days, and years and months are out of context. To avoid normalizing days into +months and years, use now() - timestamptz.

+
Stable
clock_timestamp() → timestamp

Returns the current system time on one of the cluster nodes.

+
Volatile
clock_timestamp() → timestamptz

Returns the current system time on one of the cluster nodes.

+
Volatile
current_date() → date

Returns the date of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
current_timestamp(precision: int) → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp(precision: int) → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
current_timestamp(precision: int) → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
date_part(element: string, input: date) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: interval) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, +month, day, hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: time) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: timestamp) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
date_part(element: string, input: timestamptz) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Stable
date_part(element: string, input: timetz) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Immutable
date_trunc(element: string, input: date) → timestamptz

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: interval) → interval

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: time) → interval

Truncates input to precision element. Sets all fields that are less +significant than element to zero.

+

Compatible elements: hour, minute, second, millisecond, microsecond.

+
Immutable
date_trunc(element: string, input: timestamp) → timestamp

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Immutable
date_trunc(element: string, input: timestamptz) → timestamptz

Truncates input to precision element. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
date_trunc(element: string, input: timestamptz, timezone: string) → timestamptz

Truncates input to precision element in the specified timezone. Sets all fields that are less +significant than element to zero (or one, for day and month)

+

Compatible elements: millennium, century, decade, year, quarter, month, +week, day, hour, minute, second, millisecond, microsecond.

+
Stable
experimental_follower_read_timestamp() → timestamptz

Same as follower_read_timestamp. This name is deprecated.

+
Volatile
experimental_strftime(input: date, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strftime(input: timestamp, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strftime(input: timestamptz, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
experimental_strptime(input: string, format: string) → timestamptz

Returns input as a timestamptz using format (which uses standard strptime formatting).

+
Immutable
extract(element: string, input: date) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: interval) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, +month, day, hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: time) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: timestamp) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch

+
Immutable
extract(element: string, input: timestamptz) → float

Extracts element from input.

+

Compatible elements: millennium, century, decade, year, isoyear, +quarter, month, week, dayofweek, isodow, dayofyear, julian, +hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Stable
extract(element: string, input: timetz) → float

Extracts element from input.

+

Compatible elements: hour, minute, second, millisecond, microsecond, epoch, +timezone, timezone_hour, timezone_minute

+
Immutable
extract_duration(element: string, input: interval) → int

Extracts element from input. +Compatible elements: hour, minute, second, millisecond, microsecond. +This is deprecated in favor of extract which supports duration.

+
Immutable
follower_read_timestamp() → timestamptz

Returns a timestamp which is very likely to be safe to perform +against a follower replica.

+

This function is intended to be used with an AS OF SYSTEM TIME clause to perform +historical reads against a time which is recent but sufficiently old for reads +to be performed against the closest replica as opposed to the currently +leaseholder for a given range.

+

Note that this function requires an enterprise license on a CCL distribution to +return a result that is less likely the closest replica. It is otherwise +hardcoded as -4.8s from the statement time, which may not result in reading from the +nearest replica.

+
Volatile
localtimestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtimestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp(precision: int) → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
localtimestamp(precision: int) → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtimestamp(precision: int) → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
make_date(year: int, month: int, day: int) → date

Create date (formatted according to ISO 8601) from year, month, and day fields (negative years signify BC).

+
Immutable
make_timestamp(year: int, month: int, day: int, hour: int, min: int, sec: float) → timestamp

Create timestamp (formatted according to ISO 8601) from year, month, day, hour, minute, and seconds fields (negative years signify BC).

+
Immutable
make_timestamptz(year: int, month: int, day: int, hour: int, min: int, sec: float) → timestamptz

Create timestamp (formatted according to ISO 8601) with time zone from year, month, day, hour, minute and seconds fields (negative years signify BC). If timezone is not specified, the current time zone is used.

+
Stable
make_timestamptz(year: int, month: int, day: int, hour: int, min: int, sec: float, timezone: string) → timestamptz

Create timestamp (formatted according to ISO 8601) with time zone from year, month, day, hour, minute and seconds fields (negative years signify BC). If timezone is not specified, the current time zone is used.

+
Stable
now() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
now() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
now() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
overlaps(s1: date, e1: date, s1: date, e2: date) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: date, e1: interval, s1: date, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: time, e1: interval, s1: time, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: time, e1: time, s1: time, e2: time) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamp, e1: interval, s1: timestamp, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamp, e1: timestamp, s1: timestamp, e2: timestamp) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timestamptz, e1: interval, s1: timestamptz, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Stable
overlaps(s1: timestamptz, e1: timestamptz, s1: timestamptz, e2: timestamptz) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timetz, e1: interval, s1: timetz, e2: interval) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
overlaps(s1: timetz, e1: timetz, s1: timetz, e2: timetz) → bool

Returns if two time periods (defined by their endpoints) overlap.

+
Immutable
statement_timestamp() → timestamp

Returns the start time of the current statement.

+
Stable
statement_timestamp() → timestamptz

Returns the start time of the current statement.

+
Stable
strftime(input: date, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strftime(input: timestamp, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strftime(input: timestamptz, extract_format: string) → string

From input, extracts and formats the time as identified in extract_format using standard strftime notation (though not all formatting is supported).

+
Immutable
strptime(input: string, format: string) → timestamptz

Returns input as a timestamptz using format (which uses standard strptime formatting).

+
Immutable
timeofday() → string

Returns the current system time on one of the cluster nodes as a string.

+
Stable
timezone(timezone: string, time: time) → timetz

Treat given time without time zone as located in the specified time zone.

+
Stable
timezone(timezone: string, timestamp: timestamp) → timestamptz

Treat given time stamp without time zone as located in the specified time zone.

+
Immutable
timezone(timezone: string, timestamptz: timestamptz) → timestamp

Convert given time stamp with time zone to the new time zone, with no time zone designation.

+
Immutable
timezone(timezone: string, timestamptz_string: string) → timestamp

Convert given time stamp with time zone to the new time zone, with no time zone designation.

+
Stable
timezone(timezone: string, timetz: timetz) → timetz

Convert given time with time zone to the new time zone.

+
Stable
to_char(date: date) → string

Convert a date to a string assuming the ISO, MDY DateStyle.

+
Immutable
to_char(date: date, format: string) → string

Convert a date to a string using the given format.

+
Stable
to_char(interval: interval) → string

Convert an interval to a string assuming the Postgres IntervalStyle.

+
Immutable
to_char(interval: interval, format: string) → string

Convert an interval to a string using the given format.

+
Stable
to_char(number: decimal, format: string) → string

Convert a decimal to a string using the given format.

+
Stable
to_char(number: float, format: string) → string

Convert a float to a string using the given format.

+
Stable
to_char(number: int, format: string) → string

Convert an integer to a string using the given format.

+
Stable
to_char(timestamp: timestamp) → string

Convert a timestamp to a string assuming the ISO, MDY DateStyle.

+
Immutable
to_char(timestamp: timestamp, format: string) → string

Convert a timestamp to a string using the given format.

+
Stable
to_char(timestamptz: timestamptz, format: string) → string

Convert a timestamp with time zone to a string using the given format.

+
Stable
to_date(date_string: string, format: string) → date

Convert a string to a date using the given format.

+
Stable
to_timestamp(date_string: string, format: string) → timestamptz

Convert a string to a timestamp with time zone using the given format.

+
Stable
to_timestamp(timestamp: float) → timestamptz

Convert Unix epoch (seconds since 1970-01-01 00:00:00+00) to timestamp with time zone.

+
Immutable
transaction_timestamp() → date

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
transaction_timestamp() → timestamp

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+
Stable
transaction_timestamp() → timestamptz

Returns the time of the current transaction.

+

The value is based on a timestamp picked when the transaction starts +and which stays constant throughout the transaction. This timestamp +has no relationship with the commit order of concurrent transactions.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
with_max_staleness(max_staleness: interval) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp within the staleness +bound that allows execution of the reads at the nearest available replica without blocking.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_max_staleness(max_staleness: interval, nearest_only: bool) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp within the staleness +bound that allows execution of the reads at the nearest available replica without blocking.

+

If nearest_only is set to true, reads that cannot be served using the nearest +available replica will error.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_min_timestamp(min_timestamp: timestamptz) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp before the min_timestamp +that allows execution of the reads at the nearest available replica without blocking.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
with_min_timestamp(min_timestamp: timestamptz, nearest_only: bool) → timestamptz

When used in the AS OF SYSTEM TIME clause of a single-statement, +read-only transaction, CockroachDB chooses the newest timestamp before the min_timestamp +that allows execution of the reads at the nearest available replica without blocking.

+

If nearest_only is set to true, reads that cannot be served using the nearest +available replica will error.

+

Note this function requires an enterprise license on a CCL distribution.

+
Volatile
+ +### Enum functions + + + + + + + + +
Function → ReturnsDescriptionVolatility
enum_first(val: anyenum) → anyenum

Returns the first value of the input enum type.

+
Stable
enum_last(val: anyenum) → anyenum

Returns the last value of the input enum type.

+
Stable
enum_range(lower: anyenum, upper: anyenum) → anyenum[]

Returns all values of the input enum in an ordered array between the two arguments (inclusive).

+
Stable
enum_range(val: anyenum) → anyenum[]

Returns all values of the input enum in an ordered array.

+
Stable
+ +### FLOAT functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
abs(val: decimal) → decimal

Calculates the absolute value of val.

+
Immutable
abs(val: float) → float

Calculates the absolute value of val.

+
Immutable
abs(val: int) → int

Calculates the absolute value of val.

+
Immutable
acos(val: float) → float

Calculates the inverse cosine of val.

+
Immutable
acosd(val: float) → float

Calculates the inverse cosine of val with the result in degrees

+
Immutable
acosh(val: float) → float

Calculates the inverse hyperbolic cosine of val.

+
Immutable
asin(val: float) → float

Calculates the inverse sine of val.

+
Immutable
asind(val: float) → float

Calculates the inverse sine of val with the result in degrees.

+
Immutable
asinh(val: float) → float

Calculates the inverse hyperbolic sine of val.

+
Immutable
atan(val: float) → float

Calculates the inverse tangent of val.

+
Immutable
atan2(x: float, y: float) → float

Calculates the inverse tangent of x/y.

+
Immutable
atan2d(x: float, y: float) → float

Calculates the inverse tangent of x/y with the result in degrees.

+
Immutable
atand(val: float) → float

Calculates the inverse tangent of val with the result in degrees.

+
Immutable
atanh(val: float) → float

Calculates the inverse hyperbolic tangent of val.

+
Immutable
cbrt(val: decimal) → decimal

Calculates the cube root (∛) of val.

+
Immutable
cbrt(val: float) → float

Calculates the cube root (∛) of val.

+
Immutable
ceil(val: decimal) → decimal

Calculates the smallest integer not smaller than val.

+
Immutable
ceil(val: float) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceil(val: int) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: decimal) → decimal

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: float) → float

Calculates the smallest integer not smaller than val.

+
Immutable
ceiling(val: int) → float

Calculates the smallest integer not smaller than val.

+
Immutable
cos(val: float) → float

Calculates the cosine of val.

+
Immutable
cosd(val: float) → float

Calculates the cosine of val where val is in degrees.

+
Immutable
cosh(val: float) → float

Calculates the hyperbolic cosine of val.

+
Immutable
cot(val: float) → float

Calculates the cotangent of val.

+
Immutable
cotd(val: float) → float

Calculates the cotangent of val where val is in degrees.

+
Immutable
degrees(val: float) → float

Converts val as a radian value to a degree value.

+
Immutable
div(x: decimal, y: decimal) → decimal

Calculates the integer quotient of x/y.

+
Immutable
div(x: float, y: float) → float

Calculates the integer quotient of x/y.

+
Immutable
div(x: int, y: int) → int

Calculates the integer quotient of x/y.

+
Immutable
exp(val: decimal) → decimal

Calculates e ^ val.

+
Immutable
exp(val: float) → float

Calculates e ^ val.

+
Immutable
floor(val: decimal) → decimal

Calculates the largest integer not greater than val.

+
Immutable
floor(val: float) → float

Calculates the largest integer not greater than val.

+
Immutable
floor(val: int) → float

Calculates the largest integer not greater than val.

+
Immutable
isnan(val: decimal) → bool

Returns true if val is NaN, false otherwise.

+
Immutable
isnan(val: float) → bool

Returns true if val is NaN, false otherwise.

+
Immutable
ln(val: decimal) → decimal

Calculates the natural log of val.

+
Immutable
ln(val: float) → float

Calculates the natural log of val.

+
Immutable
log(b: decimal, x: decimal) → decimal

Calculates the base b log of x.

+
Immutable
log(b: float, x: float) → float

Calculates the base b log of x.

+
Immutable
log(val: decimal) → decimal

Calculates the base 10 log of val.

+
Immutable
log(val: float) → float

Calculates the base 10 log of val.

+
Immutable
mod(x: decimal, y: decimal) → decimal

Calculates x%y.

+
Immutable
mod(x: float, y: float) → float

Calculates x%y.

+
Immutable
mod(x: int, y: int) → int

Calculates x%y.

+
Immutable
pi() → float

Returns the value for pi (3.141592653589793).

+
Immutable
pow(x: decimal, y: decimal) → decimal

Calculates x^y.

+
Immutable
pow(x: float, y: float) → float

Calculates x^y.

+
Immutable
pow(x: int, y: int) → int

Calculates x^y.

+
Immutable
power(x: decimal, y: decimal) → decimal

Calculates x^y.

+
Immutable
power(x: float, y: float) → float

Calculates x^y.

+
Immutable
power(x: int, y: int) → int

Calculates x^y.

+
Immutable
radians(val: float) → float

Converts val as a degree value to a radians value.

+
Immutable
random() → float

Returns a random floating-point number between 0 (inclusive) and 1 (exclusive). Note that the value contains at most 53 bits of randomness.

+
Volatile
round(input: decimal, decimal_accuracy: int) → decimal

Keeps decimal_accuracy number of figures to the right of the zero position in input using half away from zero rounding. If decimal_accuracy is not in the range -2^31…(2^31-1), the results are undefined.

+
Immutable
round(input: float, decimal_accuracy: int) → float

Keeps decimal_accuracy number of figures to the right of the zero position in input using half to even (banker’s) rounding.

+
Immutable
round(val: decimal) → decimal

Rounds val to the nearest integer, half away from zero: round(+/-2.4) = +/-2, round(+/-2.5) = +/-3.

+
Immutable
round(val: float) → float

Rounds val to the nearest integer using half to even (banker’s) rounding.

+
Immutable
setseed(seed: float) → void

Sets the seed for subsequent random() calls in this session (value between -1.0 and 1.0, inclusive). There are no guarantees as to how this affects the seed of random() calls that appear in the same query as setseed().

+
Volatile
sign(val: decimal) → decimal

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sign(val: float) → float

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sign(val: int) → int

Determines the sign of val: 1 for positive; 0 for 0 values; -1 for negative.

+
Immutable
sin(val: float) → float

Calculates the sine of val.

+
Immutable
sind(val: float) → float

Calculates the sine of val where val is in degrees.

+
Immutable
sinh(val: float) → float

Calculates the hyperbolic sine of val.

+
Immutable
sqrt(val: decimal) → decimal

Calculates the square root of val.

+
Immutable
sqrt(val: float) → float

Calculates the square root of val.

+
Immutable
tan(val: float) → float

Calculates the tangent of val.

+
Immutable
tand(val: float) → float

Calculates the tangent of val where val is in degrees.

+
Immutable
tanh(val: float) → float

Calculates the hyperbolic tangent of val.

+
Immutable
trunc(val: decimal) → decimal

Truncates the decimal values of val.

+
Immutable
trunc(val: decimal, scale: int) → decimal

Truncate val to scale decimal places

+
Immutable
trunc(val: float) → float

Truncates the decimal values of val.

+
Immutable
+ +### Full Text Search functions + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
phraseto_tsquery(config: string, text: string) → tsquery

Converts text to a tsquery, normalizing words according to the specified configuration. The <-> operator is inserted between each token in the input.

+
Immutable
phraseto_tsquery(text: string) → tsquery

Converts text to a tsquery, normalizing words according to the default configuration. The <-> operator is inserted between each token in the input.

+
Stable
plainto_tsquery(config: string, text: string) → tsquery

Converts text to a tsquery, normalizing words according to the specified configuration. The & operator is inserted between each token in the input.

+
Immutable
plainto_tsquery(text: string) → tsquery

Converts text to a tsquery, normalizing words according to the default configuration. The & operator is inserted between each token in the input.

+
Stable
to_tsquery(config: string, text: string) → tsquery

Converts the input text into a tsquery by normalizing each word in the input according to the specified configuration. The input must already be formatted like a tsquery, in other words, subsequent tokens must be connected by a tsquery operator (&, |, <->, !).

+
Immutable
to_tsquery(text: string) → tsquery

Converts the input text into a tsquery by normalizing each word in the input according to the default configuration. The input must already be formatted like a tsquery, in other words, subsequent tokens must be connected by a tsquery operator (&, |, <->, !).

+
Stable
to_tsvector(config: string, text: string) → tsvector

Converts text to a tsvector, normalizing words according to the specified configuration. Position information is included in the result.

+
Immutable
to_tsvector(text: string) → tsvector

Converts text to a tsvector, normalizing words according to the default configuration. Position information is included in the result.

+
Stable
ts_parse(parser_name: string, document: string) → tuple{int AS tokid, string AS token}

ts_parse parses the given document and returns a series of records, one for each token produced by parsing. Each record includes a tokid showing the assigned token type and a token which is the text of the token.

+
Stable
ts_rank(vector: tsvector, query: tsquery) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(vector: tsvector, query: tsquery, normalization: int) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(weights: float[], vector: tsvector, query: tsquery) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
ts_rank(weights: float[], vector: tsvector, query: tsquery, normalization: int) → float4

Ranks vectors based on the frequency of their matching lexemes.

+
Immutable
+ +### Fuzzy String Matching functions + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
daitch_mokotoff(source: string) → string[]

Returns an array of Daitch-Mokotoff soundex codes for the input string.

+
Immutable
dmetaphone(source: string) → string

Returns the primary Double Metaphone code for the input string.

+
Immutable
dmetaphone_alt(source: string) → string

Returns the alternate Double Metaphone code for the input string.

+
Immutable
levenshtein(source: string, target: string) → int

Calculates the Levenshtein distance between two strings. Maximum input length is 255 characters.

+
Immutable
levenshtein(source: string, target: string, ins_cost: int, del_cost: int, sub_cost: int) → int

Calculates the Levenshtein distance between two strings. The cost parameters specify how much to charge for each edit operation. Maximum input length is 255 characters.

+
Immutable
levenshtein_less_equal(source: string, target: string, ins_cost: int, del_cost: int, sub_cost: int, max_d: int) → int

Calculates the Levenshtein distance between two strings. The cost parameters specify how much to charge for each edit operation. If actual distance is less or equal then max_d, then it returns the distance. Otherwise this function returns a value greater than max_d. The maximum length of the input strings is 255 characters.

+
Immutable
levenshtein_less_equal(source: string, target: string, max_d: int) → int

Calculates the Levenshtein distance between two strings. If actual distance is less or equal then max_d, then it returns the distance. Otherwise this function returns a value greater than max_d. The maximum length of the input strings is 255 characters.

+
Immutable
metaphone(source: string, max_output_length: int) → string

Convert a string to its Metaphone code. Maximum input length is 255 characters

+
Immutable
soundex(source: string) → string

Convert a string to its Soundex code.

+
Immutable
+ +### ID generation functions + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
experimental_uuid_v4() → bytes

Returns a UUID.

+
Volatile
gen_random_ulid() → uuid

Generates a random ULID and returns it as a value of UUID type.

+
Volatile
gen_random_uuid() → uuid

Generates a random version 4 UUID, and returns it as a value of UUID type.

+
Volatile
unique_rowid() → int

Returns a unique ID used by CockroachDB to generate unique row IDs if a Primary Key isn’t defined for the table. The value is a combination of the insert timestamp and the ID of the node executing the statement, which guarantees this combination is globally unique. However, there can be gaps and the order is not completely guaranteed.

+
Volatile
unordered_unique_rowid() → int

Returns a unique ID. The value is a combination of the insert timestamp (bit-reversed) and the ID of the node executing the statement, which guarantees this combination is globally unique. The way it is generated is statistically likely to not have any ordering relative to previously generated values.

+
Volatile
uuid_generate_v1() → uuid

Generates a version 1 UUID, and returns it as a value of UUID type. To avoid exposing the server’s real MAC address, this uses a random MAC address and a timestamp. Essentially, this is an alias for uuid_generate_v1mc.

+
Volatile
uuid_generate_v1mc() → uuid

Generates a version 1 UUID, and returns it as a value of UUID type. This uses a random MAC address and a timestamp.

+
Volatile
uuid_generate_v3(namespace: uuid, name: string) → uuid

Generates a version 3 UUID in the given namespace using the specified input name, with md5 as the hashing method. The namespace should be one of the special constants produced by the uuid_ns_*() functions.

+
Immutable
uuid_generate_v4() → uuid

Generates a random version 4 UUID, and returns it as a value of UUID type.

+
Volatile
uuid_generate_v5(namespace: uuid, name: string) → uuid

Generates a version 5 UUID in the given namespace using the specified input name. This is similar to a version 3 UUID, except it uses SHA-1 for hashing.

+
Immutable
uuid_nil() → uuid

Returns a nil UUID constant.

+
Immutable
uuid_ns_dns() → uuid

Returns a constant designating the DNS namespace for UUIDs.

+
Immutable
uuid_ns_oid() → uuid

Returns a constant designating the ISO object identifier (OID) namespace for UUIDs. These are unrelated to the OID type used internally in the database.

+
Immutable
uuid_ns_url() → uuid

Returns a constant designating the URL namespace for UUIDs.

+
Immutable
uuid_ns_x500() → uuid

Returns a constant designating the X.500 distinguished name (DN) namespace for UUIDs.

+
Immutable
uuid_v4() → bytes

Returns a UUID.

+
Volatile
+ +### INET functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
abbrev(val: inet) → string

Converts the combined IP address and prefix length to an abbreviated display format as text. For INET types, this will omit the prefix length if it’s not the default (32 for IPv4, 128 for IPv6)

+

For example, abbrev('192.168.1.2/24') returns '192.168.1.2/24'

+
Immutable
broadcast(val: inet) → inet

Gets the broadcast address for the network address represented by the value.

+

For example, broadcast('192.168.1.2/24') returns '192.168.1.255/24'

+
Immutable
family(val: inet) → int

Extracts the IP family of the value; 4 for IPv4, 6 for IPv6.

+

For example, family('::1') returns 6

+
Immutable
host(val: inet) → string

Extracts the address part of the combined address/prefixlen value as text.

+

For example, host('192.168.1.2/16') returns '192.168.1.2'

+
Immutable
hostmask(val: inet) → inet

Creates an IP host mask corresponding to the prefix length in the value.

+

For example, hostmask('192.168.1.2/16') returns '0.0.255.255'

+
Immutable
masklen(val: inet) → int

Retrieves the prefix length stored in the value.

+

For example, masklen('192.168.1.2/16') returns 16

+
Immutable
netmask(val: inet) → inet

Creates an IP network mask corresponding to the prefix length in the value.

+

For example, netmask('192.168.1.2/16') returns '255.255.0.0'

+
Immutable
set_masklen(val: inet, prefixlen: int) → inet

Sets the prefix length of val to prefixlen.

+

For example, set_masklen('192.168.1.2', 16) returns '192.168.1.2/16'.

+
Immutable
+ +### INT functions + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
crc32c(bytes...) → int

Calculates the CRC-32 hash using the Castagnoli polynomial.

+
Leakproof
crc32c(string...) → int

Calculates the CRC-32 hash using the Castagnoli polynomial.

+
Leakproof
crc32ieee(bytes...) → int

Calculates the CRC-32 hash using the IEEE polynomial.

+
Leakproof
crc32ieee(string...) → int

Calculates the CRC-32 hash using the IEEE polynomial.

+
Leakproof
fnv32(bytes...) → int

Calculates the 32-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv32(string...) → int

Calculates the 32-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv32a(bytes...) → int

Calculates the 32-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv32a(string...) → int

Calculates the 32-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv64(bytes...) → int

Calculates the 64-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv64(string...) → int

Calculates the 64-bit FNV-1 hash value of a set of values.

+
Leakproof
fnv64a(bytes...) → int

Calculates the 64-bit FNV-1a hash value of a set of values.

+
Leakproof
fnv64a(string...) → int

Calculates the 64-bit FNV-1a hash value of a set of values.

+
Leakproof
width_bucket(operand: decimal, b1: decimal, b2: decimal, count: int) → int

return the bucket number to which operand would be assigned in a histogram having count equal-width buckets spanning the range b1 to b2. Returns 0 or count+1 for an input outside that range.

+
Immutable
width_bucket(operand: int, b1: int, b2: int, count: int) → int

return the bucket number to which operand would be assigned in a histogram having count equal-width buckets spanning the range b1 to b2.

+
Immutable
width_bucket(operand: anyelement, thresholds: anyelement[]) → int

return the bucket number to which operand would be assigned given an array listing the lower bounds of the buckets; returns 0 for an input less than the first lower bound; the thresholds array must be sorted, smallest first, or unexpected results will be obtained

+
Immutable
+ +### JSONB functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
array_to_json(array: anyelement[]) → jsonb

Returns the array as JSON or JSONB.

+
Stable
array_to_json(array: anyelement[], pretty_bool: bool) → jsonb

Returns the array as JSON or JSONB.

+
Stable
json_array_elements(input: jsonb) → jsonb

Expands a JSON array to a set of JSON values.

+
Immutable
json_array_elements_text(input: jsonb) → string

Expands a JSON array to a set of text values.

+
Immutable
json_array_length(json: jsonb) → int

Returns the number of elements in the outermost JSON or JSONB array.

+
Immutable
json_build_array(any...) → jsonb

Builds a possibly-heterogeneously-typed JSON or JSONB array out of a variadic argument list.

+
Stable
json_build_object(any...) → jsonb

Builds a JSON object out of a variadic argument list.

+
Stable
json_each(input: jsonb) → tuple{string AS key, jsonb AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs.

+
Immutable
json_each_text(input: jsonb) → tuple{string AS key, string AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs. The returned values will be of type text.

+
Immutable
json_extract_path(jsonb, string...) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
json_extract_path_text(jsonb, string...) → string

Returns the JSON value as text pointed to by the variadic arguments.

+
Immutable
json_object(keys: string[], values: string[]) → jsonb

This form of json_object takes keys and values pairwise from two separate arrays. In all other respects it is identical to the one-argument form.

+
Immutable
json_object(texts: string[]) → jsonb

Builds a JSON or JSONB object out of a text array. The array must have exactly one dimension with an even number of members, in which case they are taken as alternating key/value pairs.

+
Immutable
json_populate_record(base: anyelement, from_json: jsonb) → anyelement

Expands the object in from_json to a row whose columns match the record type defined by base.

+
Stable
json_populate_recordset(base: anyelement, from_json: jsonb) → anyelement

Expands the outermost array of objects in from_json to a set of rows whose columns match the record type defined by base

+
Stable
json_remove_path(val: jsonb, path: string[]) → jsonb

Remove the specified path from the JSON object.

+
Immutable
json_set(val: jsonb, path: string[], to: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
json_set(val: jsonb, path: string[], to: jsonb, create_missing: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If create_missing is false, new keys will not be inserted to objects and values will not be prepended or appended to arrays.

+
Immutable
json_strip_nulls(from_json: jsonb) → jsonb

Returns from_json with all object fields that have null values omitted. Other null values are untouched.

+
Immutable
json_typeof(val: jsonb) → string

Returns the type of the outermost JSON value as a text string.

+
Immutable
json_valid(string: string) → bool

Returns whether the given string is valid JSON.

+
Immutable
jsonb_array_elements(input: jsonb) → jsonb

Expands a JSON array to a set of JSON values.

+
Immutable
jsonb_array_elements_text(input: jsonb) → string

Expands a JSON array to a set of text values.

+
Immutable
jsonb_array_length(json: jsonb) → int

Returns the number of elements in the outermost JSON or JSONB array.

+
Immutable
jsonb_build_array(any...) → jsonb

Builds a possibly-heterogeneously-typed JSON or JSONB array out of a variadic argument list.

+
Stable
jsonb_build_object(any...) → jsonb

Builds a JSON object out of a variadic argument list.

+
Stable
jsonb_each(input: jsonb) → tuple{string AS key, jsonb AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs.

+
Immutable
jsonb_each_text(input: jsonb) → tuple{string AS key, string AS value}

Expands the outermost JSON or JSONB object into a set of key/value pairs. The returned values will be of type text.

+
Immutable
jsonb_exists_any(json: jsonb, array: string[]) → bool

Returns whether any of the strings in the text array exist as top-level keys or array elements

+
Immutable
jsonb_extract_path(jsonb, string...) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
jsonb_extract_path_text(jsonb, string...) → string

Returns the JSON value as text pointed to by the variadic arguments.

+
Immutable
jsonb_insert(target: jsonb, path: string[], new_val: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments. new_val will be inserted before path target.

+
Immutable
jsonb_insert(target: jsonb, path: string[], new_val: jsonb, insert_after: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If insert_after is true (default is false), new_val will be inserted after path target.

+
Immutable
jsonb_object(keys: string[], values: string[]) → jsonb

This form of json_object takes keys and values pairwise from two separate arrays. In all other respects it is identical to the one-argument form.

+
Immutable
jsonb_object(texts: string[]) → jsonb

Builds a JSON or JSONB object out of a text array. The array must have exactly one dimension with an even number of members, in which case they are taken as alternating key/value pairs.

+
Immutable
jsonb_populate_record(base: anyelement, from_json: jsonb) → anyelement

Expands the object in from_json to a row whose columns match the record type defined by base.

+
Stable
jsonb_populate_recordset(base: anyelement, from_json: jsonb) → anyelement

Expands the outermost array of objects in from_json to a set of rows whose columns match the record type defined by base

+
Stable
jsonb_pretty(val: jsonb) → string

Returns the given JSON value as a STRING indented and with newlines.

+
Immutable
jsonb_set(val: jsonb, path: string[], to: jsonb) → jsonb

Returns the JSON value pointed to by the variadic arguments.

+
Immutable
jsonb_set(val: jsonb, path: string[], to: jsonb, create_missing: bool) → jsonb

Returns the JSON value pointed to by the variadic arguments. If create_missing is false, new keys will not be inserted to objects and values will not be prepended or appended to arrays.

+
Immutable
jsonb_strip_nulls(from_json: jsonb) → jsonb

Returns from_json with all object fields that have null values omitted. Other null values are untouched.

+
Immutable
jsonb_typeof(val: jsonb) → string

Returns the type of the outermost JSON value as a text string.

+
Immutable
row_to_json(row: tuple) → jsonb

Returns the row as a JSON object.

+
Stable
to_json(val: anyelement) → jsonb

Returns the value as JSON or JSONB.

+
Stable
to_jsonb(val: anyelement) → jsonb

Returns the value as JSON or JSONB.

+
Stable
+ +### Jsonpath functions + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
jsonb_path_exists(target: jsonb, path: jsonpath) → bool

Checks whether the JSON path returns any item for the specified JSON value.

+
Immutable
jsonb_path_exists(target: jsonb, path: jsonpath, vars: jsonb) → bool

Checks whether the JSON path returns any item for the specified JSON value. +The vars argument must be a JSON object, and its fields provide named +values to be substituted into the jsonpath expression.

+
Immutable
jsonb_path_exists(target: jsonb, path: jsonpath, vars: jsonb, silent: bool) → bool

Checks whether the JSON path returns any item for the specified JSON value. +The vars argument must be a JSON object, and its fields provide named +values to be substituted into the jsonpath expression. If the silent +argument is true, the function suppresses the following errors: +missing object field or array element, unexpected JSON item type, +datetime and numeric errors.

+
Immutable
jsonb_path_match(target: jsonb, path: jsonpath) → bool

Returns the SQL boolean result of a JSON path predicate check +for the specified JSON value. (This is useful only with predicate check +expressions, not SQL-standard JSON path expressions, since it will +either fail or return NULL if the path result is not a single boolean +value.)

+
Immutable
jsonb_path_match(target: jsonb, path: jsonpath, vars: jsonb) → bool

Returns the SQL boolean result of a JSON path predicate check +for the specified JSON value. (This is useful only with predicate check +expressions, not SQL-standard JSON path expressions, since it will +either fail or return NULL if the path result is not a single boolean +value.) The vars argument must be a JSON object, and its fields provide +named values to be substituted into the jsonpath expression.

+
Immutable
jsonb_path_match(target: jsonb, path: jsonpath, vars: jsonb, silent: bool) → bool

Returns the SQL boolean result of a JSON path predicate check +for the specified JSON value. (This is useful only with predicate check +expressions, not SQL-standard JSON path expressions, since it will +either fail or return NULL if the path result is not a single boolean +value.) The vars argument must be a JSON object, and its fields provide +named values to be substituted into the jsonpath expression. If the +silent argument is true, the function suppresses the following errors: +missing object field or array element, unexpected JSON item type, +datetime and numeric errors.

+
Immutable
jsonb_path_query(target: jsonb, path: jsonpath) → jsonb

Returns all JSON items returned by the JSON path for the specified JSON value.

+
Immutable
jsonb_path_query(target: jsonb, path: jsonpath, vars: jsonb) → jsonb

Returns all JSON items returned by the JSON path for the specified JSON value. +The vars argument must be a JSON object, and its fields provide named values +to be substituted into the jsonpath expression.

+
Immutable
jsonb_path_query(target: jsonb, path: jsonpath, vars: jsonb, silent: bool) → jsonb

Returns all JSON items returned by the JSON path for the specified JSON value. +The vars argument must be a JSON object, and its fields provide named values +to be substituted into the jsonpath expression. If the silent argument is true, +the function suppresses the following errors: missing object field or array +element, unexpected JSON item type, datetime and numeric errors.

+
Immutable
jsonb_path_query_array(target: jsonb, path: jsonpath) → jsonb

Returns all JSON items returned by the JSON path for the +specified JSON value, as a JSON array.

+
Immutable
jsonb_path_query_array(target: jsonb, path: jsonpath, vars: jsonb) → jsonb

Returns all JSON items returned by the JSON path for the +specified JSON value, as a JSON array. The vars argument must be a +JSON object, and its fields provide named values to be substituted +into the jsonpath expression.

+
Immutable
jsonb_path_query_array(target: jsonb, path: jsonpath, vars: jsonb, silent: bool) → jsonb

Returns all JSON items returned by the JSON path for the +specified JSON value, as a JSON array. The vars argument must be a +JSON object, and its fields provide named values to be substituted +into the jsonpath expression. If the silent argument is true, the +function suppresses the following errors: missing object field or +array element, unexpected JSON item type, datetime and numeric errors.

+
Immutable
jsonb_path_query_first(target: jsonb, path: jsonpath) → jsonb

Returns the first JSON item returned by the JSON path for the +specified JSON value, or NULL if there are no results.

+
Immutable
jsonb_path_query_first(target: jsonb, path: jsonpath, vars: jsonb) → jsonb

Returns the first JSON item returned by the JSON path for the +specified JSON value, or NULL if there are no results. The vars +argument must be a JSON object, and its fields provide named values +to be substituted into the jsonpath expression.

+
Immutable
jsonb_path_query_first(target: jsonb, path: jsonpath, vars: jsonb, silent: bool) → jsonb

Returns the first JSON item returned by the JSON path for the +specified JSON value, or NULL if there are no results. The vars +argument must be a JSON object, and its fields provide named values +to be substituted into the jsonpath expression. If the silent argument is true, the +function suppresses the following errors: missing object field or +array element, unexpected JSON item type, datetime and numeric errors.

+
Immutable
+ +### LTree functions + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
index(a: ltree, b: ltree) → int

position of first occurrence of b in a; -1 if not found

+
Immutable
index(a: ltree, b: ltree, offset: int) → int

position of first occurrence of b in a, starting at offset; -1 if not found

+
Immutable
lca(ltree, ltree, ltree...) → ltree

lowest common ancestor, i.e., longest common prefix of paths

+
Immutable
lca(ltree[]: ltree[]) → ltree

lowest common ancestor, i.e., longest common prefix of paths

+
Immutable
ltree2text(ltree: ltree) → string

cast ltree to text

+
Immutable
nlevel(ltree: ltree) → int

number of labels in path ltree

+
Immutable
subltree(ltree: ltree, start: int, end: int) → ltree

subpath of ltree from position start to position end-1 (counting from 0)

+
Immutable
subpath(ltree: ltree, offset: int) → ltree

subpath of ltree starting at position offset, extending to end of path. If offset is negative, subpath starts that far from the end of the path.

+
Immutable
subpath(ltree: ltree, offset: int, length: int) → ltree

subpath of ltree starting at position offset, length length. If offset is negative, subpath starts that far from the end of the path. If length is negative, leaves that many labels off the end of the path.

+
Immutable
text2ltree(text: string) → ltree

cast text to ltree

+
Immutable
+ +### Multi-region functions + + + + + + + +
Function → ReturnsDescriptionVolatility
default_to_database_primary_region(val: string) → string

Returns the given region if the region has been added to the current database. +Otherwise, this will return the primary region of the current database. +This will error if the current database is not a multi-region database.

+
Stable
gateway_region() → string

Returns the region of the connection’s current node as defined by +the locality flag on node startup. Returns an error if no region is set.

+
Stable
rehome_row() → string

Returns the region of the connection’s current node as defined by +the locality flag on node startup. Returns an error if no region is set.

+
Stable
+ +### PGVector functions + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cosine_distance(v1: vector, v2: vector) → float

Returns the cosine distance between the two vectors.

+
Immutable
inner_product(v1: vector, v2: vector) → float

Returns the inner product between the two vectors.

+
Immutable
l1_distance(v1: vector, v2: vector) → float

Returns the Manhattan distance between the two vectors.

+
Immutable
l2_distance(v1: vector, v2: vector) → float

Returns the Euclidean distance between the two vectors.

+
Immutable
vector_dims(vector: vector) → int

Returns the number of dimensions in the vector.

+
Immutable
vector_norm(vector: vector) → float

Returns the Euclidean norm of the vector.

+
Immutable
+ +### STRING[] functions + + + + + + +
Function → ReturnsDescriptionVolatility
regexp_split_to_array(string: string, pattern: string) → string[]

Split string using a POSIX regular expression as the delimiter.

+
Immutable
regexp_split_to_array(string: string, pattern: string, flags: string) → string[]

Split string using a POSIX regular expression as the delimiter with flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
+ +### Sequence functions + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
currval(sequence_name: string) → int

Returns the latest value obtained with nextval for this sequence in this session.

+
Volatile
currval(sequence_name: regclass) → int

Returns the latest value obtained with nextval for this sequence in this session.

+
Volatile
lastval() → int

Return value most recently obtained with nextval in this session.

+
Volatile
nextval(sequence_name: string) → int

Advances the given sequence and returns its new value.

+
Volatile
nextval(sequence_name: regclass) → int

Advances the given sequence and returns its new value.

+
Volatile
setval(sequence_name: string, value: int) → int

Set the given sequence’s current value. The next call to nextval will return value + Increment

+
Volatile
setval(sequence_name: string, value: int, is_called: bool) → int

Set the given sequence’s current value. If is_called is false, the next call to nextval will return value; otherwise value + Increment.

+
Volatile
setval(sequence_name: regclass, value: int) → int

Set the given sequence’s current value. The next call to nextval will return value + Increment

+
Volatile
setval(sequence_name: regclass, value: int, is_called: bool) → int

Set the given sequence’s current value. If is_called is false, the next call to nextval will return value; otherwise value + Increment.

+
Volatile
+ +### Set-returning functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
aclexplode(aclitems: string[]) → tuple{oid AS grantor, oid AS grantee, string AS privilege_type, bool AS is_grantable}

Produces a virtual table containing aclitem privileges.

+
Stable
aclexplode(aclitems: aclitem[]) → tuple{oid AS grantor, oid AS grantee, string AS privilege_type, bool AS is_grantable}

Produces a virtual table containing aclitem privileges.

+
Stable
generate_series(start: int, end: int) → int

Produces a virtual table containing the integer values from start to end, inclusive.

+
Immutable
generate_series(start: int, end: int, step: int) → int

Produces a virtual table containing the integer values from start to end, inclusive, by increment of step.

+
Immutable
generate_series(start: timestamp, end: timestamp, step: interval) → timestamp

Produces a virtual table containing the timestamp values from start to end, inclusive, by increment of step.

+
Immutable
generate_series(start: timestamptz, end: timestamptz, step: interval) → timestamptz

Produces a virtual table containing the timestampTZ values from start to end, inclusive, by increment of step.

+
Immutable
generate_subscripts(array: anyelement[]) → int

Returns a series comprising the given array’s subscripts.

+
Immutable
generate_subscripts(array: anyelement[], dim: int) → int

Returns a series comprising the given array’s subscripts.

+
Immutable
generate_subscripts(array: anyelement[], dim: int, reverse: bool) → int

Returns a series comprising the given array’s subscripts.

+

When reverse is true, the series is returned in reverse order.

+
Immutable
information_schema._pg_expandarray(input: anyelement[]) → tuple{anyelement AS x, int AS n}

Returns the input array as a set of rows with an index

+
Immutable
json_object_keys(input: jsonb) → string

Returns sorted set of keys in the outermost JSON object.

+
Immutable
json_to_record(input: jsonb) → tuple

Builds an arbitrary record from a JSON object.

+
Stable
json_to_recordset(input: jsonb) → tuple

Builds an arbitrary set of records from a JSON array of objects.

+
Stable
jsonb_object_keys(input: jsonb) → string

Returns sorted set of keys in the outermost JSON object.

+
Immutable
jsonb_to_record(input: jsonb) → tuple

Builds an arbitrary record from a JSON object.

+
Stable
jsonb_to_recordset(input: jsonb) → tuple

Builds an arbitrary set of records from a JSON array of objects.

+
Stable
pg_get_keywords() → tuple{string AS word, string AS catcode, string AS catdesc}

Produces a virtual table containing the keywords known to the SQL parser.

+
Immutable
pg_options_to_table(options: string[]) → tuple{string AS option_name, string AS option_value}

Converts the options array format to a table.

+
Stable
regexp_split_to_table(string: string, pattern: string) → string

Split string using a POSIX regular expression as the delimiter.

+
Immutable
regexp_split_to_table(string: string, pattern: string, flags: string) → string

Split string using a POSIX regular expression as the delimiter with flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
unnest(anyelement[], anyelement[], anyelement[]...) → tuple{anyelement AS unnest, anyelement AS unnest, anyelement AS unnest}

Returns the input arrays as a set of rows

+
Immutable
unnest(input: anyelement[]) → anyelement

Returns the input array as a set of rows

+
Immutable
workload_index_recs() → tuple{string AS index_rec, bytes[] AS fingerprint_ids}

Returns index recommendations and the fingerprint ids that the indexes will impact

+
Immutable
workload_index_recs(timestamptz: timestamptz) → tuple{string AS index_rec, bytes[] AS fingerprint_ids}

Returns index recommendations and the fingerprint ids that the indexes will impact

+
Immutable
+ +### Spatial functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
_st_contains(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no points of geometry_b lie in the exterior of geometry_a, and there is at least one point in the interior of geometry_b that lies in the interior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_containsproperly(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_b intersects the interior of geometry_a but not the boundary or exterior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_coveredby(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_a is outside geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_coveredby(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_covers(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_b is outside geography_a.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_covers(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_crosses(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a has some - but not all - interior points in common with geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dfullywithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, inclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than or equal to distance units.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dfullywithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, exclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than distance units.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_dwithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_equals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is spatially equal to geometry_b, i.e. ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = true.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_intersects(geography_a: geography, geography_b: geography) → bool

Returns true if geography_a shares any portion of space with geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_intersects(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_overlaps(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a intersects but does not completely contain geometry_b, or vice versa. “Does not completely” implies ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = false.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_touches(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if the only points in common between geometry_a and geometry_b are on the boundary. Note points do not touch other points.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
_st_within(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is completely inside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant does not utilize any spatial index.

+
Immutable
addgeometrycolumn(catalog_name: string, schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(catalog_name: string, schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(schema_name: string, table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(table_name: string, column_name: string, srid: int, type: string, dimension: int) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
addgeometrycolumn(table_name: string, column_name: string, srid: int, type: string, dimension: int, use_typmod: bool) → string

Adds a new geometry column to an existing table and returns metadata about the column created.

+
Volatile
geometrytype(geometry: geometry) → string

Returns the type of geometry as a string.

+

This function utilizes the GEOS module.

+
Immutable
geomfromewkb(val: bytes) → geometry

Returns the Geometry from an EWKB representation.

+
Immutable
geomfromewkt(val: string) → geometry

Returns the Geometry from an EWKT representation.

+
Immutable
postgis_addbbox(geometry: geometry) → geometry

Compatibility placeholder function with PostGIS. This does not perform any operation on the Geometry.

+
Immutable
postgis_dropbbox(geometry: geometry) → geometry

Compatibility placeholder function with PostGIS. This does not perform any operation on the Geometry.

+
Immutable
postgis_extensions_upgrade() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_full_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_geos_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_getbbox(geometry: geometry) → box2d

Returns a box2d encapsulating the given Geometry.

+
Immutable
postgis_hasbbox(geometry: geometry) → bool

Returns whether a given Geometry has a bounding box. False for points and empty geometries; always true otherwise.

+
Immutable
postgis_lib_build_date() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_lib_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_liblwgeom_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_libxml_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_proj_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_build_date() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_installed() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_scripts_released() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
postgis_wagyu_version() → string

Compatibility placeholder function with PostGIS. Returns a fixed string based on PostGIS 3.0.1, with minor edits.

+
Immutable
st_3dlength(geometry: geometry) → float

Returns the 3-dimensional or 2-dimensional length of the geometry.

+

Note ST_3DLength is only valid for LineString or MultiLineString. +For 2-D lines it will return the 2-D length (same as ST_Length and ST_Length2D)

+

This function utilizes the GEOS module.

+
Immutable
st_addmeasure(geometry: geometry, start: float, end: float) → geometry

Returns a copy of a LineString or MultiLineString with measure coordinates linearly interpolated between the specified start and end values. Any existing M coordinates will be overwritten.

+
Immutable
st_addpoint(line_string: geometry, point: geometry) → geometry

Adds a Point to the end of a LineString.

+
Immutable
st_addpoint(line_string: geometry, point: geometry, index: int) → geometry

Adds a Point to a LineString at the given 0-based index (-1 to append).

+
Immutable
st_affine(geometry: geometry, a: float, b: float, c: float, d: float, e: float, f: float, g: float, h: float, i: float, x_off: float, y_off: float, z_off: float) → geometry

Applies a 3D affine transformation to the given geometry.

+

The matrix transformation will be applied as follows for each coordinate: +/ a b c x_off \ / x
+| d e f y_off | | y | +| g h i z_off | | z | +\ 0 0 0 1 / \ 0 /

+
Immutable
st_affine(geometry: geometry, a: float, b: float, d: float, e: float, x_off: float, y_off: float) → geometry

Applies a 2D affine transformation to the given geometry.

+

The matrix transformation will be applied as follows for each coordinate: +/ a b x_off \ / x
+| d e y_off | | y | +\ 0 0 1 / \ 0 /

+
Immutable
st_angle(line1: geometry, line2: geometry) → float

Returns the clockwise angle between two LINESTRING geometries, treating them as vectors between their start- and endpoints. Returns NULL if any vectors have 0 length.

+
Immutable
st_angle(point1: geometry, point2: geometry, point3: geometry) → float

Returns the clockwise angle between the vectors formed by point2,point1 and point2,point3. The arguments must be POINT geometries. Returns NULL if any vectors have 0 length.

+
Immutable
st_angle(point1: geometry, point2: geometry, point3: geometry, point4: geometry) → float

Returns the clockwise angle between the vectors formed by point1,point2 and point3,point4. The arguments must be POINT geometries. Returns NULL if any vectors have 0 length.

+
Immutable
st_area(geography: geography) → float

Returns the area of the given geography in meters^2. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_area(geography: geography, use_spheroid: bool) → float

Returns the area of the given geography in meters^2.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_area(geometry: geometry) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_area(geometry_str: string) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_area2d(geometry: geometry) → float

Returns the area of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_asbinary(geography: geography) → bytes

Returns the WKB representation of a given Geography.

+
Immutable
st_asbinary(geography: geography, xdr_or_ndr: string) → bytes

Returns the WKB representation of a given Geography. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_asbinary(geometry: geometry) → bytes

Returns the WKB representation of a given Geometry.

+
Immutable
st_asbinary(geometry: geometry, xdr_or_ndr: string) → bytes

Returns the WKB representation of a given Geometry. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_asencodedpolyline(geometry: geometry) → string

Returns the geometry as an Encoded Polyline. +This format is used by Google Maps with precision=5 and by Open Source Routing Machine with precision=5 and 6. +Preserves 5 decimal places.

+
Immutable
st_asencodedpolyline(geometry: geometry, precision: int4) → string

Returns the geometry as an Encoded Polyline. +This format is used by Google Maps with precision=5 and by Open Source Routing Machine with precision=5 and 6. +Precision specifies how many decimal places will be preserved in Encoded Polyline. Value should be the same on encoding and decoding, or coordinates will be incorrect.

+
Immutable
st_asewkb(geography: geography) → bytes

Returns the EWKB representation of a given Geography.

+
Immutable
st_asewkb(geometry: geometry) → bytes

Returns the EWKB representation of a given Geometry.

+
Immutable
st_asewkt(geography: geography) → string

Returns the EWKT representation of a given Geography. A default of 15 decimal digits is used.

+
Immutable
st_asewkt(geography: geography, max_decimal_digits: int) → string

Returns the EWKT representation of a given Geography. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_asewkt(geometry: geometry) → string

Returns the EWKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+
Immutable
st_asewkt(geometry: geometry, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_asewkt(geometry_str: string) → string

Returns the EWKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asewkt(geometry_str: string, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geography: geography) → string

Returns the GeoJSON representation of a given Geography. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(geography: geography, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geography with max_decimal_digits output for each coordinate value.

+
Immutable
st_asgeojson(geography: geography, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geography with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option (default for Geography)
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326
  • +
+
Immutable
st_asgeojson(geometry: geometry) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(geometry: geometry, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+
Immutable
st_asgeojson(geometry: geometry, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326 (default for Geometry)
  • +
+
Immutable
st_asgeojson(geometry_str: string) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geometry_str: string, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(geometry_str: string, max_decimal_digits: int, options: int) → string

Returns the GeoJSON representation of a given Geometry with max_decimal_digits output for each coordinate value.

+

Options is a flag that can be bitmasked. The options are:

+
    +
  • 0: no option
  • +
  • 1: GeoJSON BBOX
  • +
  • 2: GeoJSON Short CRS (e.g EPSG:4326)
  • +
  • 4: GeoJSON Long CRS (e.g urn:ogc:def:crs:EPSG::4326)
  • +
  • 8: GeoJSON Short CRS if not EPSG:4326 (default for Geometry)
  • +
+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asgeojson(row: tuple) → string

Returns the GeoJSON representation of a given Geometry. Coordinates have a maximum of 9 decimal digits.

+
Immutable
st_asgeojson(row: tuple, geo_column: string) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. Coordinates have a maximum of 9 decimal digits.

+
Stable
st_asgeojson(row: tuple, geo_column: string, max_decimal_digits: int) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. max_decimal_digits will be output for each coordinate value.

+
Stable
st_asgeojson(row: tuple, geo_column: string, max_decimal_digits: int, pretty: bool) → string

Returns the GeoJSON representation of a given Geometry, using geo_column as the geometry for the given Feature. max_decimal_digits will be output for each coordinate value. Output will be pretty printed in JSON if pretty is true.

+
Stable
st_ashexewkb(geography: geography) → string

Returns the EWKB representation in hex of a given Geography.

+
Immutable
st_ashexewkb(geography: geography, xdr_or_ndr: string) → string

Returns the EWKB representation in hex of a given Geography. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_ashexewkb(geometry: geometry) → string

Returns the EWKB representation in hex of a given Geometry.

+
Immutable
st_ashexewkb(geometry: geometry, xdr_or_ndr: string) → string

Returns the EWKB representation in hex of a given Geometry. This variant has a second argument denoting the encoding - xdr for big endian and ndr for little endian.

+
Immutable
st_ashexwkb(geography: geography) → string

Returns the WKB representation in hex of a given Geography.

+
Immutable
st_ashexwkb(geometry: geometry) → string

Returns the WKB representation in hex of a given Geometry.

+
Immutable
st_askml(geography: geography) → string

Returns the KML representation of a given Geography.

+
Immutable
st_askml(geometry: geometry) → string

Returns the KML representation of a given Geometry.

+
Immutable
st_askml(geometry_str: string) → string

Returns the KML representation of a given Geometry.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds. +Uses 256 as the buffer size in tile coordinate space for geometry clipping. +Uses 4096 as the tile extent size in tile coordinate space.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry will be clipped and can be transformed.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds. +Uses 256 as the buffer size in tile coordinate space for geometry clipping.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry will be clipped and can be transformed.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int, buffer: int) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry will be clipped and can be transformed.

+
Immutable
st_asmvtgeom(geometry: geometry, bbox: box2d, extent: int, buffer: int, clip: bool) → geometry

Transforms a geometry into the coordinate space of a MVT (Mapbox Vector Tile) tile, clipping it to the tile bounds if required.

+

The geometry must be in the coordinate system of the target map. +The function attempts to preserve geometry validity, and corrects it if needed. This may cause the result geometry to collapse to a lower dimension. +The rectangular bounds of the tile in the target map coordinate space must be provided, so the geometry can be transformed, and clipped if required.

+
Immutable
st_astext(geography: geography) → string

Returns the WKT representation of a given Geography. A default of 15 decimal digits is used.

+
Immutable
st_astext(geography: geography, max_decimal_digits: int) → string

Returns the WKT representation of a given Geography. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_astext(geometry: geometry) → string

Returns the WKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+
Immutable
st_astext(geometry: geometry, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+
Immutable
st_astext(geometry_str: string) → string

Returns the WKT representation of a given Geometry. A maximum of 15 decimal digits is used.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_astext(geometry_str: string, max_decimal_digits: int) → string

Returns the WKT representation of a given Geometry. The max_decimal_digits parameter controls the maximum decimal digits to print after the .. Use -1 to print as many digits as required to rebuild the same number.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int, precision_z: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_astwkb(geometry: geometry, precision_xy: int, precision_z: int, precision_m: int) → bytes

Returns the TWKB representation of a given geometry.

+
Immutable
st_azimuth(geography_a: geography, geography_b: geography) → float

Returns the azimuth in radians of the segment defined by the given point geographies, or NULL if the two points are coincident. It is solved using the Inverse geodesic problem.

+

The azimuth angle is referenced from north, and is positive clockwise: North = 0; East = π/2; South = π; West = 3π/2.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_azimuth(geometry_a: geometry, geometry_b: geometry) → float

Returns the azimuth in radians of the segment defined by the given point geometries, or NULL if the two points are coincident.

+

The azimuth angle is referenced from north, and is positive clockwise: North = 0; East = π/2; South = π; West = 3π/2.

+
Immutable
st_bdpolyfromtext(str: string, srid: int) → geometry

Returns a Polygon from multilinestring WKT with a SRID. If the input is not a multilinestring an error will be thrown.

+
Immutable
st_boundary(geometry: geometry) → geometry

Returns the closure of the combinatorial boundary of this Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_box2dfromgeohash(geohash: string) → box2d

Return a Box2D from a GeoHash string with max precision.

+
Immutable
st_box2dfromgeohash(geohash: string, precision: int) → box2d

Return a Box2D from a GeoHash string with supplied precision.

+
Immutable
st_buffer(geography: geography, distance: float) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geography: geography, distance: float, buffer_style_params: string) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geography: geography, distance: float, quad_segs: int) → geography

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+
Immutable
st_buffer(geometry: geometry, distance: decimal) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float, buffer_style_params: string) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: float, quad_segs: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry: geometry, distance: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_buffer(geometry_str: string, distance: decimal) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float, buffer_style_params: string) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant takes in a space-separated parameter string, which will augment the buffer styles. Valid parameters are:

+
    +
  • quad_segs=<int>, default 8
  • +
  • endcap=<round|flat|butt|square>, default round
  • +
  • join=<round|mitre|miter|bevel>, default round
  • +
  • side=<both|left|right>, default both
  • +
  • mitre_limit=<float>, default 5.0
  • +
+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: float, quad_segs: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance from the +given Geometry.

+

This variant approximates the circle into quad_seg segments per line (the default is 8).

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_buffer(geometry_str: string, distance: int) → geometry

Returns a Geometry that represents all points whose distance is less than or equal to the given distance +from the given Geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_centroid(geography: geography) → geography

Returns the centroid of given geography. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_centroid(geography: geography, use_spheroid: bool) → geography

Returns the centroid of given geography.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_centroid(geometry: geometry) → geometry

Returns the centroid of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_centroid(geometry_str: string) → geometry

Returns the centroid of the given geometry.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_clipbybox2d(geometry: geometry, box2d: box2d) → geometry

Clips the geometry to conform to the bounding box specified by box2d.

+
Immutable
st_closestpoint(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the 2-dimensional point on geometry_a that is closest to geometry_b. This is the first point of the shortest line.

+
Immutable
st_collectionextract(geometry: geometry, type: int) → geometry

Given a collection, returns a multitype consisting only of elements of the specified type. If there are no elements of the given type, an EMPTY geometry is returned. Types are specified as 1=POINT, 2=LINESTRING, 3=POLYGON - other types are not supported.

+
Immutable
st_collectionhomogenize(geometry: geometry) → geometry

Returns the “simplest” representation of a collection’s contents. Collections of a single type will be returned as an appropriate multitype, or a singleton if it only contains a single geometry.

+
Immutable
st_combinebbox(box2d: box2d, geometry: geometry) → box2d

Combines the current bounding box with the bounding box of the Geometry.

+
Immutable
st_contains(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no points of geometry_b lie in the exterior of geometry_a, and there is at least one point in the interior of geometry_b that lies in the interior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_containsproperly(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_b intersects the interior of geometry_a but not the boundary or exterior of geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_convexhull(geometry: geometry) → geometry

Returns a geometry that represents the Convex Hull of the given geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_coorddim(geometry: geometry) → int

Returns the number of coordinate dimensions of a given Geometry.

+
Immutable
st_coveredby(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_a is outside geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_coveredby(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_coveredby(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if no point in geometry_a is outside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_covers(geography_a: geography, geography_b: geography) → bool

Returns true if no point in geography_b is outside geography_a.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_covers(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_covers(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if no point in geometry_b is outside geometry_a.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_crosses(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a has some - but not all - interior points in common with geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dfullywithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, inclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than or equal to distance units.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dfullywithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if every pair of points comprising geometry_a and geometry_b are within distance units, exclusive. In other words, the ST_MaxDistance between geometry_a and geometry_b is less than distance units.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_difference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the difference of two Geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_dimension(geometry: geometry) → int

Returns the number of topological dimensions of a given Geometry.

+
Immutable
st_disjoint(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a does not overlap, touch or is within geometry_b.

+

This function utilizes the GEOS module.

+
Immutable
st_distance(geography_a: geography, geography_b: geography) → float

Returns the distance in meters between geography_a and geography_b. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_distance(geography_a: geography, geography_b: geography, use_spheroid: bool) → float

Returns the distance in meters between geography_a and geography_b.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_distance(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance between the given geometries.

+
Immutable
st_distance(geometry_a_str: string, geometry_b_str: string) → float

Returns the distance between the given geometries.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_distancesphere(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance in meters between geometry_a and geometry_b assuming the coordinates represent lng/lat points on a sphere.

+

This function utilizes the S2 library for spherical calculations.

+
Immutable
st_distancespheroid(geometry_a: geometry, geometry_b: geometry) → float

Returns the distance in meters between geometry_a and geometry_b assuming the coordinates represent lng/lat points on a spheroid.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_dwithin(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, inclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithin(geometry_a_str: string, geometry_b_str: string, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, inclusive.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive. Uses a spheroid to perform the operation.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geography_a: geography, geography_b: geography, distance: float, use_spheroid: bool) → bool

Returns true if any of geography_a is within distance meters of geography_b, exclusive.

+

When operating on a spheroid, this function will use the sphere to calculate the closest two points. The spheroid distance between these two points is calculated using GeographicLib. This follows observed PostGIS behavior.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geometry_a: geometry, geometry_b: geometry, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_dwithinexclusive(geometry_a_str: string, geometry_b_str: string, distance: float) → bool

Returns true if any of geometry_a is within distance units of geometry_b, exclusive.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_endpoint(geometry: geometry) → geometry

Returns the last point of a geometry which has shape LineString. Returns NULL if the geometry is not a LineString.

+
Immutable
st_envelope(box2d: box2d) → geometry

Returns a bounding geometry for the given box.

+
Immutable
st_envelope(geometry: geometry) → geometry

Returns a bounding envelope for the given geometry.

+

For geometries which have a POINT or LINESTRING bounding box (i.e. is a single point +or a horizontal or vertical line), a POINT or LINESTRING is returned. Otherwise, the +returned POLYGON will be ordered Bottom Left, Top Left, Top Right, Bottom Right, +Bottom Left.

+
Immutable
st_equals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is spatially equal to geometry_b, i.e. ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = true.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_estimatedextent(schema_name: string, table_name: string, geocolumn_name: string) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+
Stable
st_estimatedextent(schema_name: string, table_name: string, geocolumn_name: string, parent_only: bool) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+

The parent_only boolean is always ignored.

+
Stable
st_estimatedextent(table_name: string, geocolumn_name: string) → box2d

Returns the estimated extent of the geometries in the column of the given table. This currently always returns NULL.

+
Stable
st_expand(box2d: box2d, delta: float) → box2d

Extends the box2d by delta units across all dimensions.

+
Immutable
st_expand(box2d: box2d, delta_x: float, delta_y: float) → box2d

Extends the box2d by delta_x units in the x dimension and delta_y units in the y dimension.

+
Immutable
st_expand(geometry: geometry, delta: float) → geometry

Extends the bounding box represented by the geometry by delta units across all dimensions, returning a Polygon representing the new bounding box.

+
Immutable
st_expand(geometry: geometry, delta_x: float, delta_y: float) → geometry

Extends the bounding box represented by the geometry by delta_x units in the x dimension and delta_y units in the y dimension, returning a Polygon representing the new bounding box.

+
Immutable
st_exteriorring(geometry: geometry) → geometry

Returns the exterior ring of a Polygon as a LineString. Returns NULL if the shape is not a Polygon.

+
Immutable
st_flipcoordinates(geometry: geometry) → geometry

Returns a new geometry with the X and Y axes flipped.

+
Immutable
st_force2d(geometry: geometry) → geometry

Returns a Geometry that is forced into XY layout with any Z or M dimensions discarded.

+
Immutable
st_force3d(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3d(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3dm(geometry: geometry) → geometry

Returns a Geometry that is forced into XYM layout. If a M coordinate doesn’t exist, it will be set to 0. If a Z coordinate is present, it will be discarded.

+
Immutable
st_force3dm(geometry: geometry, defaultM: float) → geometry

Returns a Geometry that is forced into XYM layout. If a M coordinate doesn’t exist, it will be set to the specified default M value. If a Z coordinate is present, it will be discarded.

+
Immutable
st_force3dz(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate is present, it will be discarded.

+
Immutable
st_force3dz(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate is present, it will be discarded.

+
Immutable
st_force4d(geometry: geometry) → geometry

Returns a Geometry that is forced into XYZM layout. If a Z coordinate doesn’t exist, it will be set to 0. If a M coordinate doesn’t exist, it will be set to 0.

+
Immutable
st_force4d(geometry: geometry, defaultZ: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified default Z value. If a M coordinate doesn’t exist, it will be set to 0.

+
Immutable
st_force4d(geometry: geometry, defaultZ: float, defaultM: float) → geometry

Returns a Geometry that is forced into XYZ layout. If a Z coordinate doesn’t exist, it will be set to the specified Z value. If a M coordinate doesn’t exist, it will be set to the specified M value.

+
Immutable
st_forcecollection(geometry: geometry) → geometry

Converts the geometry into a GeometryCollection.

+
Immutable
st_forcepolygonccw(geometry: geometry) → geometry

Returns a Geometry where all Polygon objects have exterior rings in the counter-clockwise orientation and interior rings in the clockwise orientation. Non-Polygon objects are unchanged.

+
Immutable
st_forcepolygoncw(geometry: geometry) → geometry

Returns a Geometry where all Polygon objects have exterior rings in the clockwise orientation and interior rings in the counter-clockwise orientation. Non-Polygon objects are unchanged.

+
Immutable
st_frechetdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the Frechet distance between the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_frechetdistance(geometry_a: geometry, geometry_b: geometry, densify_frac: float) → float

Returns the Frechet distance between the given geometries, with the given segment densification (range 0.0-1.0, -1 to disable).

+

Smaller densify_frac gives a more accurate Fréchet distance. However, the computation time and memory usage increases with the square of the number of subsegments.

+

This function utilizes the GEOS module.

+
Immutable
st_generatepoints(geometry: geometry, npoints: int4) → geometry

Generates pseudo-random points until the requested number are found within the input area. Uses system time as a seed. +The requested number of points must not be larger than 65336.

+
Volatile
st_generatepoints(geometry: geometry, npoints: int4, seed: int4) → geometry

Generates pseudo-random points until the requested number are found within the input area. +The requested number of points must not be larger than 65336.

+
Immutable
st_geogfromewkb(val: bytes) → geography

Returns the Geography from an EWKB representation.

+
Immutable
st_geogfromewkt(val: string) → geography

Returns the Geography from an EWKT representation.

+
Immutable
st_geogfromgeojson(val: string) → geography

Returns the Geography from a GeoJSON representation.

+
Immutable
st_geogfromgeojson(val: jsonb) → geography

Returns the Geography from a GeoJSON representation.

+
Immutable
st_geogfromtext(str: string, srid: int) → geography

Returns the Geography from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geogfromtext(val: string) → geography

Returns the Geography from a WKT or EWKT representation.

+
Immutable
st_geogfromwkb(bytes: bytes, srid: int) → geography

Returns the Geography from a WKB (or EWKB) representation with the given SRID set.

+
Immutable
st_geogfromwkb(val: bytes) → geography

Returns the Geography from a WKB (or EWKB) representation.

+
Immutable
st_geographyfromtext(str: string, srid: int) → geography

Returns the Geography from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geographyfromtext(val: string) → geography

Returns the Geography from a WKT or EWKT representation.

+
Immutable
st_geohash(geography: geography) → string

Returns a GeoHash representation of the geography with full precision if a point is provided, or with variable precision based on the size of the feature.

+
Immutable
st_geohash(geography: geography, precision: int) → string

Returns a GeoHash representation of the geography with the supplied precision.

+
Immutable
st_geohash(geometry: geometry) → string

Returns a GeoHash representation of the geometry with full precision if a point is provided, or with variable precision based on the size of the feature. This will error if any coordinates are outside the bounds of longitude/latitude.

+
Immutable
st_geohash(geometry: geometry, precision: int) → string

Returns a GeoHash representation of the geometry with the supplied precision. This will error if any coordinates are outside the bounds of longitude/latitude.

+
Immutable
st_geomcollfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not GeometryCollection, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geomcollfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geomcollfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geomcollfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not GeometryCollection, NULL is returned.

+
Immutable
st_geometryfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geometryfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_geometryn(geometry: geometry, n: int) → geometry

Returns the n-th Geometry (1-indexed). Returns NULL if out of bounds.

+
Immutable
st_geometrytype(geometry: geometry) → string

Returns the type of geometry as a string prefixed with ST_.

+

This function utilizes the GEOS module.

+
Immutable
st_geomfromewkb(val: bytes) → geometry

Returns the Geometry from an EWKB representation.

+
Immutable
st_geomfromewkt(val: string) → geometry

Returns the Geometry from an EWKT representation.

+
Immutable
st_geomfromgeohash(geohash: string) → geometry

Return a POLYGON Geometry from a GeoHash string with max precision.

+
Immutable
st_geomfromgeohash(geohash: string, precision: int) → geometry

Return a POLYGON Geometry from a GeoHash string with supplied precision.

+
Immutable
st_geomfromgeojson(val: string) → geometry

Returns the Geometry from a GeoJSON representation.

+
Immutable
st_geomfromgeojson(val: jsonb) → geometry

Returns the Geometry from a GeoJSON representation.

+
Immutable
st_geomfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_geomfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_geomfromwkb(bytes: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with the given SRID set.

+
Immutable
st_geomfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation.

+
Immutable
st_hasarc(geometry: geometry) → bool

Returns whether there is a CIRCULARSTRING in the geometry.

+
Immutable
st_hausdorffdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the Hausdorff distance between the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_hausdorffdistance(geometry_a: geometry, geometry_b: geometry, densify_frac: float) → float

Returns the Hausdorff distance between the given geometries, with the given segment densification (range 0.0-1.0).

+

This function utilizes the GEOS module.

+
Immutable
st_interiorringn(geometry: geometry, n: int) → geometry

Returns the n-th (1-indexed) interior ring of a Polygon as a LineString. Returns NULL if the shape is not a Polygon, or the ring does not exist.

+
Immutable
st_intersection(geography_a: geography, geography_b: geography) → geography

Returns the point intersections of the given geographies.

+

This operation is done by transforming the object into a Geometry. This occurs by translating +the Geography objects into Geometry objects before applying an LAEA, UTM or Web Mercator +based projection based on the bounding boxes of the given Geography objects. When the result is +calculated, the result is transformed back into a Geography with SRID 4326.

+

This function utilizes the GEOS module.

+
Immutable
st_intersection(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the point intersections of the given geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_intersection(geometry_a_str: string, geometry_b_str: string) → geometry

Returns the point intersections of the given geometries.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_intersects(geography_a: geography, geography_b: geography) → bool

Returns true if geography_a shares any portion of space with geography_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the S2 library for spherical calculations.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_intersects(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_intersects(geometry_a_str: string, geometry_b_str: string) → bool

Returns true if geometry_a shares any portion of space with geometry_b.

+

The calculations performed have a precision of 1cm.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_isclosed(geometry: geometry) → bool

Returns whether the geometry is closed as defined by whether the start and end points are coincident. Points are considered closed, empty geometries are not. For collections and multi-types, all members must be closed, as must all polygon rings.

+
Immutable
st_iscollection(geometry: geometry) → bool

Returns whether the geometry is of a collection type (including multi-types).

+
Immutable
st_isempty(geometry: geometry) → bool

Returns whether the geometry is empty.

+
Immutable
st_ispolygonccw(geometry: geometry) → bool

Returns whether the Polygon objects inside the Geometry have exterior rings in the counter-clockwise orientation and interior rings in the clockwise orientation. Non-Polygon objects are considered counter-clockwise.

+
Immutable
st_ispolygoncw(geometry: geometry) → bool

Returns whether the Polygon objects inside the Geometry have exterior rings in the clockwise orientation and interior rings in the counter-clockwise orientation. Non-Polygon objects are considered clockwise.

+
Immutable
st_isring(geometry: geometry) → bool

Returns whether the geometry is a single linestring that is closed and simple, as defined by ST_IsClosed and ST_IsSimple.

+

This function utilizes the GEOS module.

+
Immutable
st_issimple(geometry: geometry) → bool

Returns true if the geometry has no anomalous geometric points, e.g. that it intersects with or lies tangent to itself.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalid(geometry: geometry) → bool

Returns whether the geometry is valid as defined by the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalid(geometry: geometry, flags: int) → bool

Returns whether the geometry is valid.

+

For flags=0, validity is defined by the OGC spec.

+

For flags=1, validity considers self-intersecting rings forming holes as valid as per ESRI. This is not valid under OGC and CRDB spatial operations may not operate correctly.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidreason(geometry: geometry) → string

Returns a string containing the reason the geometry is invalid along with the point of interest, or “Valid Geometry” if it is valid. Validity is defined by the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidreason(geometry: geometry, flags: int) → string

Returns the reason the geometry is invalid or “Valid Geometry” if it is valid.

+

For flags=0, validity is defined by the OGC spec.

+

For flags=1, validity considers self-intersecting rings forming holes as valid as per ESRI. This is not valid under OGC and CRDB spatial operations may not operate correctly.

+

This function utilizes the GEOS module.

+
Immutable
st_isvalidtrajectory(geometry: geometry) → bool

Returns whether the geometry encodes a valid trajectory.

+

Note the geometry must be a LineString with M coordinates.

+
Immutable
st_length(geography: geography) → float

Returns the length of the given geography in meters. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_length(geography: geography, use_spheroid: bool) → float

Returns the length of the given geography in meters.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_length(geometry: geometry) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+
Immutable
st_length(geometry_str: string) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+

This variant will cast all geometry_str arguments into Geometry types.

+
Immutable
st_length2d(geometry: geometry) → float

Returns the length of the given geometry.

+

Note ST_Length is only valid for LineString - use ST_Perimeter for Polygon.

+

This function utilizes the GEOS module.

+
Immutable
st_linecrossingdirection(linestring_a: geometry, linestring_b: geometry) → int

Returns an integer value defining behavior of crossing of lines: +0: lines do not cross, +-1: linestring_b crosses linestring_a from right to left, +1: linestring_b crosses linestring_a from left to right, +-2: linestring_b crosses linestring_a multiple times from right to left, +2: linestring_b crosses linestring_a multiple times from left to right, +-3: linestring_b crosses linestring_a multiple times from left to left, +3: linestring_b crosses linestring_a multiple times from right to right.

+

Note that the top vertex of the segment touching another line does not count as a crossing, but the bottom vertex of segment touching another line is considered a crossing.

+
Immutable
st_linefromencodedpolyline(encoded_polyline: string) → geometry

Creates a LineString from an Encoded Polyline string.

+

Returns valid results only if the polyline was encoded with 5 decimal places.

+

See http://developers.google.com/maps/documentation/utilities/polylinealgorithm

+
Immutable
st_linefromencodedpolyline(encoded_polyline: string, precision: int4) → geometry

Creates a LineString from an Encoded Polyline string.

+

Precision specifies how many decimal places will be preserved in Encoded Polyline. Value should be the same on encoding and decoding, or coordinates will be incorrect.

+

See http://developers.google.com/maps/documentation/utilities/polylinealgorithm

+
Immutable
st_linefrommultipoint(geometry: geometry) → geometry

Creates a LineString from a MultiPoint geometry.

+
Immutable
st_linefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not LineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_linefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_lineinterpolatepoint(geometry: geometry, fraction: float) → geometry

Returns a point along the given LineString which is at given fraction of LineString’s total length.

+

This function utilizes the GEOS module.

+
Immutable
st_lineinterpolatepoints(geometry: geometry, fraction: float) → geometry

Returns one or more points along the LineString which are at integral multiples of the given fraction of the LineString’s total length.

+

Note If the result has zero or one points, it will be returned as a POINT. If it has two or more points, it will be returned as a MULTIPOINT.

+

This function utilizes the GEOS module.

+
Immutable
st_lineinterpolatepoints(geometry: geometry, fraction: float, repeat: bool) → geometry

Returns one or more points along the LineString which are at integral multiples of the given fraction of the LineString’s total length. If repeat is false (default true) then it returns the first point.

+

Note If the result has zero or one points, it will be returned as a POINT. If it has two or more points, it will be returned as a MULTIPOINT.

+

This function utilizes the GEOS module.

+
Immutable
st_linelocatepoint(line: geometry, point: geometry) → float

Returns a float between 0 and 1 representing the location of the closest point on LineString to the given Point, as a fraction of total 2d line length.

+
Immutable
st_linemerge(geometry: geometry) → geometry

Returns a LineString or MultiLineString by joining together constituents of a MultiLineString with matching endpoints. If the input is not a MultiLineString or LineString, an empty GeometryCollection is returned.

+

This function utilizes the GEOS module.

+
Immutable
st_linestringfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not LineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_linestringfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linestringfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linestringfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not LineString, NULL is returned.

+
Immutable
st_linesubstring(linestring: geometry, start_fraction: decimal, end_fraction: decimal) → geometry

Return a linestring being a substring of the input one starting and ending at the given fractions of total 2D length. Second and third arguments are float8 values between 0 and 1.

+
Immutable
st_linesubstring(linestring: geometry, start_fraction: float, end_fraction: float) → geometry

Return a linestring being a substring of the input one starting and ending at the given fractions of total 2D length. Second and third arguments are float8 values between 0 and 1.

+
Immutable
st_longestline(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the LineString that corresponds to the max distance across every pair of points comprising the given geometries.

+

Note if geometries are the same, it will return the LineString with the maximum distance between the geometry’s vertexes. The function will return the longest line that was discovered first when comparing maximum distances if more than one is found.

+
Immutable
st_m(geometry: geometry) → float

Returns the M coordinate of a geometry if it is a Point.

+
Immutable
st_makebox2d(geometry_a: geometry, geometry_b: geometry) → box2d

Creates a box2d from two points. Errors if arguments are not two non-empty points.

+
Immutable
st_makeenvelope(xmin: float, ymin: float, xmax: float, ymax: float) → geometry

Creates a rectangular Polygon from the minimum and maximum values for X and Y with SRID 0.

+
Immutable
st_makeenvelope(xmin: float, ymin: float, xmax: float, ymax: float, srid: int) → geometry

Creates a rectangular Polygon from the minimum and maximum values for X and Y with the given SRID.

+
Immutable
st_makepoint(x: float, y: float) → geometry

Returns a new Point with the given X and Y coordinates.

+
Immutable
st_makepoint(x: float, y: float, z: float) → geometry

Returns a new Point with the given X, Y, and Z coordinates.

+
Immutable
st_makepoint(x: float, y: float, z: float, m: float) → geometry

Returns a new Point with the given X, Y, Z, and M coordinates.

+
Immutable
st_makepointm(x: float, y: float, m: float) → geometry

Returns a new Point with the given X, Y, and M coordinates.

+
Immutable
st_makepolygon(geometry: geometry) → geometry

Returns a new Polygon with the given outer LineString.

+
Immutable
st_makepolygon(outer: geometry, interior: anyelement[]) → geometry

Returns a new Polygon with the given outer LineString and interior (hole) LineString(s).

+
Immutable
st_makevalid(geometry: geometry) → geometry

Returns a valid form of the given geometry according to the OGC spec.

+

This function utilizes the GEOS module.

+
Immutable
st_maxdistance(geometry_a: geometry, geometry_b: geometry) → float

Returns the maximum distance across every pair of points comprising the given geometries. Note if the geometries are the same, it will return the maximum distance between the geometry’s vertexes.

+
Immutable
st_memsize(geometry: geometry) → int

Returns the amount of memory space (in bytes) the geometry takes.

+
Immutable
st_minimumboundingcircle(geometry: geometry) → geometry

Returns the smallest circle polygon that can fully contain a geometry.

+
Immutable
st_minimumboundingcircle(geometry: geometry, num_segs: int) → geometry

Returns the smallest circle polygon that can fully contain a geometry.

+
Immutable
st_minimumboundingradius(geometry: geometry) → tuple{geometry AS center, float AS radius}

Returns a record containing the center point and radius of the smallest circle that can fully contain the given geometry.

+
Immutable
st_minimumclearance(geometry: geometry) → float

Returns the minimum distance a vertex can move before producing an invalid geometry. Returns Infinity if no minimum clearance can be found (e.g. for a single point).

+
Immutable
st_minimumclearanceline(geometry: geometry) → geometry

Returns a LINESTRING spanning the minimum distance a vertex can move before producing an invalid geometry. If no minimum clearance can be found (e.g. for a single point), an empty LINESTRING is returned.

+
Immutable
st_mlinefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mlinefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mlinefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mlinefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_mpointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mpointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_mpolyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_mpolyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_mpolyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_mpolyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multi(geometry: geometry) → geometry

Returns the geometry as a new multi-geometry, e.g. converts a POINT to a MULTIPOINT. If the input is already a multitype or collection, it is returned as is.

+
Immutable
st_multilinefromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multilinefromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinefromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinefromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multilinestringfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multilinestringfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiLineString, NULL is returned.

+
Immutable
st_multipointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPoint, NULL is returned.

+
Immutable
st_multipolyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipolyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_multipolygonfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_multipolygonfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not MultiPolygon, NULL is returned.

+
Immutable
st_ndims(geometry: geometry) → int

Returns the number of coordinate dimensions of a given Geometry.

+
Immutable
st_node(geometry: geometry) → geometry

Adds a node on a geometry for each intersection. Resulting geometry is always a MultiLineString.

+
Immutable
st_normalize(geometry: geometry) → geometry

Returns the geometry in its normalized form.

+

This function utilizes the GEOS module.

+
Immutable
st_npoints(geometry: geometry) → int

Returns the number of points in a given Geometry. Works for any shape type.

+
Immutable
st_nrings(geometry: geometry) → int

Returns the number of rings in a Polygon Geometry. Returns 0 if the shape is not a Polygon.

+
Immutable
st_numgeometries(geometry: geometry) → int

Returns the number of shapes inside a given Geometry.

+
Immutable
st_numinteriorring(geometry: geometry) → int

Returns the number of interior rings in a Polygon Geometry. Returns NULL if the shape is not a Polygon.

+
Immutable
st_numinteriorrings(geometry: geometry) → int

Returns the number of interior rings in a Polygon Geometry. Returns NULL if the shape is not a Polygon.

+
Immutable
st_numpoints(geometry: geometry) → int

Returns the number of points in a LineString. Returns NULL if the Geometry is not a LineString.

+
Immutable
st_orderingequals(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is exactly equal to geometry_b, having all coordinates in the same order, as well as the same type, SRID, bounding box, and so on.

+
Immutable
st_orientedenvelope(geometry: geometry) → geometry

Returns a minimum rotated rectangle enclosing a geometry. +Note that more than one minimum rotated rectangle may exist. +May return a Point or LineString in the case of degenerate inputs.

+
Immutable
st_overlaps(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a intersects but does not completely contain geometry_b, or vice versa. “Does not completely” implies ST_Within(geometry_a, geometry_b) = ST_Within(geometry_b, geometry_a) = false.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_perimeter(geography: geography) → float

Returns the perimeter of the given geography in meters. Uses a spheroid to perform the operation.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_perimeter(geography: geography, use_spheroid: bool) → float

Returns the perimeter of the given geography in meters.

+

This function utilizes the S2 library for spherical calculations.

+

This function utilizes the GeographicLib library for spheroid calculations.

+
Immutable
st_perimeter(geometry: geometry) → float

Returns the perimeter of the given geometry.

+

Note ST_Perimeter is only valid for Polygon - use ST_Length for LineString.

+

This function utilizes the GEOS module.

+
Immutable
st_perimeter2d(geometry: geometry) → float

Returns the perimeter of the given geometry.

+

Note ST_Perimeter is only valid for Polygon - use ST_Length for LineString.

+

This function utilizes the GEOS module.

+
Immutable
st_point(x: float, y: float) → geometry

Returns a new Point with the given X and Y coordinates.

+
Immutable
st_pointfromgeohash(geohash: string) → geometry

Return a POINT Geometry from a GeoHash string with max precision.

+
Immutable
st_pointfromgeohash(geohash: string, precision: int) → geometry

Return a POINT Geometry from a GeoHash string with supplied precision.

+
Immutable
st_pointfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Point, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_pointfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Point, NULL is returned.

+
Immutable
st_pointinsidecircle(geometry: geometry, x_coord: float, y_coord: float, radius: float) → bool

Returns true if the geometry is a point and is inside the circle. Returns false otherwise.

+
Immutable
st_pointn(geometry: geometry, n: int) → geometry

Returns the n-th Point of a LineString (1-indexed). Returns NULL if out of bounds or not a LineString.

+
Immutable
st_pointonsurface(geometry: geometry) → geometry

Returns a point that intersects with the given Geometry.

+

This function utilizes the GEOS module.

+
Immutable
st_points(geometry: geometry) → geometry

Returns all coordinates in the given Geometry as a MultiPoint, including duplicates.

+
Immutable
st_polyfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Polygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_polyfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polyfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polyfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygon(geometry: geometry, srid: int) → geometry

Returns a new Polygon from the given LineString and sets its SRID. It is equivalent to ST_MakePolygon with a single argument followed by ST_SetSRID.

+
Immutable
st_polygonfromtext(str: string, srid: int) → geometry

Returns the Geometry from a WKT or EWKT representation with an SRID. If the shape underneath is not Polygon, NULL is returned. If the SRID is present in both the EWKT and the argument, the argument value is used.

+
Immutable
st_polygonfromtext(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygonfromwkb(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_polygonfromwkb(wkb: bytes, srid: int) → geometry

Returns the Geometry from a WKB (or EWKB) representation with an SRID. If the shape underneath is not Polygon, NULL is returned.

+
Immutable
st_project(geography: geography, distance: float, azimuth: float) → geography

Returns a point projected from a start point along a geodesic using a given distance and azimuth (bearing). +This is known as the direct geodesic problem.

+

The distance is given in meters. Negative values are supported.

+

The azimuth (also known as heading or bearing) is given in radians. It is measured clockwise from true north (azimuth zero). +East is azimuth π/2 (90 degrees); south is azimuth π (180 degrees); west is azimuth 3π/2 (270 degrees). +Negative azimuth values and values greater than 2π (360 degrees) are supported.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry) → string

Returns the DE-9IM spatial relation between geometry_a and geometry_b.

+

This function utilizes the GEOS module.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry, bnr: int) → string

Returns the DE-9IM spatial relation between geometry_a and geometry_b using the given boundary node rule (1:OGC/MOD2, 2:Endpoint, 3:MultivalentEndpoint, 4:MonovalentEndpoint).

+

This function utilizes the GEOS module.

+
Immutable
st_relate(geometry_a: geometry, geometry_b: geometry, pattern: string) → bool

Returns whether the DE-9IM spatial relation between geometry_a and geometry_b matches the DE-9IM pattern.

+

This function utilizes the GEOS module.

+
Immutable
st_relatematch(intersection_matrix: string, pattern: string) → bool

Returns whether the given DE-9IM intersection matrix satisfies the given pattern.

+
Immutable
st_removepoint(line_string: geometry, index: int) → geometry

Removes the Point at the given 0-based index and returns the modified LineString geometry.

+
Immutable
st_removerepeatedpoints(geometry: geometry) → geometry

Returns a geometry with repeated points removed.

+
Immutable
st_removerepeatedpoints(geometry: geometry, tolerance: float) → geometry

Returns a geometry with repeated points removed, within the given distance tolerance.

+
Immutable
st_reverse(geometry: geometry) → geometry

Returns a modified geometry by reversing the order of its vertices.

+
Immutable
st_rotate(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated around the origin by a rotation angle.

+
Immutable
st_rotate(g: geometry, angle_radians: float, origin_point: geometry) → geometry

Returns a modified Geometry whose coordinates are rotated around the provided origin by a rotation angle.

+
Immutable
st_rotate(g: geometry, angle_radians: float, origin_x: float, origin_y: float) → geometry

Returns a modified Geometry whose coordinates are rotated around the provided origin by a rotation angle.

+
Immutable
st_rotatex(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the x axis by a rotation angle.

+
Immutable
st_rotatey(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the y axis by a rotation angle.

+
Immutable
st_rotatez(g: geometry, angle_radians: float) → geometry

Returns a modified Geometry whose coordinates are rotated about the z axis by a rotation angle.

+
Immutable
st_s2covering(geography: geography) → geography

Returns a geography which represents the S2 covering used by the index using the default index configuration.

+
Immutable
st_s2covering(geography: geography, settings: string) → geography

Returns a geography which represents the S2 covering used by the index using the index configuration specified by the settings parameter.

+

The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX ... WITH (...), e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using SELECT ST_S2Covering(geography, 's2_max_level=15,s2_level_mod=3').

+
Immutable
st_s2covering(geometry: geometry) → geometry

Returns a geometry which represents the S2 covering used by the index using the default index configuration.

+
Immutable
st_s2covering(geometry: geometry, settings: string) → geometry

Returns a geometry which represents the S2 covering used by the index using the index configuration specified by the settings parameter.

+

The settings parameter uses the same format as the parameters inside the WITH in CREATE INDEX ... WITH (...), e.g. CREATE INDEX t_idx ON t USING GIST(geom) WITH (s2_max_level=15, s2_level_mod=3) can be tried using SELECT ST_S2Covering(geometry, 's2_max_level=15,s2_level_mod=3')

+
Immutable
st_scale(g: geometry, factor: geometry) → geometry

Returns a modified Geometry scaled by taking in a Geometry as the factor.

+
Immutable
st_scale(g: geometry, factor: geometry, origin: geometry) → geometry

Returns a modified Geometry scaled by the Geometry factor relative to a false origin.

+
Immutable
st_scale(geometry: geometry, x_factor: float, y_factor: float) → geometry

Returns a modified Geometry scaled by the given factors.

+
Immutable
st_segmentize(geography: geography, max_segment_length_meters: float) → geography

Returns a modified Geography having no segment longer than the given max_segment_length meters.

+

The calculations are done on a sphere.

+

This function utilizes the S2 library for spherical calculations.

+
Immutable
st_segmentize(geometry: geometry, max_segment_length: float) → geometry

Returns a modified Geometry having no segment longer than the given max_segment_length. Length units are in units of spatial reference.

+
Immutable
st_setpoint(line_string: geometry, index: int, point: geometry) → geometry

Sets the Point at the given 0-based index and returns the modified LineString geometry.

+
Immutable
st_setsrid(geography: geography, srid: int) → geography

Sets a Geography to a new SRID without transforming the coordinates.

+
Immutable
st_setsrid(geometry: geometry, srid: int) → geometry

Sets a Geometry to a new SRID without transforming the coordinates.

+
Immutable
st_sharedpaths(geometry_a: geometry, geometry_b: geometry) → geometry

Returns a collection containing paths shared by the two input geometries.

+

Those going in the same direction are in the first element of the collection, +those going in the opposite direction are in the second element. +The paths themselves are given in the direction of the first geometry.

+
Immutable
st_shiftlongitude(geometry: geometry) → geometry

Returns a modified version of a geometry in which the longitude (X coordinate) of each point is incremented by 360 if it is <0 and decremented by 360 if it is >180. The result is only meaningful if the coordinates are in longitude/latitude.

+
Immutable
st_shortestline(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the LineString that corresponds to the minimum distance across every pair of points comprising the given geometries.

+

Note if geometries are the same, it will return the LineString with the minimum distance between the geometry’s vertexes. The function will return the shortest line that was discovered first when comparing minimum distances if more than one is found.

+
Immutable
st_simplify(geometry: geometry, tolerance: float) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm.

+

This function utilizes the GEOS module.

+
Immutable
st_simplify(geometry: geometry, tolerance: float, preserve_collapsed: bool) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm, retaining objects that would be too small given the tolerance if preserve_collapsed is set to true.

+
Immutable
st_simplifypreservetopology(geometry: geometry, tolerance: float) → geometry

Simplifies the given geometry using the Douglas-Peucker algorithm, avoiding the creation of invalid geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_snap(input: geometry, target: geometry, tolerance: float) → geometry

Snaps the vertices and segments of input geometry to the target geometry’s vertices. +Tolerance is used to control where snapping is performed. The result geometry is the input geometry with the vertices snapped. +If no snapping occurs then the input geometry is returned unchanged.

+
Immutable
st_snaptogrid(geometry: geometry, origin: geometry, size_x: float, size_y: float, size_z: float, size_m: float) → geometry

Snap a geometry to a grid defined by the given origin and X, Y, Z, and M cell sizes. Any dimension with a 0 cell size will not be snapped.

+
Immutable
st_snaptogrid(geometry: geometry, origin_x: float, origin_y: float, size_x: float, size_y: float) → geometry

Snap a geometry to a grid with X coordinates snapped to size_x and Y coordinates snapped to size_y based on an origin of (origin_x, origin_y).

+
Immutable
st_snaptogrid(geometry: geometry, size: float) → geometry

Snap a geometry to a grid of the given size. The specified size is only used to snap X and Y coordinates.

+
Immutable
st_snaptogrid(geometry: geometry, size_x: float, size_y: float) → geometry

Snap a geometry to a grid with X coordinates snapped to size_x and Y coordinates snapped to size_y.

+
Immutable
st_srid(geography: geography) → int

Returns the Spatial Reference Identifier (SRID) for the ST_Geography as defined in spatial_ref_sys table.

+
Immutable
st_srid(geometry: geometry) → int

Returns the Spatial Reference Identifier (SRID) for the ST_Geometry as defined in spatial_ref_sys table.

+
Immutable
st_startpoint(geometry: geometry) → geometry

Returns the first point of a geometry which has shape LineString. Returns NULL if the geometry is not a LineString.

+
Immutable
st_subdivide(geometry: geometry) → geometry

Returns a geometry divided into parts, where each part contains no more than 256 vertices.

+
Immutable
st_subdivide(geometry: geometry, max_vertices: int4) → geometry

Returns a geometry divided into parts, where each part contains no more than the number of vertices provided.

+
Immutable
st_summary(geography: geography) → string

Returns a text summary of the contents of the geography.

+

Flags shown in square brackets after the geometry type have the following meaning:

+
    +
  • M: has M coordinate
  • +
  • Z: has Z coordinate
  • +
  • B: has a cached bounding box
  • +
  • G: is geography
  • +
  • S: has spatial reference system
  • +
+
Immutable
st_summary(geometry: geometry) → string

Returns a text summary of the contents of the geometry.

+

Flags shown in square brackets after the geometry type have the following meaning:

+
    +
  • M: has M coordinate
  • +
  • Z: has Z coordinate
  • +
  • B: has a cached bounding box
  • +
  • G: is geography
  • +
  • S: has spatial reference system
  • +
+
Immutable
st_swapordinates(geometry: geometry, swap_ordinate_string: string) → geometry

Returns a version of the given geometry with given ordinates swapped. +The swap_ordinate_string parameter is a 2-character string naming the ordinates to swap. Valid names are: x, y, z and m.

+
Immutable
st_symdifference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the symmetric difference of both geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_symmetricdifference(geometry_a: geometry, geometry_b: geometry) → geometry

Returns the symmetric difference of both geometries.

+

This function utilizes the GEOS module.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4, bounds: geometry) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_tileenvelope(tileZoom: int4, tileX: int4, tileY: int4, bounds: geometry, margin: float) → geometry

Creates a rectangular Polygon giving the extent of a tile in the XYZ tile system. +The tile is specified by the zoom level Z and the XY index of the tile in the grid at that level. +Can be used to define the tile bounds required by ST_AsMVTGeom to convert geometry into the MVT tile coordinate space.

+
Immutable
st_touches(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if the only points in common between geometry_a and geometry_b are on the boundary. Note points do not touch other points.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_transform(geometry: geometry, from_proj_text: string, srid: int) → geometry

Transforms a geometry into the coordinate reference system assuming the from_proj_text to the new to_proj_text by projecting its coordinates. The supplied SRID is set on the new geometry.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, from_proj_text: string, to_proj_text: string) → geometry

Transforms a geometry into the coordinate reference system assuming the from_proj_text to the new to_proj_text by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, srid: int) → geometry

Transforms a geometry into the given SRID coordinate reference system by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_transform(geometry: geometry, to_proj_text: string) → geometry

Transforms a geometry into the coordinate reference system referenced by the projection text by projecting its coordinates.

+

This function utilizes the PROJ library for coordinate projections.

+
Immutable
st_translate(g: geometry, delta_x: float, delta_y: float) → geometry

Returns a modified Geometry translated by the given deltas.

+
Immutable
st_translate(g: geometry, delta_x: float, delta_y: float, delta_z: float) → geometry

Returns a modified Geometry translated by the given deltas.

+
Immutable
st_transscale(geometry: geometry, delta_x: float, delta_y: float, x_factor: float, y_factor: float) → geometry

Translates the geometry using the deltaX and deltaY args, then scales it using the XFactor, YFactor args, working in 2D only.

+
Immutable
st_unaryunion(geometry: geometry) → geometry

Returns a union of the components for any geometry or geometry collection provided. Dissolves boundaries of a multipolygon.

+
Immutable
st_voronoilines(geometry: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoilines(geometry: geometry, tolerance: float) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoilines(geometry: geometry, tolerance: float, extend_to: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry as the boundaries between cells in that diagram as a MultiLineString.

+
Immutable
st_voronoipolygons(geometry: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_voronoipolygons(geometry: geometry, tolerance: float) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_voronoipolygons(geometry: geometry, tolerance: float, extend_to: geometry) → geometry

Returns a two-dimensional Voronoi diagram from the vertices of the supplied geometry.

+
Immutable
st_within(geometry_a: geometry, geometry_b: geometry) → bool

Returns true if geometry_a is completely inside geometry_b.

+

This function utilizes the GEOS module.

+

This function variant will attempt to utilize any available spatial index.

+
Immutable
st_wkbtosql(val: bytes) → geometry

Returns the Geometry from a WKB (or EWKB) representation.

+
Immutable
st_wkttosql(val: string) → geometry

Returns the Geometry from a WKT or EWKT representation.

+
Immutable
st_x(geometry: geometry) → float

Returns the X coordinate of a geometry if it is a Point.

+
Immutable
st_xmax(box2d: box2d) → float

Returns the maximum X ordinate of a box2d.

+
Immutable
st_xmax(geometry: geometry) → float

Returns the maximum X ordinate of a geometry.

+
Immutable
st_xmin(box2d: box2d) → float

Returns the minimum X ordinate of a box2d.

+
Immutable
st_xmin(geometry: geometry) → float

Returns the minimum X ordinate of a geometry.

+
Immutable
st_y(geometry: geometry) → float

Returns the Y coordinate of a geometry if it is a Point.

+
Immutable
st_ymax(box2d: box2d) → float

Returns the maximum Y ordinate of a box2d.

+
Immutable
st_ymax(geometry: geometry) → float

Returns the maximum Y ordinate of a geometry.

+
Immutable
st_ymin(box2d: box2d) → float

Returns the minimum Y ordinate of a box2d.

+
Immutable
st_ymin(geometry: geometry) → float

Returns the minimum Y ordinate of a geometry.

+
Immutable
st_z(geometry: geometry) → float

Returns the Z coordinate of a geometry if it is a Point.

+
Immutable
st_zmflag(geometry: geometry) → int2

Returns a code based on the ZM coordinate dimension of a geometry (XY = 0, XYM = 1, XYZ = 2, XYZM = 3).

+
Immutable
+ +### String and byte functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
ascii(val: string) → int

Returns the character code of the first character in val. Despite the name, the function supports Unicode too.

+
Immutable
bit_count(val: bytes) → int

Calculates the number of bits set used to represent val.

+
Immutable
bit_count(val: varbit) → int

Calculates the number of bits set used to represent val.

+
Immutable
bit_length(val: bytes) → int

Calculates the number of bits used to represent val.

+
Immutable
bit_length(val: string) → int

Calculates the number of bits used to represent val.

+
Immutable
bit_length(val: varbit) → int

Calculates the number of bits used to represent val.

+
Immutable
bitmask_and(a: string, b: string) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: string, b: varbit) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: varbit, b: string) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_and(a: varbit, b: varbit) → varbit

Calculates bitwise AND value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: string, b: string) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: string, b: varbit) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: varbit, b: string) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_or(a: varbit, b: varbit) → varbit

Calculates bitwise OR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: string, b: string) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: string, b: varbit) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: varbit, b: string) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
bitmask_xor(a: varbit, b: varbit) → varbit

Calculates bitwise XOR value of unsigned bit arrays ‘a’ and ‘b’ that may have different lengths.

+
Immutable
btrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the beginning or end of input (applies recursively).

+

For example, btrim('doggie', 'eod') returns ggi.

+
Immutable
btrim(val: string) → string

Removes all spaces from the beginning and end of val.

+
Immutable
char_length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
char_length(val: string) → int

Calculates the number of characters in val.

+
Immutable
character_length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
character_length(val: string) → int

Calculates the number of characters in val.

+
Immutable
chr(val: int) → string

Returns the character with the code given in val. Inverse function of ascii().

+
Immutable
compress(data: bytes, codec: string) → bytes

Compress data with the specified codec (‘gzip’, ‘lz4’, ‘snappy’, ‘zstd’).

+
Immutable
concat(any...) → string

Concatenates a comma-separated list of strings.

+
Immutable
concat_ws(string, any...) → string

Uses the first argument as a separator between the concatenation of the subsequent arguments.

+

For example concat_ws('!','wow','great') returns wow!great.

+
Immutable
convert_from(str: bytes, enc: string) → string

Decode the bytes in str into a string using encoding enc. Supports encodings ‘UTF8’ and ‘LATIN1’.

+
Immutable
convert_to(str: string, enc: string) → bytes

Encode the string str as a byte array using encoding enc. Supports encodings ‘UTF8’ and ‘LATIN1’.

+
Immutable
decode(text: string, format: string) → bytes

Decodes data using format (hex / escape / base64).

+
Immutable
decompress(data: bytes, codec: string) → bytes

Decompress data with the specified codec (‘gzip’, ‘lz4’, ‘snappy’, ‘zstd’).

+
Immutable
difference(source: string, target: string) → int

Convert two strings to their Soundex codes and report the number of matching code positions.

+
Immutable
encode(data: bytes, format: string) → string

Encodes data using format (hex / escape / base64).

+
Immutable
format(string, any...) → string

Interprets the first argument as a format string similar to C sprintf and interpolates the remaining arguments.

+
Stable
from_ip(val: bytes) → string

Converts the byte string representation of an IP to its character string representation.

+
Immutable
from_uuid(val: bytes) → string

Converts the byte string representation of a UUID to its character string representation.

+
Immutable
get_bit(bit_string: varbit, index: int) → int

Extracts a bit at given index in the bit array.

+
Immutable
get_bit(byte_string: bytes, index: int) → int

Extracts a bit at the given index in the byte array.

+
Immutable
get_byte(byte_string: bytes, index: int) → int

Extracts a byte at the given index in the byte array.

+
Immutable
initcap(val: string) → string

Capitalizes the first letter of val.

+
Immutable
left(input: bytes, return_set: int) → bytes

Returns the first return_set bytes from input.

+
Immutable
left(input: string, return_set: int) → string

Returns the first return_set characters from input.

+
Immutable
length(val: bytes) → int

Calculates the number of bytes in val.

+
Immutable
length(val: string) → int

Calculates the number of characters in val.

+
Immutable
length(val: varbit) → int

Calculates the number of bits in val.

+
Immutable
lower(val: string) → string

Converts all characters in val to their lower-case equivalents.

+
Immutable
lpad(string: string, length: int) → string

Pads string to length by adding ’ ’ to the left of string. If string is longer than length it is truncated.

+
Immutable
lpad(string: string, length: int, fill: string) → string

Pads string by adding fill to the left of string to make it length. If string is longer than length it is truncated.

+
Immutable
ltrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the beginning (left-hand side) of input (applies recursively).

+

For example, ltrim('doggie', 'od') returns ggie.

+
Immutable
ltrim(val: string) → string

Removes all spaces from the beginning (left-hand side) of val.

+
Immutable
md5(bytes...) → string

Calculates the MD5 hash value of a set of values.

+
Leakproof
md5(string...) → string

Calculates the MD5 hash value of a set of values.

+
Leakproof
octet_length(val: bytes) → int

Calculates the number of bytes used to represent val.

+
Immutable
octet_length(val: string) → int

Calculates the number of bytes used to represent val.

+
Immutable
octet_length(val: varbit) → int

Calculates the number of bits used to represent val.

+
Immutable
overlay(input: string, overlay_val: string, start_pos: int) → string

Replaces characters in input with overlay_val starting at start_pos (begins at 1).

+

For example, overlay('doggie', 'CAT', 2) returns dCATie.

+
Immutable
overlay(input: string, overlay_val: string, start_pos: int, end_pos: int) → string

Deletes the characters in input between start_pos and end_pos (count starts at 1), and then insert overlay_val at start_pos.

+
Immutable
parse_date(string: string, datestyle: string) → date

Parses a date assuming it is in format specified by DateStyle.

+
Immutable
parse_date(val: string) → date

Parses a date assuming it is in MDY format.

+
Immutable
parse_ident(qualified_identifier: string) → string[]

Splits qualified_identifier into an array of identifiers, removing any quoting of individual identifiers. Extra characters after the last identifier are considered an error

+
Immutable
parse_ident(qualified_identifier: string, strict: bool) → string[]

Splits qualified_identifier into an array of identifiers, removing any quoting of individual identifiers. If strict is false, then extra characters after the last identifier are ignored.

+
Immutable
parse_interval(string: string, style: string) → interval

Convert a string to an interval using the given IntervalStyle.

+
Immutable
parse_interval(val: string) → interval

Convert a string to an interval assuming the Postgres IntervalStyle.

+
Immutable
parse_time(string: string, timestyle: string) → time

Parses a time assuming the date (if any) is in format specified by DateStyle.

+
Immutable
parse_time(val: string) → time

Parses a time assuming the date (if any) is in MDY format.

+
Immutable
parse_timestamp(string: string, datestyle: string) → timestamp

Convert a string containing an absolute timestamp to the corresponding timestamp assuming dates formatted using the given DateStyle.

+
Immutable
parse_timestamp(val: string) → timestamp

Convert a string containing an absolute timestamp to the corresponding timestamp assuming dates are in MDY format.

+
Immutable
parse_timetz(string: string, timestyle: string) → timetz

Parses a timetz assuming the date (if any) is in format specified by DateStyle.

+
Immutable
parse_timetz(val: string) → timetz

Parses a timetz assuming the date (if any) is in MDY format.

+
Immutable
prettify_statement(statement: string, line_width: int, align_mode: int, case_mode: int) → string

Prettifies a statement using a user-configured pretty-printing config. +Align mode values range from 0 - 3, representing no, partial, full, and extra alignment respectively. +Case mode values range between 0 - 1, representing lower casing and upper casing respectively.

+
Immutable
prettify_statement(val: string) → string

Prettifies a statement using the default pretty-printing config.

+
Immutable
quote_ident(val: string) → string

Return val suitably quoted to serve as identifier in a SQL statement.

+
Immutable
quote_literal(val: string) → string

Return val suitably quoted to serve as string literal in a SQL statement.

+
Immutable
quote_literal(val: anyelement) → string

Coerce val to a string and then quote it as a literal.

+
Stable
quote_nullable(val: string) → string

Coerce val to a string and then quote it as a literal. If val is NULL, returns ‘NULL’.

+
Immutable
quote_nullable(val: anyelement) → string

Coerce val to a string and then quote it as a literal. If val is NULL, returns ‘NULL’.

+
Stable
regexp_extract(input: string, regex: string) → string

Returns the first match for the Regular Expression regex in input.

+
Immutable
regexp_replace(input: string, regex: string, replace: string) → string

Replaces matches for the Regular Expression regex in input with the Regular Expression replace.

+
Immutable
regexp_replace(input: string, regex: string, replace: string, flags: string) → string

Replaces matches for the regular expression regex in input with the regular expression replace using flags.

+

CockroachDB supports the following flags:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FlagDescription
cCase-sensitive matching
gGlobal matching (match each substring instead of only the first)
iCase-insensitive matching
m or nNewline-sensitive (see below)
pPartial newline-sensitive matching (see below)
sNewline-insensitive (default)
wInverse partial newline-sensitive matching (see below)
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Mode. and [^...] match newlines^ and $ match line boundaries
syesno
wyesyes
pnono
m/nnoyes
+
Immutable
repeat(input: string, repeat_counter: int) → string

Concatenates input repeat_counter number of times.

+

For example, repeat('dog', 2) returns dogdog.

+
Immutable
replace(input: string, find: string, replace: string) → string

Replaces all occurrences of find with replace in input

+
Immutable
reverse(val: string) → string

Reverses the order of the string’s characters.

+
Immutable
right(input: bytes, return_set: int) → bytes

Returns the last return_set bytes from input.

+
Immutable
right(input: string, return_set: int) → string

Returns the last return_set characters from input.

+
Immutable
rpad(string: string, length: int) → string

Pads string to length by adding ’ ’ to the right of string. If string is longer than length it is truncated.

+
Immutable
rpad(string: string, length: int, fill: string) → string

Pads string to length by adding fill to the right of string. If string is longer than length it is truncated.

+
Immutable
rtrim(input: string, trim_chars: string) → string

Removes any characters included in trim_chars from the end (right-hand side) of input (applies recursively).

+

For example, rtrim('doggie', 'ei') returns dogg.

+
Immutable
rtrim(val: string) → string

Removes all spaces from the end (right-hand side) of val.

+
Immutable
set_bit(bit_string: varbit, index: int, to_set: int) → varbit

Updates a bit at given index in the bit array.

+
Immutable
set_bit(byte_string: bytes, index: int, to_set: int) → bytes

Updates a bit at the given index in the byte array.

+
Immutable
set_byte(byte_string: bytes, index: int, to_set: int) → bytes

Updates a byte at the given index in the byte array.

+
Immutable
sha1(bytes...) → string

Calculates the SHA1 hash value of a set of values.

+
Leakproof
sha1(string...) → string

Calculates the SHA1 hash value of a set of values.

+
Leakproof
sha224(bytes...) → string

Calculates the SHA224 hash value of a set of values.

+
Leakproof
sha224(string...) → string

Calculates the SHA224 hash value of a set of values.

+
Leakproof
sha256(bytes...) → string

Calculates the SHA256 hash value of a set of values.

+
Leakproof
sha256(string...) → string

Calculates the SHA256 hash value of a set of values.

+
Leakproof
sha384(bytes...) → string

Calculates the SHA384 hash value of a set of values.

+
Leakproof
sha384(string...) → string

Calculates the SHA384 hash value of a set of values.

+
Leakproof
sha512(bytes...) → string

Calculates the SHA512 hash value of a set of values.

+
Leakproof
sha512(string...) → string

Calculates the SHA512 hash value of a set of values.

+
Leakproof
similar_escape(pattern: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern.

+
Immutable
similar_escape(pattern: string, escape: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern using escape as an escape token.

+
Immutable
similar_to_escape(pattern: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern.

+
Immutable
similar_to_escape(pattern: string, escape: string) → string

Converts a SQL regexp pattern to a POSIX regexp pattern using escape as an escape token.

+
Immutable
similar_to_escape(unescaped: string, pattern: string, escape: string) → bool

Matches unescaped with pattern using escape as an escape token.

+
Immutable
split_part(input: string, delimiter: string, return_index_pos: int) → string

Splits input using delimiter and returns the field at return_index_pos (starting from 1). If return_index_pos is negative, it returns the |return_index_pos|'th field from the end.

+

For example, split_part('123.456.789.0', '.', 3) returns 789.

+
Immutable
strpos(input: bytes, find: bytes) → int

Calculates the position where the byte subarray find begins in input.

+
Immutable
strpos(input: string, find: string) → int

Calculates the position where the string find begins in input.

+

For example, strpos('doggie', 'gie') returns 4.

+
Immutable
strpos(input: varbit, find: varbit) → int

Calculates the position where the bit subarray find begins in input.

+
Immutable
substr(input: bytes, start_pos: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: bytes, start_pos: int, length: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substr(input: string, regex: string) → string

Returns a substring of input that matches the regular expression regex.

+
Immutable
substr(input: string, regex: string, escape_char: string) → string

Returns a substring of input that matches the regular expression regex using escape_char as your escape character instead of \.

+
Immutable
substr(input: string, start_pos: int) → string

Returns a substring of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: string, start_pos: int, length: int) → string

Returns a substring of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substr(input: varbit, start_pos: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1).

+
Immutable
substr(input: varbit, start_pos: int, length: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: bytes, start_pos: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: bytes, start_pos: int, length: int) → bytes

Returns a byte subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: string, regex: string) → string

Returns a substring of input that matches the regular expression regex.

+
Immutable
substring(input: string, regex: string, escape_char: string) → string

Returns a substring of input that matches the regular expression regex using escape_char as your escape character instead of \.

+
Immutable
substring(input: string, start_pos: int) → string

Returns a substring of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: string, start_pos: int, length: int) → string

Returns a substring of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring(input: varbit, start_pos: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1).

+
Immutable
substring(input: varbit, start_pos: int, length: int) → varbit

Returns a bit subarray of input starting at start_pos (count starts at 1) and including up to length characters.

+
Immutable
substring_index(input: string, delim: string, count: int) → string

Returns a substring of input before count occurrences of delim. +If count is positive, the leftmost part is returned. If count is negative, the rightmost part is returned.

+
Immutable
to_char_with_style(date: date, datestyle: string) → string

Convert a date to a string assuming the string is formatted using the given DateStyle.

+
Immutable
to_char_with_style(interval: interval, style: string) → string

Convert an interval to a string using the given IntervalStyle.

+
Immutable
to_char_with_style(timestamp: timestamp, datestyle: string) → string

Convert a timestamp to a string assuming the string is formatted using the given DateStyle.

+
Immutable
to_english(val: int) → string

This function enunciates the value of its argument using English cardinals.

+
Immutable
to_hex(val: bytes) → string

Converts val to its hexadecimal representation.

+
Immutable
to_hex(val: int) → string

Converts val to its hexadecimal representation.

+
Immutable
to_hex(val: string) → string

Converts val to its hexadecimal representation.

+
Immutable
to_ip(val: string) → bytes

Converts the character string representation of an IP to its byte string representation.

+
Immutable
to_uuid(val: string) → bytes

Converts the character string representation of a UUID to its byte string representation.

+
Immutable
translate(input: string, find: string, replace: string) → string

In input, replaces the first character from find with the first character in replace; repeat for each character in find.

+

For example, translate('doggie', 'dog', '123'); returns 1233ie.

+
Immutable
ulid_to_uuid(val: string) → uuid

Converts a ULID string to its UUID-encoded representation.

+
Immutable
unaccent(val: string) → string

Removes accents (diacritic signs) from the text provided in val.

+
Immutable
upper(val: string) → string

Converts all characters in val to their upper-case equivalents.

+
Immutable
+ +### System info functions + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cluster_logical_timestamp() → decimal

Returns the logical time of the current transaction as +a CockroachDB HLC in decimal form.

+

Note that uses of this function disable server-side optimizations and +may increase either contention or retry errors, or both.

+

Returns an error if run in a transaction with an isolation level weaker than SERIALIZABLE.

+
Volatile
current_database() → string

Returns the current database.

+
Stable
current_schema() → string

Returns the current schema.

+
Stable
current_schemas(include_pg_catalog: bool) → string[]

Returns the valid schemas in the search path.

+
Stable
current_user() → string

Returns the current user. This function is provided for compatibility with PostgreSQL.

+
Stable
information_schema.crdb_datums_to_bytes(any...) → bytes

Converts datums into key-encoded bytes. Supports NULLs and all data types which may be used in index keys

+
Immutable
session_user() → string

Returns the session user. This function is provided for compatibility with PostgreSQL.

+
Stable
to_regclass(text: string) → regtype

Translates a textual relation name to its OID

+
Stable
to_regnamespace(text: string) → regtype

Translates a textual schema name to its OID

+
Stable
to_regproc(text: string) → regtype

Translates a textual function or procedure name to its OID

+
Stable
to_regprocedure(text: string) → regtype

Translates a textual function or procedure name(with argument types) to its OID

+
Stable
to_regrole(text: string) → regtype

Translates a textual role name to its OID

+
Stable
to_regtype(text: string) → regtype

Translates a textual type name to its OID

+
Stable
version() → string

Returns the node’s version of CockroachDB.

+
Volatile
+ +### System repair functions + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
information_schema.crdb_delete_statement_hints(rowid: int) → int

This function deletes a statement hint by its row ID. It returns the number of deleted rows.

+
Volatile
information_schema.crdb_delete_statement_hints(statement_fingerprint: string) → int

This function deletes all statement hints matching the given statement fingerprint. The statement fingerprint argument is normalized before matching. It returns the number of deleted rows.

+
Volatile
information_schema.crdb_delete_statement_hints(statement_fingerprint: string, database: string) → int

This function deletes all statement hints matching the given statement fingerprint and database. The statement fingerprint argument is normalized before matching. It returns the number of deleted rows.

+
Volatile
information_schema.crdb_enable_statement_hints(enabled: bool, rowid: int) → int

This function enables or disables the statement hint with the given row ID. It returns the number of affected rows.

+
Volatile
information_schema.crdb_enable_statement_hints(enabled: bool, statement_fingerprint: string) → int

This function enables or disables all statement hints matching the given statement fingerprint. The statement fingerprint argument is normalized before matching. It returns the number of affected rows.

+
Volatile
information_schema.crdb_enable_statement_hints(enabled: bool, statement_fingerprint: string, database: string) → int

This function enables or disables all statement hints matching the given statement fingerprint and database. The statement fingerprint argument is normalized before matching. It returns the number of affected rows.

+
Volatile
information_schema.crdb_rewrite_inline_hints(statement_fingerprint: string, donor_sql: string) → int

This function adds an inline-hints rewrite rule for a statement fingerprint. It returns the hint ID of the newly created rewrite rule. The rewrite rule only applies to matching statement fingerprints. It first removes all inline hints from the target statement, and then copies inline hints from the donor statement.

+
Volatile
information_schema.crdb_rewrite_inline_hints(statement_fingerprint: string, donor_sql: string, database: string) → int

This function adds an inline-hints rewrite rule for a statement fingerprint, scoped to the given database. It returns the hint ID of the newly created rewrite rule. The rewrite rule only applies to matching statement fingerprints when the current database matches the specified database. It first removes all inline hints from the target statement, and then copies inline hints from the donor statement.

+
Volatile
information_schema.crdb_set_session_variable_hint(statement_fingerprint: string, variable_name: string, variable_value: string) → int

This function adds a session variable override hint for a statement fingerprint. It returns the hint ID of the newly created hint. The hint only applies to matching statement fingerprints and temporarily overrides the specified session variable for that statement only. Safe variables can be hinted without restrictions. Unsafe variables cannot be hinted when sql_safe_updates is enabled.

+
Volatile
information_schema.crdb_set_session_variable_hint(statement_fingerprint: string, variable_name: string, variable_value: string, database: string) → int

This function adds a session variable override hint for a statement fingerprint, scoped to the given database. It returns the hint ID of the newly created hint. The hint only applies to matching statement fingerprints when the current database matches the specified database, and temporarily overrides the specified session variable for that statement only. Safe variables can be hinted without restrictions. Unsafe variables cannot be hinted when sql_safe_updates is enabled.

+
Volatile
+ +### TIMETZ functions + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
current_time() → time

Returns the current transaction’s time with no time zone.

+
Stable
current_time() → timetz

Returns the current transaction’s time with time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
current_time(precision: int) → time

Returns the current transaction’s time with no time zone.

+
Stable
current_time(precision: int) → timetz

Returns the current transaction’s time with time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime() → time

Returns the current transaction’s time with no time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime() → timetz

Returns the current transaction’s time with time zone.

+
Stable
localtime(precision: int) → time

Returns the current transaction’s time with no time zone.

+

This function is the preferred overload and will be evaluated by default.

+
Stable
localtime(precision: int) → timetz

Returns the current transaction’s time with time zone.

+
Stable
+ +### Trigrams functions + + + + + + +
Function → ReturnsDescriptionVolatility
show_trgm(input: string) → string[]

Returns an array of all the trigrams in the given string.

+
Immutable
similarity(left: string, right: string) → float

Returns a number that indicates how similar the two arguments are. The range of the result is zero (indicating that the two strings are completely dissimilar) to one (indicating that the two strings are identical).

+
Immutable
+ +### UUID functions + + + + + +
Function → ReturnsDescriptionVolatility
uuid_to_ulid(val: uuid) → string

Converts a UUID-encoded ULID to its string representation.

+
Immutable
+ +### Compatibility functions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
acldefault(type: "char", ownerId: oid) → aclitem[]

Returns the default access privileges for an object of the given type belonging to the given owner.

+
Immutable
col_description(table_oid: oid, column_number: int) → string

Returns the comment for a table column, which is specified by the OID of its table and its column number. (obj_description cannot be used for table columns, since columns do not have OIDs of their own.)

+
Stable
current_setting(setting_name: string) → string

System info

+
Stable
current_setting(setting_name: string, missing_ok: bool) → string

System info

+
Stable
format_type(type_oid: oid, typemod: int) → string

Returns the SQL name of a data type that is identified by its type OID and possibly a type modifier. Currently, the type modifier is ignored.

+
Stable
getdatabaseencoding() → string

Returns the current encoding name used by the database.

+
Stable
has_any_column_privilege(table: string, privilege: string) → bool

Returns whether or not the current user has privileges for any column of table.

+
Stable
has_any_column_privilege(table: oid, privilege: string) → bool

Returns whether or not the current user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: string, table: string, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: string, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: oid, table: string, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_any_column_privilege(user: oid, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for any column of table.

+
Stable
has_column_privilege(table: string, column: int, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: string, column: string, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: oid, column: int, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(table: oid, column: string, privilege: string) → bool

Returns whether or not the current user has privileges for column.

+
Stable
has_column_privilege(user: string, table: string, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: string, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: oid, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: string, table: oid, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: string, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: string, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: oid, column: int, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_column_privilege(user: oid, table: oid, column: string, privilege: string) → bool

Returns whether or not the user has privileges for column.

+
Stable
has_database_privilege(database: string, privilege: string) → bool

Returns whether or not the current user has privileges for database.

+
Stable
has_database_privilege(database: oid, privilege: string) → bool

Returns whether or not the current user has privileges for database.

+
Stable
has_database_privilege(user: string, database: string, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: string, database: oid, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: oid, database: string, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_database_privilege(user: oid, database: oid, privilege: string) → bool

Returns whether or not the user has privileges for database.

+
Stable
has_foreign_data_wrapper_privilege(fdw: string, privilege: string) → bool

Returns whether or not the current user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(fdw: oid, privilege: string) → bool

Returns whether or not the current user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: string, fdw: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: string, fdw: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: oid, fdw: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_foreign_data_wrapper_privilege(user: oid, fdw: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign-data wrapper.

+
Stable
has_function_privilege(function: string, privilege: string) → bool

Returns whether or not the current user has privileges for function.

+
Stable
has_function_privilege(function: oid, privilege: string) → bool

Returns whether or not the current user has privileges for function.

+
Stable
has_function_privilege(user: string, function: string, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: string, function: oid, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: oid, function: string, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_function_privilege(user: oid, function: oid, privilege: string) → bool

Returns whether or not the user has privileges for function.

+
Stable
has_language_privilege(language: string, privilege: string) → bool

Returns whether or not the current user has privileges for language.

+
Stable
has_language_privilege(language: oid, privilege: string) → bool

Returns whether or not the current user has privileges for language.

+
Stable
has_language_privilege(user: string, language: string, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: string, language: oid, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: oid, language: string, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_language_privilege(user: oid, language: oid, privilege: string) → bool

Returns whether or not the user has privileges for language.

+
Stable
has_schema_privilege(schema: string, privilege: string) → bool

Returns whether or not the current user has privileges for schema.

+
Stable
has_schema_privilege(schema: oid, privilege: string) → bool

Returns whether or not the current user has privileges for schema.

+
Stable
has_schema_privilege(user: string, schema: string, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: string, schema: oid, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: oid, schema: string, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_schema_privilege(user: oid, schema: oid, privilege: string) → bool

Returns whether or not the user has privileges for schema.

+
Stable
has_sequence_privilege(sequence: string, privilege: string) → bool

Returns whether or not the current user has privileges for sequence.

+
Stable
has_sequence_privilege(sequence: oid, privilege: string) → bool

Returns whether or not the current user has privileges for sequence.

+
Stable
has_sequence_privilege(user: string, sequence: string, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: string, sequence: oid, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: oid, sequence: string, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_sequence_privilege(user: oid, sequence: oid, privilege: string) → bool

Returns whether or not the user has privileges for sequence.

+
Stable
has_server_privilege(server: string, privilege: string) → bool

Returns whether or not the current user has privileges for foreign server.

+
Stable
has_server_privilege(server: oid, privilege: string) → bool

Returns whether or not the current user has privileges for foreign server.

+
Stable
has_server_privilege(user: string, server: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: string, server: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: oid, server: string, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_server_privilege(user: oid, server: oid, privilege: string) → bool

Returns whether or not the user has privileges for foreign server.

+
Stable
has_system_privilege(privilege: string) → bool

Returns whether or not the current user has privileges for system.

+
Stable
has_system_privilege(user: string, privilege: string) → bool

Returns whether or not the user has privileges for system.

+
Stable
has_system_privilege(user: oid, privilege: string) → bool

Returns whether or not the user has privileges for system.

+
Stable
has_table_privilege(table: string, privilege: string) → bool

Returns whether or not the current user has privileges for table.

+
Stable
has_table_privilege(table: oid, privilege: string) → bool

Returns whether or not the current user has privileges for table.

+
Stable
has_table_privilege(user: string, table: string, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: string, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: oid, table: string, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_table_privilege(user: oid, table: oid, privilege: string) → bool

Returns whether or not the user has privileges for table.

+
Stable
has_tablespace_privilege(tablespace: string, privilege: string) → bool

Returns whether or not the current user has privileges for tablespace.

+
Stable
has_tablespace_privilege(tablespace: oid, privilege: string) → bool

Returns whether or not the current user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: string, tablespace: string, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: string, tablespace: oid, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: oid, tablespace: string, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_tablespace_privilege(user: oid, tablespace: oid, privilege: string) → bool

Returns whether or not the user has privileges for tablespace.

+
Stable
has_type_privilege(type: string, privilege: string) → bool

Returns whether or not the current user has privileges for type.

+
Stable
has_type_privilege(type: oid, privilege: string) → bool

Returns whether or not the current user has privileges for type.

+
Stable
has_type_privilege(user: string, type: string, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: string, type: oid, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: oid, type: string, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
has_type_privilege(user: oid, type: oid, privilege: string) → bool

Returns whether or not the user has privileges for type.

+
Stable
information_schema._pg_numeric_precision(typid: oid, typmod: int4) → int

Returns the precision of the given type with type modifier

+
Immutable
information_schema._pg_numeric_precision_radix(typid: oid, typmod: int4) → int

Returns the radix of the given type with type modifier

+
Immutable
information_schema._pg_numeric_scale(typid: oid, typmod: int4) → int

Returns the scale of the given type with type modifier

+
Immutable
makeaclitem(grantee: oid, grantor: oid, privileges: string, is_grantable: bool) → aclitem

Constructs an aclitem from the given grantee, grantor, privileges, and grant option.

+
Immutable
nameconcatoid(name: string, oid: oid) → name

Used in the information_schema to produce specific_name columns, which are supposed to be unique per schema. The result is the same as ($1::text || ‘_’ || $2::text)::name except that, if it would not fit in 63 characters, we make it do so by truncating the name input (not the oid).

+
Immutable
obj_description(object_oid: oid) → string

Returns the comment for a database object specified by its OID alone. This is deprecated since there is no guarantee that OIDs are unique across different system catalogs; therefore, the wrong comment might be returned.

+
Stable
obj_description(object_oid: oid, catalog_name: string) → string

Returns the comment for a database object specified by its OID and the name of the containing system catalog. For example, obj_description(123456, ‘pg_class’) would retrieve the comment for the table with OID 123456.

+
Stable
oidvectortypes(vector: oidvector) → string

Generates a comma-separated string of type names from an oidvector.

+
Stable
pg_backend_pid() → int

Returns a numerical ID attached to this session. This ID is part of the query cancellation key used by the wire protocol. This function was only added for compatibility, and unlike in Postgres, the returned value does not correspond to a real process ID.

+
Stable
pg_collation_for(str: anyelement) → string

Returns the collation of the argument

+
Stable
pg_column_is_updatable(reloid: oid, attnum: int2, include_triggers: bool) → bool

Returns whether the given column can be updated.

+
Stable
pg_column_size(any...) → int

Return size in bytes of the column provided as an argument

+
Stable
pg_function_is_visible(oid: oid) → bool

Returns whether the function with the given OID belongs to one of the schemas on the search path.

+
Stable
pg_get_constraintdef(constraint_oid: oid) → string

Returns the definition of the specified constraint.

+
Stable
pg_get_constraintdef(constraint_oid: oid, pretty_bool: bool) → string

Returns the definition of the specified constraint.

+
Stable
pg_get_function_arg_default(func_oid: oid, arg_num: int4) → string

Get textual representation of a function argument’s default value. The second argument of this function is the argument number among all arguments (i.e. proallargtypes, not proargtypes), starting with 1, because that’s how information_schema.sql uses it.

+
Stable
pg_get_function_arguments(func_oid: oid) → string

Returns the argument list (with defaults) necessary to identify a function, in the form it would need to appear in within CREATE FUNCTION.

+
Stable
pg_get_function_identity_arguments(func_oid: oid) → string

Returns the argument list (without defaults) necessary to identify a function, in the form it would need to appear in within ALTER FUNCTION, for instance.

+
Stable
pg_get_function_result(func_oid: oid) → string

Returns the types of the result of the specified function.

+
Stable
pg_get_functiondef(func_oid: oid) → string

For user-defined functions, returns the definition of the specified function. For builtin functions, returns the name of the function.

+
Stable
pg_get_indexdef(index_oid: oid) → string

Gets the CREATE INDEX command for index

+
Stable
pg_get_indexdef(index_oid: oid, column_no: int, pretty_bool: bool) → string

Gets the CREATE INDEX command for index, or definition of just one index column when given a non-zero column number

+
Stable
pg_get_serial_sequence(table_name: string, column_name: string) → string

Returns the name of the sequence used by the given column_name in the table table_name.

+
Stable
pg_get_triggerdef(trigger_oid: oid) → string

Returns the CREATE TRIGGER statement for the specified trigger.

+
Stable
pg_get_triggerdef(trigger_oid: oid, pretty_bool: bool) → string

Returns the CREATE TRIGGER statement for the specified trigger.

+
Stable
pg_get_viewdef(view_oid: oid) → string

Returns the CREATE statement for an existing view.

+
Stable
pg_get_viewdef(view_oid: oid, pretty_bool: bool) → string

Returns the CREATE statement for an existing view.

+
Stable
pg_has_role(role: string, privilege: string) → bool

Returns whether or not the current user has privileges for role.

+
Stable
pg_has_role(role: oid, privilege: string) → bool

Returns whether or not the current user has privileges for role.

+
Stable
pg_has_role(user: string, role: string, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: string, role: oid, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: oid, role: string, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_has_role(user: oid, role: oid, privilege: string) → bool

Returns whether or not the user has privileges for role.

+
Stable
pg_is_other_temp_schema(oid: oid) → bool

Returns true if the given OID is the OID of another session’s temporary schema. (This can be useful, for example, to exclude other sessions’ temporary tables from a catalog display.)

+
Stable
pg_my_temp_schema() → oid

Returns the OID of the current session’s temporary schema, or zero if it has none (because it has not created any temporary tables).

+
Stable
pg_relation_is_updatable(reloid: oid, include_triggers: bool) → int4

Returns the update events the relation supports.

+
Stable
pg_sequence_last_value(sequence_oid: oid) → int

Returns the last value generated by a sequence, or NULL if the sequence has not been used yet.

+
Volatile
pg_sleep(seconds: float) → bool

pg_sleep makes the current session’s process sleep until seconds seconds have elapsed. seconds is a value of type double precision, so fractional-second delays can be specified.

+
Volatile
pg_table_is_visible(oid: oid) → bool

Returns whether the table with the given OID belongs to one of the schemas on the search path.

+
Stable
pg_trigger_depth() → int

Returns the current nesting level of PostgreSQL triggers (0 if not called, directly or indirectly, from inside a trigger).

+
Volatile
pg_type_is_visible(oid: oid) → bool

Returns whether the type with the given OID belongs to one of the schemas on the search path.

+
Stable
set_config(setting_name: string, new_value: string, is_local: bool) → string

System info

+
Volatile
shobj_description(object_oid: oid, catalog_name: string) → string

Returns the comment for a shared database object specified by its OID and the name of the containing system catalog. This is just like obj_description except that it is used for retrieving comments on shared objects (e.g. databases).

+
Stable
+ diff --git a/src/current/_includes/cockroach-generated/release-26.2/sql/operators.md b/src/current/_includes/cockroach-generated/release-26.2/sql/operators.md new file mode 100644 index 00000000000..e79484b0754 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-26.2/sql/operators.md @@ -0,0 +1,684 @@ + + + + + +
#Return
int # intint
varbit # varbitvarbit
+ + + + +
#>Return
jsonb #> string[]jsonb
+ + + + +
#>>Return
jsonb #>> string[]string
+ + + + + + + + + +
%Return
decimal % decimaldecimal
decimal % intdecimal
float % floatfloat
int % decimaldecimal
int % intint
string % stringbool
+ + + + + + +
&Return
inet & inetinet
int & intint
varbit & varbitvarbit
+ + + + + + + + + +
&&Return
anyelement && anyelementbool
box2d && box2dbool
box2d && geometrybool
geometry && box2dbool
geometry && geometrybool
inet && inetbool
+ + + + + + + + + + + + + + + +
*Return
decimal * decimaldecimal
decimal * intdecimal
decimal * intervalinterval
float * floatfloat
float * intervalinterval
int * decimaldecimal
int * intint
int * intervalinterval
interval * decimalinterval
interval * floatinterval
interval * intinterval
vector * vectorvector
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Return
+decimaldecimal
+floatfloat
+intint
+intervalinterval
date + intdate
date + intervaltimestamp
date + timetimestamp
date + timetztimestamptz
decimal + decimaldecimal
decimal + intdecimal
decimal + pg_lsnpg_lsn
float + floatfloat
inet + intinet
int + datedate
int + decimaldecimal
int + inetinet
int + intint
interval + datetimestamp
interval + intervalinterval
interval + timetime
interval + timestamptimestamp
interval + timestamptztimestamptz
interval + timetztimetz
pg_lsn + decimalpg_lsn
time + datetimestamp
time + intervaltime
timestamp + intervaltimestamp
timestamptz + intervaltimestamptz
timetz + datetimestamptz
timetz + intervaltimetz
vector + vectorvector
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
-Return
-decimaldecimal
-floatfloat
-intint
-intervalinterval
date - dateint
date - intdate
date - intervaltimestamp
date - timetimestamp
decimal - decimaldecimal
decimal - intdecimal
float - floatfloat
inet - inetint
inet - intinet
int - decimaldecimal
int - intint
interval - intervalinterval
jsonb - intjsonb
jsonb - stringjsonb
jsonb - string[]jsonb
pg_lsn - decimalpg_lsn
pg_lsn - pg_lsndecimal
time - intervaltime
time - timeinterval
timestamp - intervaltimestamp
timestamp - timestampinterval
timestamp - timestamptzinterval
timestamptz - intervaltimestamptz
timestamptz - timestampinterval
timestamptz - timestamptzinterval
timetz - intervaltimetz
vector - vectorvector
+ + + + + +
->Return
jsonb -> intjsonb
jsonb -> stringjsonb
+ + + + + +
->>Return
jsonb ->> intstring
jsonb ->> stringstring
+ + + + + + + + + + +
/Return
decimal / decimaldecimal
decimal / intdecimal
float / floatfloat
int / decimaldecimal
int / intdecimal
interval / floatinterval
interval / intinterval
+ + + + + + + + +
//Return
decimal // decimaldecimal
decimal // intdecimal
float // floatfloat
int // decimaldecimal
int // intint
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
<Return
anyenum < anyenumbool
bool < boolbool
bool[] < bool[]bool
box2d < box2dbool
bpchar < bpcharbool
bytes < bytesbool
bytes[] < bytes[]bool
collatedstring < collatedstringbool
collatedstring{*} < collatedstring{*}bool
date < datebool
date < timestampbool
date < timestamptzbool
date[] < date[]bool
decimal < decimalbool
decimal < floatbool
decimal < intbool
decimal[] < decimal[]bool
float < decimalbool
float < floatbool
float < intbool
float[] < float[]bool
geography < geographybool
geometry < geometrybool
inet < inetbool
inet[] < inet[]bool
int < decimalbool
int < floatbool
int < intbool
int < oidbool
int[] < int[]bool
interval < intervalbool
interval[] < interval[]bool
jsonb < jsonbbool
ltree < ltreebool
oid < intbool
oid < oidbool
pg_lsn < pg_lsnbool
refcursor < refcursorbool
string < stringbool
string[] < string[]bool
time < timebool
time < timetzbool
time[] < time[]bool
timestamp < datebool
timestamp < timestampbool
timestamp < timestamptzbool
timestamp[] < timestamp[]bool
timestamptz < datebool
timestamptz < timestampbool
timestamptz < timestamptzbool
timestamptz < timestamptzbool
timetz < timebool
timetz < timetzbool
tuple < tuplebool
uuid < uuidbool
uuid[] < uuid[]bool
varbit < varbitbool
vector < vectorbool
+ + + + +
<#>Return
vector <#> vectorfloat
+ + + + +
<->Return
vector <-> vectorfloat
+ + + + + + +
<<Return
inet << inetbool
int << intint
varbit << intvarbit
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
<=Return
anyenum <= anyenumbool
bool <= boolbool
bool[] <= bool[]bool
box2d <= box2dbool
bpchar <= bpcharbool
bytes <= bytesbool
bytes[] <= bytes[]bool
collatedstring <= collatedstringbool
collatedstring{*} <= collatedstring{*}bool
date <= datebool
date <= timestampbool
date <= timestamptzbool
date[] <= date[]bool
decimal <= decimalbool
decimal <= floatbool
decimal <= intbool
decimal[] <= decimal[]bool
float <= decimalbool
float <= floatbool
float <= intbool
float[] <= float[]bool
geography <= geographybool
geometry <= geometrybool
inet <= inetbool
inet[] <= inet[]bool
int <= decimalbool
int <= floatbool
int <= intbool
int <= oidbool
int[] <= int[]bool
interval <= intervalbool
interval[] <= interval[]bool
jsonb <= jsonbbool
ltree <= ltreebool
oid <= intbool
oid <= oidbool
pg_lsn <= pg_lsnbool
refcursor <= refcursorbool
string <= stringbool
string[] <= string[]bool
time <= timebool
time <= timetzbool
time[] <= time[]bool
timestamp <= datebool
timestamp <= timestampbool
timestamp <= timestamptzbool
timestamp[] <= timestamp[]bool
timestamptz <= datebool
timestamptz <= timestampbool
timestamptz <= timestamptzbool
timestamptz <= timestamptzbool
timetz <= timebool
timetz <= timetzbool
tuple <= tuplebool
uuid <= uuidbool
uuid[] <= uuid[]bool
varbit <= varbitbool
vector <= vectorbool
+ + + + +
<=>Return
vector <=> vectorfloat
+ + + + + + +
<@Return
anyelement <@ anyelementbool
jsonb <@ jsonbbool
ltree <@ ltreebool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
=Return
anyenum = anyenumbool
bool = boolbool
bool[] = bool[]bool
box2d = box2dbool
bpchar = bpcharbool
bytes = bytesbool
bytes[] = bytes[]bool
collatedstring = collatedstringbool
collatedstring{*} = collatedstring{*}bool
date = datebool
date = timestampbool
date = timestamptzbool
date[] = date[]bool
decimal = decimalbool
decimal = floatbool
decimal = intbool
decimal[] = decimal[]bool
float = decimalbool
float = floatbool
float = intbool
float[] = float[]bool
geography = geographybool
geometry = geometrybool
inet = inetbool
inet[] = inet[]bool
int = decimalbool
int = floatbool
int = intbool
int = oidbool
int[] = int[]bool
interval = intervalbool
interval[] = interval[]bool
jsonb = jsonbbool
ltree = ltreebool
oid = intbool
oid = oidbool
pg_lsn = pg_lsnbool
refcursor = refcursorbool
string = stringbool
string[] = string[]bool
time = timebool
time = timetzbool
time[] = time[]bool
timestamp = datebool
timestamp = timestampbool
timestamp = timestamptzbool
timestamp[] = timestamp[]bool
timestamptz = datebool
timestamptz = timestampbool
timestamptz = timestamptzbool
timestamptz = timestamptzbool
timetz = timebool
timetz = timetzbool
tsquery = tsquerybool
tsvector = tsvectorbool
tuple = tuplebool
uuid = uuidbool
uuid[] = uuid[]bool
varbit = varbitbool
vector = vectorbool
+ + + + + + +
>>Return
inet >> inetbool
int >> intint
varbit >> intvarbit
+ + + + +
?Return
jsonb ? stringbool
+ + + + +
?&Return
jsonb ?& string[]bool
+ + + + +
?<@Return
ltree ?<@ ltreeltree
+ + + + +
?@>Return
ltree ?@> ltreeltree
+ + + + +
?|Return
jsonb ?| string[]bool
+ + + + + + +
@>Return
anyelement @> anyelementbool
jsonb @> jsonbbool
ltree @> ltreebool
+ + + + + +
@@Return
tsquery @@ tsvectorbool
tsvector @@ tsquerybool
+ + + + +
ILIKEReturn
string ILIKE stringbool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
INReturn
anyenum IN tuplebool
bool IN tuplebool
box2d IN tuplebool
bpchar IN tuplebool
bytes IN tuplebool
collatedstring IN tuplebool
date IN tuplebool
decimal IN tuplebool
float IN tuplebool
geography IN tuplebool
geometry IN tuplebool
inet IN tuplebool
int IN tuplebool
interval IN tuplebool
jsonb IN tuplebool
ltree IN tuplebool
oid IN tuplebool
pg_lsn IN tuplebool
refcursor IN tuplebool
string IN tuplebool
time IN tuplebool
timestamp IN tuplebool
timestamptz IN tuplebool
timetz IN tuplebool
tuple IN tuplebool
uuid IN tuplebool
varbit IN tuplebool
vector IN tuplebool
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
IS NOT DISTINCT FROMReturn
anyelement IS NOT DISTINCT FROM unknownbool
anyenum IS NOT DISTINCT FROM anyenumbool
bool IS NOT DISTINCT FROM boolbool
bool[] IS NOT DISTINCT FROM bool[]bool
box2d IS NOT DISTINCT FROM box2dbool
bpchar IS NOT DISTINCT FROM bpcharbool
bytes IS NOT DISTINCT FROM bytesbool
bytes[] IS NOT DISTINCT FROM bytes[]bool
collatedstring IS NOT DISTINCT FROM collatedstringbool
collatedstring{*} IS NOT DISTINCT FROM collatedstring{*}bool
date IS NOT DISTINCT FROM datebool
date IS NOT DISTINCT FROM timestampbool
date IS NOT DISTINCT FROM timestamptzbool
date[] IS NOT DISTINCT FROM date[]bool
decimal IS NOT DISTINCT FROM decimalbool
decimal IS NOT DISTINCT FROM floatbool
decimal IS NOT DISTINCT FROM intbool
decimal[] IS NOT DISTINCT FROM decimal[]bool
float IS NOT DISTINCT FROM decimalbool
float IS NOT DISTINCT FROM floatbool
float IS NOT DISTINCT FROM intbool
float[] IS NOT DISTINCT FROM float[]bool
geography IS NOT DISTINCT FROM geographybool
geometry IS NOT DISTINCT FROM geometrybool
inet IS NOT DISTINCT FROM inetbool
inet[] IS NOT DISTINCT FROM inet[]bool
int IS NOT DISTINCT FROM decimalbool
int IS NOT DISTINCT FROM floatbool
int IS NOT DISTINCT FROM intbool
int IS NOT DISTINCT FROM oidbool
int[] IS NOT DISTINCT FROM int[]bool
interval IS NOT DISTINCT FROM intervalbool
interval[] IS NOT DISTINCT FROM interval[]bool
jsonb IS NOT DISTINCT FROM jsonbbool
jsonpath IS NOT DISTINCT FROM jsonpathbool
ltree IS NOT DISTINCT FROM ltreebool
oid IS NOT DISTINCT FROM intbool
oid IS NOT DISTINCT FROM oidbool
pg_lsn IS NOT DISTINCT FROM pg_lsnbool
refcursor IS NOT DISTINCT FROM refcursorbool
string IS NOT DISTINCT FROM stringbool
string[] IS NOT DISTINCT FROM string[]bool
time IS NOT DISTINCT FROM timebool
time IS NOT DISTINCT FROM timetzbool
time[] IS NOT DISTINCT FROM time[]bool
timestamp IS NOT DISTINCT FROM datebool
timestamp IS NOT DISTINCT FROM timestampbool
timestamp IS NOT DISTINCT FROM timestamptzbool
timestamp[] IS NOT DISTINCT FROM timestamp[]bool
timestamptz IS NOT DISTINCT FROM datebool
timestamptz IS NOT DISTINCT FROM timestampbool
timestamptz IS NOT DISTINCT FROM timestamptzbool
timestamptz IS NOT DISTINCT FROM timestamptzbool
timetz IS NOT DISTINCT FROM timebool
timetz IS NOT DISTINCT FROM timetzbool
tsquery IS NOT DISTINCT FROM tsquerybool
tsvector IS NOT DISTINCT FROM tsvectorbool
tuple IS NOT DISTINCT FROM tuplebool
unknown IS NOT DISTINCT FROM unknownbool
unknown IS NOT DISTINCT FROM voidbool
uuid IS NOT DISTINCT FROM uuidbool
uuid[] IS NOT DISTINCT FROM uuid[]bool
varbit IS NOT DISTINCT FROM varbitbool
vector IS NOT DISTINCT FROM vectorbool
void IS NOT DISTINCT FROM unknownbool
+ + + + + +
LIKEReturn
collatedstring LIKE collatedstringbool
string LIKE stringbool
+ + + + +
SIMILAR TOReturn
string SIMILAR TO stringbool
+ + + + + + + + +
^Return
decimal ^ decimaldecimal
decimal ^ intdecimal
float ^ floatfloat
int ^ decimaldecimal
int ^ intint
+ + + + + + +
|Return
inet | inetinet
int | intint
varbit | varbitvarbit
+ + + + + +
|/Return
|/decimaldecimal
|/floatfloat
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
||Return
bool || bool[]bool[]
bool || stringstring
bool[] || boolbool[]
bool[] || bool[]bool[]
box2d || box2dbox2d
box2d || stringstring
bytes || bytesbytes
bytes || bytes[]bytes[]
bytes[] || bytesbytes[]
bytes[] || bytes[]bytes[]
date || date[]date[]
date || stringstring
date[] || datedate[]
date[] || date[]date[]
decimal || decimal[]decimal[]
decimal || stringstring
decimal[] || decimaldecimal[]
decimal[] || decimal[]decimal[]
float || float[]float[]
float || stringstring
float[] || floatfloat[]
float[] || float[]float[]
geography || geographygeography
geography || stringstring
geometry || geometrygeometry
geometry || stringstring
inet || inet[]inet[]
inet || stringstring
inet[] || inetinet[]
inet[] || inet[]inet[]
int || int[]int[]
int || stringstring
int[] || intint[]
int[] || int[]int[]
interval || interval[]interval[]
interval || stringstring
interval[] || intervalinterval[]
interval[] || interval[]interval[]
jsonb || jsonbjsonb
jsonb || stringstring
ltree || ltreeltree
ltree || stringstring
oid || oidoid
oid || stringstring
pg_lsn || pg_lsnpg_lsn
pg_lsn || stringstring
refcursor || refcursorrefcursor
refcursor || stringstring
string || boolstring
string || box2dstring
string || datestring
string || decimalstring
string || floatstring
string || geographystring
string || geometrystring
string || inetstring
string || intstring
string || intervalstring
string || jsonbstring
string || ltreestring
string || oidstring
string || pg_lsnstring
string || refcursorstring
string || stringstring
string || string[]string[]
string || timestring
string || timestampstring
string || timestamptzstring
string || timetzstring
string || tuplestring
string || uuidstring
string || varbitstring
string[] || stringstring[]
string[] || string[]string[]
time || stringstring
time || time[]time[]
time[] || timetime[]
time[] || time[]time[]
timestamp || stringstring
timestamp || timestamp[]timestamp[]
timestamp[] || timestamptimestamp[]
timestamp[] || timestamp[]timestamp[]
timestamptz || stringstring
timestamptz || timestamptztimestamptz
timestamptz || timestamptztimestamptz
timestamptz || timestamptztimestamptz
timetz || stringstring
timetz || timetztimetz
tuple || stringstring
uuid || stringstring
uuid || uuid[]uuid[]
uuid[] || uuiduuid[]
uuid[] || uuid[]uuid[]
varbit || stringstring
varbit || varbitvarbit
+ + + + + +
||/Return
||/decimaldecimal
||/floatfloat
+ + + + + + + + + + + +
~Return
~inetinet
~intint
~varbitvarbit
box2d ~ box2dbool
box2d ~ geometrybool
geometry ~ box2dbool
geometry ~ geometrybool
string ~ stringbool
+ + + + +
~*Return
string ~* stringbool
diff --git a/src/current/_includes/cockroach-generated/release-26.2/sql/window_functions.md b/src/current/_includes/cockroach-generated/release-26.2/sql/window_functions.md new file mode 100644 index 00000000000..321cc02ccf9 --- /dev/null +++ b/src/current/_includes/cockroach-generated/release-26.2/sql/window_functions.md @@ -0,0 +1,431 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Function → ReturnsDescriptionVolatility
cume_dist() → float

Calculates the relative rank of the current row: (number of rows preceding or peer with current row) / (total rows).

+
Immutable
dense_rank() → int

Calculates the rank of the current row without gaps; this function counts peer groups.

+
Immutable
first_value(val: bool) → bool

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: bytes) → bytes

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: date) → date

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: decimal) → decimal

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: float) → float

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: inet) → inet

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: int) → int

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: interval) → interval

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: string) → string

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: time) → time

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timestamp) → timestamp

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timestamptz) → timestamptz

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: uuid) → uuid

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: box2d) → box2d

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: geography) → geography

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: geometry) → geometry

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: jsonb) → jsonb

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: ltree) → ltree

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: oid) → oid

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: pg_lsn) → pg_lsn

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: refcursor) → refcursor

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: timetz) → timetz

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
first_value(val: varbit) → varbit

Returns val evaluated at the row that is the first row of the window frame.

+
Immutable
lag(val: bool) → bool

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: bool, n: int) → bool

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: bool, n: int, default: bool) → bool

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: bytes) → bytes

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: bytes, n: int) → bytes

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: bytes, n: int, default: bytes) → bytes

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: date) → date

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: date, n: int) → date

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: date, n: int, default: date) → date

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: decimal) → decimal

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: decimal, n: int) → decimal

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: decimal, n: int, default: decimal) → decimal

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: float) → float

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: float, n: int) → float

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: float, n: int, default: float) → float

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: inet) → inet

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: inet, n: int) → inet

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: inet, n: int, default: inet) → inet

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: int) → int

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: int, n: int) → int

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: int, n: int, default: int) → int

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: interval) → interval

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: interval, n: int) → interval

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: interval, n: int, default: interval) → interval

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: string) → string

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: string, n: int) → string

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: string, n: int, default: string) → string

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: time) → time

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: time, n: int) → time

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: time, n: int, default: time) → time

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timestamp) → timestamp

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timestamp, n: int, default: timestamp) → timestamp

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timestamptz) → timestamptz

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timestamptz, n: int, default: timestamptz) → timestamptz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: uuid) → uuid

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: uuid, n: int) → uuid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: uuid, n: int, default: uuid) → uuid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: box2d) → box2d

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: box2d, n: int) → box2d

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: box2d, n: int, default: box2d) → box2d

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: geography) → geography

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: geography, n: int) → geography

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: geography, n: int, default: geography) → geography

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: geometry) → geometry

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: geometry, n: int) → geometry

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: geometry, n: int, default: geometry) → geometry

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: jsonb) → jsonb

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: jsonb, n: int, default: jsonb) → jsonb

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: ltree) → ltree

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: ltree, n: int) → ltree

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: ltree, n: int, default: ltree) → ltree

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: oid) → oid

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: oid, n: int) → oid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: oid, n: int, default: oid) → oid

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: pg_lsn) → pg_lsn

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: pg_lsn, n: int, default: pg_lsn) → pg_lsn

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: refcursor) → refcursor

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: refcursor, n: int, default: refcursor) → refcursor

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: timetz) → timetz

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: timetz, n: int) → timetz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: timetz, n: int, default: timetz) → timetz

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lag(val: varbit) → varbit

Returns val evaluated at the previous row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lag(val: varbit, n: int) → varbit

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lag(val: varbit, n: int, default: varbit) → varbit

Returns val evaluated at the row that is n rows before the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
last_value(val: bool) → bool

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: bytes) → bytes

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: date) → date

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: decimal) → decimal

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: float) → float

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: inet) → inet

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: int) → int

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: interval) → interval

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: string) → string

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: time) → time

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timestamp) → timestamp

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timestamptz) → timestamptz

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: uuid) → uuid

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: box2d) → box2d

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: geography) → geography

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: geometry) → geometry

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: jsonb) → jsonb

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: ltree) → ltree

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: oid) → oid

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: pg_lsn) → pg_lsn

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: refcursor) → refcursor

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: timetz) → timetz

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
last_value(val: varbit) → varbit

Returns val evaluated at the row that is the last row of the window frame.

+
Immutable
lead(val: bool) → bool

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: bool, n: int) → bool

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: bool, n: int, default: bool) → bool

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: bytes) → bytes

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: bytes, n: int) → bytes

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: bytes, n: int, default: bytes) → bytes

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: date) → date

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: date, n: int) → date

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: date, n: int, default: date) → date

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: decimal) → decimal

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: decimal, n: int) → decimal

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: decimal, n: int, default: decimal) → decimal

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: float) → float

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: float, n: int) → float

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: float, n: int, default: float) → float

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: inet) → inet

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: inet, n: int) → inet

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: inet, n: int, default: inet) → inet

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: int) → int

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: int, n: int) → int

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: int, n: int, default: int) → int

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: interval) → interval

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: interval, n: int) → interval

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: interval, n: int, default: interval) → interval

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: string) → string

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: string, n: int) → string

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: string, n: int, default: string) → string

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: time) → time

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: time, n: int) → time

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: time, n: int, default: time) → time

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timestamp) → timestamp

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timestamp, n: int, default: timestamp) → timestamp

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timestamptz) → timestamptz

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timestamptz, n: int, default: timestamptz) → timestamptz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: uuid) → uuid

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: uuid, n: int) → uuid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: uuid, n: int, default: uuid) → uuid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: box2d) → box2d

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: box2d, n: int) → box2d

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: box2d, n: int, default: box2d) → box2d

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: geography) → geography

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: geography, n: int) → geography

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: geography, n: int, default: geography) → geography

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: geometry) → geometry

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: geometry, n: int) → geometry

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: geometry, n: int, default: geometry) → geometry

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: jsonb) → jsonb

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: jsonb, n: int, default: jsonb) → jsonb

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: ltree) → ltree

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: ltree, n: int) → ltree

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: ltree, n: int, default: ltree) → ltree

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: oid) → oid

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: oid, n: int) → oid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: oid, n: int, default: oid) → oid

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: pg_lsn) → pg_lsn

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: pg_lsn, n: int, default: pg_lsn) → pg_lsn

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: refcursor) → refcursor

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: refcursor, n: int, default: refcursor) → refcursor

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: timetz) → timetz

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: timetz, n: int) → timetz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: timetz, n: int, default: timetz) → timetz

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
lead(val: varbit) → varbit

Returns val evaluated at the following row within current row’s partition; if there is no such row, instead returns null.

+
Immutable
lead(val: varbit, n: int) → varbit

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns null. n is evaluated with respect to the current row.

+
Immutable
lead(val: varbit, n: int, default: varbit) → varbit

Returns val evaluated at the row that is n rows after the current row within its partition; if there is no such row, instead returns default (which must be of the same type as val). Both n and default are evaluated with respect to the current row.

+
Immutable
nth_value(val: bool, n: int) → bool

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: bytes, n: int) → bytes

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: date, n: int) → date

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: decimal, n: int) → decimal

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: float, n: int) → float

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: inet, n: int) → inet

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: int, n: int) → int

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: interval, n: int) → interval

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: string, n: int) → string

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: time, n: int) → time

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timestamp, n: int) → timestamp

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timestamptz, n: int) → timestamptz

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: uuid, n: int) → uuid

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: box2d, n: int) → box2d

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: geography, n: int) → geography

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: geometry, n: int) → geometry

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: jsonb, n: int) → jsonb

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: ltree, n: int) → ltree

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: oid, n: int) → oid

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: pg_lsn, n: int) → pg_lsn

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: refcursor, n: int) → refcursor

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: timetz, n: int) → timetz

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
nth_value(val: varbit, n: int) → varbit

Returns val evaluated at the row that is the nth row of the window frame (counting from 1); null if no such row.

+
Immutable
ntile(n: int) → int

Calculates an integer ranging from 1 to n, dividing the partition as equally as possible.

+
Immutable
percent_rank() → float

Calculates the relative rank of the current row: (rank - 1) / (total rows - 1).

+
Immutable
rank() → int

Calculates the rank of the current row with gaps; same as row_number of its first peer.

+
Immutable
row_number() → int

Calculates the number of the current row within its partition, counting from 1.

+
Immutable
+ diff --git a/src/current/_includes/releases/new-release-downloads-docker-image.md b/src/current/_includes/releases/new-release-downloads-docker-image.md index b9c6218832d..dea4008dc4f 100644 --- a/src/current/_includes/releases/new-release-downloads-docker-image.md +++ b/src/current/_includes/releases/new-release-downloads-docker-image.md @@ -96,11 +96,11 @@ docker pull {{ release.docker.docker_image }}:{{ release.release_name }}

Source tag

-To view or download the source code for CockroachDB {{ release.release_name }} on Github, visit {{ release.release_name }} source tag. +To view or download the source code for CockroachDB {{ release.release_name }} on Github, visit {{ release.release_name }} source tag. {% endif %} {% if release.previous_release %}

Changelog

-View a detailed changelog on GitHub: [{{ release.previous_release }}...{{ release.release_name }}](https://github.com/cockroachdb/cockroach/compare/{{ release.previous_release }}...{{ release.release_name }}) +View a detailed changelog on GitHub: {{ release.previous_release }}...{{ release.release_name }} {% endif %} diff --git a/src/current/_includes/releases/release-downloads-docker-image.md b/src/current/_includes/releases/release-downloads-docker-image.md index 95a811c3d33..b47dcd95151 100644 --- a/src/current/_includes/releases/release-downloads-docker-image.md +++ b/src/current/_includes/releases/release-downloads-docker-image.md @@ -41,7 +41,7 @@ Experimental downloads are not qualified for production use and not eligible for -Source {% if release.has_sql_only == true %} @@ -63,7 +63,7 @@ Experimental downloads are not qualified for production use and not eligible for {% endif %} -Source {% endif %} @@ -97,7 +97,7 @@ docker pull {{ release.docker.docker_image }}:{{ release.release_name }} {% if release.previous_release %}

Changelog

-View a detailed changelog on GitHub: [{{ release.previous_release }}...{{ release.release_name }}](https://github.com/cockroachdb/cockroach/compare/{{ release.previous_release }}...{{ release.release_name }}) +View a detailed changelog on GitHub: {{ release.previous_release }}...{{ release.release_name }} {% endif %} {% endif %} diff --git a/src/current/_includes/releases/v23.1/v23.1.0-alpha.1.md b/src/current/_includes/releases/v23.1/v23.1.0-alpha.1.md index a393b032646..b8e3f09b3e5 100644 --- a/src/current/_includes/releases/v23.1/v23.1.0-alpha.1.md +++ b/src/current/_includes/releases/v23.1/v23.1.0-alpha.1.md @@ -6,129 +6,129 @@ Release Date: December 19, 2022

Backward-incompatible changes

-- Replaced the `cdc_prev()` [function]({% link v23.1/functions-and-operators.md %}) in favor of a `cdc_prev` tuple. This is an incompatible change that may break [changefeeds]({% link v23.1/change-data-capture-overview.md %}) that use the previous `cdc_prev()` function. [#85177][#85177] -- [`SHOW RANGES FOR TABLE`]({% link v23.1/show-ranges.md %}) now includes rows for all indexes that support the table. Prior to this change, `SHOW RANGES FOR TABLE foo` was an alias for `SHOW RANGES FOR INDEX foo@primary`. This was causing confusion, as it would miss data for secondary indexes. It is still possible to filter to just the primary index using `SHOW RANGES FOR INDEX foo@primary`. The statement output now also includes the index name. [#93545][#93545] +- Replaced the `cdc_prev()` [function]({% link v23.1/functions-and-operators.md %}) in favor of a `cdc_prev` tuple. This is an incompatible change that may break [changefeeds]({% link v23.1/change-data-capture-overview.md %}) that use the previous `cdc_prev()` function. #85177 +- [`SHOW RANGES FOR TABLE`]({% link v23.1/show-ranges.md %}) now includes rows for all indexes that support the table. Prior to this change, `SHOW RANGES FOR TABLE foo` was an alias for `SHOW RANGES FOR INDEX foo@primary`. This was causing confusion, as it would miss data for secondary indexes. It is still possible to filter to just the primary index using `SHOW RANGES FOR INDEX foo@primary`. The statement output now also includes the index name. #93545

Security updates

-- It is now possible to create [TLS client certificates](https://www.cockroachlabs.com/docs/v23.1/security-reference/transport-layer-security) for identity principals that are not a valid SQL username using [`cockroach cert create-client`]({% link v23.1/cockroach-cert.md %}) and the new flag `--disable-username-validation`. This is meant to be used in combination with the [cluster setting]({% link v23.1/cluster-settings.md %}) `server.identity_map.configuration` and the `map` option in HBA rules (`server.host_based_authentication.configuration`). To test this feature, use `cockroach sql`. Cockroach Labs recommends passing the username separately from the connection URL. [#90439][#90439] -- There is a new SQL [session variable]({% link v23.1/set-vars.md %}) `system_identity` defined to contain the identity principal presented by the SQL clients during the initial connection. This may be different from `session_user` when the identity was mapped, either using GSSAPI and `include_realm=0`, or when using an identity map. [#90439][#90439] +- It is now possible to create [TLS client certificates](https://www.cockroachlabs.com/docs/v23.1/security-reference/transport-layer-security) for identity principals that are not a valid SQL username using [`cockroach cert create-client`]({% link v23.1/cockroach-cert.md %}) and the new flag `--disable-username-validation`. This is meant to be used in combination with the [cluster setting]({% link v23.1/cluster-settings.md %}) `server.identity_map.configuration` and the `map` option in HBA rules (`server.host_based_authentication.configuration`). To test this feature, use `cockroach sql`. Cockroach Labs recommends passing the username separately from the connection URL. #90439 +- There is a new SQL [session variable]({% link v23.1/set-vars.md %}) `system_identity` defined to contain the identity principal presented by the SQL clients during the initial connection. 
This may be different from `session_user` when the identity was mapped, either using GSSAPI and `include_realm=0`, or when using an identity map. #90439

General changes

-- Upgraded gRPC to v1.49.0 [#88562][#88562] -- The connection timeout for cluster-internal connections between nodes has been reduced to 5s to potentially reduce the impact of network issues. Previously, CockroachDB employed a 20s connection timeout. [#88625][#88625] -- CockroachDB no longer shares a TCP connection for the KV and Gossip subsystems. Each subsystem now uses their own connection, so the total number of outgoing and incoming TCP connections at each node in the cluster will increase by 30 to 50 percent. [#88625][#88625] -- Bulk operations now log the (sanitized) destinations they are connecting to. For example: `backup planning to connect to destination gs://test/backupadhoc?AUTH=specified&CREDENTIALS=redacted`. [#89531][#89531] +- Upgraded gRPC to v1.49.0 #88562 +- The connection timeout for cluster-internal connections between nodes has been reduced to 5s to potentially reduce the impact of network issues. Previously, CockroachDB employed a 20s connection timeout. #88625 +- CockroachDB no longer shares a TCP connection for the KV and Gossip subsystems. Each subsystem now uses their own connection, so the total number of outgoing and incoming TCP connections at each node in the cluster will increase by 30 to 50 percent. #88625 +- Bulk operations now log the (sanitized) destinations they are connecting to. For example: `backup planning to connect to destination gs://test/backupadhoc?AUTH=specified&CREDENTIALS=redacted`. #89531

{{ site.data.products.enterprise }} edition changes

-- [`SHOW CHANGEFEED JOBS`]({% link v23.1/show-jobs.md %}#show-changefeed-jobs) no longer reveals Confluent schema registry user information, including a user's secret key. This information is now redacted, meaning it will not be stored in CockroachDB internal tables at all. [#86603][#86603] -- [Changefeeds]({% link v23.1/change-data-capture-overview.md %}) JSON encoder performance is improved by 50%. [#88064][#88064] -- Changefeeds, using cloud storage sink, now have better throughput. [#88395][#88395] -- Added the [cluster setting]({% link v23.1/cluster-settings.md %}) `changefeed.event_consumer_workers` which allows changefeeds to process events concurrently. [#87994][#87994] -- Changefeed) exports are up to 25% faster due to uniform work assignment. [#88672][#88672] -- Changefeeds can emit files compressed with the ZSTD algorithm, which provides good compression and is much faster than GZIP. In addition, a new, faster implementation of GZIP is used by default. [#88635][#88635] -- When a changefeed is run with the option `virtual_columns = "null"`, the virtual column will be ordered last in each row. [#89329][#89329] -- For Kafka [sinks]({% link v23.1/changefeed-sinks.md %}), you can now add the optional JSON field `"Compression"` to the `kafka_sink_config` option. This field can be set to `"none"` (default), `"GZIP"`, `"SNAPPY"`, `"LZ4"`, or `"ZSTD"`. Setting this field will result in the specified compression protocol to be used when emitting events. [#90270][#90270] -- Fixed a bug that could cause changefeeds to fail during a rolling restart. [#89913][#89913] -- Changefeeds will now treat all errors, unless otherwise indicated, as retryable errors. [#90810][#90810] -- CockroachDB now supports passing in the optional external ID when [assuming a role]({% link v23.1/cloud-storage-authentication.md %}). This is done by extending the values of the comma-separated string value of the `ASSUME_ROLE` parameter to the format `;external_id=`. 
Users can still use the previous format of just `` to specify a role without any external ID. When using role chaining, each role in the chain can be associated with a different external ID. [#91040][#91040] -- [JWT authentication]({% link v23.1/sso-sql.md %}) [cluster settings]({% link v23.1/cluster-settings.md %}) can now be modified from within tenants to better support serverless use cases. [#92406][#92406] -- [CDC queries]({% link v23.1/cdc-queries.md %}) are now planned and evaluated using the SQL optimizer and distSQL execution. The state of the previous row is now exposed as the `cdc_prev` tuple. [#85177][#85177] -- Changefeeds no longer require the `COCKROACH_EXPERIMENTAL_ENABLE_PER_CHANGEFEED_METRICS` environment variable to be set in order to use the `metrics_label` option. [#93423][#93423] -- Changefeeds can now be scheduled at intervals specified in crontab notation. [#92232][#92232] +- [`SHOW CHANGEFEED JOBS`]({% link v23.1/show-jobs.md %}#show-changefeed-jobs) no longer reveals Confluent schema registry user information, including a user's secret key. This information is now redacted, meaning it will not be stored in CockroachDB internal tables at all. #86603 +- [Changefeeds]({% link v23.1/change-data-capture-overview.md %}) JSON encoder performance is improved by 50%. #88064 +- Changefeeds, using cloud storage sink, now have better throughput. #88395 +- Added the [cluster setting]({% link v23.1/cluster-settings.md %}) `changefeed.event_consumer_workers` which allows changefeeds to process events concurrently. #87994 +- Changefeed exports are up to 25% faster due to uniform work assignment. #88672 +- Changefeeds can emit files compressed with the ZSTD algorithm, which provides good compression and is much faster than GZIP. In addition, a new, faster implementation of GZIP is used by default. #88635 +- When a changefeed is run with the option `virtual_columns = "null"`, the virtual column will be ordered last in each row. 
#89329 +- For Kafka [sinks]({% link v23.1/changefeed-sinks.md %}), you can now add the optional JSON field `"Compression"` to the `kafka_sink_config` option. This field can be set to `"none"` (default), `"GZIP"`, `"SNAPPY"`, `"LZ4"`, or `"ZSTD"`. Setting this field will result in the specified compression protocol to be used when emitting events. #90270 +- Fixed a bug that could cause changefeeds to fail during a rolling restart. #89913 +- Changefeeds will now treat all errors, unless otherwise indicated, as retryable errors. #90810 +- CockroachDB now supports passing in the optional external ID when [assuming a role]({% link v23.1/cloud-storage-authentication.md %}). This is done by extending the values of the comma-separated string value of the `ASSUME_ROLE` parameter to the format `;external_id=`. Users can still use the previous format of just `` to specify a role without any external ID. When using role chaining, each role in the chain can be associated with a different external ID. #91040 +- [JWT authentication]({% link v23.1/sso-sql.md %}) [cluster settings]({% link v23.1/cluster-settings.md %}) can now be modified from within tenants to better support serverless use cases. #92406 +- [CDC queries]({% link v23.1/cdc-queries.md %}) are now planned and evaluated using the SQL optimizer and distSQL execution. The state of the previous row is now exposed as the `cdc_prev` tuple. #85177 +- Changefeeds no longer require the `COCKROACH_EXPERIMENTAL_ENABLE_PER_CHANGEFEED_METRICS` environment variable to be set in order to use the `metrics_label` option. #93423 +- Changefeeds can now be scheduled at intervals specified in crontab notation. #92232

SQL language changes

-- Added the `crdb_internal.to_json_as_changefeed_with_flags` [function]({% link v23.1/functions-and-operators.md %}) to help debug JSON [changefeeds]({% link v23.1/change-data-capture-overview.md %}). [#84509][#84509] -- Added the `regions` column to the [`SHOW BACKUP`]({% link v23.1/show-backup.md %}) command which will output a string of `ALTER DATABASE` commands if the database is a multi-region database and `NULL` for everything else. Previously, the user did not have an easy way to see if a backed up database is multi-region. [#88136][#88136] -- Types with length modifiers can now be used to prefix literals. [#82028][#82028] -- A new column `plan_gist` was added to `crdb_internal.{node,cluster}_queries` representing the compressed logical plan. [#88770][#88770] -- You can generate easy-to-read [`CREATE INDEX`]({% link v23.1/create-index.md %}) statements for a table's (secondary) indexes using the `SHOW CREATE INDEXES FROM ` and `SHOW CREATE SECONDARY INDEXES FROM ` statements. [#88861][#88861] -- `enum_first`, `enum_last`, and `enum_range` may now take `NULL` arguments as long as their type can be inferred from the expression. [#89124][#89124] -- [Declarative schema changer]({% link v23.1/online-schema-changes.md %}) support for `ALTER PRIMARY KEY` statements now extends to tables which have secondary indexes. [#86176][#86176] -- The `backup.restore_span.target_size` cluster setting now defaults to `384 MiB `. This should reduce the number of ranges created during [restore]({% link v23.1/restore.md %}) and thereby reduce the merging of ranges that needs to occur after the restore. [#89333][#89333] -- The `transaction_timeout` [session variable]({% link v23.1/set-vars.md %}) was added. `transaction_timeout` aborts an explicit transaction when it runs longer than the configured duration. When the timer times out, the current statement is cancelled and the transaction enters an aborted state. 
This timeout does not have any effect when no statement is being executed, so it should be used with `idle_in_transaction_timeout` for the best results. [#89033][#89033] -- The `crdb_internal.check_consistency` [function]({% link v23.1/functions-and-operators.md %}) now does not include the diff between inconsistent replicas, should they occur. If an inconsistency occurs, the storage engine checkpoints should be inspected. This change is made because the maximum range size limit has been increased from 64 MiB to 512 MiB, so inlining diffs in consistency checks does not scale. [#89502][#89502] -- CockroachDB now shows a hash-sharded check constraint in [`SHOW CREATE TABLE`]({% link v23.1/show-create.md %}) if it is set to `NOT VALID`. [#89517][#89517] -- Added the `SHOW FUNCTIONS` and `SHOW FUNCTIONS FROM ` statements, which list [user-defined functions]({% link v23.1/user-defined-functions.md %}). [#89557][#89557] -- The default value of `sql.metrics.statement_details.plan_collection.enabled` is now `false`. [#89847][#89847] -- CockroachDB now supports executing statements of the form `DELETE FROM ... USING`. [#88974][#88974] -- The [cluster setting]({% link v23.1/cluster-settings.md %}) `sql.ttl.default_range_concurrency` and table storage parameter `ttl_range_concurrency` are no longer configurable. [#89392][#89392] -- CockroachDB drops the associated scheduled incremental backup when [`DROP SCHEDULE`]({% link v23.1/drop-schedules.md %}) or `DROP SCHEDULES` is called. Previously, whenever a user dropped a [scheduled full backup]({% link v23.1/create-schedule-for-backup.md %}), the corresponding scheduled incremental backup would not be dropped. [#89768][#89768] -- Added the `sql.auth.change_own_password.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}). It defaults to `false`. When set to `true`, any user is allowed to change their own password to a non-null value. 
Changing other role options still has the same privilege requirements as before (either `CREATEROLE` or `CREATELOGIN`, depending on the option). [#90485][#90485] -- The `sql.distsql.max_running_flows` [cluster setting]({% link v23.1/cluster-settings.md %}) has been removed. [#84888][#84888] -- The query field in the `crdb_internal.node_queries`, `crdb_internal.cluster_queries`, and `SHOW QUERIES` commands now includes the original comments in the queries. [#86968][#86968] -- Added a new `descriptor_validation` [session variable]({% link v23.1/set-vars.md %}) which can be set to `read_only` or `off` to disable descriptor validation, which may be useful when mitigating or recovering from catalog corruption. [#90488][#90488] -- CockroachDB now supports using [`DROP CONSTRAINT, ADD CONSTRAINT`]({% link v23.1/alter-table.md %}#drop-constraint) to add a new primary key without moving the existing primary key to a secondary index if the primary key name is a reserved SQL keyword. Previously, a `constraint already exists` error was returned. [#90865][#90865] -- Added the `contention_events` column to the `cluster_execution_insights` table to see which transaction is blocking the specific statement. [#90660][#90660] -- `crdb_internal.scan` and `crdb_internal.list_sql_keys_in_range` return the timestamp for the time at which the value for a key was written, in addition to the raw key and value. [#90956][#90956] -- Previously, the [`AS OF SYSTEM TIME`]({% link v23.1/as-of-system-time.md %}) value was set at the start of the TTL job (with a TTL cutoff - 30s), but this results in an error for TTL jobs that run longer than `gc.ttlseconds`: `error selecting rows to delete: ttl select defaultdb.public.events: batch timestamp 1666883527.780656000,0 must be after replica GC threshold 1666883574.542825089,0`. 
CockroachDB now makes the `AS OF SYSTEM TIME` value relative to when each `SELECT` query is run (query time - 30s) to prevent this error from happening, but each `SELECT` query will run against a different table state. This should be ok because if records are missed during one job invocation they should still be picked up the next. [#90981][#90981] -- The `system.sql_instances` table now includes pre-allocated ID entries, where all the fields except `id` will be `NULL`. [#90427][#90427] -- The `to_char(timestamp, string)` [function]({% link v23.1/functions-and-operators.md %}) has been added. [#91382][#91382] -- The `to_char(interval, string)` built-in [function]({% link v23.1/functions-and-operators.md %}) has been added. [#91382][#91382] -- The `system.table_statistics` table now contains a column called `partialPredicate` to store a predicate for a partial statistic collection. [#91248][#91248] -- A new `NumericStat`, `idleLat`, was introduced to the statistics column of `crdb_internal.statement_statistics`. This reports the time spent waiting for the client to send the statement while holding a transaction open. Developers may use this stat to identify opportunities for restructuring their apps to reduce contention. [#91098][#91098] -- The set of supported compression algorithms in compress/decompress built-in [functions]({% link v23.1/functions-and-operators.md %}) is expanded to include `lz4`, `snappy`, and `zstd`. [#91162][#91162] -- A new column `database` was added to `crdb_internal.{node,cluster}_queries` and list sessions [endpoint]({% link v23.1/cluster-api.md %}). [#91629][#91629] -- Fixed a bug in the legacy schema changer where comments were not dropped together with the database. [#91689][#91689] -- The `round(decimal`) built-in [function]({% link v23.1/functions-and-operators.md %}) no longer returns negative 0 for any input. 
[#86106][#86106] -- Added an estimate for the number of request units consumed by a query to the output of [`EXPLAIN ANALYZE`]({% link v23.1/explain-analyze.md %}) for tenant sessions. [#89256][#89256] -- Enabled forward indexes on arrays. [#91762][#91762] -- Users can now manually create partial single-column statistics at the extreme values on columns that are prefixes of their index. The output of [`SHOW STATISTICS`]({% link v23.1/show-statistics.md %}) now includes a column indicating the partial predicate for a partial statistic, or `NULL` for a full statistic. [#91228][#91228] -- A new SQL statement `SHOW COMMIT TIMESTAMP` has been added. This statement can be used to retrieve the commit timestamp of the current explicit transaction, current multi-statement implicit transaction, or previous transaction. The statement may be used in a variety of settings to maximize its utility in the face of connection pooling. When used as a part of an explicit transaction, the statement implicitly commits the transaction internally before being able to return a causality token. [#80848][#80848] -- Added support for the `pg_blocking_pids` built-in [function]({% link v23.1/functions-and-operators.md %}). It is hardcoded to return an empty array because CockroachDB has no equivalent concept of PIDs as in PostgreSQL. [#92253][#92253] -- Added a list of indexes used by the query on the statistics column on the `system.statement_statistics` and `crdb_internal.statement_statistics` tables. The format is `tableID@indexID`. [#92351][#92351] -- Added a list of used indexes (with the format `tableID@indexID`) to the sampled query telemetry log. [#92464][#92464] -- A new `NumericStat`, `idleLat`, was introduced to the statistics column of `crdb_internal.transaction_statistics`. It reports the time spent waiting for the client to send statements while holding a transaction open. 
[#92695][#92695] -- Added an in-memory-only evaluation of `tsvector` and `tsquery` datatypes and the `@@` matches operator. [#90842][#90842] -- Implemented the [`ALTER TABLE ... ADD CHECK`]({% link v23.1/alter-table.md %}) statement in the [declarative schema changer]({% link v23.1/online-schema-changes.md %}). [#91153][#91153] -- `to_char` now has caching for parse formats, marking a speedup when running `to_char` with the same format between sessions. [#91564][#91564] -- Casts from index name to `REGCLASS` are now supported. Previously, only table names could be cast to `REGCLASS`. [#90649][#90649] -- Added user-defined composite column types. [#90491][#90491] -- SQL queries running on remote nodes now show up in CPU profiles with `distsql.*` labels. Currently this includes `appname`, `gateway`, `txn`, and stmt. [#92775][#92775] -- CockroachDB now permits non-indexed storage of `tsvector` and `tsquery` datatypes [#92957][#92957] -- Implemented the `parse_ident` built-in [function]({% link v23.1/functions-and-operators.md %}), which splits a qualified identifier into an array of identifiers, removing any quoting of individual identifiers. By default, extra characters after the last identifier are considered an error; but if the second parameter is `false`, then such extra characters are ignored. [#93300][#93300] +- Added the `crdb_internal.to_json_as_changefeed_with_flags` [function]({% link v23.1/functions-and-operators.md %}) to help debug JSON [changefeeds]({% link v23.1/change-data-capture-overview.md %}). #84509 +- Added the `regions` column to the [`SHOW BACKUP`]({% link v23.1/show-backup.md %}) command which will output a string of `ALTER DATABASE` commands if the database is a multi-region database and `NULL` for everything else. Previously, the user did not have an easy way to see if a backed up database is multi-region. #88136 +- Types with length modifiers can now be used to prefix literals. 
#82028 +- A new column `plan_gist` was added to `crdb_internal.{node,cluster}_queries` representing the compressed logical plan. #88770 +- You can generate easy-to-read [`CREATE INDEX`]({% link v23.1/create-index.md %}) statements for a table's (secondary) indexes using the `SHOW CREATE INDEXES FROM ` and `SHOW CREATE SECONDARY INDEXES FROM ` statements. #88861 +- `enum_first`, `enum_last`, and `enum_range` may now take `NULL` arguments as long as their type can be inferred from the expression. #89124 +- [Declarative schema changer]({% link v23.1/online-schema-changes.md %}) support for `ALTER PRIMARY KEY` statements now extends to tables which have secondary indexes. #86176 +- The `backup.restore_span.target_size` cluster setting now defaults to `384 MiB `. This should reduce the number of ranges created during [restore]({% link v23.1/restore.md %}) and thereby reduce the merging of ranges that needs to occur after the restore. #89333 +- The `transaction_timeout` [session variable]({% link v23.1/set-vars.md %}) was added. `transaction_timeout` aborts an explicit transaction when it runs longer than the configured duration. When the timer times out, the current statement is cancelled and the transaction enters an aborted state. This timeout does not have any effect when no statement is being executed, so it should be used with `idle_in_transaction_timeout` for the best results. #89033 +- The `crdb_internal.check_consistency` [function]({% link v23.1/functions-and-operators.md %}) now does not include the diff between inconsistent replicas, should they occur. If an inconsistency occurs, the storage engine checkpoints should be inspected. This change is made because the maximum range size limit has been increased from 64 MiB to 512 MiB, so inlining diffs in consistency checks does not scale. #89502 +- CockroachDB now shows a hash-sharded check constraint in [`SHOW CREATE TABLE`]({% link v23.1/show-create.md %}) if it is set to `NOT VALID`. 
#89517 +- Added the `SHOW FUNCTIONS` and `SHOW FUNCTIONS FROM ` statements, which list [user-defined functions]({% link v23.1/user-defined-functions.md %}). #89557 +- The default value of `sql.metrics.statement_details.plan_collection.enabled` is now `false`. #89847 +- CockroachDB now supports executing statements of the form `DELETE FROM ... USING`. #88974 +- The [cluster setting]({% link v23.1/cluster-settings.md %}) `sql.ttl.default_range_concurrency` and table storage parameter `ttl_range_concurrency` are no longer configurable. #89392 +- CockroachDB drops the associated scheduled incremental backup when [`DROP SCHEDULE`]({% link v23.1/drop-schedules.md %}) or `DROP SCHEDULES` is called. Previously, whenever a user dropped a [scheduled full backup]({% link v23.1/create-schedule-for-backup.md %}), the corresponding scheduled incremental backup would not be dropped. #89768 +- Added the `sql.auth.change_own_password.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}). It defaults to `false`. When set to `true`, any user is allowed to change their own password to a non-null value. Changing other role options still has the same privilege requirements as before (either `CREATEROLE` or `CREATELOGIN`, depending on the option). #90485 +- The `sql.distsql.max_running_flows` [cluster setting]({% link v23.1/cluster-settings.md %}) has been removed. #84888 +- The query field in the `crdb_internal.node_queries`, `crdb_internal.cluster_queries`, and `SHOW QUERIES` commands now includes the original comments in the queries. #86968 +- Added a new `descriptor_validation` [session variable]({% link v23.1/set-vars.md %}) which can be set to `read_only` or `off` to disable descriptor validation, which may be useful when mitigating or recovering from catalog corruption. 
#90488 +- CockroachDB now supports using [`DROP CONSTRAINT, ADD CONSTRAINT`]({% link v23.1/alter-table.md %}#drop-constraint) to add a new primary key without moving the existing primary key to a secondary index if the primary key name is a reserved SQL keyword. Previously, a `constraint already exists` error was returned. #90865 +- Added the `contention_events` column to the `cluster_execution_insights` table to see which transaction is blocking the specific statement. #90660 +- `crdb_internal.scan` and `crdb_internal.list_sql_keys_in_range` return the timestamp for the time at which the value for a key was written, in addition to the raw key and value. #90956 +- Previously, the [`AS OF SYSTEM TIME`]({% link v23.1/as-of-system-time.md %}) value was set at the start of the TTL job (with a TTL cutoff - 30s), but this results in an error for TTL jobs that run longer than `gc.ttlseconds`: `error selecting rows to delete: ttl select defaultdb.public.events: batch timestamp 1666883527.780656000,0 must be after replica GC threshold 1666883574.542825089,0`. CockroachDB now makes the `AS OF SYSTEM TIME` value relative to when each `SELECT` query is run (query time - 30s) to prevent this error from happening, but each `SELECT` query will run against a different table state. This should be ok because if records are missed during one job invocation they should still be picked up the next. #90981 +- The `system.sql_instances` table now includes pre-allocated ID entries, where all the fields except `id` will be `NULL`. #90427 +- The `to_char(timestamp, string)` [function]({% link v23.1/functions-and-operators.md %}) has been added. #91382 +- The `to_char(interval, string)` built-in [function]({% link v23.1/functions-and-operators.md %}) has been added. #91382 +- The `system.table_statistics` table now contains a column called `partialPredicate` to store a predicate for a partial statistic collection. 
#91248 +- A new `NumericStat`, `idleLat`, was introduced to the statistics column of `crdb_internal.statement_statistics`. This reports the time spent waiting for the client to send the statement while holding a transaction open. Developers may use this stat to identify opportunities for restructuring their apps to reduce contention. #91098 +- The set of supported compression algorithms in compress/decompress built-in [functions]({% link v23.1/functions-and-operators.md %}) is expanded to include `lz4`, `snappy`, and `zstd`. #91162 +- A new column `database` was added to `crdb_internal.{node,cluster}_queries` and list sessions [endpoint]({% link v23.1/cluster-api.md %}). #91629 +- Fixed a bug in the legacy schema changer where comments were not dropped together with the database. #91689 +- The `round(decimal)` built-in [function]({% link v23.1/functions-and-operators.md %}) no longer returns negative 0 for any input. #86106 +- Added an estimate for the number of request units consumed by a query to the output of [`EXPLAIN ANALYZE`]({% link v23.1/explain-analyze.md %}) for tenant sessions. #89256 +- Enabled forward indexes on arrays. #91762 +- Users can now manually create partial single-column statistics at the extreme values on columns that are prefixes of their index. The output of [`SHOW STATISTICS`]({% link v23.1/show-statistics.md %}) now includes a column indicating the partial predicate for a partial statistic, or `NULL` for a full statistic. #91228 +- A new SQL statement `SHOW COMMIT TIMESTAMP` has been added. This statement can be used to retrieve the commit timestamp of the current explicit transaction, current multi-statement implicit transaction, or previous transaction. The statement may be used in a variety of settings to maximize its utility in the face of connection pooling. When used as a part of an explicit transaction, the statement implicitly commits the transaction internally before being able to return a causality token. 
#80848 +- Added support for the `pg_blocking_pids` built-in [function]({% link v23.1/functions-and-operators.md %}). It is hardcoded to return an empty array because CockroachDB has no equivalent concept of PIDs as in PostgreSQL. #92253 +- Added a list of indexes used by the query on the statistics column on the `system.statement_statistics` and `crdb_internal.statement_statistics` tables. The format is `tableID@indexID`. #92351 +- Added a list of used indexes (with the format `tableID@indexID`) to the sampled query telemetry log. #92464 +- A new `NumericStat`, `idleLat`, was introduced to the statistics column of `crdb_internal.transaction_statistics`. It reports the time spent waiting for the client to send statements while holding a transaction open. #92695 +- Added an in-memory-only evaluation of `tsvector` and `tsquery` datatypes and the `@@` matches operator. #90842 +- Implemented the [`ALTER TABLE ... ADD CHECK`]({% link v23.1/alter-table.md %}) statement in the [declarative schema changer]({% link v23.1/online-schema-changes.md %}). #91153 +- `to_char` now has caching for parse formats, marking a speedup when running `to_char` with the same format between sessions. #91564 +- Casts from index name to `REGCLASS` are now supported. Previously, only table names could be cast to `REGCLASS`. #90649 +- Added user-defined composite column types. #90491 +- SQL queries running on remote nodes now show up in CPU profiles with `distsql.*` labels. Currently this includes `appname`, `gateway`, `txn`, and `stmt`. #92775 +- CockroachDB now permits non-indexed storage of `tsvector` and `tsquery` datatypes. #92957 +- Implemented the `parse_ident` built-in [function]({% link v23.1/functions-and-operators.md %}), which splits a qualified identifier into an array of identifiers, removing any quoting of individual identifiers. 
By default, extra characters after the last identifier are considered an error; but if the second parameter is `false`, then such extra characters are ignored. #93300

Operational changes

-- Reduced the length of the `raft.process.handleready.latency` metric help text to avoid it being rejected by certain Prometheus services. [#87166][#87166] -- Logs produced by increasing the vmodule setting for `s3_storage` are now directed to the `DEV` channel rather than `STDOUT`. [#88798][#88798] -- Added observability for when load-based splitting cannot find a key to indicate the reasons why the load splitter could not find a split key, which provides more insight into why a range is not splitting easier. [#88720][#88720] +- Reduced the length of the `raft.process.handleready.latency` metric help text to avoid it being rejected by certain Prometheus services. #87166 +- Logs produced by increasing the vmodule setting for `s3_storage` are now directed to the `DEV` channel rather than `STDOUT`. #88798 +- Added observability for when load-based splitting cannot find a key to indicate the reasons why the load splitter could not find a split key, which provides more insight into why a range is not splitting easier. #88720 - Added five new fields to the `sampled_query` telemetry events: - `ScanCount`: Number of scans in the query plan. - `ScanWithStatsCount`: Number of scans using statistics (including forecasted statistics) in the query plan. - `ScanWithStatsForecastCount`: Number of scans using forecasted statistics in the query plan. - `TotalScanRowsWithoutForecastsEstimate`: Total number of rows read by all scans in the query, as estimated by the optimizer without using forecasts. - - `NanosSinceStatsForecasted`: The greatest quantity of nanoseconds that have passed since the forecast time (or until the forecast time, if it is in the future, in which case it will be negative) for any table with forecasted stats scanned by this query. [#88539][#88539] -- Added a new debug tool to allow for decrypting files in a store using [encryption-at-rest](https://www.cockroachlabs.com/docs/v23.1/security-reference/encryption). 
This tool is intended for use while debugging, or for providing debug artifacts to Cockroach Labs to aid with support investigations. It is intended to be run "in-situ" (i.e., on site), as it prevents having to move sensitive key material. [#89668][#89668] -- Added a new command that can be used by an operator to list the files present in the [encryption-at-rest](https://www.cockroachlabs.com/docs/v23.1/security-reference/encryption) file registry. [#89873][#89873] -- Release version binaries can now be instructed via the enviroment variable `COCKROACH_FORCE_DEV_VERSION` to override their cluster version support to match that of development builds, which can allow a release binary to be started in a cluster that is run (or has previously run) a development build. [#90002][#90002] -- The consistency check failure message is now more informative, and suggests a few actions that operators should perform in the unlikely event a failure occurs. [#89899][#89899] -- Updated metric descriptions of `rebalancing.*` to include the recording period. [#90619][#90619] -- CockroachDB now prioritizes non-voters in voter additions, meaning that when selecting a store to add a voter on (in the allocator), CockroachDB will prioritize candidate stores that contain a non-voter replica higher. This reduces the number of snapshots that need to be sent over the WAN. [#89650][#89650] -- CockroachDB now uses response data rather than just the request span in the [load-based splitter]({% link v23.1/load-based-splitting.md %}) to pass more accurate data about the keys iterated over to the load splitter to find a suitable split key, enabling the load splitter to find a split key under heavy range query workloads. [#89217][#89217] -- Added the `replicas.leaders_invalid_lease` metric, which indicates how many replicas are Raft group leaders but holding invalid leases. 
[#91179][#91179] -- The [cluster settings]({% link v23.1/cluster-settings.md %}) `server.web_session.purge.period` and `server.web_session.purge.max_deletions_per_cycle`, which were specific to the cleanup function for `system.web_sessions`, have been replaced by `server.log_gc.period` and `server.log_gc.max_deletions_per_cycle` which apply to the cleanup function for `system.eventlog`, `system.rangelog` and `system.web_sessions` equally. [#90789][#90789] -- The [cluster setting]({% link v23.1/cluster-settings.md %}) `server.web_session.auto_logout.timeout` has been removed. [#90789][#90789] -- Splunk dashboard templates are available in the public repository under `/monitoring/splunk-dashboard/`. [#92330][#92330] -- The network timeout for RPC connections between cluster nodes has been reduced from 3 seconds to 2 seconds, with a connection timeout of 4 seconds, in order to reduce unavailability and tail latencies during infrastructure outages. This can now be changed by setting the `COCKROACH_NETWORK_TIMEOUT` environment variable, which defaults to `2s`. [#92542][#92542] -- The Raft election timeout has been reduced from 3 seconds to 2 seconds, and the lease interval from 9 seconds to 6 seconds, with a corresponding reduction in the node heartbeat interval from 4.5 seconds to 3 seconds. This reduces the period of unavailability following leaseholder loss, but places tighter restrictions on network latencies (no more than 500ms roundtrip time). This can be adjusted by setting the `COCKROACH_RAFT_ELECTION_TIMEOUT_TICKS` environment variable, which now defaults to 10 and will scale all of these intervals proportionally. [#91947][#91947] -- The RPC heartbeat and gRPC keepalive ping intervals have been reduced to 1 second to detect failures faster. This is adjustable via the new `COCKROACH_PING_INTERVAL` environment variable. The timeouts remain unchanged. 
[#93399][#93399] + - `NanosSinceStatsForecasted`: The greatest quantity of nanoseconds that have passed since the forecast time (or until the forecast time, if it is in the future, in which case it will be negative) for any table with forecasted stats scanned by this query. #88539 +- Added a new debug tool to allow for decrypting files in a store using [encryption-at-rest](https://www.cockroachlabs.com/docs/v23.1/security-reference/encryption). This tool is intended for use while debugging, or for providing debug artifacts to Cockroach Labs to aid with support investigations. It is intended to be run "in-situ" (i.e., on site), as it prevents having to move sensitive key material. #89668 +- Added a new command that can be used by an operator to list the files present in the [encryption-at-rest](https://www.cockroachlabs.com/docs/v23.1/security-reference/encryption) file registry. #89873 +- Release version binaries can now be instructed via the environment variable `COCKROACH_FORCE_DEV_VERSION` to override their cluster version support to match that of development builds, which can allow a release binary to be started in a cluster that is run (or has previously run) a development build. #90002 +- The consistency check failure message is now more informative, and suggests a few actions that operators should perform in the unlikely event a failure occurs. #89899 +- Updated metric descriptions of `rebalancing.*` to include the recording period. #90619 +- CockroachDB now prioritizes non-voters in voter additions, meaning that when selecting a store to add a voter on (in the allocator), CockroachDB will prioritize candidate stores that contain a non-voter replica higher. This reduces the number of snapshots that need to be sent over the WAN. 
#89650 +- CockroachDB now uses response data rather than just the request span in the [load-based splitter]({% link v23.1/load-based-splitting.md %}) to pass more accurate data about the keys iterated over to the load splitter to find a suitable split key, enabling the load splitter to find a split key under heavy range query workloads. #89217 +- Added the `replicas.leaders_invalid_lease` metric, which indicates how many replicas are Raft group leaders but holding invalid leases. #91179 +- The [cluster settings]({% link v23.1/cluster-settings.md %}) `server.web_session.purge.period` and `server.web_session.purge.max_deletions_per_cycle`, which were specific to the cleanup function for `system.web_sessions`, have been replaced by `server.log_gc.period` and `server.log_gc.max_deletions_per_cycle` which apply to the cleanup function for `system.eventlog`, `system.rangelog` and `system.web_sessions` equally. #90789 +- The [cluster setting]({% link v23.1/cluster-settings.md %}) `server.web_session.auto_logout.timeout` has been removed. #90789 +- Splunk dashboard templates are available in the public repository under `/monitoring/splunk-dashboard/`. #92330 +- The network timeout for RPC connections between cluster nodes has been reduced from 3 seconds to 2 seconds, with a connection timeout of 4 seconds, in order to reduce unavailability and tail latencies during infrastructure outages. This can now be changed by setting the `COCKROACH_NETWORK_TIMEOUT` environment variable, which defaults to `2s`. #92542 +- The Raft election timeout has been reduced from 3 seconds to 2 seconds, and the lease interval from 9 seconds to 6 seconds, with a corresponding reduction in the node heartbeat interval from 4.5 seconds to 3 seconds. This reduces the period of unavailability following leaseholder loss, but places tighter restrictions on network latencies (no more than 500ms roundtrip time). 
This can be adjusted by setting the `COCKROACH_RAFT_ELECTION_TIMEOUT_TICKS` environment variable, which now defaults to 10 and will scale all of these intervals proportionally. #91947 +- The RPC heartbeat and gRPC keepalive ping intervals have been reduced to 1 second to detect failures faster. This is adjustable via the new `COCKROACH_PING_INTERVAL` environment variable. The timeouts remain unchanged. #93399

Command-line changes

-- The interactive [SQL shell]({% link v23.1/cockroach-sql.md %}) now retains a maximum of 1000 entries. There was no limit previously. [#88173][#88173] -- The deprecated CLI command `debug unsafe-remove-dead-replicas` has been removed. Use `debug recover` instead. [#89150][#89150] -- The `\df` metacommand was added to the [SQL shell]({% link v23.1/cockroach-sql.md %}), which will list all [user-defined functions]({% link v23.1/user-defined-functions.md %}) in the current database. [#89557][#89557] -- In the [`kv` workload]({% link v23.1/cockroach-workload.md %}#kv-workload), you can now enable `--splits` with the `--sequential` flag and adjust splitting to uniformly partition the keyspace. [#90000][#90000] -- Added the `--insert-count` flag to insert rows before the [`kv` workload]({% link v23.1/cockroach-workload.md %}#kv-workload) begins. [#90055][#90055] -- CockroachDB will now include recommended remediation actions alongside log messages for some errors. [#82891][#82891] -- The input syntax of `\set` is now more flexible: it is now more accepting of space characters in various positions of the syntax and it supports quoted values, e.g., via `\set prompt1 "a b c"`. [#90520][#90520] -- [`cockroach demo --global`]({% link v23.1/cockroach-demo.md %}) will now start up more quickly. The latency that will be injected will not be injected until after the initial cluster is set up internally. [#92231][#92231] -- The engine used as line editor in the interactive shell ([`cockroach sql`]({% link v23.1/cockroach-sql.md %}) and [`cockroach demo`]({% link v23.1/cockroach-demo.md %})) has been updated. It includes numerous bug fixes and new features. The previous engine can still be accessed by setting the `COCKROACH_SQL_FORCE_LIBEDIT` environment variable to true. This support will be removed in a later version. 
[#86457][#86457] -- The interactive [SQL shell]({% link v23.1/cockroach-sql.md %}) now supports an advanced debug mode for troubleshooting when `--debug-sql-cli` is specified on the command line. The debug mode can be enabled with Ctrl+@ or Ctrl+_ (Ctrl+space on macOS). [#86457][#86457] +- The interactive [SQL shell]({% link v23.1/cockroach-sql.md %}) now retains a maximum of 1000 entries. There was no limit previously. #88173 +- The deprecated CLI command `debug unsafe-remove-dead-replicas` has been removed. Use `debug recover` instead. #89150 +- The `\df` metacommand was added to the [SQL shell]({% link v23.1/cockroach-sql.md %}), which will list all [user-defined functions]({% link v23.1/user-defined-functions.md %}) in the current database. #89557 +- In the [`kv` workload]({% link v23.1/cockroach-workload.md %}#kv-workload), you can now enable `--splits` with the `--sequential` flag and adjust splitting to uniformly partition the keyspace. #90000 +- Added the `--insert-count` flag to insert rows before the [`kv` workload]({% link v23.1/cockroach-workload.md %}#kv-workload) begins. #90055 +- CockroachDB will now include recommended remediation actions alongside log messages for some errors. #82891 +- The input syntax of `\set` is now more flexible: it is now more accepting of space characters in various positions of the syntax and it supports quoted values, e.g., via `\set prompt1 "a b c"`. #90520 +- [`cockroach demo --global`]({% link v23.1/cockroach-demo.md %}) will now start up more quickly. The latency that will be injected will not be injected until after the initial cluster is set up internally. #92231 +- The engine used as line editor in the interactive shell ([`cockroach sql`]({% link v23.1/cockroach-sql.md %}) and [`cockroach demo`]({% link v23.1/cockroach-demo.md %})) has been updated. It includes numerous bug fixes and new features. The previous engine can still be accessed by setting the `COCKROACH_SQL_FORCE_LIBEDIT` environment variable to true. 
This support will be removed in a later version. #86457 +- The interactive [SQL shell]({% link v23.1/cockroach-sql.md %}) now supports an advanced debug mode for troubleshooting when `--debug-sql-cli` is specified on the command line. The debug mode can be enabled with Ctrl+@ or Ctrl+_ (Ctrl+space on macOS). #86457 - The following fields have been redacted and were added to the redacted debug zip: - `crdb_internal.create_statements`: - `create_statement` @@ -142,151 +142,151 @@ Release Date: December 19, 2022 - `last_active` - `active_queries` - `crdb_internal.{cluster,node}_queries`: - - `query` [#92263][#92263] + - `query` #92263 -- The interactive [SQL shell]({% link v23.1/cockroach-sql.md %}) now supports a rudimentary form of tab completion to input the name of SQL objects and functions. [#87606][#87606] -- The command-line flag `--empty` to [`cockroach demo`]({% link v23.1/cockroach-demo.md %}) is not marked as deprecated anymore; it is more convenient than `--no-example-database`. However, the latter remains supported as an alias. [#93255][#93255] -- The command-line flags `--logtostderr`, `--log-file-verbosity`, `--no-color`, `--redactable-logs`, `--log-file-max-size`, `--log-group-max-size`, `--log-dir`, `--sql-audit-dir` are not marked as deprecated anymore; instead, they are defined as convenience aliases for various `--log` specifications. [#93255][#93255] -- [`cockroach demo`]({% link v23.1/cockroach-demo.md %}) now supports `--pid-file` for symmetry with [`cockroach start`]({% link v23.1/cockroach-start.md %}). [#93343][#93343] -- The debug utility script `hot-ranges.sh` partitions output by statistics: `queries_per_second`, `writes_per_second`, `read_bytes_per_second`, `write_bytes_per_second`. It also decreased the number of ranges shown under each heading from 20 to 10. [#93528][#93528] +- The interactive [SQL shell]({% link v23.1/cockroach-sql.md %}) now supports a rudimentary form of tab completion to input the name of SQL objects and functions. 
#87606 +- The command-line flag `--empty` to [`cockroach demo`]({% link v23.1/cockroach-demo.md %}) is not marked as deprecated anymore; it is more convenient than `--no-example-database`. However, the latter remains supported as an alias. #93255 +- The command-line flags `--logtostderr`, `--log-file-verbosity`, `--no-color`, `--redactable-logs`, `--log-file-max-size`, `--log-group-max-size`, `--log-dir`, `--sql-audit-dir` are not marked as deprecated anymore; instead, they are defined as convenience aliases for various `--log` specifications. #93255 +- [`cockroach demo`]({% link v23.1/cockroach-demo.md %}) now supports `--pid-file` for symmetry with [`cockroach start`]({% link v23.1/cockroach-start.md %}). #93343 +- The debug utility script `hot-ranges.sh` partitions output by statistics: `queries_per_second`, `writes_per_second`, `read_bytes_per_second`, `write_bytes_per_second`. It also decreased the number of ranges shown under each heading from 20 to 10. #93528

DB Console changes

-- The High Contention Time insight description now accurately reflects the event's contention duration in the [DB Console]({% link v23.1/ui-overview.md %}). [#89035][#89035] -- **Overview** and **Explain Plan** tabs were added to the **Active Statement Details** page. [#89021][#89021] -- The **Apply** button was added on the **Table Details** page ([DB Console]({% link v23.1/ui-overview.md %}) only) when there is a recommendation to drop an unused index. [#90070][#90070] -- **Overview** and **Explain Plan** tabs were added to the **Statement Insight Details** page. [#90250][#90250] -- The [Jobs Page]({% link v23.1/ui-jobs-page.md %}) now includes a column picker. [#89678][#89678] -- The fingerprint ID values for statements and transactions on the [Insights]({% link v23.1/ui-insights-page.md %}) pages are links that open the respective details page on the time period of the execution of that statement or transaction. [#90403][#90403] -- Requests to fetch table and database statistics now have limited concurrency. This may make loading the [Databases]({% link v23.1/ui-databases-page.md %}) page slower, but in return should result in making those pages less disruptive. [#90210][#90210] -- The **Transaction** filter label on the **SQL Activity** page was fixed. [#91255][#91255] -- The metric graph tooltip styling was fixed to prevent content collapse. [#91290][#91290] -- Index recommendations were added to the **Statement Active Execution Details** page, and the plan gist was added as the first line of the explain plan. [#91629][#91629] -- Transaction insights pages now show insights about slow execution with unknown causes, index recommendations, and failed executions. The following fields have also been added on the **Details** page, but are not available for transactions where the insight is "High Contention" (i.e., user name, session ID, rows processed, rows read, rows written, retries, last retry reason, full scan, and transaction priority). 
[#91698][#91698] -- The fingerprint ID in hex format was added to the **Statement Details** page and **Transaction Details** page. [#91885][#91885] -- The contention time, schema, database, table, and index info was added to the **Insights Statement Details** page. [#91668][#91668] -- The query column in the insight recommendations table was removed. Instead, the statement is included in the description if the transaction being reported has multiple statements. [#91955][#91955] -- Graphs on the **Metrics** page now downsample using max value instead of average. Previously, zooming out on a graph would cause any spikes in the graph to smooth out, potentially hiding anomalies. These anomalies are now visible even when looking at a zoomed out interval. [#92017][#92017] -- The **Statement Execution** and **Planning Time** chart on the **Statement Fingerprint** page now includes a third value ("Idle") representing the time spent by the application waiting to execute this statement while holding a transaction open. [#92284][#92284] -- A list of used index per explain plan was added, under the **Explain Plan** tab on the **Statement Details** page, with links to the table or index details pages. [#92463][#92463] -- The [Insights]({% link v23.1/ui-insights-page.md %}) pages in the [DB Console]({% link v23.1/ui-overview.md %}) now show the seconds and milliseconds for all timestamp values. [#92571][#92571] -- Links were added on the fingerprint ID in the **High Contention** table on the **Transaction Insights Details** page. [#92612][#92612] -- The following new charts were added to the Metrics page, under SQL view: **Service Latency: SQL Statements, 99.9th percentile** and **Service Latency: SQL Statements, 99.99th percentile**. [#92591][#92591] -- Renamed the chart on the **Statement Details** page from **Statement Execution and Planning Time** to **Statement Times**. 
[#92765][#92765] -- The **Transaction resource usage** card on the **Transaction Fingerprin**t page now includes an "Idle latency" row, representing the time spent by the application performing other work while holding this transaction open. [#92951][#92951] -- The **Databases table** page now displays all the grants in a single row per user. [#92871][#92871] -- Added a goroutine scheduling latency graph to the **Overload** dashboard in the DB Console. It shows what the per-node p99 scheduling latency is for goroutines. [#93217][#93217] -- CockroachDB now prevents polling `/settings`, `/nodes_ui,` and `/cluster` endpoints on incorrect login. [#93211][#93211] -- The **Statement** and **Transaction** pages for tenant clusters gained region columns and filters for multi-region tenants. [#92357][#92357] +- The High Contention Time insight description now accurately reflects the event's contention duration in the [DB Console]({% link v23.1/ui-overview.md %}). #89035 +- **Overview** and **Explain Plan** tabs were added to the **Active Statement Details** page. #89021 +- The **Apply** button was added on the **Table Details** page ([DB Console]({% link v23.1/ui-overview.md %}) only) when there is a recommendation to drop an unused index. #90070 +- **Overview** and **Explain Plan** tabs were added to the **Statement Insight Details** page. #90250 +- The [Jobs Page]({% link v23.1/ui-jobs-page.md %}) now includes a column picker. #89678 +- The fingerprint ID values for statements and transactions on the [Insights]({% link v23.1/ui-insights-page.md %}) pages are links that open the respective details page on the time period of the execution of that statement or transaction. #90403 +- Requests to fetch table and database statistics now have limited concurrency. This may make loading the [Databases]({% link v23.1/ui-databases-page.md %}) page slower, but in return should result in making those pages less disruptive. 
#90210 +- The **Transaction** filter label on the **SQL Activity** page was fixed. #91255 +- The metric graph tooltip styling was fixed to prevent content collapse. #91290 +- Index recommendations were added to the **Statement Active Execution Details** page, and the plan gist was added as the first line of the explain plan. #91629 +- Transaction insights pages now show insights about slow execution with unknown causes, index recommendations, and failed executions. The following fields have also been added on the **Details** page, but are not available for transactions where the insight is "High Contention" (i.e., user name, session ID, rows processed, rows read, rows written, retries, last retry reason, full scan, and transaction priority). #91698 +- The fingerprint ID in hex format was added to the **Statement Details** page and **Transaction Details** page. #91885 +- The contention time, schema, database, table, and index info was added to the **Insights Statement Details** page. #91668 +- The query column in the insight recommendations table was removed. Instead, the statement is included in the description if the transaction being reported has multiple statements. #91955 +- Graphs on the **Metrics** page now downsample using max value instead of average. Previously, zooming out on a graph would cause any spikes in the graph to smooth out, potentially hiding anomalies. These anomalies are now visible even when looking at a zoomed out interval. #92017 +- The **Statement Execution** and **Planning Time** chart on the **Statement Fingerprint** page now includes a third value ("Idle") representing the time spent by the application waiting to execute this statement while holding a transaction open. #92284 +- A list of used index per explain plan was added, under the **Explain Plan** tab on the **Statement Details** page, with links to the table or index details pages. 
#92463 +- The [Insights]({% link v23.1/ui-insights-page.md %}) pages in the [DB Console]({% link v23.1/ui-overview.md %}) now show the seconds and milliseconds for all timestamp values. #92571 +- Links were added on the fingerprint ID in the **High Contention** table on the **Transaction Insights Details** page. #92612 +- The following new charts were added to the Metrics page, under SQL view: **Service Latency: SQL Statements, 99.9th percentile** and **Service Latency: SQL Statements, 99.99th percentile**. #92591 +- Renamed the chart on the **Statement Details** page from **Statement Execution and Planning Time** to **Statement Times**. #92765 +- The **Transaction resource usage** card on the **Transaction Fingerprint** page now includes an "Idle latency" row, representing the time spent by the application performing other work while holding this transaction open. #92951 +- The **Databases table** page now displays all the grants in a single row per user. #92871 +- Added a goroutine scheduling latency graph to the **Overload** dashboard in the DB Console. It shows what the per-node p99 scheduling latency is for goroutines. #93217 +- CockroachDB now prevents polling `/settings`, `/nodes_ui`, and `/cluster` endpoints on incorrect login. #93211 +- The **Statement** and **Transaction** pages for tenant clusters gained region columns and filters for multi-region tenants. #92357

Bug fixes

-- The flag `--sql-advertise-addr` now properly works even when the SQL and RPC ports are shared (because `--sql-addr` was not specified). Note that this port sharing is a deprecated feature in v22.2. [#87412][#87412] -- Fixed a bug introduced in v21.2 that could cause an internal error in rare cases when a query required a constrained index scan to return results in order. [#87562][#87562] -- Fixed a bug that existed from before v21.1 that could cause an internal error when executing a query with a limit ordering on the output of a window function. [#87320][#87320] -- Fixed an incorrect default value of `cloudstorage.gs.chunking.retry_timeout` to 60 seconds [#87817][#87817] -- Fixed a bug in `pg_catalog` tables that could result in an internal error if a schema is concurrently dropped. [#88568][#88568] -- Fixed a bug that caused `ALTER CHANGEFEED` to fail if the changefeed was created with a cursor option and had been running for more than `gc.ttlseconds`. [#88402][#88402] -- Fixed a bug that could cause a panic when running a query with `EXPLAIN` that attempts to order on a non-output column. [#88441][#88441] -- Fixed missing automatic statistics collection at cluster startup when the `sql.stats.automatic_collection.enabled` cluster setting is false, but there are tables with the storage parameter `sql_stats_automatic_collection_enabled` set to true. [#88673][#88673] -- Fixes an issue where when a statement bundle was collected for a query that results in an error due to a `statement_timeout`, the bundle would not be saved. [#88080][#88080] -- CockroachDB now excludes check constraints of hash-shared indexes from being invalidated when executing `IMPORT INTO`. [#89231][#89231] -- CockroachDB now flushes WAL when writing storage checkpoints on consistency checker failures. 
[#89369][#89369] -- Fixed optimizer selectivity and cost estimates of zigzag joins in order to prevent query plans from using it when it would perform poorly (e.g., when many rows are qualified). [#89261][#89261] -- Changefeeds will now never permanently error on a "failed to send RPC" error. [#87763][#87763] -- Fixed a bug that could occur when dropping a role that owned two schemas with the same name in different databases. The bug was introduced in v22.1.0. [#89504][#89504] -- CockroachDB now avoids a source of internal connectivity problems that would resolve after restarting the affected node. [#89539][#89539] -- CockroachDB now shows the correct value on table stats on UI, when there are no values to show. [#89867][#89867] -- Charts on the **Statement Details** page in the DB Console are no longer overlapping. [#90014][#90014] -- It is now possible to create tables, views, columns, etc. with the name `nothing` (e.g., `CREATE TABLE nothing...`) without having to quote the name, like in PostgreSQL. This bug was introduced in CockroachDB v2.0. [#89903][#89903] -- Fixed detection and erroring out of queries using locality-optimized joins when the session setting `enforce_home_region` is true and the input table to the join has no home region or its home region does not match the gateway region. [#90107][#90107] -- Fixed an issue with the `enforce_home_region` session setting which may cause `SHOW CREATE TABLE` or other non-DML statements to error out if the optimizer plan for the statement involves accessing a multi-region table. [#90007][#90007] -- Fixed a bug in `changefeed.batch_reduction_retry` which resulted in only a single level of retry being able to occur. [#90153][#90153] -- During JWT based auth, CockroachDB now infers the algorithm type if it is not specified by the JWKS. This enables support for a wider range of keys. [#89989][#89989] -- Fixed an extremely rare out of bounds crash in the protected timestamp subsystem. 
[#90357][#90357] -- Fixed the calculation of the `pg_attribute.attnum` column for indexes so that the `attnum` is always based on the order the column appears in the index. Also fixed the `pg_attribute` table so that it includes stored columns in secondary indexes. [#90287][#90287] -- Fixed a bug in the DB Console where when the height of the filter was big, you had to scroll to get to the **Apply** button. [#90457][#90457] -- Fixed a bug in the DB Console to now send the proper start and end values to the endpoint used on the **SQL Activity** page so it returns the full hour as described on the UI. [#90403][#90403] -- Fixed a rare bug where concurrent follower read/split operations could lead to invalid read results. [#89886][#89886] -- Fixed a bug that could cause `UPDATE .. FROM` clauses to update the same row multiple times, resulting in incorrect `UPDATED` row counts and duplicate output rows for statements with a `RETURNING` clause. The bug only appeared when the target table had a hidden primary key column (e.g., an implicit `rowid` primary key column). The bug has been present since support for `UPDATE .. FROM` was added in v19.0. [#89780][#89780] -- Protected timestamps are now created during index validation. Before, index validation could be starved if it took longer than any GC jobs for a given table. [#89540][#89540] -- Fixed a bug where `SELECT *` operations on tables with virtual computed columns undergoing schema changes could potentially fail. [#90670][#90670] -- Fixed a bug where in large, multi-region clusters it was possible for the leasing mechanism used for jobs to get caught in a live-lock scenario whereby jobs could not be adopted. [#90875][#90875] -- CockroachDB now ensures changefeeds shut down when one of the aggregator nodes returns an error. [#90767][#90767] -- Fixed a bug the occurred when attempting to reduce the size of a fixed-size `VARCHAR` column. 
[#91078][#91078] -- Fixed a bug that caused ranges to remain without a leaseholder in cases of asymmetric network partitions. [#87244][#87244] -- Fixed a bug that would prevent data from a failed restore from being cleaned up quickly. [#88342][#88342] -- Fixed a bug which, in rare cases, could result in a changefeed missing rows which occur around the time of a split in writing transactions which take longer than the closed timestamp target duration (defaults to 3s). [#91116][#91116] -- Fixed a bug where point lookups on the `pg_catalog.pg_type` table would fail to find the implicit record type that gets created for tables in the `pg_catalog`, `information_schema`, and `crdb_internal` schemas. [#90924][#90924] -- Fixed a bug that prevented the usage of implicit record types for tables in the `pg_catalog`, `information_schema`, and `crdb_internal` schemas. [#90924][#90924] -- Fixed a bug that could result in transient errors when dropping a database and immediately recreating a database with the same name and connecting to it for use. [#91174][#91174] -- Fixes a bug that resulted in the regions listed for databases and tables including an incorrect list of regions due to the logic including information about tables which are adjacent in the keyspace. [#91130][#91130] -- Fixed a bug where the experimental `scrub` command did not handle type descriptors in the database. [#91085][#91085] -- Fixed a panic that could occur when calling `st_distancespheroid` or `st_distancesphere` with a spatial object containing an `NaN` coordinate. This now produces an error, with the message "input is out of range". [#90218][#90218] -- Fixed a bug that could result in infrequent progress updates for very large backup or restore jobs. [#89971][#89971] -- Added leading zeros to fingerprint IDs with less than 16 characters. [#91885][#91885] -- Fixed a bug causing changefeeds to fail when a value is deleted while running on a non-primary column family with multiple columns. 
[#91870][#91870] -- Fixed a bug that existed since before v21.1 where the `cgroup` memory limit was undetected when using `systemd`. [#91789][#91789] -- Fixed a bug that existed since v20.2 that could cause incorrect results in rare cases for queries with inner joins and left joins. [#91425][#91425] -- Fixed an unhandled error that could happen if `ALTER DEFAULT PRIVILEGES` was run on the system database. [#92075][#92075] -- CockroachDB now prevents schema changes on the `crdb_internal_expiration` table. [#91720][#91720] -- When configured to true, the `sql.metrics.statement_details.dump_to_logs` cluster setting no longer causes a mutex deadlock. [#92272][#92272] -- Fixed a bug that could lead to errors when running multiple schema change statements in a single command using a driver that uses the extended pgwire protocol internally (for example the Npgsql C# driver). The error messages was "attempted to update job for mutation 2, but job already exists with mutation 1". [#92300][#92300] -- Fixed the **Statement Activity** page so that it no longer shows multi-statement implicit transactions as "explicit." [#92408][#92408] -- Server crashes that occur during startup are now more clearly reported in logs and the standard error output. [#91823][#91823] -- Fixed incorrect cancellation logic when attempting to detect stuck rangefeeds. [#92582][#92582] -- Fixed an internal error when comparing a tuple type with a non-tuple type. [#92635][#92635] -- Fixed incorrect selectivity estimation for queries with `OR` predicates all referencing a common single table. [#89358][#89358] -- Added sort setting to tables on the **Transaction** and **Statement Insights Details** pages. [#92573][#92573] -- Fixed an issue where `changefeed.emitted_messages` would be increased twice per message for changefeed cloud storage sinks. [#92685][#92685] -- Fixed a bug where `attidentity` in `pg_attribute` for the `GENERATED BY DEFAULT AS IDENTITY` column should be `d`. 
[#92545][#92545] -- CockroachDB previously could incorrectly evaluate queries that performed left semi and left anti "virtual lookup" joins on tables in `pg_catalog` or `information_schema`. These join types can be planned when a subquery is used inside of a filter condition. The bug was introduced in v20.2.0 and is now fixed. [#92713][#92713] -- Fixed a link to index details on the **Drop Index Insights** in the Cloud Console. [#92953][#92953] -- Fixed a bug where encoding of `ARRAY` type to Parquet format would fail in some cases during the `EXPORT` command. [#92948][#92948] -- Fixed a rare panic only present in v22.2.0 that occurs when using particular forms of existing statistics in table statistics forecasting. [#92707][#92707] -- In the presence of several backup files, CockroachDB now speeds up slow listing calls that could manifest as restore queries hanging during execution. [#93072][#93072] -- Prepared statements that use type hints can now succeed type-checking in more cases when the placeholder type is ambiguous. [#92834][#92834] -- Fixed a bug where glob patterns that matched no tables in `GRANT` or `REVOKE` statements would return an internal error with a confusing message as opposed to the appropriate "no objects matched" error. [#93173][#93173] -- Fixed a bug where empty `COPY` commands would not escape after an EOF character or error if encountering a `\.` with no input. [#93100][#93100] -- Fixed a bug where in PostgreSQL extended protocol mode it was possible for auto-commits to not execute certain logic for DDL, when certain DML (insert/update/delete) and DDL were combined in an implicit transaction. [#93283][#93283] -- Fixed the `pg_table_is_visible` built-in function so it correctly reports visibility of indexes based on the current `search_path`. 
[#90649][#90649] -- Fixed a bug that would result in incomplete backups when non-default, non-public resource limiting settings (`kv.bulk_sst.max_request_time` or `admission.elastic_cpu.enabled`) were enabled. [#92825][#92825] -- The `pg_function_is_visible` function now correctly reports visibility based on the functions that are visible on the current `search_path`. [#90657][#90657] -- Fixed a rare bug that could cause upgrades from v22.1 to v22.2 to fail if the job coordinator node crashes in the middle of a specific upgrade migration. [#93487][#93487] -- Fixed a bug for queries with disjunctions (i.e., contains `OR`) where all the columns referenced in the disjunctions are known to have a single value. [#93480][#93480] -- Fixed a bug introduced in v22.1.0 in which the non-default nulls ordering, `NULLS LAST`, was ignored in window and aggregate functions. This bug would cause incorrect query results when `NULLS LAST` was used. [#93426][#93426] -- Fixed a bug that caused an internal error when trying to execute a UDF with an empty function body. This bug was present since UDFs were introduced in v22.2.0. [#93331][#93331] -- Fixed an issue where `DISTINCT ON` queries would fail with the error "SELECT DISTINCT ON expressions must match initial ORDER BY expressions" when the query included an `ORDER BY` clause containing `ASC NULLS LAST` or `DESC NULLS FIRST`. [#93567][#93567] -- Fixed a bug where selecting a small timeframe in the past in a timeseries query resulted in no data in the graphs. [#93293][#93293] -- Fixed a bug where CockroachDB would error when receiving Geometry/Geography using binary parameters. [#93563][#93563] -- Fixed an internal error that could occur when comparing a column of type void to `NULL` using `col IS NULL` or `col IS NOT NULL`. [#93652][#93652] -- Fixed a bug where a query would incorrectly pass if a given interval for `AS OF SYSTEM TIME` interval was a small positive duration. 
[#93146][#93146] +- The flag `--sql-advertise-addr` now properly works even when the SQL and RPC ports are shared (because `--sql-addr` was not specified). Note that this port sharing is a deprecated feature in v22.2. #87412 +- Fixed a bug introduced in v21.2 that could cause an internal error in rare cases when a query required a constrained index scan to return results in order. #87562 +- Fixed a bug that existed from before v21.1 that could cause an internal error when executing a query with a limit ordering on the output of a window function. #87320 +- Fixed an incorrect default value of `cloudstorage.gs.chunking.retry_timeout` to 60 seconds. #87817 +- Fixed a bug in `pg_catalog` tables that could result in an internal error if a schema is concurrently dropped. #88568 +- Fixed a bug that caused `ALTER CHANGEFEED` to fail if the changefeed was created with a cursor option and had been running for more than `gc.ttlseconds`. #88402 +- Fixed a bug that could cause a panic when running a query with `EXPLAIN` that attempts to order on a non-output column. #88441 +- Fixed missing automatic statistics collection at cluster startup when the `sql.stats.automatic_collection.enabled` cluster setting is false, but there are tables with the storage parameter `sql_stats_automatic_collection_enabled` set to true. #88673 +- Fixed an issue where, when a statement bundle was collected for a query that results in an error due to a `statement_timeout`, the bundle would not be saved. #88080 +- CockroachDB now excludes check constraints of hash-sharded indexes from being invalidated when executing `IMPORT INTO`. #89231 +- CockroachDB now flushes WAL when writing storage checkpoints on consistency checker failures. #89369 +- Fixed optimizer selectivity and cost estimates of zigzag joins in order to prevent query plans from using it when it would perform poorly (e.g., when many rows are qualified). #89261 +- Changefeeds will now never permanently error on a "failed to send RPC" error. 
#87763 +- Fixed a bug that could occur when dropping a role that owned two schemas with the same name in different databases. The bug was introduced in v22.1.0. #89504 +- CockroachDB now avoids a source of internal connectivity problems that would resolve after restarting the affected node. #89539 +- CockroachDB now shows the correct value on table stats on UI, when there are no values to show. #89867 +- Charts on the **Statement Details** page in the DB Console are no longer overlapping. #90014 +- It is now possible to create tables, views, columns, etc. with the name `nothing` (e.g., `CREATE TABLE nothing...`) without having to quote the name, like in PostgreSQL. This bug was introduced in CockroachDB v2.0. #89903 +- Fixed detection and erroring out of queries using locality-optimized joins when the session setting `enforce_home_region` is true and the input table to the join has no home region or its home region does not match the gateway region. #90107 +- Fixed an issue with the `enforce_home_region` session setting which may cause `SHOW CREATE TABLE` or other non-DML statements to error out if the optimizer plan for the statement involves accessing a multi-region table. #90007 +- Fixed a bug in `changefeed.batch_reduction_retry` which resulted in only a single level of retry being able to occur. #90153 +- During JWT based auth, CockroachDB now infers the algorithm type if it is not specified by the JWKS. This enables support for a wider range of keys. #89989 +- Fixed an extremely rare out of bounds crash in the protected timestamp subsystem. #90357 +- Fixed the calculation of the `pg_attribute.attnum` column for indexes so that the `attnum` is always based on the order the column appears in the index. Also fixed the `pg_attribute` table so that it includes stored columns in secondary indexes. #90287 +- Fixed a bug in the DB Console where when the height of the filter was big, you had to scroll to get to the **Apply** button. 
#90457 +- Fixed a bug in the DB Console to now send the proper start and end values to the endpoint used on the **SQL Activity** page so it returns the full hour as described on the UI. #90403 +- Fixed a rare bug where concurrent follower read/split operations could lead to invalid read results. #89886 +- Fixed a bug that could cause `UPDATE .. FROM` clauses to update the same row multiple times, resulting in incorrect `UPDATED` row counts and duplicate output rows for statements with a `RETURNING` clause. The bug only appeared when the target table had a hidden primary key column (e.g., an implicit `rowid` primary key column). The bug has been present since support for `UPDATE .. FROM` was added in v19.0. #89780 +- Protected timestamps are now created during index validation. Before, index validation could be starved if it took longer than any GC jobs for a given table. #89540 +- Fixed a bug where `SELECT *` operations on tables with virtual computed columns undergoing schema changes could potentially fail. #90670 +- Fixed a bug where in large, multi-region clusters it was possible for the leasing mechanism used for jobs to get caught in a live-lock scenario whereby jobs could not be adopted. #90875 +- CockroachDB now ensures changefeeds shut down when one of the aggregator nodes returns an error. #90767 +- Fixed a bug that occurred when attempting to reduce the size of a fixed-size `VARCHAR` column. #91078 +- Fixed a bug that caused ranges to remain without a leaseholder in cases of asymmetric network partitions. #87244 +- Fixed a bug that would prevent data from a failed restore from being cleaned up quickly. #88342 +- Fixed a bug which, in rare cases, could result in a changefeed missing rows which occur around the time of a split in writing transactions which take longer than the closed timestamp target duration (defaults to 3s). 
#91116 +- Fixed a bug where point lookups on the `pg_catalog.pg_type` table would fail to find the implicit record type that gets created for tables in the `pg_catalog`, `information_schema`, and `crdb_internal` schemas. #90924 +- Fixed a bug that prevented the usage of implicit record types for tables in the `pg_catalog`, `information_schema`, and `crdb_internal` schemas. #90924 +- Fixed a bug that could result in transient errors when dropping a database and immediately recreating a database with the same name and connecting to it for use. #91174 +- Fixed a bug that resulted in the regions listed for databases and tables including an incorrect list of regions due to the logic including information about tables which are adjacent in the keyspace. #91130 +- Fixed a bug where the experimental `scrub` command did not handle type descriptors in the database. #91085 +- Fixed a panic that could occur when calling `st_distancespheroid` or `st_distancesphere` with a spatial object containing an `NaN` coordinate. This now produces an error, with the message "input is out of range". #90218 +- Fixed a bug that could result in infrequent progress updates for very large backup or restore jobs. #89971 +- Added leading zeros to fingerprint IDs with less than 16 characters. #91885 +- Fixed a bug causing changefeeds to fail when a value is deleted while running on a non-primary column family with multiple columns. #91870 +- Fixed a bug that existed since before v21.1 where the `cgroup` memory limit was undetected when using `systemd`. #91789 +- Fixed a bug that existed since v20.2 that could cause incorrect results in rare cases for queries with inner joins and left joins. #91425 +- Fixed an unhandled error that could happen if `ALTER DEFAULT PRIVILEGES` was run on the system database. #92075 +- CockroachDB now prevents schema changes on the `crdb_internal_expiration` table. 
#91720 +- When configured to true, the `sql.metrics.statement_details.dump_to_logs` cluster setting no longer causes a mutex deadlock. #92272 +- Fixed a bug that could lead to errors when running multiple schema change statements in a single command using a driver that uses the extended pgwire protocol internally (for example the Npgsql C# driver). The error message was "attempted to update job for mutation 2, but job already exists with mutation 1". #92300 +- Fixed the **Statement Activity** page so that it no longer shows multi-statement implicit transactions as "explicit." #92408 +- Server crashes that occur during startup are now more clearly reported in logs and the standard error output. #91823 +- Fixed incorrect cancellation logic when attempting to detect stuck rangefeeds. #92582 +- Fixed an internal error when comparing a tuple type with a non-tuple type. #92635 +- Fixed incorrect selectivity estimation for queries with `OR` predicates all referencing a common single table. #89358 +- Added sort setting to tables on the **Transaction** and **Statement Insights Details** pages. #92573 +- Fixed an issue where `changefeed.emitted_messages` would be increased twice per message for changefeed cloud storage sinks. #92685 +- Fixed a bug where `attidentity` in `pg_attribute` for the `GENERATED BY DEFAULT AS IDENTITY` column should be `d`. #92545 +- CockroachDB previously could incorrectly evaluate queries that performed left semi and left anti "virtual lookup" joins on tables in `pg_catalog` or `information_schema`. These join types can be planned when a subquery is used inside of a filter condition. The bug was introduced in v20.2.0 and is now fixed. #92713 +- Fixed a link to index details on the **Drop Index Insights** in the Cloud Console. #92953 +- Fixed a bug where encoding of `ARRAY` type to Parquet format would fail in some cases during the `EXPORT` command. 
#92948 +- Fixed a rare panic only present in v22.2.0 that occurs when using particular forms of existing statistics in table statistics forecasting. #92707 +- In the presence of several backup files, CockroachDB now speeds up slow listing calls that could manifest as restore queries hanging during execution. #93072 +- Prepared statements that use type hints can now succeed type-checking in more cases when the placeholder type is ambiguous. #92834 +- Fixed a bug where glob patterns that matched no tables in `GRANT` or `REVOKE` statements would return an internal error with a confusing message as opposed to the appropriate "no objects matched" error. #93173 +- Fixed a bug where empty `COPY` commands would not escape after an EOF character or error if encountering a `\.` with no input. #93100 +- Fixed a bug where in PostgreSQL extended protocol mode it was possible for auto-commits to not execute certain logic for DDL, when certain DML (insert/update/delete) and DDL were combined in an implicit transaction. #93283 +- Fixed the `pg_table_is_visible` built-in function so it correctly reports visibility of indexes based on the current `search_path`. #90649 +- Fixed a bug that would result in incomplete backups when non-default, non-public resource limiting settings (`kv.bulk_sst.max_request_time` or `admission.elastic_cpu.enabled`) were enabled. #92825 +- The `pg_function_is_visible` function now correctly reports visibility based on the functions that are visible on the current `search_path`. #90657 +- Fixed a rare bug that could cause upgrades from v22.1 to v22.2 to fail if the job coordinator node crashes in the middle of a specific upgrade migration. #93487 +- Fixed a bug for queries with disjunctions (i.e., contains `OR`) where all the columns referenced in the disjunctions are known to have a single value. #93480 +- Fixed a bug introduced in v22.1.0 in which the non-default nulls ordering, `NULLS LAST`, was ignored in window and aggregate functions. 
This bug would cause incorrect query results when `NULLS LAST` was used. #93426 +- Fixed a bug that caused an internal error when trying to execute a UDF with an empty function body. This bug was present since UDFs were introduced in v22.2.0. #93331 +- Fixed an issue where `DISTINCT ON` queries would fail with the error "SELECT DISTINCT ON expressions must match initial ORDER BY expressions" when the query included an `ORDER BY` clause containing `ASC NULLS LAST` or `DESC NULLS FIRST`. #93567 +- Fixed a bug where selecting a small timeframe in the past in a timeseries query resulted in no data in the graphs. #93293 +- Fixed a bug where CockroachDB would error when receiving Geometry/Geography using binary parameters. #93563 +- Fixed an internal error that could occur when comparing a column of type void to `NULL` using `col IS NULL` or `col IS NOT NULL`. #93652 +- Fixed a bug where a query would incorrectly pass if a given interval for `AS OF SYSTEM TIME` was a small positive duration. #93146

Performance improvements

-- The [optimizer]({% link v23.1/cost-based-optimizer.md %}) will now plan [inverted index]({% link v23.1/inverted-indexes.md %}) scans for queries with JSON subscripting filters, like `json_col['field'] = '"value"`. [#87957][#87957] -- CockroachDB now avoids wasteful contention on the gossip mutex caused by checking if the network needs tightening hundreds of times per second. [#88472][#88472] -- Some types of queries with comparisons with constant values now execute faster. [#88638][#88638] -- The [optimizer]({% link v23.1/cost-based-optimizer.md %}) now explores plans with a single [lookup join]({% link v23.1/joins.md %}#lookup-joins) expressions in rare cases where it previously planned two lookup join expressions. [#88491][#88491] -- Consistency checks are now properly cancelled on timeout, preventing them from piling up. [#86591][#86591] -- Raft ticks now adapt to scheduling delays. This helps preventing re-elections, and the corresponding performance effects, in the event of relatively short (sub-second) processing delays. [#86240][#86240] -- HTTP requests with `Accept-encoding: gzip` previously resulted in valid GZIP-encoded, but uncompressed, responses. This resulted in inefficient HTTP transfer times, as far more bytes were transferred than necessary. Those responses are now properly compressed, resulting in smaller network responses. [#88950][#88950] -- `pg_catalog.col_description` is now much faster when resolving columns for tables in the `pg_catalog`, `crdb_internal`, or `information_schema` namespaces. [#89465][#89465] -- The [optimizer]({% link v23.1/cost-based-optimizer.md %}) now does less copying of histograms while planning queries, which will reduce memory pressure a little. [#88526][#88526] -- Added early inlining of `VALUES` clauses and unnested arrays in `WITH` queries in order to eliminate unnecessary joins. 
[#87790][#87790] -- Added significantly faster JSON parsing, which should help with any workloads that insert large amounts of JSON data, including [`IMPORT`]({% link v23.1/import.md %}) workloads. [#89884][#89884] -- Loading the **Database Details** page in the DB Console is now somewhat less expensive when there are a large number of databases and a large number of tables in each database and a large number of ranges in the cluster. [#90198][#90198] -- Tables in `pg_catalog` and `information_schema` (when not explicitly referenced as `"".information_schema`) may now be much faster if the current database has a small number of relations relative to the total number in the cluster. [#90116][#90116] -- The overhead of running [`EXPLAIN ANALYZE`]({% link v23.1/explain-analyze.md %}) and `EXPLAIN ANALYZE (DISTSQL)` has been significantly reduced. The overhead of `EXPLAIN ANALYZE (DEBUG)` did not change. [#91117][#91117] -- Enabled more efficient [lookup joins]({% link v23.1/joins.md %}#lookup-joins) by deriving new join constraints when equijoin predicates exist on the column(s) of a unique constraint on one table which are a proper subset of the referencing columns of a foreign key constraint on the other table. If an index exists on those foreign key constraint referencing columns, equijoin predicates are derived between the primary key and foreign key columns not currently bound by `ON` clause predicates. [#90599][#90599] -- The setup of the distributed query execution is now fully parallelized which should reduce the query latencies, especially in multi-region setups. [#89649][#89649] -- Performance of the `LIKE` and `ILIKE` operators using patterns without any wildcards has been improved. [#91895][#91895] -- The [optimizer]({% link v23.1/cost-based-optimizer.md %}) can now better calculate the properties of recursive [common table expressions]({% link v23.1/common-table-expressions.md %}) in the presence of a `LIMIT`. 
[#90725][#90725] -- Certain types of reads will now have a far smaller contention footprint with conflicting concurrent writers. [#85993][#85993] -- In some cases, the key-value store client now needs to look up where to send requests. Prior to this change, such lookup requests were always routed to the leaseholder of the metadata range storing that information. Now the client can attempt to look up this metadata from followers. This can improve tail latency in multi-region clusters in cases where the range addressing cache is cold. [#91638][#91638] -- The garbage collection score triggering a MVCC GC run has been lowered. The GC Score is a ratio computed from MVCC stats and uses the ratio of live objects and estimated garbage age to estimate collectability of existing data. The reduced score will trigger garbage collection earlier, lowering the interval between runs 3 times, giving 2 times reduced peak garbage usage at the expense of 30% increase of wasteful data scanning on constantly updated data. [#92118][#92118] -- CockroachDB in some cases now correctly incorporates the value of the `OFFSET` clause when determining the number of rows that need to be read when the `LIMIT` clause is also present. Note that there was no correctness issue here, only that extra unnecessary rows could be read. [#92779][#92779] -- In 22.2, privileges on virtual tables (system catalogs like `pg_catalog`, `information_schema`, and `crdb_internal`) were introduced. A problem with this new feature is that we now must fetch those privileges into a cache before we can use those tables or determine their visibility in other system catalogs. This process used to occur on-demand, when the privilege was needed. Now, CockroachDB will fetch these privileges eagerly during startup to mitigate latency when accessing `pg_catalog` right after the server boots up. 
[#93557][#93557] +- The [optimizer]({% link v23.1/cost-based-optimizer.md %}) will now plan [inverted index]({% link v23.1/inverted-indexes.md %}) scans for queries with JSON subscripting filters, like `json_col['field'] = '"value"'`. #87957 +- CockroachDB now avoids wasteful contention on the gossip mutex caused by checking if the network needs tightening hundreds of times per second. #88472 +- Some types of queries with comparisons with constant values now execute faster. #88638 +- The [optimizer]({% link v23.1/cost-based-optimizer.md %}) now explores plans with a single [lookup join]({% link v23.1/joins.md %}#lookup-joins) expression in rare cases where it previously planned two lookup join expressions. #88491 +- Consistency checks are now properly cancelled on timeout, preventing them from piling up. #86591 +- Raft ticks now adapt to scheduling delays. This helps prevent re-elections, and the corresponding performance effects, in the event of relatively short (sub-second) processing delays. #86240 +- HTTP requests with `Accept-encoding: gzip` previously resulted in valid GZIP-encoded, but uncompressed, responses. This resulted in inefficient HTTP transfer times, as far more bytes were transferred than necessary. Those responses are now properly compressed, resulting in smaller network responses. #88950 +- `pg_catalog.col_description` is now much faster when resolving columns for tables in the `pg_catalog`, `crdb_internal`, or `information_schema` namespaces. #89465 +- The [optimizer]({% link v23.1/cost-based-optimizer.md %}) now does less copying of histograms while planning queries, which will reduce memory pressure a little. #88526 +- Added early inlining of `VALUES` clauses and unnested arrays in `WITH` queries in order to eliminate unnecessary joins. #87790 +- Added significantly faster JSON parsing, which should help with any workloads that insert large amounts of JSON data, including [`IMPORT`]({% link v23.1/import.md %}) workloads. 
#89884 +- Loading the **Database Details** page in the DB Console is now somewhat less expensive when there are a large number of databases and a large number of tables in each database and a large number of ranges in the cluster. #90198 +- Tables in `pg_catalog` and `information_schema` (when not explicitly referenced as `"".information_schema`) may now be much faster if the current database has a small number of relations relative to the total number in the cluster. #90116 +- The overhead of running [`EXPLAIN ANALYZE`]({% link v23.1/explain-analyze.md %}) and `EXPLAIN ANALYZE (DISTSQL)` has been significantly reduced. The overhead of `EXPLAIN ANALYZE (DEBUG)` did not change. #91117 +- Enabled more efficient [lookup joins]({% link v23.1/joins.md %}#lookup-joins) by deriving new join constraints when equijoin predicates exist on the column(s) of a unique constraint on one table which are a proper subset of the referencing columns of a foreign key constraint on the other table. If an index exists on those foreign key constraint referencing columns, equijoin predicates are derived between the primary key and foreign key columns not currently bound by `ON` clause predicates. #90599 +- The setup of the distributed query execution is now fully parallelized which should reduce the query latencies, especially in multi-region setups. #89649 +- Performance of the `LIKE` and `ILIKE` operators using patterns without any wildcards has been improved. #91895 +- The [optimizer]({% link v23.1/cost-based-optimizer.md %}) can now better calculate the properties of recursive [common table expressions]({% link v23.1/common-table-expressions.md %}) in the presence of a `LIMIT`. #90725 +- Certain types of reads will now have a far smaller contention footprint with conflicting concurrent writers. #85993 +- In some cases, the key-value store client now needs to look up where to send requests. 
Prior to this change, such lookup requests were always routed to the leaseholder of the metadata range storing that information. Now the client can attempt to look up this metadata from followers. This can improve tail latency in multi-region clusters in cases where the range addressing cache is cold. #91638 +- The garbage collection score triggering a MVCC GC run has been lowered. The GC Score is a ratio computed from MVCC stats and uses the ratio of live objects and estimated garbage age to estimate collectability of existing data. The reduced score will trigger garbage collection earlier, lowering the interval between runs 3 times, giving 2 times reduced peak garbage usage at the expense of 30% increase of wasteful data scanning on constantly updated data. #92118 +- CockroachDB in some cases now correctly incorporates the value of the `OFFSET` clause when determining the number of rows that need to be read when the `LIMIT` clause is also present. Note that there was no correctness issue here, only that extra unnecessary rows could be read. #92779 +- In 22.2, privileges on virtual tables (system catalogs like `pg_catalog`, `information_schema`, and `crdb_internal`) were introduced. A problem with this new feature is that we now must fetch those privileges into a cache before we can use those tables or determine their visibility in other system catalogs. This process used to occur on-demand, when the privilege was needed. Now, CockroachDB will fetch these privileges eagerly during startup to mitigate latency when accessing `pg_catalog` right after the server boots up. #93557
@@ -311,278 +311,3 @@ We would like to thank the following contributors from the CockroachDB community
-[#80848]: https://github.com/cockroachdb/cockroach/pull/80848 -[#82028]: https://github.com/cockroachdb/cockroach/pull/82028 -[#82891]: https://github.com/cockroachdb/cockroach/pull/82891 -[#84509]: https://github.com/cockroachdb/cockroach/pull/84509 -[#84888]: https://github.com/cockroachdb/cockroach/pull/84888 -[#85177]: https://github.com/cockroachdb/cockroach/pull/85177 -[#85993]: https://github.com/cockroachdb/cockroach/pull/85993 -[#86106]: https://github.com/cockroachdb/cockroach/pull/86106 -[#86176]: https://github.com/cockroachdb/cockroach/pull/86176 -[#86240]: https://github.com/cockroachdb/cockroach/pull/86240 -[#86457]: https://github.com/cockroachdb/cockroach/pull/86457 -[#86591]: https://github.com/cockroachdb/cockroach/pull/86591 -[#86603]: https://github.com/cockroachdb/cockroach/pull/86603 -[#86968]: https://github.com/cockroachdb/cockroach/pull/86968 -[#87166]: https://github.com/cockroachdb/cockroach/pull/87166 -[#87244]: https://github.com/cockroachdb/cockroach/pull/87244 -[#87320]: https://github.com/cockroachdb/cockroach/pull/87320 -[#87412]: https://github.com/cockroachdb/cockroach/pull/87412 -[#87562]: https://github.com/cockroachdb/cockroach/pull/87562 -[#87606]: https://github.com/cockroachdb/cockroach/pull/87606 -[#87763]: https://github.com/cockroachdb/cockroach/pull/87763 -[#87790]: https://github.com/cockroachdb/cockroach/pull/87790 -[#87817]: https://github.com/cockroachdb/cockroach/pull/87817 -[#87957]: https://github.com/cockroachdb/cockroach/pull/87957 -[#87994]: https://github.com/cockroachdb/cockroach/pull/87994 -[#88064]: https://github.com/cockroachdb/cockroach/pull/88064 -[#88080]: https://github.com/cockroachdb/cockroach/pull/88080 -[#88136]: https://github.com/cockroachdb/cockroach/pull/88136 -[#88173]: https://github.com/cockroachdb/cockroach/pull/88173 -[#88342]: https://github.com/cockroachdb/cockroach/pull/88342 -[#88395]: https://github.com/cockroachdb/cockroach/pull/88395 -[#88402]: 
https://github.com/cockroachdb/cockroach/pull/88402 -[#88441]: https://github.com/cockroachdb/cockroach/pull/88441 -[#88472]: https://github.com/cockroachdb/cockroach/pull/88472 -[#88491]: https://github.com/cockroachdb/cockroach/pull/88491 -[#88526]: https://github.com/cockroachdb/cockroach/pull/88526 -[#88539]: https://github.com/cockroachdb/cockroach/pull/88539 -[#88562]: https://github.com/cockroachdb/cockroach/pull/88562 -[#88568]: https://github.com/cockroachdb/cockroach/pull/88568 -[#88625]: https://github.com/cockroachdb/cockroach/pull/88625 -[#88635]: https://github.com/cockroachdb/cockroach/pull/88635 -[#88638]: https://github.com/cockroachdb/cockroach/pull/88638 -[#88672]: https://github.com/cockroachdb/cockroach/pull/88672 -[#88673]: https://github.com/cockroachdb/cockroach/pull/88673 -[#88720]: https://github.com/cockroachdb/cockroach/pull/88720 -[#88770]: https://github.com/cockroachdb/cockroach/pull/88770 -[#88798]: https://github.com/cockroachdb/cockroach/pull/88798 -[#88861]: https://github.com/cockroachdb/cockroach/pull/88861 -[#88950]: https://github.com/cockroachdb/cockroach/pull/88950 -[#88974]: https://github.com/cockroachdb/cockroach/pull/88974 -[#89021]: https://github.com/cockroachdb/cockroach/pull/89021 -[#89033]: https://github.com/cockroachdb/cockroach/pull/89033 -[#89035]: https://github.com/cockroachdb/cockroach/pull/89035 -[#89124]: https://github.com/cockroachdb/cockroach/pull/89124 -[#89150]: https://github.com/cockroachdb/cockroach/pull/89150 -[#89217]: https://github.com/cockroachdb/cockroach/pull/89217 -[#89231]: https://github.com/cockroachdb/cockroach/pull/89231 -[#89256]: https://github.com/cockroachdb/cockroach/pull/89256 -[#89261]: https://github.com/cockroachdb/cockroach/pull/89261 -[#89329]: https://github.com/cockroachdb/cockroach/pull/89329 -[#89333]: https://github.com/cockroachdb/cockroach/pull/89333 -[#89358]: https://github.com/cockroachdb/cockroach/pull/89358 -[#89369]: 
https://github.com/cockroachdb/cockroach/pull/89369 -[#89392]: https://github.com/cockroachdb/cockroach/pull/89392 -[#89451]: https://github.com/cockroachdb/cockroach/pull/89451 -[#89465]: https://github.com/cockroachdb/cockroach/pull/89465 -[#89502]: https://github.com/cockroachdb/cockroach/pull/89502 -[#89504]: https://github.com/cockroachdb/cockroach/pull/89504 -[#89517]: https://github.com/cockroachdb/cockroach/pull/89517 -[#89531]: https://github.com/cockroachdb/cockroach/pull/89531 -[#89539]: https://github.com/cockroachdb/cockroach/pull/89539 -[#89540]: https://github.com/cockroachdb/cockroach/pull/89540 -[#89557]: https://github.com/cockroachdb/cockroach/pull/89557 -[#89649]: https://github.com/cockroachdb/cockroach/pull/89649 -[#89650]: https://github.com/cockroachdb/cockroach/pull/89650 -[#89668]: https://github.com/cockroachdb/cockroach/pull/89668 -[#89678]: https://github.com/cockroachdb/cockroach/pull/89678 -[#89757]: https://github.com/cockroachdb/cockroach/pull/89757 -[#89768]: https://github.com/cockroachdb/cockroach/pull/89768 -[#89780]: https://github.com/cockroachdb/cockroach/pull/89780 -[#89847]: https://github.com/cockroachdb/cockroach/pull/89847 -[#89867]: https://github.com/cockroachdb/cockroach/pull/89867 -[#89873]: https://github.com/cockroachdb/cockroach/pull/89873 -[#89884]: https://github.com/cockroachdb/cockroach/pull/89884 -[#89886]: https://github.com/cockroachdb/cockroach/pull/89886 -[#89899]: https://github.com/cockroachdb/cockroach/pull/89899 -[#89903]: https://github.com/cockroachdb/cockroach/pull/89903 -[#89913]: https://github.com/cockroachdb/cockroach/pull/89913 -[#89971]: https://github.com/cockroachdb/cockroach/pull/89971 -[#89989]: https://github.com/cockroachdb/cockroach/pull/89989 -[#90000]: https://github.com/cockroachdb/cockroach/pull/90000 -[#90002]: https://github.com/cockroachdb/cockroach/pull/90002 -[#90007]: https://github.com/cockroachdb/cockroach/pull/90007 -[#90014]: 
https://github.com/cockroachdb/cockroach/pull/90014 -[#90055]: https://github.com/cockroachdb/cockroach/pull/90055 -[#90070]: https://github.com/cockroachdb/cockroach/pull/90070 -[#90107]: https://github.com/cockroachdb/cockroach/pull/90107 -[#90116]: https://github.com/cockroachdb/cockroach/pull/90116 -[#90153]: https://github.com/cockroachdb/cockroach/pull/90153 -[#90198]: https://github.com/cockroachdb/cockroach/pull/90198 -[#90210]: https://github.com/cockroachdb/cockroach/pull/90210 -[#90218]: https://github.com/cockroachdb/cockroach/pull/90218 -[#90250]: https://github.com/cockroachdb/cockroach/pull/90250 -[#90270]: https://github.com/cockroachdb/cockroach/pull/90270 -[#90287]: https://github.com/cockroachdb/cockroach/pull/90287 -[#90357]: https://github.com/cockroachdb/cockroach/pull/90357 -[#90403]: https://github.com/cockroachdb/cockroach/pull/90403 -[#90427]: https://github.com/cockroachdb/cockroach/pull/90427 -[#90439]: https://github.com/cockroachdb/cockroach/pull/90439 -[#90457]: https://github.com/cockroachdb/cockroach/pull/90457 -[#90485]: https://github.com/cockroachdb/cockroach/pull/90485 -[#90488]: https://github.com/cockroachdb/cockroach/pull/90488 -[#90491]: https://github.com/cockroachdb/cockroach/pull/90491 -[#90520]: https://github.com/cockroachdb/cockroach/pull/90520 -[#90599]: https://github.com/cockroachdb/cockroach/pull/90599 -[#90619]: https://github.com/cockroachdb/cockroach/pull/90619 -[#90649]: https://github.com/cockroachdb/cockroach/pull/90649 -[#90657]: https://github.com/cockroachdb/cockroach/pull/90657 -[#90660]: https://github.com/cockroachdb/cockroach/pull/90660 -[#90670]: https://github.com/cockroachdb/cockroach/pull/90670 -[#90725]: https://github.com/cockroachdb/cockroach/pull/90725 -[#90767]: https://github.com/cockroachdb/cockroach/pull/90767 -[#90789]: https://github.com/cockroachdb/cockroach/pull/90789 -[#90810]: https://github.com/cockroachdb/cockroach/pull/90810 -[#90842]: 
https://github.com/cockroachdb/cockroach/pull/90842 -[#90865]: https://github.com/cockroachdb/cockroach/pull/90865 -[#90875]: https://github.com/cockroachdb/cockroach/pull/90875 -[#90924]: https://github.com/cockroachdb/cockroach/pull/90924 -[#90956]: https://github.com/cockroachdb/cockroach/pull/90956 -[#90981]: https://github.com/cockroachdb/cockroach/pull/90981 -[#91040]: https://github.com/cockroachdb/cockroach/pull/91040 -[#91078]: https://github.com/cockroachdb/cockroach/pull/91078 -[#91085]: https://github.com/cockroachdb/cockroach/pull/91085 -[#91098]: https://github.com/cockroachdb/cockroach/pull/91098 -[#91116]: https://github.com/cockroachdb/cockroach/pull/91116 -[#91117]: https://github.com/cockroachdb/cockroach/pull/91117 -[#91130]: https://github.com/cockroachdb/cockroach/pull/91130 -[#91153]: https://github.com/cockroachdb/cockroach/pull/91153 -[#91162]: https://github.com/cockroachdb/cockroach/pull/91162 -[#91174]: https://github.com/cockroachdb/cockroach/pull/91174 -[#91179]: https://github.com/cockroachdb/cockroach/pull/91179 -[#91205]: https://github.com/cockroachdb/cockroach/pull/91205 -[#91228]: https://github.com/cockroachdb/cockroach/pull/91228 -[#91248]: https://github.com/cockroachdb/cockroach/pull/91248 -[#91255]: https://github.com/cockroachdb/cockroach/pull/91255 -[#91290]: https://github.com/cockroachdb/cockroach/pull/91290 -[#91323]: https://github.com/cockroachdb/cockroach/pull/91323 -[#91382]: https://github.com/cockroachdb/cockroach/pull/91382 -[#91425]: https://github.com/cockroachdb/cockroach/pull/91425 -[#91564]: https://github.com/cockroachdb/cockroach/pull/91564 -[#91604]: https://github.com/cockroachdb/cockroach/pull/91604 -[#91629]: https://github.com/cockroachdb/cockroach/pull/91629 -[#91638]: https://github.com/cockroachdb/cockroach/pull/91638 -[#91668]: https://github.com/cockroachdb/cockroach/pull/91668 -[#91689]: https://github.com/cockroachdb/cockroach/pull/91689 -[#91698]: 
https://github.com/cockroachdb/cockroach/pull/91698 -[#91720]: https://github.com/cockroachdb/cockroach/pull/91720 -[#91762]: https://github.com/cockroachdb/cockroach/pull/91762 -[#91789]: https://github.com/cockroachdb/cockroach/pull/91789 -[#91823]: https://github.com/cockroachdb/cockroach/pull/91823 -[#91870]: https://github.com/cockroachdb/cockroach/pull/91870 -[#91885]: https://github.com/cockroachdb/cockroach/pull/91885 -[#91895]: https://github.com/cockroachdb/cockroach/pull/91895 -[#91947]: https://github.com/cockroachdb/cockroach/pull/91947 -[#91955]: https://github.com/cockroachdb/cockroach/pull/91955 -[#92017]: https://github.com/cockroachdb/cockroach/pull/92017 -[#92075]: https://github.com/cockroachdb/cockroach/pull/92075 -[#92118]: https://github.com/cockroachdb/cockroach/pull/92118 -[#92131]: https://github.com/cockroachdb/cockroach/pull/92131 -[#92231]: https://github.com/cockroachdb/cockroach/pull/92231 -[#92232]: https://github.com/cockroachdb/cockroach/pull/92232 -[#92253]: https://github.com/cockroachdb/cockroach/pull/92253 -[#92263]: https://github.com/cockroachdb/cockroach/pull/92263 -[#92272]: https://github.com/cockroachdb/cockroach/pull/92272 -[#92284]: https://github.com/cockroachdb/cockroach/pull/92284 -[#92300]: https://github.com/cockroachdb/cockroach/pull/92300 -[#92330]: https://github.com/cockroachdb/cockroach/pull/92330 -[#92351]: https://github.com/cockroachdb/cockroach/pull/92351 -[#92357]: https://github.com/cockroachdb/cockroach/pull/92357 -[#92406]: https://github.com/cockroachdb/cockroach/pull/92406 -[#92408]: https://github.com/cockroachdb/cockroach/pull/92408 -[#92463]: https://github.com/cockroachdb/cockroach/pull/92463 -[#92464]: https://github.com/cockroachdb/cockroach/pull/92464 -[#92542]: https://github.com/cockroachdb/cockroach/pull/92542 -[#92545]: https://github.com/cockroachdb/cockroach/pull/92545 -[#92571]: https://github.com/cockroachdb/cockroach/pull/92571 -[#92573]: 
https://github.com/cockroachdb/cockroach/pull/92573 -[#92582]: https://github.com/cockroachdb/cockroach/pull/92582 -[#92591]: https://github.com/cockroachdb/cockroach/pull/92591 -[#92612]: https://github.com/cockroachdb/cockroach/pull/92612 -[#92635]: https://github.com/cockroachdb/cockroach/pull/92635 -[#92685]: https://github.com/cockroachdb/cockroach/pull/92685 -[#92694]: https://github.com/cockroachdb/cockroach/pull/92694 -[#92695]: https://github.com/cockroachdb/cockroach/pull/92695 -[#92707]: https://github.com/cockroachdb/cockroach/pull/92707 -[#92713]: https://github.com/cockroachdb/cockroach/pull/92713 -[#92765]: https://github.com/cockroachdb/cockroach/pull/92765 -[#92775]: https://github.com/cockroachdb/cockroach/pull/92775 -[#92779]: https://github.com/cockroachdb/cockroach/pull/92779 -[#92825]: https://github.com/cockroachdb/cockroach/pull/92825 -[#92834]: https://github.com/cockroachdb/cockroach/pull/92834 -[#92871]: https://github.com/cockroachdb/cockroach/pull/92871 -[#92948]: https://github.com/cockroachdb/cockroach/pull/92948 -[#92951]: https://github.com/cockroachdb/cockroach/pull/92951 -[#92953]: https://github.com/cockroachdb/cockroach/pull/92953 -[#92957]: https://github.com/cockroachdb/cockroach/pull/92957 -[#93072]: https://github.com/cockroachdb/cockroach/pull/93072 -[#93100]: https://github.com/cockroachdb/cockroach/pull/93100 -[#93146]: https://github.com/cockroachdb/cockroach/pull/93146 -[#93173]: https://github.com/cockroachdb/cockroach/pull/93173 -[#93211]: https://github.com/cockroachdb/cockroach/pull/93211 -[#93217]: https://github.com/cockroachdb/cockroach/pull/93217 -[#93255]: https://github.com/cockroachdb/cockroach/pull/93255 -[#93283]: https://github.com/cockroachdb/cockroach/pull/93283 -[#93293]: https://github.com/cockroachdb/cockroach/pull/93293 -[#93300]: https://github.com/cockroachdb/cockroach/pull/93300 -[#93331]: https://github.com/cockroachdb/cockroach/pull/93331 -[#93343]: 
https://github.com/cockroachdb/cockroach/pull/93343 -[#93399]: https://github.com/cockroachdb/cockroach/pull/93399 -[#93416]: https://github.com/cockroachdb/cockroach/pull/93416 -[#93418]: https://github.com/cockroachdb/cockroach/pull/93418 -[#93423]: https://github.com/cockroachdb/cockroach/pull/93423 -[#93426]: https://github.com/cockroachdb/cockroach/pull/93426 -[#93480]: https://github.com/cockroachdb/cockroach/pull/93480 -[#93487]: https://github.com/cockroachdb/cockroach/pull/93487 -[#93528]: https://github.com/cockroachdb/cockroach/pull/93528 -[#93545]: https://github.com/cockroachdb/cockroach/pull/93545 -[#93557]: https://github.com/cockroachdb/cockroach/pull/93557 -[#93563]: https://github.com/cockroachdb/cockroach/pull/93563 -[#93567]: https://github.com/cockroachdb/cockroach/pull/93567 -[#93652]: https://github.com/cockroachdb/cockroach/pull/93652 -[0755afad4]: https://github.com/cockroachdb/cockroach/commit/0755afad4 -[083c44d42]: https://github.com/cockroachdb/cockroach/commit/083c44d42 -[0fc005ffe]: https://github.com/cockroachdb/cockroach/commit/0fc005ffe -[14c89e140]: https://github.com/cockroachdb/cockroach/commit/14c89e140 -[1dbc43e15]: https://github.com/cockroachdb/cockroach/commit/1dbc43e15 -[239786a46]: https://github.com/cockroachdb/cockroach/commit/239786a46 -[241d790aa]: https://github.com/cockroachdb/cockroach/commit/241d790aa -[25ace40d6]: https://github.com/cockroachdb/cockroach/commit/25ace40d6 -[26961cae5]: https://github.com/cockroachdb/cockroach/commit/26961cae5 -[2b0042753]: https://github.com/cockroachdb/cockroach/commit/2b0042753 -[2c41acedc]: https://github.com/cockroachdb/cockroach/commit/2c41acedc -[35ba9e615]: https://github.com/cockroachdb/cockroach/commit/35ba9e615 -[3925dea41]: https://github.com/cockroachdb/cockroach/commit/3925dea41 -[397d5da2c]: https://github.com/cockroachdb/cockroach/commit/397d5da2c -[4ef9e78a9]: https://github.com/cockroachdb/cockroach/commit/4ef9e78a9 -[4f93166cf]: 
https://github.com/cockroachdb/cockroach/commit/4f93166cf -[51e22010b]: https://github.com/cockroachdb/cockroach/commit/51e22010b -[5a0435b87]: https://github.com/cockroachdb/cockroach/commit/5a0435b87 -[5a85950bf]: https://github.com/cockroachdb/cockroach/commit/5a85950bf -[654fa25dd]: https://github.com/cockroachdb/cockroach/commit/654fa25dd -[6f1a6d8e0]: https://github.com/cockroachdb/cockroach/commit/6f1a6d8e0 -[714b4d363]: https://github.com/cockroachdb/cockroach/commit/714b4d363 -[89058d537]: https://github.com/cockroachdb/cockroach/commit/89058d537 -[8f96adfc4]: https://github.com/cockroachdb/cockroach/commit/8f96adfc4 -[9181af088]: https://github.com/cockroachdb/cockroach/commit/9181af088 -[91953e542]: https://github.com/cockroachdb/cockroach/commit/91953e542 -[93d118387]: https://github.com/cockroachdb/cockroach/commit/93d118387 -[a7f3ae671]: https://github.com/cockroachdb/cockroach/commit/a7f3ae671 -[aa3c59ccb]: https://github.com/cockroachdb/cockroach/commit/aa3c59ccb -[acbcd7380]: https://github.com/cockroachdb/cockroach/commit/acbcd7380 -[bed0710b7]: https://github.com/cockroachdb/cockroach/commit/bed0710b7 -[bf8f93064]: https://github.com/cockroachdb/cockroach/commit/bf8f93064 -[c6bc7b47c]: https://github.com/cockroachdb/cockroach/commit/c6bc7b47c -[c98c82b0f]: https://github.com/cockroachdb/cockroach/commit/c98c82b0f -[e0f8edec4]: https://github.com/cockroachdb/cockroach/commit/e0f8edec4 -[e2d92e896]: https://github.com/cockroachdb/cockroach/commit/e2d92e896 -[e8226497d]: https://github.com/cockroachdb/cockroach/commit/e8226497d -[eb84eba69]: https://github.com/cockroachdb/cockroach/commit/eb84eba69 -[f1e59d1b6]: https://github.com/cockroachdb/cockroach/commit/f1e59d1b6 -[f4118b091]: https://github.com/cockroachdb/cockroach/commit/f4118b091 -[faa7072ef]: https://github.com/cockroachdb/cockroach/commit/faa7072ef -[fb6157ab2]: https://github.com/cockroachdb/cockroach/commit/fb6157ab2 -[fc2d18064]: 
https://github.com/cockroachdb/cockroach/commit/fc2d18064 -[fd0d77e2d]: https://github.com/cockroachdb/cockroach/commit/fd0d77e2d diff --git a/src/current/_includes/releases/v23.1/v23.1.0-alpha.2.md b/src/current/_includes/releases/v23.1/v23.1.0-alpha.2.md index 5d7fcc00a21..02f5d74550c 100644 --- a/src/current/_includes/releases/v23.1/v23.1.0-alpha.2.md +++ b/src/current/_includes/releases/v23.1/v23.1.0-alpha.2.md @@ -14,18 +14,18 @@ Release Date: February 13, 2022 - Instead of: `SELECT range_id FROM crdb_internal.ranges WHERE table_name = 'x'`, use: `SELECT range_id FROM [SHOW RANGES FROM TABLE x]` - Instead of `SELECT range_id FROM crdb_internal.ranges WHERE table_name = $1 OR table_id = $2` (variable / unpredictable table name or ID), use: `SELECT range_id FROM [SHOW RANGES FROM CURRENT_CATALOG WITH TABLES] WHERE table_name = $1 OR table_id = $2` - Instead of `SELECT start_key FROM crdb_internal.ranges WHERE table_name = 'x'`, use: `SELECT raw_start_key FROM [SHOW RANGES FROM TABLE x WITH KEYS]` - - Instead of `SELECT start_key FROM crdb_internal.ranges WHERE table_name = $1 OR table_id = $2` (unpredictable / variable table name or ID), use: `SELECT raw_start_key FROM [SHOW RANGES FROM CURRENT_CATALOG WITH TABLES, KEYS] WHERE table_name = $1 OR table_id = $2` [#93644][#93644] -- The format of the columns `start_key` and `end_key` for `SHOW RANGES FROM DATABASE` and `SHOW RANGES FROM TABLE` have been extended to include which table/index the key belongs to. This is necessary because a range can now contain data from more than one table/index. [#93644][#93644] -- The format of the columns `start_key` and `end_key` for `SHOW RANGE ... FOR ROW` has been changed to be consistent with the output of `SHOW RANGES FROM INDEX`. [#93644][#93644] -- The output of [`SHOW RANGES`]({% link v23.1/show-ranges.md %}) no longer includes `range_size`, `range_size_mb`, `lease_holder`, or `lease_holder_localities` by default. 
This ensures that `SHOW RANGES` remains fast in the common case. Use the new option `WITH DETAILS` to include these columns. [#93644][#93644] -- Changefeeds using "preview" expressions (released in v22.2.0) and that access the previous state of the row using the `cdc_prev()` function will no longer work and will need to be recreated with new syntax. [#94429][#94429] -- Fixed a bug where, when `server.identity_map.configuration` was used, CockroachDB did not verify the client-provided username against the target mappings. Note that **this means that the client must now provide a valid DB username.** This requirement is compatible with PostgreSQL; it was not previously required by CockroachDB but it is now. This does not apply when identity maps are not in use. [#94915][#94915] -- Previously, the type of the `replicas`, `voting_replicas`,`non_voting_replicas` and `learner_replicas` in `crdb_internal.ranges` were overridden to `INT2VECTOR` causing incompatible indexing between `.ranges` and `.ranges_no_leases`. Now the types of those columns in the two tables are set to `INT[]`. [#96287][#96287] + - Instead of `SELECT start_key FROM crdb_internal.ranges WHERE table_name = $1 OR table_id = $2` (unpredictable / variable table name or ID), use: `SELECT raw_start_key FROM [SHOW RANGES FROM CURRENT_CATALOG WITH TABLES, KEYS] WHERE table_name = $1 OR table_id = $2` #93644 +- The format of the columns `start_key` and `end_key` for `SHOW RANGES FROM DATABASE` and `SHOW RANGES FROM TABLE` have been extended to include which table/index the key belongs to. This is necessary because a range can now contain data from more than one table/index. #93644 +- The format of the columns `start_key` and `end_key` for `SHOW RANGE ... FOR ROW` has been changed to be consistent with the output of `SHOW RANGES FROM INDEX`. 
#93644 +- The output of [`SHOW RANGES`]({% link v23.1/show-ranges.md %}) no longer includes `range_size`, `range_size_mb`, `lease_holder`, or `lease_holder_localities` by default. This ensures that `SHOW RANGES` remains fast in the common case. Use the new option `WITH DETAILS` to include these columns. #93644 +- Changefeeds using "preview" expressions (released in v22.2.0) and that access the previous state of the row using the `cdc_prev()` function will no longer work and will need to be recreated with new syntax. #94429 +- Fixed a bug where, when `server.identity_map.configuration` was used, CockroachDB did not verify the client-provided username against the target mappings. Note that **this means that the client must now provide a valid DB username.** This requirement is compatible with PostgreSQL; it was not previously required by CockroachDB but it is now. This does not apply when identity maps are not in use. #94915 +- Previously, the types of the `replicas`, `voting_replicas`, `non_voting_replicas` and `learner_replicas` in `crdb_internal.ranges` were overridden to `INT2VECTOR` causing incompatible indexing between `.ranges` and `.ranges_no_leases`. Now the types of those columns in the two tables are set to `INT[]`. #96287

Security updates

-- Added an option to re-enable "old" cipher suites for use with very old clients. Fixes [issue #1989](https://github.com/cockroachlabs/support/issues/1989). [#95091][#95091] -- Previously, the `ENCRYPTION_PASSPHRASE` option passed to [`RESTORE`]({% link v23.1/restore.md %}) would appear as 'redacted'. It now appears as '******' which is consistent with [`SHOW BACKUP`]({% link v23.1/show-backup.md %}) and [`BACKUP`]({% link v23.1/backup.md %}). [#95562][#95562] +- Added an option to re-enable "old" cipher suites for use with very old clients. Fixes [issue #1989](https://github.com/cockroachlabs/support/issues/1989). #95091 +- Previously, the `ENCRYPTION_PASSPHRASE` option passed to [`RESTORE`]({% link v23.1/restore.md %}) would appear as 'redacted'. It now appears as '******' which is consistent with [`SHOW BACKUP`]({% link v23.1/show-backup.md %}) and [`BACKUP`]({% link v23.1/backup.md %}). #95562

General changes

@@ -35,40 +35,40 @@ With CockroachDB v23.1, the `RANGE DEFAULT` value is lowered to `4h` but only fo Cockroach Labs has found the `25h` value to translate to higher-than-necessary storage costs, especially for workloads where rows are deleted frequently. It can also make for costlier reads with respect to CPU since we currently have to scan over overwritten values to get to the one of interest. Finally, we've also observed cluster instability due to large unsplittable ranges that have accumulated an excessive amount of MVCC garbage. -We chose a default of `25h` originally to accommodate daily incremental backups with revision history. But with the introduction of scheduled backups introduced in v22.2, we no longer need a large GC TTL. Scheduled backups "chain together" and prevent garbage collection of relevant data to ensure coverage of revision history across backups, decoupling it from whatever value is used for GC TTL. The GC TTL determines how far back `AS OF SYSTEM TIME` queries can go, which now if going past `now()-4h`, will start failing informatively. To support larger windows for `AS OF SYSTEM TIME` queries, users are encouraged to pick a more appropriate GC TTL and set it using `ALTER ... CONFIGURE ZONE using gc.ttlseconds = `. The earlier considerations around storage use, read costs, and stability still apply. [#93836][#93836] +We chose a default of `25h` originally to accommodate daily incremental backups with revision history. But with the introduction of scheduled backups introduced in v22.2, we no longer need a large GC TTL. Scheduled backups "chain together" and prevent garbage collection of relevant data to ensure coverage of revision history across backups, decoupling it from whatever value is used for GC TTL. The GC TTL determines how far back `AS OF SYSTEM TIME` queries can go, which now if going past `now()-4h`, will start failing informatively. 
To support larger windows for `AS OF SYSTEM TIME` queries, users are encouraged to pick a more appropriate GC TTL and set it using `ALTER ... CONFIGURE ZONE USING gc.ttlseconds = <value>`. The earlier considerations around storage use, read costs, and stability still apply. #93836

{{ site.data.products.enterprise }} edition changes

-- The `changefeed.active_protected_timestamps.enabled` [[cluster setting]({% link v23.1/cluster-settings.md %})]({% link v23.1/cluster-settings.md %}) has been removed and is now always treated as if it was `true`. [#89975][#89975] -- Improved [changefeed expressions]({% link v23.1/cdc-queries.md %}) logic to rely on the optimizer to evaluate star expansion. [#93979][#93979] -- Changefeed expressions now support system columns. [#93979][#93979] -- Changefeed expressions now have access to the `cdc_prev` tuple which contains the previous state of the row. [#94429][#94429] -- Changefeed expressions now support non-volatile user defined functions (UDFs). [#94429][#94429] -- Changefeed transformations (e.g., `CREATE CHANGEFEED ... AS SELECT ...`) no longer require the `schema_change_policy=stop` option. [#94653][#94653] -- Changefeed transformations introduced in the v22.2 release in preview mode are no longer experimental. This feature can now be considered to be fully production-ready. [#94806][#94806] -- The [`CREATE EXTERNAL CONNECTION`]({% link v23.1/create-external-connection.md %}) statement now supports URIs with the prefixes `azure`, `gs`, `gcpubsub`, `http`, `https`, `webhook-https`, `nodelocal`, `s3`, and `kafka` for use by changefeeds. [#86061][#86061] -- The `CONTROLCHANGEFEED` [role option]({% link v23.1/alter-role.md %}#role-options) will be deprecated in the future (see issue [#94757](https://github.com/cockroachdb/cockroach/issues/94757)). With this change, usages of the `CONTROLCHANGEFEED` role option will come with a deprecation warning. Its existing behavior remains the same. The `SELECT` and `CHANGEFEED` privileges will be used for changefeeds henceforth: +- The `changefeed.active_protected_timestamps.enabled` [[cluster setting]({% link v23.1/cluster-settings.md %})]({% link v23.1/cluster-settings.md %}) has been removed and is now always treated as if it was `true`. 
#89975 +- Improved [changefeed expressions]({% link v23.1/cdc-queries.md %}) logic to rely on the optimizer to evaluate star expansion. #93979 +- Changefeed expressions now support system columns. #93979 +- Changefeed expressions now have access to the `cdc_prev` tuple which contains the previous state of the row. #94429 +- Changefeed expressions now support non-volatile user defined functions (UDFs). #94429 +- Changefeed transformations (e.g., `CREATE CHANGEFEED ... AS SELECT ...`) no longer require the `schema_change_policy=stop` option. #94653 +- Changefeed transformations introduced in the v22.2 release in preview mode are no longer experimental. This feature can now be considered to be fully production-ready. #94806 +- The [`CREATE EXTERNAL CONNECTION`]({% link v23.1/create-external-connection.md %}) statement now supports URIs with the prefixes `azure`, `gs`, `gcpubsub`, `http`, `https`, `webhook-https`, `nodelocal`, `s3`, and `kafka` for use by changefeeds. #86061 +- The `CONTROLCHANGEFEED` [role option]({% link v23.1/alter-role.md %}#role-options) will be deprecated in the future (see issue #94757). With this change, usages of the `CONTROLCHANGEFEED` role option will come with a deprecation warning. Its existing behavior remains the same. The `SELECT` and `CHANGEFEED` privileges will be used for changefeeds henceforth: - The `SELECT` privilege on a set of tables allows a user to run core changefeeds against them. - The `CHANGEFEED` privilege on a set of tables allows a user to run enterprise changefeeds on them, and also manage the underlying changefeed job (ie. view, pause, cancel, and resume the job). - Notably, a new [cluster setting]({% link v23.1/cluster-settings.md %}) `changefeed.permissions.require_external_connection_sink.enabled` is added and set to `false` by default. Enabling this setting restricts users with `CHANGEFEED` on a set of tables to create enterprise changefeeds into external connections only. 
To use a given external connection, a user typically needs the `USAGE` privilege on it. Note that `ALTER DEFAULT PRIVILEGES` can be used with both the `CHANGEFEED` and `SELECT` privileges to assign coarse-grained permissions (i.e., assign permissions to all tables in a schema rather than manually assign them for each table). [#94796][#94796] -- Changefeeds created/altered with a `metrics_label` set while `server.child_metrics.enabled` is set to `false` will now provide the user a notice upon creation. [#94948][#94948] -- Fix a bug in [`ALTER CHANGEFEED`]({% link v23.1/alter-changefeed.md %}) that would panic when altering changefeeds to remove a table that has already been dropped. [#95739][#95739] -- Reduced the default size of scan RPC replies to improve cluster stability during changefeed accounting for transient memory. [#95798][#95798] -- Changefeed expressions now support the `changefeed_created_timestamp` function. [#95179][#95179] -- Increased the default `changefeed.memory.per_changefeed_limit` [cluster setting]({% link v23.1/cluster-settings.md %}) to `1/2GiB`. This should result in changefeeds being able to produce larger files. [#96340][#96340] -- The `confluent_schema_registry` URI for avro changefeeds now supports `client_cert` and `client_key` parameters. [#96510][#96510] + Notably, a new [cluster setting]({% link v23.1/cluster-settings.md %}) `changefeed.permissions.require_external_connection_sink.enabled` is added and set to `false` by default. Enabling this setting restricts users with `CHANGEFEED` on a set of tables to create enterprise changefeeds into external connections only. To use a given external connection, a user typically needs the `USAGE` privilege on it. Note that `ALTER DEFAULT PRIVILEGES` can be used with both the `CHANGEFEED` and `SELECT` privileges to assign coarse-grained permissions (i.e., assign permissions to all tables in a schema rather than manually assign them for each table). 
#94796 +- Changefeeds created/altered with a `metrics_label` set while `server.child_metrics.enabled` is set to `false` will now provide the user a notice upon creation. #94948 +- Fixed a bug in [`ALTER CHANGEFEED`]({% link v23.1/alter-changefeed.md %}) that would panic when altering changefeeds to remove a table that has already been dropped. #95739 +- Reduced the default size of scan RPC replies to improve cluster stability during changefeed accounting for transient memory. #95798 +- Changefeed expressions now support the `changefeed_created_timestamp` function. #95179 +- Increased the default `changefeed.memory.per_changefeed_limit` [cluster setting]({% link v23.1/cluster-settings.md %}) to `1/2GiB`. This should result in changefeeds being able to produce larger files. #96340 +- The `confluent_schema_registry` URI for avro changefeeds now supports `client_cert` and `client_key` parameters. #96510

SQL language changes

-- Added the `pg_get_function_arguments` [built-in function]({% link v23.1/functions-and-operators.md %}). This returns the argument list (with defaults) necessary to identify the function with a given OID. [#93675][#93675] -- Added `voting_replicas` and `non_voting_replicas` columns to the output of `SHOW RANGE` and `SHOW RANGES` statements. [#93513][#93513] -- It is now possible to create and alter [`NOT VISIBLE`]({% link v23.1/create-table.md %}#not-visible-property) indexes using the alias `INVISIBLE`. The alias can be used anywhere `NOT VISIBLE` is used when creating or altering indexes. NOTE: the `INVISIBLE` alias is **not supported** for `NOT VISIBLE` columns. [#93750][#93750] -- The `system.table_statistics` table now contains a column called `fullStatisticsID` to store an id referencing the full table statistic the partial statistic was derived from. [#93751][#93751] -- Ordinal column references (e.g., `SELECT @1, @2 FROM t`) are now deprecated. By default, statements using this syntax will now result in an error. If desired, such statements can be allowed using the session setting `SET allow_ordinal_column_references=true`. Support for ordinal column references is scheduled to be removed in upcoming version v23.2. [#93754][#93754] -- The optimizer will now use table statistics that are merged combinations of the newest partial statistic and latest full statistic collection. And, if forecasting is enabled, the merged statistic will be used in the forecast. [#91933][#91933] -- Added the column `indexes_usage` and the index `indexes_usage_idx` on value on the table `system.statement_statistics`. [#93089][#93089] -- Add the `log_timezone` session variable, which is read-only and always `UTC`. [#94123][#94123] -- Added Two new virtual tables `crdb_internal.index_spans` and `.table_spans`, which list the logical keyspace used by each index/table. 
[#93644][#93644] +- Added the `pg_get_function_arguments` [built-in function]({% link v23.1/functions-and-operators.md %}). This returns the argument list (with defaults) necessary to identify the function with a given OID. #93675 +- Added `voting_replicas` and `non_voting_replicas` columns to the output of `SHOW RANGE` and `SHOW RANGES` statements. #93513 +- It is now possible to create and alter [`NOT VISIBLE`]({% link v23.1/create-table.md %}#not-visible-property) indexes using the alias `INVISIBLE`. The alias can be used anywhere `NOT VISIBLE` is used when creating or altering indexes. NOTE: the `INVISIBLE` alias is **not supported** for `NOT VISIBLE` columns. #93750 +- The `system.table_statistics` table now contains a column called `fullStatisticsID` to store an id referencing the full table statistic the partial statistic was derived from. #93751 +- Ordinal column references (e.g., `SELECT @1, @2 FROM t`) are now deprecated. By default, statements using this syntax will now result in an error. If desired, such statements can be allowed using the session setting `SET allow_ordinal_column_references=true`. Support for ordinal column references is scheduled to be removed in upcoming version v23.2. #93754 +- The optimizer will now use table statistics that are merged combinations of the newest partial statistic and latest full statistic collection. And, if forecasting is enabled, the merged statistic will be used in the forecast. #91933 +- Added the column `indexes_usage` and the index `indexes_usage_idx` on value on the table `system.statement_statistics`. #93089 +- Add the `log_timezone` session variable, which is read-only and always `UTC`. #94123 +- Added Two new virtual tables `crdb_internal.index_spans` and `.table_spans`, which list the logical keyspace used by each index/table. 
#93644 - The following new statements are introduced: - `SHOW RANGES FROM CURRENT_CATALOG` and `SHOW RANGES` without a parameter: functions as an alias for `SHOW RANGES FROM DATABASE` on the session's current database. - `SHOW RANGES FROM DATABASE ... WITH TABLES`: reports at least one row per table. It is possible for the same range ID to be repeated across multiple rows, when a range spans multiple tables. @@ -77,182 +77,182 @@ We chose a default of `25h` originally to accommodate daily incremental backups - `SHOW CLUSTER RANGES [ WITH { INDEXES | TABLES } ]`: reports ranges across the entire cluster, including ranges that do not contain table data. The behavior of `WITH INDEXES` and `WITH TABLES` is the same as `SHOW RANGES FROM DATABASE`. Additionally, the following new options have been added to the `SHOW RANGES` statement: - `WITH KEYS`: produces the raw bytes of the start/end key boundaries. - `WITH DETAILS`: produces more details, using computations that require extra network roundtrips. This option will make the operation slower overall. - - `WITH EXPLAIN`: produces the text of the SQL query used to run the statement. [#93644][#93644] -- Implemented the `pg_timezone_names` `pg_catalog` table, which lists all supported timezones. [#94122][#94122] -- Improved the performance of trigram operations. [#93757][#93757] -- Previously, CockroachDB would crash if a user creates a [user-defined function (UDF)]({% link v23.1/user-defined-functions.md %}) whose function signature includes a implicit record type (essentially a table) which has a column using a user defined enum type. The root cause was a hydration deadloop when looking up descriptors during hydration. This fix adds a new flag to avoid hydration in order to avoid the deadloop. [#94106][#94106] -- Previously, error messages for missing users sometimes had different forms. This is now unified in the form `role/user "user" does not exist`. 
[#94677][#94677] -- [User-defined functions (UDFs)]({% link v23.1/user-defined-functions.md %}) with subqueries in the body of the function are now supported. [#94962][#94962] -- Previously, setting a table's locality was not allowed if the table contained any hash sharded index. This restriction is now removed. [#94436][#94436] -- Users can now add a super region when creating a database. [#93939][#93939] -- [`COPY`]({% link v23.1/copy.md %}) now logs an error during the insert phase on the `SQL_EXEC` logging channel. [#95038][#95038] -- Added a new `REDACT` flag to [`EXPLAIN`]({% link v23.1/explain.md %}) which causes constants, literal values, parameter values, and any other user data to be redacted in explain output. Redacted statement diagnostics bundles can now be collected with [`EXPLAIN ANALYZE (DEBUG, REDACT)`]({% link v23.1/explain-analyze.md %}). [#94950][#94950] + - `WITH EXPLAIN`: produces the text of the SQL query used to run the statement. #93644 +- Implemented the `pg_timezone_names` `pg_catalog` table, which lists all supported timezones. #94122 +- Improved the performance of trigram operations. #93757 +- Previously, CockroachDB would crash if a user creates a [user-defined function (UDF)]({% link v23.1/user-defined-functions.md %}) whose function signature includes an implicit record type (essentially a table) which has a column using a user defined enum type. The root cause was a hydration deadloop when looking up descriptors during hydration. This fix adds a new flag to avoid hydration in order to avoid the deadloop. #94106 +- Previously, error messages for missing users sometimes had different forms. This is now unified in the form `role/user "user" does not exist`. #94677 +- [User-defined functions (UDFs)]({% link v23.1/user-defined-functions.md %}) with subqueries in the body of the function are now supported. #94962 +- Previously, setting a table's locality was not allowed if the table contained any hash sharded index. 
This restriction is now removed. #94436 +- Users can now add a super region when creating a database. #93939 +- [`COPY`]({% link v23.1/copy.md %}) now logs an error during the insert phase on the `SQL_EXEC` logging channel. #95038 +- Added a new `REDACT` flag to [`EXPLAIN`]({% link v23.1/explain.md %}) which causes constants, literal values, parameter values, and any other user data to be redacted in explain output. Redacted statement diagnostics bundles can now be collected with [`EXPLAIN ANALYZE (DEBUG, REDACT)`]({% link v23.1/explain-analyze.md %}). #94950 - Added two new virtual tables displaying execution insights for transactions: - `crdb_internal.cluster_txn_execution_insights` - - `crdb_internal.node_txn_execution_insights` [#94720][#94720] -- Some queries which previously resulted in the error "could not decorrelate subquery" now succeed. [#95234][#95234] -- If `copy_from_retries_enabled` is set, [`COPY`]({% link v23.1/copy.md %}) is now able to retry certain safe circumstances: namely when `copy_from_atomic_enabled` is `false`, there is no transaction running `COPY` and the error returned is retriable. This prevents users who keep running into `TransactionProtoWithRefreshError` from having issues. [#95275][#95275] -- Fixed the databases list API when the database name has special characters. [#95209][#95209] -- The pgwire protocol implementation can now accept arguments of the `JSON[]` type (oid=199). Previously, it could only accept `JSONB[]` (oid=3804). Internally, `JSON[]` and `JSONB[]` values are still identical, so this change only affects how the values are received over the wire protocol. [#94705][#94705] -- CPU time spent during SQL execution is now visible in the output of queries run with [`EXPLAIN ANALYZE`]({% link v23.1/explain-analyze.md %}). This measure does not include CPU time spent while serving KV requests, and CPU time is not shown for queries that perform mutations or for plans that aren't vectorized. 
This can be useful for diagnosing performance issues and optimizing SQL queries. [#93952][#93952] -- The [`SHOW GRANTS ON EXTERNAL CONNECTION`]({% link v23.1/show-grants.md %}) and [`SHOW SYSTEM GRANTS`]({% link v23.1/show-system-grants.md %}) statements now use a column name of `privilege_type` rather than `privilege`. For external connections, the `name` column has been changed to `connection_name`. This makes the commands consistent with other `SHOW GRANTS` commands. [#95532][#95532] -- The [`SHOW INDEXES`]({% link v23.1/show-index.md %}) statement will now show the expression used to define an index, if one was used. [#95413][#95413] -- It is now possible to run efficient `tsvector @@ tsquery` searches when there is an inverted index on the `tsvector` column being searched. [#93769][#93769] -- Added a `cpuNanos` field to the statistics column of the `crdb_internal.statement_statistics` and `system.statement_statistics` tables that reports the amount of CPU time in nanoseconds during SQL execution for queries that track CPU time. [#95639][#95639] + - `crdb_internal.node_txn_execution_insights` #94720 +- Some queries which previously resulted in the error "could not decorrelate subquery" now succeed. #95234 +- If `copy_from_retries_enabled` is set, [`COPY`]({% link v23.1/copy.md %}) is now able to retry certain safe circumstances: namely when `copy_from_atomic_enabled` is `false`, there is no transaction running `COPY` and the error returned is retriable. This prevents users who keep running into `TransactionProtoWithRefreshError` from having issues. #95275 +- Fixed the databases list API when the database name has special characters. #95209 +- The pgwire protocol implementation can now accept arguments of the `JSON[]` type (oid=199). Previously, it could only accept `JSONB[]` (oid=3804). Internally, `JSON[]` and `JSONB[]` values are still identical, so this change only affects how the values are received over the wire protocol. 
#94705 +- CPU time spent during SQL execution is now visible in the output of queries run with [`EXPLAIN ANALYZE`]({% link v23.1/explain-analyze.md %}). This measure does not include CPU time spent while serving KV requests, and CPU time is not shown for queries that perform mutations or for plans that aren't vectorized. This can be useful for diagnosing performance issues and optimizing SQL queries. #93952 +- The [`SHOW GRANTS ON EXTERNAL CONNECTION`]({% link v23.1/show-grants.md %}) and [`SHOW SYSTEM GRANTS`]({% link v23.1/show-system-grants.md %}) statements now use a column name of `privilege_type` rather than `privilege`. For external connections, the `name` column has been changed to `connection_name`. This makes the commands consistent with other `SHOW GRANTS` commands. #95532 +- The [`SHOW INDEXES`]({% link v23.1/show-index.md %}) statement will now show the expression used to define an index, if one was used. #95413 +- It is now possible to run efficient `tsvector @@ tsquery` searches when there is an inverted index on the `tsvector` column being searched. #93769 +- Added a `cpuNanos` field to the statistics column of the `crdb_internal.statement_statistics` and `system.statement_statistics` tables that reports the amount of CPU time in nanoseconds during SQL execution for queries that track CPU time. #95639 - Previously, [`SHOW BACKUP`]({% link v23.1/show-backup.md %}) options would get parsed as `kv_options`, which meant that a user could not pass multiple values to a `SHOW BACKUP` option, causing feature gaps in `SHOW BACKUP` relative to [`BACKUP`]({% link v23.1/backup.md %}) and [`RESTORE`]({% link v23.1/restore.md %}). 
This patch rewrites the show backup option parser, closing the following feature gaps: - A user can now pass and check multiple KMS URIs in `SHOW BACKUP` - - A user can pass locality-aware `incremental_locations`, allowing a user to also pass the `check_files` parameter to a locality-aware backup chain that also specifies the backup incremental location. [#95562][#95562] -- Updated the name of the `cpuNanos` column to `cpuSQLNanos` on `crdb_internal.statement_statistics` and `system.statement_statistics` [#96278][#96278] -- Some queries with `EXISTS` subqueries which previously resulted in the error "could not decorrelate subquery" now succeed. [#95883][#95883] -- Users can query the `crdb_internal.kv_dropped_relation` table to see which tables, materialized views and sequences are currently already dropped but have not yet been garbage collected, along with the garbage collection TTL setting that is currently in force. This setting originates from the table's own zone configuration, or from its parent database which it inherits, or in turn from the default zone configuration. These settings are typically set using `ALTER TABLE ... CONFIGURE ZONE USING gc.ttlseconds = ...;`. [#96217][#96217] -- Administrators may now call a new [built-in function]({% link v23.1/functions-and-operators.md %}) `crdb_internal.upsert_dropped_relation_gc_ttl` to retroactively set the garbage collection TTL on a table, materialized view, or sequence which has already been dropped. Effectively, this retroactively performs `ALTER TABLE ... CONFIGURE ZONE USING gc.ttlseconds = ...;`. Note that this statement is prevented from being executed on dropped tables because they can no longer be referenced by name at that point. Usage of this built-in is typically in conjunction with the recently-added `crdb_internal.kv_dropped_relations` virtual table. 
For example, garbage collection can be triggered ASAP for all dropped relations by querying: `SELECT crdb_internal.upsert_dropped_relation_gc_ttl(id, '1 second') FROM crdb_internal.kv_dropped_relations;`. Doing so for all tables in a dropped database requires filtering on the `parent_id` column, the database name being lost at that point. [#96217][#96217] -- Allow `*` expressions in [user-defined functions (UDFs)]({% link v23.1/user-defined-functions.md %}). [#95710][#95710] -- Previously, [user-defined functions (UDFs)]({% link v23.1/user-defined-functions.md %}) could be created with any volatility no matter if the function body statements contained any expression which would violate the target volatility. For example, an immutable function might use `random()` in it. This change added validations to guarantee that all statements in the function body should be as strict as the expected UDF volatility. [#96476][#96476] + - A user can pass locality-aware `incremental_locations`, allowing a user to also pass the `check_files` parameter to a locality-aware backup chain that also specifies the backup incremental location. #95562 +- Updated the name of the `cpuNanos` column to `cpuSQLNanos` on `crdb_internal.statement_statistics` and `system.statement_statistics` #96278 +- Some queries with `EXISTS` subqueries which previously resulted in the error "could not decorrelate subquery" now succeed. #95883 +- Users can query the `crdb_internal.kv_dropped_relation` table to see which tables, materialized views and sequences are currently already dropped but have not yet been garbage collected, along with the garbage collection TTL setting that is currently in force. This setting originates from the table's own zone configuration, or from its parent database which it inherits, or in turn from the default zone configuration. These settings are typically set using `ALTER TABLE ... CONFIGURE ZONE USING gc.ttlseconds = ...;`. 
#96217 +- Administrators may now call a new [built-in function]({% link v23.1/functions-and-operators.md %}) `crdb_internal.upsert_dropped_relation_gc_ttl` to retroactively set the garbage collection TTL on a table, materialized view, or sequence which has already been dropped. Effectively, this retroactively performs `ALTER TABLE ... CONFIGURE ZONE USING gc.ttlseconds = ...;`. Note that this statement is prevented from being executed on dropped tables because they can no longer be referenced by name at that point. Usage of this built-in is typically in conjunction with the recently-added `crdb_internal.kv_dropped_relations` virtual table. For example, garbage collection can be triggered ASAP for all dropped relations by querying: `SELECT crdb_internal.upsert_dropped_relation_gc_ttl(id, '1 second') FROM crdb_internal.kv_dropped_relations;`. Doing so for all tables in a dropped database requires filtering on the `parent_id` column, the database name being lost at that point. #96217 +- Allow `*` expressions in [user-defined functions (UDFs)]({% link v23.1/user-defined-functions.md %}). #95710 +- Previously, [user-defined functions (UDFs)]({% link v23.1/user-defined-functions.md %}) could be created with any volatility no matter if the function body statements contained any expression which would violate the target volatility. For example, an immutable function might use `random()` in it. This change added validations to guarantee that all statements in the function body should be as strict as the expected UDF volatility. #96476

Operational changes

-- Updated the metrics `queue.gc.info.clearrangesuccess` and `queue.gc.info.clearrangefailed` to include statistics about GC operations that perform `ClearRange` on parts of the range keyspace. Previously those metrics only included requests to remove range data completely when performing a schema change. [#90830][#90830] -- The load-based splitter has been redesigned to be more consistent with CPU-based rebalancing rather than QPS-based rebalancing to improve range splits. This redesign is disabled by default currently. [#93838][#93838] -- The [cluster setting]({% link v23.1/cluster-settings.md %}) `storage.value_blocks.enabled`, when set to `true`, writes the values of older versions of the same key to separate value blocks in the same sstable. For workloads that create many versions, this can improve the performance of reads by increasing locality. It can also help scan performance with single versions due to an optimization that avoids a key comparison (useful if the key are not very short). The default value of this setting is currently `false`. [#94634][#94634] -- The bytes read from SQL clients prior to authentication are now reported via the metric `sql.pre_serve.bytesin`. Previously, this was reported via the `sql.bytesin` metric. [#92580][#92580] -- Transaction errors will contain more detailed information in redacted logs. [#93760][#93760] -- Added the `COCKROACH_RAFT_MAX_INFLIGHT_BYTES` environment variable, which helps strictly limit inflight traffic from a Raft leader to its followers, particularly in situations when many large messages are sent and significantly exceed `COCKROACH_RAFT_MAX_SIZE_PER_MSG * COCKROACH_RAFT_MAX_INFLIGHT_MSGS` which is a softer limit. [#94692][#94692] -- Log messages for write stalls have been adjusted slightly. [#95436][#95436] -- Histogram metrics can now optionally use the legacy `HdrHistogram` model by setting a new environment variable `COCKROACH_ENABLE_HDR_HISTOGRAMS` to `true` on CockroachDB nodes. 
**Note that this is not recommended** unless users are having difficulties with the newer Prometheus-backed histogram model. Enabling legacy historgrams can cause performance issues with timeseries databases like Prometheus, as processing and storing the increased number of buckets is taxing on both CPU and storage. Note that the legacy `HdrHistogram` model is slated for full deprecation in upcoming releases. [#96029][#96029] -- Prometheus histograms will now export more buckets across the board to improve precision & fidelity of information reported by histogram metrics, such as quantiles. This will lead to an increase in storage requirements to process these histogram metrics in downstream systems like Prometheus, but should still be a marked improvement when compared to the legacy HdrHistogram model. If users have issues with the precision of these bucket boundaries, they can set the environment variable `COCKROACH_ENABLE_HDR_HISTOGRAMS=true` to revert to using the legacy HdrHistogram model instead, although this is not recommended otherwise as the HdrHistogram strains systems like Prometheus with excessive numbers of histogram buckets. Note that HdrHistograms are slated for full deprecation in upcoming releases. [#96029][#96029] -- In the rare event of a range inconsistency, the consistency checker now saves a storage checkpoint on each storage the range belongs to. Previously, this was a full checkpoint, so its cost could quickly escalate on the nodes that went on running. This change makes the checkpoints partial, i.e., they now only contain the relevant range and its neighbors. This eliminates the time pressure on the cluster operator to remove the checkpoints. [#95963][#95963] -- The count of new SQL connections is now also reported on `sql.pre_serve.new_conns`. [#92580][#92580] -- The count of network bytes sent to report re-authentication errors to a SQL client is now reported via the metric `sql.pre_serve.bytesout` (instead of `sql.bytesout` previously). 
The count of pre-authentication errors is now reported via the metric `sql.pre_serve.conn.failures` (instead of `sql.conn.failures` previously). [#92580][#92580] +- Updated the metrics `queue.gc.info.clearrangesuccess` and `queue.gc.info.clearrangefailed` to include statistics about GC operations that perform `ClearRange` on parts of the range keyspace. Previously those metrics only included requests to remove range data completely when performing a schema change. #90830 +- The load-based splitter has been redesigned to be more consistent with CPU-based rebalancing rather than QPS-based rebalancing to improve range splits. This redesign is disabled by default currently. #93838 +- The [cluster setting]({% link v23.1/cluster-settings.md %}) `storage.value_blocks.enabled`, when set to `true`, writes the values of older versions of the same key to separate value blocks in the same sstable. For workloads that create many versions, this can improve the performance of reads by increasing locality. It can also help scan performance with single versions due to an optimization that avoids a key comparison (useful if the key are not very short). The default value of this setting is currently `false`. #94634 +- The bytes read from SQL clients prior to authentication are now reported via the metric `sql.pre_serve.bytesin`. Previously, this was reported via the `sql.bytesin` metric. #92580 +- Transaction errors will contain more detailed information in redacted logs. #93760 +- Added the `COCKROACH_RAFT_MAX_INFLIGHT_BYTES` environment variable, which helps strictly limit inflight traffic from a Raft leader to its followers, particularly in situations when many large messages are sent and significantly exceed `COCKROACH_RAFT_MAX_SIZE_PER_MSG * COCKROACH_RAFT_MAX_INFLIGHT_MSGS` which is a softer limit. #94692 +- Log messages for write stalls have been adjusted slightly. 
#95436 +- Histogram metrics can now optionally use the legacy `HdrHistogram` model by setting a new environment variable `COCKROACH_ENABLE_HDR_HISTOGRAMS` to `true` on CockroachDB nodes. **Note that this is not recommended** unless users are having difficulties with the newer Prometheus-backed histogram model. Enabling legacy histograms can cause performance issues with timeseries databases like Prometheus, as processing and storing the increased number of buckets is taxing on both CPU and storage. Note that the legacy `HdrHistogram` model is slated for full deprecation in upcoming releases. #96029 +- Prometheus histograms will now export more buckets across the board to improve precision & fidelity of information reported by histogram metrics, such as quantiles. This will lead to an increase in storage requirements to process these histogram metrics in downstream systems like Prometheus, but should still be a marked improvement when compared to the legacy HdrHistogram model. If users have issues with the precision of these bucket boundaries, they can set the environment variable `COCKROACH_ENABLE_HDR_HISTOGRAMS=true` to revert to using the legacy HdrHistogram model instead, although this is not recommended otherwise as the HdrHistogram strains systems like Prometheus with excessive numbers of histogram buckets. Note that HdrHistograms are slated for full deprecation in upcoming releases. #96029 +- In the rare event of a range inconsistency, the consistency checker now saves a storage checkpoint on each storage the range belongs to. Previously, this was a full checkpoint, so its cost could quickly escalate on the nodes that went on running. This change makes the checkpoints partial, i.e., they now only contain the relevant range and its neighbors. This eliminates the time pressure on the cluster operator to remove the checkpoints. #95963 +- The count of new SQL connections is now also reported on `sql.pre_serve.new_conns`. 
#92580 +- The count of network bytes sent to report re-authentication errors to a SQL client is now reported via the metric `sql.pre_serve.bytesout` (instead of `sql.bytesout` previously). The count of pre-authentication errors is now reported via the metric `sql.pre_serve.conn.failures` (instead of `sql.conn.failures` previously). #92580

Command-line changes

-- The TLS parameters to connect to a [`cockroach demo`]({% link v23.1/cockroach-demo.md %}) cluster from SQL clients have been simplified. [#94421][#94421] -- The password assigned to the `demo` user in [`cockroach demo`]({% link v23.1/cockroach-demo.md %}) can now be overridden by the env var `COCKROACH_DEMO_PASSWORD`. This is meant for use in automated tests, when tests cannot be configured to use TLS client certificate authentication using the client certificates in `$HOME/.cockroach-demo`. [#94421][#94421] -- [`cockroach sql`]({% link v23.1/cockroach-sql.md %}) and [`cockroach demo`]({% link v23.1/cockroach-demo.md %}) now support an `\info` client-side command to print the server details again. [#94421][#94421] -- [`cockroach demo`]({% link v23.1/cockroach-demo.md %}) is now able to preserve open web sessions across restarts of the `cockroach demo` command. The sessions are saved in the directory `~/.cockroach-demo` alongside the TLS certificates and keys. [#94455][#94455] -- The file format used for transient loss of quorum recovery files has changed. It is not possible to use replica info files generated by earlier versions to be used with current and future versions. [#93157][#93157] -- Workloads that take a `--seed` argument previously defaulted to `1`. Now, they use a randomly generated seed in each run. Users can still pass a custom seed with the `--seed` flag. [#95326][#95326] -- Added `cpu_time_per_second` to the `hot-ranges.sh` utility `debug zip` script. [#96213][#96213] +- The TLS parameters to connect to a [`cockroach demo`]({% link v23.1/cockroach-demo.md %}) cluster from SQL clients have been simplified. #94421 +- The password assigned to the `demo` user in [`cockroach demo`]({% link v23.1/cockroach-demo.md %}) can now be overridden by the env var `COCKROACH_DEMO_PASSWORD`. 
This is meant for use in automated tests, when tests cannot be configured to use TLS client certificate authentication using the client certificates in `$HOME/.cockroach-demo`. #94421 +- [`cockroach sql`]({% link v23.1/cockroach-sql.md %}) and [`cockroach demo`]({% link v23.1/cockroach-demo.md %}) now support an `\info` client-side command to print the server details again. #94421 +- [`cockroach demo`]({% link v23.1/cockroach-demo.md %}) is now able to preserve open web sessions across restarts of the `cockroach demo` command. The sessions are saved in the directory `~/.cockroach-demo` alongside the TLS certificates and keys. #94455 +- The file format used for transient loss of quorum recovery files has changed. It is not possible to use replica info files generated by earlier versions to be used with current and future versions. #93157 +- Workloads that take a `--seed` argument previously defaulted to `1`. Now, they use a randomly generated seed in each run. Users can still pass a custom seed with the `--seed` flag. #95326 +- Added `cpu_time_per_second` to the `hot-ranges.sh` utility `debug zip` script. #96213

DB Console changes

-- Secure clusters now show correct login information in the top right corner. [#94021][#94021] -- The databases filter dropdown in the **Statements** page now uses the `getDatabasesList()` API call, resulting in all cluster databases showing up. [#93657][#93657] -- Added information about the selected plan to the **Explain Plan** tab under **Statement Details**. [#94719][#94719] -- The **Databases** page and the **Databases Details** pages now each contain search and filter components, allowing the ability to search and filter through databases and their tables. [#92589][#92589] -- Graphs can now be clicked on to toggle legend "stickiness" and make the points stop following the mouse. This makes it easier to read dense graphs with many series plotted together. [#92949][#92949] -- The **Statement Fingerprint Details** page in the DB Console no longer infinitely loads after 5 minutes. [#92596][#92596] -- Added a time picker to the **Workload Insights Overview** pages in the DB Console. [#92285][#92285] -- Added all three session status options (`Active`, `Closed`, and `Idle`) and an empty results placeholder. [#95005][#95005] -- Added a list of all statement fingerprints that use a given index to the **Index Details page**. [#94863][#94863] -- Removed `reset sql stats` and `reset index stats` from the DB Console when the user is a non-admin. [#95303][#95303] -- Remove `reset sql stats` from the **Transactions** page for non-admins. [#95461][#95461] -- Added an insights type filter for the **Workload Insights** page, and added an execution status filter for the **Active Execution** pages. [#94002][#94002] -- Hid the apply option for index recommendation when the user is not an admin. [#95516][#95516] -- The Database list filter now shows all databases in the cluster on CC console. [#95450][#95450] -- Hid the filter for the **Databases** and **Database Details** pages if both the node and regions dropdowns are also hidden. 
[#95376][#95376] +- Secure clusters now show correct login information in the top right corner. #94021 +- The databases filter dropdown in the **Statements** page now uses the `getDatabasesList()` API call, resulting in all cluster databases showing up. #93657 +- Added information about the selected plan to the **Explain Plan** tab under **Statement Details**. #94719 +- The **Databases** page and the **Databases Details** pages now each contain search and filter components, allowing the ability to search and filter through databases and their tables. #92589 +- Graphs can now be clicked on to toggle legend "stickiness" and make the points stop following the mouse. This makes it easier to read dense graphs with many series plotted together. #92949 +- The **Statement Fingerprint Details** page in the DB Console no longer infinitely loads after 5 minutes. #92596 +- Added a time picker to the **Workload Insights Overview** pages in the DB Console. #92285 +- Added all three session status options (`Active`, `Closed`, and `Idle`) and an empty results placeholder. #95005 +- Added a list of all statement fingerprints that use a given index to the **Index Details page**. #94863 +- Removed `reset sql stats` and `reset index stats` from the DB Console when the user is a non-admin. #95303 +- Remove `reset sql stats` from the **Transactions** page for non-admins. #95461 +- Added an insights type filter for the **Workload Insights** page, and added an execution status filter for the **Active Execution** pages. #94002 +- Hid the apply option for index recommendation when the user is not an admin. #95516 +- The Database list filter now shows all databases in the cluster on CC console. #95450 +- Hid the filter for the **Databases** and **Database Details** pages if both the node and regions dropdowns are also hidden. #95376 - The introduction of the **Key Visualizer** makes it easy to identify historical hotspots. 
To support this new feature, three new [cluster settings]({% link v23.1/cluster-settings.md %}) are introduced: - `keyvisualizer.enabled`: enables the Key Visualizer. - `keyvisualizer.sample_interval`: controls the frequency at which the Key Visualizer collects samples. - - `keyvisualizer.max_buckets` controls the maximum number of buckets in a sample. [#88353][#88353] -- Added a CPU Time chart to the **Statement Details** page. [#95796][#95796] -- Added CPU time as a column on the **Statement and Transaction** tables. [#95795][#95795] -- Hid the list of used fingerprints per index on the **Index Details** page for non-admin users. [#95997][#95997] -- Added write bytes, write keys, read bytes, read keys and cpu statistics to the `/hotranges` DB Console page. These statistics are the rated average over the last 30 minutes. [#95190][#95190] -- The loading spinner on the **Insights Overview** page now only shows when a request is pending and either the data is nullish or invalid (i.e., a new time range selected). [#93219][#93219] -- Previously, transaction start time for the **Transaction Insights** page was only available for transaction insights with high contention as the issue. This information is now available for all transaction insights: start time, end time, and time elapsed. [#93219][#93219] -- Added search, filter and time picker for the list of most-used statement fingerprints on the **Index Details** page. [#96112][#96112] -- Added CPU Time to the **Statement and Transaction Insights** page. [#96279][#96279] -- Previously, the stale node metrics displayed in the **Cluster Overview Nodes** table may mislead users in to thinking that they are current values when in fact they are stale. Now these metrics are displayed with a stale tag, allowing for users to be informed about the staleness of the data displayed to them regarding dead nodes. [#95868][#95868] + - `keyvisualizer.max_buckets` controls the maximum number of buckets in a sample. 
#88353 +- Added a CPU Time chart to the **Statement Details** page. #95796 +- Added CPU time as a column on the **Statement and Transaction** tables. #95795 +- Hid the list of used fingerprints per index on the **Index Details** page for non-admin users. #95997 +- Added write bytes, write keys, read bytes, read keys and cpu statistics to the `/hotranges` DB Console page. These statistics are the rated average over the last 30 minutes. #95190 +- The loading spinner on the **Insights Overview** page now only shows when a request is pending and either the data is nullish or invalid (i.e., a new time range selected). #93219 +- Previously, transaction start time for the **Transaction Insights** page was only available for transaction insights with high contention as the issue. This information is now available for all transaction insights: start time, end time, and time elapsed. #93219 +- Added search, filter and time picker for the list of most-used statement fingerprints on the **Index Details** page. #96112 +- Added CPU Time to the **Statement and Transaction Insights** page. #96279 +- Previously, the stale node metrics displayed in the **Cluster Overview Nodes** table may mislead users into thinking that they are current values when in fact they are stale. Now these metrics are displayed with a stale tag, allowing for users to be informed about the staleness of the data displayed to them regarding dead nodes. #95868

Bug fixes

-- Fixed a bug where the `session_id` session variable would not be properly set if used from a subquery. [#93748][#93748] -- Fixed a bug where CockroachDB could, in rare cases, encounter an internal error when evaluating the `crdb_internal.range_stats` [built-in function]({% link v23.1/functions-and-operators.md %}) (which powers the [`SHOW RANGES`]({% link v23.1/show-ranges.md %}) command among other things). The bug was introduced in v22.2.0 and is now fixed. [#93868][#93868] -- Fixed a bug that could prevent `CASE` expressions that used placeholder return values from type-checking correctly. [#93855][#93855] -- Updated the volatility of the `hmac`, `digest`, and `crypt` built-ins to be immutable. [#93628][#93628] -- Server logs are now correctly fsynced at every syncInterval. [#93956][#93956] -- The `stxnamespace`, `stxkind` and `stxstattarget` columns are now defined in `pg_statistics_ext`. [#93274][#93274] -- Fixed a bug where, when experimental MVCC range tombstones are enabled (they are disabled by default), a bulk ingestion (e.g., an `IMPORT`) could fail to take a committed-but-unresolved write intent into account during conflict checks when written above an MVCC range tombstone. It was therefore possible in very rare circumstances for the ingestion to write a value below the timestamp of the committed intent, causing the ingested value to disappear. [#93938][#93938] -- This patch fixes `JOIN` queries involving tables with unique constraints using `LIMIT`, `GROUP BY`, and `ORDER BY` clauses to ensure that the optimizer considers streaming group-by with no `TopK` operation, when possible. This is often the most efficient query plan. [#93858][#93858] -- Secure clusters now show correct login information in the top right corner. [#94021][#94021] -- Previously, CockroachDB could crash in rare circumstances when evaluating lookup and index joins. The bug has been present since the v22.2.0 release. 
You can also employ a temporary workaround without upgrading to the release with this fix by changing the value of the undocumented [cluster setting]({% link v23.1/cluster-settings.md %}) `sql.distsql.use_streamer.enabled` to `false`. [#94031][#94031] -- The `CREATE ROLE`, `DELETE ROLE`, `GRANT`, and `REVOKE` statements no longer function when the transaction is in read-only mode. [#93991][#93991] -- Fixed a bug where, when experimental MVCC range tombstones are enabled (they are disabled by default), a bulk ingestion (e.g., an `IMPORT`) could, in some situations, fail to properly check for conflicts with existing MVCC range tombstones. This could cause the ingestion to write below a recently-written MVCC range tombstone, in turn losing the ingested data. This could only happen in rare circumstances where a bulk ingestion was applied concurrently with an import cancellation. [#94045][#94045] -- Fixed a bug where, in some cases, the start/end key columns of the output of [`SHOW RANGES`]({% link v23.1/show-ranges.md %}) was missing. [#93644][#93644] -- Fixed a bug where trigrams ignored unicode (multi-byte) characters from input strings. [#93757][#93757] -- Fixed a bug that could happen when type-checking an array expression that only contains NULLs and placeholder values. The bug was only present in v22.2.1. [#94207][#94207] -- Fixed a bug where tables which receive writes concurrently with portions of an `ALTER TABLE ... SET LOCALITY REGIONAL BY ROW` may fail with an error: `duplicate key value violates unique constraint "new_primary_key"`. This bug was introduced in 22.1. [#94151][#94151] -- Fixed a bug where CockroachDB could encounter an internal error when evaluating window functions with `RANGE` window frame mode with an `OFFSET PRECEDING` or `OFFSET FOLLOWING` boundary set when an `ORDER BY` clause has the `NULLS LAST` option set. This will now result in a regular error since the feature is marked as unsupported. 
[#94342][#94342] -- Previously, CockroachDB could delay the release of the locks acquired when evaluating `SELECT FOR UPDATE` statements in some cases. This delay (up to 5s) could then block future readers. The bug was introduced in 22.2.0, and the temporary workaround without upgrading to a release with this fix is to set the undocumented [cluster setting]({% link v23.1/cluster-settings.md %}) `sql.distsql.use_streamer.enabled` to `false`. [#94399][#94399] -- Record types can now be encoded with the binary encoding of the Postgres wire protocol. Previously, trying to use this encoding could case a panic. [#94405][#94405] -- Fixed a panic that could be caused when using a SQL cursor to access tables in the `crdb_internal` schema. [#94425][#94425] -- Fixed a bug in which `RESTORE SYSTEM USERS` would fail to restore [role options]({% link v23.1/alter-role.md %}#role-options). [#94134][#94134] -- Fixed a crash that could occur on the gateway node when collecting a statement diagnostics bundle (e.g., [`EXPLAIN ANALYZE (DEBUG)`]({% link v23.1/explain-analyze.md %})) on a statement that fails with certain errors. This crash has existed in various forms since the introduction of statement bundles in v20.1.0. [#94440][#94440] -- Fixed a recursive CTE expressions which cause internal errors when explicit CASTs of initial expressions to output types are missing. [#94581][#94581] -- Previously, certain `GRANT` or `REVOKE` commands on a user which does not exist would error with the correct PG code. This is now fixed. [#94677][#94677] -- The `pg_proc` and `pg_type` virtual OID indexes no longer incorrectly show cross-db objects. [#94339][#94339] -- It is now possible to run `cockroach version` and `cockroach start` (and possibly other sub-commands) when the user running the command does not have permission to access the current working directory. [#94894][#94894] -- It is now possible to use a directory whose name is `"disabled"` via `--external-io-dir`. 
To actually disable external I/O, use `--external-io-dir=""`. [#94985][#94985] -- This patch fixes an internal error occurring in CASE expressions when a column present in a `THEN` or `ELSE` expression is of an inequivalent type compared to that of a constant this column is compared to in an equality predicate, e.g., `(CASE WHEN false THEN int_col ELSE 1 END) IN (int_col) AND int_col=3/2`. [#94791][#94791] -- The `pronamespace` column of the `pg_proc` table now correctly reports the `crdb_internal` schema for [built-in functions]({% link v23.1/functions-and-operators.md %}) that have the "crdb_internal" prefix. [#95029][#95029] -- In the SQL shell ([`cockroach sql`]({% link v23.1/cockroach-sql.md %}) / [`cockroach demo`]({% link v23.1/cockroach-demo.md %})), when using `\c`/`\connect` to connect to a different server, CockroachDB would previously report an error if the new server had a different cluster ID. This has been fixed: this situation is merely a warning. [#95219][#95219] -- When using identity maps (via `server.identity_map.configuration`), authentication now correctly verifies that the client-provided username matches at least one of the mappings for the system identity. Previously, the client-provided username was incorrectly ignored and authentication could fail if the first candidate mapping did not result in a valid DB username. [#94915][#94915] -- The `prokind` column of `pg_catalog.pg_proc` is now populated properly. [#95289][#95289] -- `pg_catalog.pg_description` and `pg_catalog.obj_description()` are now able to retrieve the descriptive help for [built-in functions]({% link v23.1/functions-and-operators.md %}). [#95294][#95294] -- Fixed a crash that could occur when formatting a tuple with an unknown type. [#95009][#95009] -- Fixed a bug where a DNS lookup was performed during gossip remote forwarding while holding the gossip mutex. This could cause processing stalls if the DNS server was slow to respond. 
[#95426][#95426] -- Fixed a bug where `CLOSE ALL` would not respect the "ALL" flag and would instead attempt to close a cursor with no name. [#95414][#95414] -- Fixed a bug where default expressions could have a type which differs from the type of the column as long as the expression's type could be cast in an assignment context, which could lead to a panic during a backfill if such a default expression was added to a new column. This bug was introduced in v22.2.0. [#95398][#95398] -- DB Console features that check for the `VIEWACTIVITYREDACTED` privilege now also account for [system-level privileges](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#supported-privileges). [#95258][#95258] -- CockroachDB now supports receiving `regXXX`-type values in binary extended protocol. [#94355][#94355] -- Operations like [`BACKUP`]({% link v23.1/backup.md %}) can now reuse a previously created AWS KMS client if the client was created with the same parameters. This addresses the `NoCredentialProviders` errors on EC2 for backups with long incremental chains. [#95026][#95026] -- The syntax `EXPLAIN (DDL) COMMENT ON` is now possible. [#95467][#95467] -- Fixed a bug where a database [`RESTORE`]({% link v23.1/restore.md %}) would not grant `CREATE` and `USAGE` on the public schema to the public role. [#95466][#95466] -- Fixed the formatting of JSON values inside of a SQL array so they no longer have improper quoting. [#94705][#94705] -- Fixed a bug which could cause expressions with side-effects (e.g., volatile expressions or data-modifying statements like `INSERT`) in `NOT MATERIALIZED` CTEs to be executed more than once. This bug was present since `NOT MATERIALIZED` was first supported in version 20.2.0. [#95454][#95454] -- Fixed `pg_get_indexdef` so that it shows the expression used to define an expression-based index. In addition, the function was previously including columns stored by the index, which was incorrect and has now also been fixed. 
[#95413][#95413] -- Reduced register session, deregister session, and session cancel query contention. [#95553][#95553] -- Fixed a bug that could cause arguments of a [`COALESCE`]({% link v23.1/functions-and-operators.md %}#conditional-and-function-like-operators) statement to be evaluated when previous arguments always evaluated to non-NULL values. This bug could cause query errors to originate from arguments of a `COALESCE` that should have never been evaluated. [#95599][#95599] -- Fixed the `array_to_string` [built-in function]({% link v23.1/functions-and-operators.md %}) so that nested arrays are traversed without printing 'ARRAY' at each nesting level. [#95802][#95802] -- Fixed a bug where using the [`COPY`]({% link v23.1/copy.md %}) statement to copy data into a column with collated strings would result in an error similar to `internal error: unknown type collatedstring`. [#95894][#95894] -- The content of `column_default` in `information_schema.columns` no longer has type annotations. [#94153][#94153] -- Fixed a long-standing bug that caused incorrect parsing of double-quoted identifiers separated by one or more newlines. [#96019][#96019] -- Fixed a rare internal errors in `LATERAL` queries with redundant function calls. [#96048][#96048] -- Fixed an internal error which may occur in the `SHOW RANGE FROM TABLE` statement when the `FOR ROW` clause specifies a `BYTE` literal and the corresponding column data type is `BIT`. [#96002][#96002] -- Fixed an internal errors which may occur on some `AS OF SYSTEM TIME` expressions. [#96113][#96113] -- Fixed a bug where a node with a disk stall would continue to accept new connections and preserve existing connections until the disk stall abated. [#96371][#96371] -- Fixed a bug where a [`RESTORE`]({% link v23.1/restore.md %}) flow could hang indefinitely in the face of a context cancellation, manifesting as a stuck restore job. 
[#96302][#96302] -- Fixed a bug where the `NOSQLLOGIN` [system-level privilege](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#supported-privileges) had a bug that made CockroachDB ignore it entirely, so it had no effect. The bug was introduced in v22.2.0-alpha.1. The `NOSQLLOGIN` [role option]({% link v23.1/alter-role.md %}#role-options) is unaffected by this bug. [#96466][#96466] -- The compatibility scalar functions in `information_schema` are now listed in the proper namespace in `pg_catalog.pg_proc`. [#96562][#96562] -- Fixed a bug in which the [`CREATE SCHEDULE`]({% link v23.1/create-schedule-for-backup.md %}) statement would not properly handle a placeholder for the `revision_history` option. [#95675][#95675] +- Fixed a bug where the `session_id` session variable would not be properly set if used from a subquery. #93748 +- Fixed a bug where CockroachDB could, in rare cases, encounter an internal error when evaluating the `crdb_internal.range_stats` [built-in function]({% link v23.1/functions-and-operators.md %}) (which powers the [`SHOW RANGES`]({% link v23.1/show-ranges.md %}) command among other things). The bug was introduced in v22.2.0 and is now fixed. #93868 +- Fixed a bug that could prevent `CASE` expressions that used placeholder return values from type-checking correctly. #93855 +- Updated the volatility of the `hmac`, `digest`, and `crypt` built-ins to be immutable. #93628 +- Server logs are now correctly fsynced at every syncInterval. #93956 +- The `stxnamespace`, `stxkind` and `stxstattarget` columns are now defined in `pg_statistics_ext`. #93274 +- Fixed a bug where, when experimental MVCC range tombstones are enabled (they are disabled by default), a bulk ingestion (e.g., an `IMPORT`) could fail to take a committed-but-unresolved write intent into account during conflict checks when written above an MVCC range tombstone. 
It was therefore possible in very rare circumstances for the ingestion to write a value below the timestamp of the committed intent, causing the ingested value to disappear. #93938 +- This patch fixes `JOIN` queries involving tables with unique constraints using `LIMIT`, `GROUP BY`, and `ORDER BY` clauses to ensure that the optimizer considers streaming group-by with no `TopK` operation, when possible. This is often the most efficient query plan. #93858 +- Secure clusters now show correct login information in the top right corner. #94021 +- Previously, CockroachDB could crash in rare circumstances when evaluating lookup and index joins. The bug has been present since the v22.2.0 release. You can also employ a temporary workaround without upgrading to the release with this fix by changing the value of the undocumented [cluster setting]({% link v23.1/cluster-settings.md %}) `sql.distsql.use_streamer.enabled` to `false`. #94031 +- The `CREATE ROLE`, `DELETE ROLE`, `GRANT`, and `REVOKE` statements no longer function when the transaction is in read-only mode. #93991 +- Fixed a bug where, when experimental MVCC range tombstones are enabled (they are disabled by default), a bulk ingestion (e.g., an `IMPORT`) could, in some situations, fail to properly check for conflicts with existing MVCC range tombstones. This could cause the ingestion to write below a recently-written MVCC range tombstone, in turn losing the ingested data. This could only happen in rare circumstances where a bulk ingestion was applied concurrently with an import cancellation. #94045 +- Fixed a bug where, in some cases, the start/end key columns of the output of [`SHOW RANGES`]({% link v23.1/show-ranges.md %}) was missing. #93644 +- Fixed a bug where trigrams ignored unicode (multi-byte) characters from input strings. #93757 +- Fixed a bug that could happen when type-checking an array expression that only contains NULLs and placeholder values. The bug was only present in v22.2.1. 
#94207 +- Fixed a bug where tables which receive writes concurrently with portions of an `ALTER TABLE ... SET LOCALITY REGIONAL BY ROW` may fail with an error: `duplicate key value violates unique constraint "new_primary_key"`. This bug was introduced in 22.1. #94151 +- Fixed a bug where CockroachDB could encounter an internal error when evaluating window functions with `RANGE` window frame mode with an `OFFSET PRECEDING` or `OFFSET FOLLOWING` boundary set when an `ORDER BY` clause has the `NULLS LAST` option set. This will now result in a regular error since the feature is marked as unsupported. #94342 +- Previously, CockroachDB could delay the release of the locks acquired when evaluating `SELECT FOR UPDATE` statements in some cases. This delay (up to 5s) could then block future readers. The bug was introduced in 22.2.0, and the temporary workaround without upgrading to a release with this fix is to set the undocumented [cluster setting]({% link v23.1/cluster-settings.md %}) `sql.distsql.use_streamer.enabled` to `false`. #94399 +- Record types can now be encoded with the binary encoding of the Postgres wire protocol. Previously, trying to use this encoding could cause a panic. #94405 +- Fixed a panic that could be caused when using a SQL cursor to access tables in the `crdb_internal` schema. #94425 +- Fixed a bug in which `RESTORE SYSTEM USERS` would fail to restore [role options]({% link v23.1/alter-role.md %}#role-options). #94134 +- Fixed a crash that could occur on the gateway node when collecting a statement diagnostics bundle (e.g., [`EXPLAIN ANALYZE (DEBUG)`]({% link v23.1/explain-analyze.md %})) on a statement that fails with certain errors. This crash has existed in various forms since the introduction of statement bundles in v20.1.0. #94440 +- Fixed recursive CTE expressions which cause internal errors when explicit CASTs of initial expressions to output types are missing. 
#94581 +- Previously, certain `GRANT` or `REVOKE` commands on a user which does not exist would error with the correct PG code. This is now fixed. #94677 +- The `pg_proc` and `pg_type` virtual OID indexes no longer incorrectly show cross-db objects. #94339 +- It is now possible to run `cockroach version` and `cockroach start` (and possibly other sub-commands) when the user running the command does not have permission to access the current working directory. #94894 +- It is now possible to use a directory whose name is `"disabled"` via `--external-io-dir`. To actually disable external I/O, use `--external-io-dir=""`. #94985 +- This patch fixes an internal error occurring in CASE expressions when a column present in a `THEN` or `ELSE` expression is of an inequivalent type compared to that of a constant this column is compared to in an equality predicate, e.g., `(CASE WHEN false THEN int_col ELSE 1 END) IN (int_col) AND int_col=3/2`. #94791 +- The `pronamespace` column of the `pg_proc` table now correctly reports the `crdb_internal` schema for [built-in functions]({% link v23.1/functions-and-operators.md %}) that have the "crdb_internal" prefix. #95029 +- In the SQL shell ([`cockroach sql`]({% link v23.1/cockroach-sql.md %}) / [`cockroach demo`]({% link v23.1/cockroach-demo.md %})), when using `\c`/`\connect` to connect to a different server, CockroachDB would previously report an error if the new server had a different cluster ID. This has been fixed: this situation is merely a warning. #95219 +- When using identity maps (via `server.identity_map.configuration`), authentication now correctly verifies that the client-provided username matches at least one of the mappings for the system identity. Previously, the client-provided username was incorrectly ignored and authentication could fail if the first candidate mapping did not result in a valid DB username. #94915 +- The `prokind` column of `pg_catalog.pg_proc` is now populated properly. 
#95289 +- `pg_catalog.pg_description` and `pg_catalog.obj_description()` are now able to retrieve the descriptive help for [built-in functions]({% link v23.1/functions-and-operators.md %}). #95294 +- Fixed a crash that could occur when formatting a tuple with an unknown type. #95009 +- Fixed a bug where a DNS lookup was performed during gossip remote forwarding while holding the gossip mutex. This could cause processing stalls if the DNS server was slow to respond. #95426 +- Fixed a bug where `CLOSE ALL` would not respect the "ALL" flag and would instead attempt to close a cursor with no name. #95414 +- Fixed a bug where default expressions could have a type which differs from the type of the column as long as the expression's type could be cast in an assignment context, which could lead to a panic during a backfill if such a default expression was added to a new column. This bug was introduced in v22.2.0. #95398 +- DB Console features that check for the `VIEWACTIVITYREDACTED` privilege now also account for [system-level privileges](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#supported-privileges). #95258 +- CockroachDB now supports receiving `regXXX`-type values in binary extended protocol. #94355 +- Operations like [`BACKUP`]({% link v23.1/backup.md %}) can now reuse a previously created AWS KMS client if the client was created with the same parameters. This addresses the `NoCredentialProviders` errors on EC2 for backups with long incremental chains. #95026 +- The syntax `EXPLAIN (DDL) COMMENT ON` is now possible. #95467 +- Fixed a bug where a database [`RESTORE`]({% link v23.1/restore.md %}) would not grant `CREATE` and `USAGE` on the public schema to the public role. #95466 +- Fixed the formatting of JSON values inside of a SQL array so they no longer have improper quoting. 
#94705 +- Fixed a bug which could cause expressions with side-effects (e.g., volatile expressions or data-modifying statements like `INSERT`) in `NOT MATERIALIZED` CTEs to be executed more than once. This bug was present since `NOT MATERIALIZED` was first supported in version 20.2.0. #95454 +- Fixed `pg_get_indexdef` so that it shows the expression used to define an expression-based index. In addition, the function was previously including columns stored by the index, which was incorrect and has now also been fixed. #95413 +- Reduced register session, deregister session, and session cancel query contention. #95553 +- Fixed a bug that could cause arguments of a [`COALESCE`]({% link v23.1/functions-and-operators.md %}#conditional-and-function-like-operators) statement to be evaluated when previous arguments always evaluated to non-NULL values. This bug could cause query errors to originate from arguments of a `COALESCE` that should have never been evaluated. #95599 +- Fixed the `array_to_string` [built-in function]({% link v23.1/functions-and-operators.md %}) so that nested arrays are traversed without printing 'ARRAY' at each nesting level. #95802 +- Fixed a bug where using the [`COPY`]({% link v23.1/copy.md %}) statement to copy data into a column with collated strings would result in an error similar to `internal error: unknown type collatedstring`. #95894 +- The content of `column_default` in `information_schema.columns` no longer has type annotations. #94153 +- Fixed a long-standing bug that caused incorrect parsing of double-quoted identifiers separated by one or more newlines. #96019 +- Fixed rare internal errors in `LATERAL` queries with redundant function calls. #96048 +- Fixed an internal error which may occur in the `SHOW RANGE FROM TABLE` statement when the `FOR ROW` clause specifies a `BYTE` literal and the corresponding column data type is `BIT`. #96002 +- Fixed internal errors which may occur on some `AS OF SYSTEM TIME` expressions. 
#96113 +- Fixed a bug where a node with a disk stall would continue to accept new connections and preserve existing connections until the disk stall abated. #96371 +- Fixed a bug where a [`RESTORE`]({% link v23.1/restore.md %}) flow could hang indefinitely in the face of a context cancellation, manifesting as a stuck restore job. #96302 +- Fixed a bug where the `NOSQLLOGIN` [system-level privilege](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#supported-privileges) had a bug that made CockroachDB ignore it entirely, so it had no effect. The bug was introduced in v22.2.0-alpha.1. The `NOSQLLOGIN` [role option]({% link v23.1/alter-role.md %}#role-options) is unaffected by this bug. #96466 +- The compatibility scalar functions in `information_schema` are now listed in the proper namespace in `pg_catalog.pg_proc`. #96562 +- Fixed a bug in which the [`CREATE SCHEDULE`]({% link v23.1/create-schedule-for-backup.md %}) statement would not properly handle a placeholder for the `revision_history` option. #95675

Performance improvements

-- The optimizer can now avoid planning a sort in more cases with joins that perform lookups into an index with one or more columns sorted in descending order. This can significantly decrease the number of rows that have to be scanned in order to satisfy a [`LIMIT`]({% link v23.1/limit-offset.md %}) clause. [#93673][#93673] -- Significantly reduced CPU usage of the underlying gossip network in large clusters. [#89613][#89613] -- Refactored the query logic when fetching database index recommendations for the `DatabaseDetails` API endpoint, greatly reducing the query time and cost, particularly for large schemas. [#93937][#93937] -- Improved performance when populating `crdb_internal.default_privileges`. [#94247][#94247] -- Some types of [user-defined functions (UDFs)]({% link v23.1/user-defined-functions.md %}) are now inlined in query plans as relation expressions, which speeds up their evaluation. UDFs must be non-volatile and have a single statement in the function body to be inlined. [#92955][#92955] -- Improved the performance of `pg_{function,table,type}_is_visible`. [#94339][#94339] -- Long chains of incremental backups and restore of such chains will now allocate less memory during the unmarshaling of metadata. [#93997][#93997] -- Extended the RPC compression encoding with a length prefixing format, allowing more efficient decompression on receivers. [#93871][#93871] -- When planning lookup joins with a [`LIMIT`]({% link v23.1/limit-offset.md %}) clause from a `REGIONAL BY ROW` input table, the optimizer will now explore a locality-optimized-search plan of two lookup joins to possibly avoid high latency of reading rows in a remote region. [#93377][#93377] -- Improve the performance of the `SHOW FUNCTIONS` statement. [#94771][#94771] -- Improved the performance of several PostgreSQL compatibility built-ins. 
[#94771][#94771] -- In some cases, when planning an inverted zigzag join, the optimizer can now detect whether it is necessary to re-apply the filter after the zigzag join. If it is not necessary, the optimizer can produce a more efficient plan. [#95638][#95638] -- Fixed a bug which could lead to very slow drop when tables or views have a very large number of columns (greater than 1000). [#95850][#95850] -- In 22.2, CockroachDB introduced support for `DISCARD TEMP` and made `DISCARD ALL` actually discard temp tables. This implementation ran expensive logic to discover temporary schemas rather than consulting in-memory data structures. As a result, `DISCARD ALL`, which is issued regularly by connection pools, became an expensive operation when it should be cheap. This problem is now resolved. [#95876][#95876] -- In 22.2, logic was added to make `SET SESSION AUTHORIZATION DEFAULT` not a no-op. This implementation used more general code for setting the role for a session which made sure that the role exists. This improves the performance of the `DISCARD ALL` and `SET SESSION AUTHORIZATION DEFAULT` statements. [#95876][#95876] -- The optimizer now plans inverted index scans for queries that filter by JSON fetch value operators (`->`) with integer indices, e.g, `json_col->0 = '{"b": "c"}'`. [#95848][#95848] -- The optimizer now plans inverted index scans for queries that filter by JSON fetch value operators (`->`) with integer indices alongside the "contains" or the "contained by" operators, e.g, `json_col->0 @> '{"b": "c"}' or json_col->0 <@ '{"b": "c"}'`. [#96202][#96202] -- Added delegated snapshots which can reduce WAN traffic for snapshot movement. If there is another replica for this range with a closer locality than the delegate, the leaseholder will attempt to have that delegate send the snapshot. This is particularly useful in the case of a decommission of a node where most snapshots are transferred to another replica in the same locality. 
[#83991][#83991] -- The Raft proposal pipeline has been optimized to reduce interference between Raft proposals. This improves average and tail write latency at high concurrency. [#94165][#94165] +- The optimizer can now avoid planning a sort in more cases with joins that perform lookups into an index with one or more columns sorted in descending order. This can significantly decrease the number of rows that have to be scanned in order to satisfy a [`LIMIT`]({% link v23.1/limit-offset.md %}) clause. #93673 +- Significantly reduced CPU usage of the underlying gossip network in large clusters. #89613 +- Refactored the query logic when fetching database index recommendations for the `DatabaseDetails` API endpoint, greatly reducing the query time and cost, particularly for large schemas. #93937 +- Improved performance when populating `crdb_internal.default_privileges`. #94247 +- Some types of [user-defined functions (UDFs)]({% link v23.1/user-defined-functions.md %}) are now inlined in query plans as relation expressions, which speeds up their evaluation. UDFs must be non-volatile and have a single statement in the function body to be inlined. #92955 +- Improved the performance of `pg_{function,table,type}_is_visible`. #94339 +- Long chains of incremental backups and restore of such chains will now allocate less memory during the unmarshaling of metadata. #93997 +- Extended the RPC compression encoding with a length prefixing format, allowing more efficient decompression on receivers. #93871 +- When planning lookup joins with a [`LIMIT`]({% link v23.1/limit-offset.md %}) clause from a `REGIONAL BY ROW` input table, the optimizer will now explore a locality-optimized-search plan of two lookup joins to possibly avoid high latency of reading rows in a remote region. #93377 +- Improve the performance of the `SHOW FUNCTIONS` statement. #94771 +- Improved the performance of several PostgreSQL compatibility built-ins. 
#94771 +- In some cases, when planning an inverted zigzag join, the optimizer can now detect whether it is necessary to re-apply the filter after the zigzag join. If it is not necessary, the optimizer can produce a more efficient plan. #95638 +- Fixed a bug which could lead to very slow drop when tables or views have a very large number of columns (greater than 1000). #95850 +- In 22.2, CockroachDB introduced support for `DISCARD TEMP` and made `DISCARD ALL` actually discard temp tables. This implementation ran expensive logic to discover temporary schemas rather than consulting in-memory data structures. As a result, `DISCARD ALL`, which is issued regularly by connection pools, became an expensive operation when it should be cheap. This problem is now resolved. #95876 +- In 22.2, logic was added to make `SET SESSION AUTHORIZATION DEFAULT` not a no-op. This implementation used more general code for setting the role for a session which made sure that the role exists. This improves the performance of the `DISCARD ALL` and `SET SESSION AUTHORIZATION DEFAULT` statements. #95876 +- The optimizer now plans inverted index scans for queries that filter by JSON fetch value operators (`->`) with integer indices, e.g, `json_col->0 = '{"b": "c"}'`. #95848 +- The optimizer now plans inverted index scans for queries that filter by JSON fetch value operators (`->`) with integer indices alongside the "contains" or the "contained by" operators, e.g, `json_col->0 @> '{"b": "c"}' or json_col->0 <@ '{"b": "c"}'`. #96202 +- Added delegated snapshots which can reduce WAN traffic for snapshot movement. If there is another replica for this range with a closer locality than the delegate, the leaseholder will attempt to have that delegate send the snapshot. This is particularly useful in the case of a decommission of a node where most snapshots are transferred to another replica in the same locality. 
#83991 +- The Raft proposal pipeline has been optimized to reduce interference between Raft proposals. This improves average and tail write latency at high concurrency. #94165

Build changes

-- The native binary for Cypress is now only downloaded and installed when UI end-to-end tests are run, instead of eagerly downloading it on all platforms at build-time. This restores the ability for non-{Windows, Darwin, Linux} platforms like FreeBSD and illumos to build CRDB without modifications, which broke in the initial 22.2 release. [#93800][#93800] -- Required NPM dependencies are now fetched from a Google Cloud Storage bucket managed by Cockroach Labs instead of from a git submodule committed in-tree. [#94152][#94152] +- The native binary for Cypress is now only downloaded and installed when UI end-to-end tests are run, instead of eagerly downloading it on all platforms at build-time. This restores the ability for non-{Windows, Darwin, Linux} platforms like FreeBSD and illumos to build CRDB without modifications, which broke in the initial 22.2 release. #93800 +- Required NPM dependencies are now fetched from a Google Cloud Storage bucket managed by Cockroach Labs instead of from a git submodule committed in-tree. #94152
@@ -266,245 +266,3 @@ We would like to thank the following contributors from the CockroachDB community
-[#83991]: https://github.com/cockroachdb/cockroach/pull/83991 -[#86061]: https://github.com/cockroachdb/cockroach/pull/86061 -[#88353]: https://github.com/cockroachdb/cockroach/pull/88353 -[#89613]: https://github.com/cockroachdb/cockroach/pull/89613 -[#89975]: https://github.com/cockroachdb/cockroach/pull/89975 -[#90830]: https://github.com/cockroachdb/cockroach/pull/90830 -[#91933]: https://github.com/cockroachdb/cockroach/pull/91933 -[#92285]: https://github.com/cockroachdb/cockroach/pull/92285 -[#92580]: https://github.com/cockroachdb/cockroach/pull/92580 -[#92589]: https://github.com/cockroachdb/cockroach/pull/92589 -[#92596]: https://github.com/cockroachdb/cockroach/pull/92596 -[#92949]: https://github.com/cockroachdb/cockroach/pull/92949 -[#92955]: https://github.com/cockroachdb/cockroach/pull/92955 -[#93068]: https://github.com/cockroachdb/cockroach/pull/93068 -[#93089]: https://github.com/cockroachdb/cockroach/pull/93089 -[#93157]: https://github.com/cockroachdb/cockroach/pull/93157 -[#93190]: https://github.com/cockroachdb/cockroach/pull/93190 -[#93219]: https://github.com/cockroachdb/cockroach/pull/93219 -[#93274]: https://github.com/cockroachdb/cockroach/pull/93274 -[#93377]: https://github.com/cockroachdb/cockroach/pull/93377 -[#93414]: https://github.com/cockroachdb/cockroach/pull/93414 -[#93513]: https://github.com/cockroachdb/cockroach/pull/93513 -[#93628]: https://github.com/cockroachdb/cockroach/pull/93628 -[#93644]: https://github.com/cockroachdb/cockroach/pull/93644 -[#93657]: https://github.com/cockroachdb/cockroach/pull/93657 -[#93673]: https://github.com/cockroachdb/cockroach/pull/93673 -[#93675]: https://github.com/cockroachdb/cockroach/pull/93675 -[#93748]: https://github.com/cockroachdb/cockroach/pull/93748 -[#93750]: https://github.com/cockroachdb/cockroach/pull/93750 -[#93751]: https://github.com/cockroachdb/cockroach/pull/93751 -[#93754]: https://github.com/cockroachdb/cockroach/pull/93754 -[#93757]: 
https://github.com/cockroachdb/cockroach/pull/93757 -[#93760]: https://github.com/cockroachdb/cockroach/pull/93760 -[#93769]: https://github.com/cockroachdb/cockroach/pull/93769 -[#93800]: https://github.com/cockroachdb/cockroach/pull/93800 -[#93824]: https://github.com/cockroachdb/cockroach/pull/93824 -[#93836]: https://github.com/cockroachdb/cockroach/pull/93836 -[#93838]: https://github.com/cockroachdb/cockroach/pull/93838 -[#93855]: https://github.com/cockroachdb/cockroach/pull/93855 -[#93858]: https://github.com/cockroachdb/cockroach/pull/93858 -[#93868]: https://github.com/cockroachdb/cockroach/pull/93868 -[#93871]: https://github.com/cockroachdb/cockroach/pull/93871 -[#93891]: https://github.com/cockroachdb/cockroach/pull/93891 -[#93901]: https://github.com/cockroachdb/cockroach/pull/93901 -[#93937]: https://github.com/cockroachdb/cockroach/pull/93937 -[#93938]: https://github.com/cockroachdb/cockroach/pull/93938 -[#93939]: https://github.com/cockroachdb/cockroach/pull/93939 -[#93952]: https://github.com/cockroachdb/cockroach/pull/93952 -[#93956]: https://github.com/cockroachdb/cockroach/pull/93956 -[#93974]: https://github.com/cockroachdb/cockroach/pull/93974 -[#93979]: https://github.com/cockroachdb/cockroach/pull/93979 -[#93991]: https://github.com/cockroachdb/cockroach/pull/93991 -[#93997]: https://github.com/cockroachdb/cockroach/pull/93997 -[#94002]: https://github.com/cockroachdb/cockroach/pull/94002 -[#94021]: https://github.com/cockroachdb/cockroach/pull/94021 -[#94031]: https://github.com/cockroachdb/cockroach/pull/94031 -[#94045]: https://github.com/cockroachdb/cockroach/pull/94045 -[#94106]: https://github.com/cockroachdb/cockroach/pull/94106 -[#94122]: https://github.com/cockroachdb/cockroach/pull/94122 -[#94123]: https://github.com/cockroachdb/cockroach/pull/94123 -[#94134]: https://github.com/cockroachdb/cockroach/pull/94134 -[#94151]: https://github.com/cockroachdb/cockroach/pull/94151 -[#94152]: 
https://github.com/cockroachdb/cockroach/pull/94152 -[#94153]: https://github.com/cockroachdb/cockroach/pull/94153 -[#94154]: https://github.com/cockroachdb/cockroach/pull/94154 -[#94165]: https://github.com/cockroachdb/cockroach/pull/94165 -[#94207]: https://github.com/cockroachdb/cockroach/pull/94207 -[#94246]: https://github.com/cockroachdb/cockroach/pull/94246 -[#94247]: https://github.com/cockroachdb/cockroach/pull/94247 -[#94285]: https://github.com/cockroachdb/cockroach/pull/94285 -[#94314]: https://github.com/cockroachdb/cockroach/pull/94314 -[#94339]: https://github.com/cockroachdb/cockroach/pull/94339 -[#94342]: https://github.com/cockroachdb/cockroach/pull/94342 -[#94355]: https://github.com/cockroachdb/cockroach/pull/94355 -[#94399]: https://github.com/cockroachdb/cockroach/pull/94399 -[#94405]: https://github.com/cockroachdb/cockroach/pull/94405 -[#94421]: https://github.com/cockroachdb/cockroach/pull/94421 -[#94425]: https://github.com/cockroachdb/cockroach/pull/94425 -[#94429]: https://github.com/cockroachdb/cockroach/pull/94429 -[#94432]: https://github.com/cockroachdb/cockroach/pull/94432 -[#94436]: https://github.com/cockroachdb/cockroach/pull/94436 -[#94440]: https://github.com/cockroachdb/cockroach/pull/94440 -[#94455]: https://github.com/cockroachdb/cockroach/pull/94455 -[#94581]: https://github.com/cockroachdb/cockroach/pull/94581 -[#94634]: https://github.com/cockroachdb/cockroach/pull/94634 -[#94645]: https://github.com/cockroachdb/cockroach/pull/94645 -[#94653]: https://github.com/cockroachdb/cockroach/pull/94653 -[#94670]: https://github.com/cockroachdb/cockroach/pull/94670 -[#94677]: https://github.com/cockroachdb/cockroach/pull/94677 -[#94692]: https://github.com/cockroachdb/cockroach/pull/94692 -[#94700]: https://github.com/cockroachdb/cockroach/pull/94700 -[#94705]: https://github.com/cockroachdb/cockroach/pull/94705 -[#94710]: https://github.com/cockroachdb/cockroach/pull/94710 -[#94719]: 
https://github.com/cockroachdb/cockroach/pull/94719 -[#94720]: https://github.com/cockroachdb/cockroach/pull/94720 -[#94725]: https://github.com/cockroachdb/cockroach/pull/94725 -[#94739]: https://github.com/cockroachdb/cockroach/pull/94739 -[#94771]: https://github.com/cockroachdb/cockroach/pull/94771 -[#94777]: https://github.com/cockroachdb/cockroach/pull/94777 -[#94791]: https://github.com/cockroachdb/cockroach/pull/94791 -[#94796]: https://github.com/cockroachdb/cockroach/pull/94796 -[#94806]: https://github.com/cockroachdb/cockroach/pull/94806 -[#94863]: https://github.com/cockroachdb/cockroach/pull/94863 -[#94894]: https://github.com/cockroachdb/cockroach/pull/94894 -[#94915]: https://github.com/cockroachdb/cockroach/pull/94915 -[#94948]: https://github.com/cockroachdb/cockroach/pull/94948 -[#94950]: https://github.com/cockroachdb/cockroach/pull/94950 -[#94958]: https://github.com/cockroachdb/cockroach/pull/94958 -[#94962]: https://github.com/cockroachdb/cockroach/pull/94962 -[#94985]: https://github.com/cockroachdb/cockroach/pull/94985 -[#95005]: https://github.com/cockroachdb/cockroach/pull/95005 -[#95009]: https://github.com/cockroachdb/cockroach/pull/95009 -[#95016]: https://github.com/cockroachdb/cockroach/pull/95016 -[#95020]: https://github.com/cockroachdb/cockroach/pull/95020 -[#95026]: https://github.com/cockroachdb/cockroach/pull/95026 -[#95029]: https://github.com/cockroachdb/cockroach/pull/95029 -[#95038]: https://github.com/cockroachdb/cockroach/pull/95038 -[#95091]: https://github.com/cockroachdb/cockroach/pull/95091 -[#95179]: https://github.com/cockroachdb/cockroach/pull/95179 -[#95190]: https://github.com/cockroachdb/cockroach/pull/95190 -[#95209]: https://github.com/cockroachdb/cockroach/pull/95209 -[#95219]: https://github.com/cockroachdb/cockroach/pull/95219 -[#95234]: https://github.com/cockroachdb/cockroach/pull/95234 -[#95258]: https://github.com/cockroachdb/cockroach/pull/95258 -[#95275]: 
https://github.com/cockroachdb/cockroach/pull/95275 -[#95289]: https://github.com/cockroachdb/cockroach/pull/95289 -[#95291]: https://github.com/cockroachdb/cockroach/pull/95291 -[#95294]: https://github.com/cockroachdb/cockroach/pull/95294 -[#95303]: https://github.com/cockroachdb/cockroach/pull/95303 -[#95326]: https://github.com/cockroachdb/cockroach/pull/95326 -[#95376]: https://github.com/cockroachdb/cockroach/pull/95376 -[#95397]: https://github.com/cockroachdb/cockroach/pull/95397 -[#95398]: https://github.com/cockroachdb/cockroach/pull/95398 -[#95413]: https://github.com/cockroachdb/cockroach/pull/95413 -[#95414]: https://github.com/cockroachdb/cockroach/pull/95414 -[#95426]: https://github.com/cockroachdb/cockroach/pull/95426 -[#95436]: https://github.com/cockroachdb/cockroach/pull/95436 -[#95450]: https://github.com/cockroachdb/cockroach/pull/95450 -[#95454]: https://github.com/cockroachdb/cockroach/pull/95454 -[#95461]: https://github.com/cockroachdb/cockroach/pull/95461 -[#95466]: https://github.com/cockroachdb/cockroach/pull/95466 -[#95467]: https://github.com/cockroachdb/cockroach/pull/95467 -[#95516]: https://github.com/cockroachdb/cockroach/pull/95516 -[#95532]: https://github.com/cockroachdb/cockroach/pull/95532 -[#95544]: https://github.com/cockroachdb/cockroach/pull/95544 -[#95553]: https://github.com/cockroachdb/cockroach/pull/95553 -[#95556]: https://github.com/cockroachdb/cockroach/pull/95556 -[#95562]: https://github.com/cockroachdb/cockroach/pull/95562 -[#95591]: https://github.com/cockroachdb/cockroach/pull/95591 -[#95599]: https://github.com/cockroachdb/cockroach/pull/95599 -[#95638]: https://github.com/cockroachdb/cockroach/pull/95638 -[#95639]: https://github.com/cockroachdb/cockroach/pull/95639 -[#95675]: https://github.com/cockroachdb/cockroach/pull/95675 -[#95710]: https://github.com/cockroachdb/cockroach/pull/95710 -[#95739]: https://github.com/cockroachdb/cockroach/pull/95739 -[#95795]: 
https://github.com/cockroachdb/cockroach/pull/95795 -[#95796]: https://github.com/cockroachdb/cockroach/pull/95796 -[#95798]: https://github.com/cockroachdb/cockroach/pull/95798 -[#95802]: https://github.com/cockroachdb/cockroach/pull/95802 -[#95848]: https://github.com/cockroachdb/cockroach/pull/95848 -[#95850]: https://github.com/cockroachdb/cockroach/pull/95850 -[#95868]: https://github.com/cockroachdb/cockroach/pull/95868 -[#95876]: https://github.com/cockroachdb/cockroach/pull/95876 -[#95883]: https://github.com/cockroachdb/cockroach/pull/95883 -[#95894]: https://github.com/cockroachdb/cockroach/pull/95894 -[#95911]: https://github.com/cockroachdb/cockroach/pull/95911 -[#95963]: https://github.com/cockroachdb/cockroach/pull/95963 -[#95997]: https://github.com/cockroachdb/cockroach/pull/95997 -[#96002]: https://github.com/cockroachdb/cockroach/pull/96002 -[#96010]: https://github.com/cockroachdb/cockroach/pull/96010 -[#96019]: https://github.com/cockroachdb/cockroach/pull/96019 -[#96023]: https://github.com/cockroachdb/cockroach/pull/96023 -[#96029]: https://github.com/cockroachdb/cockroach/pull/96029 -[#96030]: https://github.com/cockroachdb/cockroach/pull/96030 -[#96048]: https://github.com/cockroachdb/cockroach/pull/96048 -[#96112]: https://github.com/cockroachdb/cockroach/pull/96112 -[#96113]: https://github.com/cockroachdb/cockroach/pull/96113 -[#96115]: https://github.com/cockroachdb/cockroach/pull/96115 -[#96202]: https://github.com/cockroachdb/cockroach/pull/96202 -[#96213]: https://github.com/cockroachdb/cockroach/pull/96213 -[#96217]: https://github.com/cockroachdb/cockroach/pull/96217 -[#96243]: https://github.com/cockroachdb/cockroach/pull/96243 -[#96278]: https://github.com/cockroachdb/cockroach/pull/96278 -[#96279]: https://github.com/cockroachdb/cockroach/pull/96279 -[#96287]: https://github.com/cockroachdb/cockroach/pull/96287 -[#96302]: https://github.com/cockroachdb/cockroach/pull/96302 -[#96314]: 
https://github.com/cockroachdb/cockroach/pull/96314 -[#96340]: https://github.com/cockroachdb/cockroach/pull/96340 -[#96371]: https://github.com/cockroachdb/cockroach/pull/96371 -[#96456]: https://github.com/cockroachdb/cockroach/pull/96456 -[#96466]: https://github.com/cockroachdb/cockroach/pull/96466 -[#96476]: https://github.com/cockroachdb/cockroach/pull/96476 -[#96510]: https://github.com/cockroachdb/cockroach/pull/96510 -[#96562]: https://github.com/cockroachdb/cockroach/pull/96562 -[#96563]: https://github.com/cockroachdb/cockroach/pull/96563 -[019299b04]: https://github.com/cockroachdb/cockroach/commit/019299b04 -[0e57b6aef]: https://github.com/cockroachdb/cockroach/commit/0e57b6aef -[2428a4377]: https://github.com/cockroachdb/cockroach/commit/2428a4377 -[25f5dd3ba]: https://github.com/cockroachdb/cockroach/commit/25f5dd3ba -[26f442706]: https://github.com/cockroachdb/cockroach/commit/26f442706 -[28288d23b]: https://github.com/cockroachdb/cockroach/commit/28288d23b -[2abca5cb4]: https://github.com/cockroachdb/cockroach/commit/2abca5cb4 -[2ed44c54c]: https://github.com/cockroachdb/cockroach/commit/2ed44c54c -[2f69d92ad]: https://github.com/cockroachdb/cockroach/commit/2f69d92ad -[317c6e5af]: https://github.com/cockroachdb/cockroach/commit/317c6e5af -[3dbb321e1]: https://github.com/cockroachdb/cockroach/commit/3dbb321e1 -[4e6aaf381]: https://github.com/cockroachdb/cockroach/commit/4e6aaf381 -[55b032315]: https://github.com/cockroachdb/cockroach/commit/55b032315 -[6663caeb8]: https://github.com/cockroachdb/cockroach/commit/6663caeb8 -[68fd6477f]: https://github.com/cockroachdb/cockroach/commit/68fd6477f -[72ee36ab8]: https://github.com/cockroachdb/cockroach/commit/72ee36ab8 -[74dd4a175]: https://github.com/cockroachdb/cockroach/commit/74dd4a175 -[7b92c4eb0]: https://github.com/cockroachdb/cockroach/commit/7b92c4eb0 -[7d1db1cd7]: https://github.com/cockroachdb/cockroach/commit/7d1db1cd7 -[82d4c7cdf]: https://github.com/cockroachdb/cockroach/commit/82d4c7cdf 
-[9235a8b92]: https://github.com/cockroachdb/cockroach/commit/9235a8b92 -[9247472f5]: https://github.com/cockroachdb/cockroach/commit/9247472f5 -[99a7b47f2]: https://github.com/cockroachdb/cockroach/commit/99a7b47f2 -[9a8de1ede]: https://github.com/cockroachdb/cockroach/commit/9a8de1ede -[9f64db7f1]: https://github.com/cockroachdb/cockroach/commit/9f64db7f1 -[a94858bff]: https://github.com/cockroachdb/cockroach/commit/a94858bff -[a954f43a8]: https://github.com/cockroachdb/cockroach/commit/a954f43a8 -[afeaf3484]: https://github.com/cockroachdb/cockroach/commit/afeaf3484 -[b9a0471e4]: https://github.com/cockroachdb/cockroach/commit/b9a0471e4 -[b9d00ddd7]: https://github.com/cockroachdb/cockroach/commit/b9d00ddd7 -[bd7c9b961]: https://github.com/cockroachdb/cockroach/commit/bd7c9b961 -[bf6ce14e2]: https://github.com/cockroachdb/cockroach/commit/bf6ce14e2 -[c0231f35a]: https://github.com/cockroachdb/cockroach/commit/c0231f35a -[c7b299369]: https://github.com/cockroachdb/cockroach/commit/c7b299369 -[c94d0b7fc]: https://github.com/cockroachdb/cockroach/commit/c94d0b7fc -[ce5db0c69]: https://github.com/cockroachdb/cockroach/commit/ce5db0c69 -[d05c9699d]: https://github.com/cockroachdb/cockroach/commit/d05c9699d -[d8d13ce98]: https://github.com/cockroachdb/cockroach/commit/d8d13ce98 -[e14fe033d]: https://github.com/cockroachdb/cockroach/commit/e14fe033d -[e7d394b34]: https://github.com/cockroachdb/cockroach/commit/e7d394b34 -[ec0a2c4c1]: https://github.com/cockroachdb/cockroach/commit/ec0a2c4c1 -[edad297aa]: https://github.com/cockroachdb/cockroach/commit/edad297aa -[f2351f0e0]: https://github.com/cockroachdb/cockroach/commit/f2351f0e0 -[f7170c5db]: https://github.com/cockroachdb/cockroach/commit/f7170c5db -[f8a2c20f5]: https://github.com/cockroachdb/cockroach/commit/f8a2c20f5 -[fc0ec5521]: https://github.com/cockroachdb/cockroach/commit/fc0ec5521 -[ffad33a5a]: https://github.com/cockroachdb/cockroach/commit/ffad33a5a diff --git 
a/src/current/_includes/releases/v23.1/v23.1.0-alpha.3.md b/src/current/_includes/releases/v23.1/v23.1.0-alpha.3.md index c4f44c35165..5fe1046082a 100644 --- a/src/current/_includes/releases/v23.1/v23.1.0-alpha.3.md +++ b/src/current/_includes/releases/v23.1/v23.1.0-alpha.3.md @@ -6,52 +6,52 @@ Release Date: February 21, 2023

General changes

-- Added new metrics to count [paused jobs]({% link v23.1/pause-job.md %}) for every job type. For example, the metric for paused changefeed jobs is `jobs.changefeed.currently_paused`. These metrics are updated at an interval defined by the cluster setting `jobs.metrics.interval.poll`, which defaults to 10 seconds. [#89752][#89752] +- Added new metrics to count [paused jobs]({% link v23.1/pause-job.md %}) for every job type. For example, the metric for paused changefeed jobs is `jobs.changefeed.currently_paused`. These metrics are updated at an interval defined by the cluster setting `jobs.metrics.interval.poll`, which defaults to 10 seconds. #89752

{{ site.data.products.enterprise }} edition changes

-- Added support for Azure Key Vault KMS, which will allow users to [encrypt and decrypt their backups]({% link v23.1/take-and-restore-encrypted-backups.md %}#use-key-management-service) using keys stored in Azure Key Vault. App Registration authentication (Azure RBAC) for Azure Storage is also now supported.[#96459][#96459] -- Introduced a new locality filter option (`coordinator_locality`) that can be specified when a [backup job]({% link v23.1/backup.md %}) is created. This option restricts the backup metadata writing to the designated nodes. [#95791][#95791] -- Fixed a bug that caused the server to crash if trying to restore a table from a backup generated by `BACKUP TABLE` from a schema that includes [user-defined functions]({% link v23.1/user-defined-functions.md %}), and the restore target database does not have a schema with the same name. [#96911][#96911] -- Changefeeds with the `unordered` option can use multi-region [Google Cloud Pub/Sub topics]({% link v23.1/changefeed-sinks.md %}). [#96567][#96567] -- Fixed a bug in changefeeds where long running [initial scans]({% link v23.1/create-changefeed.md %}#initial-scan) will fail to generate checkpoints. Failure to generate [checkpoints]({% link v23.1/how-does-an-enterprise-changefeed-work.md %}) is particularly bad if the changefeed restarts. Without checkpoints, the changefeed will restart from the beginning, and in the worst case, when exporting substantially sized tables, the changefeed initial scan may not complete. [#96995][#96995] +- Added support for Azure Key Vault KMS, which will allow users to [encrypt and decrypt their backups]({% link v23.1/take-and-restore-encrypted-backups.md %}#use-key-management-service) using keys stored in Azure Key Vault. App Registration authentication (Azure RBAC) for Azure Storage is also now supported. #96459 +- Introduced a new locality filter option (`coordinator_locality`) that can be specified when a [backup job]({% link v23.1/backup.md %}) is created. 
This option restricts the backup metadata writing to the designated nodes. #95791 +- Fixed a bug that caused the server to crash if trying to restore a table from a backup generated by `BACKUP TABLE` from a schema that includes [user-defined functions]({% link v23.1/user-defined-functions.md %}), and the restore target database does not have a schema with the same name. #96911 +- Changefeeds with the `unordered` option can use multi-region [Google Cloud Pub/Sub topics]({% link v23.1/changefeed-sinks.md %}). #96567 +- Fixed a bug in changefeeds where long running [initial scans]({% link v23.1/create-changefeed.md %}#initial-scan) will fail to generate checkpoints. Failure to generate [checkpoints]({% link v23.1/how-does-an-enterprise-changefeed-work.md %}) is particularly bad if the changefeed restarts. Without checkpoints, the changefeed will restart from the beginning, and in the worst case, when exporting substantially sized tables, the changefeed initial scan may not complete. #96995

SQL language changes

-- Added [latency information]({% link cockroachcloud/statements-page.md %}#statement-statistics) in seconds to the statement statistics on `crdb_internal.statement_statistics`, `system.statement_statistics`, and `crdb_internal.cluster_statement_statistics`, with information about: `min`, `max`, `p50`, `p90`, and `p99. Also added the columns: `latency_seconds_min`, `latency_seconds_max`, `latency_seconds_p50`, `latency_seconds_p90`, and `latency_seconds_p99` to `crdb_internal.node_statement_statistics`.[#96396][#96396] -- Deprecated the `PGDUMP` and `MYSQLDUMP` formats for [`IMPORT`]({% link v23.1/import.md %}). They are still present, but will be removed in a future release. See the [Migration Overview]({% link molt/migration-overview.md %}) page for alternatives. [#96386][#96386] -- [`COPY ... FROM ... QUOTE '"'`]({% link v23.1/copy.md %}) will no longer error. [#96572][#96572] -- Added `last_error_code` column to the `crdb_internal.node_statement_statistics` table. Added `last_error_code` field to the `statistics` JSON blob in the `crdb_internal.statement_statistics` and `system.statement_statistics` tables. [#96436][#96436] -- Added support for expressions of the form `COLLATE "default"`, `COLLATE "C"`, and `COLLATE "POSIX"`. Since the default [collation]({% link v23.1/collate.md %}) cannot be changed currently, these expressions are all equivalent. The expressions are evaluated by treating the input as a normal string, and ignoring the collation. This means that comparisons between strings and collated strings that use `"default"`, `"C"`, or `"POSIX"` are now supported. Creating a column with the `"C"` or `"POSIX"` collations is still not supported. [#96828][#96828] -- The insights subsystem in `sqlstats` is now able to detect failed executions, regardless if they were slow or not. [#97039][#97039] -- The internal `statement_statistics` and `transaction_statistics` tables now include sampled execution statistics on storage iteration. 
[#96016][#96016] -- Introduced the `declare_cursor_statement_timeout_enabled` [session variable]({% link v23.1/set-vars.md %}) that disables statement timeouts during `FETCH` when using `DECLARE CURSOR`. [#96607][#96607] +- Added [latency information]({% link cockroachcloud/statements-page.md %}#statement-statistics) in seconds to the statement statistics on `crdb_internal.statement_statistics`, `system.statement_statistics`, and `crdb_internal.cluster_statement_statistics`, with information about: `min`, `max`, `p50`, `p90`, and `p99`. Also added the columns: `latency_seconds_min`, `latency_seconds_max`, `latency_seconds_p50`, `latency_seconds_p90`, and `latency_seconds_p99` to `crdb_internal.node_statement_statistics`. #96396 +- Deprecated the `PGDUMP` and `MYSQLDUMP` formats for [`IMPORT`]({% link v23.1/import.md %}). They are still present, but will be removed in a future release. See the [Migration Overview]({% link molt/migration-overview.md %}) page for alternatives. #96386 +- [`COPY ... FROM ... QUOTE '"'`]({% link v23.1/copy.md %}) will no longer error. #96572 +- Added `last_error_code` column to the `crdb_internal.node_statement_statistics` table. Added `last_error_code` field to the `statistics` JSON blob in the `crdb_internal.statement_statistics` and `system.statement_statistics` tables. #96436 +- Added support for expressions of the form `COLLATE "default"`, `COLLATE "C"`, and `COLLATE "POSIX"`. Since the default [collation]({% link v23.1/collate.md %}) cannot be changed currently, these expressions are all equivalent. The expressions are evaluated by treating the input as a normal string, and ignoring the collation. This means that comparisons between strings and collated strings that use `"default"`, `"C"`, or `"POSIX"` are now supported. Creating a column with the `"C"` or `"POSIX"` collations is still not supported. #96828 +- The insights subsystem in `sqlstats` is now able to detect failed executions, regardless if they were slow or not. 
#97039 +- The internal `statement_statistics` and `transaction_statistics` tables now include sampled execution statistics on storage iteration. #96016 +- Introduced the `declare_cursor_statement_timeout_enabled` [session variable]({% link v23.1/set-vars.md %}) that disables statement timeouts during `FETCH` when using `DECLARE CURSOR`. #96607

Operational changes

-- A [`BACKUP`]({% link v23.1/backup.md %}) that encounters too many retryable errors will now fail instead of pausing to allow subsequent backups the chance to succeed. [#96673][#96673] -- Added an option to balance CPU time (`cpu`) instead of queries per second (`qps`) among stores in a cluster. This is done by setting `kv.allocator.load_based_rebalancing.objective='cpu'`. `kv.allocator.cpu_rebalance_threshold` is also added, similar to `kv.allocator.qps_rebalance_threshold` to control the target range for store CPU above and below the cluster mean. [#96127][#96127] -- The [load-based splitter]({% link v23.1/load-based-splitting.md %}) now supports using request CPU usage to split ranges. This is introduced with the previous cluster setting `kv.allocator.load_based_rebalancing.objective`, which when set to `cpu`, will use request CPU usage. The threshold above which CPU usage of a range is considered for splitting is defined in the cluster setting `kv.range_split.load_cpu_threshold`, which has a default value of `250ms`. [#96128][#96128] -- Added the flag `--disable-max-offset-check` to disable node self-termination when it detects [clock skew]({% link v23.1/operational-faqs.md %}#what-happens-when-node-clocks-are-not-properly-synchronized) with the rest of the cluster beyond `--max-offset`. The operator assumes responsibility for ensuring that real clock skew never exceeds `--max-offset`. [#96141][#96141] +- A [`BACKUP`]({% link v23.1/backup.md %}) that encounters too many retryable errors will now fail instead of pausing to allow subsequent backups the chance to succeed. #96673 +- Added an option to balance CPU time (`cpu`) instead of queries per second (`qps`) among stores in a cluster. This is done by setting `kv.allocator.load_based_rebalancing.objective='cpu'`. `kv.allocator.cpu_rebalance_threshold` is also added, similar to `kv.allocator.qps_rebalance_threshold` to control the target range for store CPU above and below the cluster mean. 
#96127 +- The [load-based splitter]({% link v23.1/load-based-splitting.md %}) now supports using request CPU usage to split ranges. This is introduced with the previous cluster setting `kv.allocator.load_based_rebalancing.objective`, which when set to `cpu`, will use request CPU usage. The threshold above which CPU usage of a range is considered for splitting is defined in the cluster setting `kv.range_split.load_cpu_threshold`, which has a default value of `250ms`. #96128 +- Added the flag `--disable-max-offset-check` to disable node self-termination when it detects [clock skew]({% link v23.1/operational-faqs.md %}#what-happens-when-node-clocks-are-not-properly-synchronized) with the rest of the cluster beyond `--max-offset`. The operator assumes responsibility for ensuring that real clock skew never exceeds `--max-offset`. #96141

DB Console changes

-- Added execution insights to the [Statement Fingerprint Details](https://www.cockroachlabs.com/docs/cockroachcloud/statements-page.html#sql-statement-fingerprints) and [Transaction Fingerprint Details]({% link v23.1/ui-transactions-page.md %}#transaction-fingerprints-view) pages. [#96440][#96440] -- Transaction insights that were not found will now display a message `Insight not found`. [#96832][#96832] -- Added a waiting statement ID and fingerprint to the [Insights Transaction]({% link v23.1/ui-insights-page.md %}#transaction-executions-view) details page. Added a blocking transaction ID and fingerprint to the the [Insights Statement]({% link v23.1/ui-insights-page.md %}#statement-executions-view) page. [#96872][#96872] +- Added execution insights to the [Statement Fingerprint Details](https://www.cockroachlabs.com/docs/cockroachcloud/statements-page.html#sql-statement-fingerprints) and [Transaction Fingerprint Details]({% link v23.1/ui-transactions-page.md %}#transaction-fingerprints-view) pages. #96440 +- Transaction insights that were not found will now display a message `Insight not found`. #96832 +- Added a waiting statement ID and fingerprint to the [Insights Transaction]({% link v23.1/ui-insights-page.md %}#transaction-executions-view) details page. Added a blocking transaction ID and fingerprint to the [Insights Statement]({% link v23.1/ui-insights-page.md %}#statement-executions-view) page. #96872

Bug fixes

-- Fixed a bug where casting a [`TIMETZ`]({% link v23.1/time.md %}) to an [`ARRAY`]({% link v23.1/array.md %}) results in displaying second offsets, even if they are zero. [#96583][#96583] -- Allowed [`ALTER TABLE .. ADD/DROP CONSTRAINT .. NOT VALID`]({% link v23.1/alter-table.md %}) and [`VALIDATE CONSTRAINT ..`]({% link v23.1/alter-table.md %}#validate-constraints) to behave consistently with PostgreSQL. Previously, the `VALIDATE CONSTRAINT` would fail and cause the whole statement to fail.[#96648][#96648] -- Resolved the [`TIMESTAMPTZ`]({% link v23.1/timestamp.md %}) to match PostgreSQL. We previously included the minute/second offset for `TIMESTAMPTZ` in certain places when casting it to [`STRING`]({% link v23.1/string.md %}), even when they were zero. [#96833][#96833] -- Resolved using negative years instead of BC when casting a [`TIMESTAMPTZ`]({% link v23.1/timestamp.md %}) to a [`STRING`]({% link v23.1/string.md %}). [#96833][#96833] -- Fixed the [`SHOW GRANTS FOR public`]({% link v23.1/show-grants.md %}) command so it works correctly. Previously, this would return an error saying that the `public` role does not exist. [#96957][#96957] -- Statement source (square bracket) syntax is no longer allowed in [user-defined functions]({% link v23.1/user-defined-functions.md %}). Prior to this fix, using this syntax in a UDF would cause a panic. This restriction will be lifted in the future. [#96824][#96824] +- Fixed a bug where casting a [`TIMETZ`]({% link v23.1/time.md %}) to an [`ARRAY`]({% link v23.1/array.md %}) results in displaying second offsets, even if they are zero. #96583 +- Allowed [`ALTER TABLE .. ADD/DROP CONSTRAINT .. NOT VALID`]({% link v23.1/alter-table.md %}) and [`VALIDATE CONSTRAINT ..`]({% link v23.1/alter-table.md %}#validate-constraints) to behave consistently with PostgreSQL. 
Previously, the `VALIDATE CONSTRAINT` would fail and cause the whole statement to fail. #96648 +- Resolved the [`TIMESTAMPTZ`]({% link v23.1/timestamp.md %}) to match PostgreSQL. We previously included the minute/second offset for `TIMESTAMPTZ` in certain places when casting it to [`STRING`]({% link v23.1/string.md %}), even when they were zero. #96833 +- Resolved using negative years instead of BC when casting a [`TIMESTAMPTZ`]({% link v23.1/timestamp.md %}) to a [`STRING`]({% link v23.1/string.md %}). #96833 +- Fixed the [`SHOW GRANTS FOR public`]({% link v23.1/show-grants.md %}) command so it works correctly. Previously, this would return an error saying that the `public` role does not exist. #96957 +- Statement source (square bracket) syntax is no longer allowed in [user-defined functions]({% link v23.1/user-defined-functions.md %}). Prior to this fix, using this syntax in a UDF would cause a panic. This restriction will be lifted in the future. #96824

Performance improvements

-- The execution of multiple [`FOREIGN KEY`]({% link v23.1/foreign-key.md %}) and [`UNIQUE`]({% link v23.1/unique.md %}) constraint checks have been parallelized in some cases. As a result, these checks should be completed faster, particularly in multi-region environments where the checks require cross-region reads. [#96123][#96123] +- The execution of multiple [`FOREIGN KEY`]({% link v23.1/foreign-key.md %}) and [`UNIQUE`]({% link v23.1/unique.md %}) constraint checks have been parallelized in some cases. As a result, these checks should be completed faster, particularly in multi-region environments where the checks require cross-region reads. #96123
@@ -64,39 +64,3 @@ We would like to thank the following contributors from the CockroachDB community
-[#89752]: https://github.com/cockroachdb/cockroach/pull/89752 -[#94825]: https://github.com/cockroachdb/cockroach/pull/94825 -[#95791]: https://github.com/cockroachdb/cockroach/pull/95791 -[#96016]: https://github.com/cockroachdb/cockroach/pull/96016 -[#96123]: https://github.com/cockroachdb/cockroach/pull/96123 -[#96127]: https://github.com/cockroachdb/cockroach/pull/96127 -[#96128]: https://github.com/cockroachdb/cockroach/pull/96128 -[#96141]: https://github.com/cockroachdb/cockroach/pull/96141 -[#96386]: https://github.com/cockroachdb/cockroach/pull/96386 -[#96396]: https://github.com/cockroachdb/cockroach/pull/96396 -[#96436]: https://github.com/cockroachdb/cockroach/pull/96436 -[#96440]: https://github.com/cockroachdb/cockroach/pull/96440 -[#96459]: https://github.com/cockroachdb/cockroach/pull/96459 -[#96567]: https://github.com/cockroachdb/cockroach/pull/96567 -[#96572]: https://github.com/cockroachdb/cockroach/pull/96572 -[#96583]: https://github.com/cockroachdb/cockroach/pull/96583 -[#96607]: https://github.com/cockroachdb/cockroach/pull/96607 -[#96648]: https://github.com/cockroachdb/cockroach/pull/96648 -[#96673]: https://github.com/cockroachdb/cockroach/pull/96673 -[#96824]: https://github.com/cockroachdb/cockroach/pull/96824 -[#96828]: https://github.com/cockroachdb/cockroach/pull/96828 -[#96832]: https://github.com/cockroachdb/cockroach/pull/96832 -[#96833]: https://github.com/cockroachdb/cockroach/pull/96833 -[#96872]: https://github.com/cockroachdb/cockroach/pull/96872 -[#96902]: https://github.com/cockroachdb/cockroach/pull/96902 -[#96911]: https://github.com/cockroachdb/cockroach/pull/96911 -[#96957]: https://github.com/cockroachdb/cockroach/pull/96957 -[#96995]: https://github.com/cockroachdb/cockroach/pull/96995 -[#97039]: https://github.com/cockroachdb/cockroach/pull/97039 -[0529c92d4]: https://github.com/cockroachdb/cockroach/commit/0529c92d4 -[14301f0d8]: https://github.com/cockroachdb/cockroach/commit/14301f0d8 -[15b1c6ae6]: 
https://github.com/cockroachdb/cockroach/commit/15b1c6ae6 -[470777fa3]: https://github.com/cockroachdb/cockroach/commit/470777fa3 -[8cf2cedb6]: https://github.com/cockroachdb/cockroach/commit/8cf2cedb6 -[9266fdc2a]: https://github.com/cockroachdb/cockroach/commit/9266fdc2a -[ac23f4667]: https://github.com/cockroachdb/cockroach/commit/ac23f4667 diff --git a/src/current/_includes/releases/v23.1/v23.1.0-alpha.4.md b/src/current/_includes/releases/v23.1/v23.1.0-alpha.4.md index ec383b0fedf..1dc777a8f8a 100644 --- a/src/current/_includes/releases/v23.1/v23.1.0-alpha.4.md +++ b/src/current/_includes/releases/v23.1/v23.1.0-alpha.4.md @@ -6,18 +6,18 @@ Release Date: February 27, 2023

{{ site.data.products.enterprise }} edition changes

-- Some of the transformations specific to changefeeds have been deprecated and replaced. These functions were released in limited access in v22.2. Deprecated changefeed transformations continue to function. Closely monitor changefeeds that are created during upgrade. While effort was made to maintain backward compatibility, the updated changefeed transformation may produce slightly different output, such as different column names. [#96295][#96295] -- Add support for implicit authentication to Azure Storage and KMS. [#96825][#96825] -- Add support for `CREATE EXTERNAL CONNECTION ... AS "postgresql://"` or `"postgres://"`. These external connections may be specified as the source in streaming replication. [#96551][#96551] -- Add support for referencing [user defined functions (UDFs)]({% link v23.1/user-defined-functions.md %}) from other objects. Backup and restore operations can now read and write UDF descriptors. [#97038][#97038] +- Some of the transformations specific to changefeeds have been deprecated and replaced. These functions were released in limited access in v22.2. Deprecated changefeed transformations continue to function. Closely monitor changefeeds that are created during upgrade. While effort was made to maintain backward compatibility, the updated changefeed transformation may produce slightly different output, such as different column names. #96295 +- Add support for implicit authentication to Azure Storage and KMS. #96825 +- Add support for `CREATE EXTERNAL CONNECTION ... AS "postgresql://"` or `"postgres://"`. These external connections may be specified as the source in streaming replication. #96551 +- Add support for referencing [user defined functions (UDFs)]({% link v23.1/user-defined-functions.md %}) from other objects. Backup and restore operations can now read and write UDF descriptors. #97038

SQL language changes

-- UDFs can now return a set of results by setting the return type to `SETOF`. [#96698][#96698] -- UDFs with implicit record return types will return an error when called if the return type has been altered and is no longer compatible with the body of the UDF. [#96696][#96696] -- The `COPY ... TO STDOUT` statement allows you to export a table or arbitrary query in the PostgreSQL wire-compatible format. Text and CSV formats are supported. [#94408][#94408] -- Add the read-only "ssl" session variable. Value is "off" if the server was started in insecure mode (which disables TLS), or "on" otherwise. This is based on the Postgres variable of the same name. [#97257][#97257] -- Add a hard limit of how much data can be flushed to system tables for sql stats. [#97123][#97123] +- UDFs can now return a set of results by setting the return type to `SETOF`. #96698 +- UDFs with implicit record return types will return an error when called if the return type has been altered and is no longer compatible with the body of the UDF. #96696 +- The `COPY ... TO STDOUT` statement allows you to export a table or arbitrary query in the PostgreSQL wire-compatible format. Text and CSV formats are supported. #94408 +- Add the read-only "ssl" session variable. Value is "off" if the server was started in insecure mode (which disables TLS), or "on" otherwise. This is based on the Postgres variable of the same name. #97257 +- Add a hard limit of how much data can be flushed to system tables for sql stats. #97123 - Add support for the `REDACT` flag to the following variants of `EXPLAIN`: - `EXPLAIN` - `EXPLAIN (PLAN)` @@ -25,53 +25,53 @@ Release Date: February 27, 2023 - `EXPLAIN ANALYZE` - `EXPLAIN ANALYZE (PLAN)` - These explain statements (along with `EXPLAIN ANALYZE (DEBUG)`, which already supported `REDACT`) will have constants, literal values, parameter values, and any other user data redacted in output. [#95136][#95136] -- Previously UDFs are not allowed in tables and any other object. 
This patch enables UDF usage in CHECK constraints of tables in both legacy schema changer and delcarative schema changer. Circular ependencies are not allowed, namely if a UDF depends on a table, then the table can't use that UDF. [#97038][#97038] -- A version gate has been added to prevent UDF usage in CHECK constraints before a cluster is fully upgraded to v23.1 or above. [#97038][#97038] -- Previously users were able to use UDFs from tables with `SET DEFAULT` and `SET ON UPDATE` even when they are disallowed from `CREATE TABLE` and `ADD COLUMN`. This patch disallows those two cases from `ALTER TABLE ALTER COLUMN`. [#97390][#97390] + These explain statements (along with `EXPLAIN ANALYZE (DEBUG)`, which already supported `REDACT`) will have constants, literal values, parameter values, and any other user data redacted in output. #95136 +- Previously UDFs are not allowed in tables and any other object. This patch enables UDF usage in CHECK constraints of tables in both legacy schema changer and declarative schema changer. Circular dependencies are not allowed, namely if a UDF depends on a table, then the table can't use that UDF. #97038 +- A version gate has been added to prevent UDF usage in CHECK constraints before a cluster is fully upgraded to v23.1 or above. #97038 +- Previously users were able to use UDFs from tables with `SET DEFAULT` and `SET ON UPDATE` even when they are disallowed from `CREATE TABLE` and `ADD COLUMN`. This patch disallows those two cases from `ALTER TABLE ALTER COLUMN`. #97390

Operational changes

-- Add `COCKROACH_RAFT_LOG_QUEUE_CONCURRENCY` env var which controls the number of parallel workers doing Raft log truncations. It can be used to make the in-memory log truncations more agressive and reduce the amount of Raft log data flushed to disk. [#97029][#97029] -- The new timeseries metric `storage.keys.tombstone.count` shows the current count of point and range deletion tombstones across the storage engine. [#97306][#97306] -- The value of `kv.range_split.load_cpu_threshold` controls the CPU per-second threshold above which a range will be split. Its default value has been increased from `250ms` to `500ms`, based on performance experiments. [#97113][#97113] -- CPU balancing is enabled as the default load based rebalancing objective. This can be reverted by setting `kv.allocator.load_based_rebalancing.objective` to `qps`. [#97424][#97424] +- Add `COCKROACH_RAFT_LOG_QUEUE_CONCURRENCY` env var which controls the number of parallel workers doing Raft log truncations. It can be used to make the in-memory log truncations more aggressive and reduce the amount of Raft log data flushed to disk. #97029 +- The new timeseries metric `storage.keys.tombstone.count` shows the current count of point and range deletion tombstones across the storage engine. #97306 +- The value of `kv.range_split.load_cpu_threshold` controls the CPU per-second threshold above which a range will be split. Its default value has been increased from `250ms` to `500ms`, based on performance experiments. #97113 +- CPU balancing is enabled as the default load based rebalancing objective. This can be reverted by setting `kv.allocator.load_based_rebalancing.objective` to `qps`. #97424

DB Console changes

-- Add columns p50, p90, p99, max and min latency for Statement table on SQL Activity page. [#97082][#97082] -- Show a warning for Statement Insights when the SQL API returns a "max size exceed" error. [#97153][#97153] -- Show a warning for Transaction Insights when the SQL API returns a "max size exceed" error. [#97277][#97277] -- Show a warning for Schema Insights when the SQL API returns a "max size exceed" error. [#97312][#97312] +- Add columns p50, p90, p99, max and min latency for Statement table on SQL Activity page. #97082 +- Show a warning for Statement Insights when the SQL API returns a "max size exceed" error. #97153 +- Show a warning for Transaction Insights when the SQL API returns a "max size exceed" error. #97277 +- Show a warning for Schema Insights when the SQL API returns a "max size exceed" error. #97312

Bug fixes

-- Fixed a bug where the `AS OF SYSTEM TIME` clause was handled incorrectly in an implicit transaction that had multiple statements. [#97063][#97063] -- This patch fixes asymmetric typing of `>` and `<` expressions which may cause erroring of expressions which are legal. [#97022][#97022] -- This patch fixes possible internal errors in `SOME`, `ANY` and `ALL` expressions of the form: `expr > SOME(expr1, expr2, expr3...)` [#97022][#97022] -- Fixed the syntax error for `SELECT ... QUERY` (without `AS`) statement. [#97041][#97041] -- Decommissions that would violate constraints set on a subset of replicas for a range (e.g. `num_replicas = 3, : 1`) will no longer be able to execute, respecting constraints during and after the decommission. [#94810][#94810] -- Adjusted the size reported for `TIME`, `TIMETZ`, `TIMESTAMP`, `TIMESTAMPTZ`, and `DATE` types in the pgwire protocol. [#97145][#97145] -- The ParameterStatus message is now only sent over the pgwire protocol if the value of the parameter changed. (The parameters that are sent this way are timezone, intervalstyle, datestyle, is_superuser, and application_name.) [#97145][#97145] -- Users can now go to the next page of results when there are more than 20 active statements or transactions in the Active Execution page. [#97122][#97122] -- Since 22.1 when rangefeed enablement overrides in span configs were introduced, rangefeed requests that reached spans outside the range would not cause range cache invalidation due to the setting being checked first, thus requests could repeatedly hit the same incorrect range, causing errors until cache invalidation or node restart. This fix correctly checks that the span is within the range prior to checking the enablement settings, thus invalidating the cache when a request reaches an incorrect range and causing subsequent requests to successfully reach the correct range. 
[#97212][#97212] -- A bug has been fixed that caused errors when creating multiple user-defined functions with the same name and different argument types in the same type family. For example, it was impossible to create both functions `f(i INT2)` and `f(INT4)`. [#96481][#96481] -- The following spammy log message was removed: > lease [...] expired before being followed by lease [...]; foreground traffic may have been impacted [#97358][#97358] -- Previously, `ALTER TABLE ... INJECT STATISTICS` command would fail if a column with COLLATED STRING type had histograms to be injected, and this is now fixed. The bug has been present since at least 21.2. [#96695][#96695] -- Fixed a bug in the query engine that could cause incorrect results in some cases when a zigzag join was planned. The bug could occur when the two indexes used for the zigzag join had a suffix of matching columns but with different directions. For example, planning a zigzag join with `INDEX(a ASC, b ASC)` and `INDEX(c ASC, b DESC)` could cause incorrect results. This bug has existed since at least v19.1. It is now fixed, because the optimizer will no longer plan a zigzag join in such cases. [#97151][#97151] +- Fixed a bug where the `AS OF SYSTEM TIME` clause was handled incorrectly in an implicit transaction that had multiple statements. #97063 +- This patch fixes asymmetric typing of `>` and `<` expressions which may cause erroring of expressions which are legal. #97022 +- This patch fixes possible internal errors in `SOME`, `ANY` and `ALL` expressions of the form: `expr > SOME(expr1, expr2, expr3...)` #97022 +- Fixed the syntax error for `SELECT ... QUERY` (without `AS`) statement. #97041 +- Decommissions that would violate constraints set on a subset of replicas for a range (e.g. `num_replicas = 3, : 1`) will no longer be able to execute, respecting constraints during and after the decommission. 
#94810 +- Adjusted the size reported for `TIME`, `TIMETZ`, `TIMESTAMP`, `TIMESTAMPTZ`, and `DATE` types in the pgwire protocol. #97145 +- The ParameterStatus message is now only sent over the pgwire protocol if the value of the parameter changed. (The parameters that are sent this way are timezone, intervalstyle, datestyle, is_superuser, and application_name.) #97145 +- Users can now go to the next page of results when there are more than 20 active statements or transactions in the Active Execution page. #97122 +- Since 22.1 when rangefeed enablement overrides in span configs were introduced, rangefeed requests that reached spans outside the range would not cause range cache invalidation due to the setting being checked first, thus requests could repeatedly hit the same incorrect range, causing errors until cache invalidation or node restart. This fix correctly checks that the span is within the range prior to checking the enablement settings, thus invalidating the cache when a request reaches an incorrect range and causing subsequent requests to successfully reach the correct range. #97212 +- A bug has been fixed that caused errors when creating multiple user-defined functions with the same name and different argument types in the same type family. For example, it was impossible to create both functions `f(i INT2)` and `f(INT4)`. #96481 +- The following spammy log message was removed: > lease [...] expired before being followed by lease [...]; foreground traffic may have been impacted #97358 +- Previously, `ALTER TABLE ... INJECT STATISTICS` command would fail if a column with COLLATED STRING type had histograms to be injected, and this is now fixed. The bug has been present since at least 21.2. #96695 +- Fixed a bug in the query engine that could cause incorrect results in some cases when a zigzag join was planned. The bug could occur when the two indexes used for the zigzag join had a suffix of matching columns but with different directions. 
For example, planning a zigzag join with `INDEX(a ASC, b ASC)` and `INDEX(c ASC, b DESC)` could cause incorrect results. This bug has existed since at least v19.1. It is now fixed, because the optimizer will no longer plan a zigzag join in such cases. #97151

Performance improvements

-- The optimizer now plans inverted index scans for queries that filter by JSON fetch value operators `(->)` that contain both a string and an integer index value after the IN operator. For example the optimizer plans inverted index scans for the following filters: `json_col->0 IN ('1', '2')`, `json_col->'a' IN ('1', '2')`. [#96471][#96471] +- The optimizer now plans inverted index scans for queries that filter by JSON fetch value operators `(->)` that contain both a string and an integer index value after the IN operator. For example the optimizer plans inverted index scans for the following filters: `json_col->0 IN ('1', '2')`, `json_col->'a' IN ('1', '2')`. #96471

Build changes

-- Starting with CockroachDB v23.1.0-alpha.4, a FIPS compliant tarball and Docker image are produced for the Linux x86_64 platform. The tarball uses OpenSSL libraries for crypto operations by dynamically loading the corresponding FIPS-validated dynamic libraries. The Docker image comes with the FIPS-validated OpenSSL library pre-installed.[#96107][#96107] +- Starting with CockroachDB v23.1.0-alpha.4, a FIPS compliant tarball and Docker image are produced for the Linux x86_64 platform. The tarball uses OpenSSL libraries for crypto operations by dynamically loading the corresponding FIPS-validated dynamic libraries. The Docker image comes with the FIPS-validated OpenSSL library pre-installed. #96107

Miscellaneous

-- [#97229][#97229] -- Fix a bug in which `RESTORE`, `BACKUP`, and `IMPORT` jobs would fail if the coordinator node of the job was drained. [#97033][#97033] +- #97229 +- Fix a bug in which `RESTORE`, `BACKUP`, and `IMPORT` jobs would fail if the coordinator node of the job was drained. #97033
@@ -84,42 +84,3 @@ We would like to thank the following contributors from the CockroachDB community
-[#94408]: https://github.com/cockroachdb/cockroach/pull/94408 -[#94810]: https://github.com/cockroachdb/cockroach/pull/94810 -[#95136]: https://github.com/cockroachdb/cockroach/pull/95136 -[#96107]: https://github.com/cockroachdb/cockroach/pull/96107 -[#96295]: https://github.com/cockroachdb/cockroach/pull/96295 -[#96393]: https://github.com/cockroachdb/cockroach/pull/96393 -[#96471]: https://github.com/cockroachdb/cockroach/pull/96471 -[#96481]: https://github.com/cockroachdb/cockroach/pull/96481 -[#96551]: https://github.com/cockroachdb/cockroach/pull/96551 -[#96695]: https://github.com/cockroachdb/cockroach/pull/96695 -[#96696]: https://github.com/cockroachdb/cockroach/pull/96696 -[#96698]: https://github.com/cockroachdb/cockroach/pull/96698 -[#96825]: https://github.com/cockroachdb/cockroach/pull/96825 -[#97022]: https://github.com/cockroachdb/cockroach/pull/97022 -[#97029]: https://github.com/cockroachdb/cockroach/pull/97029 -[#97033]: https://github.com/cockroachdb/cockroach/pull/97033 -[#97038]: https://github.com/cockroachdb/cockroach/pull/97038 -[#97041]: https://github.com/cockroachdb/cockroach/pull/97041 -[#97063]: https://github.com/cockroachdb/cockroach/pull/97063 -[#97082]: https://github.com/cockroachdb/cockroach/pull/97082 -[#97113]: https://github.com/cockroachdb/cockroach/pull/97113 -[#97122]: https://github.com/cockroachdb/cockroach/pull/97122 -[#97123]: https://github.com/cockroachdb/cockroach/pull/97123 -[#97145]: https://github.com/cockroachdb/cockroach/pull/97145 -[#97151]: https://github.com/cockroachdb/cockroach/pull/97151 -[#97153]: https://github.com/cockroachdb/cockroach/pull/97153 -[#97171]: https://github.com/cockroachdb/cockroach/pull/97171 -[#97212]: https://github.com/cockroachdb/cockroach/pull/97212 -[#97229]: https://github.com/cockroachdb/cockroach/pull/97229 -[#97257]: https://github.com/cockroachdb/cockroach/pull/97257 -[#97277]: https://github.com/cockroachdb/cockroach/pull/97277 -[#97306]: 
https://github.com/cockroachdb/cockroach/pull/97306 -[#97312]: https://github.com/cockroachdb/cockroach/pull/97312 -[#97344]: https://github.com/cockroachdb/cockroach/pull/97344 -[#97358]: https://github.com/cockroachdb/cockroach/pull/97358 -[#97390]: https://github.com/cockroachdb/cockroach/pull/97390 -[#97424]: https://github.com/cockroachdb/cockroach/pull/97424 -[42a849c67]: https://github.com/cockroachdb/cockroach/commit/42a849c67 -[7ff5bfd74]: https://github.com/cockroachdb/cockroach/commit/7ff5bfd74 diff --git a/src/current/_includes/releases/v23.1/v23.1.0-alpha.5.md b/src/current/_includes/releases/v23.1/v23.1.0-alpha.5.md index 52d81469513..158c4060347 100644 --- a/src/current/_includes/releases/v23.1/v23.1.0-alpha.5.md +++ b/src/current/_includes/releases/v23.1/v23.1.0-alpha.5.md @@ -6,42 +6,42 @@ Release Date: March 6, 2023

Security updates

-- The new [cluster setting]({% link v23.1/cluster-settings.md %}) `server.user_login.downgrade_scram_stored_passwords_to_bcrypt.enabled`, which allows you to migrate passwords from SCRAM to bcrypt during user authentication, defaults to `true`. If it is `true` and if `server.user_login.password_encryption` is `crdb-bcrypt`, then during login, the stored hashed password will be migrated from SCRAM to bcrypt. [#97429][#97429] +- The new [cluster setting]({% link v23.1/cluster-settings.md %}) `server.user_login.downgrade_scram_stored_passwords_to_bcrypt.enabled`, which allows you to migrate passwords from SCRAM to bcrypt during user authentication, defaults to `true`. If it is `true` and if `server.user_login.password_encryption` is `crdb-bcrypt`, then during login, the stored hashed password will be migrated from SCRAM to bcrypt. #97429

General changes

-- Users with the `CONTROLJOB` [role option]({% link v23.1/create-role.md %}#role-options) can now [view jobs]({% link v23.1/show-jobs.md %}) owned by admins. [#96382][#96382] -- Users with the `VIEWJOB` [role option]({% link v23.1/create-role.md %}#role-options) can now [view all jobs]({% link v23.1/show-jobs.md %}). This role can be revoked by granting the `NOVIEWJOB` role option. [#96382][#96382] +- Users with the `CONTROLJOB` [role option]({% link v23.1/create-role.md %}#role-options) can now [view jobs]({% link v23.1/show-jobs.md %}) owned by admins. #96382 +- Users with the `VIEWJOB` [role option]({% link v23.1/create-role.md %}#role-options) can now [view all jobs]({% link v23.1/show-jobs.md %}). This role can be revoked by granting the `NOVIEWJOB` role option. #96382

{{ site.data.products.enterprise }} edition changes

-- Jobs that utilize a protected timestamp system (such as `BACKUP`, `CHANGEFEED`, or `IMPORT`) now produce metrics that can be monitored to detect cases when a job leaves a stale protected timestamp that will prevent garbage collection from occurring. [#97148][#97148] -- [Changefeeds]({% link v23.1/create-changefeed.md %}) now automatically expire protected timestamp records for paused jobs if the changefeed is configured with the `gc_protect_expires_after` option. [#97148][#97148] -- User-defined functions (UDFs) can now be referenced from column [`DEFAULT`]({% link v23.1/default-value.md %}) expressions when creating a new table or issuing the `SET DEFAULT` command. Backup and restore operations also back up and restore UDF IDs that are referenced in a column's DEFAULT expression. If UDF dependencies are missing and the `skip_missing_udfs` flag is provided, the `DEFAULT` expressions are dropped during a restore operation. [#97501][#97501] +- Jobs that utilize a protected timestamp system (such as `BACKUP`, `CHANGEFEED`, or `IMPORT`) now produce metrics that can be monitored to detect cases when a job leaves a stale protected timestamp that will prevent garbage collection from occurring. #97148 +- [Changefeeds]({% link v23.1/create-changefeed.md %}) now automatically expire protected timestamp records for paused jobs if the changefeed is configured with the `gc_protect_expires_after` option. #97148 +- User-defined functions (UDFs) can now be referenced from column [`DEFAULT`]({% link v23.1/default-value.md %}) expressions when creating a new table or issuing the `SET DEFAULT` command. Backup and restore operations also back up and restore UDF IDs that are referenced in a column's DEFAULT expression. If UDF dependencies are missing and the `skip_missing_udfs` flag is provided, the `DEFAULT` expressions are dropped during a restore operation. #97501

SQL language changes

-- String literals are now allowed for region names in DDL syntax, in addition to quoted syntax. [#97021][#97021] -- It is now possible to use `*` inside a [`CREATE VIEW`]({% link v23.1/create-view.md %}) statement. The list of columns is expanded at the time the view is created, so that new columns added after the view was defined are not included in the view. This behavior is the same as in PostgreSQL. [#97515][#97515] -- The default value of `sql.stats.cleanup.rows_to_delete_per_txn` has been increased to `10000` to increase efficiency of the cleanup job for SQL statistics. [#97642][#97642] -- The new [session setting]({% link v23.1/set-vars.md %}) `optimizer_use_improved_split_disjunction_for_joins` allows the optimizer to split disjunctions (`OR` expressions) in more `JOIN` conditions by building a `UNION` of two `JOIN` expressions. If this setting is true, all disjunctions in inner, semi, and anti `JOIN`s will be split. Otherwise, only disjunctions that potentially contain an equijoin condition will be split. [#97696][#97696] -- Builtins have been added for `tsvector`, `to_tsquery`, `phraseto_tsquery`, and `plainto_tsquery`, which parse input documents into tsvectors and tsqueries, respectively. The new `ts_parse` builtin is used to debug the text search parser. [#92966][#92966] -- The new [session variable]({% link v23.1/set-vars.md %}) `inject_retry_errors_on_commit_enabled` returns a [transaction retry error]({% link v23.1/transaction-retry-error-reference.md %}) if it is run inside of an explicit transaction when it is set to `true`. The transaction retry error continues to be returned until `inject_retry_errors_on_commit_enabled` is set to `false`. This setting allows you to test your transaction retry logic. [#97226][#97226] -- Previously, [`ADD PRIMARY KEY NOT VALID`]({% link v23.1/primary-key.md %}) ignored the `NOT VALID` qualifier. This behavior was not compatible with PostgreSQL. 
CockroachDB now throws the error `PRIMARY KEY constraints cannot be marked NOT VALID`. [#97746][#97746] +- String literals are now allowed for region names in DDL syntax, in addition to quoted syntax. #97021 +- It is now possible to use `*` inside a [`CREATE VIEW`]({% link v23.1/create-view.md %}) statement. The list of columns is expanded at the time the view is created, so that new columns added after the view was defined are not included in the view. This behavior is the same as in PostgreSQL. #97515 +- The default value of `sql.stats.cleanup.rows_to_delete_per_txn` has been increased to `10000` to increase efficiency of the cleanup job for SQL statistics. #97642 +- The new [session setting]({% link v23.1/set-vars.md %}) `optimizer_use_improved_split_disjunction_for_joins` allows the optimizer to split disjunctions (`OR` expressions) in more `JOIN` conditions by building a `UNION` of two `JOIN` expressions. If this setting is true, all disjunctions in inner, semi, and anti `JOIN`s will be split. Otherwise, only disjunctions that potentially contain an equijoin condition will be split. #97696 +- Builtins have been added for `tsvector`, `to_tsquery`, `phraseto_tsquery`, and `plainto_tsquery`, which parse input documents into tsvectors and tsqueries, respectively. The new `ts_parse` builtin is used to debug the text search parser. #92966 +- The new [session variable]({% link v23.1/set-vars.md %}) `inject_retry_errors_on_commit_enabled` returns a [transaction retry error]({% link v23.1/transaction-retry-error-reference.md %}) if it is run inside of an explicit transaction when it is set to `true`. The transaction retry error continues to be returned until `inject_retry_errors_on_commit_enabled` is set to `false`. This setting allows you to test your transaction retry logic. #97226 +- Previously, [`ADD PRIMARY KEY NOT VALID`]({% link v23.1/primary-key.md %}) ignored the `NOT VALID` qualifier. This behavior was not compatible with PostgreSQL. 
CockroachDB now throws the error `PRIMARY KEY constraints cannot be marked NOT VALID`. #97746

Operational changes

- The following [cluster settings]({% link v23.1/cluster-settings.md %}), which control rebalancing and upreplication behavior in the face of IO-overloaded storage, have been deprecated: - `kv.allocator.l0_sublevels_threshold` - `kv.allocator.l0_sublevels_threshold_enforce` - These cluster settings have been replaced by internal mechanisms. [#97142][#97142] + These cluster settings have been replaced by internal mechanisms. #97142 -- Max timeout-to-intent resolution has been added to prevent intent resolution from becoming stuck indefinitely and blocking other ranges attempting to resolve intents. [#91815][#91815] -- Nodes are now considered suspect when rejoining a cluster and cannot accept lease transfers for one `server.time_after_store_suspect` window, which defaults to 30 seconds. [#97532][#97532] +- Max timeout-to-intent resolution has been added to prevent intent resolution from becoming stuck indefinitely and blocking other ranges attempting to resolve intents. #91815 +- Nodes are now considered suspect when rejoining a cluster and cannot accept lease transfers for one `server.time_after_store_suspect` window, which defaults to 30 seconds. #97532

Command-line changes

-- The SQL shell ([`cockroach sql`]({% link v23.1/cockroach-sql.md %}), [`cockroach demo`]({% link v23.1/cockroach-demo.md %})) now supports the client-side commands `\l`, `\dn`, `\d`, `\di`, `\dm`, `\ds`, `\dt`, `\dv`, `\dC`, `\dT`, `\dd`, `\dg`, `\du` and `\dd` in a similar manner to PostgreSQL, including the modifier flags `S` and `+`, for convenience for users migrating from PostgreSQL. A notable difference is that when a pattern argument is specified, it should use the SQL `LIKE` syntax (with `%` representing the wildcard character) instead of PostgreSQL's glob-like syntax (with `*` representing wildcards). [#88061][#88061] +- The SQL shell ([`cockroach sql`]({% link v23.1/cockroach-sql.md %}), [`cockroach demo`]({% link v23.1/cockroach-demo.md %})) now supports the client-side commands `\l`, `\dn`, `\d`, `\di`, `\dm`, `\ds`, `\dt`, `\dv`, `\dC`, `\dT`, `\dd`, `\dg`, `\du` and `\dd` in a similar manner to PostgreSQL, including the modifier flags `S` and `+`, for convenience for users migrating from PostgreSQL. A notable difference is that when a pattern argument is specified, it should use the SQL `LIKE` syntax (with `%` representing the wildcard character) instead of PostgreSQL's glob-like syntax (with `*` representing wildcards). #88061

DB Console changes

@@ -69,39 +69,39 @@ Release Date: March 6, 2023 - `sql.mem.sql.session.prepared.max-p99.99` - `sql.mem.sql.session.prepared.max-p99.999` - [#97590][#97590] + #97590 -- Active execution information is now shown on the [Statements page]({% link v23.1/ui-statements-page.md %}) even when there is a max size limit error. [#97662][#97662] -- "Retrying" is no longer a status shown in the [Jobs]({% link v23.1/ui-jobs-page.md %}) page. [#97505][#97505] +- Active execution information is now shown on the [Statements page]({% link v23.1/ui-statements-page.md %}) even when there is a max size limit error. #97662 +- "Retrying" is no longer a status shown in the [Jobs]({% link v23.1/ui-jobs-page.md %}) page. #97505

Bug fixes

-- Transaction uncertainty intervals are now correctly configured for reverse scans, to prevent reverse scans from serving stale reads when clocks in a cluster are skewed. [#97443][#97443] -- The formatting of uniqueness violation errors now matches the corresponding errors from PostgreSQL. [#96914][#96914] -- Previously, when a new column name would require quoting due to mixed-case or special characters, [`ALTER TABLE ... ADD COLUMN`]({% link v23.1/alter-table.md %}#add-column) would not run in an explicit or multi-statement transaction. This is now fixed. [#97514][#97514] -- Fixed a bug when formatting [`CREATE TYPE` statements for user-defined types]({% link v23.1/create-type.md %}) which require quoting which might prevent those statements from round-tripping. [#97514][#97514] -- Using subqueries in user-defined functions without an `AS` clause is now supported, for consistency with the syntax supported outside of user-defined functions. [#97515][#97515] -- Fixed a rare bug introduced before v22.1.x that could cause a projected expression to replace column references with the wrong values. [#97554][#97554] -- Cross-descriptor validation on lease renewal is now disabled, because it can starve [online schema changes]({% link v23.1/online-schema-changes.md %}) when there are many descriptors with many foreign key references. [#97630][#97630] -- Fixed a bug with pagination on the [Insights]({% link v23.1/ui-insights-page.md %}) page. [#97640][#97640] -- Columns referenced in partial index predicates and partial unique constraint predicates can no longer be dropped. The [`ALTER TABLE .. DROP COLUMN`]({% link v23.1/alter-table.md %}#drop-column) statement now returns an error with a suggestion to drop the indexes and constraints first. This is a temporary safeguard to prevent users from hitting [#96924][#96924]. This restriction will be lifted when that bug is fixed. 
[#97372][#97372] -- The [Jobs]({% link v23.1/ui-jobs-page.md %}) page now displays an error state when an error is encountered during data fetching. [#97486][#97486] -- Fixed a bug introduced in v22.1 that caused the internal error `no bytes in account to release ...`. [#97750][#97750] -- The [`COPY FROM`]({% link v23.1/copy.md %}) command now respects the `statement_timeout` and `transaction_timeout` [cluster settings]({% link v23.1/cluster-settings.md %}). [#97808][#97808] -- [`COPY FROM`]({% link v23.1/copy.md %}) commands now appear in the output of the [`SHOW STATEMENTS`]({% link v23.1/show-statements.md %}) command. [#97808][#97808] -- Fixed an error where querying a `pg_catalog` table included information about a temporary table created in another session. [#97727][#97727] +- Transaction uncertainty intervals are now correctly configured for reverse scans, to prevent reverse scans from serving stale reads when clocks in a cluster are skewed. #97443 +- The formatting of uniqueness violation errors now matches the corresponding errors from PostgreSQL. #96914 +- Previously, when a new column name would require quoting due to mixed-case or special characters, [`ALTER TABLE ... ADD COLUMN`]({% link v23.1/alter-table.md %}#add-column) would not run in an explicit or multi-statement transaction. This is now fixed. #97514 +- Fixed a bug when formatting [`CREATE TYPE` statements for user-defined types]({% link v23.1/create-type.md %}) which require quoting which might prevent those statements from round-tripping. #97514 +- Using subqueries in user-defined functions without an `AS` clause is now supported, for consistency with the syntax supported outside of user-defined functions. #97515 +- Fixed a rare bug introduced before v22.1.x that could cause a projected expression to replace column references with the wrong values. 
#97554 +- Cross-descriptor validation on lease renewal is now disabled, because it can starve [online schema changes]({% link v23.1/online-schema-changes.md %}) when there are many descriptors with many foreign key references. #97630 +- Fixed a bug with pagination on the [Insights]({% link v23.1/ui-insights-page.md %}) page. #97640 +- Columns referenced in partial index predicates and partial unique constraint predicates can no longer be dropped. The [`ALTER TABLE .. DROP COLUMN`]({% link v23.1/alter-table.md %}#drop-column) statement now returns an error with a suggestion to drop the indexes and constraints first. This is a temporary safeguard to prevent users from hitting #96924. This restriction will be lifted when that bug is fixed. #97372 +- The [Jobs]({% link v23.1/ui-jobs-page.md %}) page now displays an error state when an error is encountered during data fetching. #97486 +- Fixed a bug introduced in v22.1 that caused the internal error `no bytes in account to release ...`. #97750 +- The [`COPY FROM`]({% link v23.1/copy.md %}) command now respects the `statement_timeout` and `transaction_timeout` [cluster settings]({% link v23.1/cluster-settings.md %}). #97808 +- [`COPY FROM`]({% link v23.1/copy.md %}) commands now appear in the output of the [`SHOW STATEMENTS`]({% link v23.1/show-statements.md %}) command. #97808 +- Fixed an error where querying a `pg_catalog` table included information about a temporary table created in another session. #97727

Performance improvements

-- If the [session setting]({% link v23.1/set-vars.md %}) `optimizer_use_improved_split_disjunction_for_joins` is `true`, the optimizer now creates a better query plan in some cases where an inner, semi, or anti join contains a join predicate with a disjuction (`OR` condition). [#97696][#97696] +- If the [session setting]({% link v23.1/set-vars.md %}) `optimizer_use_improved_split_disjunction_for_joins` is `true`, the optimizer now creates a better query plan in some cases where an inner, semi, or anti join contains a join predicate with a disjunction (`OR` condition). #97696

Miscellaneous

-- UDFs can now return the `RECORD` result type, which represents any tuple. For example, `CREATE FUNCTION f() RETURNS RECORD AS 'SELECT * FROM t' LANGUAGE SQL;` is equivalent to `CREATE FUNCTION f() RETURNS t AS 'SELECT * FROM t' LANGUAGE SQL;`. [#97199][#97199] -- The parameters for [delegated snapshots](https://www.cockroachlabs.com/docs/v23.1/architecture/replication-layer#snapshots) have been marked as internal. [#97408][#97408] -- Fixed an error when calling [`CREATE OR REPLACE FUNCTION`]({% link v23.1/create-function.md %}) with a [user-defined return type]({% link v23.1/create-type.md %}) if the user-defined type was modified after the original [user-defined function]({% link v23.1/create-function.md %}) was created. The command now succeeds as long as the function body returns output that matches the modified user-defined type. [#97274][#97274] -- Columns with referenced constraints can now be dropped. [#97579][#97579] -- Index cascades with a dependent inbound foreign key can now be dropped. [#97065][#97065] +- UDFs can now return the `RECORD` result type, which represents any tuple. For example, `CREATE FUNCTION f() RETURNS RECORD AS 'SELECT * FROM t' LANGUAGE SQL;` is equivalent to `CREATE FUNCTION f() RETURNS t AS 'SELECT * FROM t' LANGUAGE SQL;`. #97199 +- The parameters for [delegated snapshots](https://www.cockroachlabs.com/docs/v23.1/architecture/replication-layer#snapshots) have been marked as internal. #97408 +- Fixed an error when calling [`CREATE OR REPLACE FUNCTION`]({% link v23.1/create-function.md %}) with a [user-defined return type]({% link v23.1/create-type.md %}) if the user-defined type was modified after the original [user-defined function]({% link v23.1/create-function.md %}) was created. The command now succeeds as long as the function body returns output that matches the modified user-defined type. #97274 +- Columns with referenced constraints can now be dropped. 
#97579 +- Index cascades with a dependent inbound foreign key can now be dropped. #97065
@@ -114,44 +114,3 @@ We would like to thank the following contributors from the CockroachDB community
-[#88061]: https://github.com/cockroachdb/cockroach/pull/88061 -[#91815]: https://github.com/cockroachdb/cockroach/pull/91815 -[#92966]: https://github.com/cockroachdb/cockroach/pull/92966 -[#96350]: https://github.com/cockroachdb/cockroach/pull/96350 -[#96382]: https://github.com/cockroachdb/cockroach/pull/96382 -[#96914]: https://github.com/cockroachdb/cockroach/pull/96914 -[#97021]: https://github.com/cockroachdb/cockroach/pull/97021 -[#97065]: https://github.com/cockroachdb/cockroach/pull/97065 -[#97142]: https://github.com/cockroachdb/cockroach/pull/97142 -[#97148]: https://github.com/cockroachdb/cockroach/pull/97148 -[#97199]: https://github.com/cockroachdb/cockroach/pull/97199 -[#97226]: https://github.com/cockroachdb/cockroach/pull/97226 -[#97274]: https://github.com/cockroachdb/cockroach/pull/97274 -[#97372]: https://github.com/cockroachdb/cockroach/pull/97372 -[#97408]: https://github.com/cockroachdb/cockroach/pull/97408 -[#97429]: https://github.com/cockroachdb/cockroach/pull/97429 -[#97443]: https://github.com/cockroachdb/cockroach/pull/97443 -[#97486]: https://github.com/cockroachdb/cockroach/pull/97486 -[#97487]: https://github.com/cockroachdb/cockroach/pull/97487 -[#97501]: https://github.com/cockroachdb/cockroach/pull/97501 -[#97505]: https://github.com/cockroachdb/cockroach/pull/97505 -[#97514]: https://github.com/cockroachdb/cockroach/pull/97514 -[#97515]: https://github.com/cockroachdb/cockroach/pull/97515 -[#97532]: https://github.com/cockroachdb/cockroach/pull/97532 -[#97554]: https://github.com/cockroachdb/cockroach/pull/97554 -[#97579]: https://github.com/cockroachdb/cockroach/pull/97579 -[#97590]: https://github.com/cockroachdb/cockroach/pull/97590 -[#97630]: https://github.com/cockroachdb/cockroach/pull/97630 -[#97640]: https://github.com/cockroachdb/cockroach/pull/97640 -[#97642]: https://github.com/cockroachdb/cockroach/pull/97642 -[#97656]: https://github.com/cockroachdb/cockroach/pull/97656 -[#97662]: 
https://github.com/cockroachdb/cockroach/pull/97662 -[#97696]: https://github.com/cockroachdb/cockroach/pull/97696 -[#97727]: https://github.com/cockroachdb/cockroach/pull/97727 -[#97746]: https://github.com/cockroachdb/cockroach/pull/97746 -[#97750]: https://github.com/cockroachdb/cockroach/pull/97750 -[#97808]: https://github.com/cockroachdb/cockroach/pull/97808 -[8c6cf2877]: https://github.com/cockroachdb/cockroach/commit/8c6cf2877 -[a53a8d354]: https://github.com/cockroachdb/cockroach/commit/a53a8d354 -[f84bd02ae]: https://github.com/cockroachdb/cockroach/commit/f84bd02ae -[#96924]: https://github.com/cockroachdb/cockroach/issue/97808 diff --git a/src/current/_includes/releases/v23.1/v23.1.0-alpha.6.md b/src/current/_includes/releases/v23.1/v23.1.0-alpha.6.md index 2b6eaefb977..3d538ad9044 100644 --- a/src/current/_includes/releases/v23.1/v23.1.0-alpha.6.md +++ b/src/current/_includes/releases/v23.1/v23.1.0-alpha.6.md @@ -6,36 +6,36 @@ Release Date: March 13, 2023

SQL language changes

-- Previously, an `ALTER TABLE .. ADD UNIQUE .. NOT VALID` statement would be processed by ignoring the `NOT VALID` qualifier. This is not in keeping with PostgreSQL, which would throw an error instead. Now, CockroachDB throws the same error as PostgreSQL for such a statement: "UNIQUE constraints cannot be marked NOT VALID". [#97758][#97758] -- Introduced a new internal virtual table `crdb_internal.node_memory_monitors`, which exposes all of the current reservations with the memory accounting system on a single node. Access to the table requires the `VIEWACTIVITY` [system privilege](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#supported-privileges) (or the legacy `VIEWACTIVITY` [role option](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#role-options)), or the `VIEWACTIVITYREDACTED` [system privilege](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#supported-privileges) (or the legacy `VIEWACTIVITYREDACTED` [role option](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#role-options)) defined. [#97657][#97657] -- Each type cast is now expressible as a function, e.g., `now()::date` can be expressed as `date(now())`. [#97093][#97093] -- Added support for a new syntax to provide options to the [`COPY`]({% link v23.1/copy.md %}) statement. The options can now be given in a comma-separated list enclosed by parentheses. The old syntax is still supported. [#97863][#97863] -- Added support for the syntax `CREATE DATABASE IF NOT EXISTS ... WITH OWNER`. [#97951][#97951] -- Added a new internal [built-in function]({% link v23.1/functions-and-operators.md %}#trigrams-functions), `crdb_internal.redactable_sql_constants`, which can be used to redact SQL statements passed in as strings. 
[#97834][#97834] -- Added an `error_code` column to the `crdb_internal.cluster_execution_insights` and `crdb_internal.node_execution_insights`virtual tables, which contains the error code for a failed execution. Also added a `last_error_code` column to the `crdb_internal.cluster_txn_execution_insights` and `crdb_internal.node_txn_execution_insights` virtual tables, which contains the error code of the last failed statement in that transaction. [#97046][#97046] -- Added a new internal [built-in function]({% link v23.1/functions-and-operators.md %}#trigrams-functions), `crdb_internal.redact`, which replaces substrings surrounded by redaction markers with the redacted marker. [#98008][#98008] +- Previously, an `ALTER TABLE .. ADD UNIQUE .. NOT VALID` statement would be processed by ignoring the `NOT VALID` qualifier. This is not in keeping with PostgreSQL, which would throw an error instead. Now, CockroachDB throws the same error as PostgreSQL for such a statement: "UNIQUE constraints cannot be marked NOT VALID". #97758 +- Introduced a new internal virtual table `crdb_internal.node_memory_monitors`, which exposes all of the current reservations with the memory accounting system on a single node. Access to the table requires the `VIEWACTIVITY` [system privilege](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#supported-privileges) (or the legacy `VIEWACTIVITY` [role option](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#role-options)), or the `VIEWACTIVITYREDACTED` [system privilege](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#supported-privileges) (or the legacy `VIEWACTIVITYREDACTED` [role option](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#role-options)) defined. #97657 +- Each type cast is now expressible as a function, e.g., `now()::date` can be expressed as `date(now())`. 
#97093 +- Added support for a new syntax to provide options to the [`COPY`]({% link v23.1/copy.md %}) statement. The options can now be given in a comma-separated list enclosed by parentheses. The old syntax is still supported. #97863 +- Added support for the syntax `CREATE DATABASE IF NOT EXISTS ... WITH OWNER`. #97951 +- Added a new internal [built-in function]({% link v23.1/functions-and-operators.md %}#trigrams-functions), `crdb_internal.redactable_sql_constants`, which can be used to redact SQL statements passed in as strings. #97834 +- Added an `error_code` column to the `crdb_internal.cluster_execution_insights` and `crdb_internal.node_execution_insights` virtual tables, which contains the error code for a failed execution. Also added a `last_error_code` column to the `crdb_internal.cluster_txn_execution_insights` and `crdb_internal.node_txn_execution_insights` virtual tables, which contains the error code of the last failed statement in that transaction. #97046 +- Added a new internal [built-in function]({% link v23.1/functions-and-operators.md %}#trigrams-functions), `crdb_internal.redact`, which replaces substrings surrounded by redaction markers with the redacted marker. #98008

Command-line changes

-- The [`cockroach node decommission`]({% link v23.1/cockroach-node.md %}) operation now validates the ability of the node to complete a decommission before attempting it, given the cluster configuration and the ranges with replicas present on the node. When errors are detected that would result in the inability to complete node decommission, they will be printed to stderr and the command will exit, instead of marking the node as `decommissioning` and beginning the node decommission process. Strict readiness evaluation mode can be used by setting`--checks=strict`. In this case, any ranges that need preliminary actions prior to replacement for the decommission process (e.g., ranges that are not yet fully up-replicated) will block the decommission process. Validation can be skipped by using the flag `--checks=skip`. [#96100][#96100] +- The [`cockroach node decommission`]({% link v23.1/cockroach-node.md %}) operation now validates the ability of the node to complete a decommission before attempting it, given the cluster configuration and the ranges with replicas present on the node. When errors are detected that would result in the inability to complete node decommission, they will be printed to stderr and the command will exit, instead of marking the node as `decommissioning` and beginning the node decommission process. Strict readiness evaluation mode can be used by setting `--checks=strict`. In this case, any ranges that need preliminary actions prior to replacement for the decommission process (e.g., ranges that are not yet fully up-replicated) will block the decommission process. Validation can be skipped by using the flag `--checks=skip`. #96100

DB Console changes

-- Updated the description for [Suboptimal Insight]({% link v23.1/ui-insights-page.md %}) and added a **Learn more** link to it. [#97719][#97719] +- Updated the description for [Suboptimal Insight]({% link v23.1/ui-insights-page.md %}) and added a **Learn more** link to it. #97719

Bug fixes

-- The unquoted value `none` is now allowed as the value in a [`SET`]({% link v23.1/set-vars.md %}) statement. [#97816][#97816] -- `IMPORT INTO ... DELIMITED DATA` will now correctly handle quoted fields that contain unescaped newlines. [#97545][#97545] -- Previously, casting an `inet` to a string type omitted the mask if a mask was not provided. This was not in keeping with PostgreSQL and is now resolved. [#97093][#97093] -- Fixed link encoding on links to database/table/index pages. [#97893][#97893] -- Fixed the [`SHOW CREATE ALL {TYPES|SCHEMAS|TABLES}`]({% link v23.1/show-create.md %}) commands so they handle database names that have mixed-case, hyphens, or quotes. [#97915][#97915] -- Tables referenced in index recommendations are now fully qualified, ensuring that they are not ambiguous. [#97262][#97262] -- Changed the database used for [SQL API]({% link v23.1/sql-statements.md %}) calls, to no longer use `defaultdb`, which was causing error messages on some pages when that database no longer exists. [#97990][#97990] +- The unquoted value `none` is now allowed as the value in a [`SET`]({% link v23.1/set-vars.md %}) statement. #97816 +- `IMPORT INTO ... DELIMITED DATA` will now correctly handle quoted fields that contain unescaped newlines. #97545 +- Previously, casting an `inet` to a string type omitted the mask if a mask was not provided. This was not in keeping with PostgreSQL and is now resolved. #97093 +- Fixed link encoding on links to database/table/index pages. #97893 +- Fixed the [`SHOW CREATE ALL {TYPES|SCHEMAS|TABLES}`]({% link v23.1/show-create.md %}) commands so they handle database names that have mixed-case, hyphens, or quotes. #97915 +- Tables referenced in index recommendations are now fully qualified, ensuring that they are not ambiguous. #97262 +- Changed the database used for [SQL API]({% link v23.1/sql-statements.md %}) calls, to no longer use `defaultdb`, which was causing error messages on some pages when that database no longer exists. 
#97990

Build changes

-- Changes to source files in `pkg/ui/workspaces/db-console` now properly bust the build cache, and are consistently included in local builds. [#97956][#97956] +- Changes to source files in `pkg/ui/workspaces/db-console` now properly bust the build cache, and are consistently included in local builds. #97956
@@ -45,25 +45,3 @@ This release includes 69 merged PRs by 39 authors.
-[#96100]: https://github.com/cockroachdb/cockroach/pull/96100 -[#97046]: https://github.com/cockroachdb/cockroach/pull/97046 -[#97093]: https://github.com/cockroachdb/cockroach/pull/97093 -[#97262]: https://github.com/cockroachdb/cockroach/pull/97262 -[#97534]: https://github.com/cockroachdb/cockroach/pull/97534 -[#97545]: https://github.com/cockroachdb/cockroach/pull/97545 -[#97657]: https://github.com/cockroachdb/cockroach/pull/97657 -[#97719]: https://github.com/cockroachdb/cockroach/pull/97719 -[#97758]: https://github.com/cockroachdb/cockroach/pull/97758 -[#97784]: https://github.com/cockroachdb/cockroach/pull/97784 -[#97816]: https://github.com/cockroachdb/cockroach/pull/97816 -[#97834]: https://github.com/cockroachdb/cockroach/pull/97834 -[#97863]: https://github.com/cockroachdb/cockroach/pull/97863 -[#97893]: https://github.com/cockroachdb/cockroach/pull/97893 -[#97915]: https://github.com/cockroachdb/cockroach/pull/97915 -[#97935]: https://github.com/cockroachdb/cockroach/pull/97935 -[#97951]: https://github.com/cockroachdb/cockroach/pull/97951 -[#97956]: https://github.com/cockroachdb/cockroach/pull/97956 -[#97990]: https://github.com/cockroachdb/cockroach/pull/97990 -[#98008]: https://github.com/cockroachdb/cockroach/pull/98008 -[8a5e74a01]: https://github.com/cockroachdb/cockroach/commit/8a5e74a01 -[eae9c4ff4]: https://github.com/cockroachdb/cockroach/commit/eae9c4ff4 diff --git a/src/current/_includes/releases/v23.1/v23.1.0-alpha.7.md b/src/current/_includes/releases/v23.1/v23.1.0-alpha.7.md index f4e1578c9af..a4dd149f779 100644 --- a/src/current/_includes/releases/v23.1/v23.1.0-alpha.7.md +++ b/src/current/_includes/releases/v23.1/v23.1.0-alpha.7.md @@ -6,49 +6,49 @@ Release Date: March 20, 2023

Security updates

-- The default value for the `server.user_login.password_hashes.default_cost.scram_sha_256` [cluster setting]({% link v23.1/cluster-settings.md %}) is now 10610. (Previously the default was 119680.) The old value was found to have been too high for many types of client hardware, and in some cases could cause regressions in connection latency. The new value was chosen by running tests with clients that have 1 or 2 vCPUs provisioned. Additionally, the new cluster setting `server.user_login.rehash_scram_stored_passwords_on_cost_change.enabled` was added, and defaults to `true`. If it is `true` and the stored SCRAM password for a user has a different cost than the configured default cost, then the next time the user logs in, their password will automatically be rehashed using the configured default cost. If the rehashing is not desired, then operators should update the `server.user_login.password_hashes.default_cost.scram_sha_256` cluster setting to the value they desire before upgrading. This change is being backported to [v22.2]({% link releases/v22.2.md %}). [#98254][#98254] +- The default value for the `server.user_login.password_hashes.default_cost.scram_sha_256` [cluster setting]({% link v23.1/cluster-settings.md %}) is now 10610. (Previously the default was 119680.) The old value was found to have been too high for many types of client hardware, and in some cases could cause regressions in connection latency. The new value was chosen by running tests with clients that have 1 or 2 vCPUs provisioned. Additionally, the new cluster setting `server.user_login.rehash_scram_stored_passwords_on_cost_change.enabled` was added, and defaults to `true`. If it is `true` and the stored SCRAM password for a user has a different cost than the configured default cost, then the next time the user logs in, their password will automatically be rehashed using the configured default cost. 
If the rehashing is not desired, then operators should update the `server.user_login.password_hashes.default_cost.scram_sha_256` cluster setting to the value they desire before upgrading. This change is being backported to [v22.2]({% link releases/v22.2.md %}). #98254

General changes

-- CockroachDB now uses the soft memory limit of Go runtime by default. This feature of Go has been available since v22.2 by setting the `GOMEMLIMIT` environment variable. Now it is enabled by default, which should reduce the likelihood of the CockroachDB process [OOMing]({% link v23.1/cluster-setup-troubleshooting.md %}#out-of-memory-oom-crash). This soft memory limit can be disabled by specifying `--max-go-memory=0` to [`cockroach start`]({% link v23.1/cockroach-start.md %}). [#97666][#97666] -- Previously, the output of [`SHOW CHANGEFEED JOBS`]({% link v23.1/show-jobs.md %}#show-changefeed-jobs) was limited to show unfinished jobs and finished jobs from the last 14 days. This change makes the command show all changefeed jobs, regardless of if they finished and when they finished. Note that jobs still obey the [cluster setting]({% link v23.1/cluster-settings.md %}#setting-jobs-retention-time) `jobs.retention_time`. Completed jobs older than that time are deleted. Fixes: https://github.com/cockroachdb/cockroach/issues/97883 [#98175][#98175] +- CockroachDB now uses the soft memory limit of Go runtime by default. This feature of Go has been available since v22.2 by setting the `GOMEMLIMIT` environment variable. Now it is enabled by default, which should reduce the likelihood of the CockroachDB process [OOMing]({% link v23.1/cluster-setup-troubleshooting.md %}#out-of-memory-oom-crash). This soft memory limit can be disabled by specifying `--max-go-memory=0` to [`cockroach start`]({% link v23.1/cockroach-start.md %}). #97666 +- Previously, the output of [`SHOW CHANGEFEED JOBS`]({% link v23.1/show-jobs.md %}#show-changefeed-jobs) was limited to show unfinished jobs and finished jobs from the last 14 days. This change makes the command show all changefeed jobs, regardless of if they finished and when they finished. Note that jobs still obey the [cluster setting]({% link v23.1/cluster-settings.md %}#setting-jobs-retention-time) `jobs.retention_time`. 
Completed jobs older than that time are deleted. Fixes: #97883 #98175

{{ site.data.products.enterprise }} edition changes

-- Sinkless [changefeeds]({% link v23.1/create-changefeed.md %}) that use the [`AS SELECT`]({% link v23.1/selection-queries.md %}) syntax now require an enterprise license. [#98241][#98241] -- [External connections]({% link v23.1/create-external-connection.md %}) can now be used as the URI value for a Confluent schema registry. For example, `CREATE EXTERNAL CONNECTION reg AS "https://example.cloud?opt=val"; CREATE CHANGEFEED FOR foo WITH format='avro',confluent_schema_registry='external://reg'` [#97140][#97140] -- [Backup schedules]({% link v23.1/create-schedule-for-backup.md %}) created or altered to have the option `on_previous_running` will have the full backup schedule created with the user specified option, but will override the incremental backup schedule to always default to `on_previous_running = wait`. This ensures correctness of the backup chains created by the incremental schedule by preventing duplicate incremental jobs from racing against each other. [#98249][#98249] -- [Changefeeds]({% link v23.1/changefeed-sinks.md %}#kafka) to a Kafka sink now support the `OAUTHBEARER` `sasl_mechanism`. [#98053][#98053] -- [Changefeeds]({% link v23.1/changefeed-examples.md %}) running with the `changefeed.mux_rangefeed.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}) set to `true` are more efficient, particularly when executing against large tables. [#96756][#96756] +- Sinkless [changefeeds]({% link v23.1/create-changefeed.md %}) that use the [`AS SELECT`]({% link v23.1/selection-queries.md %}) syntax now require an enterprise license. #98241 +- [External connections]({% link v23.1/create-external-connection.md %}) can now be used as the URI value for a Confluent schema registry. 
For example, `CREATE EXTERNAL CONNECTION reg AS "https://example.cloud?opt=val"; CREATE CHANGEFEED FOR foo WITH format='avro',confluent_schema_registry='external://reg'` #97140 +- [Backup schedules]({% link v23.1/create-schedule-for-backup.md %}) created or altered to have the option `on_previous_running` will have the full backup schedule created with the user specified option, but will override the incremental backup schedule to always default to `on_previous_running = wait`. This ensures correctness of the backup chains created by the incremental schedule by preventing duplicate incremental jobs from racing against each other. #98249 +- [Changefeeds]({% link v23.1/changefeed-sinks.md %}#kafka) to a Kafka sink now support the `OAUTHBEARER` `sasl_mechanism`. #98053 +- [Changefeeds]({% link v23.1/changefeed-examples.md %}) running with the `changefeed.mux_rangefeed.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}) set to `true` are more efficient, particularly when executing against large tables. #96756

SQL language changes

-- A regions field was added to the `statistics` column of [`crdb_internal.statement_statistics`](https://www.cockroachlabs.com/docs/v23.1/crdb-internal#statement_statistics), reporting the [regions]({% link v23.1/show-regions.md %}) of the nodes on which the statement was executed. [#95449][#95449] -- The `enforce_home_region` [session setting]({% link v23.1/set-vars.md %}#enforce-home-region) was extended with a new optional preview feature and session setting, which is disabled by default, to dynamically detect and report the home region for [`SELECT`]({% link v23.1/selection-queries.md %}) queries based on the [locality]({% link v23.1/show-locality.md %}) of the queried rows, if different from the region of the [gateway node]({% link v23.1/ui-sessions-page.md %}#session-details-gateway-node). [cockroachdb/cockroach#97827][#97827] -- Added a URL to errors related to the `enforce_home_region` [session setting]({% link v23.1/set-vars.md %}#enforce-home-region) that users can view to see additional information about the error. [#97827][#97827] -- Added a new [session setting]({% link v23.1/set-vars.md %}) `enforce_home_region_follower_reads_enabled` as a preview feature to allow errors triggered by the `enforce_home_region` [session setting]({% link v23.1/set-vars.md %}#enforce-home-region) to perform reads using [`AS OF SYSTEM TIME follower_read_timestamp()`]({% link v23.1/follower-reads.md %}) in order to find and report a query's [home region]({% link v23.1/multiregion-overview.md %}#table-localities). [#97827][#97827] -- Added a new aggregate [builtin function]({% link v23.1/functions-and-operators.md %}#array-functions) `array_cat_agg`. It behaves similarly to `array_agg(unnest(array_column))`: it takes arrays as its input, unnests them into the array elements which are then aggregated into a single result array. It's similar to concatenating all input arrays into a single array. 
[#97826][#97826] -- Added a new [session setting]({% link v23.1/set-vars.md %}) `optimizer_always_use_histograms`, which ensures that the [optimizer]({% link v23.1/cost-based-optimizer.md %}) always uses histograms when available to calculate the [statistics]({% link v23.1/cost-based-optimizer.md %}#table-statistics) of every plan that it explores. Enabling this setting can prevent the optimizer from choosing a suboptimal [index]({% link v23.1/indexes.md %}) when statistics for a table are stale. [#98194][#98194] -- Added the `MODIFYSQLCLUSTERSETTING` [system privilege](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#supported-privileges). This privilege allows users the ability to view all [cluster settings]({% link v23.1/cluster-settings.md %}), but only modify those settings with the `sql.defaults.*` prefix. This re-introduces the modify functionality seen with `MODIFYCLUSTERSETTING` CockroachDB [v22.2]({% link releases/v22.2.md %}). [#97521][#97521] -- Added a `status` column to the following [`crdb_internal`](https://www.cockroachlabs.com/docs/v23.1/crdb-internal#) virtual tables: `crdb_internal.cluster_txn_execution_insights` and `crdb_internal.node_txn_execution_insights`. [cockroachdb/cockroach#98217][#98217] -- Added a new session variable `allow_role_memberships_to_change_during_transaction` which can be used to make the [granting]({% link v23.1/grant.md %}) and [revoking]({% link v23.1/revoke.md %}) of [role memberships](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#roles) faster at the cost of some [isolation](https://www.cockroachlabs.com/docs/v23.1/architecture/transaction-layer#isolation-levels) claims. By default, when granting or revoking a role from another role, CockroachDB waits for all [transactions]({% link v23.1/transactions.md %}) that are consulting the current set of role memberships to complete. 
This means that by the time the transaction which performed the grant or revoke operation returns successfully, the user has a proof that no ongoing transaction is relying on the state that existed prior to the change. The downside of this waiting is that it means that `GRANT` and `REVOKE` will take longer than the longest currently executing transaction. In some cases, users do not care about whether concurrent transactions will immediately see the side-effects of the operation, and would instead prefer that the grant or revoke finish rapidly. In order to aid in those cases, the session variable `allow_role_memberships_to_change_during_transaction` has been added. Now, the grant or revoke will only need to wait for the completion of statements in [sessions]({% link v23.1/show-sessions.md %}) which do not have this option set. One can set the option as enabled by default in all sessions in order to accelerate and grant and revoke role operations. [#98370][#98370] -- Fixed a bug where CockroachDB panicked when a user tried to [truncate a table]({% link v23.1/truncate.md %}) which had an ongoing [Row-level TTL]({% link v23.1/row-level-ttl.md %}) change. CockroachDB still does not support truncating a table in this scenario, but instead of panicking an "unimplemented" error is returned. [#98537][#98537] +- A regions field was added to the `statistics` column of [`crdb_internal.statement_statistics`](https://www.cockroachlabs.com/docs/v23.1/crdb-internal#statement_statistics), reporting the [regions]({% link v23.1/show-regions.md %}) of the nodes on which the statement was executed. 
#95449 +- The `enforce_home_region` [session setting]({% link v23.1/set-vars.md %}#enforce-home-region) was extended with a new optional preview feature and session setting, which is disabled by default, to dynamically detect and report the home region for [`SELECT`]({% link v23.1/selection-queries.md %}) queries based on the [locality]({% link v23.1/show-locality.md %}) of the queried rows, if different from the region of the [gateway node]({% link v23.1/ui-sessions-page.md %}#session-details-gateway-node). #97827 +- Added a URL to errors related to the `enforce_home_region` [session setting]({% link v23.1/set-vars.md %}#enforce-home-region) that users can view to see additional information about the error. #97827 +- Added a new [session setting]({% link v23.1/set-vars.md %}) `enforce_home_region_follower_reads_enabled` as a preview feature to allow errors triggered by the `enforce_home_region` [session setting]({% link v23.1/set-vars.md %}#enforce-home-region) to perform reads using [`AS OF SYSTEM TIME follower_read_timestamp()`]({% link v23.1/follower-reads.md %}) in order to find and report a query's [home region]({% link v23.1/multiregion-overview.md %}#table-localities). #97827 +- Added a new aggregate [builtin function]({% link v23.1/functions-and-operators.md %}#array-functions) `array_cat_agg`. It behaves similarly to `array_agg(unnest(array_column))`: it takes arrays as its input, unnests them into the array elements which are then aggregated into a single result array. It's similar to concatenating all input arrays into a single array. #97826 +- Added a new [session setting]({% link v23.1/set-vars.md %}) `optimizer_always_use_histograms`, which ensures that the [optimizer]({% link v23.1/cost-based-optimizer.md %}) always uses histograms when available to calculate the [statistics]({% link v23.1/cost-based-optimizer.md %}#table-statistics) of every plan that it explores. 
Enabling this setting can prevent the optimizer from choosing a suboptimal [index]({% link v23.1/indexes.md %}) when statistics for a table are stale. #98194 +- Added the `MODIFYSQLCLUSTERSETTING` [system privilege](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#supported-privileges). This privilege allows users the ability to view all [cluster settings]({% link v23.1/cluster-settings.md %}), but only modify those settings with the `sql.defaults.*` prefix. This re-introduces the modify functionality seen with `MODIFYCLUSTERSETTING` in CockroachDB [v22.2]({% link releases/v22.2.md %}). #97521 +- Added a `status` column to the following [`crdb_internal`](https://www.cockroachlabs.com/docs/v23.1/crdb-internal#) virtual tables: `crdb_internal.cluster_txn_execution_insights` and `crdb_internal.node_txn_execution_insights`. #98217 +- Added a new session variable `allow_role_memberships_to_change_during_transaction` which can be used to make the [granting]({% link v23.1/grant.md %}) and [revoking]({% link v23.1/revoke.md %}) of [role memberships](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#roles) faster at the cost of some [isolation](https://www.cockroachlabs.com/docs/v23.1/architecture/transaction-layer#isolation-levels) claims. By default, when granting or revoking a role from another role, CockroachDB waits for all [transactions]({% link v23.1/transactions.md %}) that are consulting the current set of role memberships to complete. This means that by the time the transaction which performed the grant or revoke operation returns successfully, the user has a proof that no ongoing transaction is relying on the state that existed prior to the change. The downside of this waiting is that it means that `GRANT` and `REVOKE` will take longer than the longest currently executing transaction. 
In some cases, users do not care about whether concurrent transactions will immediately see the side-effects of the operation, and would instead prefer that the grant or revoke finish rapidly. In order to aid in those cases, the session variable `allow_role_memberships_to_change_during_transaction` has been added. Now, the grant or revoke will only need to wait for the completion of statements in [sessions]({% link v23.1/show-sessions.md %}) which do not have this option set. One can set the option as enabled by default in all sessions in order to accelerate grant and revoke role operations. #98370 +- Fixed a bug where CockroachDB panicked when a user tried to [truncate a table]({% link v23.1/truncate.md %}) which had an ongoing [Row-level TTL]({% link v23.1/row-level-ttl.md %}) change. CockroachDB still does not support truncating a table in this scenario, but instead of panicking an "unimplemented" error is returned. #98537

Operational changes

-- [Range leases]({% link v23.1/architecture/overview.md %}#architecture-leaseholder) will no longer be transferred to [stores]({% link v23.1/cockroach-start.md %}#store) which are IO overloaded. [#97587][#97587] -- The environment variable `COCKROACH_IGNORE_CLUSTER_SETTINGS` can be used to [start a node]({% link v23.1/cockroach-start.md %}) so that it ignores all stored [cluster setting]({% link v23.1/cluster-settings.md %}) values in an emergency. [#97805][#97805] -- Introduce two [cluster settings]({% link v23.1/cluster-settings.md %}) to control disk utilization thresholds for [replica allocation](https://www.cockroachlabs.com/docs/v23.1/architecture/replication-layer): `kv.allocator.rebalance_to_max_disk_utilization_threshold`, which controls the maximum disk utilization before a store will never be used as a rebalance target, and `kv.allocator.max_disk_utilization_threshold`, which controls maximum disk utilization before a store will never be used as a rebalance or allocation target _and_ will actively have [replicas](https://www.cockroachlabs.com/docs/v23.1/architecture/overview#architecture-replica) moved off of it. [#97409][#97409] -- The [cluster setting]({% link v23.1/cluster-settings.md %}) `kv.trace.slow_request_stacks.threshold` can be used to attach available stack history from [tracer snapshots]({% link v23.1/show-trace.md %}) to traces of [slow requests](https://www.cockroachlabs.com/docs/v23.1/logging#sql_perf). [#97895][#97895] -- Added a new [metric]({% link v23.1/metrics.md %}) `changefeed.schema_registry.retry_count`. This measures the number of request retries performed when sending requests to the [changefeed schema registry]({% link v23.1/changefeed-examples.md %}). Observing a nonzero value may indicate improper configuration of the schema registry or changefeed parameters. 
[#98338][#98338] -- The `kv.range_split.load_cpu_threshold` [cluster setting]({% link v23.1/cluster-settings.md %}#setting-kv-range-split-load-cpu-threshold) now has a minimum setting value of `10ms`. [#98250][#98250] -- The `kv.allocator.lease_io_overload_threshold_enforcement` [cluster setting]({% link v23.1/cluster-settings.md %}) value which disables enforcement is updated to be spelled correctly as "ignore" rather than "ingore". [cockroachdb/cockroach#98543][#98543] -- The [range lease](https://www.cockroachlabs.com/docs/v23.1/architecture/overview#architecture-leaseholder) duration can now be adjusted via the environment variable `COCKROACH_RANGE_LEASE_DURATION`. Users are advised to exercise caution when adjusting this, and consider the relationship with e.g. [Raft](https://www.cockroachlabs.com/docs/v23.1/architecture/replication-layer#raft) election timeouts and network timeouts. [#98616][#98616] +- [Range leases]({% link v23.1/architecture/overview.md %}#architecture-leaseholder) will no longer be transferred to [stores]({% link v23.1/cockroach-start.md %}#store) which are IO overloaded. #97587 +- The environment variable `COCKROACH_IGNORE_CLUSTER_SETTINGS` can be used to [start a node]({% link v23.1/cockroach-start.md %}) so that it ignores all stored [cluster setting]({% link v23.1/cluster-settings.md %}) values in an emergency. 
#97805 +- Introduce two [cluster settings]({% link v23.1/cluster-settings.md %}) to control disk utilization thresholds for [replica allocation](https://www.cockroachlabs.com/docs/v23.1/architecture/replication-layer): `kv.allocator.rebalance_to_max_disk_utilization_threshold`, which controls the maximum disk utilization before a store will never be used as a rebalance target, and `kv.allocator.max_disk_utilization_threshold`, which controls maximum disk utilization before a store will never be used as a rebalance or allocation target _and_ will actively have [replicas](https://www.cockroachlabs.com/docs/v23.1/architecture/overview#architecture-replica) moved off of it. #97409 +- The [cluster setting]({% link v23.1/cluster-settings.md %}) `kv.trace.slow_request_stacks.threshold` can be used to attach available stack history from [tracer snapshots]({% link v23.1/show-trace.md %}) to traces of [slow requests](https://www.cockroachlabs.com/docs/v23.1/logging#sql_perf). #97895 +- Added a new [metric]({% link v23.1/metrics.md %}) `changefeed.schema_registry.retry_count`. This measures the number of request retries performed when sending requests to the [changefeed schema registry]({% link v23.1/changefeed-examples.md %}). Observing a nonzero value may indicate improper configuration of the schema registry or changefeed parameters. #98338 +- The `kv.range_split.load_cpu_threshold` [cluster setting]({% link v23.1/cluster-settings.md %}#setting-kv-range-split-load-cpu-threshold) now has a minimum setting value of `10ms`. #98250 +- The `kv.allocator.lease_io_overload_threshold_enforcement` [cluster setting]({% link v23.1/cluster-settings.md %}) value which disables enforcement is updated to be spelled correctly as "ignore" rather than "ingore". 
#98543 +- The [range lease](https://www.cockroachlabs.com/docs/v23.1/architecture/overview#architecture-leaseholder) duration can now be adjusted via the environment variable `COCKROACH_RANGE_LEASE_DURATION`. Users are advised to exercise caution when adjusting this, and consider the relationship with e.g. [Raft](https://www.cockroachlabs.com/docs/v23.1/architecture/replication-layer#raft) election timeouts and network timeouts. #98616

Command-line changes

-- [`cockroach sql`]({% link v23.1/cockroach-sql.md %}) and [`cockroach demo`]({% link v23.1/cockroach-demo.md %}) now support the client-side command `\s` to display the previous command history. [#98035][#98035] -- Added a new flag `--max-go-memory` to the [`cockroach start`]({% link v23.1/cockroach-start.md %}) command. It controls the soft memory limit on the Go runtime which adjusts the behavior of the Go garbage collector to try keeping the memory usage under the soft memory limit (the limit is "soft" in a sense that it is not enforced if live objects (RSS) exceed it). Similar to the [`--max-sql-memory` flag]({% link v23.1/cockroach-start.md %}), the new flag `--max-go-memory` accepts numbers interpreted as bytes, size suffixes (e.g. `1GB` and `1GiB`) or a percentage of physical memory (e.g. `.25`). If left unspecified, the flag defaults to 2.25x of `--max-sql-memory` (subject to `--max-go-memory + 1.15 x --cache` not exceeding 90% of available RAM). Set to `0` to disable the soft memory limit (not recommended). If the `GOMEMLIMIT` env var is set and `--max-go-memory` is not, then the value from the env var is used; if both are set, then the flag takes precedence. [#97666][#97666] +- [`cockroach sql`]({% link v23.1/cockroach-sql.md %}) and [`cockroach demo`]({% link v23.1/cockroach-demo.md %}) now support the client-side command `\s` to display the previous command history. #98035 +- Added a new flag `--max-go-memory` to the [`cockroach start`]({% link v23.1/cockroach-start.md %}) command. It controls the soft memory limit on the Go runtime which adjusts the behavior of the Go garbage collector to try keeping the memory usage under the soft memory limit (the limit is "soft" in a sense that it is not enforced if live objects (RSS) exceed it). Similar to the [`--max-sql-memory` flag]({% link v23.1/cockroach-start.md %}), the new flag `--max-go-memory` accepts numbers interpreted as bytes, size suffixes (e.g. 
`1GB` and `1GiB`) or a percentage of physical memory (e.g. `.25`). If left unspecified, the flag defaults to 2.25x of `--max-sql-memory` (subject to `--max-go-memory + 1.15 x --cache` not exceeding 90% of available RAM). Set to `0` to disable the soft memory limit (not recommended). If the `GOMEMLIMIT` env var is set and `--max-go-memory` is not, then the value from the env var is used; if both are set, then the flag takes precedence. #97666 - Here are a few examples of how the default value is calculated on a machine with 16GiB of RAM. In the first two lines we use the default formula `2.25x --max-sql-memory`. In the third line, the default formula results in exceeding the upper bound on total usage (including the cache), so we use the upper bound determined as `0.9 * total RAM - 1.15 * cache size`. In the fourth line, the default formula results in 225MiB which is smaller than the lower bound of 256MiB, so we bump the value to that lower bound. In the fifth line, we use the value specified by the user (even though it is smaller than the lower bound on the default value). | Command line flags | Computed max SQL memory | Computed cache size | Computed max Go memory | @@ -59,44 +59,44 @@ Release Date: March 20, 2023 | --max-sql-memory=100MiB | 100MiB | 128MiB | 256MiB | | --max-sql-memory=.4 --cache=.2 --max-go-memory=100MiB | 6.4GiB | 3.2GiB | 100MiB | -- The `--drain-wait` argument to the [`cockroach node drain`]({% link v23.1/cockroach-node.md %}) command will be automatically increased if the command detects that it is smaller than the sum of the [cluster settings]({% link v23.1/node-shutdown.md %}#cluster-settings) `server.shutdown.drain_wait`, `server.shutdown.connection_wait`, `server.shutdown.query_wait` times two, and `server.shutdown.lease_transfer_wait`. If the `--drain-wait` argument is 0, then no timeout is used. 
This recommendation [was already documented]({% link v23.1/node-shutdown.md %}#drain-timeout), but now the advice will be applied automatically. [#98390][#98390] +- The `--drain-wait` argument to the [`cockroach node drain`]({% link v23.1/cockroach-node.md %}) command will be automatically increased if the command detects that it is smaller than the sum of the [cluster settings]({% link v23.1/node-shutdown.md %}#cluster-settings) `server.shutdown.drain_wait`, `server.shutdown.connection_wait`, `server.shutdown.query_wait` times two, and `server.shutdown.lease_transfer_wait`. If the `--drain-wait` argument is 0, then no timeout is used. This recommendation [was already documented]({% link v23.1/node-shutdown.md %}#drain-timeout), but now the advice will be applied automatically. #98390

DB Console changes

-- Fixed the error `Cannot read properties of undefined (reading 'length')` which could cause [DB Console]({% link v23.1/ui-overview.md %}) pages to fail to load. [#98222][#98222] -- Added a new metric to the [Hardware dashboard]({% link v23.1/ui-hardware-dashboard.md %}) showing the system-wide CPU usage in addition to the existing CockroachDB CPU usage. [#98187][#98187] -- Users will see an upgrade error message when a response from the SQL-over-HTTP API (from `/api/v2/sql/`) says that a relation or column does not exist. [cockroachdb/cockroach#98312][#98312] -- The description of the average QPS graph in the [Replication Metrics Dashboard]({% link v23.1/ui-replication-dashboard.md %}) no longer claims the average is exponentially weighted. [#98270][#98270] -- The metric `rebalancing.cpunanospersecond` is now included in the [Replication Metrics Dashboard]({% link v23.1/ui-replication-dashboard.md %}). [#98270][#98270] -- Added an error code column to the insights table for a failed execution to the statement and transaction detail views on the [Insights Page]({% link v23.1/ui-insights-page.md %}). Added a status column to the statement and transaction workload insights tables on the [Insights Page]({% link v23.1/ui-insights-page.md %}). [cockroachdb/cockroach#97138][#97138] +- Fixed the error `Cannot read properties of undefined (reading 'length')` which could cause [DB Console]({% link v23.1/ui-overview.md %}) pages to fail to load. #98222 +- Added a new metric to the [Hardware dashboard]({% link v23.1/ui-hardware-dashboard.md %}) showing the system-wide CPU usage in addition to the existing CockroachDB CPU usage. #98187 +- Users will see an upgrade error message when a response from the SQL-over-HTTP API (from `/api/v2/sql/`) says that a relation or column does not exist. 
#98312 +- The description of the average QPS graph in the [Replication Metrics Dashboard]({% link v23.1/ui-replication-dashboard.md %}) no longer claims the average is exponentially weighted. #98270 +- The metric `rebalancing.cpunanospersecond` is now included in the [Replication Metrics Dashboard]({% link v23.1/ui-replication-dashboard.md %}). #98270 +- Added an error code column to the insights table for a failed execution to the statement and transaction detail views on the [Insights Page]({% link v23.1/ui-insights-page.md %}). Added a status column to the statement and transaction workload insights tables on the [Insights Page]({% link v23.1/ui-insights-page.md %}). #97138

Bug fixes

-- RPC connections between nodes now require RPC connections to be established in both directions, otherwise the connection will be closed. This is done to prevent asymmetric network partitions where nodes are able to send outbound messages but not receive inbound messages, which could result in persistent unavailability. This behavior can be disabled by the [cluster setting]({% link v23.1/cluster-settings.md %}) `rpc.dialback.enabled`. [#94778][#94778] -- The owner of the public schema can now be changed using [`ALTER SCHEMA public OWNER TO new_owner`]({% link v23.1/alter-schema.md %}). [#98000][#98000] -- Fixed a bug in which [common table expressions]({% link v23.1/common-table-expressions.md %}) (CTEs) marked as `WITH RECURSIVE` which were not actually recursive could return incorrect results. This could happen if the CTE used a `UNION ALL`, because the [optimizer]({% link v23.1/cost-based-optimizer.md %}) incorrectly converted the `UNION ALL` to a `UNION`. This bug had existed since support for recursive CTEs was first added in v20.1. [#98042][#98042] +- RPC connections between nodes now require RPC connections to be established in both directions, otherwise the connection will be closed. This is done to prevent asymmetric network partitions where nodes are able to send outbound messages but not receive inbound messages, which could result in persistent unavailability. This behavior can be disabled by the [cluster setting]({% link v23.1/cluster-settings.md %}) `rpc.dialback.enabled`. #94778 +- The owner of the public schema can now be changed using [`ALTER SCHEMA public OWNER TO new_owner`]({% link v23.1/alter-schema.md %}). #98000 +- Fixed a bug in which [common table expressions]({% link v23.1/common-table-expressions.md %}) (CTEs) marked as `WITH RECURSIVE` which were not actually recursive could return incorrect results. 
This could happen if the CTE used a `UNION ALL`, because the [optimizer]({% link v23.1/cost-based-optimizer.md %}) incorrectly converted the `UNION ALL` to a `UNION`. This bug had existed since support for recursive CTEs was first added in v20.1. #98042 - Internal queries that are executed in order to serve a client-initiated query already appeared in [statistics]({% link v23.1/cost-based-optimizer.md %}#table-statistics) with an `application_name` prefixed by the string `$$`. But this name was not used in the output of [`SHOW QUERIES`]({% link v23.1/show-statements.md %}). Now, `SHOW QUERIES` also shows the `$$` prefix for these types of queries. -- Fixed a bug in evaluation of `ANY`, `SOME`, and `ALL` [sub-operators]({% link v23.1/functions-and-operators.md %}#operators) that would cause expressions like `NULL = ANY(ARRAY[]::INT[])` to return `NULL` instead of `False`. [cockroachdb/cockroach#97948][#97948] -- Fixed a bug that could crash the process when a query contained a literal [tuple expression]({% link v23.1/scalar-expressions.md %}#tuple-constructors) with more than two elements and only a single label, e.g., `((1, 2, 3) AS foo)`. [#98142][#98142] -- Fixed a bug where certain special character combinations in the [`options` field in connection URLs]({% link v23.1/connection-parameters.md %}#supported-options-parameters) were not properly supported by CockroachDB. [#98302][#98302] -- Fixed a bug where the stats columns on the [Transaction Fingerprints overview page]({% link v23.1/ui-transactions-page.md %}#transaction-fingerprints-view) was continuously incrementing. The fix was to ensure we don't re-use the stats object between re-renders by creating a new copy of the stats for every aggregation. 
[cockroachdb/cockroach#98307][#98307] -- Fixed a bug where [transactions]({% link v23.1/transactions.md %}) that performed a [`SELECT FOR UPDATE`]({% link v23.1/select-for-update.md %}) across multiple [ranges](https://www.cockroachlabs.com/docs/v23.1/architecture/overview#architecture-range) but never performed writes could fail to eagerly clean up their [locks](https://www.cockroachlabs.com/docs/v23.1/architecture/transaction-layer#writing) after commit. Future transactions that encountered these abandoned locks could be delayed for 50ms before unlocking them. [#98044][#98044] -- Fixed a bug which could result in some [`CREATE INDEX`]({% link v23.1/create-index.md %}) statements to fail with the error `failed to verify keys for Scan`. [#98262][#98262] -- Fixed a bug where CockroachDB could encounter an internal error `concurrent txn use detected`. The bug was introduced in [v22.2.0]({% link releases/v22.2.md %}). [#98120][#98120] -- Fixed a bug where if an [`UPDATE`]({% link v23.1/update.md %}) was performed during an on-going [`ADD COLUMN`]({% link v23.1/alter-table.md %}#add-column) or [`DROP COLUMN`]({% link v23.1/alter-table.md %}#drop-column) on a table, the update could incorrectly fail due to a duplicate key error. [#98354][#98354] -- Fixed a bug where it was possible for CockroachDB to temporarily not respect [zone configurations]({% link v23.1/configure-replication-zones.md %}) other than [the default zone config]({% link v23.1/configure-replication-zones.md %}#view-the-default-replication-zone). This could only happen for a short window of a few seconds after nodes with existing [replicas](https://www.cockroachlabs.com/docs/v23.1/architecture/overview#architecture-replica) were [restarted]({% link v23.1/cockroach-start.md %}), and self-rectified (also within seconds). 
[#98422][#98422] +- Fixed a bug in evaluation of `ANY`, `SOME`, and `ALL` [sub-operators]({% link v23.1/functions-and-operators.md %}#operators) that would cause expressions like `NULL = ANY(ARRAY[]::INT[])` to return `NULL` instead of `False`. #97948 +- Fixed a bug that could crash the process when a query contained a literal [tuple expression]({% link v23.1/scalar-expressions.md %}#tuple-constructors) with more than two elements and only a single label, e.g., `((1, 2, 3) AS foo)`. #98142 +- Fixed a bug where certain special character combinations in the [`options` field in connection URLs]({% link v23.1/connection-parameters.md %}#supported-options-parameters) were not properly supported by CockroachDB. #98302 +- Fixed a bug where the stats columns on the [Transaction Fingerprints overview page]({% link v23.1/ui-transactions-page.md %}#transaction-fingerprints-view) were continuously incrementing. The fix was to ensure we don't re-use the stats object between re-renders by creating a new copy of the stats for every aggregation. #98307 +- Fixed a bug where [transactions]({% link v23.1/transactions.md %}) that performed a [`SELECT FOR UPDATE`]({% link v23.1/select-for-update.md %}) across multiple [ranges](https://www.cockroachlabs.com/docs/v23.1/architecture/overview#architecture-range) but never performed writes could fail to eagerly clean up their [locks](https://www.cockroachlabs.com/docs/v23.1/architecture/transaction-layer#writing) after commit. Future transactions that encountered these abandoned locks could be delayed for 50ms before unlocking them. #98044 +- Fixed a bug which could result in some [`CREATE INDEX`]({% link v23.1/create-index.md %}) statements failing with the error `failed to verify keys for Scan`. #98262 +- Fixed a bug where CockroachDB could encounter an internal error `concurrent txn use detected`. The bug was introduced in [v22.2.0]({% link releases/v22.2.md %}). 
#98120 +- Fixed a bug where if an [`UPDATE`]({% link v23.1/update.md %}) was performed during an on-going [`ADD COLUMN`]({% link v23.1/alter-table.md %}#add-column) or [`DROP COLUMN`]({% link v23.1/alter-table.md %}#drop-column) on a table, the update could incorrectly fail due to a duplicate key error. #98354 +- Fixed a bug where it was possible for CockroachDB to temporarily not respect [zone configurations]({% link v23.1/configure-replication-zones.md %}) other than [the default zone config]({% link v23.1/configure-replication-zones.md %}#view-the-default-replication-zone). This could only happen for a short window of a few seconds after nodes with existing [replicas](https://www.cockroachlabs.com/docs/v23.1/architecture/overview#architecture-replica) were [restarted]({% link v23.1/cockroach-start.md %}), and self-rectified (also within seconds). #98422 - This manifested in a few ways: - If [`num_replicas`](https://www.cockroachlabs.com/docs/v23.1/configure-replication-zones#num_replicas) was set to something other than 3, we would still add or remove replicas to get to 3x replication. - If [`num_voters`](https://www.cockroachlabs.com/docs/v23.1/configure-replication-zones#num_voters) was set explicitly to get a mix of voting and [non-voting replicas](https://www.cockroachlabs.com/docs/v23.1/architecture/replication-layer#non-voting-replicas), it would be ignored. CockroachDB could possibly even remove non-voting replicas. - If [`range_min_bytes`]({% link v23.1/configure-replication-zones.md %}#range-min-bytes) or [`range_max_bytes`]({% link v23.1/configure-replication-zones.md %}#range-max-bytes) were changed from their default values of 128 MiB and 512 MiB respectively, we would instead try to size ranges to be within [128 MiB, 512MiB]. 
This could appear as an excess amount of [range splits](https://www.cockroachlabs.com/docs/v23.1/architecture/distribution-layer#range-splits) or [merges](https://www.cockroachlabs.com/docs/v23.1/architecture/distribution-layer#range-merges), as visible in the [Replication Dashboard]({% link v23.1/ui-replication-dashboard.md %}) under "Range Operations". - If [`gc.ttlseconds`]({% link v23.1/configure-replication-zones.md %}#gc-ttlseconds) was set to something other than the default 90000 seconds (25h), we would still only GC data older than 90000s. If the GC TTL was set to something larger than 25h, [`AS OF SYSTEM TIME`]({% link v23.1/as-of-system-time.md %}) queries going further back could start failing. For GC TTLs less than the default, clusters would observe increased disk usage due to more retained [MVCC garbage]({% link v23.1/performance-recipes.md %}#too-many-mvcc-values). - If [`constraints`]({% link v23.1/configure-replication-zones.md %}#constraints), [`lease_preferences`](https://www.cockroachlabs.com/docs/v23.1/configure-replication-zones#lease_preferences) or [`voter_constraints`](https://www.cockroachlabs.com/docs/v23.1/configure-replication-zones#voter_constraints) were set, they would be ignored. Range data and [leases](https://www.cockroachlabs.com/docs/v23.1/architecture/glossary#architecture-leaseholder) would possibly be moved outside where prescribed. -- Allow users with the `VIEWACTIVITY`/`VIEWACTIVITYREDACTED` [permissions](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#role-options) to access the [`crdb_internal.ranges_no_leases`]({% link v23.1/crdb-internal.md %}) table, necessary to view important DB Console pages (specifically, the [Databases Page]({% link v23.1/ui-databases-page.md %}), including database details, and database tables). 
[#98535][#98535] -- Fixed a bug that caused incorrect results when comparisons of [tuples]({% link v23.1/scalar-expressions.md %}#tuple-constructors) were done using the `ANY` [operator]({% link v23.1/functions-and-operators.md %}#operators). For example, an expression like (x, y) = ANY (SELECT a, b FROM t WHERE ...) could return `true` instead of the correct result of `NULL` when `x` and `y` were `NULL`, or `a` and `b` were `NULL`. This could only occur if the [subquery was correlated]({% link v23.1/subqueries.md %}#correlated-subqueries), i.e., it references columns from the outer part of the query. This bug was present since the [cost-based optimizer]({% link v23.1/cost-based-optimizer.md %}) was introduced in [v2.1]({% link releases/v2.1.md %}). [#98700][#98700] +- Allow users with the `VIEWACTIVITY`/`VIEWACTIVITYREDACTED` [permissions](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#role-options) to access the [`crdb_internal.ranges_no_leases`]({% link v23.1/crdb-internal.md %}) table, necessary to view important DB Console pages (specifically, the [Databases Page]({% link v23.1/ui-databases-page.md %}), including database details, and database tables). #98535 +- Fixed a bug that caused incorrect results when comparisons of [tuples]({% link v23.1/scalar-expressions.md %}#tuple-constructors) were done using the `ANY` [operator]({% link v23.1/functions-and-operators.md %}#operators). For example, an expression like (x, y) = ANY (SELECT a, b FROM t WHERE ...) could return `true` instead of the correct result of `NULL` when `x` and `y` were `NULL`, or `a` and `b` were `NULL`. This could only occur if the [subquery was correlated]({% link v23.1/subqueries.md %}#correlated-subqueries), i.e., it references columns from the outer part of the query. This bug was present since the [cost-based optimizer]({% link v23.1/cost-based-optimizer.md %}) was introduced in [v2.1]({% link releases/v2.1.md %}). #98700

Miscellaneous

-- Added two views to the [`crdb_internal`]({% link v23.1/crdb-internal.md %}) catalog: `crdb_internal.statement_statistics_persisted`, which surfaces data in the persisted `system.statement_statistics` table, and `crdb_internal.transaction_statistics_persisted`, which surfaces the `system.transaction_statistics` table. [cockroachdb/cockroach#98261][#98261] +- Added two views to the [`crdb_internal`]({% link v23.1/crdb-internal.md %}) catalog: `crdb_internal.statement_statistics_persisted`, which surfaces data in the persisted `system.statement_statistics` table, and `crdb_internal.transaction_statistics_persisted`, which surfaces the `system.transaction_statistics` table. #98261
@@ -106,59 +106,3 @@ This release includes 245 merged PRs by 65 authors.
-[#94778]: https://github.com/cockroachdb/cockroach/pull/94778 -[#95449]: https://github.com/cockroachdb/cockroach/pull/95449 -[#96449]: https://github.com/cockroachdb/cockroach/pull/96449 -[#96756]: https://github.com/cockroachdb/cockroach/pull/96756 -[#97138]: https://github.com/cockroachdb/cockroach/pull/97138 -[#97140]: https://github.com/cockroachdb/cockroach/pull/97140 -[#97409]: https://github.com/cockroachdb/cockroach/pull/97409 -[#97521]: https://github.com/cockroachdb/cockroach/pull/97521 -[#97569]: https://github.com/cockroachdb/cockroach/pull/97569 -[#97587]: https://github.com/cockroachdb/cockroach/pull/97587 -[#97666]: https://github.com/cockroachdb/cockroach/pull/97666 -[#97805]: https://github.com/cockroachdb/cockroach/pull/97805 -[#97826]: https://github.com/cockroachdb/cockroach/pull/97826 -[#97827]: https://github.com/cockroachdb/cockroach/pull/97827 -[#97895]: https://github.com/cockroachdb/cockroach/pull/97895 -[#97948]: https://github.com/cockroachdb/cockroach/pull/97948 -[#97991]: https://github.com/cockroachdb/cockroach/pull/97991 -[#97995]: https://github.com/cockroachdb/cockroach/pull/97995 -[#98000]: https://github.com/cockroachdb/cockroach/pull/98000 -[#98035]: https://github.com/cockroachdb/cockroach/pull/98035 -[#98042]: https://github.com/cockroachdb/cockroach/pull/98042 -[#98044]: https://github.com/cockroachdb/cockroach/pull/98044 -[#98053]: https://github.com/cockroachdb/cockroach/pull/98053 -[#98060]: https://github.com/cockroachdb/cockroach/pull/98060 -[#98092]: https://github.com/cockroachdb/cockroach/pull/98092 -[#98120]: https://github.com/cockroachdb/cockroach/pull/98120 -[#98135]: https://github.com/cockroachdb/cockroach/pull/98135 -[#98142]: https://github.com/cockroachdb/cockroach/pull/98142 -[#98175]: https://github.com/cockroachdb/cockroach/pull/98175 -[#98186]: https://github.com/cockroachdb/cockroach/pull/98186 -[#98187]: https://github.com/cockroachdb/cockroach/pull/98187 -[#98194]: 
https://github.com/cockroachdb/cockroach/pull/98194 -[#98217]: https://github.com/cockroachdb/cockroach/pull/98217 -[#98222]: https://github.com/cockroachdb/cockroach/pull/98222 -[#98241]: https://github.com/cockroachdb/cockroach/pull/98241 -[#98249]: https://github.com/cockroachdb/cockroach/pull/98249 -[#98250]: https://github.com/cockroachdb/cockroach/pull/98250 -[#98254]: https://github.com/cockroachdb/cockroach/pull/98254 -[#98261]: https://github.com/cockroachdb/cockroach/pull/98261 -[#98262]: https://github.com/cockroachdb/cockroach/pull/98262 -[#98270]: https://github.com/cockroachdb/cockroach/pull/98270 -[#98302]: https://github.com/cockroachdb/cockroach/pull/98302 -[#98307]: https://github.com/cockroachdb/cockroach/pull/98307 -[#98312]: https://github.com/cockroachdb/cockroach/pull/98312 -[#98338]: https://github.com/cockroachdb/cockroach/pull/98338 -[#98354]: https://github.com/cockroachdb/cockroach/pull/98354 -[#98370]: https://github.com/cockroachdb/cockroach/pull/98370 -[#98390]: https://github.com/cockroachdb/cockroach/pull/98390 -[#98403]: https://github.com/cockroachdb/cockroach/pull/98403 -[#98422]: https://github.com/cockroachdb/cockroach/pull/98422 -[#98445]: https://github.com/cockroachdb/cockroach/pull/98445 -[#98535]: https://github.com/cockroachdb/cockroach/pull/98535 -[#98537]: https://github.com/cockroachdb/cockroach/pull/98537 -[#98543]: https://github.com/cockroachdb/cockroach/pull/98543 -[#98616]: https://github.com/cockroachdb/cockroach/pull/98616 -[#98700]: https://github.com/cockroachdb/cockroach/pull/98700 diff --git a/src/current/_includes/releases/v23.1/v23.1.0-alpha.8.md b/src/current/_includes/releases/v23.1/v23.1.0-alpha.8.md index eda36313d00..100c55eefc6 100644 --- a/src/current/_includes/releases/v23.1/v23.1.0-alpha.8.md +++ b/src/current/_includes/releases/v23.1/v23.1.0-alpha.8.md @@ -6,72 +6,72 @@ Release Date: March 27, 2023

Security updates

-- Previously, users could gain unauthorized access to [statement diagnostic bundles]({% link v23.1/ui-statements-page.md %}#diagnostics) they did not create if they requested the bundle through an HTTP request to `/_admin/v1/stmtbundle/` and correctly guessed its (non-secret) `ID`. This change ensures that only authorized SQL users can request statement diagnostic bundles. [#99051][#99051] +- Previously, users could gain unauthorized access to [statement diagnostic bundles]({% link v23.1/ui-statements-page.md %}#diagnostics) they did not create if they requested the bundle through an HTTP request to `/_admin/v1/stmtbundle/` and correctly guessed its (non-secret) `ID`. This change ensures that only authorized SQL users can request statement diagnostic bundles. #99051

General changes

-- Increased the specificity of log messages to help troubleshoot [DB Console SSO]({% link v23.1/sso-db-console.md %}) issues. [#98522][#98522] +- Increased the specificity of log messages to help troubleshoot [DB Console SSO]({% link v23.1/sso-db-console.md %}) issues. #98522

{{ site.data.products.enterprise }} edition changes

-- The MuxRangefeed client, which is enabled with the `changefeed.mux_rangefeed.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}), is now more efficient when running against large-scale workloads. [#97957][#97957] -- The `server.oidc_authentication.claim_json_key` [cluster setting]({% link v23.1/cluster-settings.md %}) for DB Console SSO now accepts list-valued token claims. [#98522][#98522] -- Added the `WITH` key_column option to override the message metadata key for [changefeeds]({% link v23.1/changefeed-examples.md %}). This changes the key hashed to determine Kafka partitions. It does not affect the output of `key_in_value` or the domain of the per-key ordering guarantee. [#98806][#98806] -- The [Node Map]({% link v23.1/ui-cluster-overview-page.md %}#node-map) now shows normalized CPU usage. [#98225][#98225] +- The MuxRangefeed client, which is enabled with the `changefeed.mux_rangefeed.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}), is now more efficient when running against large-scale workloads. #97957 +- The `server.oidc_authentication.claim_json_key` [cluster setting]({% link v23.1/cluster-settings.md %}) for DB Console SSO now accepts list-valued token claims. #98522 +- Added the `WITH` key_column option to override the message metadata key for [changefeeds]({% link v23.1/changefeed-examples.md %}). This changes the key hashed to determine Kafka partitions. It does not affect the output of `key_in_value` or the domain of the per-key ordering guarantee. #98806 +- The [Node Map]({% link v23.1/ui-cluster-overview-page.md %}#node-map) now shows normalized CPU usage. #98225

SQL language changes

-- Fixed a bug where the check constraint on an `OID` type column results in a panic in the legacy schema changer. [#98800][#98800] -- Added a new `WITH REDACT` option to the following statements: [`SHOW CREATE`]({% link v23.1/show-create.md %}), [`SHOW CREATE TABLE`]({% link v23.1/show-create.md %}#show-the-create-table-statement-for-a-table), and [`SHOW CREATE VIEW`]({% link v23.1/show-create.md %}#show-the-create-view-statement-for-a-view) which replaces constants and literals in the printed `CREATE` statement with the redacted marker, `‹×›`. [#98251][#98251] -- Added support for the `REDACT` flag to the following variants of [`EXPLAIN`]({% link v23.1/explain.md %}): `EXPLAIN (OPT)`, `EXPLAIN (OPT, CATALOG)`, `EXPLAIN (OPT, MEMO)`, `EXPLAIN (OPT, TYPES)`, and `EXPLAIN (OPT, VERBOSE)`. These `EXPLAIN` statements will have constants, literal values, parameter values, and any other user data redacted in the output. [#97549][#97549] -- Disallowed the [`RESTORE`]({% link v23.1/restore.md %}) of backups taken on a cluster version older than the minimum binary version the current cluster can interoperate with. This is described in an updated version of the policy outlined in ["Restoring Backups Across Versions"]({% link v23.1/restoring-backups-across-versions.md %}). [#98597][#98597] -- Bulk [`COPY FROM`]({% link v23.1/copy.md %}) statements are now processed with a vectorized insert and can be anywhere from 0.5x to 5x faster. Typical hardware and schemas should see a 2x improvement. Vectorized inserts are only used for `COPY` statements and are not yet applied to regular inserts. Both the `vectorize` and `copy_fast_path_enabled` session variables can be used to disable this feature. [#98605][#98605] -- Added [stemming and stopword-eliminating](https://www.cockroachlabs.com/blog/full-text-indexing-search/) text search configurations for English, Danish, Dutch, Finnish, French, German, Hungarian, Italian, Norwegian, Portuguese, Russian, Spanish, Swedish, and Turkish. 
[#97677][#97677] -- Added the `system.statement_activity` and `system.transaction_activity` tables. These tables are used to populate the [SQL Activity]({% link v23.1/ui-statements-page.md %}) pages, and contain the top N statements and transactions based on different key columns. [#99179][#99179] -- Added helper text for [`UPDATE`]({% link v23.1/update.md %}) to include `FROM `. [#99301][#99301] -- Added the `default_text_search_config` variable for compatibility with single-argument variants to the following text search functions: `to_tsvector`, `to_tsquery`, `phraseto_tsquery`, and `plainto_tsquery`, which use the value of `default_text_search_config` instead of expecting one to be included as in the two-argument variants. The default value of this setting is `english`. [#99323][#99323] -- Added the `ts_rank` function for ranking text search query results. [#99323][#99323] -- Renamed the `coordinator_locality` option in `BACKUP` to [`EXECUTION LOCALITY`]({% link v23.1/take-locality-restricted-backups.md %}). This option allows the user to control the nodes involved in the execution of a backup job, including the processing of row data and job coordination. For example, defining an execution locality for a backup job could reduce latency when a cluster is running heavy workloads and has a frequent backup schedule. [#99176][#99176] +- Fixed a bug where the check constraint on an `OID` type column results in a panic in the legacy schema changer. #98800 +- Added a new `WITH REDACT` option to the following statements: [`SHOW CREATE`]({% link v23.1/show-create.md %}), [`SHOW CREATE TABLE`]({% link v23.1/show-create.md %}#show-the-create-table-statement-for-a-table), and [`SHOW CREATE VIEW`]({% link v23.1/show-create.md %}#show-the-create-view-statement-for-a-view) which replaces constants and literals in the printed `CREATE` statement with the redacted marker, `‹×›`. 
#98251 +- Added support for the `REDACT` flag to the following variants of [`EXPLAIN`]({% link v23.1/explain.md %}): `EXPLAIN (OPT)`, `EXPLAIN (OPT, CATALOG)`, `EXPLAIN (OPT, MEMO)`, `EXPLAIN (OPT, TYPES)`, and `EXPLAIN (OPT, VERBOSE)`. These `EXPLAIN` statements will have constants, literal values, parameter values, and any other user data redacted in the output. #97549 +- Disallowed the [`RESTORE`]({% link v23.1/restore.md %}) of backups taken on a cluster version older than the minimum binary version the current cluster can interoperate with. This is described in an updated version of the policy outlined in ["Restoring Backups Across Versions"]({% link v23.1/restoring-backups-across-versions.md %}). #98597 +- Bulk [`COPY FROM`]({% link v23.1/copy.md %}) statements are now processed with a vectorized insert and can be anywhere from 0.5x to 5x faster. Typical hardware and schemas should see a 2x improvement. Vectorized inserts are only used for `COPY` statements and are not yet applied to regular inserts. Both the `vectorize` and `copy_fast_path_enabled` session variables can be used to disable this feature. #98605 +- Added [stemming and stopword-eliminating](https://www.cockroachlabs.com/blog/full-text-indexing-search/) text search configurations for English, Danish, Dutch, Finnish, French, German, Hungarian, Italian, Norwegian, Portuguese, Russian, Spanish, Swedish, and Turkish. #97677 +- Added the `system.statement_activity` and `system.transaction_activity` tables. These tables are used to populate the [SQL Activity]({% link v23.1/ui-statements-page.md %}) pages, and contain the top N statements and transactions based on different key columns. #99179 +- Added helper text for [`UPDATE`]({% link v23.1/update.md %}) to include `FROM `. 
#99301 +- Added the `default_text_search_config` variable for compatibility with single-argument variants to the following text search functions: `to_tsvector`, `to_tsquery`, `phraseto_tsquery`, and `plainto_tsquery`, which use the value of `default_text_search_config` instead of expecting one to be included as in the two-argument variants. The default value of this setting is `english`. #99323 +- Added the `ts_rank` function for ranking text search query results. #99323 +- Renamed the `coordinator_locality` option in `BACKUP` to [`EXECUTION LOCALITY`]({% link v23.1/take-locality-restricted-backups.md %}). This option allows the user to control the nodes involved in the execution of a backup job, including the processing of row data and job coordination. For example, defining an execution locality for a backup job could reduce latency when a cluster is running heavy workloads and has a frequent backup schedule. #99176

Operational changes

-- Checkpoint directories that can be created in the rare event of range inconsistency are now clearly indicated as `pending` until they are fully populated. This helps operators distinguish valid checkpoints from corrupted ones. [#99119][#99119] -- [Prometheus]({% link v23.1/monitor-cockroachdb-with-prometheus.md %}) metrics available at the `_status/vars` path now contain a `node_id` label that identifies the node they were scraped from. [#99235][#99235] +- Checkpoint directories that can be created in the rare event of range inconsistency are now clearly indicated as `pending` until they are fully populated. This helps operators distinguish valid checkpoints from corrupted ones. #99119 +- [Prometheus]({% link v23.1/monitor-cockroachdb-with-prometheus.md %}) metrics available at the `_status/vars` path now contain a `node_id` label that identifies the node they were scraped from. #99235

DB Console changes

-- The [Statement & Transaction Fingerprints]({% link v23.1/ui-statements-page.md %}#statement-fingerprint-page) pages no longer poll data, to simplify the user experience and reduce any performance impact. [#98331][#98331] -- Data on the [Statement Fingerprint Details]({% link v23.1/ui-statements-page.md %}#statement-fingerprints-view) page no longer updates automatically every 5mins. [#99298][#99298] -- Updated the Jobs table column name from "Last Modified Time" to "Last Execution Time". [#99021][#99021] -- On the [SQL Activity Fingerprints]({% link v23.1/ui-overview.md %}#sql-activity) pages, users will not see stats that have not yet been flushed to disk. [#98815][#98815] -- Users can now request `top-k statements by % runtime` on the SQL Activity Fingerprints pages. [#98815][#98815] -- Added Search Criteria to the [Statements]({% link v23.1/ui-statements-page.md %}) and [Transactions]({% link v23.1/ui-transactions-page.md %}) pages, and updated the UX with improvements. [#98815][#98815] -- Added badges for each selected filter on the [SQL Activity]({% link v23.1/ui-overview.md %}#sql-activity) and [Insights]({% link v23.1/ui-insights-page.md %}) pages. [#98988][#98988] -- The default request sort for the [Statement Fingerprints Overview]({% link v23.1/ui-statements-page.md %}#statement-fingerprints-view) page is now `% of All Runtime`. [#99298][#99298] -- Fixed a bug where the table's `CREATE` statement would not display correctly on the [Table Details]({% link v23.1/ui-databases-page.md %}#table-details) page. [#99434][#99434] -- Added an assertion on the KV side to prevent other existing or future attempts of LeafTxn issuing locking requests. This ensures the KV API is used as agreed upon and can be helpful in debugging latency issues caused by holding locks. 
[#99412][#99412] +- The [Statement & Transaction Fingerprints]({% link v23.1/ui-statements-page.md %}#statement-fingerprint-page) pages no longer poll data, to simplify the user experience and reduce any performance impact. #98331 +- Data on the [Statement Fingerprint Details]({% link v23.1/ui-statements-page.md %}#statement-fingerprints-view) page no longer updates automatically every 5mins. #99298 +- Updated the Jobs table column name from "Last Modified Time" to "Last Execution Time". #99021 +- On the [SQL Activity Fingerprints]({% link v23.1/ui-overview.md %}#sql-activity) pages, users will not see stats that have not yet been flushed to disk. #98815 +- Users can now request `top-k statements by % runtime` on the SQL Activity Fingerprints pages. #98815 +- Added Search Criteria to the [Statements]({% link v23.1/ui-statements-page.md %}) and [Transactions]({% link v23.1/ui-transactions-page.md %}) pages, and updated the UX with improvements. #98815 +- Added badges for each selected filter on the [SQL Activity]({% link v23.1/ui-overview.md %}#sql-activity) and [Insights]({% link v23.1/ui-insights-page.md %}) pages. #98988 +- The default request sort for the [Statement Fingerprints Overview]({% link v23.1/ui-statements-page.md %}#statement-fingerprints-view) page is now `% of All Runtime`. #99298 +- Fixed a bug where the table's `CREATE` statement would not display correctly on the [Table Details]({% link v23.1/ui-databases-page.md %}#table-details) page. #99434 +- Added an assertion on the KV side to prevent other existing or future attempts of LeafTxn issuing locking requests. This ensures the KV API is used as agreed upon and can be helpful in debugging latency issues caused by holding locks. #99412

Bug fixes

-- Fixed a rare panic in `upstream etcd-io/raft` when a message appends race with log compaction. [#98721][#98721] -- In the DB Console Stats pages, issuing a new request for stats while one is pending is now allowed and will replace the pending request. [#98331][#98331] -- Fixed a bug in which [`SET avoid_buffering = true`]({% link v23.1/set-vars.md %}) could produce a crash on subsequent operations. [#98290][#98290] -- Fixed a bug where using [`ST_Transform`](https://www.cockroachlabs.com/docs/v23.1/functions-and-operators#st_transform) could result in a memory leak. [#98740][#98740] -- Fixed internal errors in [`SHOW JOBS`]({% link v23.1/show-jobs.md %}) statements that have a `WITH` clause. [#98389][#98389] -- Previously, the [`ADD COLUMN ... DEFAULT cluster_logical_timestamp()`]({% link v23.1/alter-table.md %}#add-column) statement would crash the node and leave the table in a corrupt state. The root cause is a `nil` pointer dereference. The bug is now fixed by returning an unimplemented error and hence disallowing using the [builtin function]({% link v23.1/functions-and-operators.md %}#array-functions) as the default value when backfilling. [#98696][#98696] -- Fixed a bug that could prevent a cached query with a user-defined type reference from being invalidated even after a schema change that should prevent the type from being resolved. [#96045][#96045] -- Fixed a bug that could prevent a cached query from being invalidated when a [UDF]({% link v23.1/user-defined-functions.md %}) referenced by that query was altered or dropped. [#96045][#96045] -- Fixed the replacement of in-flight requests for `KeyedCachedDataReducer`s to prevent permanent loading on requests stuck on an `inFlight` status. [#99095][#99095] -- Improved the reliability of latency data in v23.1 clusters. [#99294][#99294] +- Fixed a rare panic in `upstream etcd-io/raft` when a message appends race with log compaction. 
#98721 +- In the DB Console Stats pages, issuing a new request for stats while one is pending is now allowed and will replace the pending request. #98331 +- Fixed a bug in which [`SET avoid_buffering = true`]({% link v23.1/set-vars.md %}) could produce a crash on subsequent operations. #98290 +- Fixed a bug where using [`ST_Transform`](https://www.cockroachlabs.com/docs/v23.1/functions-and-operators#st_transform) could result in a memory leak. #98740 +- Fixed internal errors in [`SHOW JOBS`]({% link v23.1/show-jobs.md %}) statements that have a `WITH` clause. #98389 +- Previously, the [`ADD COLUMN ... DEFAULT cluster_logical_timestamp()`]({% link v23.1/alter-table.md %}#add-column) statement would crash the node and leave the table in a corrupt state. The root cause is a `nil` pointer dereference. The bug is now fixed by returning an unimplemented error and hence disallowing using the [builtin function]({% link v23.1/functions-and-operators.md %}#array-functions) as the default value when backfilling. #98696 +- Fixed a bug that could prevent a cached query with a user-defined type reference from being invalidated even after a schema change that should prevent the type from being resolved. #96045 +- Fixed a bug that could prevent a cached query from being invalidated when a [UDF]({% link v23.1/user-defined-functions.md %}) referenced by that query was altered or dropped. #96045 +- Fixed the replacement of in-flight requests for `KeyedCachedDataReducer`s to prevent permanent loading on requests stuck on an `inFlight` status. #99095 +- Improved the reliability of latency data in v23.1 clusters. #99294

Performance improvements

-- The Raft tick interval has been increased from `200ms` to `500ms` in order to reduce per-replica CPU costs, and can now be adjusted via `COCKROACH_RAFT_TICK_INTERVAL`. Dependant parameters such as the Raft election timeout (`COCKROACH_RAFT_ELECTION_TIMEOUT_TICKS`), reproposal timeout (`COCKROACH_RAFT_REPROPOSAL_TIMEOUT_TICKS`), and heartbeat interval (`COCKROACH_RAFT_HEARTBEAT_INTERVAL_TICKS`) have been adjusted such that their wall-time value remains the same. [#98584][#98584] -- The Raft scheduler is now sharded to relieve contention during range Raft processing, which can significantly improve performance at high CPU core counts. [#98854][#98854] +- The Raft tick interval has been increased from `200ms` to `500ms` in order to reduce per-replica CPU costs, and can now be adjusted via `COCKROACH_RAFT_TICK_INTERVAL`. Dependent parameters such as the Raft election timeout (`COCKROACH_RAFT_ELECTION_TIMEOUT_TICKS`), reproposal timeout (`COCKROACH_RAFT_REPROPOSAL_TIMEOUT_TICKS`), and heartbeat interval (`COCKROACH_RAFT_HEARTBEAT_INTERVAL_TICKS`) have been adjusted such that their wall-time value remains the same. #98584 +- The Raft scheduler is now sharded to relieve contention during range Raft processing, which can significantly improve performance at high CPU core counts. #98854

Build changes

-- Running `./dev ui test` (or `bazel test //pkg/ui/workspaces/db-console:jest`) now uses less memory. [#98880][#98880] +- Running `./dev ui test` (or `bazel test //pkg/ui/workspaces/db-console:jest`) now uses less memory. #98880
@@ -84,45 +84,3 @@ We would like to thank the following contributors from the CockroachDB community
-[#96045]: https://github.com/cockroachdb/cockroach/pull/96045 -[#97549]: https://github.com/cockroachdb/cockroach/pull/97549 -[#97677]: https://github.com/cockroachdb/cockroach/pull/97677 -[#97957]: https://github.com/cockroachdb/cockroach/pull/97957 -[#98077]: https://github.com/cockroachdb/cockroach/pull/98077 -[#98225]: https://github.com/cockroachdb/cockroach/pull/98225 -[#98251]: https://github.com/cockroachdb/cockroach/pull/98251 -[#98290]: https://github.com/cockroachdb/cockroach/pull/98290 -[#98331]: https://github.com/cockroachdb/cockroach/pull/98331 -[#98353]: https://github.com/cockroachdb/cockroach/pull/98353 -[#98389]: https://github.com/cockroachdb/cockroach/pull/98389 -[#98522]: https://github.com/cockroachdb/cockroach/pull/98522 -[#98584]: https://github.com/cockroachdb/cockroach/pull/98584 -[#98597]: https://github.com/cockroachdb/cockroach/pull/98597 -[#98605]: https://github.com/cockroachdb/cockroach/pull/98605 -[#98696]: https://github.com/cockroachdb/cockroach/pull/98696 -[#98721]: https://github.com/cockroachdb/cockroach/pull/98721 -[#98740]: https://github.com/cockroachdb/cockroach/pull/98740 -[#98758]: https://github.com/cockroachdb/cockroach/pull/98758 -[#98800]: https://github.com/cockroachdb/cockroach/pull/98800 -[#98806]: https://github.com/cockroachdb/cockroach/pull/98806 -[#98815]: https://github.com/cockroachdb/cockroach/pull/98815 -[#98826]: https://github.com/cockroachdb/cockroach/pull/98826 -[#98844]: https://github.com/cockroachdb/cockroach/pull/98844 -[#98854]: https://github.com/cockroachdb/cockroach/pull/98854 -[#98880]: https://github.com/cockroachdb/cockroach/pull/98880 -[#98988]: https://github.com/cockroachdb/cockroach/pull/98988 -[#99021]: https://github.com/cockroachdb/cockroach/pull/99021 -[#99050]: https://github.com/cockroachdb/cockroach/pull/99050 -[#99051]: https://github.com/cockroachdb/cockroach/pull/99051 -[#99052]: https://github.com/cockroachdb/cockroach/pull/99052 -[#99095]: 
https://github.com/cockroachdb/cockroach/pull/99095 -[#99119]: https://github.com/cockroachdb/cockroach/pull/99119 -[#99176]: https://github.com/cockroachdb/cockroach/pull/99176 -[#99179]: https://github.com/cockroachdb/cockroach/pull/99179 -[#99235]: https://github.com/cockroachdb/cockroach/pull/99235 -[#99294]: https://github.com/cockroachdb/cockroach/pull/99294 -[#99298]: https://github.com/cockroachdb/cockroach/pull/99298 -[#99301]: https://github.com/cockroachdb/cockroach/pull/99301 -[#99323]: https://github.com/cockroachdb/cockroach/pull/99323 -[#99412]: https://github.com/cockroachdb/cockroach/pull/99412 -[#99434]: https://github.com/cockroachdb/cockroach/pull/99434 diff --git a/src/current/_includes/releases/v23.1/v23.1.0-alpha.9.md b/src/current/_includes/releases/v23.1/v23.1.0-alpha.9.md index 16f6d54e4ed..57f7006ae69 100644 --- a/src/current/_includes/releases/v23.1/v23.1.0-alpha.9.md +++ b/src/current/_includes/releases/v23.1/v23.1.0-alpha.9.md @@ -6,24 +6,24 @@ Release Date: April 4, 2023

Backward-incompatible changes

-- The output of the [`SHOW RANGES`]({% link v23.1/show-ranges.md %}) command for the `crdb_internal.ranges` and `crdb_internal.ranges_no_leases` tables has been updated, and the previous output is deprecated. To enable the new command output, set the `sql.show_ranges_deprecated_behavior.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}) to `false`. The new output will become default in v23.2. [#99618][#99618] +- The output of the [`SHOW RANGES`]({% link v23.1/show-ranges.md %}) command for the `crdb_internal.ranges` and `crdb_internal.ranges_no_leases` tables has been updated, and the previous output is deprecated. To enable the new command output, set the `sql.show_ranges_deprecated_behavior.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}) to `false`. The new output will become default in v23.2. #99618

{{ site.data.products.enterprise }} edition changes

-- The [`CREATE CHANGEFEED`]({% link v23.1/create-changefeed.md %}) statement now allows you to limit the nodes that can execute a changefeed by including a locality filter in the `WITH` clause. A node can execute the changefeed only if it was started with a matching [`--locality`](https://cockroachlabs.com/docs/v23.1/cockroach-start#locality) flag. Replace `{locality}` with a comma-separated list of key-value pairs. [#99935][#99935] +- The [`CREATE CHANGEFEED`]({% link v23.1/create-changefeed.md %}) statement now allows you to limit the nodes that can execute a changefeed by including a locality filter in the `WITH` clause. A node can execute the changefeed only if it was started with a matching [`--locality`](https://www.cockroachlabs.com/docs/v23.1/cockroach-start#locality) flag. Replace `{locality}` with a comma-separated list of key-value pairs. #99935

SQL language changes

-- The new `prepared_statements_cache_size` [session setting](https://www.cockroachlabs.com/docs/v22.2/set-vars) helps to prevent [prepared statement]({% link v23.1/savepoint.md %}#savepoints-and-prepared-statements) leaks by automatically deallocating the least-recently-used prepared statements when the cache reaches a given size. [#99254][#99254] +- The new `prepared_statements_cache_size` [session setting](https://www.cockroachlabs.com/docs/v22.2/set-vars) helps to prevent [prepared statement]({% link v23.1/savepoint.md %}#savepoints-and-prepared-statements) leaks by automatically deallocating the least-recently-used prepared statements when the cache reaches a given size. #99254

Operational changes

-- The new `COCKROACH_DISABLE_NODE_AND_TENANT_METRIC_LABELS` [environment variable]({% link v23.1/cockroach-commands.md %}#environment-variables) allows you to suppress metrics from a cluster's [Prometheus endpoint]({% link v23.1/monitoring-and-alerting.md %}#prometheus-endpoint) if they conflict with labels that are applied by external tools that collect metrics from the endpoint. Set the environment variable to a comma-separated list of key-value pairs. [#99820][#99820] +- The new `COCKROACH_DISABLE_NODE_AND_TENANT_METRIC_LABELS` [environment variable]({% link v23.1/cockroach-commands.md %}#environment-variables) allows you to suppress metrics from a cluster's [Prometheus endpoint]({% link v23.1/monitoring-and-alerting.md %}#prometheus-endpoint) if they conflict with labels that are applied by external tools that collect metrics from the endpoint. Set the environment variable to a comma-separated list of key-value pairs. #99820

DB Console changes

-- The [**Index Details**]({% link v23.1/ui-databases-page.md %}#index-details) section of the **Databases** page now displays the list of most-frequently-used index fingerprints to all users, rather than only to `admin` users, because the page now queries a view rather than a system table directly. [#99485][#99485] -- When you search or filter within the [**Statements** page]({% link v23.1/ui-statements-page.md %}) or [**Transactions** page]({% link v23.1/ui-transactions-page.md %}), if you interactively sort the results using a column that was not part of the original query, a warning displays if you are viewing only a subset of the results, along with a suggestion to update the original query. [#99795][#99795] +- The [**Index Details**]({% link v23.1/ui-databases-page.md %}#index-details) section of the **Databases** page now displays the list of most-frequently-used index fingerprints to all users, rather than only to `admin` users, because the page now queries a view rather than a system table directly. #99485 +- When you search or filter within the [**Statements** page]({% link v23.1/ui-statements-page.md %}) or [**Transactions** page]({% link v23.1/ui-transactions-page.md %}), if you interactively sort the results using a column that was not part of the original query, a warning displays if you are viewing only a subset of the results, along with a suggestion to update the original query. #99795

Miscellaneous

@@ -36,19 +36,19 @@ Release Date: April 4, 2023 - `total_estimated_execution_time` - `p99_latency` - [#99417][#99417] + #99417

Bug fixes

-- Fixed pagination bugs when searching or filtering within the [**Databases** page]({% link v23.1/ui-databases-page.md %}) or viewing the details of a database. [#99513][#99513] -- Fixed a rare bug introduced in v22.2.0 that could cause a node to crash with an `attempting to append refresh spans after the tracked timestamp has moved forward` error when querying virtual tables in the [`crdb_internal`]({% link v23.1/crdb-internal.md %}) or [`pg_catalog`]({% link v23.1/pg-catalog.md %}) system catalogs. If you are experiencing this bug, set the `sql.distsql.use_streamer.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}) to `false` before upgrading a cluster to v23.1. [#99443][#99443] -- Fixed a bug that could erroneously cause multiple garbage-collection jobs to be created when executing a [`DROP SCHEMA ... CASCADE`]({% link v23.1/drop-schema.md %}) command, one job for each table and one for the cascaded `DROP` itself. [#99706][#99706] -- Fixed a bug in the [**Insights** page]({% link v23.1/ui-insights-page.md %}#schema-insights-tab) that prevented a recommendation to drop an index from being executed if the index's name contained a space. [#100023][#100023] -- Fixed a rare bug that prevented the garbage-collection job for a [`TRUNCATE`]({% link v23.1/truncate.md %}) command from successfully finishing if the table descriptor had already been garbage-collected. The garbage-collection job now succeeds in this situation. [#100009][#100009] -- Fixed a rare bug that could cause a query of a virtual table in the [`crdb_internal`]({% link v23.1/crdb-internal.md %}) or [`pg_catalog`]({% link v23.1/pg-catalog.md %}) system catalog to hang indefinitely if the query returned an error. [#99969][#99969] -- Fixed a bug introduced prior to v21.2 that could cause the SQL gateway node to crash if you [created a view]({% link v23.1/create-view.md %}) with a circular or self-referencing dependencies. 
This situation no longer crashes the node, and a `cyclic view dependency for relation` error is now logged. [#100159][#100159] -- Several rare bugs have been fixed that could cause corruption in the existing primary index when a rollback occurs concurrent to adding or removing a [column family]({% link v23.1/column-families.md %}). This could lead to subsequent unavailability of the table. [#100030][#100030] -- Fixed a bug that could cause a node to crash with an out-of-memory (OOM) exception when viewing details in the [**Statements** page]({% link v23.1/ui-statements-page.md %}) or [**Transactions** page]({% link v23.1/ui-transactions-page.md %}). [#99550][#99550] +- Fixed pagination bugs when searching or filtering within the [**Databases** page]({% link v23.1/ui-databases-page.md %}) or viewing the details of a database. #99513 +- Fixed a rare bug introduced in v22.2.0 that could cause a node to crash with an `attempting to append refresh spans after the tracked timestamp has moved forward` error when querying virtual tables in the [`crdb_internal`]({% link v23.1/crdb-internal.md %}) or [`pg_catalog`]({% link v23.1/pg-catalog.md %}) system catalogs. If you are experiencing this bug, set the `sql.distsql.use_streamer.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}) to `false` before upgrading a cluster to v23.1. #99443 +- Fixed a bug that could erroneously cause multiple garbage-collection jobs to be created when executing a [`DROP SCHEMA ... CASCADE`]({% link v23.1/drop-schema.md %}) command, one job for each table and one for the cascaded `DROP` itself. #99706 +- Fixed a bug in the [**Insights** page]({% link v23.1/ui-insights-page.md %}#schema-insights-tab) that prevented a recommendation to drop an index from being executed if the index's name contained a space. 
#100023 +- Fixed a rare bug that prevented the garbage-collection job for a [`TRUNCATE`]({% link v23.1/truncate.md %}) command from successfully finishing if the table descriptor had already been garbage-collected. The garbage-collection job now succeeds in this situation. #100009 +- Fixed a rare bug that could cause a query of a virtual table in the [`crdb_internal`]({% link v23.1/crdb-internal.md %}) or [`pg_catalog`]({% link v23.1/pg-catalog.md %}) system catalog to hang indefinitely if the query returned an error. #99969 +- Fixed a bug introduced prior to v21.2 that could cause the SQL gateway node to crash if you [created a view]({% link v23.1/create-view.md %}) with circular or self-referencing dependencies. This situation no longer crashes the node, and a `cyclic view dependency for relation` error is now logged. #100159 +- Several rare bugs have been fixed that could cause corruption in the existing primary index when a rollback occurs concurrent to adding or removing a [column family]({% link v23.1/column-families.md %}). This could lead to subsequent unavailability of the table. #100030 +- Fixed a bug that could cause a node to crash with an out-of-memory (OOM) exception when viewing details in the [**Statements** page]({% link v23.1/ui-statements-page.md %}) or [**Transactions** page]({% link v23.1/ui-transactions-page.md %}). #99550
@@ -58,23 +58,3 @@ This release includes 75 merged PRs by 41 authors.
-[#100009]: https://github.com/cockroachdb/cockroach/pull/100009 -[#100011]: https://github.com/cockroachdb/cockroach/pull/100011 -[#100023]: https://github.com/cockroachdb/cockroach/pull/100023 -[#100030]: https://github.com/cockroachdb/cockroach/pull/100030 -[#100159]: https://github.com/cockroachdb/cockroach/pull/100159 -[#99254]: https://github.com/cockroachdb/cockroach/pull/99254 -[#99398]: https://github.com/cockroachdb/cockroach/pull/99398 -[#99417]: https://github.com/cockroachdb/cockroach/pull/99417 -[#99443]: https://github.com/cockroachdb/cockroach/pull/99443 -[#99485]: https://github.com/cockroachdb/cockroach/pull/99485 -[#99513]: https://github.com/cockroachdb/cockroach/pull/99513 -[#99550]: https://github.com/cockroachdb/cockroach/pull/99550 -[#99618]: https://github.com/cockroachdb/cockroach/pull/99618 -[#99706]: https://github.com/cockroachdb/cockroach/pull/99706 -[#99795]: https://github.com/cockroachdb/cockroach/pull/99795 -[#99820]: https://github.com/cockroachdb/cockroach/pull/99820 -[#99935]: https://github.com/cockroachdb/cockroach/pull/99935 -[#99969]: https://github.com/cockroachdb/cockroach/pull/99969 -[2dc0229e5]: https://github.com/cockroachdb/cockroach/commit/2dc0229e5 -[ebdec3c98]: https://github.com/cockroachdb/cockroach/commit/ebdec3c98 diff --git a/src/current/_includes/releases/v23.1/v23.1.0-beta.1.md b/src/current/_includes/releases/v23.1/v23.1.0-beta.1.md index ab7a58dbe3b..c394a7e60ac 100644 --- a/src/current/_includes/releases/v23.1/v23.1.0-beta.1.md +++ b/src/current/_includes/releases/v23.1/v23.1.0-beta.1.md @@ -6,44 +6,44 @@ Release Date: April 13, 2023

{{ site.data.products.enterprise }} edition changes

-- The [Avro]({% link v23.1/changefeed-messages.md %}#avro) schema registry URI now allows an additional `timeout=T` query parameter, which allows you to change the default timeout for contacting the schema registry. [#99300][#99300] +- The [Avro]({% link v23.1/changefeed-messages.md %}#avro) schema registry URI now allows an additional `timeout=T` query parameter, which allows you to change the default timeout for contacting the schema registry. #99300

SQL language changes

-- Changed the [GC TTL]({% link v23.1/configure-replication-zones.md %}#gc-ttlseconds) on the SQL Stats table to 1h on CockroachDB {{ site.data.products.dedicated }} and Self-Hosted clusters. This change is not applicable to CockroachDB {{ site.data.products.serverless }} clusters. [#100359][#100359] -- When there is no data persisted, show the in-memory data. [#100505][#100505] -- Added two new [cluster settings]({% link v23.1/cluster-settings.md %}) that enable users to change the number of histogram samples and buckets collected when building histograms as part of table statistics collection. While the default values should work for most cases, it may be beneficial to increase the number of samples and buckets for very large tables to avoid creating a histogram that misses important values: [#100662][#100662] +- Changed the [GC TTL]({% link v23.1/configure-replication-zones.md %}#gc-ttlseconds) on the SQL Stats table to 1h on CockroachDB {{ site.data.products.dedicated }} and Self-Hosted clusters. This change is not applicable to CockroachDB {{ site.data.products.serverless }} clusters. #100359 +- When there is no data persisted, show the in-memory data. #100505 +- Added two new [cluster settings]({% link v23.1/cluster-settings.md %}) that enable users to change the number of histogram samples and buckets collected when building histograms as part of table statistics collection. While the default values should work for most cases, it may be beneficial to increase the number of samples and buckets for very large tables to avoid creating a histogram that misses important values: #100662 - `sql.stats.histogram_samples.count` - `sql.stats.histogram_buckets.count` -- Added two new table storage parameters, `sql_stats_histogram_buckets_count` and `sql_stats_histogram_samples_count`. 
These parameters can be used to override the [cluster settings]({% link v23.1/cluster-settings.md %}) `sql.stats.histogram_buckets.count` and `sql.stats.histogram_samples.count` at the table level, allowing you to change the number of histogram samples and buckets collected when building histograms as part of table statistics collection. While the default values should work for most cases, it may be beneficial to increase the number of samples and buckets for very large tables to avoid creating a histogram that misses important values. [#100662][#100662] +- Added two new table storage parameters, `sql_stats_histogram_buckets_count` and `sql_stats_histogram_samples_count`. These parameters can be used to override the [cluster settings]({% link v23.1/cluster-settings.md %}) `sql.stats.histogram_buckets.count` and `sql.stats.histogram_samples.count` at the table level, allowing you to change the number of histogram samples and buckets collected when building histograms as part of table statistics collection. While the default values should work for most cases, it may be beneficial to increase the number of samples and buckets for very large tables to avoid creating a histogram that misses important values. #100662

Operational changes

-- Introduced seven new timeseries [metrics]({% link v23.1/metrics.md %}) for better visibility into the behavior of storage engine iterators and their internals. [#100445][#100445] +- Introduced seven new timeseries [metrics]({% link v23.1/metrics.md %}) for better visibility into the behavior of storage engine iterators and their internals. #100445 - Added a new [metric]({% link v23.1/metrics.md %}) `range.snapshots.delegate.in-progress` and renamed two metrics: - `range.snapshot.delegate.successes` -> `range.snapshots.delegate.successes` - - `range.snapshot.delegate.failures` -> `range.snapshots.delegate.failures` [#100421][#100421] -- Added two new timeseries [metrics]({% link v23.1/metrics.md %}), providing some observability into the volume of keys preserved by open LSM snapshots: [#100878][#100878] + - `range.snapshot.delegate.failures` -> `range.snapshots.delegate.failures` #100421 +- Added two new timeseries [metrics]({% link v23.1/metrics.md %}), providing some observability into the volume of keys preserved by open LSM snapshots: #100878 - `storage.compactions.keys.pinned.count` - `storage.compactions.keys.pinned.bytes`

DB Console changes

-- Fixed an issue with properly rendering placeholders on the **Node Map** view for [insecure]({% link v23.1/start-a-local-cluster.md %}) clusters. [#100214][#100214] +- Fixed an issue with properly rendering placeholders on the **Node Map** view for [insecure]({% link v23.1/start-a-local-cluster.md %}) clusters. #100214

Bug fixes

-- Fixed a bug which could cause [`SHOW CLUSTER SETTING version`]({% link v23.1/show-cluster-setting.md %}) to hang and return an opaque error while cluster finalization is ongoing. [#100259][#100259] -- Fixed a bug that could cause internal errors and corrupt partial indexes when deleting rows with the `DELETE FROM .. USING` syntax. This bug is only present in alpha versions of v23.1.0. [#100307][#100307] -- The [**Hot Ranges** page]({% link v23.1/ui-hot-ranges-page.md %}) DB Console page would show hot ranges by CPU and not QPS (queries per second), depending on the value of the `kv.allocator.load_based_rebalancing.objective` [cluster setting]({% link v23.1/cluster-settings.md %}) (default `cpu`). Now the page will always collect statistics based on QPS. [#100211][#100211] -- In rare cases involving overload and schema changes, users could sometimes, transiently, see errors of the form `deadline below read timestamp is nonsensical; txn has would have no chance to commit`. These errors carried and internal pgcode and could not be retried. This form of error is now classified as a retriable error and will be retried automatically either by the client or internally. [#100256][#100256] -- Fixed a bug in the declarative schema changer in v23.1 where unique without index can be incorrectly added in tables with duplicate values if it was added with a [`ALTER TABLE ... ADD/DROP COLUMN`]({% link v23.1/alter-table.md %}) in one `ALTER TABLE` statement. [#100535][#100535] -- Fixed an issue where the `enforce_home_region` [session setting]({% link v23.1/set-vars.md %}) did not prevent a locality-optimized anti-join from looking up rows in remote regions. This bug is only present in alpha versions of v23.1.0. [#100735][#100735] +- Fixed a bug which could cause [`SHOW CLUSTER SETTING version`]({% link v23.1/show-cluster-setting.md %}) to hang and return an opaque error while cluster finalization is ongoing. 
#100259 +- Fixed a bug that could cause internal errors and corrupt partial indexes when deleting rows with the `DELETE FROM .. USING` syntax. This bug is only present in alpha versions of v23.1.0. #100307 +- The [**Hot Ranges** page]({% link v23.1/ui-hot-ranges-page.md %}) DB Console page would show hot ranges by CPU and not QPS (queries per second), depending on the value of the `kv.allocator.load_based_rebalancing.objective` [cluster setting]({% link v23.1/cluster-settings.md %}) (default `cpu`). Now the page will always collect statistics based on QPS. #100211 +- In rare cases involving overload and schema changes, users could sometimes, transiently, see errors of the form `deadline below read timestamp is nonsensical; txn has would have no chance to commit`. These errors carried an internal pgcode and could not be retried. This form of error is now classified as a retriable error and will be retried automatically either by the client or internally. #100256 +- Fixed a bug in the declarative schema changer in v23.1 where unique without index can be incorrectly added in tables with duplicate values if it was added with a [`ALTER TABLE ... ADD/DROP COLUMN`]({% link v23.1/alter-table.md %}) in one `ALTER TABLE` statement. #100535 +- Fixed an issue where the `enforce_home_region` [session setting]({% link v23.1/set-vars.md %}) did not prevent a locality-optimized anti-join from looking up rows in remote regions. This bug is only present in alpha versions of v23.1.0. #100735

Performance improvements

-- Audit logging should no longer incur extra latency when resolving table/view/sequence names. [#99548][#99548] -- The webhook sink is now able to handle a drastically higher maximum throughput by enabling the `changefeed.new_webhook_sink_enabled` [cluster setting]({% link v23.1/cluster-settings.md %}). [#100639][#100639] +- Audit logging should no longer incur extra latency when resolving table/view/sequence names. #99548 +- The webhook sink is now able to handle a drastically higher maximum throughput by enabling the `changefeed.new_webhook_sink_enabled` [cluster setting]({% link v23.1/cluster-settings.md %}). #100639
@@ -53,28 +53,3 @@ This release includes 116 merged PRs by 44 authors.
-[#100162]: https://github.com/cockroachdb/cockroach/pull/100162 -[#100211]: https://github.com/cockroachdb/cockroach/pull/100211 -[#100214]: https://github.com/cockroachdb/cockroach/pull/100214 -[#100256]: https://github.com/cockroachdb/cockroach/pull/100256 -[#100259]: https://github.com/cockroachdb/cockroach/pull/100259 -[#100307]: https://github.com/cockroachdb/cockroach/pull/100307 -[#100359]: https://github.com/cockroachdb/cockroach/pull/100359 -[#100421]: https://github.com/cockroachdb/cockroach/pull/100421 -[#100424]: https://github.com/cockroachdb/cockroach/pull/100424 -[#100445]: https://github.com/cockroachdb/cockroach/pull/100445 -[#100505]: https://github.com/cockroachdb/cockroach/pull/100505 -[#100535]: https://github.com/cockroachdb/cockroach/pull/100535 -[#100604]: https://github.com/cockroachdb/cockroach/pull/100604 -[#100628]: https://github.com/cockroachdb/cockroach/pull/100628 -[#100639]: https://github.com/cockroachdb/cockroach/pull/100639 -[#100662]: https://github.com/cockroachdb/cockroach/pull/100662 -[#100720]: https://github.com/cockroachdb/cockroach/pull/100720 -[#100735]: https://github.com/cockroachdb/cockroach/pull/100735 -[#100878]: https://github.com/cockroachdb/cockroach/pull/100878 -[#99300]: https://github.com/cockroachdb/cockroach/pull/99300 -[#99548]: https://github.com/cockroachdb/cockroach/pull/99548 -[43306383f]: https://github.com/cockroachdb/cockroach/commit/43306383f -[4fd02898c]: https://github.com/cockroachdb/cockroach/commit/4fd02898c -[925e1600a]: https://github.com/cockroachdb/cockroach/commit/925e1600a -[f27e6b2eb]: https://github.com/cockroachdb/cockroach/commit/f27e6b2eb diff --git a/src/current/_includes/releases/v23.1/v23.1.0-beta.2.md b/src/current/_includes/releases/v23.1/v23.1.0-beta.2.md index 5cd2883fa1a..cbeac55f552 100644 --- a/src/current/_includes/releases/v23.1/v23.1.0-beta.2.md +++ b/src/current/_includes/releases/v23.1/v23.1.0-beta.2.md @@ -6,31 +6,31 @@ Release Date: April 17, 2023

{{ site.data.products.enterprise }} edition changes

-- [CDC queries]({% link v23.1/create-changefeed.md %}) now support wrapped envelope with diff (`envelope='wrapped', diff`). [#101391][#101391] +- [CDC queries]({% link v23.1/create-changefeed.md %}) now support wrapped envelope with diff (`envelope='wrapped', diff`). #101391

SQL language changes

-- Added the [session variable]({% link v23.1/set-vars.md %}#supported-variables) `multiple_active_portals_enabled`. This setting is only for a [preview feature]({% link v23.1/cockroachdb-feature-availability.md %}). When set to `true`, it allows multiple portals to be open at the same time, with their execution interleaved with each other. In other words, these portals can be paused. The underlying statement for a pausable portal must be a read-only [`SELECT`]({% link v23.1/selection-queries.md %}) query without [sub-queries]({% link v23.1/subqueries.md %}) or postqueries (such as executed by [foreign key]({% link v23.1/foreign-key.md %}) checks), and such a portal is always executed with a [local query plan](https://www.cockroachlabs.com/docs/v23.1/architecture/sql-layer#distsql). [#101026][#101026] +- Added the [session variable]({% link v23.1/set-vars.md %}#supported-variables) `multiple_active_portals_enabled`. This setting is only for a [preview feature]({% link v23.1/cockroachdb-feature-availability.md %}). When set to `true`, it allows multiple portals to be open at the same time, with their execution interleaved with each other. In other words, these portals can be paused. The underlying statement for a pausable portal must be a read-only [`SELECT`]({% link v23.1/selection-queries.md %}) query without [sub-queries]({% link v23.1/subqueries.md %}) or postqueries (such as executed by [foreign key]({% link v23.1/foreign-key.md %}) checks), and such a portal is always executed with a [local query plan](https://www.cockroachlabs.com/docs/v23.1/architecture/sql-layer#distsql). #101026

DB Console changes

-- Update sort label on *Search Criteria* to match the name on the table columns on the [Statements]({% link v23.1/ui-statements-page.md %}) and [Transactions]({% link v23.1/ui-transactions-page.md %}) pages. [#101126][#101126] -- By default, we now show the `Application Name` column in the fingerprints overview pages. [Statement fingerprints]({% link v23.1/ui-statements-page.md %}#statement-fingerprints-view) and [Transaction fingerprints]({% link v23.1/ui-transactions-page.md %}#transaction-fingerprints-view) will be displayed per application on the overview pages rather than grouped into a single fingerprint ID. [#101235][#101235] -- When going from the Fingerprint Overview pages or the Insight Details pages to the Fingerprint Details page for [statements]({% link v23.1/ui-statements-page.md %}#statement-fingerprints-view) or [transactions]({% link v23.1/ui-transactions-page.md %}#transaction-fingerprints-view), the details page will fetch data for the statement with the provided application name. For overview pages, this is the app name of the selected row. For insight details, it is the app of the execution that generated the insight. [#101235][#101235] +- Update sort label on *Search Criteria* to match the name on the table columns on the [Statements]({% link v23.1/ui-statements-page.md %}) and [Transactions]({% link v23.1/ui-transactions-page.md %}) pages. #101126 +- By default, we now show the `Application Name` column in the fingerprints overview pages. [Statement fingerprints]({% link v23.1/ui-statements-page.md %}#statement-fingerprints-view) and [Transaction fingerprints]({% link v23.1/ui-transactions-page.md %}#transaction-fingerprints-view) will be displayed per application on the overview pages rather than grouped into a single fingerprint ID. 
#101235 +- When going from the Fingerprint Overview pages or the Insight Details pages to the Fingerprint Details page for [statements]({% link v23.1/ui-statements-page.md %}#statement-fingerprints-view) or [transactions]({% link v23.1/ui-transactions-page.md %}#transaction-fingerprints-view), the details page will fetch data for the statement with the provided application name. For overview pages, this is the app name of the selected row. For insight details, it is the app of the execution that generated the insight. #101235

Bug fixes

-- Fixed a bug so that the text search `@@` [operator]({% link v23.1/functions-and-operators.md %}) ("matches") can work with variable expressions. Fixed a bug where incorrect results were returned when one of the arguments was a [`TEXT`]({% link v23.1/string.md %}) expression and the other argument was a `TEXT` or `TSQuery` expression. [#100918][#100918] -- Fixed a bug where running [`DROP COLUMN ... CASCADE`]({% link v23.1/alter-table.md %}#drop-column) when that column is used in an [index]({% link v23.1/indexes.md %}) that includes other columns caused a panic. [#100856][#100856] -- Fixed a rare race condition on [node startup]({% link v23.1/cockroach-start.md %}) that could cause an `invalid memory address or nil pointer dereference` error. [#100626][#100626] -- Fixed a bug that was causing nodes running on Windows to crash on startup. This bug only existed in v23.1 alphas. [#101091][#101091] -- Fixed a rare condition that could allow a [transaction]({% link v23.1/transactions.md %}) to get stuck indefinitely waiting on a released row-level [lock](https://www.cockroachlabs.com/docs/v23.1/architecture/transaction-layer#concurrency-control) if the per-range lock count limit was exceeded while the transaction was waiting on another lock. [#100946][#100946] -- Fixed a rare internal error in the [optimizer]({% link v23.1/cost-based-optimizer.md %}) that has existed since before version v22.1, which could occur while enforcing orderings between SQL operators. [#101355][#101355] -- Fixed a bug so that the `crdb_internal.deserialize_session` internal function works properly with prepared statements that have more param type hints than params. Before this bugfix, deserializing a [session]({% link v23.1/show-sessions.md %}) containing a prepared statement with more parameter type hints than parameters would panic. For example: `PREPARE p (int) AS SELECT 1`. These extra type hints are now ignored by `crdb_internal.deserialize_session`. 
[#101368][#101368] +- Fixed a bug so that the text search `@@` [operator]({% link v23.1/functions-and-operators.md %}) ("matches") can work with variable expressions. Fixed a bug where incorrect results were returned when one of the arguments was a [`TEXT`]({% link v23.1/string.md %}) expression and the other argument was a `TEXT` or `TSQuery` expression. #100918 +- Fixed a bug where running [`DROP COLUMN ... CASCADE`]({% link v23.1/alter-table.md %}#drop-column) when that column is used in an [index]({% link v23.1/indexes.md %}) that includes other columns caused a panic. #100856 +- Fixed a rare race condition on [node startup]({% link v23.1/cockroach-start.md %}) that could cause an `invalid memory address or nil pointer dereference` error. #100626 +- Fixed a bug that was causing nodes running on Windows to crash on startup. This bug only existed in v23.1 alphas. #101091 +- Fixed a rare condition that could allow a [transaction]({% link v23.1/transactions.md %}) to get stuck indefinitely waiting on a released row-level [lock](https://www.cockroachlabs.com/docs/v23.1/architecture/transaction-layer#concurrency-control) if the per-range lock count limit was exceeded while the transaction was waiting on another lock. #100946 +- Fixed a rare internal error in the [optimizer]({% link v23.1/cost-based-optimizer.md %}) that has existed since before version v22.1, which could occur while enforcing orderings between SQL operators. #101355 +- Fixed a bug so that the `crdb_internal.deserialize_session` internal function works properly with prepared statements that have more param type hints than params. Before this bugfix, deserializing a [session]({% link v23.1/show-sessions.md %}) containing a prepared statement with more parameter type hints than parameters would panic. For example: `PREPARE p (int) AS SELECT 1`. These extra type hints are now ignored by `crdb_internal.deserialize_session`. #101368

Performance improvements

-- Google Cloud Pub/Sub sink [changefeeds]({% link v23.1/create-changefeed.md %}) can now support higher throughputs by enabling the `changefeed.new_pubsub_sink_enabled` [cluster setting]({% link v23.1/cluster-settings.md %}). Enabling this setting will cause changefeeds to use a newer Pub/Sub sink, which uses capitalized top-level fields in the message: `{Key: ..., Value: ..., Topic: ...}`. As a result, you may need to reconfigure downstream systems to parse the new message format. If you do not enable `changefeed.new_pubsub_sink_enabled`, the top-level message fields remain all lowercase: `{key: ..., value: ..., topic: ...}`. [#100930][#100930] +- Google Cloud Pub/Sub sink [changefeeds]({% link v23.1/create-changefeed.md %}) can now support higher throughputs by enabling the `changefeed.new_pubsub_sink_enabled` [cluster setting]({% link v23.1/cluster-settings.md %}). Enabling this setting will cause changefeeds to use a newer Pub/Sub sink, which uses capitalized top-level fields in the message: `{Key: ..., Value: ..., Topic: ...}`. As a result, you may need to reconfigure downstream systems to parse the new message format. If you do not enable `changefeed.new_pubsub_sink_enabled`, the top-level message fields remain all lowercase: `{key: ..., value: ..., topic: ...}`. #100930
@@ -40,15 +40,3 @@ This release includes 57 merged PRs by 29 authors.
-[#100626]: https://github.com/cockroachdb/cockroach/pull/100626 -[#100856]: https://github.com/cockroachdb/cockroach/pull/100856 -[#100918]: https://github.com/cockroachdb/cockroach/pull/100918 -[#100930]: https://github.com/cockroachdb/cockroach/pull/100930 -[#100946]: https://github.com/cockroachdb/cockroach/pull/100946 -[#101026]: https://github.com/cockroachdb/cockroach/pull/101026 -[#101091]: https://github.com/cockroachdb/cockroach/pull/101091 -[#101126]: https://github.com/cockroachdb/cockroach/pull/101126 -[#101235]: https://github.com/cockroachdb/cockroach/pull/101235 -[#101355]: https://github.com/cockroachdb/cockroach/pull/101355 -[#101368]: https://github.com/cockroachdb/cockroach/pull/101368 -[#101391]: https://github.com/cockroachdb/cockroach/pull/101391 diff --git a/src/current/_includes/releases/v23.1/v23.1.0-beta.3.md b/src/current/_includes/releases/v23.1/v23.1.0-beta.3.md index 714b1cf739d..796246b32a0 100644 --- a/src/current/_includes/releases/v23.1/v23.1.0-beta.3.md +++ b/src/current/_includes/releases/v23.1/v23.1.0-beta.3.md @@ -6,36 +6,36 @@ Release Date: April 24, 2023

Backward-incompatible changes

-- Previously, if a user specified a [`search_path`]({% link v23.1/sql-name-resolution.md %}#current-schema) in the connection string parameters, it would always be treated as case sensitive. Now, in order to have the schema names in the `search_path` respect case, the user must include double quotes around the name. [#101492][#101492] +- Previously, if a user specified a [`search_path`]({% link v23.1/sql-name-resolution.md %}#current-schema) in the connection string parameters, it would always be treated as case sensitive. Now, in order to have the schema names in the `search_path` respect case, the user must include double quotes around the name. #101492

SQL language changes

-- Statements of type [`SET ...`]({% link v23.1/set-vars.md %}) are no longer displayed on the [Insights page]({% link v23.1/ui-insights-page.md %}). [#101672][#101672] +- Statements of type [`SET ...`]({% link v23.1/set-vars.md %}) are no longer displayed on the [Insights page]({% link v23.1/ui-insights-page.md %}). #101672

Operational changes

-- The amount of [replication](https://www.cockroachlabs.com/docs/v23.1/architecture/replication-layer) traffic in flight from a single [Raft leader](https://www.cockroachlabs.com/docs/v23.1/architecture/reads-and-writes-overview#architecture-raft-leader) to a follower has been reduced from 256 MB to 32 MB. This reduces the chance of running out of memory during bulk write operations. This can be controlled via the environment variable `COCKROACH_RAFT_MAX_INFLIGHT_BYTES`. [#101508][#101508] +- The amount of [replication](https://www.cockroachlabs.com/docs/v23.1/architecture/replication-layer) traffic in flight from a single [Raft leader](https://www.cockroachlabs.com/docs/v23.1/architecture/reads-and-writes-overview#architecture-raft-leader) to a follower has been reduced from 256 MB to 32 MB. This reduces the chance of running out of memory during bulk write operations. This can be controlled via the environment variable `COCKROACH_RAFT_MAX_INFLIGHT_BYTES`. #101508

DB Console changes

-- When a user activates diagnostics on the [SQL Activity]({% link v23.1/ui-statements-page.md %}#statement-fingerprints-view) page, there is now an option to select the trace rate for statement diagnostics collection. [#101762][#101762] -- Updated the **Network Latency** side navigation name and Network Diagnostics page title to Network. Updated the Advanced Debugging page title to [Advanced Debug]({% link v23.1/ui-debug-pages.md %}). [#101761][#101761] -- Introduced a "draining node" **Node Status** on the [Cluster Overview panel]({% link v23.1/ui-cluster-overview-page.md %}#cluster-overview-panel) so that "draining node" has it own value instead of counting as a "dead node". [#101794][#101794] -- Added a time scale selector to the [**Diagnostics** tab]({% link v23.1/ui-statements-page.md %}#diagnostics) under the Statement Details page. This allows users to view bundles from the selected period only. [#101803][#101803] +- When a user activates diagnostics on the [SQL Activity]({% link v23.1/ui-statements-page.md %}#statement-fingerprints-view) page, there is now an option to select the trace rate for statement diagnostics collection. #101762 +- Updated the **Network Latency** side navigation name and Network Diagnostics page title to Network. Updated the Advanced Debugging page title to [Advanced Debug]({% link v23.1/ui-debug-pages.md %}). #101761 +- Introduced a "draining node" **Node Status** on the [Cluster Overview panel]({% link v23.1/ui-cluster-overview-page.md %}#cluster-overview-panel) so that "draining node" has its own value instead of counting as a "dead node". #101794 +- Added a time scale selector to the [**Diagnostics** tab]({% link v23.1/ui-statements-page.md %}#diagnostics) under the Statement Details page. This allows users to view bundles from the selected period only. #101803

Bug fixes

-- Fixed a bug that caused internal errors when executing [user-defined functions]({% link v23.1/user-defined-functions.md %}) with empty bodies. This bug was only present in alpha pre-release versions of 23.1. [#101383][#101383] -- Fixed a bug that caused a [restore]({% link v23.1/restore.md %}) to fail occasionally due to incorrect schema ID resolution when restoring a [backup]({% link v23.1/backup.md %}) with [user-defined schemas]({% link v23.1/schema-design-schema.md %}). [#101310][#101310] -- Fixed a bug in parsing a [`search_path`]({% link v23.1/sql-name-resolution.md %}#current-schema) with a quote in it when specified in the [connection string]({% link v23.1/connection-parameters.md %}). Also, the `search_path` [session variable]({% link v23.1/set-vars.md %}) now supports schema names that have commas in them. [#101492][#101492] -- Fixed a bug that has existed since [user-defined functions]({% link v23.1/user-defined-functions.md %}) were introduced that could cause a function call to resolve to the wrong function after changes to the [schema search path]({% link v23.1/sql-name-resolution.md %}#current-schema). [#101513][#101513] -- Previously, CockroachDB v23.1 alpha and beta versions would panic on [`cockroach start`]({% link v23.1/cockroach-start.md %}) command when the `GOMEMLIMIT` environment variable was set and the `--max-go-memory` flag wasn't specified. This is now fixed. [#101565][#101565] -- Fixed a bug that caused errors in test builds and potentially incorrect results in release builds when invoking a [user-defined function]({% link v23.1/user-defined-functions.md %}) with a [subquery]({% link v23.1/subqueries.md %}) argument. This bug was only present in v23.1 alpha versions. 
[#101639][#101639] -- Fixed an internal error that could occur when the `enforce_home_region` [session setting]({% link v23.1/set-vars.md %}) is on and the input to the [lookup join]({% link v23.1/joins.md %}#lookup-joins) is a `SELECT` of [scalar expressions]({% link v23.1/scalar-expressions.md %}) (e.g., `1+1`). Also, [subqueries]({% link v23.1/subqueries.md %}) with no [home region]({% link v23.1/multiregion-overview.md %}#table-localities) now error out with `enforce_home_region` set. [#101504][#101504] -- Point [inserts]({% link v23.1/insert.md %}) and [updates]({% link v23.1/update.md %}) that write to a remote region of a table created with the [`REGIONAL BY ROW AS`]({% link v23.1/create-table.md %}#create-a-table-with-a-regional-by-row-locality-using-a-custom-region-column) clause will now error out. [#101708][#101708] -- Fixed a bug in the [built-in functions]({% link v23.1/functions-and-operators.md %}) `pg_get_indexdef` and `col_description` that could cause the functions to return errors if the user created tables named `pg_indexes` or `pg_attribute`. Or, if the user created a schema named `system` with a table named `comments`. This bug was only present in pre-release versions of v23.1. [#101690][#101690] -- Fixed a bug where, when CockroachDB failed to retrieve [contention information]({% link v23.1/performance-best-practices-overview.md %}#transaction-contention), the full [Insights page]({% link v23.1/ui-insights-page.md %}) would return an error. Now the Insights page will load even when there is an issue with decoding contention information. [#101784][#101784] -- Fixed a bug to ensure that the [list of fingerprints]({% link v23.1/ui-statements-page.md %}#statement-fingerprints-view) used per [index]({% link v23.1/indexes.md %}) is shown even when there is a max-size limit on the SQL API. 
[#101785][#101785] +- Fixed a bug that caused internal errors when executing [user-defined functions]({% link v23.1/user-defined-functions.md %}) with empty bodies. This bug was only present in alpha pre-release versions of 23.1. #101383 +- Fixed a bug that caused a [restore]({% link v23.1/restore.md %}) to fail occasionally due to incorrect schema ID resolution when restoring a [backup]({% link v23.1/backup.md %}) with [user-defined schemas]({% link v23.1/schema-design-schema.md %}). #101310 +- Fixed a bug in parsing a [`search_path`]({% link v23.1/sql-name-resolution.md %}#current-schema) with a quote in it when specified in the [connection string]({% link v23.1/connection-parameters.md %}). Also, the `search_path` [session variable]({% link v23.1/set-vars.md %}) now supports schema names that have commas in them. #101492 +- Fixed a bug that has existed since [user-defined functions]({% link v23.1/user-defined-functions.md %}) were introduced that could cause a function call to resolve to the wrong function after changes to the [schema search path]({% link v23.1/sql-name-resolution.md %}#current-schema). #101513 +- Previously, CockroachDB v23.1 alpha and beta versions would panic on [`cockroach start`]({% link v23.1/cockroach-start.md %}) command when the `GOMEMLIMIT` environment variable was set and the `--max-go-memory` flag wasn't specified. This is now fixed. #101565 +- Fixed a bug that caused errors in test builds and potentially incorrect results in release builds when invoking a [user-defined function]({% link v23.1/user-defined-functions.md %}) with a [subquery]({% link v23.1/subqueries.md %}) argument. This bug was only present in v23.1 alpha versions. #101639 +- Fixed an internal error that could occur when the `enforce_home_region` [session setting]({% link v23.1/set-vars.md %}) is on and the input to the [lookup join]({% link v23.1/joins.md %}#lookup-joins) is a `SELECT` of [scalar expressions]({% link v23.1/scalar-expressions.md %}) (e.g., `1+1`). 
Also, [subqueries]({% link v23.1/subqueries.md %}) with no [home region]({% link v23.1/multiregion-overview.md %}#table-localities) now error out with `enforce_home_region` set. #101504 +- Point [inserts]({% link v23.1/insert.md %}) and [updates]({% link v23.1/update.md %}) that write to a remote region of a table created with the [`REGIONAL BY ROW AS`]({% link v23.1/create-table.md %}#create-a-table-with-a-regional-by-row-locality-using-a-custom-region-column) clause will now error out. #101708 +- Fixed a bug in the [built-in functions]({% link v23.1/functions-and-operators.md %}) `pg_get_indexdef` and `col_description` that could cause the functions to return errors if the user created tables named `pg_indexes` or `pg_attribute`. Or, if the user created a schema named `system` with a table named `comments`. This bug was only present in pre-release versions of v23.1. #101690 +- Fixed a bug where, when CockroachDB failed to retrieve [contention information]({% link v23.1/performance-best-practices-overview.md %}#transaction-contention), the full [Insights page]({% link v23.1/ui-insights-page.md %}) would return an error. Now the Insights page will load even when there is an issue with decoding contention information. #101784 +- Fixed a bug to ensure that the [list of fingerprints]({% link v23.1/ui-statements-page.md %}#statement-fingerprints-view) used per [index]({% link v23.1/indexes.md %}) is shown even when there is a max-size limit on the SQL API. #101785

Contributors

@@ -43,21 +43,3 @@ This release includes 36 merged PRs by 15 authors. -[#101310]: https://github.com/cockroachdb/cockroach/pull/101310 -[#101383]: https://github.com/cockroachdb/cockroach/pull/101383 -[#101432]: https://github.com/cockroachdb/cockroach/pull/101432 -[#101492]: https://github.com/cockroachdb/cockroach/pull/101492 -[#101504]: https://github.com/cockroachdb/cockroach/pull/101504 -[#101508]: https://github.com/cockroachdb/cockroach/pull/101508 -[#101513]: https://github.com/cockroachdb/cockroach/pull/101513 -[#101565]: https://github.com/cockroachdb/cockroach/pull/101565 -[#101639]: https://github.com/cockroachdb/cockroach/pull/101639 -[#101672]: https://github.com/cockroachdb/cockroach/pull/101672 -[#101690]: https://github.com/cockroachdb/cockroach/pull/101690 -[#101708]: https://github.com/cockroachdb/cockroach/pull/101708 -[#101761]: https://github.com/cockroachdb/cockroach/pull/101761 -[#101762]: https://github.com/cockroachdb/cockroach/pull/101762 -[#101784]: https://github.com/cockroachdb/cockroach/pull/101784 -[#101785]: https://github.com/cockroachdb/cockroach/pull/101785 -[#101794]: https://github.com/cockroachdb/cockroach/pull/101794 -[#101803]: https://github.com/cockroachdb/cockroach/pull/101803 diff --git a/src/current/_includes/releases/v23.1/v23.1.0-rc.1.md b/src/current/_includes/releases/v23.1/v23.1.0-rc.1.md index 723327689d9..e23fd591222 100644 --- a/src/current/_includes/releases/v23.1/v23.1.0-rc.1.md +++ b/src/current/_includes/releases/v23.1/v23.1.0-rc.1.md @@ -6,39 +6,39 @@ Release Date: May 2, 2023

SQL language changes

-- Two views have been added to the [`crdb_internal` system catalog]({% link v23.1/crdb-internal.md %}): `crdb_internal.statement_activity` surfaces data in the persisted `system.statement_activity` table, and `crdb_internal.transaction_activity` surfaces the `system.transaction_activity` table. [#102047][#102047] -- Span statistics are now unavailable on mixed-version clusters. [#101878][#101878] +- Two views have been added to the [`crdb_internal` system catalog]({% link v23.1/crdb-internal.md %}): `crdb_internal.statement_activity` surfaces data in the persisted `system.statement_activity` table, and `crdb_internal.transaction_activity` surfaces the `system.transaction_activity` table. #102047 +- Span statistics are now unavailable on mixed-version clusters. #101878

DB Console changes

-- Timestamps are now displayed in DB Console in the timezone specified in the `ui.display_timezone` [cluster setting]({% link v23.1/cluster-settings.md %}), if set. Currently supported timezones are `Coordinated Universal Time` and `America/New_York`. [#102241][#102241] -- DB Console now limits historical data for SQL Activity pages to 200000 rows by default, based on the new `sql.stats.activity.persisted_rows.max` [cluster setting]({% link v23.1/cluster-settings.md %}). A background job automatically prunes the system tables `transaction_activity` and `statement_activity`. [#101995][#101995] -- Improved performance when viewing the [`crdb_internal.transaction_contention_events` table]({% link v23.1/crdb-internal.md %}) in DB Console if there are a large number of rows. [#101871][#101871] +- Timestamps are now displayed in DB Console in the timezone specified in the `ui.display_timezone` [cluster setting]({% link v23.1/cluster-settings.md %}), if set. Currently supported timezones are `Coordinated Universal Time` and `America/New_York`. #102241 +- DB Console now limits historical data for SQL Activity pages to 200000 rows by default, based on the new `sql.stats.activity.persisted_rows.max` [cluster setting]({% link v23.1/cluster-settings.md %}). A background job automatically prunes the system tables `transaction_activity` and `statement_activity`. #101995 +- Improved performance when viewing the [`crdb_internal.transaction_contention_events` table]({% link v23.1/crdb-internal.md %}) in DB Console if there are a large number of rows. #101871

Bug fixes

-- Fixed a bug introduced prior to v22.1 where an `EXPORT` statement could be incorrectly evaluated and result in a node panic or incorrect query results if it had projection or rendering on top of the `EXPORT`, such as the statement `WITH cte AS (EXPORT INTO CSV 'nodelocal://1/export1/' FROM SELECT * FROM t) SELECT filename FROM cte;`. Only the presentation of the query result was affected, not the exported data. [#101806][#101806] -- The descriptions of the `rebalancing.readbytespersecond` and `rebalancing.writebytespersecond` metrics now correctly reference bytes read and bytes written, respectively. [#101710][#101710] -- Fixed a rare bug introduced prior to v22.1 where distributed plans could cause the graceful drain of a node to become stuck retrying forever during [node shutdown]({% link v23.1/node-shutdown.md %}). This bug leads to errors like `drain details: distSQL execution flows:`, together with a non-zero number of flows that is not reducing over a long period of time. [#101884][#101884] -- Fixed a bug where a [`RESTORE`]({% link v23.1/restore.md %}) operation with `skip_localities_check` could fail with errors if regions were missing on a cluster. [#101798][#101798] -- Fixed a bug introduced in testing releases of v23.1 that could cause incorrect results for queries with `STRICT` [user-defined functions]({% link v23.1/user-defined-functions.md %}). [#101951][#101951] -- Fixed a rare bug that could cause keys to be unexpectedly deleted locally within a store by replica rebalancing during a write heavy workload. [#102166][#102166] -- Fixed a bug where a failed or cancelled [`IMPORT`]({% link v23.1/import.md %}) operation could leave some of the imported rows behind after it was cancelled, if the writing processes were slow enough to continue writing after the cleanup process started. 
[#102246][#102246] -- Fixed a bug in the behavior of the `enforce_home_region` [session variable]({% link v23.1/set-vars.md %}) that may have allowed a hash join to be favored over a lookup join, or failed to error out remote accesses done by uniqueness checks for mutations on [`REGIONAL BY ROW` tables]({% link v23.1/regional-tables.md %}). [#102287][#102287] -- Fixed a bug introduced in testing releases of v23.1 where a node could crash when evaluating a [`COPY`]({% link v23.1/copy.md %}) command when the schema had `INT2` or `INT4` type. [#102307][#102307] -- Fixed a bug where a backup of a key's revision history may not correctly be restored to the proper revision of the key if it is split across multiple sorted string tables. [#102342][#102342] -- Fixed a bug introduced in testing releases of v23.1 where a user could be prevented from logging in or viewing or changing [`GRANT`s]({% link v23.1/grant.md %}) if the cluster had a long period of inactivity. [#102488][#102488] -- Fixed a bug where a node that transferred a lease away and missed a liveness heartbeat could then erroneously update the closed timestamp during the stasis period of its liveness. This could lead to closed timestamp invariant violation and could cause the node to crash. In extreme cases, this could lead to inconsistencies in read-only queries. [#102602][#102602] -- Fixed a bug that caused memory leaks when a closed or cancelled connection was released. [#101810][#101810] +- Fixed a bug introduced prior to v22.1 where an `EXPORT` statement could be incorrectly evaluated and result in a node panic or incorrect query results if it had projection or rendering on top of the `EXPORT`, such as the statement `WITH cte AS (EXPORT INTO CSV 'nodelocal://1/export1/' FROM SELECT * FROM t) SELECT filename FROM cte;`. Only the presentation of the query result was affected, not the exported data. 
#101806 +- The descriptions of the `rebalancing.readbytespersecond` and `rebalancing.writebytespersecond` metrics now correctly reference bytes read and bytes written, respectively. #101710 +- Fixed a rare bug introduced prior to v22.1 where distributed plans could cause the graceful drain of a node to become stuck retrying forever during [node shutdown]({% link v23.1/node-shutdown.md %}). This bug leads to errors like `drain details: distSQL execution flows:`, together with a non-zero number of flows that is not reducing over a long period of time. #101884 +- Fixed a bug where a [`RESTORE`]({% link v23.1/restore.md %}) operation with `skip_localities_check` could fail with errors if regions were missing on a cluster. #101798 +- Fixed a bug introduced in testing releases of v23.1 that could cause incorrect results for queries with `STRICT` [user-defined functions]({% link v23.1/user-defined-functions.md %}). #101951 +- Fixed a rare bug that could cause keys to be unexpectedly deleted locally within a store by replica rebalancing during a write heavy workload. #102166 +- Fixed a bug where a failed or cancelled [`IMPORT`]({% link v23.1/import.md %}) operation could leave some of the imported rows behind after it was cancelled, if the writing processes were slow enough to continue writing after the cleanup process started. #102246 +- Fixed a bug in the behavior of the `enforce_home_region` [session variable]({% link v23.1/set-vars.md %}) that may have allowed a hash join to be favored over a lookup join, or failed to error out remote accesses done by uniqueness checks for mutations on [`REGIONAL BY ROW` tables]({% link v23.1/regional-tables.md %}). #102287 +- Fixed a bug introduced in testing releases of v23.1 where a node could crash when evaluating a [`COPY`]({% link v23.1/copy.md %}) command when the schema had `INT2` or `INT4` type. 
#102307 +- Fixed a bug where a backup of a key's revision history might not be correctly restored to the proper revision of the key if it is split across multiple sorted string tables. #102342 +- Fixed a bug introduced in testing releases of v23.1 where a user could be prevented from logging in or viewing or changing [`GRANT`s]({% link v23.1/grant.md %}) if the cluster had a long period of inactivity. #102488 +- Fixed a bug where a node that transferred a lease away and missed a liveness heartbeat could then erroneously update the closed timestamp during the stasis period of its liveness. This could lead to a closed timestamp invariant violation and could cause the node to crash. In extreme cases, this could lead to inconsistencies in read-only queries. #102602 +- Fixed a bug that caused memory leaks when a closed or cancelled connection was released. #101810

Performance improvements

-- [SQL Activity]({% link v23.1/ui-sql-dashboard.md %}) dashboards now default to using a table optimized with the top data for the most used cases. Otherwise, they use persisted data if available, and in-memory data otherwise. [#102310][#102310] -- Static erroring of some locality-optimized lookup [joins]({% link v23.1/joins.md %}) is now handled dynamically during query execution. [#102287][#102287] +- [SQL Activity]({% link v23.1/ui-sql-dashboard.md %}) dashboards now default to using a table optimized with the top data for the most used cases. Otherwise, they use persisted data if available, and in-memory data otherwise. #102310 +- Static erroring of some locality-optimized lookup [joins]({% link v23.1/joins.md %}) is now handled dynamically during query execution. #102287

Miscellaneous

-- Two new statistics help to track the efficiency of snapshot transfers and to keep the number of failures due to system-level races as low as possible. `range.snapshots.recv-failed` shows the number of snapshot send attempts that are initiated but not accepted by the recipient. `range.snapshots.recv-unusable` shows the number of snapshots that were fully transmitted but not used. [#101835][#101835] +- Two new statistics help to track the efficiency of snapshot transfers and to keep the number of failures due to system-level races as low as possible. `range.snapshots.recv-failed` shows the number of snapshot send attempts that are initiated but not accepted by the recipient. `range.snapshots.recv-unusable` shows the number of snapshots that were fully transmitted but not used. #101835
@@ -51,25 +51,3 @@ We would like to thank the following contributors from the CockroachDB community
-[#101710]: https://github.com/cockroachdb/cockroach/pull/101710 -[#101798]: https://github.com/cockroachdb/cockroach/pull/101798 -[#101806]: https://github.com/cockroachdb/cockroach/pull/101806 -[#101810]: https://github.com/cockroachdb/cockroach/pull/101810 -[#101835]: https://github.com/cockroachdb/cockroach/pull/101835 -[#101871]: https://github.com/cockroachdb/cockroach/pull/101871 -[#101878]: https://github.com/cockroachdb/cockroach/pull/101878 -[#101884]: https://github.com/cockroachdb/cockroach/pull/101884 -[#101951]: https://github.com/cockroachdb/cockroach/pull/101951 -[#101995]: https://github.com/cockroachdb/cockroach/pull/101995 -[#101997]: https://github.com/cockroachdb/cockroach/pull/101997 -[#102047]: https://github.com/cockroachdb/cockroach/pull/102047 -[#102166]: https://github.com/cockroachdb/cockroach/pull/102166 -[#102241]: https://github.com/cockroachdb/cockroach/pull/102241 -[#102246]: https://github.com/cockroachdb/cockroach/pull/102246 -[#102287]: https://github.com/cockroachdb/cockroach/pull/102287 -[#102307]: https://github.com/cockroachdb/cockroach/pull/102307 -[#102310]: https://github.com/cockroachdb/cockroach/pull/102310 -[#102342]: https://github.com/cockroachdb/cockroach/pull/102342 -[#102488]: https://github.com/cockroachdb/cockroach/pull/102488 -[#102602]: https://github.com/cockroachdb/cockroach/pull/102602 -[89e1a0086]: https://github.com/cockroachdb/cockroach/commit/89e1a0086 diff --git a/src/current/_includes/releases/v23.1/v23.1.0-rc.2.md b/src/current/_includes/releases/v23.1/v23.1.0-rc.2.md index 252ecfdf4a9..eca1eb0e135 100644 --- a/src/current/_includes/releases/v23.1/v23.1.0-rc.2.md +++ b/src/current/_includes/releases/v23.1/v23.1.0-rc.2.md @@ -6,7 +6,7 @@ Release Date: May 4, 2023

Bug fixes

-- Fixed a bug introduced in versions 22.1.19, 22.2.8, and pre-release versions of 23.1 that could cause queries to return spurious insufficient [privilege](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization) errors. For the bug to occur, two databases would need to have duplicate tables, each with a [foreign key]({% link v23.1/foreign-key.md %}) reference to another table. The error would then occur if the same SQL string was executed against both databases concurrently by users that have [privileges](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization) over only one of the tables. [#102651][#102651] +- Fixed a bug introduced in versions 22.1.19, 22.2.8, and pre-release versions of 23.1 that could cause queries to return spurious insufficient [privilege](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization) errors. For the bug to occur, two databases would need to have duplicate tables, each with a [foreign key]({% link v23.1/foreign-key.md %}) reference to another table. The error would then occur if the same SQL string was executed against both databases concurrently by users that have [privileges](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization) over only one of the tables. #102651
@@ -16,4 +16,3 @@ This release includes 4 merged PRs by 4 authors.
-[#102651]: https://github.com/cockroachdb/cockroach/pull/102651 diff --git a/src/current/_includes/releases/v23.1/v23.1.0.md b/src/current/_includes/releases/v23.1/v23.1.0.md index 80e320ac0b5..7e06952234b 100644 --- a/src/current/_includes/releases/v23.1/v23.1.0.md +++ b/src/current/_includes/releases/v23.1/v23.1.0.md @@ -427,8 +427,8 @@ This change will only apply to new clusters. Existing clusters will retain the 2 Before [upgrading to CockroachDB v23.1]({% link v23.1/upgrade-cockroach-version.md %}), be sure to review the following backward-incompatible changes, as well as key cluster setting changes, and adjust your deployment as necessary. -- Replaced the `cdc_prev()` [function]({% link v23.1/functions-and-operators.md %}) in favor of a `cdc_prev` tuple. This is an incompatible change that may break [changefeeds]({% link v23.1/change-data-capture-overview.md %}) that use the previous `cdc_prev()` function. [#85177][#85177] -- [`SHOW RANGES FOR TABLE`]({% link v23.1/show-ranges.md %}) now includes rows for all indexes that support the table. Prior to this change, `SHOW RANGES FOR TABLE foo` was an alias for `SHOW RANGES FOR INDEX foo@primary`. This was causing confusion, as it would miss data for secondary indexes. It is still possible to filter to just the primary index using `SHOW RANGES FOR INDEX foo@primary`. The statement output now also includes the index name. [#93545][#93545] +- Replaced the `cdc_prev()` [function]({% link v23.1/functions-and-operators.md %}) in favor of a `cdc_prev` tuple. This is an incompatible change that may break [changefeeds]({% link v23.1/change-data-capture-overview.md %}) that use the previous `cdc_prev()` function. #85177 +- [`SHOW RANGES FOR TABLE`]({% link v23.1/show-ranges.md %}) now includes rows for all indexes that support the table. Prior to this change, `SHOW RANGES FOR TABLE foo` was an alias for `SHOW RANGES FOR INDEX foo@primary`. This was causing confusion, as it would miss data for secondary indexes. 
It is still possible to filter to just the primary index using `SHOW RANGES FOR INDEX foo@primary`. The statement output now also includes the index name. #93545 - CockroachDB now supports sharing storage ranges across multiple indexes/tables. This behavior is not enabled by default in v23.1, but will be enabled by default in a future release. When the behavior is enabled, there will no longer be a guarantee that there is at most one SQL object (e.g., table/index/sequence/materialized view) per storage range. As a result, the columns `table_id`, `database_name`, `schema_name`, `table_name` and `index_name` in `crdb_internal.ranges` and `.ranges_no_leases` will become meaningless, since a range will no longer be attributed to a single table/index. In v23.1, the default behavior of [`SHOW RANGES`]({% link v23.1/show-ranges.md %}) is retained, but you should consider setting the [`sql.show_ranges_deprecated_behavior.enabled`]({% link v23.1/cluster-settings.md %}#setting-sql-show-ranges-deprecated-behavior-enabled) cluster setting to `false`. This will have the following effects that will become the defaults in a future release: - The aforementioned columns in the `crdb_internal` virtual tables will be removed. Existing code can use the updated output of the [`SHOW RANGES`]({% link v23.1/show-ranges.md %}) statement instead, optionally using `WITH KEYS` to expose the raw start/end keys. @@ -437,17 +437,17 @@ Before [upgrading to CockroachDB v23.1]({% link v23.1/upgrade-cockroach-version. 
- Instead of: `SELECT range_id FROM crdb_internal.ranges WHERE table_name = 'x'`, use: `SELECT range_id FROM [SHOW RANGES FROM TABLE x]` - Instead of `SELECT range_id FROM crdb_internal.ranges WHERE table_name = $1 OR table_id = $2` (variable / unpredictable table name or ID), use: `SELECT range_id FROM [SHOW RANGES FROM CURRENT_CATALOG WITH TABLES] WHERE table_name = $1 OR table_id = $2` - Instead of `SELECT start_key FROM crdb_internal.ranges WHERE table_name = 'x'`, use: `SELECT raw_start_key FROM [SHOW RANGES FROM TABLE x WITH KEYS]` - - Instead of `SELECT start_key FROM crdb_internal.ranges WHERE table_name = $1 OR table_id = $2` (unpredictable / variable table name or ID), use: `SELECT raw_start_key FROM [SHOW RANGES FROM CURRENT_CATALOG WITH TABLES, KEYS] WHERE table_name = $1 OR table_id = $2` [#93644][#93644] -- When the [cluster setting `sql.show_ranges_deprecated_behavior.enabled` is set to `false` (recommended in v23.1)]({% link v23.1/cluster-settings.md %}#setting-sql-show-ranges-deprecated-behavior-enabled), the format of the columns `start_key` and `end_key` for [`SHOW RANGES FROM DATABASE`]({% link v23.1/show-ranges.md %}) and `SHOW RANGES FROM TABLE` have been extended to include which table/index the key belongs to. This is necessary because a range can now contain data from more than one table/index. [#93644][#93644] -- When the [cluster setting `sql.show_ranges_deprecated_behavior.enabled` is set to `false` (recommended in v23.1)]({% link v23.1/cluster-settings.md %}#setting-sql-show-ranges-deprecated-behavior-enabled), the output of [`SHOW RANGES`]({% link v23.1/show-ranges.md %}) no longer includes `range_size`, `range_size_mb`, `lease_holder`, or `lease_holder_localities` by default. This ensures that `SHOW RANGES` remains fast in the common case. Use the new option [`WITH DETAILS`]({% link v23.1/show-ranges.md %}#options) to include these columns. [#93644][#93644] -- The format of the columns `start_key` and `end_key` for `SHOW RANGE ... 
FOR ROW` has been changed to be consistent with the output of `SHOW RANGES FROM INDEX`. [#93644][#93644] -- Changefeeds using "preview" expressions (released in v23.1.0) and that access the previous state of the row using the `cdc_prev()` function will no longer work and will need to be recreated with new syntax. [#94429][#94429] -- Some of the transformations specific to changefeeds have been deprecated and replaced. These functions were released in limited access in v22.2. Deprecated changefeed transformations continue to function. Closely monitor changefeeds that are created during upgrade. While effort was made to maintain backward compatibility, the updated changefeed transformation may produce slightly different output, such as different column names. [#96295][#96295] -- Fixed a bug where, when `server.identity_map.configuration` was used, CockroachDB did not verify the client-provided username against the target mappings. Note that **this means that the client must now provide a valid DB username.** This requirement is compatible with PostgreSQL; it was not previously required by CockroachDB but it is now. This does not apply when identity maps are not in use. [#94915][#94915] -- Previously, the type of the `replicas`, `voting_replicas`,`non_voting_replicas` and `learner_replicas` in `crdb_internal.ranges` were overridden to `INT2VECTOR` causing incompatible indexing between `.ranges` and `.ranges_no_leases`. Now the types of those columns in the two tables are set to `INT[]`. [#96287][#96287] -- The output of the [`SHOW RANGES`]({% link v23.1/show-ranges.md %}) command for the `crdb_internal.ranges` and `crdb_internal.ranges_no_leases` tables has been updated, and the previous output is deprecated. To enable the new command output, set the `sql.show_ranges_deprecated_behavior.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}) to `false`. The new output will become default in v23.2. 
[#99618][#99618] -- Previously, if a user specified a [`search_path`]({% link v23.1/sql-name-resolution.md %}#current-schema) in the connection string parameters, it would always be treated as case sensitive. Now, in order to have the schema names in the `search_path` respect case, the user must include double quotes around the name. [#101492][#101492] -- The deprecated CLI command `debug unsafe-remove-dead-replicas` has been removed. Use `debug recover` instead. [#89150][#89150] + - Instead of `SELECT start_key FROM crdb_internal.ranges WHERE table_name = $1 OR table_id = $2` (unpredictable / variable table name or ID), use: `SELECT raw_start_key FROM [SHOW RANGES FROM CURRENT_CATALOG WITH TABLES, KEYS] WHERE table_name = $1 OR table_id = $2` #93644 +- When the [cluster setting `sql.show_ranges_deprecated_behavior.enabled` is set to `false` (recommended in v23.1)]({% link v23.1/cluster-settings.md %}#setting-sql-show-ranges-deprecated-behavior-enabled), the format of the columns `start_key` and `end_key` for [`SHOW RANGES FROM DATABASE`]({% link v23.1/show-ranges.md %}) and `SHOW RANGES FROM TABLE` have been extended to include which table/index the key belongs to. This is necessary because a range can now contain data from more than one table/index. #93644 +- When the [cluster setting `sql.show_ranges_deprecated_behavior.enabled` is set to `false` (recommended in v23.1)]({% link v23.1/cluster-settings.md %}#setting-sql-show-ranges-deprecated-behavior-enabled), the output of [`SHOW RANGES`]({% link v23.1/show-ranges.md %}) no longer includes `range_size`, `range_size_mb`, `lease_holder`, or `lease_holder_localities` by default. This ensures that `SHOW RANGES` remains fast in the common case. Use the new option [`WITH DETAILS`]({% link v23.1/show-ranges.md %}#options) to include these columns. #93644 +- The format of the columns `start_key` and `end_key` for `SHOW RANGE ... FOR ROW` has been changed to be consistent with the output of `SHOW RANGES FROM INDEX`. 
#93644 +- Changefeeds using "preview" expressions (released in v23.1.0) and that access the previous state of the row using the `cdc_prev()` function will no longer work and will need to be recreated with new syntax. #94429 +- Some of the transformations specific to changefeeds have been deprecated and replaced. These functions were released in limited access in v22.2. Deprecated changefeed transformations continue to function. Closely monitor changefeeds that are created during upgrade. While effort was made to maintain backward compatibility, the updated changefeed transformation may produce slightly different output, such as different column names. #96295 +- Fixed a bug where, when `server.identity_map.configuration` was used, CockroachDB did not verify the client-provided username against the target mappings. Note that **this means that the client must now provide a valid DB username.** This requirement is compatible with PostgreSQL; it was not previously required by CockroachDB but it is now. This does not apply when identity maps are not in use. #94915 +- Previously, the types of the `replicas`, `voting_replicas`, `non_voting_replicas`, and `learner_replicas` columns in `crdb_internal.ranges` were overridden to `INT2VECTOR`, causing incompatible indexing between `.ranges` and `.ranges_no_leases`. Now the types of those columns in the two tables are set to `INT[]`. #96287 +- The output of the [`SHOW RANGES`]({% link v23.1/show-ranges.md %}) command for the `crdb_internal.ranges` and `crdb_internal.ranges_no_leases` tables has been updated, and the previous output is deprecated. To enable the new command output, set the `sql.show_ranges_deprecated_behavior.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}) to `false`. The new output will become the default in v23.2. #99618 +- Previously, if a user specified a [`search_path`]({% link v23.1/sql-name-resolution.md %}#current-schema) in the connection string parameters, it would always be treated as case sensitive. 
Now, in order to have the schema names in the `search_path` respect case, the user must include double quotes around the name. #101492 +- The deprecated CLI command `debug unsafe-remove-dead-replicas` has been removed. Use `debug recover` instead. #89150

Key Cluster Setting Changes

@@ -455,27 +455,27 @@ The following changes should be reviewed prior to upgrading. Default cluster set | Category | Description | Change Type | Backport version | |---|---|---|---| -| SQL language change | The [cluster setting]({% link v23.1/cluster-settings.md %}) `sql.ttl.default_range_concurrency` and table storage parameter `ttl_range_concurrency` are no longer configurable. [#89392](https://github.com/cockroachdb/cockroach/pull/89392) | No longer configurable | v22.2.1 | -| SQL language change | The `sql.distsql.max_running_flows` [cluster setting]({% link v23.1/cluster-settings.md %}) has been removed. [#84888](https://github.com/cockroachdb/cockroach/pull/84888) | Removed | None | -| Operational change | The [cluster settings]({% link v23.1/cluster-settings.md %}) `server.web_session.purge.period` and `server.web_session.purge.max_deletions_per_cycle`, which were specific to the cleanup function for `system.web_sessions`, have been replaced by `server.log_gc.period` and `server.log_gc.max_deletions_per_cycle` which apply to the cleanup function for `system.eventlog`, `system.rangelog` and `system.web_sessions` equally. [#90789](https://github.com/cockroachdb/cockroach/pull/90789) | Removed, repurposed | None | -| Operational change | The [cluster setting]({% link v23.1/cluster-settings.md %}) `server.web_session.auto_logout.timeout` has been removed. [#90789](https://github.com/cockroachdb/cockroach/pull/90789) | Removed, defaults to true | None | -| Operational change | The [load-based splitter]({% link v23.1/load-based-splitting.md %}) now supports using request CPU usage to split ranges. This is introduced with the previous cluster setting `kv.allocator.load_based_rebalancing.objective`, which when set to `cpu`, will use request CPU usage. The threshold above which CPU usage of a range is considered for splitting is defined in the cluster setting `kv.range_split.load_cpu_threshold`, which has a default value of `250ms`. (Relates to #100211 in this table.) 
[#96128](https://github.com/cockroachdb/cockroach/pull/96128) | Repurposed | None | -| Operational change | The `kv.range_split.load_cpu_threshold` [cluster setting]({% link v23.1/cluster-settings.md %}#setting-kv-range-split-load-cpu-threshold) now has a minimum setting value of `10ms`. Previously there was no minimum so, while unlikely, this could have an impact if you had chosen a custom setting lower than the established minimum. [#98250](https://github.com/cockroachdb/cockroach/pull/98250) | New minimum | None | -| Security update | The new [cluster setting]({% link v23.1/cluster-settings.md %}) `server.user_login.downgrade_scram_stored_passwords_to_bcrypt.enabled`, which allows you to migrate passwords from SCRAM to bcrypt during user authentication, defaults to `true`. If it is `true` and if `server.user_login.password_encryption` is `crdb-bcrypt`, then during login, the stored hashed password will be migrated from SCRAM to bcrypt. [#97429](https://github.com/cockroachdb/cockroach/pull/97429) | New setting | v22.2.6 | -| Security update | The default value for the `server.user_login.password_hashes.default_cost.scram_sha_256` [cluster setting]({% link v23.1/cluster-settings.md %}) is now 10610. (Previously the default was 119680.) The old value was found to have been too high for many types of client hardware, and in some cases could cause regressions in connection latency. The new value was chosen by running tests with clients that have 1 or 2 vCPUs provisioned. Additionally, the new cluster setting `server.user_login.rehash_scram_stored_passwords_on_cost_change.enabled` was added, and defaults to `true`. If it is `true` and the stored SCRAM password for a user has a different cost than the configured default cost, then the next time the user logs in, their password will automatically be rehashed using the configured default cost. 
If the rehashing is not desired, then operators should update the `server.user_login.password_hashes.default_cost.scram_sha_256` cluster setting to the value they desire before upgrading. This change is being backported to [v22.2](https://www.cockroachlabs.com/docs/releases/v22.2). [#98254](https://github.com/cockroachdb/cockroach/pull/98254) | Changed default | v22.2.7 | -| Command-line change | The `--drain-wait` argument to the [`cockroach node drain`]({% link v23.1/cockroach-node.md %}) command will be automatically increased if the command detects that it is smaller than the sum of the [cluster settings]({% link v23.1/node-shutdown.md %}#cluster-settings) `server.shutdown.drain_wait`, `server.shutdown.connection_wait`, `server.shutdown.query_wait` times two, and `server.shutdown.lease_transfer_wait`. If the `--drain-wait` argument is 0, then no timeout is used. This recommendation [was already documented]({% link v23.1/node-shutdown.md %}#drain-timeout), but now the advice will be applied automatically. [#98390](https://github.com/cockroachdb/cockroach/pull/98390) | New effect | v22.2.1 | -| Bug fix | RPC connections between nodes now require RPC connections to be established in both directions, otherwise the connection will be closed. This is done to prevent asymmetric network partitions where nodes are able to send outbound messages but not receive inbound messages, which could result in persistent unavailability. This behavior can be disabled by the [cluster setting]({% link v23.1/cluster-settings.md %}) `rpc.dialback.enabled`. 
[#94778](https://github.com/cockroachdb/cockroach/pull/94778) | New setting, enabled by default | None | -| Bug fix | Fixed a rare bug introduced in v22.2.0 that could cause a node to crash with an `attempting to append refresh spans after the tracked timestamp has moved forward` error when querying virtual tables in the [`crdb_internal`]({% link v23.1/crdb-internal.md %}) or [`pg_catalog`]({% link v23.1/pg-catalog.md %}) system catalogs. If you are experiencing this bug, set the `sql.distsql.use_streamer.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}) to `false` before upgrading a cluster to v23.1. [#99443](https://github.com/cockroachdb/cockroach/pull/99443) | New guidance | v22.2.8 | -| Bug fix | The [**Hot Ranges** page]({% link v23.1/ui-hot-ranges-page.md %}) DB Console page would show hot ranges by CPU and not QPS (queries per second), depending on the value of the `kv.allocator.load_based_rebalancing.objective` [cluster setting]({% link v23.1/cluster-settings.md %}) (default `cpu`). Now the page will always collect statistics based on QPS. (Relates to #96128 in this table.) [#100211](https://github.com/cockroachdb/cockroach/pull/100211) | Repurposed setting | No | +| SQL language change | The [cluster setting]({% link v23.1/cluster-settings.md %}) `sql.ttl.default_range_concurrency` and table storage parameter `ttl_range_concurrency` are no longer configurable. #89392 | No longer configurable | v22.2.1 | +| SQL language change | The `sql.distsql.max_running_flows` [cluster setting]({% link v23.1/cluster-settings.md %}) has been removed. 
#84888 | Removed | None | +| Operational change | The [cluster settings]({% link v23.1/cluster-settings.md %}) `server.web_session.purge.period` and `server.web_session.purge.max_deletions_per_cycle`, which were specific to the cleanup function for `system.web_sessions`, have been replaced by `server.log_gc.period` and `server.log_gc.max_deletions_per_cycle` which apply to the cleanup function for `system.eventlog`, `system.rangelog` and `system.web_sessions` equally. #90789 | Removed, repurposed | None | +| Operational change | The [cluster setting]({% link v23.1/cluster-settings.md %}) `server.web_session.auto_logout.timeout` has been removed. #90789 | Removed, defaults to true | None | +| Operational change | The [load-based splitter]({% link v23.1/load-based-splitting.md %}) now supports using request CPU usage to split ranges. This is introduced with the previous cluster setting `kv.allocator.load_based_rebalancing.objective`, which when set to `cpu`, will use request CPU usage. The threshold above which CPU usage of a range is considered for splitting is defined in the cluster setting `kv.range_split.load_cpu_threshold`, which has a default value of `250ms`. (Relates to #100211 in this table.) #96128 | Repurposed | None | +| Operational change | The `kv.range_split.load_cpu_threshold` [cluster setting]({% link v23.1/cluster-settings.md %}#setting-kv-range-split-load-cpu-threshold) now has a minimum setting value of `10ms`. Previously there was no minimum so, while unlikely, this could have an impact if you had chosen a custom setting lower than the established minimum. #98250 | New minimum | None | +| Security update | The new [cluster setting]({% link v23.1/cluster-settings.md %}) `server.user_login.downgrade_scram_stored_passwords_to_bcrypt.enabled`, which allows you to migrate passwords from SCRAM to bcrypt during user authentication, defaults to `true`. 
If it is `true` and if `server.user_login.password_encryption` is `crdb-bcrypt`, then during login, the stored hashed password will be migrated from SCRAM to bcrypt. #97429 | New setting | v22.2.6 | +| Security update | The default value for the `server.user_login.password_hashes.default_cost.scram_sha_256` [cluster setting]({% link v23.1/cluster-settings.md %}) is now 10610. (Previously the default was 119680.) The old value was found to have been too high for many types of client hardware, and in some cases could cause regressions in connection latency. The new value was chosen by running tests with clients that have 1 or 2 vCPUs provisioned. Additionally, the new cluster setting `server.user_login.rehash_scram_stored_passwords_on_cost_change.enabled` was added, and defaults to `true`. If it is `true` and the stored SCRAM password for a user has a different cost than the configured default cost, then the next time the user logs in, their password will automatically be rehashed using the configured default cost. If the rehashing is not desired, then operators should update the `server.user_login.password_hashes.default_cost.scram_sha_256` cluster setting to the value they desire before upgrading. This change is being backported to [v22.2](https://www.cockroachlabs.com/docs/releases/v22.2). #98254 | Changed default | v22.2.7 | +| Command-line change | The `--drain-wait` argument to the [`cockroach node drain`]({% link v23.1/cockroach-node.md %}) command will be automatically increased if the command detects that it is smaller than the sum of the [cluster settings]({% link v23.1/node-shutdown.md %}#cluster-settings) `server.shutdown.drain_wait`, `server.shutdown.connection_wait`, `server.shutdown.query_wait` times two, and `server.shutdown.lease_transfer_wait`. If the `--drain-wait` argument is 0, then no timeout is used. This recommendation [was already documented]({% link v23.1/node-shutdown.md %}#drain-timeout), but now the advice will be applied automatically. 
#98390 | New effect | v22.2.1 | +| Bug fix | RPC connections between nodes now require RPC connections to be established in both directions, otherwise the connection will be closed. This is done to prevent asymmetric network partitions where nodes are able to send outbound messages but not receive inbound messages, which could result in persistent unavailability. This behavior can be disabled by the [cluster setting]({% link v23.1/cluster-settings.md %}) `rpc.dialback.enabled`. #94778 | New setting, enabled by default | None | +| Bug fix | Fixed a rare bug introduced in v22.2.0 that could cause a node to crash with an `attempting to append refresh spans after the tracked timestamp has moved forward` error when querying virtual tables in the [`crdb_internal`]({% link v23.1/crdb-internal.md %}) or [`pg_catalog`]({% link v23.1/pg-catalog.md %}) system catalogs. If you are experiencing this bug, set the `sql.distsql.use_streamer.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}) to `false` before upgrading a cluster to v23.1. #99443 | New guidance | v22.2.8 | +| Bug fix | The [**Hot Ranges** page]({% link v23.1/ui-hot-ranges-page.md %}) DB Console page would show hot ranges by CPU and not QPS (queries per second), depending on the value of the `kv.allocator.load_based_rebalancing.objective` [cluster setting]({% link v23.1/cluster-settings.md %}) (default `cpu`). Now the page will always collect statistics based on QPS. (Relates to #96128 in this table.) #100211 | Repurposed setting | No |

Deprecations

-- Ordinal column references (e.g., `SELECT @1, @2 FROM t`) are now deprecated. By default, statements using this syntax will now result in an error. If desired, such statements can be allowed using the session setting `SET allow_ordinal_column_references=true`. Support for ordinal column references is scheduled to be removed in upcoming version v23.2. [#93754][#93754] -- The `CONTROLCHANGEFEED` [role option]({% link v23.1/alter-role.md %}#role-options) will be deprecated in the future (see issue [#94757](https://github.com/cockroachdb/cockroach/issues/94757)). With this change, usages of the `CONTROLCHANGEFEED` role option will come with a deprecation warning. Its existing behavior remains the same. The `SELECT` and `CHANGEFEED` privileges will be used for changefeeds henceforth: +- Ordinal column references (e.g., `SELECT @1, @2 FROM t`) are now deprecated. By default, statements using this syntax will now result in an error. If desired, such statements can be allowed using the session setting `SET allow_ordinal_column_references=true`. Support for ordinal column references is scheduled to be removed in upcoming version v23.2. #93754 +- The `CONTROLCHANGEFEED` [role option]({% link v23.1/alter-role.md %}#role-options) will be deprecated in the future (see issue #94757). With this change, usages of the `CONTROLCHANGEFEED` role option will come with a deprecation warning. Its existing behavior remains the same. The `SELECT` and `CHANGEFEED` privileges will be used for changefeeds henceforth: - The `SELECT` privilege on a set of tables allows a user to run core changefeeds against them. - The `CHANGEFEED` privilege on a set of tables allows a user to run enterprise changefeeds on them, and also manage the underlying changefeed job (ie. view, pause, cancel, and resume the job). - Notably, a new [cluster setting]({% link v23.1/cluster-settings.md %}) `changefeed.permissions.require_external_connection_sink.enabled` is added and set to `false` by default. 
Enabling this setting restricts users with `CHANGEFEED` on a set of tables to create enterprise changefeeds into external connections only. To use a given external connection, a user typically needs the `USAGE` privilege on it. Note that `ALTER DEFAULT PRIVILEGES` can be used with both the `CHANGEFEED` and `SELECT` privileges to assign coarse-grained permissions (i.e., assign permissions to all tables in a schema rather than manually assign them for each table). [#94796][#94796] -- Deprecated the `PGDUMP` and `MYSQLDUMP` formats for [`IMPORT`]({% link v23.1/import.md %}). They are still present, but will be removed in a future release. See the [Migration Overview]({% link molt/migration-overview.md %}) page for alternatives. [#96386][#96386] + Notably, a new [cluster setting]({% link v23.1/cluster-settings.md %}) `changefeed.permissions.require_external_connection_sink.enabled` is added and set to `false` by default. Enabling this setting restricts users with `CHANGEFEED` on a set of tables to create enterprise changefeeds into external connections only. To use a given external connection, a user typically needs the `USAGE` privilege on it. Note that `ALTER DEFAULT PRIVILEGES` can be used with both the `CHANGEFEED` and `SELECT` privileges to assign coarse-grained permissions (i.e., assign permissions to all tables in a schema rather than manually assign them for each table). #94796 +- Deprecated the `PGDUMP` and `MYSQLDUMP` formats for [`IMPORT`]({% link v23.1/import.md %}). They are still present, but will be removed in a future release. See the [Migration Overview]({% link molt/migration-overview.md %}) page for alternatives. #96386

Known limitations

diff --git a/src/current/_includes/releases/v23.1/v23.1.1.md b/src/current/_includes/releases/v23.1/v23.1.1.md index 860c1e56dd2..89c71d8fa1c 100644 --- a/src/current/_includes/releases/v23.1/v23.1.1.md +++ b/src/current/_includes/releases/v23.1/v23.1.1.md @@ -6,7 +6,7 @@ Release Date: May 16, 2023

Bug fixes

-- In CockroachDB v23.1.0 and its testing releases, executing [`COPY`]({% link v23.1/copy.md %}) to a target table that has multiple column families could corrupt the table. If data was copied into a table with existing rows, the data in those rows may be irrecoverable. The data would need to be dropped and re-[copied]({% link v23.1/copy.md %}) to be encoded correctly. This has now been fixed. See [Technical Advisory 103220](https://www.cockroachlabs.com/docs/advisories/a103220) for more information. [#103323][#103323] +- In CockroachDB v23.1.0 and its testing releases, executing [`COPY`]({% link v23.1/copy.md %}) to a target table that has multiple column families could corrupt the table. If data was copied into a table with existing rows, the data in those rows may be irrecoverable. The data would need to be dropped and re-[copied]({% link v23.1/copy.md %}) to be encoded correctly. This has now been fixed. See [Technical Advisory 103220](https://www.cockroachlabs.com/docs/advisories/a103220) for more information. #103323
@@ -16,4 +16,3 @@ This release includes 2 merged PRs by 2 authors.
-[#103323]: https://github.com/cockroachdb/cockroach/pull/103323 diff --git a/src/current/_includes/releases/v23.1/v23.1.10.md b/src/current/_includes/releases/v23.1/v23.1.10.md index 63e6b1f0015..4e7d07a2c1e 100644 --- a/src/current/_includes/releases/v23.1/v23.1.10.md +++ b/src/current/_includes/releases/v23.1/v23.1.10.md @@ -6,7 +6,7 @@ Release Date: September 18, 2023

Bug fixes

-- Removed buggy [TTL]({% link v23.1/row-level-ttl.md %}) descriptor repair. In v23.1.9, upgrading from v22.2 to v23.1.9 incorrectly removed TTL storage parameters from tables (visible by running `SHOW CREATE TABLE ;`) while attempting to repair the table descriptors. This resulted in the node that attempted to run the TTL job crashing due to a panic caused by the missing TTL storage parameters. Clusters currently on v22.2 should upgrade to v23.1.10 and higher, **not** to v23.1.9. [#110569][#110569] +- Removed buggy [TTL]({% link v23.1/row-level-ttl.md %}) descriptor repair. In v23.1.9, upgrading from v22.2 to v23.1.9 incorrectly removed TTL storage parameters from tables (visible by running `SHOW CREATE TABLE <table_name>;`) while attempting to repair the table descriptors. This resulted in the node that attempted to run the TTL job crashing due to a panic caused by the missing TTL storage parameters. Clusters currently on v22.2 should upgrade to v23.1.10 and higher, **not** to v23.1.9. #110569
@@ -16,4 +16,3 @@ This release includes 2 merged PRs by 2 authors.
-[#110569]: https://github.com/cockroachdb/cockroach/pull/110569 diff --git a/src/current/_includes/releases/v23.1/v23.1.11.md b/src/current/_includes/releases/v23.1/v23.1.11.md index 1d6bbb627fe..ee16ad5b715 100644 --- a/src/current/_includes/releases/v23.1/v23.1.11.md +++ b/src/current/_includes/releases/v23.1/v23.1.11.md @@ -6,72 +6,72 @@ Release Date: October 2, 2023

SQL language changes

-- Added a new syntax to [`SHOW DEFAULT PRIVILEGES`]({% link v23.1/show-default-privileges.md %}), `SHOW DEFAULT PRIVILEGES FOR GRANTEE ` that shows the default privileges that a grantee received. [#108285][#108285] -- The admin API [database details]({% link v23.1/cluster-api.md %}) endpoint now returns authoritative range statistics. [#108727][#108727] -- Added the [cluster setting](../v23.1/cluster-settings.html) `sql.stats.limit_table_size.enabled`, which controls whether or not CockroachDB enforces the row limit set by `sql.stats.persisted_rows.max` in the `system.statement_statistics` and `system.transaction_statistics` tables. [#108912][#108912] -- Optimized the `sql-stats-compaction` [job](../v23.1/show-jobs.html)'s [delete query](../v23.1/delete.html) to avoid a [full scan](../v23.1/sql-tuning-with-explain.html#issue-full-table-scans). This helps avoid a [transaction retry error](../v23.1/common-errors.html#restart-transaction), which can cause the job to fail. [#108987][#108987] -- Fixed an issue where the UI was missing query text and details when looking at the [SQL activity transactions page]({% link v23.1/ui-sql-dashboard.md %}#transactions) if there were more than 500 transactions or statements. The `crdb_internal.statement_activity` table now includes all statements for a transaction that are in the `crdb_internal.transaction_activity` table. [#109479][#109479] -- Added the `VIEWSYSTEMTABLE` [system privilege]({% link v23.1/security-reference/authorization.md %}#supported-privileges). Users with this privilege have [`SELECT`](../v23.1/selection-queries.html) privileges for all tables in the `system` database. [#109525][#109525] -- The `oidvectortypes` [built-in function]({% link v23.1/functions-and-operators.md %}) has been implemented, which can format an `oidvector`. 
[#109711][#109711] -- The internal persisted [statistics](../v23.1/cost-based-optimizer.html#table-statistics) table max size check is now done once an hour instead of every 10 minutes. This reduces the risk of serialization errors on the statistics tables. [#109707][#109707] -- Introspection queries will now show the internal `node` user as the owner of tables in [`pg_catalog`]({% link v23.1/pg-catalog.md %}) and [`information_schema`]({% link v23.1/information-schema.md %}). Previously, the owner was shown as `admin`, but that was inaccurate since users with the [`admin` role]({% link v23.1/security-reference/authorization.md %}#admin-role) could not modify these tables in any way. [#109735][#109735] +- Added a new syntax to [`SHOW DEFAULT PRIVILEGES`]({% link v23.1/show-default-privileges.md %}), `SHOW DEFAULT PRIVILEGES FOR GRANTEE ` that shows the default privileges that a grantee received. #108285 +- The admin API [database details]({% link v23.1/cluster-api.md %}) endpoint now returns authoritative range statistics. #108727 +- Added the [cluster setting](../v23.1/cluster-settings.html) `sql.stats.limit_table_size.enabled`, which controls whether or not CockroachDB enforces the row limit set by `sql.stats.persisted_rows.max` in the `system.statement_statistics` and `system.transaction_statistics` tables. #108912 +- Optimized the `sql-stats-compaction` [job](../v23.1/show-jobs.html)'s [delete query](../v23.1/delete.html) to avoid a [full scan](../v23.1/sql-tuning-with-explain.html#issue-full-table-scans). This helps avoid a [transaction retry error](../v23.1/common-errors.html#restart-transaction), which can cause the job to fail. #108987 +- Fixed an issue where the UI was missing query text and details when looking at the [SQL activity transactions page]({% link v23.1/ui-sql-dashboard.md %}#transactions) if there were more than 500 transactions or statements. 
The `crdb_internal.statement_activity` table now includes all statements for a transaction that are in the `crdb_internal.transaction_activity` table. #109479 +- Added the `VIEWSYSTEMTABLE` [system privilege]({% link v23.1/security-reference/authorization.md %}#supported-privileges). Users with this privilege have [`SELECT`](../v23.1/selection-queries.html) privileges for all tables in the `system` database. #109525 +- The `oidvectortypes` [built-in function]({% link v23.1/functions-and-operators.md %}) has been implemented, which can format an `oidvector`. #109711 +- The internal persisted [statistics](../v23.1/cost-based-optimizer.html#table-statistics) table max size check is now done once an hour instead of every 10 minutes. This reduces the risk of serialization errors on the statistics tables. #109707 +- Introspection queries will now show the internal `node` user as the owner of tables in [`pg_catalog`]({% link v23.1/pg-catalog.md %}) and [`information_schema`]({% link v23.1/information-schema.md %}). Previously, the owner was shown as `admin`, but that was inaccurate since users with the [`admin` role]({% link v23.1/security-reference/authorization.md %}#admin-role) could not modify these tables in any way. #109735

Operational changes

-- Added the `kv.enqueue_in_replicate_queue_on_span_config_update.enabled` [cluster setting](../v23.1/cluster-settings.html). When set to `true`, [stores](../v23.1/cockroach-start.html#store) in the cluster will enqueue [replicas](../v23.1/architecture/overview.html#architecture-replica) for [replication changes](../v23.1/architecture/replication-layer.html) upon receiving config updates that could affect the replica. This setting is off by default. Enabling this setting speeds up how quickly config-triggered replication changes begin, but adds additional CPU overhead. The overhead scales with the number of leaseholders. [#108812][#108812] -- Added a new [cluster setting](../v23.1/cluster-settings.html) named `server.hot_ranges_request.node.timeout`, with a default value of 5 minutes. The setting controls the maximum amount of time that a [hot ranges request]({% link v23.1/ui-hot-ranges-page.md %}) will spend waiting for a node to provide a response. Set it to `0` to disable timeouts. [#109015][#109015] -- Span stats requests will return a partial result if the request encounters any errors. Errors that would have previously terminated the request are now included in the response. [#109008][#109008] -- [`BACKUP`](../v23.1/backup.html) now skips contacting the ranges for tables on which [`exclude_data_from_backup`](../v23.1/alter-table.html#exclude-a-tables-data-from-backups) is set, and can thus succeed even if an excluded table is unavailable. [#109123][#109123] -- The [RPC]({% link v23.1/architecture/distribution-layer.md %}#grpc) dial and heartbeat timeouts can now be configured via the environment variables `COCKROACH_RPC_DIAL_TIMEOUT` (default 2x `COCKROACH_NETWORK_TIMEOUT` or 2x2=4 seconds) and `COCKROACH_RPC_HEARTBEAT_TIMEOUT` (default 3x `COCKROACH_NETWORK_TIMEOUT` or 3x2=6 seconds). This allows configuring these values independently of `COCKROACH_NETWORK_TIMEOUT`. 
[#109358][#109358] -- The default [gRPC]({% link v23.1/architecture/distribution-layer.md %}#grpc) server-side send timeout has been increased from 2 seconds to 4 seconds (1x to 2x of `COCKROACH_NETWORK_TIMEOUT`), to avoid spurious connection failures in certain scenarios. This can be controlled via the new environment variable `COCKROACH_RPC_SERVER_TIMEOUT`. [#109620][#109620] -- Added a new gauge [metric](../v23.1/metrics.html) `sql.schema.invalid_objects`. This gauge is periodically updated based on the schedule set by the `sql.schema.telemetry.recurrence` [cluster setting](../v23.1/cluster-settings.html). When the metric is updated, it counts the number of schema objects ([tables](../v23.1/create-table.html), [types](../v23.1/create-type.html), [schemas](../v23.1/create-schema.html), [databases](../v23.1/create-database.html), and [functions](../v23.1/user-defined-functions.html)) that are in an invalid state according to CockroachDB's internal validation checks. This metric is expected to be zero (`0`) in a healthy cluster. If it is not zero, it indicates that there is a problem that must be repaired. [#109733][#109733] -- Added two new [changefeed metrics](../v23.1/monitor-and-debug-changefeeds.html#recommended-changefeed-metrics-to-track): `changefeed.checkpoint_progress` is similar to `changefeed.max_behind_nanos`, but it also supports [metrics labels](../v23.1/monitor-and-debug-changefeeds.html#using-changefeed-metrics-labels). `changefeed.aggregator_progress` tracks the progress of individual aggregators (the lowest timestamp for which all aggregators with the label have emitted all values they're responsible for). [#109744][#109744] +- Added the `kv.enqueue_in_replicate_queue_on_span_config_update.enabled` [cluster setting](../v23.1/cluster-settings.html). 
When set to `true`, [stores](../v23.1/cockroach-start.html#store) in the cluster will enqueue [replicas](../v23.1/architecture/overview.html#architecture-replica) for [replication changes](../v23.1/architecture/replication-layer.html) upon receiving config updates that could affect the replica. This setting is off by default. Enabling this setting speeds up how quickly config-triggered replication changes begin, but adds additional CPU overhead. The overhead scales with the number of leaseholders. #108812 +- Added a new [cluster setting](../v23.1/cluster-settings.html) named `server.hot_ranges_request.node.timeout`, with a default value of 5 minutes. The setting controls the maximum amount of time that a [hot ranges request]({% link v23.1/ui-hot-ranges-page.md %}) will spend waiting for a node to provide a response. Set it to `0` to disable timeouts. #109015 +- Span stats requests will return a partial result if the request encounters any errors. Errors that would have previously terminated the request are now included in the response. #109008 +- [`BACKUP`](../v23.1/backup.html) now skips contacting the ranges for tables on which [`exclude_data_from_backup`](../v23.1/alter-table.html#exclude-a-tables-data-from-backups) is set, and can thus succeed even if an excluded table is unavailable. #109123 +- The [RPC]({% link v23.1/architecture/distribution-layer.md %}#grpc) dial and heartbeat timeouts can now be configured via the environment variables `COCKROACH_RPC_DIAL_TIMEOUT` (default 2x `COCKROACH_NETWORK_TIMEOUT` or 2x2=4 seconds) and `COCKROACH_RPC_HEARTBEAT_TIMEOUT` (default 3x `COCKROACH_NETWORK_TIMEOUT` or 3x2=6 seconds). This allows configuring these values independently of `COCKROACH_NETWORK_TIMEOUT`. #109358 +- The default [gRPC]({% link v23.1/architecture/distribution-layer.md %}#grpc) server-side send timeout has been increased from 2 seconds to 4 seconds (1x to 2x of `COCKROACH_NETWORK_TIMEOUT`), to avoid spurious connection failures in certain scenarios. 
This can be controlled via the new environment variable `COCKROACH_RPC_SERVER_TIMEOUT`. #109620 +- Added a new gauge [metric](../v23.1/metrics.html) `sql.schema.invalid_objects`. This gauge is periodically updated based on the schedule set by the `sql.schema.telemetry.recurrence` [cluster setting](../v23.1/cluster-settings.html). When the metric is updated, it counts the number of schema objects ([tables](../v23.1/create-table.html), [types](../v23.1/create-type.html), [schemas](../v23.1/create-schema.html), [databases](../v23.1/create-database.html), and [functions](../v23.1/user-defined-functions.html)) that are in an invalid state according to CockroachDB's internal validation checks. This metric is expected to be zero (`0`) in a healthy cluster. If it is not zero, it indicates that there is a problem that must be repaired. #109733 +- Added two new [changefeed metrics](../v23.1/monitor-and-debug-changefeeds.html#recommended-changefeed-metrics-to-track): `changefeed.checkpoint_progress` is similar to `changefeed.max_behind_nanos`, but it also supports [metrics labels](../v23.1/monitor-and-debug-changefeeds.html#using-changefeed-metrics-labels). `changefeed.aggregator_progress` tracks the progress of individual aggregators (the lowest timestamp for which all aggregators with the label have emitted all values they're responsible for). #109744

Command-line changes

-- Removed the command `\demo recommission` from [`cockroach demo`]({% link v23.1/cockroach-demo.md %}). It had been obsolete and non-functional since v20.2. [#108631][#108631] +- Removed the command `\demo recommission` from [`cockroach demo`]({% link v23.1/cockroach-demo.md %}). It had been obsolete and non-functional since v20.2. #108631

DB Console changes

-- Users without the `VIEWCLUSTERSETTINGS` [permission]({% link v23.1/security-reference/authorization.md %}#supported-privileges), but with the `VIEWACTIVITY` or `VIEWACTIVITYREDACTED` permissions, can now see [index recommendations]({% link v23.1/ui-databases-page.md %}#index-recommendations). [#109466][#109466] -- CockroachDB will now show a warning when the time period selected on the [SQL Activity page](../v23.1/ui-overview.html#sql-activity) is older than the oldest data available. [#109468][#109468] +- Users without the `VIEWCLUSTERSETTINGS` [permission]({% link v23.1/security-reference/authorization.md %}#supported-privileges), but with the `VIEWACTIVITY` or `VIEWACTIVITYREDACTED` permissions, can now see [index recommendations]({% link v23.1/ui-databases-page.md %}#index-recommendations). #109466 +- CockroachDB will now show a warning when the time period selected on the [SQL Activity page](../v23.1/ui-overview.html#sql-activity) is older than the oldest data available. #109468

Bug fixes

-- Fixed a buggy TTL descriptor repair by removing it. Previously, upgrading from v22.2.x to 23.1.9 incorrectly removed [TTL storage params]({% link v23.1/row-level-ttl.md %}#ttl-storage-parameters) from tables (visible via [`SHOW CREATE TABLE `]({% link v23.1/show-create.md %})) while attempting to repair table descriptors. This resulted in the node that attempts to run the TTL [job](../v23.1/show-jobs.html) crashing due to a panic caused by the missing TTL storage parameters. Clusters currently on v22.2.x should **not** be upgraded to v23.1.9 and should be upgraded directly to v23.1.10 or later. For more information, see [Technical Advisory 110363](../advisories/a110363.html). [#110562][#110562] -- Users with the `VIEWACTIVITY` [privilege]({% link v23.1/security-reference/authorization.md %}#supported-privileges) should be able to see other users' sessions from both the [CLI]({% link v23.1/cockroach-sql.md %}) and the [DB Console]({% link v23.1/ui-overview.md %}). [#108571][#108571] -- Fixed errors on the [**Sessions** page](../v23.1/ui-sessions-page.html) in the [DB Console](../v23.1/ui-overview.html) when a session's memory usage is zero bytes. [#108619][#108619] -- Fixed a bug in [`cockroach demo`]({% link v23.1/cockroach-demo.md %}) whereby `\demo add` could sometimes crash with an error "`index out of range [...] with length ...`". This bug was introduced in v19.x. [#108631][#108631] -- Fixed a bug whereby the command `\demo decommission` in [`cockroach demo`]({% link v23.1/cockroach-demo.md %}) could sometime leave the demo cluster in a broken state. This bug was introduced in v20.2. [#108631][#108631] -- Fixed a bug in the [index](../v23.1/indexes.html) recommendations provided in the [`EXPLAIN`]({% link v23.1/explain.md %}) output where [`ALTER INDEX ... VISIBLE`](../v23.1/alter-index.html) index recommendations may suggest making the wrong index visible when there are multiple invisible indexes in a table. 
[#108646][#108646] -- Fixed a bug that could cause a query with [`LIMIT`]({% link v23.1/limit-offset.md %}) and [`ORDER BY`](../v23.1/order-by.html) to return results in the wrong order. This bug could cause incorrect results as well if the `LIMIT` was nested within an outer query, e.g. under another `LIMIT`. This bug had existed since before v22.2. [#106798][#106798] -- Fixed a bug with [collated string]({% link v23.1/collate.md %}) type checking with nested [case expressions]({% link v23.1/scalar-expressions.md %}#simple-case-expressions) where an inner case had no explicit collated type. [#108345][#108345] -- Fixed a bug where [`RELEASE SAVEPOINT`]({% link v23.1/release-savepoint.md %}) could incorrectly emit the message `"cannot publish new versions for descriptors"` instead of a [retryable error]({% link v23.1/common-errors.md %}#restart-transaction) to applications. [#108478][#108478] -- Fixed a bug that could cause CPU usage to increase over time. [#108801][#108801] -- Fixed a bug introduced in v22.1 that could cause a [join](../v23.1/joins.html) to infinite-loop in rare cases when (1) the join filter is not an equality and (2) no columns from the left input are returned. [#106875][#106875] -- Users with the `VIEWACTIVITY` [permission]({% link v23.1/security-reference/authorization.md %}#supported-privileges) can now view correct values for the current timezone in the [DB Console]({% link v23.1/ui-overview.md %}). [#108780][#108780] -- Fixed a bug present since v23.1.0 that would cause queries on the `pg_catalog.pg_statistic_ext` table to fail if a table was dropped recently. This bug also caused the `\d` [CLI]({% link v23.1/cockroach-sql.md %}) shortcut to encounter errors. [#108909][#108909] -- Fixed a bug where `pg_attribute` and `pg_attrdef` did not properly return results for generated columns. [#109035][#109035] -- The [Schema Insights](../v23.1/ui-insights-page.html#schema-insights-tab) view should hit request timeouts less frequently, if at all. 
[#109014][#109014] -- Fixed a bug that caused nodes to crash when attempting to `EXECUTE` a prepared statement with an argument that referenced a [user-defined function]({% link v23.1/user-defined-functions.md %}). This bug was present since user-defined functions were introduced in v22.2. [#108469][#108469] -- Fixed a bug so the filter on the [Statements Page](../v23.1/ui-statements-page.html) works when the app name is an empty string (represented as 'unset'). [#108985][#108985] -- Fixed a bug where a `SpanStatsRequest` would return post-replicated MVCC stats, which was causing incorrect output in [`SHOW RANGES ... WITH DETAILS`]({% link v23.1/show-ranges.md %}). Now, a `SpanStatsRequest` returns the logical MVCC stats for the requested span. [#109234][#109234] -- Fixed an issue with [the "full scan" filter in the UI]({% link v23.1/ui-sql-dashboard.md %}#full-table-index-scans), where the filter was not returning any results. [#109274][#109274] -- Going to the [Transaction Details Page]({% link v23.1/ui-transactions-page.md %}#transaction-details-page) from [Workload Insights > High Contention]({% link v23.1/ui-insights-page.md %}#high-contention) is fixed. Previously, the link would not show any results. [#109253][#109253] -- Fixed the column names on the [`SELECT` queries]({% link v23.1/selection-queries.md %}) against the tables `crdb_internal.node_txn_execution_insights` and `crdb_internal.cluster_txn_execution_insights` during the [creation of debug.zip files]({% link v23.1/cockroach-debug-zip.md %}). [#109515][#109515] -- Fixed a bug that could cause some rows to be silently skipped during [`IMPORT`](../v23.1/import.html) when a node failed. [#109663][#109663] -- Fixed a bug in [geospatial queries]({% link v23.1/spatial-data-overview.md %}) where more rows could be returned by the query than expected. 
This could happen when a query filter of the form `ST_Distance(geog1, geog2) > constant` or `ST_MaxDistance(geom1, geom2) > constant`, where the operator was one of `>`, `<`, `>=`, `<=`, or a filter of the form `ST_Distance(geog1, geog2, false) = 0` would sometimes mistakenly evaluate to `true` when one or both of the inputs was `NULL` or an empty [geography]({% link v23.1/architecture/glossary.md %}#geography) or [geometry]({% link v23.1/architecture/glossary.md %}#geometry). [#109393][#109393] -- Fixed an issue where a [split](../v23.1/architecture/distribution-layer.html#range-splits) can be called on an invalid key that's in the form of `someValidKey.Next()` during [restore](../v23.1/restore.html). This split key can land in the middle of a row with [column families](../v23.1/column-families.html), and thus result in failing SQL queries when querying the restored table. [#109777][#109777] -- Fixed an issue where a [split](../v23.1/architecture/distribution-layer.html#range-splits) can be called on an invalid key that's in the form of `someValidKey.Next()` during [restore](../v23.1/restore.html) with the `bulkio.restore.use_simple_import_spans` [cluster setting]({% link v23.1/cluster-settings.md %}) set to `true`. This split key can land in the middle of a row with [column families](../v23.1/column-families.html), and thus result in failing SQL queries when querying the restored table. [#109940][#109940] -- The `difference` [built-in function]({% link v23.1/functions-and-operators.md %}) had its return type incorrectly set to [`string`]({% link v23.1/string.md %}) instead of [`integer`]({% link v23.1/int.md %}). [#109752][#109752] -- Fixed a bug where dependencies on [sequences]({% link v23.1/show-sequences.md %}) from [tables]({% link v23.1/show-tables.md %}) would be reported with the wrong value for the `classid` column in the [`pg_catalog.pg_depend` table]({% link v23.1/pg-catalog.md %}). 
[#110206][#110206] -- Fixed a bug that could cause a [transaction](../v23.1/transactions.html) performing multiple parallel [foreign key]({% link v23.1/foreign-key.md %}) checks to return a `concurrent txn use detected` error. [#110273][#110273] -- Various observability pages in the [DB Console]({% link v23.1/ui-overview.md %}) no longer crash when they encounter zeros (e.g., a session with no memory allocated). [#108785][#108785] +- Fixed a buggy TTL descriptor repair by removing it. Previously, upgrading from v22.2.x to 23.1.9 incorrectly removed [TTL storage params]({% link v23.1/row-level-ttl.md %}#ttl-storage-parameters) from tables (visible via [`SHOW CREATE TABLE `]({% link v23.1/show-create.md %})) while attempting to repair table descriptors. This resulted in the node that attempts to run the TTL [job](../v23.1/show-jobs.html) crashing due to a panic caused by the missing TTL storage parameters. Clusters currently on v22.2.x should **not** be upgraded to v23.1.9 and should be upgraded directly to v23.1.10 or later. For more information, see [Technical Advisory 110363](../advisories/a110363.html). #110562 +- Users with the `VIEWACTIVITY` [privilege]({% link v23.1/security-reference/authorization.md %}#supported-privileges) should be able to see other users' sessions from both the [CLI]({% link v23.1/cockroach-sql.md %}) and the [DB Console]({% link v23.1/ui-overview.md %}). #108571 +- Fixed errors on the [**Sessions** page](../v23.1/ui-sessions-page.html) in the [DB Console](../v23.1/ui-overview.html) when a session's memory usage is zero bytes. #108619 +- Fixed a bug in [`cockroach demo`]({% link v23.1/cockroach-demo.md %}) whereby `\demo add` could sometimes crash with an error "`index out of range [...] with length ...`". This bug was introduced in v19.x. #108631 +- Fixed a bug whereby the command `\demo decommission` in [`cockroach demo`]({% link v23.1/cockroach-demo.md %}) could sometime leave the demo cluster in a broken state. 
This bug was introduced in v20.2. #108631 +- Fixed a bug in the [index](../v23.1/indexes.html) recommendations provided in the [`EXPLAIN`]({% link v23.1/explain.md %}) output where [`ALTER INDEX ... VISIBLE`](../v23.1/alter-index.html) index recommendations may suggest making the wrong index visible when there are multiple invisible indexes in a table. #108646 +- Fixed a bug that could cause a query with [`LIMIT`]({% link v23.1/limit-offset.md %}) and [`ORDER BY`](../v23.1/order-by.html) to return results in the wrong order. This bug could cause incorrect results as well if the `LIMIT` was nested within an outer query, e.g. under another `LIMIT`. This bug had existed since before v22.2. #106798 +- Fixed a bug with [collated string]({% link v23.1/collate.md %}) type checking with nested [case expressions]({% link v23.1/scalar-expressions.md %}#simple-case-expressions) where an inner case had no explicit collated type. #108345 +- Fixed a bug where [`RELEASE SAVEPOINT`]({% link v23.1/release-savepoint.md %}) could incorrectly emit the message `"cannot publish new versions for descriptors"` instead of a [retryable error]({% link v23.1/common-errors.md %}#restart-transaction) to applications. #108478 +- Fixed a bug that could cause CPU usage to increase over time. #108801 +- Fixed a bug introduced in v22.1 that could cause a [join](../v23.1/joins.html) to infinite-loop in rare cases when (1) the join filter is not an equality and (2) no columns from the left input are returned. #106875 +- Users with the `VIEWACTIVITY` [permission]({% link v23.1/security-reference/authorization.md %}#supported-privileges) can now view correct values for the current timezone in the [DB Console]({% link v23.1/ui-overview.md %}). #108780 +- Fixed a bug present since v23.1.0 that would cause queries on the `pg_catalog.pg_statistic_ext` table to fail if a table was dropped recently. This bug also caused the `\d` [CLI]({% link v23.1/cockroach-sql.md %}) shortcut to encounter errors. 
#108909 +- Fixed a bug where `pg_attribute` and `pg_attrdef` did not properly return results for generated columns. #109035 +- The [Schema Insights](../v23.1/ui-insights-page.html#schema-insights-tab) view should hit request timeouts less frequently, if at all. #109014 +- Fixed a bug that caused nodes to crash when attempting to `EXECUTE` a prepared statement with an argument that referenced a [user-defined function]({% link v23.1/user-defined-functions.md %}). This bug was present since user-defined functions were introduced in v22.2. #108469 +- Fixed a bug so the filter on the [Statements Page](../v23.1/ui-statements-page.html) works when the app name is an empty string (represented as 'unset'). #108985 +- Fixed a bug where a `SpanStatsRequest` would return post-replicated MVCC stats, which was causing incorrect output in [`SHOW RANGES ... WITH DETAILS`]({% link v23.1/show-ranges.md %}). Now, a `SpanStatsRequest` returns the logical MVCC stats for the requested span. #109234 +- Fixed an issue with [the "full scan" filter in the UI]({% link v23.1/ui-sql-dashboard.md %}#full-table-index-scans), where the filter was not returning any results. #109274 +- Going to the [Transaction Details Page]({% link v23.1/ui-transactions-page.md %}#transaction-details-page) from [Workload Insights > High Contention]({% link v23.1/ui-insights-page.md %}#high-contention) is fixed. Previously, the link would not show any results. #109253 +- Fixed the column names on the [`SELECT` queries]({% link v23.1/selection-queries.md %}) against the tables `crdb_internal.node_txn_execution_insights` and `crdb_internal.cluster_txn_execution_insights` during the [creation of debug.zip files]({% link v23.1/cockroach-debug-zip.md %}). #109515 +- Fixed a bug that could cause some rows to be silently skipped during [`IMPORT`](../v23.1/import.html) when a node failed. 
#109663 +- Fixed a bug in [geospatial queries]({% link v23.1/spatial-data-overview.md %}) where more rows could be returned by the query than expected. This could happen when a query filter of the form `ST_Distance(geog1, geog2) > constant` or `ST_MaxDistance(geom1, geom2) > constant`, where the operator was one of `>`, `<`, `>=`, `<=`, or a filter of the form `ST_Distance(geog1, geog2, false) = 0` would sometimes mistakenly evaluate to `true` when one or both of the inputs was `NULL` or an empty [geography]({% link v23.1/architecture/glossary.md %}#geography) or [geometry]({% link v23.1/architecture/glossary.md %}#geometry). #109393 +- Fixed an issue where a [split](../v23.1/architecture/distribution-layer.html#range-splits) can be called on an invalid key that's in the form of `someValidKey.Next()` during [restore](../v23.1/restore.html). This split key can land in the middle of a row with [column families](../v23.1/column-families.html), and thus result in failing SQL queries when querying the restored table. #109777 +- Fixed an issue where a [split](../v23.1/architecture/distribution-layer.html#range-splits) can be called on an invalid key that's in the form of `someValidKey.Next()` during [restore](../v23.1/restore.html) with the `bulkio.restore.use_simple_import_spans` [cluster setting]({% link v23.1/cluster-settings.md %}) set to `true`. This split key can land in the middle of a row with [column families](../v23.1/column-families.html), and thus result in failing SQL queries when querying the restored table. #109940 +- The `difference` [built-in function]({% link v23.1/functions-and-operators.md %}) had its return type incorrectly set to [`string`]({% link v23.1/string.md %}) instead of [`integer`]({% link v23.1/int.md %}). 
#109752 +- Fixed a bug where dependencies on [sequences]({% link v23.1/show-sequences.md %}) from [tables]({% link v23.1/show-tables.md %}) would be reported with the wrong value for the `classid` column in the [`pg_catalog.pg_depend` table]({% link v23.1/pg-catalog.md %}). #110206 +- Fixed a bug that could cause a [transaction](../v23.1/transactions.html) performing multiple parallel [foreign key]({% link v23.1/foreign-key.md %}) checks to return a `concurrent txn use detected` error. #110273 +- Various observability pages in the [DB Console]({% link v23.1/ui-overview.md %}) no longer crash when they encounter zeros (e.g., a session with no memory allocated). #108785

Performance improvements

-- Improved the cost of resolving a user-defined [enum](../v23.1/enum.html) type that has many values. [#109470][#109470] -- Queries that access the [`pg_catalog`]({% link v23.1/pg-catalog.md %}) and [`information_schema`]({% link v23.1/information-schema.md %}) that perform introspection on other tables in those schemas are now significantly faster. [#109735][#109735] +- Improved the cost of resolving a user-defined [enum](../v23.1/enum.html) type that has many values. #109470 +- Queries that access the [`pg_catalog`]({% link v23.1/pg-catalog.md %}) and [`information_schema`]({% link v23.1/information-schema.md %}) that perform introspection on other tables in those schemas are now significantly faster. #109735
@@ -81,57 +81,3 @@ This release includes 204 merged PRs by 63 authors.
-[#106798]: https://github.com/cockroachdb/cockroach/pull/106798 -[#106875]: https://github.com/cockroachdb/cockroach/pull/106875 -[#108285]: https://github.com/cockroachdb/cockroach/pull/108285 -[#108345]: https://github.com/cockroachdb/cockroach/pull/108345 -[#108469]: https://github.com/cockroachdb/cockroach/pull/108469 -[#108478]: https://github.com/cockroachdb/cockroach/pull/108478 -[#108571]: https://github.com/cockroachdb/cockroach/pull/108571 -[#108619]: https://github.com/cockroachdb/cockroach/pull/108619 -[#108631]: https://github.com/cockroachdb/cockroach/pull/108631 -[#108646]: https://github.com/cockroachdb/cockroach/pull/108646 -[#108727]: https://github.com/cockroachdb/cockroach/pull/108727 -[#108780]: https://github.com/cockroachdb/cockroach/pull/108780 -[#108785]: https://github.com/cockroachdb/cockroach/pull/108785 -[#108801]: https://github.com/cockroachdb/cockroach/pull/108801 -[#108812]: https://github.com/cockroachdb/cockroach/pull/108812 -[#108909]: https://github.com/cockroachdb/cockroach/pull/108909 -[#108912]: https://github.com/cockroachdb/cockroach/pull/108912 -[#108985]: https://github.com/cockroachdb/cockroach/pull/108985 -[#108987]: https://github.com/cockroachdb/cockroach/pull/108987 -[#109008]: https://github.com/cockroachdb/cockroach/pull/109008 -[#109014]: https://github.com/cockroachdb/cockroach/pull/109014 -[#109015]: https://github.com/cockroachdb/cockroach/pull/109015 -[#109018]: https://github.com/cockroachdb/cockroach/pull/109018 -[#109035]: https://github.com/cockroachdb/cockroach/pull/109035 -[#109123]: https://github.com/cockroachdb/cockroach/pull/109123 -[#109144]: https://github.com/cockroachdb/cockroach/pull/109144 -[#109195]: https://github.com/cockroachdb/cockroach/pull/109195 -[#109234]: https://github.com/cockroachdb/cockroach/pull/109234 -[#109235]: https://github.com/cockroachdb/cockroach/pull/109235 -[#109253]: https://github.com/cockroachdb/cockroach/pull/109253 -[#109274]: 
https://github.com/cockroachdb/cockroach/pull/109274 -[#109358]: https://github.com/cockroachdb/cockroach/pull/109358 -[#109393]: https://github.com/cockroachdb/cockroach/pull/109393 -[#109466]: https://github.com/cockroachdb/cockroach/pull/109466 -[#109468]: https://github.com/cockroachdb/cockroach/pull/109468 -[#109470]: https://github.com/cockroachdb/cockroach/pull/109470 -[#109479]: https://github.com/cockroachdb/cockroach/pull/109479 -[#109515]: https://github.com/cockroachdb/cockroach/pull/109515 -[#109525]: https://github.com/cockroachdb/cockroach/pull/109525 -[#109620]: https://github.com/cockroachdb/cockroach/pull/109620 -[#109632]: https://github.com/cockroachdb/cockroach/pull/109632 -[#109663]: https://github.com/cockroachdb/cockroach/pull/109663 -[#109707]: https://github.com/cockroachdb/cockroach/pull/109707 -[#109711]: https://github.com/cockroachdb/cockroach/pull/109711 -[#109733]: https://github.com/cockroachdb/cockroach/pull/109733 -[#109735]: https://github.com/cockroachdb/cockroach/pull/109735 -[#109744]: https://github.com/cockroachdb/cockroach/pull/109744 -[#109752]: https://github.com/cockroachdb/cockroach/pull/109752 -[#109777]: https://github.com/cockroachdb/cockroach/pull/109777 -[#109859]: https://github.com/cockroachdb/cockroach/pull/109859 -[#109940]: https://github.com/cockroachdb/cockroach/pull/109940 -[#110206]: https://github.com/cockroachdb/cockroach/pull/110206 -[#110273]: https://github.com/cockroachdb/cockroach/pull/110273 -[#110562]: https://github.com/cockroachdb/cockroach/pull/110562 diff --git a/src/current/_includes/releases/v23.1/v23.1.12.md b/src/current/_includes/releases/v23.1/v23.1.12.md index dc4729fa249..877a2b91090 100644 --- a/src/current/_includes/releases/v23.1/v23.1.12.md +++ b/src/current/_includes/releases/v23.1/v23.1.12.md @@ -6,83 +6,83 @@ Release Date: November 13, 2023

Security updates

-- The `SIGHUP` signal now clears the cached expiration times for [client certificates]({% link v23.1/cockroach-cert.md %}#how-security-certificates-work) that are reported by the `security.certificate.expiration.client` metric. [#111006][#111006] -- SQL commands that were previously limited to the `admin` [system privilege](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#supported-privileges) can now be used by users with the `VIEWCLUSTERMETADATA` or `REPAIRCLUSTERMETADATA` system privilege, depending on whether the operation is read-only or modifies state. [#111131][#111131] +- The `SIGHUP` signal now clears the cached expiration times for [client certificates]({% link v23.1/cockroach-cert.md %}#how-security-certificates-work) that are reported by the `security.certificate.expiration.client` metric. #111006 +- SQL commands that were previously limited to the `admin` [system privilege](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#supported-privileges) can now be used by users with the `VIEWCLUSTERMETADATA` or `REPAIRCLUSTERMETADATA` system privilege, depending on whether the operation is read-only or modifies state. #111131

General changes

-- The maximum permitted value of the `COCKROACH_RPC_INITIAL_WINDOW_SIZE` environment variable has been increased to `64MB`. By increasing this environment variable, in conjunction with tuning OS-level maximum TCP window size, you can increase the throughput that [Raft replication](https://www.cockroachlabs.com/docs/v23.1/architecture/replication-layer#raft) can sustain over high latency network links. [#111287][#111287] -- Updated Go version to 1.19.13. [#112204][#112204] +- The maximum permitted value of the `COCKROACH_RPC_INITIAL_WINDOW_SIZE` environment variable has been increased to `64MB`. By increasing this environment variable, in conjunction with tuning OS-level maximum TCP window size, you can increase the throughput that [Raft replication](https://www.cockroachlabs.com/docs/v23.1/architecture/replication-layer#raft) can sustain over high latency network links. #111287 +- Updated Go version to 1.19.13. #112204

SQL language changes

-- New datetime built-ins (`make_date`, `make_timestamp`, and `make_timestamptz`) have been added, allowing for the creation of timestamps, timestamps with time zones, and dates. In addition, `date_trunc` now allows for a timestamp to be truncated in a specified timezone (to a specified precision). [#110338][#110338] -- New system privileges `CREATEROLE`, `CREATELOGIN`, `CREATEDB`, and `CONTROLJOB` have been introduced. Each is analogous to its existing [role option]({% link v23.1/create-user.md %}#role-options) counterpart but can additionally be inherited through role membership. [#110359][#110359], [#110359][#110359], [#110359][#110359], [#110359][#110359] -- [`RESTORE`]({% link v23.1/restore.md %}) can now be passed a `WITH EXECUTION LOCALITY` option similar to `BACKUP` to restrict execution of the job to nodes with matching localities. [#110611][#110611] -- The `statement_activity` and `transaction_activity` table column `execution_total_cluster_seconds` is now accurate. The `combinedstmts` endpoint returns the correct value for the `StmtsTotalRuntimeSecs` and `TxnsTotalRuntimeSecs` properties. [#109639][#109639] -- The `discard` [log message]({% link v23.1/logging-overview.md %}) is now limited to once per minute by default. The message now includes both the number of transactions and the number of statements that were discarded. [#110983][#110983] +- New datetime built-ins (`make_date`, `make_timestamp`, and `make_timestamptz`) have been added, allowing for the creation of timestamps, timestamps with time zones, and dates. In addition, `date_trunc` now allows for a timestamp to be truncated in a specified timezone (to a specified precision). #110338 +- New system privileges `CREATEROLE`, `CREATELOGIN`, `CREATEDB`, and `CONTROLJOB` have been introduced. Each is analogous to its existing [role option]({% link v23.1/create-user.md %}#role-options) counterpart but can additionally be inherited through role membership. 
#110359, #110359, #110359, #110359 +- [`RESTORE`]({% link v23.1/restore.md %}) can now be passed a `WITH EXECUTION LOCALITY` option similar to `BACKUP` to restrict execution of the job to nodes with matching localities. #110611 +- The `statement_activity` and `transaction_activity` table column `execution_total_cluster_seconds` is now accurate. The `combinedstmts` endpoint returns the correct value for the `StmtsTotalRuntimeSecs` and `TxnsTotalRuntimeSecs` properties. #109639 +- The `discard` [log message]({% link v23.1/logging-overview.md %}) is now limited to once per minute by default. The message now includes both the number of transactions and the number of statements that were discarded. #110983

Operational changes

-- The [`cockroach debug zip`](https://www.cockroachlabs.com/docs/v23.1/cockroach-debug-zip.md) command now has an option to omit goroutine stack dumps. This impacts the creation of `nodes/*/stacks.txt` and `nodes/*/stacks_with_labels.txt` within debug ZIP bundles. Users can opt to exclude these goroutine stacks by using the `--include-goroutine-stacks=false` flag. Fetching stack traces for all goroutines is a "stop-the-world" operation, which can momentarily but significantly increase SQL service latency. Any periodic goroutine dumps previously taken on the node will still be included in `nodes/*/goroutines/*.txt.gz`, since they have already been generated and don't require any stop-the-world operations. [#110266][#110266] -- Requests for database details or table details from the UI, or usages of `SHOW RANGES WITH DETAILS` are no longer subject to errors if the number of requested spans is too large. [#109902][#109902] -- Added a new [metric]({% link v23.1/metrics.md %}) `changefeed.lagging_ranges` that shows the number of [ranges](https://www.cockroachlabs.com/docs/v22.2/architecture/overview#architecture-range) which are behind in changefeeds. This metric can be used with the [`metrics_label` changefeed option]({% link v23.1/create-changefeed.md %}#options). The calculation of this metric is controlled by two new [cluster settings]({% link v23.1/cluster-settings.md %}): (1) `lagging_ranges_threshold`, with a default of 3 minutes, is the amount of time a range needs to be behind to be considered lagging, and (2) `lagging_ranges_polling_interval`, with a default of 1 minute, controls how often the lagging ranges calculation is done. Note that polling adds latency to the metric being updated. For example, if a range falls behind by 3 minutes, the metric may not update until an additional minute afterwards. Also note that ranges undergoing an initial scan for longer than the threshold are considered to be lagging. 
Starting a changefeed with an initial scan on a large table will likely increment the metric for each range in the table. However, as ranges complete the initial scan, the number of lagging ranges will decrease. [#110963][#110963] -- The [DB Console]({% link v23.1/ui-overview.md %}) now constructs client-side requests using relative URLs instead of absolute ones. This enables proxying of the DB Console at arbitrary subpaths. [#111652][#111652] -- Added a new [cluster setting]({% link v23.1/cluster-settings.md %}) `server.http.base_path` that controls the redirection of the browser after successful login with [OIDC SSO]({% link v23.1/sso-db-console.md %}). Most users do not need to modify this setting. However, it is helpful in cases where CockroachDB is running behind a load balancer or proxy that serves CockroachDB under a subpath, such as `https:// /crdb/ `. In those cases, it is necessary for the browser to redirect to `/ crdb` after login instead of `/`, which has always been the hard-coded default.[#112038][#112038] +- The [`cockroach debug zip`](https://www.cockroachlabs.com/docs/v23.1/cockroach-debug-zip.md) command now has an option to omit goroutine stack dumps. This impacts the creation of `nodes/*/stacks.txt` and `nodes/*/stacks_with_labels.txt` within debug ZIP bundles. Users can opt to exclude these goroutine stacks by using the `--include-goroutine-stacks=false` flag. Fetching stack traces for all goroutines is a "stop-the-world" operation, which can momentarily but significantly increase SQL service latency. Any periodic goroutine dumps previously taken on the node will still be included in `nodes/*/goroutines/*.txt.gz`, since they have already been generated and don't require any stop-the-world operations. #110266 +- Requests for database details or table details from the UI, or usages of `SHOW RANGES WITH DETAILS` are no longer subject to errors if the number of requested spans is too large. 
#109902 +- Added a new [metric]({% link v23.1/metrics.md %}) `changefeed.lagging_ranges` that shows the number of [ranges](https://www.cockroachlabs.com/docs/v22.2/architecture/overview#architecture-range) which are behind in changefeeds. This metric can be used with the [`metrics_label` changefeed option]({% link v23.1/create-changefeed.md %}#options). The calculation of this metric is controlled by two new [cluster settings]({% link v23.1/cluster-settings.md %}): (1) `lagging_ranges_threshold`, with a default of 3 minutes, is the amount of time a range needs to be behind to be considered lagging, and (2) `lagging_ranges_polling_interval`, with a default of 1 minute, controls how often the lagging ranges calculation is done. Note that polling adds latency to the metric being updated. For example, if a range falls behind by 3 minutes, the metric may not update until an additional minute afterwards. Also note that ranges undergoing an initial scan for longer than the threshold are considered to be lagging. Starting a changefeed with an initial scan on a large table will likely increment the metric for each range in the table. However, as ranges complete the initial scan, the number of lagging ranges will decrease. #110963 +- The [DB Console]({% link v23.1/ui-overview.md %}) now constructs client-side requests using relative URLs instead of absolute ones. This enables proxying of the DB Console at arbitrary subpaths. #111652 +- Added a new [cluster setting]({% link v23.1/cluster-settings.md %}) `server.http.base_path` that controls the redirection of the browser after successful login with [OIDC SSO]({% link v23.1/sso-db-console.md %}). Most users do not need to modify this setting. However, it is helpful in cases where CockroachDB is running behind a load balancer or proxy that serves CockroachDB under a subpath, such as `https:// /crdb/ `. 
In those cases, it is necessary for the browser to redirect to `/crdb` after login instead of `/`, which has always been the hard-coded default. #112038

DB Console changes

-- Non-admin users can now view the [**Database**, **Database Details**, and **Database table** pages]({% link v23.1/ui-databases-page.md %}). [#110342][#110342] -- The [SQL Connection Rate metric]({% link v23.1/ui-sql-dashboard.md %}#sql-connection-rate) on the [SQL Dashboard]({% link v23.1/ui-sql-dashboard.md %}) is downsampled using the MAX function instead of SUM. This improves situations where zooming out would cause the connection rate to increase for downsampled data. [#110497][#110497] -- Fixed a bug in DB Console's [Statement Diagnostic page]({% link cockroachcloud/statements-page.md %}#diagnostics) that could cause the page to crash if the response was larger than 50 KB. The page now keeps pulling results until no maximum size errors are encountered. [#111278][#111278] -- Fixed an error on the [SQL Activity page]({% link v23.1/ui-overview.md %}#sql-activity) when there was a workload, and then the workload stopped so that no queries ran against the database in the last hour. [#111496][#111496] -- The [**Jobs** table]({% link v23.1/ui-jobs-page.md %}) will now correctly display timestamps for creation, last modified, and the completed time fields. [#111901][#111901] -- The `Reset Sql Stats` button is now visible to users with the admin role on the DB Console. [#110256][#110256] -- Reduced memory usage in [SQL activity]({% link cockroachcloud/statements-page.md %}) jobs. [#112323][#112323] +- Non-admin users can now view the [**Database**, **Database Details**, and **Database table** pages]({% link v23.1/ui-databases-page.md %}). #110342 +- The [SQL Connection Rate metric]({% link v23.1/ui-sql-dashboard.md %}#sql-connection-rate) on the [SQL Dashboard]({% link v23.1/ui-sql-dashboard.md %}) is downsampled using the MAX function instead of SUM. This improves situations where zooming out would cause the connection rate to increase for downsampled data. 
#110497 +- Fixed a bug in DB Console's [Statement Diagnostic page]({% link cockroachcloud/statements-page.md %}#diagnostics) that could cause the page to crash if the response was larger than 50 KB. The page now keeps pulling results until no maximum size errors are encountered. #111278 +- Fixed an error on the [SQL Activity page]({% link v23.1/ui-overview.md %}#sql-activity) when there was a workload, and then the workload stopped so that no queries ran against the database in the last hour. #111496 +- The [**Jobs** table]({% link v23.1/ui-jobs-page.md %}) will now correctly display timestamps for creation, last modified, and the completed time fields. #111901 +- The `Reset Sql Stats` button is now visible to users with the admin role on the DB Console. #110256 +- Reduced memory usage in [SQL activity]({% link cockroachcloud/statements-page.md %}) jobs. #112323

Bug fixes

-- The new backup option `updates_cluster_monitoring_metrics` tracks the timestamp of the last backup failure due to a KMS error. This option is disabled by default. [#111310][#111310] -- Fixed a bug where vectorized `COPY FROM` could produce a plan with more than one RenderNodes, when only zero or one should be allowed. This could result in multiple render nodes in a table with a hash sharded primary key. [#111412][#111412] -- Fixed a bug that caused CockroachDB to stop collecting new statistics about [Statement fingerprints]({% link v23.1/ui-statements-page.md %}#statement-fingerprint-page) and [Transaction fingerprints]({% link v23.1/ui-transactions-page.md %}). [#111641][#111641] -- Fixed a bug where, internally, printing a 0 decimal with a very low exponent uses excessive memory. The [DECIMAL](https://www.cockroachlabs.com/docs/v23.1/decimal.md) type type is not impacted, but `crdb_internal` functions may be. [#110564][#110564] -- Fixed a bug where executing the [`EXPORT INTO PARQUET`]({% link v23.1/export.md %}#export-a-table-into-parquet) statement could cause an [out-of-memory crash (OOM)]({% link v23.1/cluster-setup-troubleshooting.md %}#out-of-memory-oom-crash). Now, if memory is exceeded, the `EXPORT INTO PARQUET` statement returns an error. If you see an error related to memory, retry the `EXPORT INTO PARQUET` statement using a smaller value for the [`chunk_rows` option]({% link v23.1/export.md %}#export-options). Cockroach Labs [recommends using changefeeds to export data]({% link v23.1/export-data-with-changefeeds.md %}) because they provide better performance for growing workloads. Additionally, changefeeds [operate as jobs]({% link v23.1/show-jobs.md %}), which offer [observability]({% link v23.1/monitor-and-debug-changefeeds.md %}) and [job management]({% link v23.1/create-and-configure-changefeeds.md %}). [#110717][#110717] [#110717][#110717] -- Added limited `statement_statistics` to the `debug.zip` output. 
[#110662][#110662] -- Fixed a nil dereference panic during node startup that could be caused by an incorrect initialization order. [#109684][#109684] -- Fixed a bug where the [`BEGIN` statement]({% link v23.1/begin-transaction.md %}) incorrectly displayed a transaction ID in telemetry logs. It now correctly shows no transaction ID, since there is no open transaction when `BEGIN` is executed.[#109840][#109840] -- Fixed a bug that could cause a transaction performing multiple parallel foreign key checks to return a `concurrent txn use detected` error. [#109849][#109849] -- Fixed a bug where dependencies on sequences from tables would be reported with the wrong value for the `classid` column in the `pg_catalog.pg_depend` table. [#110207][#110207] -- Fixed edge cases in decimal and float evaluation for division operators. `'NaN'::DECIMAL / 0` will now return `NaN` instead of a division-by-zero error, and `0 / 'inf'::DECIMAL` will return `0` instead of `0E-2019`. [#110296][#110296] -- Fixed a DB Console issue where the `DROP_UNUSED` index recommendations produced by the table details page produced an invalid `DROP INDEX` statement. [#110453][#110453] -- Executing two [`ALTER RANGE default CONFIGURE ZONE`]({% link v23.1/alter-range.md %}#configure-zone) statements on the same line no longer triggers an error. [#110337][#110337] -- Removed a buggy TTL descriptor repair. Previously, upgrading from v22.2.x to 23.1.9 incorrectly removed [TTL storage parameters]({% link v23.1/row-level-ttl.md %}#ttl-storage-parameters) from tables (visible via [`SHOW CREATE TABLE `]({% link v23.1/show-create.md %})) while attempting to repair table descriptors. A node that attempts to run the TTL [job](https://www.cockroachlabs.com/docs/v23.1/show-jobs.html) could crash due to a panic caused by the missing TTL storage parameters. Clusters currently on v22.2.x should **not** be upgraded to v23.1.9, but should move directly to v23.1.10 or later. 
For more information, refer to [Technical Advisory 110363](../advisories/a110363.html). [#110500][#110500] -- Fixed a performance regression when disabling `sql.metrics.statement_details.enabled`, which caused execution stats to be collected for all queries instead of the default 1% of queries. [#109881][#109881] -- `cockroach debug pebble` commands now work correctly with encrypted stores which don't use the default `cockroach-data` path, and you no longer need to pass the `--store` option. [#110507][#110507] -- The [**Database** pages]({% link cockroachcloud/databases-page.md %}) no longer displays `undefined` regions or outdated node information. [#110741][#110741] -- Fixed a bug where `RESET (ttl_expire_after)` could incorrectly remove `ttl_expiration_expression`. [#110746][#110746] -- Fixed a bug where an [`ALTER TABLE ... ADD CONSTRAINT CHECK ...`]({% link v23.1/alter-table.md %}#add-constraint) statement with a user-defined function (UDF) in the `CHECK` could cause a validation error. [#110720][#110720] -- Fixed a bug where `CREATE INDEX` for [partial indexes]({% link v23.1/partial-indexes.md %}) could fail with `ERROR: duplicate key value violates unique constraint` if concurrent inserts happened simultaneously. [#110584][#110584] -- Fixed a bug where a [`CREATE TABLE`]({% link v23.1/create-table.md %}) command with an `IDENTITY` column did not properly propagate the type of the column into the sequence. [#111014][#111014] -- Fixed a bug where the `format_type` built-in did not honor `typemod` information for array types, leading to incorrect output. [#110940][#110940] -- Fixed compaction behavior to prevent rapid sublevel growth when removing replicas from a store, such as during decommissioning. [#111141][#111141] -- Fixed a bug that could occur when the [multiple active portals]({% link v23.1/postgresql-compatibility.md %}#multiple-active-portals) execution mode (Preview) was enabled to evaluate queries such as lookup joins. 
The bug could result in an internal error like `unexpected 40960 leftover bytes` if the portal was not fully consumed. [#110666][#110666] -- External connection URLs now accept the scheme `azure-blob` for connections to Azure Blob Storage and the scheme `azure-kms` for connections to Azure KMS. For backward compatibility, schemes `azure` and `azure-storage` schemes continue to work for connections to Azure Blob Storage. [#111246][#111246] -- Fixed a bug where changing the setting `server.telemetry.hot_ranges_stats.interval` had no effect. [#111373][#111373] -- Added a check for values before using `mean` on the DB Console [Plan Details page]({% link v23.1/ui-statements-page.md %}), fixing a crash. [#111505][#111505] -- Fixed a bug where dependencies on sequences from tables would be reported with the wrong value for the `classid` column in the `pg_catalog.pg_depend` table. [#111600][#111600] -- Fixed a bug where `atttypmod` in `pg_attribute` was not populated for [`TIMESTAMP`]({% link v23.1/timestamp.md %}) / [`INTERVAL`]({% link v23.1/interval.md %}) types, which meant that ORMs could not know the precision of these types properly. [#111727][#111727] -- Fixed a bug in the DB Console [Transaction Insight Details]({% link v23.1/ui-insights-page.md %}) page , which showed contention details of other transactions. Now, CockroachDB will only surface contention details for the current transaction. [#111880][#111880] -- Fixed a bug where `indoption` inside `pg_index` was not properly encoded. Clients were unable to decode it as `int2vector`. [#111957][#111957] -- RPC failures on writes now use the parallel commit protocol and execute in parallel to the commit operation. This change prevents incorrect retryable failures and `transaction unexpectedly committed` assertions by detecting when writes cannot be retried idempotently, instead returning an `AmbiguousResultError`. 
[#111876][#111876] -- Fixed a bug that prevented the [optimizer]({% link v23.1/cost-based-optimizer.md %}) from honoring the `statement_timeout` session setting when generating constrained index scans for queries with large `IN` lists or `= ANY` predicates on multiple index key columns. This bug could cause an Out-of-Memory (OOM) condition on the node. [#112076][#112076] -- Fixed a bug that caused internal errors during query optimization in rare cases. The bug has been present since version v2.1.11, but it is more likely to occur in version v21.2.0 and later, though it is still rare. The bug only presents when a query contains `min` and `max` [aggregate functions]({% link v23.1/functions-and-operators.md %}#aggregate-functions) [#112254][#112254] -- Fixed a bug where a lookup or index join on a table with at least three column families could be evaluated incorrectly, leading to a "non-nullable column with no value" error or incorrect query results. The bug was introduced in v22.2. [#113107][#113107] -- Fixed a bug that could cause internal errors or panics while attempting to forecast statistics on a numeric column. [#113881][#113881] +- The new backup option `updates_cluster_monitoring_metrics` tracks the timestamp of the last backup failure due to a KMS error. This option is disabled by default. #111310 +- Fixed a bug where vectorized `COPY FROM` could produce a plan with more than one RenderNode, when only zero or one should be allowed. This could result in multiple render nodes in a table with a hash sharded primary key. #111412 +- Fixed a bug that caused CockroachDB to stop collecting new statistics about [Statement fingerprints]({% link v23.1/ui-statements-page.md %}#statement-fingerprint-page) and [Transaction fingerprints]({% link v23.1/ui-transactions-page.md %}). #111641 +- Fixed a bug where, internally, printing a 0 decimal with a very low exponent used excessive memory. 
The [DECIMAL](https://www.cockroachlabs.com/docs/v23.1/decimal.html) type is not impacted, but `crdb_internal` functions may be. #110564 +- Fixed a bug where executing the [`EXPORT INTO PARQUET`]({% link v23.1/export.md %}#export-a-table-into-parquet) statement could cause an [out-of-memory crash (OOM)]({% link v23.1/cluster-setup-troubleshooting.md %}#out-of-memory-oom-crash). Now, if memory is exceeded, the `EXPORT INTO PARQUET` statement returns an error. If you see an error related to memory, retry the `EXPORT INTO PARQUET` statement using a smaller value for the [`chunk_rows` option]({% link v23.1/export.md %}#export-options). Cockroach Labs [recommends using changefeeds to export data]({% link v23.1/export-data-with-changefeeds.md %}) because they provide better performance for growing workloads. Additionally, changefeeds [operate as jobs]({% link v23.1/show-jobs.md %}), which offer [observability]({% link v23.1/monitor-and-debug-changefeeds.md %}) and [job management]({% link v23.1/create-and-configure-changefeeds.md %}). #110717 +- Added limited `statement_statistics` to the `debug.zip` output. #110662 +- Fixed a nil dereference panic during node startup that could be caused by an incorrect initialization order. #109684 +- Fixed a bug where the [`BEGIN` statement]({% link v23.1/begin-transaction.md %}) incorrectly displayed a transaction ID in telemetry logs. It now correctly shows no transaction ID, since there is no open transaction when `BEGIN` is executed. #109840 +- Fixed a bug that could cause a transaction performing multiple parallel foreign key checks to return a `concurrent txn use detected` error. #109849 +- Fixed a bug where dependencies on sequences from tables would be reported with the wrong value for the `classid` column in the `pg_catalog.pg_depend` table. #110207 +- Fixed edge cases in decimal and float evaluation for division operators. 
`'NaN'::DECIMAL / 0` will now return `NaN` instead of a division-by-zero error, and `0 / 'inf'::DECIMAL` will return `0` instead of `0E-2019`. #110296 +- Fixed a DB Console issue where the `DROP_UNUSED` index recommendations produced by the table details page produced an invalid `DROP INDEX` statement. #110453 +- Executing two [`ALTER RANGE default CONFIGURE ZONE`]({% link v23.1/alter-range.md %}#configure-zone) statements on the same line no longer triggers an error. #110337 +- Removed a buggy TTL descriptor repair. Previously, upgrading from v22.2.x to 23.1.9 incorrectly removed [TTL storage parameters]({% link v23.1/row-level-ttl.md %}#ttl-storage-parameters) from tables (visible via [`SHOW CREATE TABLE`]({% link v23.1/show-create.md %})) while attempting to repair table descriptors. A node that attempts to run the TTL [job](https://www.cockroachlabs.com/docs/v23.1/show-jobs.html) could crash due to a panic caused by the missing TTL storage parameters. Clusters currently on v22.2.x should **not** be upgraded to v23.1.9, but should move directly to v23.1.10 or later. For more information, refer to [Technical Advisory 110363](../advisories/a110363.html). #110500 +- Fixed a performance regression when disabling `sql.metrics.statement_details.enabled`, which caused execution stats to be collected for all queries instead of the default 1% of queries. #109881 +- `cockroach debug pebble` commands now work correctly with encrypted stores which don't use the default `cockroach-data` path, and you no longer need to pass the `--store` option. #110507 +- The [**Database** pages]({% link cockroachcloud/databases-page.md %}) no longer display `undefined` regions or outdated node information. #110741 +- Fixed a bug where `RESET (ttl_expire_after)` could incorrectly remove `ttl_expiration_expression`. #110746 +- Fixed a bug where an [`ALTER TABLE ... 
ADD CONSTRAINT CHECK ...`]({% link v23.1/alter-table.md %}#add-constraint) statement with a user-defined function (UDF) in the `CHECK` could cause a validation error. #110720 +- Fixed a bug where `CREATE INDEX` for [partial indexes]({% link v23.1/partial-indexes.md %}) could fail with `ERROR: duplicate key value violates unique constraint` if concurrent inserts happened simultaneously. #110584 +- Fixed a bug where a [`CREATE TABLE`]({% link v23.1/create-table.md %}) command with an `IDENTITY` column did not properly propagate the type of the column into the sequence. #111014 +- Fixed a bug where the `format_type` built-in did not honor `typemod` information for array types, leading to incorrect output. #110940 +- Fixed compaction behavior to prevent rapid sublevel growth when removing replicas from a store, such as during decommissioning. #111141 +- Fixed a bug that could occur when the [multiple active portals]({% link v23.1/postgresql-compatibility.md %}#multiple-active-portals) execution mode (Preview) was enabled to evaluate queries such as lookup joins. The bug could result in an internal error like `unexpected 40960 leftover bytes` if the portal was not fully consumed. #110666 +- External connection URLs now accept the scheme `azure-blob` for connections to Azure Blob Storage and the scheme `azure-kms` for connections to Azure KMS. For backward compatibility, the schemes `azure` and `azure-storage` continue to work for connections to Azure Blob Storage. #111246 +- Fixed a bug where changing the setting `server.telemetry.hot_ranges_stats.interval` had no effect. #111373 +- Added a check for values before using `mean` on the DB Console [Plan Details page]({% link v23.1/ui-statements-page.md %}), fixing a crash. #111505 +- Fixed a bug where dependencies on sequences from tables would be reported with the wrong value for the `classid` column in the `pg_catalog.pg_depend` table. 
#111600 +- Fixed a bug where `atttypmod` in `pg_attribute` was not populated for [`TIMESTAMP`]({% link v23.1/timestamp.md %}) / [`INTERVAL`]({% link v23.1/interval.md %}) types, which meant that ORMs could not know the precision of these types properly. #111727 +- Fixed a bug in the DB Console [Transaction Insight Details]({% link v23.1/ui-insights-page.md %}) page, which showed contention details of other transactions. Now, CockroachDB will only surface contention details for the current transaction. #111880 +- Fixed a bug where `indoption` inside `pg_index` was not properly encoded. Clients were unable to decode it as `int2vector`. #111957 +- RPC failures on writes now use the parallel commit protocol and execute in parallel to the commit operation. This change prevents incorrect retryable failures and `transaction unexpectedly committed` assertions by detecting when writes cannot be retried idempotently, instead returning an `AmbiguousResultError`. #111876 +- Fixed a bug that prevented the [optimizer]({% link v23.1/cost-based-optimizer.md %}) from honoring the `statement_timeout` session setting when generating constrained index scans for queries with large `IN` lists or `= ANY` predicates on multiple index key columns. This bug could cause an Out-of-Memory (OOM) condition on the node. #112076 +- Fixed a bug that caused internal errors during query optimization in rare cases. The bug has been present since version v2.1.11, but it is more likely to occur in version v21.2.0 and later, though it is still rare. The bug only presents when a query contains `min` and `max` [aggregate functions]({% link v23.1/functions-and-operators.md %}#aggregate-functions). #112254 +- Fixed a bug where a lookup or index join on a table with at least three column families could be evaluated incorrectly, leading to a "non-nullable column with no value" error or incorrect query results. The bug was introduced in v22.2. 
#113107 +- Fixed a bug that could cause internal errors or panics while attempting to forecast statistics on a numeric column. #113881

Performance improvements

-- Queries that compare collated strings now use less memory and may execute faster. [#110147][#110147] -- Reduced the impact of high-concurrency blind writes to the same key on goroutine scheduling latency. [#109370][#109370] +- Queries that compare collated strings now use less memory and may execute faster. #110147 +- Reduced the impact of high-concurrency blind writes to the same key on goroutine scheduling latency. #109370
@@ -92,70 +92,3 @@ This release includes 207 merged PRs by 64 authors.
-[#109370]: https://github.com/cockroachdb/cockroach/pull/109370 -[#109639]: https://github.com/cockroachdb/cockroach/pull/109639 -[#109684]: https://github.com/cockroachdb/cockroach/pull/109684 -[#109840]: https://github.com/cockroachdb/cockroach/pull/109840 -[#109849]: https://github.com/cockroachdb/cockroach/pull/109849 -[#109881]: https://github.com/cockroachdb/cockroach/pull/109881 -[#109902]: https://github.com/cockroachdb/cockroach/pull/109902 -[#110147]: https://github.com/cockroachdb/cockroach/pull/110147 -[#110207]: https://github.com/cockroachdb/cockroach/pull/110207 -[#110246]: https://github.com/cockroachdb/cockroach/pull/110246 -[#110256]: https://github.com/cockroachdb/cockroach/pull/110256 -[#110266]: https://github.com/cockroachdb/cockroach/pull/110266 -[#110296]: https://github.com/cockroachdb/cockroach/pull/110296 -[#110337]: https://github.com/cockroachdb/cockroach/pull/110337 -[#110338]: https://github.com/cockroachdb/cockroach/pull/110338 -[#110342]: https://github.com/cockroachdb/cockroach/pull/110342 -[#110359]: https://github.com/cockroachdb/cockroach/pull/110359 -[#110369]: https://github.com/cockroachdb/cockroach/pull/110369 -[#110453]: https://github.com/cockroachdb/cockroach/pull/110453 -[#110497]: https://github.com/cockroachdb/cockroach/pull/110497 -[#110500]: https://github.com/cockroachdb/cockroach/pull/110500 -[#110507]: https://github.com/cockroachdb/cockroach/pull/110507 -[#110564]: https://github.com/cockroachdb/cockroach/pull/110564 -[#110584]: https://github.com/cockroachdb/cockroach/pull/110584 -[#110588]: https://github.com/cockroachdb/cockroach/pull/110588 -[#110611]: https://github.com/cockroachdb/cockroach/pull/110611 -[#110662]: https://github.com/cockroachdb/cockroach/pull/110662 -[#110666]: https://github.com/cockroachdb/cockroach/pull/110666 -[#110680]: https://github.com/cockroachdb/cockroach/pull/110680 -[#110717]: https://github.com/cockroachdb/cockroach/pull/110717 -[#110720]: 
https://github.com/cockroachdb/cockroach/pull/110720 -[#110741]: https://github.com/cockroachdb/cockroach/pull/110741 -[#110746]: https://github.com/cockroachdb/cockroach/pull/110746 -[#110940]: https://github.com/cockroachdb/cockroach/pull/110940 -[#110963]: https://github.com/cockroachdb/cockroach/pull/110963 -[#110983]: https://github.com/cockroachdb/cockroach/pull/110983 -[#111006]: https://github.com/cockroachdb/cockroach/pull/111006 -[#111014]: https://github.com/cockroachdb/cockroach/pull/111014 -[#111058]: https://github.com/cockroachdb/cockroach/pull/111058 -[#111075]: https://github.com/cockroachdb/cockroach/pull/111075 -[#111131]: https://github.com/cockroachdb/cockroach/pull/111131 -[#111141]: https://github.com/cockroachdb/cockroach/pull/111141 -[#111246]: https://github.com/cockroachdb/cockroach/pull/111246 -[#111278]: https://github.com/cockroachdb/cockroach/pull/111278 -[#111287]: https://github.com/cockroachdb/cockroach/pull/111287 -[#111310]: https://github.com/cockroachdb/cockroach/pull/111310 -[#111373]: https://github.com/cockroachdb/cockroach/pull/111373 -[#111412]: https://github.com/cockroachdb/cockroach/pull/111412 -[#111496]: https://github.com/cockroachdb/cockroach/pull/111496 -[#111505]: https://github.com/cockroachdb/cockroach/pull/111505 -[#111600]: https://github.com/cockroachdb/cockroach/pull/111600 -[#111641]: https://github.com/cockroachdb/cockroach/pull/111641 -[#111652]: https://github.com/cockroachdb/cockroach/pull/111652 -[#111727]: https://github.com/cockroachdb/cockroach/pull/111727 -[#111876]: https://github.com/cockroachdb/cockroach/pull/111876 -[#111880]: https://github.com/cockroachdb/cockroach/pull/111880 -[#111901]: https://github.com/cockroachdb/cockroach/pull/111901 -[#111957]: https://github.com/cockroachdb/cockroach/pull/111957 -[#112038]: https://github.com/cockroachdb/cockroach/pull/112038 -[#112076]: https://github.com/cockroachdb/cockroach/pull/112076 -[#112204]: 
https://github.com/cockroachdb/cockroach/pull/112204 -[#112254]: https://github.com/cockroachdb/cockroach/pull/112254 -[#112323]: https://github.com/cockroachdb/cockroach/pull/112323 -[#113107]: https://github.com/cockroachdb/cockroach/pull/113107 -[#113881]: https://github.com/cockroachdb/cockroach/pull/113881 -[5b78f1812]: https://github.com/cockroachdb/cockroach/commit/5b78f1812 -[d3ec7909c]: https://github.com/cockroachdb/cockroach/commit/d3ec7909c diff --git a/src/current/_includes/releases/v23.1/v23.1.13.md b/src/current/_includes/releases/v23.1/v23.1.13.md index 240b565113b..fe5c6a00e2b 100644 --- a/src/current/_includes/releases/v23.1/v23.1.13.md +++ b/src/current/_includes/releases/v23.1/v23.1.13.md @@ -8,46 +8,46 @@ Release Date: December 11, 2023 - A new option for the [`RESTORE`](https://www.cockroachlabs.com/docs/v23.1/restore.html) syntax, `strip_localities`, has been added. This can be used to strip the [locality information](https://www.cockroachlabs.com/docs/v23.1/alter-table.html#set-locality) from a backup when there are mismatched [cluster regions](https://www.cockroachlabs.com/docs/v23.1/multiregion-overview.html#cluster-regions) between the backup's cluster and the target cluster. 
The following are behaviors that will most likely not be encountered with the specific use case that this patch provides, but are documented nonetheless: - Adding a [primary region](https://www.cockroachlabs.com/docs/v23.1/alter-database.html#set-primary-region) to a regionless restore (with or without [regional by row table(s)](https://www.cockroachlabs.com/docs/v23.1/table-localities.html#regional-by-row-tables)) will not work out-of-the-box, but does produce an accurate message instructing a user to [`DROP TYPE {database}.public.crdb_internal_region;`](https://www.cockroachlabs.com/docs/v23.1/drop-type.html) (and for cluster restores, [`ALTER DATABASE {database} CONFIGURE ZONE DISCARD;`](https://www.cockroachlabs.com/docs/v23.1/alter-database.html#remove-a-replication-zone)). - - Restoring a cluster/database/table with a [regional by row table](https://www.cockroachlabs.com/docs/v23.1/table-localities.html#regional-by-row-tables) will not work out-of-the box. In particular, when performing writes, the [`crdb_region` column](https://www.cockroachlabs.com/docs/v23.1/alter-table.html#crdb_region) needs to specify the region of the new row(s) being written to the table. The user will need to alter said column and set a default that makes sense, along with discarding the [zone configuration](https://www.cockroachlabs.com/docs/v23.1/configure-replication-zones.html) (this latter is due to the fact that the zone config holds all outdated info related to the [partitions](https://www.cockroachlabs.com/docs/v23.1/partitioning.html), [constraints](https://www.cockroachlabs.com/docs/v23.1/constraints.html), etc.). These are due to a conflict with the `crdb_region` column already being present in the regionless restore. This column specifies each row's home region and is a prefix to the table's primary key. Stripping localities does not touch this column as it would be an expensive operation that includes rewriting the entire table. 
[#111863][#111863] -- The [`RESTORE`](https://www.cockroachlabs.com/docs/v23.1/restore.html) option `strip_localities` has been renamed to `remove_regions`. [#111863][#111863] -- You can no longer perform a [`RESTORE`](https://www.cockroachlabs.com/docs/v23.1/restore.html) with the `remove_regions` option if the object being restored contains a [regional by row table](https://www.cockroachlabs.com/docs/v23.1/table-localities.html#regional-by-row-tables). [#111863][#111863] -- Added a [builtin function](https://www.cockroachlabs.com/docs/v23.1/functions-and-operators.html) `jsonb_array_to_string_array` that converts a [JSONB](https://www.cockroachlabs.com/docs/v23.1/jsonb.html) array to a string array. [#112864][#112864] -- Updated the [builtin function](https://www.cockroachlabs.com/docs/v23.1/functions-and-operators.html) `jsonb_array_to_string_array` to return [_NULL_](https://www.cockroachlabs.com/docs/v23.1/null-handling.html) objects. Previously, they were removed from the output. [#112864][#112864] -- Fixed the [**SQL Activity** page](https://www.cockroachlabs.com/docs/v23.1/monitoring-and-alerting.html#sql-activity-pages) update job to avoid conflicts on update, reduce the amount of data cached to just what the overview page requires, and fix the correctness of the top queries. [#112864][#112864] + - Restoring a cluster/database/table with a [regional by row table](https://www.cockroachlabs.com/docs/v23.1/table-localities.html#regional-by-row-tables) will not work out-of-the box. In particular, when performing writes, the [`crdb_region` column](https://www.cockroachlabs.com/docs/v23.1/alter-table.html#crdb_region) needs to specify the region of the new row(s) being written to the table. 
The user will need to alter said column and set a default that makes sense, along with discarding the [zone configuration](https://www.cockroachlabs.com/docs/v23.1/configure-replication-zones.html) (this latter is due to the fact that the zone config holds all outdated info related to the [partitions](https://www.cockroachlabs.com/docs/v23.1/partitioning.html), [constraints](https://www.cockroachlabs.com/docs/v23.1/constraints.html), etc.). These are due to a conflict with the `crdb_region` column already being present in the regionless restore. This column specifies each row's home region and is a prefix to the table's primary key. Stripping localities does not touch this column as it would be an expensive operation that includes rewriting the entire table. #111863 +- The [`RESTORE`](https://www.cockroachlabs.com/docs/v23.1/restore.html) option `strip_localities` has been renamed to `remove_regions`. #111863 +- You can no longer perform a [`RESTORE`](https://www.cockroachlabs.com/docs/v23.1/restore.html) with the `remove_regions` option if the object being restored contains a [regional by row table](https://www.cockroachlabs.com/docs/v23.1/table-localities.html#regional-by-row-tables). #111863 +- Added a [builtin function](https://www.cockroachlabs.com/docs/v23.1/functions-and-operators.html) `jsonb_array_to_string_array` that converts a [JSONB](https://www.cockroachlabs.com/docs/v23.1/jsonb.html) array to a string array. #112864 +- Updated the [builtin function](https://www.cockroachlabs.com/docs/v23.1/functions-and-operators.html) `jsonb_array_to_string_array` to return [_NULL_](https://www.cockroachlabs.com/docs/v23.1/null-handling.html) objects. Previously, they were removed from the output. 
#112864 +- Fixed the [**SQL Activity** page](https://www.cockroachlabs.com/docs/v23.1/monitoring-and-alerting.html#sql-activity-pages) update job to avoid conflicts on update, reduce the amount of data cached to just what the overview page requires, and fix the correctness of the top queries. #112864

Operational changes

- Added [metrics](https://www.cockroachlabs.com/docs/v23.1/metrics.html) for [Raft](https://www.cockroachlabs.com/docs/v23.1/architecture/replication-layer.html#raft) proposals and reproposals, specifically: - `raft.commands.proposed`: commands proposed to Raft by [leaseholders](https://www.cockroachlabs.com/docs/v23.1/architecture/overview.html#architecture-leaseholder). (Note that this metric includes both of the reproposed metrics below.) - `raft.commands.reproposed.unchanged`: commands retried/reproposed to Raft because they take too long to apply (so they might be dropped). - - `raft.commands.reproposed.new-lai`: commands retried/reproposed to Raft because they were committed to Raft out of order (failed the LAI (lease applied index) or [closed timestamp](https://www.cockroachlabs.com/docs/v23.1/architecture/transaction-layer.html#closed-timestamps) check). [#113153][#113153] -- Added a new [cluster setting](https://www.cockroachlabs.com/docs/v23.1/cluster-settings.html) `kv.gc.sticky_hint.enabled` that helps expedite [garbage collection](https://www.cockroachlabs.com/docs/v23.1/architecture/storage-layer.html#garbage-collection) after range deletions, such as when a SQL table or index is dropped. This setting is disabled by default. [#110643][#110643] + - `raft.commands.reproposed.new-lai`: commands retried/reproposed to Raft because they were committed to Raft out of order (failed the LAI (lease applied index) or [closed timestamp](https://www.cockroachlabs.com/docs/v23.1/architecture/transaction-layer.html#closed-timestamps) check). #113153 +- Added a new [cluster setting](https://www.cockroachlabs.com/docs/v23.1/cluster-settings.html) `kv.gc.sticky_hint.enabled` that helps expedite [garbage collection](https://www.cockroachlabs.com/docs/v23.1/architecture/storage-layer.html#garbage-collection) after range deletions, such as when a SQL table or index is dropped. This setting is disabled by default. #110643

DB Console changes

-- [DB Console](https://www.cockroachlabs.com/docs/v23.1/ui-overview.html) instances proxied at different subpaths that use [OIDC](https://www.cockroachlabs.com/docs/cockroachcloud/configure-cloud-org-sso.html#oidc) will now point to the correct relative path when attempting to use OIDC login. [#111290][#111290] +- [DB Console](https://www.cockroachlabs.com/docs/v23.1/ui-overview.html) instances proxied at different subpaths that use [OIDC](https://www.cockroachlabs.com/docs/cockroachcloud/configure-cloud-org-sso.html#oidc) will now point to the correct relative path when attempting to use OIDC login. #111290

Bug fixes

-- Fixed a bug where the `pg_attribute` could have sparsely populated `attnum`s since dropped columns were not included. Previously, the attribute number generated inside `pg_attribute` could be sparse because there would be gaps after columns were dropped. This could be problematic for ORMs since this would mean that attribute numbers could be sparse, and they may not be designed to handle such gaps. To address this, this patch adds dropped synthetic columns into the `pg_attribute` table, which allows these tools to work correctly. [#111119][#111119] -- Fixed a bug that could prevent [`RESTORE`](https://www.cockroachlabs.com/docs/v23.1/restore.html) from working if it was performed during a [cluster upgrade](https://www.cockroachlabs.com/docs/v23.1/upgrade-cockroach-version.html). [#112758][#112758] -- Fixed a bug where queries with the `st_union` [aggregate function](https://www.cockroachlabs.com/docs/v23.1/functions-and-operators.html#aggregate-functions) could produce incorrect results in some cases due to the [query optimizer](https://www.cockroachlabs.com/docs/v23.1/cost-based-optimizer.html) performing invalid optimizations. This bug has been present since the `st_union` function was introduced in v20.2.0. [#112795][#112795] -- A warning for [technical advisory 99561](https://www.cockroachlabs.com/docs/advisories/a99561) could incorrectly surface when [dropping secondary indexes](https://www.cockroachlabs.com/docs/v23.1/drop-index.html) that store [primary key](https://www.cockroachlabs.com/docs/v23.1/primary-key.html) columns. This is now fixed. [#112906][#112906] -- Fixed a bug where creating a [trigram index](https://www.cockroachlabs.com/docs/v23.1/trigram-indexes.html) and later displaying it via [`SHOW CREATE TABLE`](https://www.cockroachlabs.com/docs/v23.1/show-create.html), would not show the opclass for this trigram index. 
[#113077][#113077] -- Fixed a bug introduced in v22.2 where CockroachDB could incorrectly evaluate [lookup and index joins](https://www.cockroachlabs.com/docs/v23.1/joins.html) into tables with at least 3 [column families](https://www.cockroachlabs.com/docs/v23.1/column-families.html) in some cases (either a `non-nullable column with no value` internal error would occur, or the query would return incorrect results). [#113108][#113108] -- Fixed a bug that could occasionally cause [schema change jobs](https://www.cockroachlabs.com/docs/v23.1/online-schema-changes.html) (e.g., table/index drops) to appear stuck in state "waiting for MVCC GC" for much longer than expected. This fix only applies to future schema changes. Existing stuck jobs can be processed by manually force-enqueueing the relevant [ranges](https://www.cockroachlabs.com/docs/v23.1/architecture/overview.html#architecture-range) in the MVCC GC queue under the [DB Console's **Advanced Debug** page](https://www.cockroachlabs.com/docs/v23.1/ui-debug-pages.html). [#110643][#110643] -- Fixed a bug that could cause internal errors or panics while attempting to forecast [statistics](https://www.cockroachlabs.com/docs/v23.1/cost-based-optimizer.html#table-statistics) on a numeric column. [#113798][#113798] -- Fixed a bug where [`ALTER PRIMARY KEY`](https://www.cockroachlabs.com/docs/v23.1/alter-table.html#alter-primary-key) would incorrectly disable [secondary indexes](https://www.cockroachlabs.com/docs/v23.1/indexes.html) while new secondary indexes were being backfilled when using the [declarative schema changer](https://www.cockroachlabs.com/docs/v23.1/online-schema-changes.html). [#113183][#113183] -- Previously, when executing queries with [index / lookup joins](https://www.cockroachlabs.com/docs/v23.1/joins.html) when the ordering needs to be maintained, CockroachDB could in some cases inadvertently increase query latency, possibly by 1 or 2 orders of magnitude. 
This bug was introduced in v22.2 and is now fixed. [#114143][#114143] -- Fixed a bug where the [`SHOW STATISTICS`](https://www.cockroachlabs.com/docs/v23.1/show-statistics.html) command would incorrectly require the user to have the [admin role](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization.html#admin-role). It was intended to only require the user to have any privilege on the table being inspected. [#114479][#114479] -- Fixed a bug that could cause a query plan to skip scanning rows from the local region when performing a [lookup join](https://www.cockroachlabs.com/docs/v23.1/joins.html) with a [`REGIONAL BY ROW`](https://www.cockroachlabs.com/docs/v23.1/table-localities.html#regional-by-row-tables) table as the input. [#114456][#114456] -- Fixed a bug that could cause [`ALTER DATABASE ... ADD/DROP REGION`](https://www.cockroachlabs.com/docs/v23.1/alter-database.html#add-region) to hang if node localities were changed after regions were added. [#114197][#114197] +- Fixed a bug where the `pg_attribute` could have sparsely populated `attnum`s since dropped columns were not included. Previously, the attribute number generated inside `pg_attribute` could be sparse because there would be gaps after columns were dropped. This could be problematic for ORMs since this would mean that attribute numbers could be sparse, and they may not be designed to handle such gaps. To address this, this patch adds dropped synthetic columns into the `pg_attribute` table, which allows these tools to work correctly. #111119 +- Fixed a bug that could prevent [`RESTORE`](https://www.cockroachlabs.com/docs/v23.1/restore.html) from working if it was performed during a [cluster upgrade](https://www.cockroachlabs.com/docs/v23.1/upgrade-cockroach-version.html). 
#112758 +- Fixed a bug where queries with the `st_union` [aggregate function](https://www.cockroachlabs.com/docs/v23.1/functions-and-operators.html#aggregate-functions) could produce incorrect results in some cases due to the [query optimizer](https://www.cockroachlabs.com/docs/v23.1/cost-based-optimizer.html) performing invalid optimizations. This bug has been present since the `st_union` function was introduced in v20.2.0. #112795 +- A warning for [technical advisory 99561](https://www.cockroachlabs.com/docs/advisories/a99561) could incorrectly surface when [dropping secondary indexes](https://www.cockroachlabs.com/docs/v23.1/drop-index.html) that store [primary key](https://www.cockroachlabs.com/docs/v23.1/primary-key.html) columns. This is now fixed. #112906 +- Fixed a bug where creating a [trigram index](https://www.cockroachlabs.com/docs/v23.1/trigram-indexes.html) and later displaying it via [`SHOW CREATE TABLE`](https://www.cockroachlabs.com/docs/v23.1/show-create.html), would not show the opclass for this trigram index. #113077 +- Fixed a bug introduced in v22.2 where CockroachDB could incorrectly evaluate [lookup and index joins](https://www.cockroachlabs.com/docs/v23.1/joins.html) into tables with at least 3 [column families](https://www.cockroachlabs.com/docs/v23.1/column-families.html) in some cases (either a `non-nullable column with no value` internal error would occur, or the query would return incorrect results). #113108 +- Fixed a bug that could occasionally cause [schema change jobs](https://www.cockroachlabs.com/docs/v23.1/online-schema-changes.html) (e.g., table/index drops) to appear stuck in state "waiting for MVCC GC" for much longer than expected. This fix only applies to future schema changes. 
Existing stuck jobs can be processed by manually force-enqueueing the relevant [ranges](https://www.cockroachlabs.com/docs/v23.1/architecture/overview.html#architecture-range) in the MVCC GC queue under the [DB Console's **Advanced Debug** page](https://www.cockroachlabs.com/docs/v23.1/ui-debug-pages.html). #110643 +- Fixed a bug that could cause internal errors or panics while attempting to forecast [statistics](https://www.cockroachlabs.com/docs/v23.1/cost-based-optimizer.html#table-statistics) on a numeric column. #113798 +- Fixed a bug where [`ALTER PRIMARY KEY`](https://www.cockroachlabs.com/docs/v23.1/alter-table.html#alter-primary-key) would incorrectly disable [secondary indexes](https://www.cockroachlabs.com/docs/v23.1/indexes.html) while new secondary indexes were being backfilled when using the [declarative schema changer](https://www.cockroachlabs.com/docs/v23.1/online-schema-changes.html). #113183 +- Previously, when executing queries with [index / lookup joins](https://www.cockroachlabs.com/docs/v23.1/joins.html) when the ordering needs to be maintained, CockroachDB could in some cases inadvertently increase query latency, possibly by 1 or 2 orders of magnitude. This bug was introduced in v22.2 and is now fixed. #114143 +- Fixed a bug where the [`SHOW STATISTICS`](https://www.cockroachlabs.com/docs/v23.1/show-statistics.html) command would incorrectly require the user to have the [admin role](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization.html#admin-role). It was intended to only require the user to have any privilege on the table being inspected. #114479 +- Fixed a bug that could cause a query plan to skip scanning rows from the local region when performing a [lookup join](https://www.cockroachlabs.com/docs/v23.1/joins.html) with a [`REGIONAL BY ROW`](https://www.cockroachlabs.com/docs/v23.1/table-localities.html#regional-by-row-tables) table as the input. #114456 +- Fixed a bug that could cause [`ALTER DATABASE ... 
ADD/DROP REGION`](https://www.cockroachlabs.com/docs/v23.1/alter-database.html#add-region) to hang if node localities were changed after regions were added. #114197

Performance improvements

-- Addressed a performance regression that could happen when the [declarative schema changer](https://www.cockroachlabs.com/docs/v23.1/online-schema-changes.html) is being used to [create an index](https://www.cockroachlabs.com/docs/v23.1/create-index.html) with a concurrent workload. [#113724][#113724] -- Added an off-by-default [cluster setting](https://www.cockroachlabs.com/docs/v23.1/cluster-settings.html), `kv.dist_sender.follower_reads_unhealthy.enabled`, which when enabled will prevent failed requests from being issued on followers that are [draining](https://www.cockroachlabs.com/docs/v23.1/node-shutdown.html#draining), [decommissioning](https://www.cockroachlabs.com/docs/v23.1/node-shutdown?filters=decommission), or otherwise unhealthy. This will prevent [follower reads](https://www.cockroachlabs.com/docs/v23.1/follower-reads.html) against nodes in such states. This prevents latency spikes if those nodes later go offline. [#114367][#114367] -- [Query planning](https://www.cockroachlabs.com/docs/v23.1/cost-based-optimizer.html) time has been reduced significantly for some queries in which many tables are [joined](https://www.cockroachlabs.com/docs/v23.1/joins.html). [#114835][#114835] +- Addressed a performance regression that could happen when the [declarative schema changer](https://www.cockroachlabs.com/docs/v23.1/online-schema-changes.html) is being used to [create an index](https://www.cockroachlabs.com/docs/v23.1/create-index.html) with a concurrent workload. #113724 +- Added an off-by-default [cluster setting](https://www.cockroachlabs.com/docs/v23.1/cluster-settings.html), `kv.dist_sender.follower_reads_unhealthy.enabled`, which when enabled will prevent failed requests from being issued on followers that are [draining](https://www.cockroachlabs.com/docs/v23.1/node-shutdown.html#draining), [decommissioning](https://www.cockroachlabs.com/docs/v23.1/node-shutdown?filters=decommission), or otherwise unhealthy. 
This will prevent [follower reads](https://www.cockroachlabs.com/docs/v23.1/follower-reads.html) against nodes in such states. This prevents latency spikes if those nodes later go offline. #114367 +- [Query planning](https://www.cockroachlabs.com/docs/v23.1/cost-based-optimizer.html) time has been reduced significantly for some queries in which many tables are [joined](https://www.cockroachlabs.com/docs/v23.1/joins.html). #114835
@@ -57,26 +57,3 @@ This release includes 118 merged PRs by 42 authors.
-[#110643]: https://github.com/cockroachdb/cockroach/pull/110643 -[#111119]: https://github.com/cockroachdb/cockroach/pull/111119 -[#111290]: https://github.com/cockroachdb/cockroach/pull/111290 -[#111863]: https://github.com/cockroachdb/cockroach/pull/111863 -[#112758]: https://github.com/cockroachdb/cockroach/pull/112758 -[#112795]: https://github.com/cockroachdb/cockroach/pull/112795 -[#112864]: https://github.com/cockroachdb/cockroach/pull/112864 -[#112906]: https://github.com/cockroachdb/cockroach/pull/112906 -[#113039]: https://github.com/cockroachdb/cockroach/pull/113039 -[#113077]: https://github.com/cockroachdb/cockroach/pull/113077 -[#113108]: https://github.com/cockroachdb/cockroach/pull/113108 -[#113153]: https://github.com/cockroachdb/cockroach/pull/113153 -[#113171]: https://github.com/cockroachdb/cockroach/pull/113171 -[#113183]: https://github.com/cockroachdb/cockroach/pull/113183 -[#113724]: https://github.com/cockroachdb/cockroach/pull/113724 -[#113798]: https://github.com/cockroachdb/cockroach/pull/113798 -[#114143]: https://github.com/cockroachdb/cockroach/pull/114143 -[#114197]: https://github.com/cockroachdb/cockroach/pull/114197 -[#114367]: https://github.com/cockroachdb/cockroach/pull/114367 -[#114456]: https://github.com/cockroachdb/cockroach/pull/114456 -[#114479]: https://github.com/cockroachdb/cockroach/pull/114479 -[#114529]: https://github.com/cockroachdb/cockroach/pull/114529 -[#114835]: https://github.com/cockroachdb/cockroach/pull/114835 diff --git a/src/current/_includes/releases/v23.1/v23.1.14.md b/src/current/_includes/releases/v23.1/v23.1.14.md index 8c0c98f97c0..3962157a6a9 100644 --- a/src/current/_includes/releases/v23.1/v23.1.14.md +++ b/src/current/_includes/releases/v23.1/v23.1.14.md @@ -6,17 +6,17 @@ Release Date: January 17, 2024

SQL language changes

-- CockroachDB now supports [importing into]({% link v23.1/import-into.md %}) a table that has columns typed as arrays of user-defined types (such as enums). Tables that use multiple user-defined types with the same name but different schemas are still unsupported. [#116361][#116361] -- Added a new field, `StmtPosInTxn`, to the `CommonSQLExecDetails` (included in SQL audit logs, SQL execution logs, and telemetry events) to represent the statement's index (position) in the transaction. The first statement's `StmtPosInTxn` is `1`. [#116558][#116558] +- CockroachDB now supports [importing into]({% link v23.1/import-into.md %}) a table that has columns typed as arrays of user-defined types (such as enums). Tables that use multiple user-defined types with the same name but different schemas are still unsupported. #116361 +- Added a new field, `StmtPosInTxn`, to the `CommonSQLExecDetails` (included in SQL audit logs, SQL execution logs, and telemetry events) to represent the statement's index (position) in the transaction. The first statement's `StmtPosInTxn` is `1`. #116558

Operational changes

-- Updated the [`changefeed.lagging_ranges_threshold`]({% link v23.1/cluster-settings.md %}#settings) and [`changefeed.lagging_ranges_polling_interval`]({% link v23.1/cluster-settings.md %}#settings) cluster settings to be public in v23.1. [#115803][#115803] +- Updated the [`changefeed.lagging_ranges_threshold`]({% link v23.1/cluster-settings.md %}#settings) and [`changefeed.lagging_ranges_polling_interval`]({% link v23.1/cluster-settings.md %}#settings) cluster settings to be public in v23.1. #115803

DB Console changes

-- The [**Cluster Overview** page]({% link v23.1/ui-cluster-overview-page.md %}) now correctly renders the background color for email signups, fixing an issue where it was difficult to read the text. [#114546][#114546] -- Updated the **CPU Time** label to **SQL CPU Time** on the [Overview page]({% link v23.1/ui-overview-dashboard.md %}) and clarified the tooltip. [#116448][#116448] +- The [**Cluster Overview** page]({% link v23.1/ui-cluster-overview-page.md %}) now correctly renders the background color for email signups, fixing an issue where it was difficult to read the text. #114546 +- Updated the **CPU Time** label to **SQL CPU Time** on the [Overview page]({% link v23.1/ui-overview-dashboard.md %}) and clarified the tooltip. #116448 - Fixed an issue where the following `AggHistogram`-powered metrics reported quantiles incorrectly in the [Overview page]({% link v23.1/ui-overview-dashboard.md %}). The list of affected metrics is: - `changefeed.message_size_hist` - `changefeed.parallel_io_queue_nanos` @@ -28,34 +28,34 @@ Release Date: January 17, 2024 - `jobs.row_level_ttl.select_duration` - `jobs.row_level_ttl.delete_duration` - This bug affected only DB Console dashboards and not the Prometheus-compatible endpoint `/_status/vars`. [#114747][#114747] -- In the **SQL Activity Transaction Details** page, you can now view a transaction fingerprint ID across multiple applications by passing a comma-separated encoded string of transaction fingerprint IDs in the `appNames` URL search parameter. [#116102][#116102] + This bug affected only DB Console dashboards and not the Prometheus-compatible endpoint `/_status/vars`. #114747 +- In the **SQL Activity Transaction Details** page, you can now view a transaction fingerprint ID across multiple applications by passing a comma-separated encoded string of transaction fingerprint IDs in the `appNames` URL search parameter. #116102

Bug fixes

-- Fixed a bug in in the TimeScale component's **Now** button behavior that could prevent charts from updating after a custom time range was selected.[#115513][#115513] -- Fixed a bug where an active replication report update could get stuck in a retry loop on clusters with over 10000 ranges, which would prevent a node from shutting down cleanly. [#114242][#114242] -- Fixed a nil pointer dereference bug in the error handling for `GetFiles`. [#114829][#114829] -- Fixed a bug that prevented the **SQL Activity** page from showing internal statements when the `sql.stats.response.show_internal.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}) was set to `true`. [#114819][#114819] -- Fixed a bug that could cause a discrepancy between computed statistics and the stored value for statics when a delete was rolled back. [#113772][#113772] -- Fixed a bug introduced in v23.1 that could cause an internal error Previously, CockroachDB could encounter an internal error when using a prepared statement using the `text` format with a user-defined composite type. [#115063][#115063] -- Fixed a bug that would cause a [prepared statement](https://www.cockroachlabs.com/docs/v23.1/sql-grammar#prepare_stmt) to fail if it references an enum as well as a table that has undergone a schema change. [#115174][#115174] -- Fixed a bug that could cause finalization during a major-version upgrade to contend with descriptor lease renewals on a large cluster. Descriptor lease renewals now always have a lower priority than finalization. [#114672][#114672] -- [Backups]({% link v23.1/backup.md %}) now evenly distribute their work across all replicas, including followers, regardless of leaseholder placement. [#115018][#115018] -- Fixed a bug that could cause replica processing in store queues to get stuck if a the replica's ID has changed. 
[#115036][#115036] -- Fixed an bug introduced in v22.2 that could lead to stuck queries or inaccurate results when using lookup [joins]({% link v23.1/joins.md %}) involving equality columns and multiple ranges. [#115581][#115581] -- Fixed a bug in the declarative schema changer that could cause [`CREATE INDEX`]({% link v23.1/create-index.md %}) with expressions to fail on materialized [views]({% link v23.1/views.md %}). [#115596][#115596] -- Fixed a bug that could cause an inaccorate "too few columns" error for a query that used `ANY {array}` syntax with a subquery. [#115591][#115591] -- Fixed a bug that could cause a "too few/many columns" errors for a query that used an `IN` or `NOT IN` clause with a non-trivial right operand, such as a subquery (rather than a constant tuple). [#115591][#115591] -- Fixed a bug introduced in v22.2 that could cause errors or inaccurate results when performing a lookup or index [join]({% link v23.1/joins.md %}) on a table with three or more column families. [#115899][#115899] -- Fixed a bug that could cause an internal error or a panic while attempting to forecast statistics on a numeric column. [#115899][#115899] -- Fixed a bug when issuing a [`BEGIN`]({% link v23.1/begin-transaction.md %}) statement where incorrect `Age` field could be logged in the statement log. This bug could also cause statements to incorrectly appear in the slow query log. [#115257][#115257] -- Fixed a bug in the SQL Statistics UI where the runtime was incorrectly calculated. This bug could cause recorded values to appear to exceed 100%. [#117497][#117497] -- Fixed a bug that could cause a `CREATE CHANGEFEED WITH {key_column}` statement to retry forever. 
[#117555][#117555] +- Fixed a bug in the TimeScale component's **Now** button behavior that could prevent charts from updating after a custom time range was selected. #115513 +- Fixed a bug where an active replication report update could get stuck in a retry loop on clusters with over 10000 ranges, which would prevent a node from shutting down cleanly. #114242 +- Fixed a nil pointer dereference bug in the error handling for `GetFiles`. #114829 +- Fixed a bug that prevented the **SQL Activity** page from showing internal statements when the `sql.stats.response.show_internal.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}) was set to `true`. #114819 +- Fixed a bug that could cause a discrepancy between computed statistics and the stored value for statistics when a delete was rolled back. #113772 +- Fixed a bug introduced in v23.1 that could cause an internal error. Previously, CockroachDB could encounter an internal error when using a prepared statement using the `text` format with a user-defined composite type. #115063 +- Fixed a bug that would cause a [prepared statement](https://www.cockroachlabs.com/docs/v23.1/sql-grammar#prepare_stmt) to fail if it references an enum as well as a table that has undergone a schema change. #115174 +- Fixed a bug that could cause finalization during a major-version upgrade to contend with descriptor lease renewals on a large cluster. Descriptor lease renewals now always have a lower priority than finalization. #114672 +- [Backups]({% link v23.1/backup.md %}) now evenly distribute their work across all replicas, including followers, regardless of leaseholder placement. #115018 +- Fixed a bug that could cause replica processing in store queues to get stuck if the replica's ID has changed. #115036 +- Fixed a bug introduced in v22.2 that could lead to stuck queries or inaccurate results when using lookup [joins]({% link v23.1/joins.md %}) involving equality columns and multiple ranges. 
#115581 +- Fixed a bug in the declarative schema changer that could cause [`CREATE INDEX`]({% link v23.1/create-index.md %}) with expressions to fail on materialized [views]({% link v23.1/views.md %}). #115596 +- Fixed a bug that could cause an inaccurate "too few columns" error for a query that used `ANY {array}` syntax with a subquery. #115591 +- Fixed a bug that could cause "too few/many columns" errors for a query that used an `IN` or `NOT IN` clause with a non-trivial right operand, such as a subquery (rather than a constant tuple). #115591 +- Fixed a bug introduced in v22.2 that could cause errors or inaccurate results when performing a lookup or index [join]({% link v23.1/joins.md %}) on a table with three or more column families. #115899 +- Fixed a bug that could cause an internal error or a panic while attempting to forecast statistics on a numeric column. #115899 +- Fixed a bug when issuing a [`BEGIN`]({% link v23.1/begin-transaction.md %}) statement where incorrect `Age` field could be logged in the statement log. This bug could also cause statements to incorrectly appear in the slow query log. #115257 +- Fixed a bug in the SQL Statistics UI where the runtime was incorrectly calculated. This bug could cause recorded values to appear to exceed 100%. #117497 +- Fixed a bug that could cause a `CREATE CHANGEFEED WITH {key_column}` statement to retry forever. #117555

Performance improvements

-- Reduced query planning time significantly for some queries that join multiple tables. [#114512][#114512][#116107][#116107] +- Reduced query planning time significantly for some queries that join multiple tables. #114512 #116107
@@ -65,32 +65,3 @@ This release includes 128 merged PRs by 42 authors.
-[#113772]: https://github.com/cockroachdb/cockroach/pull/113772 -[#114242]: https://github.com/cockroachdb/cockroach/pull/114242 -[#114512]: https://github.com/cockroachdb/cockroach/pull/114512 -[#114546]: https://github.com/cockroachdb/cockroach/pull/114546 -[#114672]: https://github.com/cockroachdb/cockroach/pull/114672 -[#114747]: https://github.com/cockroachdb/cockroach/pull/114747 -[#114819]: https://github.com/cockroachdb/cockroach/pull/114819 -[#114829]: https://github.com/cockroachdb/cockroach/pull/114829 -[#115018]: https://github.com/cockroachdb/cockroach/pull/115018 -[#115036]: https://github.com/cockroachdb/cockroach/pull/115036 -[#115063]: https://github.com/cockroachdb/cockroach/pull/115063 -[#115174]: https://github.com/cockroachdb/cockroach/pull/115174 -[#115257]: https://github.com/cockroachdb/cockroach/pull/115257 -[#115513]: https://github.com/cockroachdb/cockroach/pull/115513 -[#115581]: https://github.com/cockroachdb/cockroach/pull/115581 -[#115591]: https://github.com/cockroachdb/cockroach/pull/115591 -[#115596]: https://github.com/cockroachdb/cockroach/pull/115596 -[#115603]: https://github.com/cockroachdb/cockroach/pull/115603 -[#115803]: https://github.com/cockroachdb/cockroach/pull/115803 -[#115899]: https://github.com/cockroachdb/cockroach/pull/115899 -[#116102]: https://github.com/cockroachdb/cockroach/pull/116102 -[#116107]: https://github.com/cockroachdb/cockroach/pull/116107 -[#116361]: https://github.com/cockroachdb/cockroach/pull/116361 -[#116448]: https://github.com/cockroachdb/cockroach/pull/116448 -[#116517]: https://github.com/cockroachdb/cockroach/pull/116517 -[#116558]: https://github.com/cockroachdb/cockroach/pull/116558 -[#117497]: https://github.com/cockroachdb/cockroach/pull/117497 -[#117555]: https://github.com/cockroachdb/cockroach/pull/117555 -[d27790ece]: https://github.com/cockroachdb/cockroach/commit/d27790ece diff --git a/src/current/_includes/releases/v23.1/v23.1.15.md 
b/src/current/_includes/releases/v23.1/v23.1.15.md index 98946611bf8..50ef0a9827c 100644 --- a/src/current/_includes/releases/v23.1/v23.1.15.md +++ b/src/current/_includes/releases/v23.1/v23.1.15.md @@ -6,30 +6,30 @@ Release Date: February 20, 2024

Security updates

-- The [DB Console]({% link v23.1/ui-overview.md %}) `session` cookie is now marked `HttpOnly` to prevent it from being read by any Javascript code and is marked `Secure` by the browser when the cluster operates in secure mode. [#119248][#119248] +- The [DB Console]({% link v23.1/ui-overview.md %}) `session` cookie is now marked `HttpOnly` to prevent it from being read by any Javascript code and is marked `Secure` by the browser when the cluster operates in secure mode. #119248

Bug fixes

-- Fixed a bug where [changefeeds]({% link v23.1/change-data-capture-overview.md %}) that targeted schema-locked tables could fail due to an old high-water timestamp being incorrectly persisted. [#117960][#117960] -- Fixed a bug where creating a [changefeed]({% link v23.1/change-data-capture-overview.md %}) that targeted tables with a `DECIMAL(n)` column (that is, zero-scale [`DECIMAL`]({% link v23.1/decimal.md %}) column), `format='avro'`, and `diff` would cause a panic. [#118893][#118893] -- Fixed a bug that could cause a [`CREATE CHANGEFEED WITH {key_column}`]({% link v23.1/create-changefeed.md %}) statement to retry forever. [#116966][#116966] -- Fixed a bug that prevented database [restore]({% link v23.1/restore.md %}) when the database contained a view or routine that referenced a user-defined type in the body string. For views, this bug was introduced in v20.2 when UDTs were introduced. For routines, this bug was introduced in v22.2 when UDFs were introduced. [#116903][#116903] -- Fixed a durability issue in the Raft log storage mechanism due to improper synchronization of filesystem metadata, potentially leading to the loss of specific write operations (`AddSSTable`), notably utilized by operations such as `RESTORE`. This vulnerability was exposed only under conditions of power failure or operating system crashes, potentially causing CockroachDB to enter a crash loop upon restart. In extreme scenarios, such as a simultaneous power outage or crash across multiple nodes, it could result in an irrecoverable quorum loss. [#117383][#117383] -- Fixed an issue in Raft log truncation that had the potential to cause crash loops and irretrievable quorum loss, especially in the rare but severe scenario where all replicas concurrently enter a crash loop. 
This issue emerged under conditions where the cluster was processing a bulk write operation (such as schema changes, imports, or restores), while a log truncation command was active, and the CockroachDB process experienced a crash. [#117299][#117299] -- Fixed the total runtime value referenced in SQL stats, resolving the bug where the [Console]({% link v23.1/ui-overview.md %}) erroneously displayed percentages over 100%. [#117496][#117496] -- Fixed an issue where the values for the current and past hour in the top Activity table were calculated incorrectly, causing a missing data issue in SQL stats and, consequently, on the [SQL Activity]({% link v23.1/ui-overview.md %}#sql-activity) page. [#118427][#118427] -- Fixed a bug where CockroachDB would erroneously return an error if an empty search path parameter was encountered during search path setting. [#117556][#117556] -- Fixed a bug in the [Row-Level TTL]({% link v23.1/row-level-ttl.md %}) job that would cause it to skip expired rows if the primary key of the table included columns of the collated string type. This bug was present since the initial release of row-level TTL in v22.2.0. [#117513][#117513] -- Fixed a bug that could cause [`DELETE`]({% link v23.1/delete.md %}) queries sent by the [Row-Level TTL]({% link v23.1/row-level-ttl.md %}) job to use a secondary index rather than the primary index to find the rows to delete. This could lead to some `DELETE` operations taking a much longer time than they should. This bug was present since v22.2.0. [#118336][#118336] -- Fixed a bug where concurrent [`GRANT`]({% link v23.1/grant.md %}) statements can cause deadlocks. [#117712][#117712] -- Reduced the impact of bulk deletions ([`DROP TABLE`]({% link v23.1/drop-table.md %}), [`TRUNCATE`]({% link v23.1/truncate.md %}), or replica removals) on foreground traffic by altering storage engine compaction priorities. 
[#116560][#116560] -- Resolved an issue where DML operations would fail during the creation of a hash-sharded index, resulting in an error stating column `crdb_internal_val_shard_16` does not exist. This bug was present since v23.1.0. [#118238][#118238] -- [`AUTO CREATE STATS`]({% link v23.1/show-jobs.md %}#show-automatic-jobs) jobs no longer lead to growth in an internal system table resulting in slower job-system related queries. [#118980][#118980] -- [`ALTER PRIMARY KEY`]({% link v23.1/alter-table.md %}#alter-primary-key) no longer fail with an `non-nullable column with no value! Index scanned ..` error when validating recreated secondary indexes. [#118969][#118969] -- Fixed a bug where CockroachDB could encounter an error `unable to encode table key: *tree.DTSQuery` when operating on columns of [`TSQuery`]({% link v23.1/tsquery.md %}) type in some contexts (e.g. when collecting table statistics or when performing a `DISTINCT` operation). The bug has been present since 23.1 when support for TSQuery type was added. [#118320][#118320] -- Fixed a bug where in some cases CockroachDB could incorrectly evaluate queries that scanned an inverted index and had a `WHERE` filter in which two sides of the `AND` expression had "similar" expressions (e.g. `ARRAY['str1'] <@ col AND (ARRAY['str1'] && col OR ...)`). The bug has been present since pre-22.2 version. [#118359][#118359] -- CockroachDB now correctly logs the top 5 hot ranges per cluster instead of per node. [#118371][#118371] -- Fixed a bug where a [changefeed]({% link v23.1/change-data-capture-overview.md %}) could omit events in rare cases, logging the error `cdc ux violation: detected timestamp ... that is less or equal to the local frontier`. 
This can happen if a [rangefeed]({% link v23.1/create-and-configure-changefeeds.md %}#enable-rangefeeds) runs on a follower replica that lags significantly behind the leaseholder, a transaction commits and removes its transaction record before its intent resolution is applied on the follower, the follower's closed timestamp has advanced past the transaction commit timestamp, and the rangefeed attempts to push the transaction to a new timestamp (at least 10 seconds after the transaction began). This may cause the rangefeed to prematurely emit a checkpoint before emitting writes at lower timestamps, which in turn may cause the changefeed to drop these events entirely, never emitting them. [#119270][#119270] +- Fixed a bug where [changefeeds]({% link v23.1/change-data-capture-overview.md %}) that targeted schema-locked tables could fail due to an old high-water timestamp being incorrectly persisted. #117960 +- Fixed a bug where creating a [changefeed]({% link v23.1/change-data-capture-overview.md %}) that targeted tables with a `DECIMAL(n)` column (that is, zero-scale [`DECIMAL`]({% link v23.1/decimal.md %}) column), `format='avro'`, and `diff` would cause a panic. #118893 +- Fixed a bug that could cause a [`CREATE CHANGEFEED WITH {key_column}`]({% link v23.1/create-changefeed.md %}) statement to retry forever. #116966 +- Fixed a bug that prevented database [restore]({% link v23.1/restore.md %}) when the database contained a view or routine that referenced a user-defined type in the body string. For views, this bug was introduced in v20.2 when UDTs were introduced. For routines, this bug was introduced in v22.2 when UDFs were introduced. #116903 +- Fixed a durability issue in the Raft log storage mechanism due to improper synchronization of filesystem metadata, potentially leading to the loss of specific write operations (`AddSSTable`), notably utilized by operations such as `RESTORE`. 
This vulnerability was exposed only under conditions of power failure or operating system crashes, potentially causing CockroachDB to enter a crash loop upon restart. In extreme scenarios, such as a simultaneous power outage or crash across multiple nodes, it could result in an irrecoverable quorum loss. #117383 +- Fixed an issue in Raft log truncation that had the potential to cause crash loops and irretrievable quorum loss, especially in the rare but severe scenario where all replicas concurrently enter a crash loop. This issue emerged under conditions where the cluster was processing a bulk write operation (such as schema changes, imports, or restores), while a log truncation command was active, and the CockroachDB process experienced a crash. #117299 +- Fixed the total runtime value referenced in SQL stats, resolving the bug where the [Console]({% link v23.1/ui-overview.md %}) erroneously displayed percentages over 100%. #117496 +- Fixed an issue where the values for the current and past hour in the top Activity table were calculated incorrectly, causing a missing data issue in SQL stats and, consequently, on the [SQL Activity]({% link v23.1/ui-overview.md %}#sql-activity) page. #118427 +- Fixed a bug where CockroachDB would erroneously return an error if an empty search path parameter was encountered during search path setting. #117556 +- Fixed a bug in the [Row-Level TTL]({% link v23.1/row-level-ttl.md %}) job that would cause it to skip expired rows if the primary key of the table included columns of the collated string type. This bug was present since the initial release of row-level TTL in v22.2.0. #117513 +- Fixed a bug that could cause [`DELETE`]({% link v23.1/delete.md %}) queries sent by the [Row-Level TTL]({% link v23.1/row-level-ttl.md %}) job to use a secondary index rather than the primary index to find the rows to delete. This could lead to some `DELETE` operations taking a much longer time than they should. This bug was present since v22.2.0. 
#118336 +- Fixed a bug where concurrent [`GRANT`]({% link v23.1/grant.md %}) statements could cause deadlocks. #117712 +- Reduced the impact of bulk deletions ([`DROP TABLE`]({% link v23.1/drop-table.md %}), [`TRUNCATE`]({% link v23.1/truncate.md %}), or replica removals) on foreground traffic by altering storage engine compaction priorities. #116560 +- Resolved an issue where DML operations would fail during the creation of a hash-sharded index, resulting in an error stating column `crdb_internal_val_shard_16` does not exist. This bug was present since v23.1.0. #118238 +- [`AUTO CREATE STATS`]({% link v23.1/show-jobs.md %}#show-automatic-jobs) jobs no longer lead to growth in an internal system table resulting in slower job-system related queries. #118980 +- [`ALTER PRIMARY KEY`]({% link v23.1/alter-table.md %}#alter-primary-key) no longer fails with a `non-nullable column with no value! Index scanned ..` error when validating recreated secondary indexes. #118969 +- Fixed a bug where CockroachDB could encounter an error `unable to encode table key: *tree.DTSQuery` when operating on columns of [`TSQuery`]({% link v23.1/tsquery.md %}) type in some contexts (e.g. when collecting table statistics or when performing a `DISTINCT` operation). The bug has been present since 23.1 when support for TSQuery type was added. #118320 +- Fixed a bug where in some cases CockroachDB could incorrectly evaluate queries that scanned an inverted index and had a `WHERE` filter in which two sides of the `AND` expression had "similar" expressions (e.g. `ARRAY['str1'] <@ col AND (ARRAY['str1'] && col OR ...)`). The bug has been present since before v22.2. #118359 +- CockroachDB now correctly logs the top 5 hot ranges per cluster instead of per node. #118371 +- Fixed a bug where a [changefeed]({% link v23.1/change-data-capture-overview.md %}) could omit events in rare cases, logging the error `cdc ux violation: detected timestamp ... that is less or equal to the local frontier`. 
This can happen if a [rangefeed]({% link v23.1/create-and-configure-changefeeds.md %}#enable-rangefeeds) runs on a follower replica that lags significantly behind the leaseholder, a transaction commits and removes its transaction record before its intent resolution is applied on the follower, the follower's closed timestamp has advanced past the transaction commit timestamp, and the rangefeed attempts to push the transaction to a new timestamp (at least 10 seconds after the transaction began). This may cause the rangefeed to prematurely emit a checkpoint before emitting writes at lower timestamps, which in turn may cause the changefeed to drop these events entirely, never emitting them. #119270
@@ -42,32 +42,3 @@ We would like to thank the following contributors from the CockroachDB community
-[#116560]: https://github.com/cockroachdb/cockroach/pull/116560 -[#116903]: https://github.com/cockroachdb/cockroach/pull/116903 -[#116966]: https://github.com/cockroachdb/cockroach/pull/116966 -[#117299]: https://github.com/cockroachdb/cockroach/pull/117299 -[#117383]: https://github.com/cockroachdb/cockroach/pull/117383 -[#117496]: https://github.com/cockroachdb/cockroach/pull/117496 -[#117513]: https://github.com/cockroachdb/cockroach/pull/117513 -[#117556]: https://github.com/cockroachdb/cockroach/pull/117556 -[#117712]: https://github.com/cockroachdb/cockroach/pull/117712 -[#117742]: https://github.com/cockroachdb/cockroach/pull/117742 -[#117960]: https://github.com/cockroachdb/cockroach/pull/117960 -[#118141]: https://github.com/cockroachdb/cockroach/pull/118141 -[#118238]: https://github.com/cockroachdb/cockroach/pull/118238 -[#118320]: https://github.com/cockroachdb/cockroach/pull/118320 -[#118336]: https://github.com/cockroachdb/cockroach/pull/118336 -[#118359]: https://github.com/cockroachdb/cockroach/pull/118359 -[#118371]: https://github.com/cockroachdb/cockroach/pull/118371 -[#118427]: https://github.com/cockroachdb/cockroach/pull/118427 -[#118564]: https://github.com/cockroachdb/cockroach/pull/118564 -[#118590]: https://github.com/cockroachdb/cockroach/pull/118590 -[#118893]: https://github.com/cockroachdb/cockroach/pull/118893 -[#118919]: https://github.com/cockroachdb/cockroach/pull/118919 -[#118969]: https://github.com/cockroachdb/cockroach/pull/118969 -[#118980]: https://github.com/cockroachdb/cockroach/pull/118980 -[#119248]: https://github.com/cockroachdb/cockroach/pull/119248 -[#119270]: https://github.com/cockroachdb/cockroach/pull/119270 -[566a30300]: https://github.com/cockroachdb/cockroach/commit/566a30300 -[7667710a0]: https://github.com/cockroachdb/cockroach/commit/7667710a0 -[ce971160e]: https://github.com/cockroachdb/cockroach/commit/ce971160e diff --git a/src/current/_includes/releases/v23.1/v23.1.16.md 
b/src/current/_includes/releases/v23.1/v23.1.16.md index 68054969555..51d83f93122 100644 --- a/src/current/_includes/releases/v23.1/v23.1.16.md +++ b/src/current/_includes/releases/v23.1/v23.1.16.md @@ -6,7 +6,7 @@ Release Date: February 27, 2024

Bug fixes

-- Fixed a bug where [rangefeed]({% link v23.1/create-and-configure-changefeeds.md %}#enable-rangefeeds) resolved timestamps could get stuck, continually emitting the [log message]({% link v23.1/logging.md %}) `"pushing old intents failed: range barrier failed, range split"`, typically following a [range merge](https://www.cockroachlabs.com/docs/v23.1/architecture/distribution-layer#range-merges). This bug was introduced in v23.1.15. [#119559][#119559] +- Fixed a bug where [rangefeed]({% link v23.1/create-and-configure-changefeeds.md %}#enable-rangefeeds) resolved timestamps could get stuck, continually emitting the [log message]({% link v23.1/logging.md %}) `"pushing old intents failed: range barrier failed, range split"`, typically following a [range merge](https://www.cockroachlabs.com/docs/v23.1/architecture/distribution-layer#range-merges). This bug was introduced in v23.1.15. #119559

Contributors

@@ -14,4 +14,3 @@ This release includes 2 merged PRs by 2 authors. -[#119559]: https://github.com/cockroachdb/cockroach/pull/119559 diff --git a/src/current/_includes/releases/v23.1/v23.1.17.md b/src/current/_includes/releases/v23.1/v23.1.17.md index 7749138fad7..ef67986ebb4 100644 --- a/src/current/_includes/releases/v23.1/v23.1.17.md +++ b/src/current/_includes/releases/v23.1/v23.1.17.md @@ -6,35 +6,35 @@ Release Date: March 19, 2024

Security updates

-- A user with the [`VIEWACTIVITY` privilege](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#supported-privileges) can now request a statement bundle from the [**Statements** page]({% link v23.1/ui-statements-page.md %}) in the DB Console or with the internal builtin function `crdb_internal.requests_statement_bundle`. [#119638][#119638] -- Clusters using [Cluster Single Sign-on (SSO) with JSON web tokens (JWTs)]({% link v23.1/sso-sql.md %}) can now optionally fetch signing keys from configured issuers instead of configuring static signing keys for each issuer. When the new [cluster setting]({% link v23.1/cluster-settings.md %}) `server.jwt_authentication.jwks_auto_fetch.enabled` is set to `true`, signing keys are automatically fetched from the issuer using metadata published in its OpenID configuration. In this case, static signing keys in `server.jwt_authentication.jwks` are ignored. When automatic fetching is enabled, there may be a slight increase in network latency for each JWT authentication request, proportional to the latency between the cluster and the issuer's endpoint. [#120063][#120063] -- [DB Console]({% link v23.1/ui-overview.md %}) cookies are marked `Secure` for the browser when the cluster is running in secure mode. [#119262][#119262] -- The [DB Console]({% link v23.1/ui-overview.md %}) `session` cookie is now marked `HttpOnly` to prevent it from being read by any Javascript code. [#119262][#119262] +- A user with the [`VIEWACTIVITY` privilege](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#supported-privileges) can now request a statement bundle from the [**Statements** page]({% link v23.1/ui-statements-page.md %}) in the DB Console or with the internal builtin function `crdb_internal.requests_statement_bundle`. 
#119638 +- Clusters using [Cluster Single Sign-on (SSO) with JSON web tokens (JWTs)]({% link v23.1/sso-sql.md %}) can now optionally fetch signing keys from configured issuers instead of configuring static signing keys for each issuer. When the new [cluster setting]({% link v23.1/cluster-settings.md %}) `server.jwt_authentication.jwks_auto_fetch.enabled` is set to `true`, signing keys are automatically fetched from the issuer using metadata published in its OpenID configuration. In this case, static signing keys in `server.jwt_authentication.jwks` are ignored. When automatic fetching is enabled, there may be a slight increase in network latency for each JWT authentication request, proportional to the latency between the cluster and the issuer's endpoint. #120063 +- [DB Console]({% link v23.1/ui-overview.md %}) cookies are marked `Secure` for the browser when the cluster is running in secure mode. #119262 +- The [DB Console]({% link v23.1/ui-overview.md %}) `session` cookie is now marked `HttpOnly` to prevent it from being read by any JavaScript code. #119262

SQL language changes

-- The new [cluster setting](https://cockroachlabs.com/docs/v23.1/cluster-settings) `server.max_open_transactions_per_gateway`, when set to a non-negative value, prevents users without the [`admin` role ](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#admin-role) from executing a query if more than this number of [transactions]({% link v23.1/transactions.md %}) are open on the current [gateway node](https://www.cockroachlabs.com/docs/v23.1/architecture/life-of-a-distributed-transaction#gateway). [#118949][#118949] -- Added support for [index hinting]({% link v23.1/table-expressions.md %}#force-index-selection) with [`INSERT`]({% link v23.1/insert.md %}) and [`UPSERT`]({% link v23.1/upsert.md %}) statements. This allows `INSERT ... ON CONFLICT` and `UPSERT` queries to use index hints in the same way as [`UPDATE`]({% link v23.1/update.md %}) and [`DELETE`]({% link v23.1/delete.md %}) statements. [#119600][#119600] +- The new [cluster setting](https://cockroachlabs.com/docs/v23.1/cluster-settings) `server.max_open_transactions_per_gateway`, when set to a non-negative value, prevents users without the [`admin` role ](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#admin-role) from executing a query if more than this number of [transactions]({% link v23.1/transactions.md %}) are open on the current [gateway node](https://www.cockroachlabs.com/docs/v23.1/architecture/life-of-a-distributed-transaction#gateway). #118949 +- Added support for [index hinting]({% link v23.1/table-expressions.md %}#force-index-selection) with [`INSERT`]({% link v23.1/insert.md %}) and [`UPSERT`]({% link v23.1/upsert.md %}) statements. This allows `INSERT ... ON CONFLICT` and `UPSERT` queries to use index hints in the same way as [`UPDATE`]({% link v23.1/update.md %}) and [`DELETE`]({% link v23.1/delete.md %}) statements. #119600

Operational changes

-- You can now pass the `--include-range-info` flag when [generating a diagnostic bundle]({% link v23.1/cockroach-debug-zip.md %}) to include problem ranges. [#119233][#119233] +- You can now pass the `--include-range-info` flag when [generating a diagnostic bundle]({% link v23.1/cockroach-debug-zip.md %}) to include problem ranges. #119233 - In unredacted diagnostic bundles, two columns have been added to the `crdb_internal.transaction_contention_events` table: - `waiting_stmt_query`: the query of the statement that is waiting to run. - `blocking_txn_queries_unordered`: an unordered list of the blocking transaction's queries. - [#119422][#119422] + #119422

DB Console changes

-- The [**Statements** page]({% link v23.1/ui-statements-page.md %}) now always shows the entire selected period, instead of only the period that contains data. [#118811][#118811] +- The [**Statements** page]({% link v23.1/ui-statements-page.md %}) now always shows the entire selected period, instead of only the period that contains data. #118811 - The [**Overload Dashboard**]({% link v23.1/ui-overload-dashboard.md %}) page now includes two additional graphs: - **Elastic CPU Utilization**: Shows the actual CPU used for elastic work compared with the configured limit. - - **Elastic CPU Exhausted Duration Per Second**: Shows how much time, in milliseconds, that elastic work has been subject to CPU exhaustion. [#118908][#118908] + - **Elastic CPU Exhausted Duration Per Second**: Shows how much time, in milliseconds, that elastic work has been subject to CPU exhaustion. #118908

Bug fixes

-- Fixed a bug where creating a [changefeed]({% link v23.1/change-data-capture-overview.md %}) that targeted tables with a zero-scale [`DECIMAL(n)`]({% link v23.1/decimal.md %}) column, `format='avro'`, and `diff` would cause a panic. [#118848][#118848] +- Fixed a bug where creating a [changefeed]({% link v23.1/change-data-capture-overview.md %}) that targeted tables with a zero-scale [`DECIMAL(n)`]({% link v23.1/decimal.md %}) column, `format='avro'`, and `diff` would cause a panic. #118848 - Fixed a bug where a [changefeed](https://www.cockroachlabs.com/docs/v23.1/change-data-capture-overview.html) could omit events in rare cases, logging the error `cdc ux violation: detected timestamp ... that is less or equal to the local frontier`. This could happen in the following scenario: 1. A [rangefeed](https://www.cockroachlabs.com/docs/v23.1/create-and-configure-changefeeds.html#enable-rangefeeds) runs on a follower [replica](https://www.cockroachlabs.com/docs/v23.1/architecture/glossary#cockroachdb-architecture-terms) that lags significantly behind the [leaseholder](https://www.cockroachlabs.com/docs/v23.1/architecture/glossary#cockroachdb-architecture-terms). 1. A transaction commits and removes its [transaction record](https://www.cockroachlabs.com/docs/v23.1/architecture/transaction-layer#transaction-records) before its [intent](https://www.cockroachlabs.com/docs/v23.1/architecture/transaction-layer#writing) resolution is applied on the follower. @@ -42,24 +42,24 @@ Release Date: March 19, 2024 1. The rangefeed attempts to push the transaction to a new timestamp (at least 10 seconds after the transaction began). 1. This may cause the rangefeed to prematurely emit a checkpoint before emitting writes at lower timestamps, which in turn may cause the [changefeed]({% link v23.1/change-data-capture-overview.md %}) to drop these events entirely, never emitting them. 
- [#118415][#118415] -- Fixed a bug introduced in v22.2 where queries issued by [TTL jobs]({% link v23.1/row-level-ttl.md %}#view-scheduled-ttl-jobs) did not use optimal plans. [#118496][#118496] -- Fixed a bug introduced in v23.1.0 where a [decommissioning](https://www.cockroachlabs.com/docs/v23.1/node-shutdown?filters=decommission#decommission-the-node) replica that is part of a mis-replicated range could get stuck on a rebalance operation that was falsely determined to be unsafe. [#118437][#118437] -- Fixed a bug introduced in v22.2 where adding multiple columns with [`UNIQUE` constraints]({% link v23.1/unique.md %}) in a single statement could result in the error `secondary index for backfill contains physical column not present in source primary index`. [#118137][#118137] -- Fixed a bug where an `unable to get CPU capacity` error could be logged every 10 seconds when running outside of a CPU `cgroup`. [#118670][#118670] -- Fixed a bug where an [`AUTO CREATE STATS` job]({% link v23.1/show-jobs.md %}#show-automatic-jobs) could cause an internal system table to grow, and this could cause slow queries related to the job system. [#118944][#118944] -- Fixed a bug that caused a confusing error when a sequence name allocated by [`SERIAL`]({% link v23.1/serial.md %}) conflicted with an existing type name. [#118948][#118948] -- Fixed a bug where an [`ALTER PRIMARY KEY`]({% link v23.1/alter-table.md %}#alter-primary-key) query could fail with the error `non-nullable column {x} with no value! Index scanned ..` when validating a recreated [secondary index]({% link v23.1/indexes.md %}). [#118972][#118972] -- Fixed a bug introduced in v23.1 that could cause the internal error `attempting to append refresh spans after the tracked timestamp has moved forward` to be logged in some cases when using virtual tables such as`crdb_internal.system_jobs`. 
[#119186][#119186] -- Fixed a bug in the `crdb_internal.leases` table where a deadlock in the leasing system could cause a node to become unavailable. [#119374][#119374] -- Fixed a bug introduced in v23.1.15 where a [rangefeed]({% link v23.1/create-and-configure-changefeeds.md %}#enable-rangefeeds)-resolved timestamp could get stuck after a [range merge](https://www.cockroachlabs.com/docs/v23.1/architecture/distribution-layer#range-merges) if the range cache thinks the barrier spans multiple ranges. The log message `pushing old intents failed: range barrier failed, range split` is constantly emitted. [#119543][#119543] -- Fixed a rare panic that could happen during a [`pg_dump` import]({% link v23.1/import.md %}) that contains a subquery in one of its arguments, such as a function like `SELECT addgeometrycolumn(...)`. Now, attempting to import a `pg_dump` with a function that has a subquery in one of its arguments results in an error rather than a panic. [#118611][#118611] -- Fixed a bug in the [webhook sink]({% link v23.1/changefeed-sinks.md %}#webhook-sink) where the `http` request body may not be initialized on retries, resulting in the error `http: ContentLength=... with Body length 0`. [#119495][#119495] -- Fixed a bug where some files were not closed when inspecting backup metadata during [`BACKUP`]({% link v23.1/backup.md %}) and [`RESTORE`]({% link v23.1/restore.md %}) operations. [#119634][#119634] -- Fixed a bug that could cause internal errors to be logged when executing an [`EXPORT`]({% link v23.1/export.md %}) statement. [#119712][#119712] -- Fixed a bug where a [schema change]({% link v23.1/online-schema-changes.md %}) with a large number of descriptors could perform a [full table scan]({% link v23.1/show-full-table-scans.md %}) on the `system.leases` table. 
[#119466][#119466] -- Fixed a bug introduced in v22.1.0 that could cause the `sql.txns.open.internal` and `sql.statements.active.internal` gauge metrics never to be updated, leading to an incorrect count of the number of transactions and statements executed by operations internal to CockroachDB. These metrics do not include operations executed by external SQL clients. [#119338][#119338] -- Fixed a bug introduced in v22.2.9 that could cause a slow memory leak that can accumulate when opening many new connections. [#120244][#120244] + #118415 +- Fixed a bug introduced in v22.2 where queries issued by [TTL jobs]({% link v23.1/row-level-ttl.md %}#view-scheduled-ttl-jobs) did not use optimal plans. #118496 +- Fixed a bug introduced in v23.1.0 where a [decommissioning](https://www.cockroachlabs.com/docs/v23.1/node-shutdown?filters=decommission#decommission-the-node) replica that is part of a mis-replicated range could get stuck on a rebalance operation that was falsely determined to be unsafe. #118437 +- Fixed a bug introduced in v22.2 where adding multiple columns with [`UNIQUE` constraints]({% link v23.1/unique.md %}) in a single statement could result in the error `secondary index for backfill contains physical column not present in source primary index`. #118137 +- Fixed a bug where an `unable to get CPU capacity` error could be logged every 10 seconds when running outside of a CPU `cgroup`. #118670 +- Fixed a bug where an [`AUTO CREATE STATS` job]({% link v23.1/show-jobs.md %}#show-automatic-jobs) could cause an internal system table to grow, and this could cause slow queries related to the job system. #118944 +- Fixed a bug that caused a confusing error when a sequence name allocated by [`SERIAL`]({% link v23.1/serial.md %}) conflicted with an existing type name. #118948 +- Fixed a bug where an [`ALTER PRIMARY KEY`]({% link v23.1/alter-table.md %}#alter-primary-key) query could fail with the error `non-nullable column {x} with no value! 
Index scanned ..` when validating a recreated [secondary index]({% link v23.1/indexes.md %}). #118972 +- Fixed a bug introduced in v23.1 that could cause the internal error `attempting to append refresh spans after the tracked timestamp has moved forward` to be logged in some cases when using virtual tables such as `crdb_internal.system_jobs`. #119186 +- Fixed a bug in the `crdb_internal.leases` table where a deadlock in the leasing system could cause a node to become unavailable. #119374 +- Fixed a bug introduced in v23.1.15 where a [rangefeed]({% link v23.1/create-and-configure-changefeeds.md %}#enable-rangefeeds)-resolved timestamp could get stuck after a [range merge](https://www.cockroachlabs.com/docs/v23.1/architecture/distribution-layer#range-merges) if the range cache thinks the barrier spans multiple ranges. The log message `pushing old intents failed: range barrier failed, range split` is constantly emitted. #119543 +- Fixed a rare panic that could happen during a [`pg_dump` import]({% link v23.1/import.md %}) that contains a subquery in one of its arguments, such as a function like `SELECT addgeometrycolumn(...)`. Now, attempting to import a `pg_dump` with a function that has a subquery in one of its arguments results in an error rather than a panic. #118611 +- Fixed a bug in the [webhook sink]({% link v23.1/changefeed-sinks.md %}#webhook-sink) where the `http` request body may not be initialized on retries, resulting in the error `http: ContentLength=... with Body length 0`. #119495 +- Fixed a bug where some files were not closed when inspecting backup metadata during [`BACKUP`]({% link v23.1/backup.md %}) and [`RESTORE`]({% link v23.1/restore.md %}) operations. #119634 +- Fixed a bug that could cause internal errors to be logged when executing an [`EXPORT`]({% link v23.1/export.md %}) statement. 
#119712 +- Fixed a bug where a [schema change]({% link v23.1/online-schema-changes.md %}) with a large number of descriptors could perform a [full table scan]({% link v23.1/show-full-table-scans.md %}) on the `system.leases` table. #119466 +- Fixed a bug introduced in v22.1.0 that could cause the `sql.txns.open.internal` and `sql.statements.active.internal` gauge metrics never to be updated, leading to an incorrect count of the number of transactions and statements executed by operations internal to CockroachDB. These metrics do not include operations executed by external SQL clients. #119338 +- Fixed a bug introduced in v22.2.9 that could cause a slow memory leak that can accumulate when opening many new connections. #120244
@@ -69,35 +69,3 @@ This release includes 94 merged PRs by 33 authors.
-[#118137]: https://github.com/cockroachdb/cockroach/pull/118137 -[#118415]: https://github.com/cockroachdb/cockroach/pull/118415 -[#118437]: https://github.com/cockroachdb/cockroach/pull/118437 -[#118496]: https://github.com/cockroachdb/cockroach/pull/118496 -[#118560]: https://github.com/cockroachdb/cockroach/pull/118560 -[#118611]: https://github.com/cockroachdb/cockroach/pull/118611 -[#118670]: https://github.com/cockroachdb/cockroach/pull/118670 -[#118811]: https://github.com/cockroachdb/cockroach/pull/118811 -[#118848]: https://github.com/cockroachdb/cockroach/pull/118848 -[#118908]: https://github.com/cockroachdb/cockroach/pull/118908 -[#118944]: https://github.com/cockroachdb/cockroach/pull/118944 -[#118948]: https://github.com/cockroachdb/cockroach/pull/118948 -[#118949]: https://github.com/cockroachdb/cockroach/pull/118949 -[#118972]: https://github.com/cockroachdb/cockroach/pull/118972 -[#118992]: https://github.com/cockroachdb/cockroach/pull/118992 -[#119186]: https://github.com/cockroachdb/cockroach/pull/119186 -[#119233]: https://github.com/cockroachdb/cockroach/pull/119233 -[#119262]: https://github.com/cockroachdb/cockroach/pull/119262 -[#119338]: https://github.com/cockroachdb/cockroach/pull/119338 -[#119374]: https://github.com/cockroachdb/cockroach/pull/119374 -[#119400]: https://github.com/cockroachdb/cockroach/pull/119400 -[#119422]: https://github.com/cockroachdb/cockroach/pull/119422 -[#119466]: https://github.com/cockroachdb/cockroach/pull/119466 -[#119495]: https://github.com/cockroachdb/cockroach/pull/119495 -[#119543]: https://github.com/cockroachdb/cockroach/pull/119543 -[#119600]: https://github.com/cockroachdb/cockroach/pull/119600 -[#119634]: https://github.com/cockroachdb/cockroach/pull/119634 -[#119638]: https://github.com/cockroachdb/cockroach/pull/119638 -[#119695]: https://github.com/cockroachdb/cockroach/pull/119695 -[#119712]: https://github.com/cockroachdb/cockroach/pull/119712 -[#120063]: 
https://github.com/cockroachdb/cockroach/pull/120063 -[#120244]: https://github.com/cockroachdb/cockroach/pull/120244 diff --git a/src/current/_includes/releases/v23.1/v23.1.18.md b/src/current/_includes/releases/v23.1/v23.1.18.md index 22c8b3b49aa..e49ec31abc5 100644 --- a/src/current/_includes/releases/v23.1/v23.1.18.md +++ b/src/current/_includes/releases/v23.1/v23.1.18.md @@ -6,30 +6,30 @@ Release Date: April 9, 2024

SQL language changes

-- Fixed an oversight where CockroachDB was allowing mutation statements (such as [`UPDATE`]({% link v23.1/update.md %}) or [`DELETE`]({% link v23.1/delete.md %})) and locking statements (such as [`SELECT ... FOR UPDATE`]({% link v23.1/select-for-update.md %})) in implicit single-statement [transactions]({% link v23.1/transactions.md %}) using [`AS OF SYSTEM TIME`]({% link v23.1/as-of-system-time.md %}). [#120159][#120159] +- Fixed an oversight where CockroachDB was allowing mutation statements (such as [`UPDATE`]({% link v23.1/update.md %}) or [`DELETE`]({% link v23.1/delete.md %})) and locking statements (such as [`SELECT ... FOR UPDATE`]({% link v23.1/select-for-update.md %})) in implicit single-statement [transactions]({% link v23.1/transactions.md %}) using [`AS OF SYSTEM TIME`]({% link v23.1/as-of-system-time.md %}). #120159

Operational changes

-- The `admission.kv.bulk_only.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}), when set to `true`, only skips store-level [admission control]({% link v23.1/admission-control.md %}) for normal priority work, and continues to subject the work to CPU admission control. [#119173][#119173] +- The `admission.kv.bulk_only.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}), when set to `true`, only skips store-level [admission control]({% link v23.1/admission-control.md %}) for normal priority work, and continues to subject the work to CPU admission control. #119173

DB Console changes

-- Resolved an issue where clusters with multiple [stores]({% link v23.1/cockroach-start.md %}#store) per node could list inaccurate region/node information on the [Databases Page]({% link v23.1/ui-databases-page.md %}). [#120234][#120234] -- Fixed a bug where the [timeseries graphs shown on the **SQL Activity Statement Fingerprint** page]({% link v23.1/ui-statements-page.md %}#charts) in [DB Console]({% link v23.1/ui-overview.md %}) were not rendering properly. This involved fixing a bug related to setting the time range of the charts. [#121783][#121783] +- Resolved an issue where clusters with multiple [stores]({% link v23.1/cockroach-start.md %}#store) per node could list inaccurate region/node information on the [Databases Page]({% link v23.1/ui-databases-page.md %}). #120234 +- Fixed a bug where the [timeseries graphs shown on the **SQL Activity Statement Fingerprint** page]({% link v23.1/ui-statements-page.md %}#charts) in [DB Console]({% link v23.1/ui-overview.md %}) were not rendering properly. This involved fixing a bug related to setting the time range of the charts. #121783

Bug fixes

-- Previously, on long-running [sessions]({% link v23.1/show-sessions.md %}) that issue many (hundreds of thousands or more) [transactions]({% link v23.1/transactions.md %}), CockroachDB's internal memory accounting system, the limit for which is configured via the [`--max-sql-memory` flag]({% link v23.1/cockroach-start.md %}#general)), could leak. This bug, in turn, could result in the error message `"root: memory budget exceeded"` for other queries. The bug was present in v23.1.17 and is now fixed. [#121874][#121874] -- Fix a bug where running [`RESTORE`]({% link v23.1/restore.md %}) on certain [backups]({% link v23.1/backup-and-restore-overview.md %}) would open a very large number of connections to the [backup storage provider]({% link v23.1/use-cloud-storage.md %}). [#119925][#119925] -- A user with the `VIEWACTIVITYREDACTED` [privilege](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#managing-privileges) can no longer see constants inside of queries that originate from other [users](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#roles) in the [`SHOW SESSIONS`]({% link v23.1/show-sessions.md %}) result. Previously, this redaction did not occur. [#119963][#119963] -- Previously, the [`SHOW QUERIES`]({% link v23.1/show-statements.md %}#aliases) and [`SHOW STATEMENTS`]({% link v23.1/show-statements.md %}) required the user to have the `VIEWACTIVITY` or `VIEWACTIVITYREDACTED` [privilege](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#managing-privileges). However, a user should always be able to view their own queries, even without these privileges. This is now fixed. [#119963][#119963] -- Fixed a bug in which it was possible to `SET transaction_read_only = false` during an [`AS OF SYSTEM TIME`]({% link v23.1/as-of-system-time.md %}) transaction. 
[#120159][#120159] -- Fixed a slow memory leak that could accumulate when opening many new [connections]({% link v23.1/connection-pooling.md %}). The bug was present in v22.2.9+ and v23.1+. [#120242][#120242] -- Users will no longer see [views]({% link v23.1/views.md %}) displayed on the [**Databases** page]({% link v23.1/ui-databases-page.md %}) in [DB Console]({% link v23.1/ui-overview.md %}). Previously, views would be listed with no information, only displaying errors. [#120213][#120213] +- Previously, on long-running [sessions]({% link v23.1/show-sessions.md %}) that issue many (hundreds of thousands or more) [transactions]({% link v23.1/transactions.md %}), CockroachDB's internal memory accounting system, the limit for which is configured via the [`--max-sql-memory` flag]({% link v23.1/cockroach-start.md %}#general), could leak. This bug, in turn, could result in the error message `"root: memory budget exceeded"` for other queries. The bug was present in v23.1.17 and is now fixed. #121874 +- Fixed a bug where running [`RESTORE`]({% link v23.1/restore.md %}) on certain [backups]({% link v23.1/backup-and-restore-overview.md %}) would open a very large number of connections to the [backup storage provider]({% link v23.1/use-cloud-storage.md %}). #119925 +- A user with the `VIEWACTIVITYREDACTED` [privilege](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#managing-privileges) can no longer see constants inside of queries that originate from other [users](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#roles) in the [`SHOW SESSIONS`]({% link v23.1/show-sessions.md %}) result. Previously, this redaction did not occur. 
#119963 +- Previously, the [`SHOW QUERIES`]({% link v23.1/show-statements.md %}#aliases) and [`SHOW STATEMENTS`]({% link v23.1/show-statements.md %}) required the user to have the `VIEWACTIVITY` or `VIEWACTIVITYREDACTED` [privilege](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#managing-privileges). However, a user should always be able to view their own queries, even without these privileges. This is now fixed. #119963 +- Fixed a bug in which it was possible to `SET transaction_read_only = false` during an [`AS OF SYSTEM TIME`]({% link v23.1/as-of-system-time.md %}) transaction. #120159 +- Fixed a slow memory leak that could accumulate when opening many new [connections]({% link v23.1/connection-pooling.md %}). The bug was present in v22.2.9+ and v23.1+. #120242 +- Users will no longer see [views]({% link v23.1/views.md %}) displayed on the [**Databases** page]({% link v23.1/ui-databases-page.md %}) in [DB Console]({% link v23.1/ui-overview.md %}). Previously, views would be listed with no information, only displaying errors. #120213

Performance improvements

-- Enabled the [admission control]({% link v23.1/admission-control.md %}) CPU limiter for [row-level TTL]({% link v23.1/row-level-ttl.md %}) to reduce latency impact from CPU-intensive scans issued as part of [row-level TTL jobs]({% link v23.1/row-level-ttl.md %}#view-scheduled-ttl-jobs). [#120516][#120516] +- Enabled the [admission control]({% link v23.1/admission-control.md %}) CPU limiter for [row-level TTL]({% link v23.1/row-level-ttl.md %}) to reduce latency impact from CPU-intensive scans issued as part of [row-level TTL jobs]({% link v23.1/row-level-ttl.md %}#view-scheduled-ttl-jobs). #120516
@@ -39,18 +39,3 @@ This release includes 45 merged PRs by 27 authors.
-[#119173]: https://github.com/cockroachdb/cockroach/pull/119173 -[#119827]: https://github.com/cockroachdb/cockroach/pull/119827 -[#119855]: https://github.com/cockroachdb/cockroach/pull/119855 -[#119925]: https://github.com/cockroachdb/cockroach/pull/119925 -[#119963]: https://github.com/cockroachdb/cockroach/pull/119963 -[#119969]: https://github.com/cockroachdb/cockroach/pull/119969 -[#120020]: https://github.com/cockroachdb/cockroach/pull/120020 -[#120159]: https://github.com/cockroachdb/cockroach/pull/120159 -[#120213]: https://github.com/cockroachdb/cockroach/pull/120213 -[#120234]: https://github.com/cockroachdb/cockroach/pull/120234 -[#120242]: https://github.com/cockroachdb/cockroach/pull/120242 -[#120429]: https://github.com/cockroachdb/cockroach/pull/120429 -[#120516]: https://github.com/cockroachdb/cockroach/pull/120516 -[#121783]: https://github.com/cockroachdb/cockroach/pull/121783 -[#121874]: https://github.com/cockroachdb/cockroach/pull/121874 diff --git a/src/current/_includes/releases/v23.1/v23.1.19.md b/src/current/_includes/releases/v23.1/v23.1.19.md index ecae59e1828..92760930558 100644 --- a/src/current/_includes/releases/v23.1/v23.1.19.md +++ b/src/current/_includes/releases/v23.1/v23.1.19.md @@ -6,5 +6,4 @@ Release Date: April 18, 2024

Bug fixes

-- Reintroduced [cluster setting]({% link v23.1/cluster-settings.md %}) `sql.auth.modify_cluster_setting_applies_to_all.enabled` so that mixed-version clusters can migrate off of this setting, which is deprecated in favor of the privilege [`MODIFYSQLCLUSTERSETTING`]({% link v23.1/set-cluster-setting.md %}#required-privileges). [#122455][#122455] -[#122455]: https://github.com/cockroachdb/cockroach/pull/122455 +- Reintroduced [cluster setting]({% link v23.1/cluster-settings.md %}) `sql.auth.modify_cluster_setting_applies_to_all.enabled` so that mixed-version clusters can migrate off of this setting, which is deprecated in favor of the privilege [`MODIFYSQLCLUSTERSETTING`]({% link v23.1/set-cluster-setting.md %}#required-privileges). #122455 diff --git a/src/current/_includes/releases/v23.1/v23.1.2.md b/src/current/_includes/releases/v23.1/v23.1.2.md index fbd28487c6a..47ab05b1b73 100644 --- a/src/current/_includes/releases/v23.1/v23.1.2.md +++ b/src/current/_includes/releases/v23.1/v23.1.2.md @@ -6,160 +6,160 @@ Release Date: May 30, 2023

Backward-incompatible changes

-- Schema names in the [`search_path`]({% link v23.1/sql-name-resolution.md %}#search-path) session variable now respect case and must be delimited with double quotation marks. Previously, if a `search_path` was specified in the connection string parameters, it would be treated case insensitive by default. [#101493][#101493] +- Schema names in the [`search_path`]({% link v23.1/sql-name-resolution.md %}#search-path) session variable now respect case and must be delimited with double quotation marks. Previously, if a `search_path` was specified in the connection string parameters, it would be treated as case-insensitive by default. #101493 - [#101493][#101493] + #101493

General changes

-- Queries with invalid syntax are now logged at [the `INFO` level]({% link v23.1/logging.md %}#info) in the `SQL_EXEC` log channel. Previously, these were logged at the `ERROR` level. [#101094][#101094] +- Queries with invalid syntax are now logged at [the `INFO` level]({% link v23.1/logging.md %}#info) in the `SQL_EXEC` log channel. Previously, these were logged at the `ERROR` level. #101094

{{ site.data.products.enterprise }} edition changes

-- [CDC queries]({% link v23.1/cdc-queries.md %}) now support wrapped envelope with diff (`envelope='wrapped', diff`). [#101392][#101392] -- [Changefeeds using the `WITH confluent_schema_registry` option]({% link v23.1/stream-a-changefeed-to-a-confluent-cloud-kafka-cluster.md %}) will make fewer duplicate schema registrations. [#100844][#100844] -- Added logic to set the cluster's multi-region system database's [survival goal]({% link v23.1/multiregion-overview.md %}#survival-goals) to the max non-system database's survival whenever an `ALTER DATABASE...SURVIVE...FAILURE` is issued. [#102379][#102379] -- [Cluster SSO using JSON web tokens]({% link v23.1/sso-sql.md %}) (JWT) can now read SQL usernames from any JWT claims instead of requiring the subject claim to be used. The claim can be controlled by the `server.jwt_authentication.claim` [cluster setting]({% link v23.1/cluster-settings.md %}) with an empty string or "sub" equivalent to the previous behavior. [#103526][#103526] +- [CDC queries]({% link v23.1/cdc-queries.md %}) now support wrapped envelope with diff (`envelope='wrapped', diff`). #101392 +- [Changefeeds using the `WITH confluent_schema_registry` option]({% link v23.1/stream-a-changefeed-to-a-confluent-cloud-kafka-cluster.md %}) will make fewer duplicate schema registrations. #100844 +- Added logic to set the cluster's multi-region system database's [survival goal]({% link v23.1/multiregion-overview.md %}#survival-goals) to the max non-system database's survival whenever an `ALTER DATABASE...SURVIVE...FAILURE` is issued. #102379 +- [Cluster SSO using JSON web tokens]({% link v23.1/sso-sql.md %}) (JWT) can now read SQL usernames from any JWT claims instead of requiring the subject claim to be used. The claim can be controlled by the `server.jwt_authentication.claim` [cluster setting]({% link v23.1/cluster-settings.md %}) with an empty string or "sub" equivalent to the previous behavior. #103526

SQL language changes

-- Added a new [session variable]({% link v23.1/set-vars.md %}) `unbounded_parallel_scans`, which controls whether scans will be parallelized across [ranges](https://www.cockroachlabs.com/docs/v23.1/architecture/overview#architecture-range) in more cases. Note that using this feature can lead to increased likelihood of [out-of-memory errors (OOMs)]({% link v23.1/cluster-setup-troubleshooting.md %}#out-of-memory-oom-crash), so it should be used with care (namely when you expect that the scan should read a "reasonable" number of rows - probably less than 10k). Also note that [queries with `LIMIT`s]({% link v23.1/limit-offset.md %}) aren't affected by this variable; cross-range parallelism of scans continue to be disabled for such queries. [#100948][#100948] -- Statements of type [`SET ...`]({% link v23.1/set-vars.md %}) are not longer displayed on the [Insights page]({% link v23.1/ui-insights-page.md %}). [#101669][#101669] -- Fixed [`crdb_internal.transaction_contention_events`](https://www.cockroachlabs.com/docs/v23.1/crdb-internal#transaction_contention_events), so it generates even if an error occurs when getting the names. [#101870][#101870] -- Added a new SQL activity updater job which updates the `system.transaction_activity` and `system.statement_activity` tables based on the [statistics]({% link v23.1/ui-statements-page.md %}#statement-statistics) tables. [#101996][#101996] -- Added two views to the [`crdb_internal catalog`]({% link v23.1/crdb-internal.md %}): `crdb_internal.statement_activity`, which surfaces data in the `persisted system.statement_activity` table and `crdb_internal.transaction_activity`, which surfaces the `system.transaction_activity` table. [#102002][#102002] -- Tables with [Row-level TTL]({% link v23.1/row-level-ttl.md %}) settings can now have outbound [foreign keys]({% link v23.1/foreign-key.md %}). [#101874][#101874] -- Span statistics are now unavailable on mixed-version clusters. 
[#101877][#101877] -- Introduced the `to_char(date, format)` [built-in function]({% link v23.1/functions-and-operators.md %}), which converts a given date to a string using the given format string. [#102989][#102989] -- Renamed an existing metric `changefeed.table_metadata_nanos` to `changefeed.schemafeed.table_metadata_nanos` and introduced a new metric `changefeed.schemafeed.table_history_scans`, which records the number of table history scans the [schema feed]({% link v23.1/changefeed-examples.md %}) performs. [#102977][#102977] -- Changed the [`OID`]({% link v23.1/oid.md %}) generation for `pg_catalog`. For example `column`, `index` and `constraint` `OID`'s will have different values. `Relation`, `type` and `function` `OID`'s remain unchanged. [#103556][#103556] -- Added a new [session setting]({% link v23.1/set-vars.md %}) `optimizer_use_improved_computed_column_filters_derivation`, which defaults to `FALSE`. When `TRUE`, the optimizer will derive filters on computed columns in more cases. [#103412][#103412] -- `Crdb_internal.transaction_contention_events`, `crdb_internal.node_contention_events`, and `crdb_internal.cluster_locks` will now redact keys provided the user has [`VIEWACTIVITYREDACTED`](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization). `Crdb_internal.node_contention_events` can only be viewed if the user has any of `admin`, `VIEWACTIVITY` or `VIEWACTIVITYREDACTED`. [#103637][#103637] +- Added a new [session variable]({% link v23.1/set-vars.md %}) `unbounded_parallel_scans`, which controls whether scans will be parallelized across [ranges](https://www.cockroachlabs.com/docs/v23.1/architecture/overview#architecture-range) in more cases. 
Note that using this feature can lead to increased likelihood of [out-of-memory errors (OOMs)]({% link v23.1/cluster-setup-troubleshooting.md %}#out-of-memory-oom-crash), so it should be used with care (namely when you expect that the scan should read a "reasonable" number of rows - probably less than 10k). Also note that [queries with `LIMIT`s]({% link v23.1/limit-offset.md %}) aren't affected by this variable; cross-range parallelism of scans continues to be disabled for such queries. #100948 +- Statements of type [`SET ...`]({% link v23.1/set-vars.md %}) are no longer displayed on the [Insights page]({% link v23.1/ui-insights-page.md %}). #101669 +- Fixed [`crdb_internal.transaction_contention_events`](https://www.cockroachlabs.com/docs/v23.1/crdb-internal#transaction_contention_events), so it generates even if an error occurs when getting the names. #101870 +- Added a new SQL activity updater job which updates the `system.transaction_activity` and `system.statement_activity` tables based on the [statistics]({% link v23.1/ui-statements-page.md %}#statement-statistics) tables. #101996 +- Added two views to the [`crdb_internal catalog`]({% link v23.1/crdb-internal.md %}): `crdb_internal.statement_activity`, which surfaces data in the `persisted system.statement_activity` table and `crdb_internal.transaction_activity`, which surfaces the `system.transaction_activity` table. #102002 +- Tables with [Row-level TTL]({% link v23.1/row-level-ttl.md %}) settings can now have outbound [foreign keys]({% link v23.1/foreign-key.md %}). #101874 +- Span statistics are now unavailable on mixed-version clusters. #101877 +- Introduced the `to_char(date, format)` [built-in function]({% link v23.1/functions-and-operators.md %}), which converts a given date to a string using the given format string. 
#102989 +- Renamed an existing metric `changefeed.table_metadata_nanos` to `changefeed.schemafeed.table_metadata_nanos` and introduced a new metric `changefeed.schemafeed.table_history_scans`, which records the number of table history scans the [schema feed]({% link v23.1/changefeed-examples.md %}) performs. #102977 +- Changed the [`OID`]({% link v23.1/oid.md %}) generation for `pg_catalog`. For example `column`, `index` and `constraint` `OID`'s will have different values. `Relation`, `type` and `function` `OID`'s remain unchanged. #103556 +- Added a new [session setting]({% link v23.1/set-vars.md %}) `optimizer_use_improved_computed_column_filters_derivation`, which defaults to `FALSE`. When `TRUE`, the optimizer will derive filters on computed columns in more cases. #103412 +- `Crdb_internal.transaction_contention_events`, `crdb_internal.node_contention_events`, and `crdb_internal.cluster_locks` will now redact keys provided the user has [`VIEWACTIVITYREDACTED`](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization). `Crdb_internal.node_contention_events` can only be viewed if the user has any of `admin`, `VIEWACTIVITY` or `VIEWACTIVITYREDACTED`. #103637

Operational changes

-- Added the `rebalancing.replicas.cpunanospersecond` histogram [metric]({% link v23.1/metrics.md %}), which provides insight into the distribution of replica CPU usage within a store. [#100509][#100509] -- Added the `rebalancing.replicas.queriespersecond` histogram [metric]({% link v23.1/metrics.md %}), which provides insight into the distribution of queries per replica within a store. [#100509][#100509] -- The amount of [replication](https://www.cockroachlabs.com/docs/v23.1/architecture/replication-layer) traffic in flight from a single [Raft leader](https://www.cockroachlabs.com/docs/v23.1/architecture/reads-and-writes-overview#architecture-raft-leader) to a follower has been reduced from 256 MB to 32 MB. This reduces the chance of running out-of-memory during bulk write operations. This can be controlled via the environment variable `COCKROACH_RAFT_MAX_INFLIGHT_BYTES`. [#101507][#101507] -- Added the [metric]({% link v23.1/metrics.md %}) `leases.requests.latency`, which records a histogram of lease request latencies. [#100475][#100475] -- When local corruption of data is encountered by a background job in the [storage engine](https://www.cockroachlabs.com/docs/v23.1/architecture/storage-layer), a node will now exit immediately. [#102274][#102274] -- When the [`--experimental-dns-srv`]({% link v23.1/cockroach-start.md %}#networking) flag is enabled, the `crdb` node will always attempt to query SRV records of an address when dialing remote targets, and fall back to use the address verbatim if the SRV query fails. Previously, SRV queries were only attempted once when a node starts. [#102468][#102468] -- The default Raft scheduler concurrency, controlled by `COCKROACH_SCHEDULER_CONCURRENCY` and defaulting to 8 per CPU core capped at 96, is now divided evenly across stores instead of applying individually per store. This avoids excessive Go scheduler pressure and memory usage on nodes with many stores. The common case of 1 store per node is not affected. 
[#103073][#103073] -- Workload generators now export Go runtime metrics via [Prometheus]({% link v23.1/monitor-cockroachdb-with-prometheus.md %}) endpoint. [#102392][#102392] -- [Multi-region]({% link v23.1/multiregion-overview.md %}) Serverless databases that are created without a primary region will now inherit regions from the Serverless cluster's regions. [#102627][#102627] +- Added the `rebalancing.replicas.cpunanospersecond` histogram [metric]({% link v23.1/metrics.md %}), which provides insight into the distribution of replica CPU usage within a store. #100509 +- Added the `rebalancing.replicas.queriespersecond` histogram [metric]({% link v23.1/metrics.md %}), which provides insight into the distribution of queries per replica within a store. #100509 +- The amount of [replication](https://www.cockroachlabs.com/docs/v23.1/architecture/replication-layer) traffic in flight from a single [Raft leader](https://www.cockroachlabs.com/docs/v23.1/architecture/reads-and-writes-overview#architecture-raft-leader) to a follower has been reduced from 256 MB to 32 MB. This reduces the chance of running out-of-memory during bulk write operations. This can be controlled via the environment variable `COCKROACH_RAFT_MAX_INFLIGHT_BYTES`. #101507 +- Added the [metric]({% link v23.1/metrics.md %}) `leases.requests.latency`, which records a histogram of lease request latencies. #100475 +- When local corruption of data is encountered by a background job in the [storage engine](https://www.cockroachlabs.com/docs/v23.1/architecture/storage-layer), a node will now exit immediately. #102274 +- When the [`--experimental-dns-srv`]({% link v23.1/cockroach-start.md %}#networking) flag is enabled, the `crdb` node will always attempt to query SRV records of an address when dialing remote targets, and fall back to use the address verbatim if the SRV query fails. Previously, SRV queries were only attempted once when a node starts. 
#102468 +- The default Raft scheduler concurrency, controlled by `COCKROACH_SCHEDULER_CONCURRENCY` and defaulting to 8 per CPU core capped at 96, is now divided evenly across stores instead of applying individually per store. This avoids excessive Go scheduler pressure and memory usage on nodes with many stores. The common case of 1 store per node is not affected. #103073 +- Workload generators now export Go runtime metrics via [Prometheus]({% link v23.1/monitor-cockroachdb-with-prometheus.md %}) endpoint. #102392 +- [Multi-region]({% link v23.1/multiregion-overview.md %}) Serverless databases that are created without a primary region will now inherit regions from the Serverless cluster's regions. #102627

Command-line changes

-- The [`cockroach debug zip`]({% link v23.1/cockroach-debug-zip.md %}) command now accepts an `--include-range-info` flag, which determines whether CockroachDB retrieves individual `nodes/*/ranges/*.json` files, which was previously done by default. For large clusters, this can dramatically reduce the size of the generated artifacts. This flag defaults to `FALSE`. [#102862][#102862] -- [Workload]({% link v23.1/cockroach-workload.md %}) now jitters the teardown of connections to prevent a [thundering herd](https://wikipedia.org/wiki/Thundering_herd_problem) of queries impacting P99 latency results. [#102395][#102395] -- Workload utility now has flags to tune the [connection pool]({% link v23.1/connection-pooling.md %}) used for testing. See `--conn-healthcheck-period`, `--min-conns`, and `--max-conn-*` flags for details. [#102395][#102395] -- Workload now supports every [PostgreSQL query mode](https://github.com/jackc/pgx/blob/fa5fbed497bc75acee05c1667a8760ce0d634cba/conn.go#L167-L182) available via the underlying `pgx` driver. [#102395][#102395] -- The `\connect` client-side command for the SQL shell (included in `cockroach sql`, `cockroach demo`, `cockroach-sql`) now recognizes the option `autocerts` as its last argument. When provided, `\c` will now try to discover a [Transport Layer Security (TLS)](https://www.cockroachlabs.com/docs/v23.1/security-reference/transport-layer-security) client certificate and key in the same directory(ies) as used by the previous connection URL. This feature makes it easier to switch usernames when TLS client/key files are available for both the previous and new username. [#103144][#103144] +- The [`cockroach debug zip`]({% link v23.1/cockroach-debug-zip.md %}) command now accepts an `--include-range-info` flag, which determines whether CockroachDB retrieves individual `nodes/*/ranges/*.json` files, which was previously done by default. For large clusters, this can dramatically reduce the size of the generated artifacts. 
This flag defaults to `FALSE`. #102862 +- [Workload]({% link v23.1/cockroach-workload.md %}) now jitters the teardown of connections to prevent a [thundering herd](https://wikipedia.org/wiki/Thundering_herd_problem) of queries impacting P99 latency results. #102395 +- Workload utility now has flags to tune the [connection pool]({% link v23.1/connection-pooling.md %}) used for testing. See `--conn-healthcheck-period`, `--min-conns`, and `--max-conn-*` flags for details. #102395 +- Workload now supports every [PostgreSQL query mode](https://github.com/jackc/pgx/blob/fa5fbed497bc75acee05c1667a8760ce0d634cba/conn.go#L167-L182) available via the underlying `pgx` driver. #102395 +- The `\connect` client-side command for the SQL shell (included in `cockroach sql`, `cockroach demo`, `cockroach-sql`) now recognizes the option `autocerts` as its last argument. When provided, `\c` will now try to discover a [Transport Layer Security (TLS)](https://www.cockroachlabs.com/docs/v23.1/security-reference/transport-layer-security) client certificate and key in the same directory(ies) as used by the previous connection URL. This feature makes it easier to switch usernames when TLS client/key files are available for both the previous and new username. #103144

DB Console changes

-- The `Application Name` column is now shown by default in the [Fingerprints Overview]({% link v23.1/ui-statements-page.md %}#statement-fingerprints-view) pages. Statements and transactions fingerprints will be displayed per application on the Overview pages rather than grouped into a single fingerprint ID. [#101164][#101164] -- When going from the [Fingerprints Overview]({% link v23.1/ui-statements-page.md %}#statement-fingerprints-view) pages or the [Insight Details]({% link v23.1/ui-insights-page.md %}) pages to the Fingerprint Details page, the Details page will fetch data for the statement with the provided application name. For Overview pages, this is the application name of the selected row. For Insight details, this is the application of the execution that generated the insight. [#101164][#101164] -- Updated the [Network Latency]({% link v23.1/ui-network-latency-page.md %}) side nav name and Network Diagnostics page title to Network. Updated the [Advanced Debugging]({% link v23.1/ui-debug-pages.md %}) page title to Advanced Debug. [#101758][#101758] -- Added an option to select the trace rate for [Statement Diagnostics]({% link v23.1/ui-statements-page.md %}#diagnostics) collection. [#101759][#101759] -- Added a time scale selector to the [Statement Diagnostics]({% link v23.1/ui-statements-page.md %}#diagnostics) page, making it possible to see bundles only from the selected period. [#101802][#101802] -- Added draining node as its own value on the [DB Console Overview]({% link v23.1/ui-overview.md %}) page, instead of counting it as a dead node. [#101793][#101793] -- Added the ability for users to view timestamps in [DB Console]({% link v23.1/ui-overview.md %}) in their preferred timezone via the cluster setting `ui.display_timezone`. Previously, only the timezones Coordinated Universal Time and America/New_York were supported. 
[#102195][#102195] -- An alert on [DB Console Overview]({% link v23.1/ui-overview.md %}) page is shown when the cluster setting `cluster.preserve_downgrade_option` is set, and no longer waits 48 hours to show. [#102913][#102913] -- Added [Transaction Insights]({% link cockroachcloud/insights-page.md %}) for Serverless. [#103363][#103363] -- Added `_status/load` to the list of Raw Status Endpoints on the [Advanced Debug]({% link v23.1/ui-debug-pages.md %}) page. [#103420][#103420] -- If a page crashed, a force refresh is no longer required to be able to see the other pages on [DB Console]({% link v23.1/ui-overview.md %}). [#103328][#103328] -- The filter on the [SQL Activity]({% link v23.1/ui-overview.md %}#sql-activity) page is now working properly. Fixed the type on the JSON object from `stmtTyp` to `stmtType`. [#103411][#103411] -- Added missing information on the [Index Details]({% link v23.1/ui-databases-page.md %}#index-recommendations) page about latency information of most used fingerprints. [#103421][#103421] +- The `Application Name` column is now shown by default in the [Fingerprints Overview]({% link v23.1/ui-statements-page.md %}#statement-fingerprints-view) pages. Statements and transactions fingerprints will be displayed per application on the Overview pages rather than grouped into a single fingerprint ID. #101164 +- When going from the [Fingerprints Overview]({% link v23.1/ui-statements-page.md %}#statement-fingerprints-view) pages or the [Insight Details]({% link v23.1/ui-insights-page.md %}) pages to the Fingerprint Details page, the Details page will fetch data for the statement with the provided application name. For Overview pages, this is the application name of the selected row. For Insight details, this is the application of the execution that generated the insight. #101164 +- Updated the [Network Latency]({% link v23.1/ui-network-latency-page.md %}) side nav name and Network Diagnostics page title to Network. 
Updated the [Advanced Debugging]({% link v23.1/ui-debug-pages.md %}) page title to Advanced Debug. #101758 +- Added an option to select the trace rate for [Statement Diagnostics]({% link v23.1/ui-statements-page.md %}#diagnostics) collection. #101759 +- Added a time scale selector to the [Statement Diagnostics]({% link v23.1/ui-statements-page.md %}#diagnostics) page, making it possible to see bundles only from the selected period. #101802 +- Added draining node as its own value on the [DB Console Overview]({% link v23.1/ui-overview.md %}) page, instead of counting it as a dead node. #101793 +- Added the ability for users to view timestamps in [DB Console]({% link v23.1/ui-overview.md %}) in their preferred timezone via the cluster setting `ui.display_timezone`. Previously, only the timezones Coordinated Universal Time and America/New_York were supported. #102195 +- An alert on [DB Console Overview]({% link v23.1/ui-overview.md %}) page is shown when the cluster setting `cluster.preserve_downgrade_option` is set, and no longer waits 48 hours to show. #102913 +- Added [Transaction Insights]({% link cockroachcloud/insights-page.md %}) for Serverless. #103363 +- Added `_status/load` to the list of Raw Status Endpoints on the [Advanced Debug]({% link v23.1/ui-debug-pages.md %}) page. #103420 +- If a page crashed, a force refresh is no longer required to be able to see the other pages on [DB Console]({% link v23.1/ui-overview.md %}). #103328 +- The filter on the [SQL Activity]({% link v23.1/ui-overview.md %}#sql-activity) page is now working properly. Fixed the type on the JSON object from `stmtTyp` to `stmtType`. #103411 +- Added missing information on the [Index Details]({% link v23.1/ui-databases-page.md %}#index-recommendations) page about latency information of most used fingerprints. #103421

Bug fixes

-- Fixed the `sql.mem.distsql.current` [metric]({% link v23.1/metrics.md %}) so it would no longer double count the memory usage of remote DistSQL flows. [#100049][#100049] -- Fixed a rare bug introduced prior to v22.1 where distributed plans could cause the graceful drain of a node to become stuck retrying forever during [node shutdown]({% link v23.1/node-shutdown.md %}). This bug led to errors like `drain details: distSQL execution flows:`, together with a non-zero number of flows that does not reduce over a long period of time. [#100841][#100841] -- Fixed a bug that caused a [restore]({% link v23.1/restore.md %}) to fail occasionally due to incorrect schema ID resolution when restoring a backup with [user-defined schemas]({% link v23.1/schema-design-schema.md %}). [#101309][#101309] -- Fixed a bug that caused suboptimal query plans to be generated by [the optimizer]({% link v23.1/cost-based-optimizer.md %}) when the table being queried contained infinite values, e.g., `'+Infinity'::DECIMAL`. This bug was present since v22.1 (and likely earlier). It could also be triggered in rare cases when [table statistics]({% link v23.1/show-statistics.md %}) forecasts created a forecasted bucket with an infinite value. [#101135][#101135] -- Fixed a rare internal error in [the optimizer]({% link v23.1/cost-based-optimizer.md %}) that existed since before v22.1, which could occur while enforcing orderings between SQL operators. [#101173][#101173] -- Fixed a bug so that `crdb_internal.deserialize_session` internal function works properly with prepared statements that have more param type hints than params. [#101367][#101367] -- Fixed a bug that caused internal errors when executing [user-defined functions]({% link v23.1/user-defined-functions.md %}) with empty bodies. This bug was only present in alpha pre-release versions of 23.1. [#101382][#101382] -- The `search_path` [session variable]({% link v23.1/set-vars.md %}) now supports schema names that have commas in them. 
Also, fixed a bug in parsing a `search_path` with a quote in it when specified in the [connection string]({% link v23.1/connection-parameters.md %}). [#101493][#101493] -- Fixed a bug that has existed since [user-defined functions]({% link v23.1/user-defined-functions.md %}) were introduced that could cause a function call to resolve to the wrong function after changes to the [schema `search_path`]({% link v23.1/sql-name-resolution.md %}#current-schema). [#101491][#101491] -- Fixed an internal error that could occur when the `enforce_home_region` [session setting]({% link v23.1/set-vars.md %}) is on and the input to the lookup join is a `SELECT` of scalar expressions (e.g., `1+1`). Also, [subqueries]({% link v23.1/subqueries.md %}) with no home region now error out with `enforce_home_region` set. [#101483][#101483] -- Previously, CockroachDB alpha and beta versions of 23.1 would panic on [`cockroach start`]({% link v23.1/cockroach-start.md %}) command when the `GOMEMLIMIT` environment variable was set and the `--max-go-memory` flag wasn't specified. This is now fixed. [#101564][#101564] -- Fixed a bug whereby some tables' physical disk space could not be calculated by [the storage engine](https://www.cockroachlabs.com/docs/v23.1/architecture/storage-layer). [#100939][#100939] -- Fixed a bug that caused errors in test builds and potentially incorrect results in release builds when invoking a [user-defined function]({% link v23.1/user-defined-functions.md %}) with a subquery argument. This bug was only present in v23.1 alpha versions. [#101641][#101641] +- Fixed the `sql.mem.distsql.current` [metric]({% link v23.1/metrics.md %}) so it would no longer double count the memory usage of remote DistSQL flows. #100049 +- Fixed a rare bug introduced prior to v22.1 where distributed plans could cause the graceful drain of a node to become stuck retrying forever during [node shutdown]({% link v23.1/node-shutdown.md %}). 
This bug led to errors like `drain details: distSQL execution flows:`, together with a non-zero number of flows that does not reduce over a long period of time. #100841 +- Fixed a bug that caused a [restore]({% link v23.1/restore.md %}) to fail occasionally due to incorrect schema ID resolution when restoring a backup with [user-defined schemas]({% link v23.1/schema-design-schema.md %}). #101309 +- Fixed a bug that caused suboptimal query plans to be generated by [the optimizer]({% link v23.1/cost-based-optimizer.md %}) when the table being queried contained infinite values, e.g., `'+Infinity'::DECIMAL`. This bug was present since v22.1 (and likely earlier). It could also be triggered in rare cases when [table statistics]({% link v23.1/show-statistics.md %}) forecasts created a forecasted bucket with an infinite value. #101135 +- Fixed a rare internal error in [the optimizer]({% link v23.1/cost-based-optimizer.md %}) that existed since before v22.1, which could occur while enforcing orderings between SQL operators. #101173 +- Fixed a bug so that `crdb_internal.deserialize_session` internal function works properly with prepared statements that have more param type hints than params. #101367 +- Fixed a bug that caused internal errors when executing [user-defined functions]({% link v23.1/user-defined-functions.md %}) with empty bodies. This bug was only present in alpha pre-release versions of 23.1. #101382 +- The `search_path` [session variable]({% link v23.1/set-vars.md %}) now supports schema names that have commas in them. Also, fixed a bug in parsing a `search_path` with a quote in it when specified in the [connection string]({% link v23.1/connection-parameters.md %}). #101493 +- Fixed a bug that has existed since [user-defined functions]({% link v23.1/user-defined-functions.md %}) were introduced that could cause a function call to resolve to the wrong function after changes to the [schema `search_path`]({% link v23.1/sql-name-resolution.md %}#current-schema). 
#101491 +- Fixed an internal error that could occur when the `enforce_home_region` [session setting]({% link v23.1/set-vars.md %}) is on and the input to the lookup join is a `SELECT` of scalar expressions (e.g., `1+1`). Also, [subqueries]({% link v23.1/subqueries.md %}) with no home region now error out with `enforce_home_region` set. #101483 +- Previously, CockroachDB alpha and beta versions of 23.1 would panic on [`cockroach start`]({% link v23.1/cockroach-start.md %}) command when the `GOMEMLIMIT` environment variable was set and the `--max-go-memory` flag wasn't specified. This is now fixed. #101564 +- Fixed a bug whereby some tables' physical disk space could not be calculated by [the storage engine](https://www.cockroachlabs.com/docs/v23.1/architecture/storage-layer). #100939 +- Fixed a bug that caused errors in test builds and potentially incorrect results in release builds when invoking a [user-defined function]({% link v23.1/user-defined-functions.md %}) with a subquery argument. This bug was only present in v23.1 alpha versions. #101641 - Point [inserts]({% link v23.1/insert.md %}) and [updates]({% link v23.1/update.md %}) that write to a remote region of a table created with the [`REGIONAL BY ROW AS`]({% link v23.1/create-table.md %}#create-a-table-with-a-regional-by-row-locality-using-a-custom-region-column) clause will now error out. -- Fixed a bug in the [built-in functions]({% link v23.1/functions-and-operators.md %}) `pg_get_indexdef` and `col_description` that could cause the functions to return errors if the user created tables named `pg_indexes` or `pg_attribute`. Or, if the user created a schema named `system` with a table named `comments`. This bug was only present in pre-release versions of v23.1. [#101688][#101688] -- The descriptions of `rebalancing.readbytespersecond` and `rebalancing.writebytespersecond` metrics now correctly reference bytes read and bytes written, respectively. 
[#101709][#101709] -- Fixed a bug to ensure that the [list of fingerprints]({% link v23.1/ui-statements-page.md %}#statement-fingerprints-view) used per index is shown even when there is a max-size limit on the SQL API. [#101783][#101783] -- Fixed a bug where, when CockroachDB failed to retrieve [contention information]({% link v23.1/performance-best-practices-overview.md %}#transaction-contention), the full [Insights]({% link v23.1/ui-insights-page.md %}) page would return an error. Now, the Insights page will load even when there is an issue with decoding contention information. [#101782][#101782] -- Fixed a bug where a [`RESTORE`]({% link v23.1/restore.md %}) operation with `skip_localities_check` could fail with errors if regions were missing on a cluster. [#101797][#101797] -- Fixed a bug in [the optimizer]({% link v23.1/cost-based-optimizer.md %}) that could cause an internal error in rare cases for a query with [outer joins]({% link v23.1/joins.md %}#full-outer-joins) that could be simplified to non-outer joins and at least one semi-join. This bug was present since before v22.1. [#100670][#100670] -- Fixed a bug where CockroachDB previously incorrectly evaluated [`EXPORT`]({% link v23.1/export.md %}) statements that had projections or rendering on top of the `EXPORT` (e.g. the [common table expression]({% link v23.1/common-table-expressions.md %}) `WITH cte AS (EXPORT INTO CSV 'nodelocal://1/export1/' FROM SELECT * FROM t) SELECT filename FROM cte;`). Such statements would result in panics or incorrect query results. Note that the exported data wasn't affected, only the presentation of the query result. This bug had been present since v22.1 or earlier. 
[#101805][#101805] -- Fixed a bug in [the optimizer]({% link v23.1/cost-based-optimizer.md %}) introduced in v22.2.0 that could cause queries containing a [subquery]({% link v23.1/subqueries.md %}) with a lateral join, in which the right side of the lateral join was an [aliased data source]({% link v23.1/table-expressions.md %}#aliased-table-expressions), to return an internal error in some cases. For example, it could cause an error if the subquery was provided as an argument to an [aggregate function]({% link v23.1/functions-and-operators.md %}#aggregate-functions). [#101863][#101863] -- Fixed handling of expressions in `IN` clauses such as `(c1, c2) IN (SELECT c3+1, c4+1 FROM ...)`. Previously, such query expressions would error out during type checking. [#102027][#102027] -- Fixed a potential bug whereby a failed or cancelled [`IMPORT`]({% link v23.1/import.md %}) could leave some of the imported rows behind after it was cancelled, in the rare event that the writing processes were slow enough to continue writing after the cleanup process had started. [#101443][#101443] -- Fixed a bug that could cause incorrect results for queries invoking `STRICT` [user-defined functions]({% link v23.1/user-defined-functions.md %}). This bug was only present in pre-release versions of 23.1. [#101950][#101950] -- Fixed a bug where CockroachDB's [pgwire]({% link v23.1/postgresql-compatibility.md %}) implementation would incorrectly parse arrays if they were sent as placeholder arguments to a prepared statement, and the argument had spaces in between the array elements. [#101596][#101596] -- Fixed a very rare bug that could cause keys to be unexpectedly deleted locally within a [store]({% link v23.1/cockroach-start.md %}#flags-store) by [replica rebalancing](https://www.cockroachlabs.com/docs/v23.1/architecture/replication-layer) during a write heavy workload. 
[#102167][#102167] -- Fixed a bug in the behavior of the `enforce_home_region` [session variable]({% link v23.1/set-vars.md %}) that may have allowed a hash join to be favored over a lookup join, or failed to error out remote accesses done by uniqueness checks for mutations on [`REGIONAL BY ROW` tables]({% link v23.1/regional-tables.md %}). Also, fixed static erroring of some locality-optimized lookup joins to now be handled dynamically during query execution. [#102207][#102207] -- Fixed a bug introduced in testing releases of v23.1 where a node could crash when evaluating a [`COPY`]({% link v23.1/copy.md %}) command when the schema had `INT2` or `INT4` type. [#102306][#102306] -- Fixed a bug where a [backup]({% link v23.1/backup-and-restore-overview.md %}) with a key's revision history split across multiple [SST files](https://www.cockroachlabs.com/docs/v23.1/architecture/storage-layer#ssts) may not have correctly restored the proper revision of the key. [#102343][#102343] -- Fixed a bug in testing releases of v23.1 where a user could be prevented from logging in or viewing or changing [`GRANT`s]({% link v23.1/grant.md %}) if the cluster had a long enough period of inactivity. [#102489][#102489] -- Previously, CockroachDB could encounter a "command is too large" error when evaluating [`UPSERT`]({% link v23.1/upsert.md %}) statements such that the new values combined exceeded the size of the `kv.raft.command.max_size` cluster setting. This bug had been present since before v21.1 and initially all write operations ([`INSERT`]({% link v23.1/insert.md %}), [`UPDATE`]({% link v23.1/update.md %}), [`DELETE`]({% link v23.1/delete.md %})) were affected; however, in v21.2 those three were fixed, but `UPSERT` was forgotten about. This is now fixed. 
[#102514][#102514] -- Fixed a bug introduced in v22.1.19, v22.2.8, and pre-release versions of 23.1 that could cause queries to return spurious insufficient [privilege](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization) errors. For the bug to occur, two databases would need to have duplicate tables each with a [foreign key]({% link v23.1/foreign-key.md %}) reference to another table. The error would then occur if the same SQL string was executed against both databases concurrently by users that have [privileges](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization) over only one of the tables. [#102626][#102626] -- Fixed a bug where [`RENAME COLUMN`]({% link v23.1/alter-table.md %}#rename-column) was incorrectly allowed and would modify these columns node-wide. [#102460][#102460] -- Fixed a bug where the internal `node` pseudo-role was not viewable through introspection of `pg_catalog` tables. [#102662][#102662] -- Fixed a bug where [`COPY ... TO`]({% link v23.1/copy.md %}) statements would always fail when used in a prepared statement. CockroachDB now matches the pgwire handling of prepared `COPY ... TO` statements. [#102663][#102663] -- Fixed a bug where the [`ALTER DEFAULT PRIVILEGES ... GRANT USAGE ON SEQUENCES`]({% link v23.1/alter-default-privileges.md %}) statement would fail because the sequence object was mapped to an incorrect internal privilege object. [#102724][#102724] -- Fixed a minor bug that caused an internal error for some queries with nested [subqueries]({% link v23.1/subqueries.md %}) instead of the more appropriate "could not decorrelate subquery" error. This bug was only present in pre-release alpha and beta versions of 23.1. [#102384][#102384] -- Fixed a bug that allowed values to be inserted into an [`ARRAY`]({% link v23.1/array.md %})-type column that did not conform to the inner-type of the array. For example, it was possible to insert `ARRAY['foo']` into a column of type `CHAR(1)[]`. 
This could cause incorrect results when querying the table. The insert now errors, which is expected. This bug was present since v21.1. [#102807][#102807] -- Fixed an issue where running `SHOW HISTOGRAM` to see the histogram for an [`ENUM`]({% link v23.1/enum.md %})-type column could cause a panic and crash the cockroach process. This issue has existed since v20.2.0. [#102828][#102828] -- Fixed the behavior of [user-defined functions (UDFs)]({% link v23.1/user-defined-functions.md %}) to return its results as a row instead of a tuple when UDFs are called in a query as a data source. This is now compatible with PostgreSQL behavior. [#102188][#102188] -- Fixed a bug in [closed timestamp](https://www.cockroachlabs.com/docs/v23.1/architecture/transaction-layer#closed-timestamps) updates within its side-transport. Previously, during asymmetric partitions, a node that transfers a lease away, and misses a liveness heartbeat, could then erroneously update the closed timestamp during the stasis period of its liveness. This could lead to closed timestamp invariant violation, and node crashes; in extreme cases, this could lead to inconsistencies in read-only queries. [#102601][#102601] -- Fixed a bug where [Row-level TTL]({% link v23.1/row-level-ttl.md %}) jobs would incorrectly process a table that spanned multiple ranges in rare cases. This bug had been present since v22.2.0. You were affected by this bug if you saw the error message `"error decoding EncDatum of ...: did not find terminator ... in buffer ..."`. [#102881][#102881] -- The `Retry.Max` field of the [`webhook_sink_config`]({% link v23.1/changefeed-sinks.md %}#webhook-sink) option for changefeeds was not correctly defined in the docs. The existing docs mention that it is "The maximum amount of time the sink will retry a single HTTP request to send a batch", but this is incorrect. It actually represents the maximum number of retries which will be attempted when sending a batch in an HTTP request fails. 
This is now fixed to correctly capture its behavior. Also, fixed a bug where the retry time would wait for 4 seconds, regardless of `Retry.Max`. With this change, the new maximum retry time is 30 seconds. The initial backoff will keep doubling every time a retry occurs until the maximum of 30 seconds is reached. E.g. If `Retry.Max = 4` and the initial backoff is 10 seconds, at most 4 retries will be performed with the backoff times 10, 20, 30, 30 seconds respectively. [#102958][#102958] -- Stopped using a `NULL` value for `pg_constraint.conparentid`. Now the value is hard-coded to `0`, since CockroachDB does not support constraints on [partitions]({% link v23.1/partitioning.md %}). [#103234][#103234] -- Fixed a bug where [`COPY`]({% link v23.1/copy.md %}) in v23.1.0 and beta versions would incorrectly encode data with multiple column families. The data must be dropped and re-imported to be encoded correctly. [#103355][#103355] -- Optimized over-head of [`pg_catalog.pg_description`]({% link v23.1/pg-catalog.md %}) and [`pg_catalog.pg_shdescription`]({% link v23.1/pg-catalog.md %}), which can lead to performance regression relative to v22.2 [#103331][#103331] -- Timeseries [metric]({% link v23.1/metrics.md %}) counts will now show cumulative counts for a histogram rather than a windowed count. A `-sum` timeseries is also exported to keep track of the cumulative sum of all samples in the histogram. [#103444][#103444] +- Fixed a bug in the [built-in functions]({% link v23.1/functions-and-operators.md %}) `pg_get_indexdef` and `col_description` that could cause the functions to return errors if the user created tables named `pg_indexes` or `pg_attribute`. Or, if the user created a schema named `system` with a table named `comments`. This bug was only present in pre-release versions of v23.1. #101688 +- The descriptions of `rebalancing.readbytespersecond` and `rebalancing.writebytespersecond` metrics now correctly reference bytes read and bytes written, respectively. 
#101709 +- Fixed a bug to ensure that the [list of fingerprints]({% link v23.1/ui-statements-page.md %}#statement-fingerprints-view) used per index is shown even when there is a max-size limit on the SQL API. #101783 +- Fixed a bug where, when CockroachDB failed to retrieve [contention information]({% link v23.1/performance-best-practices-overview.md %}#transaction-contention), the full [Insights]({% link v23.1/ui-insights-page.md %}) page would return an error. Now, the Insights page will load even when there is an issue with decoding contention information. #101782 +- Fixed a bug where a [`RESTORE`]({% link v23.1/restore.md %}) operation with `skip_localities_check` could fail with errors if regions were missing on a cluster. #101797 +- Fixed a bug in [the optimizer]({% link v23.1/cost-based-optimizer.md %}) that could cause an internal error in rare cases for a query with [outer joins]({% link v23.1/joins.md %}#full-outer-joins) that could be simplified to non-outer joins and at least one semi-join. This bug was present since before v22.1. #100670 +- Fixed a bug where CockroachDB previously incorrectly evaluated [`EXPORT`]({% link v23.1/export.md %}) statements that had projections or rendering on top of the `EXPORT` (e.g. the [common table expression]({% link v23.1/common-table-expressions.md %}) `WITH cte AS (EXPORT INTO CSV 'nodelocal://1/export1/' FROM SELECT * FROM t) SELECT filename FROM cte;`). Such statements would result in panics or incorrect query results. Note that the exported data wasn't affected, only the presentation of the query result. This bug had been present since v22.1 or earlier. 
#101805 +- Fixed a bug in [the optimizer]({% link v23.1/cost-based-optimizer.md %}) introduced in v22.2.0 that could cause queries containing a [subquery]({% link v23.1/subqueries.md %}) with a lateral join, in which the right side of the lateral join was an [aliased data source]({% link v23.1/table-expressions.md %}#aliased-table-expressions), to return an internal error in some cases. For example, it could cause an error if the subquery was provided as an argument to an [aggregate function]({% link v23.1/functions-and-operators.md %}#aggregate-functions). #101863 +- Fixed handling of expressions in `IN` clauses such as `(c1, c2) IN (SELECT c3+1, c4+1 FROM ...)`. Previously, such query expressions would error out during type checking. #102027 +- Fixed a potential bug whereby a failed or cancelled [`IMPORT`]({% link v23.1/import.md %}) could leave some of the imported rows behind after it was cancelled, in the rare event that the writing processes were slow enough to continue writing after the cleanup process had started. #101443 +- Fixed a bug that could cause incorrect results for queries invoking `STRICT` [user-defined functions]({% link v23.1/user-defined-functions.md %}). This bug was only present in pre-release versions of 23.1. #101950 +- Fixed a bug where CockroachDB's [pgwire]({% link v23.1/postgresql-compatibility.md %}) implementation would incorrectly parse arrays if they were sent as placeholder arguments to a prepared statement, and the argument had spaces in between the array elements. #101596 +- Fixed a very rare bug that could cause keys to be unexpectedly deleted locally within a [store]({% link v23.1/cockroach-start.md %}#flags-store) by [replica rebalancing](https://www.cockroachlabs.com/docs/v23.1/architecture/replication-layer) during a write heavy workload. 
#102167 +- Fixed a bug in the behavior of the `enforce_home_region` [session variable]({% link v23.1/set-vars.md %}) that may have allowed a hash join to be favored over a lookup join, or failed to error out remote accesses done by uniqueness checks for mutations on [`REGIONAL BY ROW` tables]({% link v23.1/regional-tables.md %}). Also, fixed static erroring of some locality-optimized lookup joins to now be handled dynamically during query execution. #102207 +- Fixed a bug introduced in testing releases of v23.1 where a node could crash when evaluating a [`COPY`]({% link v23.1/copy.md %}) command when the schema had `INT2` or `INT4` type. #102306 +- Fixed a bug where a [backup]({% link v23.1/backup-and-restore-overview.md %}) with a key's revision history split across multiple [SST files](https://www.cockroachlabs.com/docs/v23.1/architecture/storage-layer#ssts) may not have correctly restored the proper revision of the key. #102343 +- Fixed a bug in testing releases of v23.1 where a user could be prevented from logging in or viewing or changing [`GRANT`s]({% link v23.1/grant.md %}) if the cluster had a long enough period of inactivity. #102489 +- Previously, CockroachDB could encounter a "command is too large" error when evaluating [`UPSERT`]({% link v23.1/upsert.md %}) statements such that the new values combined exceeded the size of the `kv.raft.command.max_size` cluster setting. This bug had been present since before v21.1 and initially all write operations ([`INSERT`]({% link v23.1/insert.md %}), [`UPDATE`]({% link v23.1/update.md %}), [`DELETE`]({% link v23.1/delete.md %})) were affected; however, in v21.2 those three were fixed, but `UPSERT` was forgotten about. This is now fixed. #102514 +- Fixed a bug introduced in v22.1.19, v22.2.8, and pre-release versions of 23.1 that could cause queries to return spurious insufficient [privilege](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization) errors. 
For the bug to occur, two databases would need to have duplicate tables each with a [foreign key]({% link v23.1/foreign-key.md %}) reference to another table. The error would then occur if the same SQL string was executed against both databases concurrently by users that have [privileges](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization) over only one of the tables. #102626 +- Fixed a bug where [`RENAME COLUMN`]({% link v23.1/alter-table.md %}#rename-column) was incorrectly allowed and would modify these columns node-wide. #102460 +- Fixed a bug where the internal `node` pseudo-role was not viewable through introspection of `pg_catalog` tables. #102662 +- Fixed a bug where [`COPY ... TO`]({% link v23.1/copy.md %}) statements would always fail when used in a prepared statement. CockroachDB now matches the pgwire handling of prepared `COPY ... TO` statements. #102663 +- Fixed a bug where the [`ALTER DEFAULT PRIVILEGES ... GRANT USAGE ON SEQUENCES`]({% link v23.1/alter-default-privileges.md %}) statement would fail because the sequence object was mapped to an incorrect internal privilege object. #102724 +- Fixed a minor bug that caused an internal error for some queries with nested [subqueries]({% link v23.1/subqueries.md %}) instead of the more appropriate "could not decorrelate subquery" error. This bug was only present in pre-release alpha and beta versions of 23.1. #102384 +- Fixed a bug that allowed values to be inserted into an [`ARRAY`]({% link v23.1/array.md %})-type column that did not conform to the inner-type of the array. For example, it was possible to insert `ARRAY['foo']` into a column of type `CHAR(1)[]`. This could cause incorrect results when querying the table. The insert now errors, which is expected. This bug was present since v21.1. #102807 +- Fixed an issue where running `SHOW HISTOGRAM` to see the histogram for an [`ENUM`]({% link v23.1/enum.md %})-type column could cause a panic and crash the cockroach process. 
This issue has existed since v20.2.0. #102828 +- Fixed the behavior of [user-defined functions (UDFs)]({% link v23.1/user-defined-functions.md %}) to return its results as a row instead of a tuple when UDFs are called in a query as a data source. This is now compatible with PostgreSQL behavior. #102188 +- Fixed a bug in [closed timestamp](https://www.cockroachlabs.com/docs/v23.1/architecture/transaction-layer#closed-timestamps) updates within its side-transport. Previously, during asymmetric partitions, a node that transfers a lease away, and misses a liveness heartbeat, could then erroneously update the closed timestamp during the stasis period of its liveness. This could lead to closed timestamp invariant violation, and node crashes; in extreme cases, this could lead to inconsistencies in read-only queries. #102601 +- Fixed a bug where [Row-level TTL]({% link v23.1/row-level-ttl.md %}) jobs would incorrectly process a table that spanned multiple ranges in rare cases. This bug had been present since v22.2.0. You were affected by this bug if you saw the error message `"error decoding EncDatum of ...: did not find terminator ... in buffer ..."`. #102881 +- The `Retry.Max` field of the [`webhook_sink_config`]({% link v23.1/changefeed-sinks.md %}#webhook-sink) option for changefeeds was not correctly defined in the docs. The existing docs mention that it is "The maximum amount of time the sink will retry a single HTTP request to send a batch", but this is incorrect. It actually represents the maximum number of retries which will be attempted when sending a batch in an HTTP request fails. This is now fixed to correctly capture its behavior. Also, fixed a bug where the retry time would wait for 4 seconds, regardless of `Retry.Max`. With this change, the new maximum retry time is 30 seconds. The initial backoff will keep doubling every time a retry occurs until the maximum of 30 seconds is reached. E.g. 
If `Retry.Max = 4` and the initial backoff is 10 seconds, at most 4 retries will be performed with the backoff times 10, 20, 30, 30 seconds respectively. #102958 +- Stopped using a `NULL` value for `pg_constraint.conparentid`. Now the value is hard-coded to `0`, since CockroachDB does not support constraints on [partitions]({% link v23.1/partitioning.md %}). #103234 +- Fixed a bug where [`COPY`]({% link v23.1/copy.md %}) in v23.1.0 and beta versions would incorrectly encode data with multiple column families. The data must be dropped and re-imported to be encoded correctly. #103355 +- Optimized overhead of [`pg_catalog.pg_description`]({% link v23.1/pg-catalog.md %}) and [`pg_catalog.pg_shdescription`]({% link v23.1/pg-catalog.md %}), which can lead to performance regression relative to v22.2. #103331 +- Timeseries [metric]({% link v23.1/metrics.md %}) counts will now show cumulative counts for a histogram rather than a windowed count. A `-sum` timeseries is also exported to keep track of the cumulative sum of all samples in the histogram. #103444 - Fixed a bug where CockroachDB could produce incorrect results when evaluating queries with [`ORDER BY`]({% link v23.1/order-by.md %}) clause in rare circumstances. In particular, some rows could be duplicated if all of the following conditions were met: 1. The query had a `LIMIT` clause. 1. The `SORT` operation had to spill to disk (meaning that `LIMIT` number of rows used up non-trivial amounts of memory, e.g. the rows were "wide"). 1. The `ORDER BY` clause contained multiple columns **and** the ordering on the prefix of those columns was already provided by the index. - The bug has been present since at least v22.1. [#102790][#102790] -- Fixed a bug where CockroachDB could previously encounter a nil pointer crash when populating data for [SQL Activity]({% link v23.1/ui-overview.md %}#sql-activity) page in some rare cases. The bug was present in v22.2.9 and v23.1.1 releases. 
[#103521][#103521] -- Fixed calls to undefined objects. [#103520][#103520] -- Fixed a bug where `0` with `exponents < -6` would display as `0E(exponent)` instead of printing all `0`s, e.g. `0E-7` should be `0.0000000`. [#103640][#103640] -- Fixed a bug that could prevent [`RESTORE`]({% link v23.1/restore.md %}) from working if the backup had a refresh materialized view mutation in it. [#103233][#103233] -- In earlier patch releases of v23.1, it was possible for [backups]({% link v23.1/backup-and-restore-overview.md %}) to be excessively slow, slower than they were in earlier releases. It was also possible for them to fail with errors of the following form: `operation "Export Request for span ..." timed out after 5m0.001s`. At least one of the reasons for this behavior is now addressed. This problem also affected v22.2 clusters if using a hidden-by-default, default-as-disabled `admission.elastic_cpu.enabled` cluster setting. [#103626][#103626] -- Fixed a crash/panic that could occur if placeholder arguments were used with the `with_min_timestamp(to_timestamp($1))` [functions]({% link v23.1/functions-and-operators.md %}). [#103630][#103630] -- Fixed a panic that could occur if a [`COPY TO`]({% link v23.1/copy.md %}) statement had a subquery that was logged with redaction markers. [#103689][#103689] -- Fixed a bug where [`SET PRIMARY REGION`]({% link v23.1/alter-database.md %}#set-primary-region) and [`SET SECONDARY REGION`]({% link v23.1/alter-database.md %}#set-secondary-region) did not validate transactionally, which could prevent cleaning up removed regions after a [restore]({% link v23.1/restore.md %}). [#103631][#103631] -- [`DROP ROLE`]({% link v23.1/drop-role.md %}) now correctly returns a 2BP01 error when the given role has been granted privileges on a schema. 
[#103546][#103546] -- Fixed a bug where, under high CPU load, HTTP requests to certain API endpoints such as the `health` endpoint would start failing and then never succeed again until the node was restarted. This bug was introduced in v23.1. [#103775][#103775] -- When the option `WITH TABLES` or `WITH INDEXES` is passed to [`SHOW RANGES`]({% link v23.1/show-ranges.md %}), the per-object start/end key columns now properly refers to the part of the object included inside the range identified by the current row. Previously, they could incorrectly point to keys outside of the current range's boundaries. This bug had been introduced in v23.1. [#103777][#103777] -- Fixed a bug in `VALUES` clauses containing a call to a record-returning [UDF]({% link v23.1/user-defined-functions.md %}) that could manifest as an internal error in some queries. [#103639][#103639] -- Fixed a bug with choosing optimal plan where [prepared statements]({% link v23.1/savepoint.md %}#savepoints-and-prepared-statements) using placeholders in recursive CTEs sometimes did not re-optimize correctly after plugging in the parameters. [#99823][#99823] -- Fixed a bug where `kv` was read when fetching a qualified name of a leased [UDF]({% link v23.1/user-defined-functions.md %}). [#103089][#103089] + The bug has been present since at least v22.1. #102790 +- Fixed a bug where CockroachDB could previously encounter a nil pointer crash when populating data for [SQL Activity]({% link v23.1/ui-overview.md %}#sql-activity) page in some rare cases. The bug was present in v22.2.9 and v23.1.1 releases. #103521 +- Fixed calls to undefined objects. #103520 +- Fixed a bug where `0` with `exponents < -6` would display as `0E(exponent)` instead of printing all `0`s, e.g. `0E-7` should be `0.0000000`. #103640 +- Fixed a bug that could prevent [`RESTORE`]({% link v23.1/restore.md %}) from working if the backup had a refresh materialized view mutation in it. 
#103233 +- In earlier patch releases of v23.1, it was possible for [backups]({% link v23.1/backup-and-restore-overview.md %}) to be excessively slow, slower than they were in earlier releases. It was also possible for them to fail with errors of the following form: `operation "Export Request for span ..." timed out after 5m0.001s`. At least one of the reasons for this behavior is now addressed. This problem also affected v22.2 clusters if using a hidden-by-default, default-as-disabled `admission.elastic_cpu.enabled` cluster setting. #103626 +- Fixed a crash/panic that could occur if placeholder arguments were used with the `with_min_timestamp(to_timestamp($1))` [functions]({% link v23.1/functions-and-operators.md %}). #103630 +- Fixed a panic that could occur if a [`COPY TO`]({% link v23.1/copy.md %}) statement had a subquery that was logged with redaction markers. #103689 +- Fixed a bug where [`SET PRIMARY REGION`]({% link v23.1/alter-database.md %}#set-primary-region) and [`SET SECONDARY REGION`]({% link v23.1/alter-database.md %}#set-secondary-region) did not validate transactionally, which could prevent cleaning up removed regions after a [restore]({% link v23.1/restore.md %}). #103631 +- [`DROP ROLE`]({% link v23.1/drop-role.md %}) now correctly returns a 2BP01 error when the given role has been granted privileges on a schema. #103546 +- Fixed a bug where, under high CPU load, HTTP requests to certain API endpoints such as the `health` endpoint would start failing and then never succeed again until the node was restarted. This bug was introduced in v23.1. #103775 +- When the option `WITH TABLES` or `WITH INDEXES` is passed to [`SHOW RANGES`]({% link v23.1/show-ranges.md %}), the per-object start/end key columns now properly refers to the part of the object included inside the range identified by the current row. Previously, they could incorrectly point to keys outside of the current range's boundaries. This bug had been introduced in v23.1. 
#103777 +- Fixed a bug in `VALUES` clauses containing a call to a record-returning [UDF]({% link v23.1/user-defined-functions.md %}) that could manifest as an internal error in some queries. #103639 +- Fixed a bug with choosing an optimal plan where [prepared statements]({% link v23.1/savepoint.md %}#savepoints-and-prepared-statements) using placeholders in recursive CTEs sometimes did not re-optimize correctly after plugging in the parameters. #99823 +- Fixed a bug where `kv` was read when fetching a qualified name of a leased [UDF]({% link v23.1/user-defined-functions.md %}). #103089

Performance improvements

-- Added an opt-in pacing mechanism to [rangefeed]({% link v23.1/create-and-configure-changefeeds.md %}#enable-rangefeeds) closed timestamp notifications. Pacing is controlled by the `kv.rangefeed.closed_timestamp_smear_interval` [cluster setting]({% link v23.1/cluster-settings.md %}), which defaults to `kv.rangefeed.closed_timestamp_refresh_interval`. Lowering the smear interval makes the rangefeed closed timestamp delivery less spikey, which ultimately reduces its impact on foreground SQL latencies. [#99946][#99946] -- Queries that have [subqueries]({% link v23.1/subqueries.md %}) in equality expressions are now more efficiently planned by [the optimizer]({% link v23.1/cost-based-optimizer.md %}) when `optimizer_hoist_uncorrelated_equality_subqueries` is set to `true`. [#101753][#101753] -- [SQL Activity]({% link v23.1/ui-sql-dashboard.md %}) dashboards now default to using a table optimized with the top data for the most used cases. Else, they use persisted data if available, and in-memory data otherwise. [#102309][#102309] -- Statistics on the `system.jobs` table are now automatically collected, which will enable [the optimizer]({% link v23.1/cost-based-optimizer.md %}) to produce better query plans for internal queries that access the `system.jobs` table. This may result in better performance of the system. [#102637][#102637] -- Improved [changefeeds]({% link v23.1/change-data-capture-overview.md %}) to decrease the commit-to-emit latency (i.e. The difference between an event's MVCC timestamp and the time it is ready to emit to a downstream sink). Previously, it is determined by a non-documented cluster setting (`changefeed.experimental_poll_interval`) which defaults to `1s`. To enable this performance improvement, users should "lock" the watched table with [`ALTER TABLE SET (schema_locked =t);`]({% link v23.1/alter-table.md %}#set-storage-parameter), which would disallow schema changes on ``. 
If a schema change statement is attempted on a locked table, CockroachDB will reject it and return an error. The user could lock the table either before creating a changefeed or, when a changefeed is running, CockroachDB will be able to detect that and enable this performance improvement automatically. If the user is running a changefeed on a locked table but wishes to perform schema changes to the table, they need to explicitly unlock the table first with [`ALTER TABLE RESET schema_locked`]({% link v23.1/alter-table.md %}#reset-storage-parameter). After the schema change completes, the user can lock the table again to re-gain this performance improvement. The changefeed job itself does not need to be modified in any way by the user (e.g. the user does not need to pause the job when locking/unlocking a table). This change is a "pure" optimization in that if the table is not locked, everything should still work the way it used to. [#102977][#102977] -- Improved performance when joining with the `pg_description` table. [#103331][#103331] -- Added concurrency to speed up the phase of the [restore]({% link v23.1/restore.md %}) that ingests backed up table statisitcs. [#102694][#102694] -- Added support for constrained scans using computed columns which are part of an [index]({% link v23.1/indexes.md %}) when there is an `IN` list or `OR`'ed predicate on the columns that appear in the computed column expression. [#103412][#103412] +- Added an opt-in pacing mechanism to [rangefeed]({% link v23.1/create-and-configure-changefeeds.md %}#enable-rangefeeds) closed timestamp notifications. Pacing is controlled by the `kv.rangefeed.closed_timestamp_smear_interval` [cluster setting]({% link v23.1/cluster-settings.md %}), which defaults to `kv.rangefeed.closed_timestamp_refresh_interval`. Lowering the smear interval makes the rangefeed closed timestamp delivery less spikey, which ultimately reduces its impact on foreground SQL latencies. 
#99946 +- Queries that have [subqueries]({% link v23.1/subqueries.md %}) in equality expressions are now more efficiently planned by [the optimizer]({% link v23.1/cost-based-optimizer.md %}) when `optimizer_hoist_uncorrelated_equality_subqueries` is set to `true`. #101753 +- [SQL Activity]({% link v23.1/ui-sql-dashboard.md %}) dashboards now default to using a table optimized with the top data for the most used cases. Else, they use persisted data if available, and in-memory data otherwise. #102309 +- Statistics on the `system.jobs` table are now automatically collected, which will enable [the optimizer]({% link v23.1/cost-based-optimizer.md %}) to produce better query plans for internal queries that access the `system.jobs` table. This may result in better performance of the system. #102637 +- Improved [changefeeds]({% link v23.1/change-data-capture-overview.md %}) to decrease the commit-to-emit latency (i.e. The difference between an event's MVCC timestamp and the time it is ready to emit to a downstream sink). Previously, it is determined by a non-documented cluster setting (`changefeed.experimental_poll_interval`) which defaults to `1s`. To enable this performance improvement, users should "lock" the watched table with [`ALTER TABLE SET (schema_locked =t);`]({% link v23.1/alter-table.md %}#set-storage-parameter), which would disallow schema changes on ``. If a schema change statement is attempted on a locked table, CockroachDB will reject it and return an error. The user could lock the table either before creating a changefeed or, when a changefeed is running, CockroachDB will be able to detect that and enable this performance improvement automatically. If the user is running a changefeed on a locked table but wishes to perform schema changes to the table, they need to explicitly unlock the table first with [`ALTER TABLE RESET schema_locked`]({% link v23.1/alter-table.md %}#reset-storage-parameter). 
After the schema change completes, the user can lock the table again to re-gain this performance improvement. The changefeed job itself does not need to be modified in any way by the user (e.g. the user does not need to pause the job when locking/unlocking a table). This change is a "pure" optimization in that if the table is not locked, everything should still work the way it used to. #102977 +- Improved performance when joining with the `pg_description` table. #103331 +- Added concurrency to speed up the phase of the [restore]({% link v23.1/restore.md %}) that ingests backed up table statistics. #102694 +- Added support for constrained scans using computed columns which are part of an [index]({% link v23.1/indexes.md %}) when there is an `IN` list or `OR`'ed predicate on the columns that appear in the computed column expression. #103412 - Added two new statistics which are useful for tracking the efficiency of [snapshot transfers](https://www.cockroachlabs.com/docs/v23.1/architecture/replication-layer#snapshots). Some snapshots will always fail due to system level "races", but the goal is to keep it as low as possible. - `range.snapshots.recv-failed` - The number of snapshots sent attempts that are initiated but not accepted by the recipient. - - `range.snapshots.recv-unusable` - The number of snapshots that were fully transmitted but not used. [#101837][#101837] + - `range.snapshots.recv-unusable` - The number of snapshots that were fully transmitted but not used. #101837

Build changes

-- Updated the reported `Build Tag` for nightly (non-release) builds. [#101998][#101998] +- Updated the reported `Build Tag` for nightly (non-release) builds. #101998
@@ -172,134 +172,3 @@ We would like to thank the following contributors from the CockroachDB community
-[#100049]: https://github.com/cockroachdb/cockroach/pull/100049 -[#100475]: https://github.com/cockroachdb/cockroach/pull/100475 -[#100509]: https://github.com/cockroachdb/cockroach/pull/100509 -[#100670]: https://github.com/cockroachdb/cockroach/pull/100670 -[#100841]: https://github.com/cockroachdb/cockroach/pull/100841 -[#100844]: https://github.com/cockroachdb/cockroach/pull/100844 -[#100939]: https://github.com/cockroachdb/cockroach/pull/100939 -[#100948]: https://github.com/cockroachdb/cockroach/pull/100948 -[#101094]: https://github.com/cockroachdb/cockroach/pull/101094 -[#101135]: https://github.com/cockroachdb/cockroach/pull/101135 -[#101164]: https://github.com/cockroachdb/cockroach/pull/101164 -[#101173]: https://github.com/cockroachdb/cockroach/pull/101173 -[#101301]: https://github.com/cockroachdb/cockroach/pull/101301 -[#101309]: https://github.com/cockroachdb/cockroach/pull/101309 -[#101367]: https://github.com/cockroachdb/cockroach/pull/101367 -[#101382]: https://github.com/cockroachdb/cockroach/pull/101382 -[#101392]: https://github.com/cockroachdb/cockroach/pull/101392 -[#101428]: https://github.com/cockroachdb/cockroach/pull/101428 -[#101443]: https://github.com/cockroachdb/cockroach/pull/101443 -[#101483]: https://github.com/cockroachdb/cockroach/pull/101483 -[#101491]: https://github.com/cockroachdb/cockroach/pull/101491 -[#101493]: https://github.com/cockroachdb/cockroach/pull/101493 -[#101507]: https://github.com/cockroachdb/cockroach/pull/101507 -[#101564]: https://github.com/cockroachdb/cockroach/pull/101564 -[#101596]: https://github.com/cockroachdb/cockroach/pull/101596 -[#101641]: https://github.com/cockroachdb/cockroach/pull/101641 -[#101669]: https://github.com/cockroachdb/cockroach/pull/101669 -[#101688]: https://github.com/cockroachdb/cockroach/pull/101688 -[#101697]: https://github.com/cockroachdb/cockroach/pull/101697 -[#101709]: https://github.com/cockroachdb/cockroach/pull/101709 -[#101753]: 
https://github.com/cockroachdb/cockroach/pull/101753 -[#101758]: https://github.com/cockroachdb/cockroach/pull/101758 -[#101759]: https://github.com/cockroachdb/cockroach/pull/101759 -[#101782]: https://github.com/cockroachdb/cockroach/pull/101782 -[#101783]: https://github.com/cockroachdb/cockroach/pull/101783 -[#101793]: https://github.com/cockroachdb/cockroach/pull/101793 -[#101797]: https://github.com/cockroachdb/cockroach/pull/101797 -[#101802]: https://github.com/cockroachdb/cockroach/pull/101802 -[#101805]: https://github.com/cockroachdb/cockroach/pull/101805 -[#101809]: https://github.com/cockroachdb/cockroach/pull/101809 -[#101837]: https://github.com/cockroachdb/cockroach/pull/101837 -[#101863]: https://github.com/cockroachdb/cockroach/pull/101863 -[#101870]: https://github.com/cockroachdb/cockroach/pull/101870 -[#101874]: https://github.com/cockroachdb/cockroach/pull/101874 -[#101877]: https://github.com/cockroachdb/cockroach/pull/101877 -[#101950]: https://github.com/cockroachdb/cockroach/pull/101950 -[#101996]: https://github.com/cockroachdb/cockroach/pull/101996 -[#101998]: https://github.com/cockroachdb/cockroach/pull/101998 -[#102002]: https://github.com/cockroachdb/cockroach/pull/102002 -[#102027]: https://github.com/cockroachdb/cockroach/pull/102027 -[#102167]: https://github.com/cockroachdb/cockroach/pull/102167 -[#102188]: https://github.com/cockroachdb/cockroach/pull/102188 -[#102195]: https://github.com/cockroachdb/cockroach/pull/102195 -[#102207]: https://github.com/cockroachdb/cockroach/pull/102207 -[#102274]: https://github.com/cockroachdb/cockroach/pull/102274 -[#102306]: https://github.com/cockroachdb/cockroach/pull/102306 -[#102309]: https://github.com/cockroachdb/cockroach/pull/102309 -[#102343]: https://github.com/cockroachdb/cockroach/pull/102343 -[#102379]: https://github.com/cockroachdb/cockroach/pull/102379 -[#102384]: https://github.com/cockroachdb/cockroach/pull/102384 -[#102392]: 
https://github.com/cockroachdb/cockroach/pull/102392 -[#102395]: https://github.com/cockroachdb/cockroach/pull/102395 -[#102460]: https://github.com/cockroachdb/cockroach/pull/102460 -[#102468]: https://github.com/cockroachdb/cockroach/pull/102468 -[#102489]: https://github.com/cockroachdb/cockroach/pull/102489 -[#102514]: https://github.com/cockroachdb/cockroach/pull/102514 -[#102601]: https://github.com/cockroachdb/cockroach/pull/102601 -[#102626]: https://github.com/cockroachdb/cockroach/pull/102626 -[#102627]: https://github.com/cockroachdb/cockroach/pull/102627 -[#102637]: https://github.com/cockroachdb/cockroach/pull/102637 -[#102662]: https://github.com/cockroachdb/cockroach/pull/102662 -[#102663]: https://github.com/cockroachdb/cockroach/pull/102663 -[#102694]: https://github.com/cockroachdb/cockroach/pull/102694 -[#102700]: https://github.com/cockroachdb/cockroach/pull/102700 -[#102724]: https://github.com/cockroachdb/cockroach/pull/102724 -[#102790]: https://github.com/cockroachdb/cockroach/pull/102790 -[#102807]: https://github.com/cockroachdb/cockroach/pull/102807 -[#102828]: https://github.com/cockroachdb/cockroach/pull/102828 -[#102862]: https://github.com/cockroachdb/cockroach/pull/102862 -[#102881]: https://github.com/cockroachdb/cockroach/pull/102881 -[#102913]: https://github.com/cockroachdb/cockroach/pull/102913 -[#102947]: https://github.com/cockroachdb/cockroach/pull/102947 -[#102958]: https://github.com/cockroachdb/cockroach/pull/102958 -[#102977]: https://github.com/cockroachdb/cockroach/pull/102977 -[#102989]: https://github.com/cockroachdb/cockroach/pull/102989 -[#103073]: https://github.com/cockroachdb/cockroach/pull/103073 -[#103089]: https://github.com/cockroachdb/cockroach/pull/103089 -[#103144]: https://github.com/cockroachdb/cockroach/pull/103144 -[#103233]: https://github.com/cockroachdb/cockroach/pull/103233 -[#103234]: https://github.com/cockroachdb/cockroach/pull/103234 -[#103328]: 
https://github.com/cockroachdb/cockroach/pull/103328 -[#103331]: https://github.com/cockroachdb/cockroach/pull/103331 -[#103355]: https://github.com/cockroachdb/cockroach/pull/103355 -[#103363]: https://github.com/cockroachdb/cockroach/pull/103363 -[#103411]: https://github.com/cockroachdb/cockroach/pull/103411 -[#103412]: https://github.com/cockroachdb/cockroach/pull/103412 -[#103420]: https://github.com/cockroachdb/cockroach/pull/103420 -[#103421]: https://github.com/cockroachdb/cockroach/pull/103421 -[#103444]: https://github.com/cockroachdb/cockroach/pull/103444 -[#103450]: https://github.com/cockroachdb/cockroach/pull/103450 -[#103451]: https://github.com/cockroachdb/cockroach/pull/103451 -[#103466]: https://github.com/cockroachdb/cockroach/pull/103466 -[#103474]: https://github.com/cockroachdb/cockroach/pull/103474 -[#103520]: https://github.com/cockroachdb/cockroach/pull/103520 -[#103521]: https://github.com/cockroachdb/cockroach/pull/103521 -[#103526]: https://github.com/cockroachdb/cockroach/pull/103526 -[#103546]: https://github.com/cockroachdb/cockroach/pull/103546 -[#103556]: https://github.com/cockroachdb/cockroach/pull/103556 -[#103559]: https://github.com/cockroachdb/cockroach/pull/103559 -[#103626]: https://github.com/cockroachdb/cockroach/pull/103626 -[#103630]: https://github.com/cockroachdb/cockroach/pull/103630 -[#103631]: https://github.com/cockroachdb/cockroach/pull/103631 -[#103637]: https://github.com/cockroachdb/cockroach/pull/103637 -[#103639]: https://github.com/cockroachdb/cockroach/pull/103639 -[#103640]: https://github.com/cockroachdb/cockroach/pull/103640 -[#103689]: https://github.com/cockroachdb/cockroach/pull/103689 -[#103775]: https://github.com/cockroachdb/cockroach/pull/103775 -[#103777]: https://github.com/cockroachdb/cockroach/pull/103777 -[#99823]: https://github.com/cockroachdb/cockroach/pull/99823 -[#99946]: https://github.com/cockroachdb/cockroach/pull/99946 -[0903f9790]: 
https://github.com/cockroachdb/cockroach/commit/0903f9790 -[194007ac9]: https://github.com/cockroachdb/cockroach/commit/194007ac9 -[26f915186]: https://github.com/cockroachdb/cockroach/commit/26f915186 -[406baeb6b]: https://github.com/cockroachdb/cockroach/commit/406baeb6b -[448802fbd]: https://github.com/cockroachdb/cockroach/commit/448802fbd -[653aba7be]: https://github.com/cockroachdb/cockroach/commit/653aba7be -[8734e2c66]: https://github.com/cockroachdb/cockroach/commit/8734e2c66 -[ad2e4eda2]: https://github.com/cockroachdb/cockroach/commit/ad2e4eda2 -[c6f062a53]: https://github.com/cockroachdb/cockroach/commit/c6f062a53 -[cab396771]: https://github.com/cockroachdb/cockroach/commit/cab396771 -[ccfd125aa]: https://github.com/cockroachdb/cockroach/commit/ccfd125aa diff --git a/src/current/_includes/releases/v23.1/v23.1.20.md b/src/current/_includes/releases/v23.1/v23.1.20.md index 770b9a3bd46..5db3802fddf 100644 --- a/src/current/_includes/releases/v23.1/v23.1.20.md +++ b/src/current/_includes/releases/v23.1/v23.1.20.md @@ -6,21 +6,21 @@ Release Date: May 1, 2024

SQL language changes

-- Added a [session variable]({% link v23.1/set-vars.md %}) `optimizer_use_improved_multi_column_selectivity_estimate`, which if enabled, causes the [optimizer]({% link v23.1/cost-based-optimizer.md %}) to use an improved selectivity estimate for multi-column predicates. This setting will default to `true` on versions 24.2+, and `false` on prior versions. [#123152][#123152] +- Added a [session variable]({% link v23.1/set-vars.md %}) `optimizer_use_improved_multi_column_selectivity_estimate`, which if enabled, causes the [optimizer]({% link v23.1/cost-based-optimizer.md %}) to use an improved selectivity estimate for multi-column predicates. This setting will default to `true` on versions 24.2+, and `false` on prior versions. #123152 - Added three new [cluster settings]({% link v23.1/cluster-settings.md %}) for controlling [optimizer table statistics]({% link v23.1/cost-based-optimizer.md %}#table-statistics) forecasting: 1. `sql.stats.forecasts.min_observations` is the minimum number of observed statistics required to produce a forecast. 1. `sql.stats.forecasts.min_goodness_of_fit` is the minimum R² (goodness of fit) measurement required from all predictive models to use a forecast. - 1. `sql.stats.forecasts.max_decrease` is the most a prediction can decrease, expressed as the minimum ratio of the prediction to the lowest prior observation. [#123149][#123149] + 1. `sql.stats.forecasts.max_decrease` is the most a prediction can decrease, expressed as the minimum ratio of the prediction to the lowest prior observation. #123149

Bug fixes

-- Statistics forecasts of zero rows by [the optimizer]({% link v23.1/cost-based-optimizer.md %}#table-statistics) can cause bad plans. This commit changes forecasting to avoid predicting zero rows for most downward-trending statistics. [#123149][#123149] -- A [job]({% link v23.1/show-jobs.md %}) will now [log]({% link v23.1/logging.md %}#ops) rather than fail if it reports an out-of bound progress fraction. [#123133][#123133] +- Statistics forecasts of zero rows by [the optimizer]({% link v23.1/cost-based-optimizer.md %}#table-statistics) can cause bad plans. This commit changes forecasting to avoid predicting zero rows for most downward-trending statistics. #123149 +- A [job]({% link v23.1/show-jobs.md %}) will now [log]({% link v23.1/logging.md %}#ops) rather than fail if it reports an out-of-bound progress fraction. #123133

Performance improvements

-- Added a new [session variable]({% link v23.1/set-vars.md %}) `optimizer_use_improved_zigzag_join_costing`. When enabled, the cost of [zigzag joins]({% link v23.1/cost-based-optimizer.md %}#zigzag-joins) is updated so zigzag joins will only be chosen over scans if the zigzag joins produce fewer rows. This change only applies if the session variable `enable_zigzag_join` is also `on`. [#123152][#123152] -- Improved the selectivity estimation of multi-column filters by the [optimizer]({% link v23.1/cost-based-optimizer.md %}) when the multi-column distinct count is high. This avoids cases where CockroachDB significantly over-estimates the selectivity of a multi-column predicate and as a result can prevent the optimizer from choosing a bad query plan. [#123152][#123152] +- Added a new [session variable]({% link v23.1/set-vars.md %}) `optimizer_use_improved_zigzag_join_costing`. When enabled, the cost of [zigzag joins]({% link v23.1/cost-based-optimizer.md %}#zigzag-joins) is updated so zigzag joins will only be chosen over scans if the zigzag joins produce fewer rows. This change only applies if the session variable `enable_zigzag_join` is also `on`. #123152 +- Improved the selectivity estimation of multi-column filters by the [optimizer]({% link v23.1/cost-based-optimizer.md %}) when the multi-column distinct count is high. This avoids cases where CockroachDB significantly over-estimates the selectivity of a multi-column predicate and as a result can prevent the optimizer from choosing a bad query plan. #123152
@@ -30,6 +30,3 @@ This release includes 5 merged PRs by 5 authors.
-[#123133]: https://github.com/cockroachdb/cockroach/pull/123133 -[#123149]: https://github.com/cockroachdb/cockroach/pull/123149 -[#123152]: https://github.com/cockroachdb/cockroach/pull/123152 diff --git a/src/current/_includes/releases/v23.1/v23.1.21.md b/src/current/_includes/releases/v23.1/v23.1.21.md index 58cd97fef33..2d3325a590c 100644 --- a/src/current/_includes/releases/v23.1/v23.1.21.md +++ b/src/current/_includes/releases/v23.1/v23.1.21.md @@ -6,34 +6,34 @@ Release Date: May 7, 2024

SQL language changes

-- The `FORCE_INVERTED_INDEX` hint causes the [optimizer]({% link v23.1/cost-based-optimizer.md %}) to prefer a query plan scan over any [inverted index]({% link v23.1/inverted-indexes.md %}) of the hinted table. An error is emitted if no such query plan can be generated. [#122301][#122301] +- The `FORCE_INVERTED_INDEX` hint causes the [optimizer]({% link v23.1/cost-based-optimizer.md %}) to prefer a query plan scan over any [inverted index]({% link v23.1/inverted-indexes.md %}) of the hinted table. An error is emitted if no such query plan can be generated. #122301 - Introduced three new [cluster settings]({% link v23.1/cluster-settings.md %}) for controlling [table statistics]({% link v23.1/cost-based-optimizer.md %}#table-statistics) forecasting: - [`sql.stats.forecasts.min_observations`]({% link v23.1/cluster-settings.md %}) is the minimum number of observed statistics required to produce a forecast. - [`sql.stats.forecasts.min_goodness_of_fit`]({% link v23.1/cluster-settings.md %}) is the minimum R² (goodness of fit) measurement required from all predictive models to use a forecast. - - [`sql.stats.forecasts.max_decrease`]({% link v23.1/cluster-settings.md %}) is the most a prediction can decrease, expressed as the minimum ratio of the prediction to the lowest prior observation. [#122990][#122990] -- Added a [session variable]({% link v23.1/set-vars.md %}) `optimizer_use_improved_multi_column_selectivity_estimate`, which if enabled, causes the [optimizer]({% link v23.1/cost-based-optimizer.md %}) to use an improved selectivity estimate for multi-column predicates. This setting will default to `true` on v24.2 and later, and `false` on prior versions. [#123068][#123068] + - [`sql.stats.forecasts.max_decrease`]({% link v23.1/cluster-settings.md %}) is the most a prediction can decrease, expressed as the minimum ratio of the prediction to the lowest prior observation. 
#122990 +- Added a [session variable]({% link v23.1/set-vars.md %}) `optimizer_use_improved_multi_column_selectivity_estimate`, which if enabled, causes the [optimizer]({% link v23.1/cost-based-optimizer.md %}) to use an improved selectivity estimate for multi-column predicates. This setting will default to `true` on v24.2 and later, and `false` on prior versions. #123068

Operational changes

-- A minimum [Raft scheduler]({% link v23.1/architecture/replication-layer.md %}#raft) concurrency is now enforced per [store]({% link v23.1/architecture/storage-layer.md %}#overview) so that nodes with many stores do not spread workers too thinly. This helps to avoid high scheduler latency across replicas on a store when load is imbalanced. [#120797][#120797] +- A minimum [Raft scheduler]({% link v23.1/architecture/replication-layer.md %}#raft) concurrency is now enforced per [store]({% link v23.1/architecture/storage-layer.md %}#overview) so that nodes with many stores do not spread workers too thinly. This helps to avoid high scheduler latency across replicas on a store when load is imbalanced. #120797

Bug fixes

-- Fixed a bug introduced in v22.2.9 that could cause a slow memory leak that can accumulate when opening many new connections. [#121056][#121056] -- [Sequence]({% link v23.1/create-sequence.md %}) options for `NO MINVALUE` and `NO MAXVALUE` now match [PostgreSQL behavior](https://www.postgresql.org/docs/current/sql-createsequence.html). Sequence `MINVALUE` and `MAXVALUE` now automatically adjust to the bounds of a new integer type in [`ALTER SEQUENCE ... AS`]({% link v23.1/alter-sequence.md %}), matching PostgreSQL behavior. [#121307][#121307] -- Fixed a bug where the [timeseries graphs shown on the **SQL Activity Statement Fingerprint** page]({% link v23.1/ui-statements-page.md %}#charts) in the [DB Console]({% link v23.1/ui-overview.md %}) were not rendering properly. This involved fixing a bug related to setting the time range of the charts. [#121382][#121382] [#122235][#122235] -- Fixed a bug where CockroachDB could incorrectly evaluate `IN` expressions that had `INT2` or `INT4` type on the left side, and values on the right side that were outside the range of the left side. The bug had been present since at least v21.1. [#121955][#121955] -- Previously, on long-running [sessions]({% link v23.1/show-sessions.md %}) that issue many (hundreds of thousands or more) [transactions]({% link v23.1/transactions.md %}), CockroachDB's internal memory accounting system, the limit for which is configured via the [`--max-sql-memory` flag]({% link v23.1/cockroach-start.md %}#general) could leak. This bug, in turn, could result in the error message `"root: memory budget exceeded"` for other queries. The bug was present in v23.1.17 and is now fixed. 
[#121949][#121949] [#122235][#122235] -- Reintroduced [cluster setting]({% link v23.1/cluster-settings.md %}) `sql.auth.modify_cluster_setting_applies_to_all.enabled` so that mixed-version clusters can migrate off of this setting, which is deprecated in favor of the privilege [`MODIFYSQLCLUSTERSETTING`]({% link v23.1/set-cluster-setting.md %}#required-privileges). [#122055][#122055] [#122635][#122635] -- Fixed a bug where a [`GRANT ... ON ALL TABLES`]({% link v23.1/grant.md %}) statement could fail if sequences existed and they did not support a privilege (e.g., `BACKUP`). [#122057][#122057] -- Fixed a bug where [client certificate authentication]({% link v23.1/authentication.md %}#client-authentication) combined with [identity maps]({% link v23.1/sso-sql.md %}#identity-map-configuration) (`server.identity_map.configuration`) did not work. For the feature to work correctly, the client must specify a valid database user in the [connection string]({% link v23.1/connection-parameters.md %}). This bug had been present since v23.1. [#122746][#122746] -- Statistics forecasts of zero rows can cause suboptimal [query plans]({% link v23.1/cost-based-optimizer.md %}). Forecasting will now avoid predicting zero rows for most downward-trending statistics. [#122990][#122990] +- Fixed a bug introduced in v22.2.9 that could cause a slow memory leak that can accumulate when opening many new connections. #121056 +- [Sequence]({% link v23.1/create-sequence.md %}) options for `NO MINVALUE` and `NO MAXVALUE` now match [PostgreSQL behavior](https://www.postgresql.org/docs/current/sql-createsequence.html). Sequence `MINVALUE` and `MAXVALUE` now automatically adjust to the bounds of a new integer type in [`ALTER SEQUENCE ... AS`]({% link v23.1/alter-sequence.md %}), matching PostgreSQL behavior. 
#121307 +- Fixed a bug where the [timeseries graphs shown on the **SQL Activity Statement Fingerprint** page]({% link v23.1/ui-statements-page.md %}#charts) in the [DB Console]({% link v23.1/ui-overview.md %}) were not rendering properly. This involved fixing a bug related to setting the time range of the charts. #121382 #122235 +- Fixed a bug where CockroachDB could incorrectly evaluate `IN` expressions that had `INT2` or `INT4` type on the left side, and values on the right side that were outside the range of the left side. The bug had been present since at least v21.1. #121955 +- Previously, on long-running [sessions]({% link v23.1/show-sessions.md %}) that issue many (hundreds of thousands or more) [transactions]({% link v23.1/transactions.md %}), CockroachDB's internal memory accounting system, the limit for which is configured via the [`--max-sql-memory` flag]({% link v23.1/cockroach-start.md %}#general) could leak. This bug, in turn, could result in the error message `"root: memory budget exceeded"` for other queries. The bug was present in v23.1.17 and is now fixed. #121949 #122235 +- Reintroduced [cluster setting]({% link v23.1/cluster-settings.md %}) `sql.auth.modify_cluster_setting_applies_to_all.enabled` so that mixed-version clusters can migrate off of this setting, which is deprecated in favor of the privilege [`MODIFYSQLCLUSTERSETTING`]({% link v23.1/set-cluster-setting.md %}#required-privileges). #122055 #122635 +- Fixed a bug where a [`GRANT ... ON ALL TABLES`]({% link v23.1/grant.md %}) statement could fail if sequences existed and they did not support a privilege (e.g., `BACKUP`). #122057 +- Fixed a bug where [client certificate authentication]({% link v23.1/authentication.md %}#client-authentication) combined with [identity maps]({% link v23.1/sso-sql.md %}#identity-map-configuration) (`server.identity_map.configuration`) did not work. 
For the feature to work correctly, the client must specify a valid database user in the [connection string]({% link v23.1/connection-parameters.md %}). This bug had been present since v23.1. #122746 +- Statistics forecasts of zero rows can cause suboptimal [query plans]({% link v23.1/cost-based-optimizer.md %}). Forecasting will now avoid predicting zero rows for most downward-trending statistics. #122990

Performance improvements

-- More efficient [query plans]({% link v23.1/cost-based-optimizer.md %}) are now generated for queries with text similarity filters, for example, `text_col % 'foobar'`. These plans are generated if the `optimizer_use_trigram_similarity_optimization` [session setting]({% link v23.1/set-vars.md %}) is enabled. It is disabled by default. [#122683][#122683] -- Added a new [session variable]({% link v23.1/set-vars.md %}) `optimizer_use_improved_zigzag_join_costing`. When enabled, the cost of [zigzag joins]({% link v23.1/cost-based-optimizer.md %}#zigzag-joins) is updated so zigzag joins will only be chosen over scans if the zigzag joins produce fewer rows. This change only applies if the session variable `enable_zigzag_join` is also `on`. [#123068][#123068] -- Improved the selectivity estimation of multi-column filters by the [optimizer]({% link v23.1/cost-based-optimizer.md %}) when the multi-column distinct count is high. This avoids cases where CockroachDB significantly over-estimates the selectivity of a multi-column predicate and as a result can prevent the optimizer from choosing a bad query plan. [#123068][#123068] +- More efficient [query plans]({% link v23.1/cost-based-optimizer.md %}) are now generated for queries with text similarity filters, for example, `text_col % 'foobar'`. These plans are generated if the `optimizer_use_trigram_similarity_optimization` [session setting]({% link v23.1/set-vars.md %}) is enabled. It is disabled by default. #122683 +- Added a new [session variable]({% link v23.1/set-vars.md %}) `optimizer_use_improved_zigzag_join_costing`. When enabled, the cost of [zigzag joins]({% link v23.1/cost-based-optimizer.md %}#zigzag-joins) is updated so zigzag joins will only be chosen over scans if the zigzag joins produce fewer rows. This change only applies if the session variable `enable_zigzag_join` is also `on`. 
#123068 +- Improved the selectivity estimation of multi-column filters by the [optimizer]({% link v23.1/cost-based-optimizer.md %}) when the multi-column distinct count is high. This avoids cases where CockroachDB significantly over-estimates the selectivity of a multi-column predicate and as a result can prevent the optimizer from choosing a bad query plan. #123068

Contributors

@@ -41,18 +41,3 @@ This release includes 59 merged PRs by 26 authors. -[#120797]: https://github.com/cockroachdb/cockroach/pull/120797 -[#121056]: https://github.com/cockroachdb/cockroach/pull/121056 -[#121307]: https://github.com/cockroachdb/cockroach/pull/121307 -[#121382]: https://github.com/cockroachdb/cockroach/pull/121382 -[#121949]: https://github.com/cockroachdb/cockroach/pull/121949 -[#121955]: https://github.com/cockroachdb/cockroach/pull/121955 -[#122055]: https://github.com/cockroachdb/cockroach/pull/122055 -[#122057]: https://github.com/cockroachdb/cockroach/pull/122057 -[#122235]: https://github.com/cockroachdb/cockroach/pull/122235 -[#122301]: https://github.com/cockroachdb/cockroach/pull/122301 -[#122635]: https://github.com/cockroachdb/cockroach/pull/122635 -[#122683]: https://github.com/cockroachdb/cockroach/pull/122683 -[#122746]: https://github.com/cockroachdb/cockroach/pull/122746 -[#122990]: https://github.com/cockroachdb/cockroach/pull/122990 -[#123068]: https://github.com/cockroachdb/cockroach/pull/123068 diff --git a/src/current/_includes/releases/v23.1/v23.1.22.md b/src/current/_includes/releases/v23.1/v23.1.22.md index 4a71c1e8df2..ea38b5e67dd 100644 --- a/src/current/_includes/releases/v23.1/v23.1.22.md +++ b/src/current/_includes/releases/v23.1/v23.1.22.md @@ -11,39 +11,39 @@ Release Date: May 23, 2024 - The cluster settings `changefeed.frontier_checkpoint_frequency` and `low changefeed.frontier_highwater_lag_checkpoint_threshold` were set low, which resulted in the initial scan taking many multiples of the configured frequency to complete. - There were multiple target tables with significant differences in row counts in one changefeed. - The changefeed target tables were large with many ranges. - - The initial scan took a long time to complete (an hour or longer). [#124454][#124454] + - The initial scan took a long time to complete (an hour or longer). #124454

SQL language changes

-- Updated the [`SHOW GRANTS`]({% link v23.1/show-grants.md %}) responses to display the `object_type` and `object_name`, which has replaced the `relation_name` column. [#122718][#122718] -- Added [external connection]({% link v23.1/create-external-connection.md %}) granted privileges to the [`SHOW GRANTS`]({% link v23.1/show-grants.md %}) command. [#122718][#122718] +- Updated the [`SHOW GRANTS`]({% link v23.1/show-grants.md %}) responses to display the `object_type` and `object_name`, which has replaced the `relation_name` column. #122718 +- Added [external connection]({% link v23.1/create-external-connection.md %}) granted privileges to the [`SHOW GRANTS`]({% link v23.1/show-grants.md %}) command. #122718 - Introduced three new [cluster settings]({% link v23.1/cluster-settings.md %}) for controlling table statistics forecasting: - [`sql.stats.forecasts.min_observations`]({% link v23.1/cluster-settings.md %}) is the minimum number of observed statistics required to produce a forecast. - [`sql.stats.forecasts.min_goodness_of_fit`]({% link v23.1/cluster-settings.md %}) is the minimum R² (goodness of fit) measurement required from all predictive models to use a forecast. - - [`sql.stats.forecasts.max_decrease`]({% link v23.1/cluster-settings.md %}) is the most a prediction can decrease, expressed as the minimum ratio of the prediction to the lowest prior observation. [#122594][#122594] -- Added a new [session setting]({% link v23.1/session-variables.md %}) `optimizer_use_improved_multi_column_selectivity_estimate`, which if enabled, causes the [optimizer]({% link v23.1/cost-based-optimizer.md %}) to use an improved selectivity estimate for multi-column predicates. This setting will default to `true` on v24.2 and later, and `false` on earlier versions. [#123083][#123083] + - [`sql.stats.forecasts.max_decrease`]({% link v23.1/cluster-settings.md %}) is the most a prediction can decrease, expressed as the minimum ratio of the prediction to the lowest prior observation. 
#122594 +- Added a new [session setting]({% link v23.1/session-variables.md %}) `optimizer_use_improved_multi_column_selectivity_estimate`, which if enabled, causes the [optimizer]({% link v23.1/cost-based-optimizer.md %}) to use an improved selectivity estimate for multi-column predicates. This setting will default to `true` on v24.2 and later, and `false` on earlier versions. #123083

Command-line changes

-- Changed the default `debug compact` maximum compaction concurrency to the number of processors, and added a `--max-concurrency` flag for overriding the new default. [#123070][#123070] +- Changed the default `debug compact` maximum compaction concurrency to the number of processors, and added a `--max-concurrency` flag for overriding the new default. #123070

Bug fixes

-- Fixed a bug where a [`RESTORE`](../v23.1/restore.html) of a backup that itself contained a table created by the `RESTORE` of a table with an in-progress [`IMPORT INTO`](../v23.1/import-into.html) would fail to restore all rows. [#120542][#120542] -- Fixed a bug where [client certificate authentication]({% link v23.1/authentication.md %}#client-authentication) combined with [identity maps]({% link v23.1/sso-sql.md %}#identity-map-configuration) (`server.identity_map.configuration`) did not work. For the feature to work correctly, the client must specify a valid database user in the [connection string]({% link v23.1/connection-parameters.md %}). [#122747][#122747] -- Fixed a bug where the [row-based execution engine]({% link v23.1/architecture/sql-layer.md %}#query-execution) could drop a [`LIMIT`]({% link v23.1/limit-offset.md %}) clause when there was an [`ORDER BY`]({% link v23.1/order-by.md %}) clause, and the ordering was partially provided by an input operator. For example, this bug could occur with an ordering such as `ORDER BY a, b` when the scanned index was only ordered on column `a`. The impact of this bug was that more rows may have been returned than specified by the `LIMIT` clause. This bug is only present when not using the [vectorized execution engine]({% link v23.1/architecture/sql-layer.md %}#vectorized-query-execution). That is, when running with `SET vectorize = off;`. This bug has existed since CockroachDB v22.1. [#122835][#122835] -- Statistics forecasts of zero rows can cause suboptimal [query plans]({% link v23.1/cost-based-optimizer.md %}). Forecasting will now avoid predicting zero rows for most downward-trending statistics. [#122594][#122594] -- Fixed a bug in the DB Console's [**Custom Chart**]({% link v23.1/ui-custom-chart-debug-page.md %}) tool where store-level metrics were displayed only for the first store ID associated with the node. 
Now data is displayed for all stores present on a node, and a single time series is shown for each store, rather than an aggregated value for all of the node's stores. This allows finer-grained monitoring of store-level metrics. [#122806][#122806] -- Fixed a bug where a [job]({% link v23.1/show-jobs.md %}) would fail if it reported an out-of-bound progress fraction. The error is now logged and no longer causes the job to fail. [#122963][#122963] -- Fixed a bug where, in certain cases, using virtual tables such as `crdb_internal.system_jobs` could result in the internal error `attempting to append refresh spans after the tracked timestamp has moved forward`. [#123154][#123154] +- Fixed a bug where a [`RESTORE`](../v23.1/restore.html) of a backup that itself contained a table created by the `RESTORE` of a table with an in-progress [`IMPORT INTO`](../v23.1/import-into.html) would fail to restore all rows. #120542 +- Fixed a bug where [client certificate authentication]({% link v23.1/authentication.md %}#client-authentication) combined with [identity maps]({% link v23.1/sso-sql.md %}#identity-map-configuration) (`server.identity_map.configuration`) did not work. For the feature to work correctly, the client must specify a valid database user in the [connection string]({% link v23.1/connection-parameters.md %}). #122747 +- Fixed a bug where the [row-based execution engine]({% link v23.1/architecture/sql-layer.md %}#query-execution) could drop a [`LIMIT`]({% link v23.1/limit-offset.md %}) clause when there was an [`ORDER BY`]({% link v23.1/order-by.md %}) clause, and the ordering was partially provided by an input operator. For example, this bug could occur with an ordering such as `ORDER BY a, b` when the scanned index was only ordered on column `a`. The impact of this bug was that more rows may have been returned than specified by the `LIMIT` clause. 
This bug is only present when not using the [vectorized execution engine]({% link v23.1/architecture/sql-layer.md %}#vectorized-query-execution). That is, when running with `SET vectorize = off;`. This bug has existed since CockroachDB v22.1. #122835 +- Statistics forecasts of zero rows can cause suboptimal [query plans]({% link v23.1/cost-based-optimizer.md %}). Forecasting will now avoid predicting zero rows for most downward-trending statistics. #122594 +- Fixed a bug in the DB Console's [**Custom Chart**]({% link v23.1/ui-custom-chart-debug-page.md %}) tool where store-level metrics were displayed only for the first store ID associated with the node. Now data is displayed for all stores present on a node, and a single time series is shown for each store, rather than an aggregated value for all of the node's stores. This allows finer-grained monitoring of store-level metrics. #122806 +- Fixed a bug where a [job]({% link v23.1/show-jobs.md %}) would fail if it reported an out-of-bound progress fraction. The error is now logged and no longer causes the job to fail. #122963 +- Fixed a bug where, in certain cases, using virtual tables such as `crdb_internal.system_jobs` could result in the internal error `attempting to append refresh spans after the tracked timestamp has moved forward`. #123154

Performance improvements

-- More efficient [query plans]({% link v23.1/cost-based-optimizer.md %}) are now generated for queries with text similarity filters, for example, `text_col % 'foobar'`. These plans are generated if the `optimizer_use_trigram_similarity_optimization` [session setting]({% link v23.1/set-vars.md %}) is enabled. It is disabled by default. [#122723][#122723] -- The [optimizer]({% link v23.1/cost-based-optimizer.md %}) now costs `distinct-on` operators more accurately. It may produce more efficient query plans in some cases. [#122851][#122851] -- Added a new [session setting]({% link v23.1/session-variables.md %}) `optimizer_use_improved_zigzag_join_costing`. When enabled and when the [cluster setting]({% link v23.1/cluster-settings.md %}) `enable_zigzag_join` is also enabled, the cost of zigzag joins is updated such that a zigzag join will be chosen over a scan only if it produces fewer rows than a scan. [#123083][#123083] -- Improved the selectivity estimation of multi-column filters when the multi-column distinct count is high. This prevents the [optimizer]({% link v23.1/cost-based-optimizer.md %}) from choosing a bad query plan due to over-estimating the selectivity of a multi-column predicate. [#123083][#123083] -- Improved the efficiency of error handling in the [vectorized execution engine]({% link v23.1/vectorized-execution.md %}) to reduce the CPU overhead of statement timeout handling and reduce the potential for more statement timeouts. [#123503][#123503] +- More efficient [query plans]({% link v23.1/cost-based-optimizer.md %}) are now generated for queries with text similarity filters, for example, `text_col % 'foobar'`. These plans are generated if the `optimizer_use_trigram_similarity_optimization` [session setting]({% link v23.1/set-vars.md %}) is enabled. It is disabled by default. #122723 +- The [optimizer]({% link v23.1/cost-based-optimizer.md %}) now costs `distinct-on` operators more accurately. 
It may produce more efficient query plans in some cases. #122851 +- Added a new [session setting]({% link v23.1/session-variables.md %}) `optimizer_use_improved_zigzag_join_costing`. When enabled and when the [cluster setting]({% link v23.1/cluster-settings.md %}) `enable_zigzag_join` is also enabled, the cost of zigzag joins is updated such that a zigzag join will be chosen over a scan only if it produces fewer rows than a scan. #123083 +- Improved the selectivity estimation of multi-column filters when the multi-column distinct count is high. This prevents the [optimizer]({% link v23.1/cost-based-optimizer.md %}) from choosing a bad query plan due to over-estimating the selectivity of a multi-column predicate. #123083 +- Improved the efficiency of error handling in the [vectorized execution engine]({% link v23.1/vectorized-execution.md %}) to reduce the CPU overhead of statement timeout handling and reduce the potential for more statement timeouts. #123503
@@ -53,20 +53,3 @@ This release includes 51 merged PRs by 23 authors.
-[#120542]: https://github.com/cockroachdb/cockroach/pull/120542 -[#122594]: https://github.com/cockroachdb/cockroach/pull/122594 -[#122718]: https://github.com/cockroachdb/cockroach/pull/122718 -[#122723]: https://github.com/cockroachdb/cockroach/pull/122723 -[#122747]: https://github.com/cockroachdb/cockroach/pull/122747 -[#122806]: https://github.com/cockroachdb/cockroach/pull/122806 -[#122835]: https://github.com/cockroachdb/cockroach/pull/122835 -[#122851]: https://github.com/cockroachdb/cockroach/pull/122851 -[#122963]: https://github.com/cockroachdb/cockroach/pull/122963 -[#123070]: https://github.com/cockroachdb/cockroach/pull/123070 -[#123083]: https://github.com/cockroachdb/cockroach/pull/123083 -[#123154]: https://github.com/cockroachdb/cockroach/pull/123154 -[#123377]: https://github.com/cockroachdb/cockroach/pull/123377 -[#123396]: https://github.com/cockroachdb/cockroach/pull/123396 -[#123503]: https://github.com/cockroachdb/cockroach/pull/123503 -[#124454]: https://github.com/cockroachdb/cockroach/pull/124454 -[dc349c389]: https://github.com/cockroachdb/cockroach/commit/dc349c389 diff --git a/src/current/_includes/releases/v23.1/v23.1.23.md b/src/current/_includes/releases/v23.1/v23.1.23.md index 35c3fd9e98e..01866145233 100644 --- a/src/current/_includes/releases/v23.1/v23.1.23.md +++ b/src/current/_includes/releases/v23.1/v23.1.23.md @@ -10,46 +10,46 @@ Release Date: June 20, 2024 - The [cluster settings]({% link v23.1/cluster-settings.md %}) `changefeed.frontier_checkpoint_frequency` and `changefeed.frontier_highwater_lag_checkpoint_threshold` were set low, which resulted in the initial scan taking many multiples of the configured frequency to complete. - There were multiple target tables with significant differences in row counts in one changefeed. - The changefeed target tables were large with many ranges. - - The initial scan took a long time to complete (an hour or longer). 
[#123970][#123970] [#124759][#124759] -- Introduced the `changefeed.random_replica_selection.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}) that changes the behavior of changefeed planning work distribution. When `changefeed.random_replica_selection.enabled` is set to `true`, [changefeeds]({% link v23.1/change-data-capture-overview.md %}) will evenly distribute their work across all [replicas]({% link v23.1/architecture/glossary.md %}#replica), including [followers]({% link v23.1/architecture/replication-layer.md %}#raft), regardless of [leaseholder]({% link v23.1/architecture/glossary.md %}#leaseholder) placement. `changefeed.random_replica_selection.enabled` is disabled by default. [#124930][#124930] + - The initial scan took a long time to complete (an hour or longer). #123970 #124759 +- Introduced the `changefeed.random_replica_selection.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}) that changes the behavior of changefeed planning work distribution. When `changefeed.random_replica_selection.enabled` is set to `true`, [changefeeds]({% link v23.1/change-data-capture-overview.md %}) will evenly distribute their work across all [replicas]({% link v23.1/architecture/glossary.md %}#replica), including [followers]({% link v23.1/architecture/replication-layer.md %}#raft), regardless of [leaseholder]({% link v23.1/architecture/glossary.md %}#leaseholder) placement. `changefeed.random_replica_selection.enabled` is disabled by default. #124930

SQL language changes

- Introduced three new [cluster settings]({% link v23.1/cluster-settings.md %}) for controlling [table statistics forecasting]({% link v23.1/show-statistics.md %}#display-forecasted-statistics): - [`sql.stats.forecasts.min_observations`]({% link v23.1/cluster-settings.md %}) is the minimum number of observed statistics required to produce a forecast. - [`sql.stats.forecasts.min_goodness_of_fit`]({% link v23.1/cluster-settings.md %}) is the minimum R² (goodness of fit) measurement required from all predictive models to use a forecast. - - [`sql.stats.forecasts.max_decrease`]({% link v23.1/cluster-settings.md %}) is the most a prediction can decrease, expressed as the minimum ratio of the prediction to the lowest prior observation. [#124076][#124076] -- Added a new [session setting]({% link v23.1/session-variables.md %}) `optimizer_use_improved_multi_column_selectivity_estimate`, which if enabled, causes the [optimizer]({% link v23.1/cost-based-optimizer.md %}) to use an improved selectivity estimate for multi-column predicates. This setting will default to `true` on v24.2 and later, and `false` on earlier versions. [#124076][#124076] -- The [row-level TTL]({% link v23.1/row-level-ttl.md %}) setting [`ttl_delete_rate_limit`]({% link v23.1/row-level-ttl.md %}#ttl-storage-parameters) is now set to `100` by default, which sets the rate limit for deleting expired rows to `100`. [#124362][#124362] -- CockroachDB no longer limits precision when converting [spatial data types]({% link v23.1/architecture/glossary.md %}#data-types) to [JSON]({% link v23.1/jsonb.md %}). [#124534][#124534] + - [`sql.stats.forecasts.max_decrease`]({% link v23.1/cluster-settings.md %}) is the most a prediction can decrease, expressed as the minimum ratio of the prediction to the lowest prior observation. 
#124076 +- Added a new [session setting]({% link v23.1/session-variables.md %}) `optimizer_use_improved_multi_column_selectivity_estimate`, which if enabled, causes the [optimizer]({% link v23.1/cost-based-optimizer.md %}) to use an improved selectivity estimate for multi-column predicates. This setting will default to `true` on v24.2 and later, and `false` on earlier versions. #124076 +- The [row-level TTL]({% link v23.1/row-level-ttl.md %}) setting [`ttl_delete_rate_limit`]({% link v23.1/row-level-ttl.md %}#ttl-storage-parameters) is now set to `100` by default, which sets the rate limit for deleting expired rows to `100`. #124362 +- CockroachDB no longer limits precision when converting [spatial data types]({% link v23.1/architecture/glossary.md %}#data-types) to [JSON]({% link v23.1/jsonb.md %}). #124534

Operational changes

-- - The `client_authentication_ok` and `client_session_end` events are now logged to the [`SESSIONS` log channel]({% link v23.1/logging-use-cases.md %}#sessions) unconditionally. Previously, these would only be logged if the `server.auth_log.sql_sessions.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}) was set to `true`. All other `SESSIONS` log messages are still only logged if `server.auth_log.sql_sessions.enabled` or `server.auth_log.sql_connections.enabled` are set to `true`. To not show `client_authentication_ok` and `client_session_end` events, disable the `SESSIONS` log channel entirely. [#124375][#124375] +- The `client_authentication_ok` and `client_session_end` events are now logged to the [`SESSIONS` log channel]({% link v23.1/logging-use-cases.md %}#sessions) unconditionally. Previously, these would only be logged if the `server.auth_log.sql_sessions.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}) was set to `true`. All other `SESSIONS` log messages are still only logged if `server.auth_log.sql_sessions.enabled` or `server.auth_log.sql_connections.enabled` are set to `true`. To not show `client_authentication_ok` and `client_session_end` events, disable the `SESSIONS` log channel entirely. #124375

DB Console changes

-- Viewing the [**SQL Activity**]({% link v23.1/ui-statements-page.md %}#statement-fingerprints-results) sorted by `% of Runtime` now correctly sorts entries by the runtime amount. [#123899][#123899] -- The [DB Console]({% link v23.1/ui-overview.md %}) favicon and other image files now render properly. [#122702][#122702] +- Viewing the [**SQL Activity**]({% link v23.1/ui-statements-page.md %}#statement-fingerprints-results) sorted by `% of Runtime` now correctly sorts entries by the runtime amount. #123899 +- The [DB Console]({% link v23.1/ui-overview.md %}) favicon and other image files now render properly. #122702

Bug fixes

-- Fixed a bug where a failed [restore]({% link v23.1/restore.md %}) job could leave the system in a state where re-attempting the restore was not possible without manual intervention. [#123462][#123462] -- [Index recommendations]({% link v23.1/ui-databases-page.md %}#index-recommendations) in the [DB Console]({% link v23.1/ui-overview.md %}) will now function properly for indexes on tables or columns whose names contain quotation marks or whitespace. For example: `CREATE INDEX ON "my table" ("my col");`. [#122117][#122117] -- Fixed a bug introduced in v23.1 where [client certificate authentication]({% link v23.1/authentication.md %}#client-authentication) combined with [identity maps]({% link v23.1/sso-sql.md %}#identity-map-configuration) (`server.identity_map.configuration`) did not work. For the feature to work correctly, the client must specify a valid database user in the [connection string]({% link v23.1/connection-parameters.md %}). [#124076][#124076] -- Statistics forecasts of zero rows can cause suboptimal [query plans]({% link v23.1/cost-based-optimizer.md %}). Forecasting will now avoid predicting zero rows for most downward-trending statistics. [#124076][#124076] -- Fixed a bug where [`DROP ROLE`]({% link v23.1/drop-role.md %}) and [`DROP USER`]({% link v23.1/drop-user.md %}) could leave references behind inside [`TYPE`s]({% link v23.1/create-type.md %}), which could prevent [`SHOW GRANTS`]({% link v23.1/show-grants.md %}) from working. [#124645][#124645] -- Scattering a range with a replication factor of 1 now no longer erroneously up-replicates the range to two replicas. Leases will also no longer thrash between nodes when perturbed with a replication factor of 1. [#124500][#124500] -- Fixed a bug where, if the `ttl_row_stats_poll_interval` storage parameter was non-zero for a table with [row-level TTL]({% link v23.1/row-level-ttl.md %}) enabled, the queries issued to update row statistics could block the job from completing. 
Now, if the job completes, these statistics queries are cancelled. This means that the `jobs.row_level_ttl.total_rows` and `jobs.row_level_ttl.total_expired_rows` metrics will report `0` if the job finishes before the row stats queries complete. [#124625][#124625] -- Fixed a bug where the `results_buffer_size` [session variable]({% link v23.1/session-variables.md %}) could not be configured by using the "options" query parameter in the connection string, but only as a top-level query parameter. Now, `results_buffer_size` can be configured in either part of the connection string. This variable still cannot be changed with the [`SET`]({% link v23.1/set-vars.md %}) command after the session begins. [#124773][#124773] -- [`SHOW TYPES`]({% link v23.1/show-types.md %}) now includes user-defined [composite types]({% link v23.1/create-type.md %}#create-a-composite-data-type). `SHOW TYPES` previously omitted composite types, which were added in v23.1.0. [#124815][#124815] -- Fixed a bug where a change to a [user-defined type]({% link v23.1/create-type.md %}) could cause queries against tables using that type to fail with the error `histogram.go:694: span must be fully contained in the bucket`. The change to the user-defined type could come directly from an [`ALTER TYPE`]({% link v23.1/alter-type.md %}) statement, or indirectly from an [`ALTER DATABASE ADD REGION`]({% link v23.1/alter-database.md %}#add-region) or [`DROP REGION`]({% link v23.1/alter-database.md %}#drop-region) statement (which implicitly change the `crdb_internal_region` type). This bug was present since user-defined types were introduced in v20.2. [#125473][#125473] +- Fixed a bug where a failed [restore]({% link v23.1/restore.md %}) job could leave the system in a state where re-attempting the restore was not possible without manual intervention. 
#123462 +- [Index recommendations]({% link v23.1/ui-databases-page.md %}#index-recommendations) in the [DB Console]({% link v23.1/ui-overview.md %}) will now function properly for indexes on tables or columns whose names contain quotation marks or whitespace. For example: `CREATE INDEX ON "my table" ("my col");`. #122117 +- Fixed a bug introduced in v23.1 where [client certificate authentication]({% link v23.1/authentication.md %}#client-authentication) combined with [identity maps]({% link v23.1/sso-sql.md %}#identity-map-configuration) (`server.identity_map.configuration`) did not work. For the feature to work correctly, the client must specify a valid database user in the [connection string]({% link v23.1/connection-parameters.md %}). #124076 +- Statistics forecasts of zero rows can cause suboptimal [query plans]({% link v23.1/cost-based-optimizer.md %}). Forecasting will now avoid predicting zero rows for most downward-trending statistics. #124076 +- Fixed a bug where [`DROP ROLE`]({% link v23.1/drop-role.md %}) and [`DROP USER`]({% link v23.1/drop-user.md %}) could leave references behind inside [`TYPE`s]({% link v23.1/create-type.md %}), which could prevent [`SHOW GRANTS`]({% link v23.1/show-grants.md %}) from working. #124645 +- Scattering a range with a replication factor of 1 now no longer erroneously up-replicates the range to two replicas. Leases will also no longer thrash between nodes when perturbed with a replication factor of 1. #124500 +- Fixed a bug where, if the `ttl_row_stats_poll_interval` storage parameter was non-zero for a table with [row-level TTL]({% link v23.1/row-level-ttl.md %}) enabled, the queries issued to update row statistics could block the job from completing. Now, if the job completes, these statistics queries are cancelled. This means that the `jobs.row_level_ttl.total_rows` and `jobs.row_level_ttl.total_expired_rows` metrics will report `0` if the job finishes before the row stats queries complete. 
#124625 +- Fixed a bug where the `results_buffer_size` [session variable]({% link v23.1/session-variables.md %}) could not be configured by using the "options" query parameter in the connection string, but only as a top-level query parameter. Now, `results_buffer_size` can be configured in either part of the connection string. This variable still cannot be changed with the [`SET`]({% link v23.1/set-vars.md %}) command after the session begins. #124773 +- [`SHOW TYPES`]({% link v23.1/show-types.md %}) now includes user-defined [composite types]({% link v23.1/create-type.md %}#create-a-composite-data-type). `SHOW TYPES` previously omitted composite types, which were added in v23.1.0. #124815 +- Fixed a bug where a change to a [user-defined type]({% link v23.1/create-type.md %}) could cause queries against tables using that type to fail with the error `histogram.go:694: span must be fully contained in the bucket`. The change to the user-defined type could come directly from an [`ALTER TYPE`]({% link v23.1/alter-type.md %}) statement, or indirectly from an [`ALTER DATABASE ADD REGION`]({% link v23.1/alter-database.md %}#add-region) or [`DROP REGION`]({% link v23.1/alter-database.md %}#drop-region) statement (which implicitly change the `crdb_internal_region` type). This bug was present since user-defined types were introduced in v20.2. #125473

Performance improvements

-- More efficient [query plans]({% link v23.1/cost-based-optimizer.md %}) are now generated for queries with text similarity filters, for example, `text_col % 'foobar'`. These plans are generated if the `optimizer_use_trigram_similarity_optimization` [session setting]({% link v23.1/session-variables.md %}) is enabled. It is disabled by default. [#124076][#124076] -- Added a new [session setting]({% link v23.1/session-variables.md %}) `optimizer_use_improved_zigzag_join_costing`. When enabled and when the [cluster setting]({% link v23.1/cluster-settings.md %}) `enable_zigzag_join` is also enabled, the cost of zigzag joins is updated such that a zigzag join will be chosen over a scan only if it produces fewer rows than a scan. [#124076][#124076] -- Improved the selectivity estimation of multi-column filters when the multi-column distinct count is high. This prevents the [optimizer]({% link v23.1/cost-based-optimizer.md %}) from choosing a suboptimal query plan due to over-estimating the selectivity of a multi-column predicate. [#124076][#124076] +- More efficient [query plans]({% link v23.1/cost-based-optimizer.md %}) are now generated for queries with text similarity filters, for example, `text_col % 'foobar'`. These plans are generated if the `optimizer_use_trigram_similarity_optimization` [session setting]({% link v23.1/session-variables.md %}) is enabled. It is disabled by default. #124076 +- Added a new [session setting]({% link v23.1/session-variables.md %}) `optimizer_use_improved_zigzag_join_costing`. When enabled and when the [cluster setting]({% link v23.1/cluster-settings.md %}) `enable_zigzag_join` is also enabled, the cost of zigzag joins is updated such that a zigzag join will be chosen over a scan only if it produces fewer rows than a scan. #124076 +- Improved the selectivity estimation of multi-column filters when the multi-column distinct count is high. 
This prevents the [optimizer]({% link v23.1/cost-based-optimizer.md %}) from choosing a suboptimal query plan due to over-estimating the selectivity of a multi-column predicate. #124076
@@ -59,22 +59,3 @@ This release includes 48 merged PRs by 23 authors.
-[#122117]: https://github.com/cockroachdb/cockroach/pull/122117 -[#122702]: https://github.com/cockroachdb/cockroach/pull/122702 -[#123462]: https://github.com/cockroachdb/cockroach/pull/123462 -[#123899]: https://github.com/cockroachdb/cockroach/pull/123899 -[#123970]: https://github.com/cockroachdb/cockroach/pull/123970 -[#124076]: https://github.com/cockroachdb/cockroach/pull/124076 -[#124296]: https://github.com/cockroachdb/cockroach/pull/124296 -[#124362]: https://github.com/cockroachdb/cockroach/pull/124362 -[#124375]: https://github.com/cockroachdb/cockroach/pull/124375 -[#124500]: https://github.com/cockroachdb/cockroach/pull/124500 -[#124534]: https://github.com/cockroachdb/cockroach/pull/124534 -[#124625]: https://github.com/cockroachdb/cockroach/pull/124625 -[#124645]: https://github.com/cockroachdb/cockroach/pull/124645 -[#124759]: https://github.com/cockroachdb/cockroach/pull/124759 -[#124773]: https://github.com/cockroachdb/cockroach/pull/124773 -[#124815]: https://github.com/cockroachdb/cockroach/pull/124815 -[#124930]: https://github.com/cockroachdb/cockroach/pull/124930 -[#125473]: https://github.com/cockroachdb/cockroach/pull/125473 -[268fbf5b4]: https://github.com/cockroachdb/cockroach/commit/268fbf5b4 diff --git a/src/current/_includes/releases/v23.1/v23.1.24.md b/src/current/_includes/releases/v23.1/v23.1.24.md index 379186b4715..5a861c57139 100644 --- a/src/current/_includes/releases/v23.1/v23.1.24.md +++ b/src/current/_includes/releases/v23.1/v23.1.24.md @@ -6,24 +6,24 @@ Release Date: July 18, 2024

{{ site.data.products.enterprise }} edition changes

-- [`ALTER CHANGEFEED`]({% link v23.1/alter-changefeed.md %}) no longer removes the CDC query when modifying [changefeed]({% link v23.1/change-data-capture-overview.md %}) properties. [#125436][#125436] +- [`ALTER CHANGEFEED`]({% link v23.1/alter-changefeed.md %}) no longer removes the CDC query when modifying [changefeed]({% link v23.1/change-data-capture-overview.md %}) properties. #125436

Operational changes

-- Improved disk usage metric reporting over volumes that dynamically change their size over the life of the `cockroach` process. [#125106][#125106] -- Removed `crdb_internal.cluster_execution_insights.txt` and `crdb_internal.cluster_txn_execution_insights.txt` from the [`debug zip`]({% link v23.1/cockroach-debug-zip.md %}). These files contained cluster-wide insights for statements and transactions. Users can still rely on the per-node execution insights in `crdb_internal.node_execution_insights.txt` and `crdb_internal.node_txn_execution_insights.txt`. [#125810][#125810] -- Some debugging-only information about physical plans is no longer collected in the `system.job_info` table for changefeeds, because it has the potential to grow very large. [#126102][#126102] +- Improved disk usage metric reporting over volumes that dynamically change their size over the life of the `cockroach` process. #125106 +- Removed `crdb_internal.cluster_execution_insights.txt` and `crdb_internal.cluster_txn_execution_insights.txt` from the [`debug zip`]({% link v23.1/cockroach-debug-zip.md %}). These files contained cluster-wide insights for statements and transactions. Users can still rely on the per-node execution insights in `crdb_internal.node_execution_insights.txt` and `crdb_internal.node_txn_execution_insights.txt`. #125810 +- Some debugging-only information about physical plans is no longer collected in the `system.job_info` table for changefeeds, because it has the potential to grow very large. #126102

Bug fixes

-- Fixed handling in the [declarative schema changer]({% link v23.1/online-schema-changes.md %}) when columns are included in the `STORING()` clause of [`CREATE INDEX`]({% link v23.1/create-index.md %}). CockroachDB now checks if the column is virtual up-front, and properly detects when a column is already handled by an existing index when the column name has `UTF-8` characters. [#125208][#125208] -- Fixed a bug where a change to a [user-defined type (UDT)]({% link v23.1/create-type.md %}) could cause queries against tables using that type to fail with an error message like: `histogram.go:694: span must be fully contained in the bucket`. The change to the user-defined type could occur either directly from an [`ALTER TYPE`]({% link v23.1/alter-type.md %} statement or indirectly from an [`ALTER DATABASE ... ADD REGION`]({% link v23.1/alter-database.md %}#add-region) or [`ALTER DATABASE ... DROP REGION`]({% link v23.1/alter-database.md %}#drop-region) statement, which implicitly modifies the `crdb_internal_region` UDT. This bug had existed since UDTs were introduced in v20.2. [#124855][#124855] -- Fixed an issue where [adding a column]({% link v23.1/alter-table.md %}#add-column) with a default value of an empty array would not succeed. [#125328][#125328] -- [`ALTER TABLE ... ADD CONSTRAINT UNIQUE`]({% link v23.1/alter-table.md %}#add-constraint) will now fail with a well-formed error message and code `42601` if a statement tries to add a unique constraint on an expression. [#125420][#125420] -- Fixed a bug in v24.1, v23.2, and v23.1 where using [`changefeed.aggregator.flush_jitter`]({% link v23.1/cluster-settings.md %}#setting-changefeed-aggregator-flush-jitter) with [`min_checkpoint_frequency`]({% link v23.1/create-changefeed.md %}#min-checkpoint-frequency) set to zero could cause panics. [#125495][#125495] -- Fixed a bug in which constant `LIKE` patterns containing certain sequences of backslashes did not become constrained scans. 
This bug has been present since v21.1.13 when support for building constrained scans from `LIKE` patterns containing backslashes was added. [#125537][#125537] -- Fixed a bug that could cause calling a routine to return an unexpected `function ... does not exist` error. The bug is triggered when the routine is called twice using the exact same SQL query, and either: (a) the routine has polymorphic arguments, or: (b) in between the two calls, the routine is replaced by a routine with the same name and different parameters. This bug had existed since alpha versions of v23.1. [#123513][#123513] -- Fixed the statistics estimation code in the [optimizer]({% link v23.1/cost-based-optimizer.md %}) so it does not use the empty histograms produced if histogram collection has been disabled during statistics collection due to excessive memory utilization. Now the optimizer will rely on distinct counts instead of the empty histograms and should produce better plans as a result. This bug had existed since v22.1. [#126158][#126158] +- Fixed handling in the [declarative schema changer]({% link v23.1/online-schema-changes.md %}) when columns are included in the `STORING()` clause of [`CREATE INDEX`]({% link v23.1/create-index.md %}). CockroachDB now checks if the column is virtual up-front, and properly detects when a column is already handled by an existing index when the column name has `UTF-8` characters. #125208 +- Fixed a bug where a change to a [user-defined type (UDT)]({% link v23.1/create-type.md %}) could cause queries against tables using that type to fail with an error message like: `histogram.go:694: span must be fully contained in the bucket`. The change to the user-defined type could occur either directly from an [`ALTER TYPE`]({% link v23.1/alter-type.md %}) statement or indirectly from an [`ALTER DATABASE ... 
DROP REGION`]({% link v23.1/alter-database.md %}#drop-region) statement, which implicitly modifies the `crdb_internal_region` UDT. This bug had existed since UDTs were introduced in v20.2. #124855 +- Fixed an issue where [adding a column]({% link v23.1/alter-table.md %}#add-column) with a default value of an empty array would not succeed. #125328 +- [`ALTER TABLE ... ADD CONSTRAINT UNIQUE`]({% link v23.1/alter-table.md %}#add-constraint) will now fail with a well-formed error message and code `42601` if a statement tries to add a unique constraint on an expression. #125420 +- Fixed a bug in v24.1, v23.2, and v23.1 where using [`changefeed.aggregator.flush_jitter`]({% link v23.1/cluster-settings.md %}#setting-changefeed-aggregator-flush-jitter) with [`min_checkpoint_frequency`]({% link v23.1/create-changefeed.md %}#min-checkpoint-frequency) set to zero could cause panics. #125495 +- Fixed a bug in which constant `LIKE` patterns containing certain sequences of backslashes did not become constrained scans. This bug has been present since v21.1.13 when support for building constrained scans from `LIKE` patterns containing backslashes was added. #125537 +- Fixed a bug that could cause calling a routine to return an unexpected `function ... does not exist` error. The bug is triggered when the routine is called twice using the exact same SQL query, and either: (a) the routine has polymorphic arguments, or: (b) in between the two calls, the routine is replaced by a routine with the same name and different parameters. This bug had existed since alpha versions of v23.1. #123513 +- Fixed the statistics estimation code in the [optimizer]({% link v23.1/cost-based-optimizer.md %}) so it does not use the empty histograms produced if histogram collection has been disabled during statistics collection due to excessive memory utilization. Now the optimizer will rely on distinct counts instead of the empty histograms and should produce better plans as a result. 
This bug had existed since v22.1. #126158
@@ -33,19 +33,3 @@ This release includes 56 merged PRs by 27 authors.
-[#123513]: https://github.com/cockroachdb/cockroach/pull/123513 -[#124855]: https://github.com/cockroachdb/cockroach/pull/124855 -[#125106]: https://github.com/cockroachdb/cockroach/pull/125106 -[#125208]: https://github.com/cockroachdb/cockroach/pull/125208 -[#125328]: https://github.com/cockroachdb/cockroach/pull/125328 -[#125420]: https://github.com/cockroachdb/cockroach/pull/125420 -[#125436]: https://github.com/cockroachdb/cockroach/pull/125436 -[#125495]: https://github.com/cockroachdb/cockroach/pull/125495 -[#125528]: https://github.com/cockroachdb/cockroach/pull/125528 -[#125537]: https://github.com/cockroachdb/cockroach/pull/125537 -[#125810]: https://github.com/cockroachdb/cockroach/pull/125810 -[#126102]: https://github.com/cockroachdb/cockroach/pull/126102 -[#126147]: https://github.com/cockroachdb/cockroach/pull/126147 -[#126158]: https://github.com/cockroachdb/cockroach/pull/126158 -[#126223]: https://github.com/cockroachdb/cockroach/pull/126223 -[87ba56fce]: https://github.com/cockroachdb/cockroach/commit/87ba56fce diff --git a/src/current/_includes/releases/v23.1/v23.1.25.md b/src/current/_includes/releases/v23.1/v23.1.25.md index 51f17f47949..bcbb201c6c0 100644 --- a/src/current/_includes/releases/v23.1/v23.1.25.md +++ b/src/current/_includes/releases/v23.1/v23.1.25.md @@ -21,60 +21,39 @@ Release Date: August 15, 2024 - [`CREATE EXTERNAL CONNECTION`]({% link v23.1/create-external-connection.md %}) - [`COPY`]({% link v23.1/copy.md %}) - [#127537][#127537] + #127537 - The cluster setting [`server.jwt_authentication.issuers`]({% link v23.1/sso-sql.md %}#cluster-settings) can now take multiple values to support various kinds of providers and their mapped JWKS URIs. This can be set to one of the following values: - Simple string that Go can parse as a valid issuer URL: `'https://accounts.google.com'`. - String that can be parsed as a valid JSON array of issuer URLs list: `['example.com/adfs','https://accounts.google.com']`. 
- String that can be parsed as a valid JSON and deserialized into a map of issuer URLs to corresponding JWKS URIs. In the third case, CockroachDB will override the JWKS URI present in the issuer's well-known endpoint: `'{ "issuer_jwks_map": { "https://accounts.google.com": "https://www.googleapis.com/oauth2/v3/certs", "example.com/adfs": "https://example.com/adfs/discovery/keys" } }'`. - When `issuer_jwks_map` is set, CockroachDB directly uses the JWKS URI to get the key set. In all other cases where [`server.jwt_authentication.jwks_auto_fetch.enabled`]({% link v23.1/sso-sql.md %}#cluster-settings) is set CockroachDB obtains the JWKS URI first from the issuer's well-known endpoint and then uses this endpoint. [#128751][#128751] + When `issuer_jwks_map` is set, CockroachDB directly uses the JWKS URI to get the key set. In all other cases where [`server.jwt_authentication.jwks_auto_fetch.enabled`]({% link v23.1/sso-sql.md %}#cluster-settings) is set CockroachDB obtains the JWKS URI first from the issuer's well-known endpoint and then uses this endpoint. #128751

SQL language changes

-- Added the `sql.auth.grant_option_inheritance.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}#setting-sql-auth-grant-option-inheritance-enabled). The default value is `true`, which results in behavior that matches the existing behavior of CockroachDB. When set to `false`, the [`GRANT OPTION`]({% link v23.1/grant.md %}) is not inherited via role membership, which will prevent the descendant roles from being able to grant the privilege to others. The privilege itself is still inherited via role membership. [#126862][#126862] -- Added the `sql.auth.grant_option_for_owner.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}#setting-sql-auth-grant-option-for-owner-enabled). The default value is `true`, which results in behavior that matches the existing behavior of CockroachDB. When set to `false`, then the [`GRANT OPTION`]({% link v23.1/grant.md %}) is not implcitly given to the owner of an object. The object owner still implicitly has all privileges on the object, just not the ability to grant them to other users. [#127005][#127005] +- Added the `sql.auth.grant_option_inheritance.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}#setting-sql-auth-grant-option-inheritance-enabled). The default value is `true`, which results in behavior that matches the existing behavior of CockroachDB. When set to `false`, the [`GRANT OPTION`]({% link v23.1/grant.md %}) is not inherited via role membership, which will prevent the descendant roles from being able to grant the privilege to others. The privilege itself is still inherited via role membership. #126862 +- Added the `sql.auth.grant_option_for_owner.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}#setting-sql-auth-grant-option-for-owner-enabled). The default value is `true`, which results in behavior that matches the existing behavior of CockroachDB. 
When set to `false`, then the [`GRANT OPTION`]({% link v23.1/grant.md %}) is not implicitly given to the owner of an object. The object owner still implicitly has all privileges on the object, just not the ability to grant them to other users. #127005

DB Console changes

-- The [Database]({% link v23.1/ui-databases-page.md %}) details and Table details pages in the [DB Console]({% link v23.1/ui-overview.md %}) now display the correct `"stats last created"` value. [#126415][#126415] -- The [Database]({% link v23.1/ui-databases-page.md %}) and Tables pages in the [DB Console]({% link v23.1/ui-overview.md %}) will show a loading state while loading information for databases and tables including size and [range]({% link v23.1/architecture/glossary.md %}#architecture-range) counts. [#127699][#127699] -- In the [Database]({% link v23.1/ui-databases-page.md %}) details page, the table name will no longer appear with quotes around the schema and table name. [#127763][#127763] -- [`ALTER ROLE`]({% link v23.1/alter-role.md %}) events in the [DB Console]({% link v23.1/ui-overview.md %}) event log now render correctly when the event does not contain any role options. [#126563][#126563] -- Fixed a bug where the [Databases]({% link v24.1/ui-databases-page.md %}) page crashed if the range information was not available. [#127090][#127090] +- The [Database]({% link v23.1/ui-databases-page.md %}) details and Table details pages in the [DB Console]({% link v23.1/ui-overview.md %}) now display the correct `"stats last created"` value. #126415 +- The [Database]({% link v23.1/ui-databases-page.md %}) and Tables pages in the [DB Console]({% link v23.1/ui-overview.md %}) will show a loading state while loading information for databases and tables including size and [range]({% link v23.1/architecture/glossary.md %}#architecture-range) counts. #127699 +- In the [Database]({% link v23.1/ui-databases-page.md %}) details page, the table name will no longer appear with quotes around the schema and table name. #127763 +- [`ALTER ROLE`]({% link v23.1/alter-role.md %}) events in the [DB Console]({% link v23.1/ui-overview.md %}) event log now render correctly when the event does not contain any role options. 
#126563 +- Fixed a bug where the [Databases]({% link v23.1/ui-databases-page.md %}) page crashed if the range information was not available. #127090

Bug fixes

-- Fixed a bug in which the [`DISCARD`]({% link v23.1/alter-table.md %}) statement was disallowed with [`default_transaction_read_only = on`]({% link v23.1/session-variables.md %}). [#127549][#127549] -- Fixed a bug where [`ALTER DATABASE ... DROP REGION`]({% link v23.1/alter-database.md %}#drop-region) could fail if any tables under the given database have [indexes on expressions]({% link v23.1/expression-indexes.md %}). [#126600][#126600] -- Fixed a bug when [restoring]({% link v23.1/restore.md %}) a database with a [composite type]({% link v23.1/create-type.md %}#create-a-composite-data-type). [#126847][#126847] -- Fixed a bug when inputting [`public` role]({% link v23.1/security-reference/authorization.md %}#default-roles) as user name for [built-in compatibility functions]({% link v23.1/functions-and-operators.md %}#compatibility-functions), such as `has_database_privilege` and `has_schema_privilege`. [#126853][#126853] -- Fixed a bug where CockroachDB could incorrectly evaluate an [`IS NOT NULL`]({% link v23.1/null-handling.md %}#nulls-and-simple-comparisons) filter if it was applied to non-`NULL` tuples that had `NULL` elements, such as `(1, NULL)` or `(NULL, NULL)`. This bug has existed since v20.2. [#126942][#126942] -- Fixed a bug where [`CREATE TABLE`]({% link v23.1/create-table.md %}) with [index expressions]({% link v23.1/expression-indexes.md %}) could hit undefined column errors on [transaction retries]({% link v23.1/transaction-retry-error-reference.md %}). [#126200][#126200] -- Fixed a bug that caused a memory leak when executing SQL statements with comments, for example, `SELECT /* comment */ 1;`. Memory owned by a SQL session would continue to grow as these types of statements were executed. The memory would only be released when closing the [SQL session]({% link v24.2/show-sessions.md %}). This bug has been present since v23.1. 
[#127757][#127757] -- Fixed a bug where [`CREATE INDEX IF NOT EXISTS`]({% link v23.1/create-index.md %}) would not correctly short-circuit if the given index already existed. [#128239][#128239] -- Fixed a bug in syntax validation, in which the `DESCENDING` clause was not allowed for non-terminal columns of an [inverted index]({% link v23.1/inverted-indexes.md %}). Only the last column of an inverted index should be prevented from being `DESCENDING`. This is now properly checked. [#128239][#128239] -- Fixed a bug where an [index]({% link v23.1/indexes.md %}) could store a column in the primary index if that column had a mixed-case name. [#128239][#128239] -- Fixed a memory leak that could occur when specifying a non-existent virtual cluster name in the connection string. [#128110][#128110] -- Setting or dropping a default value on a [computed column]({% link v23.1/computed-columns.md %}) is now blocked, even for `NULL` defaults. Previously, setting or dropping a default value on a computed column was a no-op. [#128468][#128468] +- Fixed a bug in which the [`DISCARD`]({% link v23.1/alter-table.md %}) statement was disallowed with [`default_transaction_read_only = on`]({% link v23.1/session-variables.md %}). #127549 +- Fixed a bug where [`ALTER DATABASE ... DROP REGION`]({% link v23.1/alter-database.md %}#drop-region) could fail if any tables under the given database have [indexes on expressions]({% link v23.1/expression-indexes.md %}). #126600 +- Fixed a bug when [restoring]({% link v23.1/restore.md %}) a database with a [composite type]({% link v23.1/create-type.md %}#create-a-composite-data-type). #126847 +- Fixed a bug when inputting [`public` role]({% link v23.1/security-reference/authorization.md %}#default-roles) as user name for [built-in compatibility functions]({% link v23.1/functions-and-operators.md %}#compatibility-functions), such as `has_database_privilege` and `has_schema_privilege`. 
#126853 +- Fixed a bug where CockroachDB could incorrectly evaluate an [`IS NOT NULL`]({% link v23.1/null-handling.md %}#nulls-and-simple-comparisons) filter if it was applied to non-`NULL` tuples that had `NULL` elements, such as `(1, NULL)` or `(NULL, NULL)`. This bug has existed since v20.2. #126942 +- Fixed a bug where [`CREATE TABLE`]({% link v23.1/create-table.md %}) with [index expressions]({% link v23.1/expression-indexes.md %}) could hit undefined column errors on [transaction retries]({% link v23.1/transaction-retry-error-reference.md %}). #126200 +- Fixed a bug that caused a memory leak when executing SQL statements with comments, for example, `SELECT /* comment */ 1;`. Memory owned by a SQL session would continue to grow as these types of statements were executed. The memory would only be released when closing the [SQL session]({% link v24.2/show-sessions.md %}). This bug has been present since v23.1. #127757 +- Fixed a bug where [`CREATE INDEX IF NOT EXISTS`]({% link v23.1/create-index.md %}) would not correctly short-circuit if the given index already existed. #128239 +- Fixed a bug in syntax validation, in which the `DESCENDING` clause was not allowed for non-terminal columns of an [inverted index]({% link v23.1/inverted-indexes.md %}). Only the last column of an inverted index should be prevented from being `DESCENDING`. This is now properly checked. #128239 +- Fixed a bug where an [index]({% link v23.1/indexes.md %}) could store a column in the primary index if that column had a mixed-case name. #128239 +- Fixed a memory leak that could occur when specifying a non-existent virtual cluster name in the connection string. #128110 +- Setting or dropping a default value on a [computed column]({% link v23.1/computed-columns.md %}) is now blocked, even for `NULL` defaults. Previously, setting or dropping a default value on a computed column was a no-op. 
#128468 -[#126200]: https://github.com/cockroachdb/cockroach/pull/126200 -[#126415]: https://github.com/cockroachdb/cockroach/pull/126415 -[#126563]: https://github.com/cockroachdb/cockroach/pull/126563 -[#126600]: https://github.com/cockroachdb/cockroach/pull/126600 -[#126847]: https://github.com/cockroachdb/cockroach/pull/126847 -[#126853]: https://github.com/cockroachdb/cockroach/pull/126853 -[#126862]: https://github.com/cockroachdb/cockroach/pull/126862 -[#126942]: https://github.com/cockroachdb/cockroach/pull/126942 -[#127005]: https://github.com/cockroachdb/cockroach/pull/127005 -[#127090]: https://github.com/cockroachdb/cockroach/pull/127090 -[#127537]: https://github.com/cockroachdb/cockroach/pull/127537 -[#127549]: https://github.com/cockroachdb/cockroach/pull/127549 -[#127606]: https://github.com/cockroachdb/cockroach/pull/127606 -[#127699]: https://github.com/cockroachdb/cockroach/pull/127699 -[#127757]: https://github.com/cockroachdb/cockroach/pull/127757 -[#127763]: https://github.com/cockroachdb/cockroach/pull/127763 -[#128110]: https://github.com/cockroachdb/cockroach/pull/128110 -[#128239]: https://github.com/cockroachdb/cockroach/pull/128239 -[#128468]: https://github.com/cockroachdb/cockroach/pull/128468 -[#128586]: https://github.com/cockroachdb/cockroach/pull/128586 -[#128751]: https://github.com/cockroachdb/cockroach/pull/128751 diff --git a/src/current/_includes/releases/v23.1/v23.1.26.md b/src/current/_includes/releases/v23.1/v23.1.26.md index b027c8fa013..6fec74b483a 100644 --- a/src/current/_includes/releases/v23.1/v23.1.26.md +++ b/src/current/_includes/releases/v23.1/v23.1.26.md @@ -10,13 +10,13 @@ Release Date: September 12, 2024 1. A string that contains a JSON array of valid issuer URIs. Example: `['example.com/adfs','https://accounts.google.com']` 1. A string that contains a JSON map of valid issuer URIs to corresponding JWKS URIs and deserialized into a map of issuer URLs to corresponding JWKS URIs. 
A JSON map overrides the JWKS URI published in the issuer's `well-known/` endpoint. Example: `'{ "issuer_jwks_map": { "https://accounts.google.com": "https://www.googleapis.com/oauth2/v3/certs", "example.com/adfs": "https://example.com/adfs/discovery/keys" } }'` - When `issuer_jwks_map` is set, the key set is fetched from the JWKS URI directly. Otherwise, when `JWKSAutoFetchEnabled` is set, the JWKS URI is fetched from the issuer's `well-known/` endpoint. [#128669][#128669] + When `issuer_jwks_map` is set, the key set is fetched from the JWKS URI directly. Otherwise, when `JWKSAutoFetchEnabled` is set, the JWKS URI is fetched from the issuer's `well-known/` endpoint. #128669

Operational changes

- New [structured logging events]({% link v23.1/logging.md %}) in the `OPS` channel report broken connections and related transactions during node shutdown. - `node_shutdown_connection_timeout`: Logged if there are still open client connections after the timeout defined by `server.shutdown.connections.timeout` expires. - - `node_shutdown_transaction_timeout`: Logged if there are still open transactions on those open client connections after the timeout defined by `server.shutdown.transactions.timeout` expires. [#128709][#128709] + - `node_shutdown_transaction_timeout`: Logged if there are still open transactions on those open client connections after the timeout defined by `server.shutdown.transactions.timeout` expires. #128709

Bug fixes

@@ -25,17 +25,10 @@ Release Date: September 12, 2024 - `EXPLAIN (OPT, REDACT) CREATE VIEW` - `EXPLAIN (OPT, REDACT) CREATE FUNCTION` - [#128487][#128487] + #128487 -- Fixed a bug where incorrect values could be produced for virtual [computed columns]({% link v23.1/computed-columns.md %}) in rare cases when the virtual column expression's type did not match the type of the virtual column. [#129008][#129008] -- Fixed a bug where errors like `ERROR: column 'crdb_internal_idx_expr' does not exist` could be logged when accessing a table with an [expression index]({% link v23.1/expression-indexes.md %}) where the expression evaluates to an `ENUM` type. Example: `CREATE INDEX ON t ((col::an_enum))` [#129091][#129091] -- Fixed a bug introduced in v23.1 where a [user-defined function's]({% link v23.1/user-defined-functions.md %}) return type's parameters could not be named when dropping a user-defined function or procedure. [#115906][#115906] -- Fixed a slow-building memory leak when a cluster uses [GSSAPI Kerberos authentication]({% link v23.1/gssapi_authentication.md %}). [#130320][#130320] +- Fixed a bug where incorrect values could be produced for virtual [computed columns]({% link v23.1/computed-columns.md %}) in rare cases when the virtual column expression's type did not match the type of the virtual column. #129008 +- Fixed a bug where errors like `ERROR: column 'crdb_internal_idx_expr' does not exist` could be logged when accessing a table with an [expression index]({% link v23.1/expression-indexes.md %}) where the expression evaluates to an `ENUM` type. Example: `CREATE INDEX ON t ((col::an_enum))` #129091 +- Fixed a bug introduced in v23.1 where a [user-defined function's]({% link v23.1/user-defined-functions.md %}) return type's parameters could not be named when dropping a user-defined function or procedure. #115906 +- Fixed a slow-building memory leak when a cluster uses [GSSAPI Kerberos authentication]({% link v23.1/gssapi_authentication.md %}). 
#130320 -[#115906]: https://github.com/cockroachdb/cockroach/pull/115906 -[#128487]: https://github.com/cockroachdb/cockroach/pull/128487 -[#128669]: https://github.com/cockroachdb/cockroach/pull/128669 -[#128709]: https://github.com/cockroachdb/cockroach/pull/128709 -[#129008]: https://github.com/cockroachdb/cockroach/pull/129008 -[#129091]: https://github.com/cockroachdb/cockroach/pull/129091 -[#130320]: https://github.com/cockroachdb/cockroach/pull/130320 diff --git a/src/current/_includes/releases/v23.1/v23.1.27.md b/src/current/_includes/releases/v23.1/v23.1.27.md index e474b6b76e1..9a26183a213 100644 --- a/src/current/_includes/releases/v23.1/v23.1.27.md +++ b/src/current/_includes/releases/v23.1/v23.1.27.md @@ -5,6 +5,5 @@ Release Date: October 3, 2024 {% include releases/new-release-downloads-docker-image.md release=include.release %}

Bug fixes

-- Fixed a rare bug where a [lease transfer]({% link v23.1/architecture/replication-layer.md %}#epoch-based-leases-table-data) could lead to a `side-transport update saw closed timestamp regression` panic. The bug could occur when a node was [overloaded]({% link v23.1/ui-overload-dashboard.md %}) and failing to heartbeat its [node liveness]({% link v23.1/cluster-setup-troubleshooting.md %}#node-liveness-issues) record. [#131679][#131679] +- Fixed a rare bug where a [lease transfer]({% link v23.1/architecture/replication-layer.md %}#epoch-based-leases-table-data) could lead to a `side-transport update saw closed timestamp regression` panic. The bug could occur when a node was [overloaded]({% link v23.1/ui-overload-dashboard.md %}) and failing to heartbeat its [node liveness]({% link v23.1/cluster-setup-troubleshooting.md %}#node-liveness-issues) record. #131679 -[#131679]: https://github.com/cockroachdb/cockroach/pull/131679 diff --git a/src/current/_includes/releases/v23.1/v23.1.28.md b/src/current/_includes/releases/v23.1/v23.1.28.md index 3b030cc944c..53b330b6686 100644 --- a/src/current/_includes/releases/v23.1/v23.1.28.md +++ b/src/current/_includes/releases/v23.1/v23.1.28.md @@ -6,44 +6,27 @@ Release Date: October 10, 2024

Security changes

-- [`SHOW JOBS`]({% link v23.1/show-jobs.md %}) and its variants [`SHOW CHANGEFEED JOB`]({% link v23.1/show-jobs.md %}#show-changefeed-jobs) and [`SHOW CHANGEFEED JOBS`]({% link v23.1/show-jobs.md %}#show-changefeed-jobs) no longer expose user sensitive information like `client_key`. [#129910][#129910] +- [`SHOW JOBS`]({% link v23.1/show-jobs.md %}) and its variants [`SHOW CHANGEFEED JOB`]({% link v23.1/show-jobs.md %}#show-changefeed-jobs) and [`SHOW CHANGEFEED JOBS`]({% link v23.1/show-jobs.md %}#show-changefeed-jobs) no longer expose user sensitive information like `client_key`. #129910

General changes

-- Upgraded [gRPC]({% link v23.1/architecture/distribution-layer.md %}#grpc) to v1.56.3. [#130044][#130044] +- Upgraded [gRPC]({% link v23.1/architecture/distribution-layer.md %}#grpc) to v1.56.3. #130044

Operational changes

-- Added the `ranges.decommissioning` metric that represents the number of [ranges]({% link v23.1/architecture/glossary.md %}#range) which have a [replica]({% link v23.1/architecture/glossary.md %}#replica) on a [decommissioning node]({% link v23.1/node-shutdown.md %}). [#130254][#130254] +- Added the `ranges.decommissioning` metric that represents the number of [ranges]({% link v23.1/architecture/glossary.md %}#range) which have a [replica]({% link v23.1/architecture/glossary.md %}#replica) on a [decommissioning node]({% link v23.1/node-shutdown.md %}). #130254

DB Console changes

-- DB Console will show an alert message when the [license]({% link v23.1/licensing-faqs.md %}) is expired or there are less than 15 days left before it expires. [#130439][#130439] -- DB Console will show a notification alerting customers without an Enterprise [license]({% link v23.1/licensing-faqs.md %}) to upcoming license changes with a link to more information. [#130439][#130439] +- DB Console will show an alert message when the [license]({% link v23.1/licensing-faqs.md %}) is expired or there are less than 15 days left before it expires. #130439 +- DB Console will show a notification alerting customers without an Enterprise [license]({% link v23.1/licensing-faqs.md %}) to upcoming license changes with a link to more information. #130439

Bug fixes

-- Fixed a bug where the [`schema_locked` table parameter]({% link v23.1/with-storage-parameter.md %}#table-parameters) did not prevent a table from being referenced by a [foreign key]({% link v23.1/foreign-key.md %}). [#129752][#129752] -- Fixed a bug where the [`require_explicit_primary_keys`]({% link v23.1/session-variables.md %}#require-explicit-primary-keys) session variable would overly aggressively prevent all [`CREATE TABLE`]({% link v23.1/create-table.md %}) statements from working. [#129905][#129905] -- Fixed a rare bug where a [lease transfer]({% link v23.1/architecture/replication-layer.md %}#epoch-based-leases-table-data) could lead to a `side-transport update saw closed timestamp regression` panic. The bug could occur when a node was [overloaded]({% link v23.1/ui-overload-dashboard.md %}) and failing to heartbeat its [node liveness]({% link v23.1/cluster-setup-troubleshooting.md %}#node-liveness-issues) record. [#130124][#130124] -- Resolve a log message that read: `expiration of liveness record ... is not greater than expiration of the previous lease ... after liveness heartbeat`. This message is no longer possible. [#130623][#130623] -- Fixed a potential memory leak in [changefeeds]({% link v23.1/change-data-capture-overview.md %}) using a [cloud storage sink]({% link v23.1/changefeed-sinks.md %}#cloud-storage-sink). The memory leak could occur if both `changefeed.fast_gzip.enabled` and `changefeed.cloudstorage.async_flush.enabled` are `true` and the changefeed received an error while attempting to write to the cloud storage sink. 
[#130613][#130613] - - -[#128068]: https://github.com/cockroachdb/cockroach/pull/128068 -[#129752]: https://github.com/cockroachdb/cockroach/pull/129752 -[#129905]: https://github.com/cockroachdb/cockroach/pull/129905 -[#129910]: https://github.com/cockroachdb/cockroach/pull/129910 -[#130044]: https://github.com/cockroachdb/cockroach/pull/130044 -[#130124]: https://github.com/cockroachdb/cockroach/pull/130124 -[#130254]: https://github.com/cockroachdb/cockroach/pull/130254 -[#130316]: https://github.com/cockroachdb/cockroach/pull/130316 -[#130439]: https://github.com/cockroachdb/cockroach/pull/130439 -[#130613]: https://github.com/cockroachdb/cockroach/pull/130613 -[#130623]: https://github.com/cockroachdb/cockroach/pull/130623 -[#130689]: https://github.com/cockroachdb/cockroach/pull/130689 -[#130816]: https://github.com/cockroachdb/cockroach/pull/130816 -[#130820]: https://github.com/cockroachdb/cockroach/pull/130820 -[038cda982]: https://github.com/cockroachdb/cockroach/commit/038cda982 -[7f4a0e989]: https://github.com/cockroachdb/cockroach/commit/7f4a0e989 -[d5a2b0d4a]: https://github.com/cockroachdb/cockroach/commit/d5a2b0d4a +- Fixed a bug where the [`schema_locked` table parameter]({% link v23.1/with-storage-parameter.md %}#table-parameters) did not prevent a table from being referenced by a [foreign key]({% link v23.1/foreign-key.md %}). #129752 +- Fixed a bug where the [`require_explicit_primary_keys`]({% link v23.1/session-variables.md %}#require-explicit-primary-keys) session variable would overly aggressively prevent all [`CREATE TABLE`]({% link v23.1/create-table.md %}) statements from working. #129905 +- Fixed a rare bug where a [lease transfer]({% link v23.1/architecture/replication-layer.md %}#epoch-based-leases-table-data) could lead to a `side-transport update saw closed timestamp regression` panic. 
The bug could occur when a node was [overloaded]({% link v23.1/ui-overload-dashboard.md %}) and failing to heartbeat its [node liveness]({% link v23.1/cluster-setup-troubleshooting.md %}#node-liveness-issues) record. #130124 +- Resolve a log message that read: `expiration of liveness record ... is not greater than expiration of the previous lease ... after liveness heartbeat`. This message is no longer possible. #130623 +- Fixed a potential memory leak in [changefeeds]({% link v23.1/change-data-capture-overview.md %}) using a [cloud storage sink]({% link v23.1/changefeed-sinks.md %}#cloud-storage-sink). The memory leak could occur if both `changefeed.fast_gzip.enabled` and `changefeed.cloudstorage.async_flush.enabled` are `true` and the changefeed received an error while attempting to write to the cloud storage sink. #130613 + + diff --git a/src/current/_includes/releases/v23.1/v23.1.29.md b/src/current/_includes/releases/v23.1/v23.1.29.md index c0ca86e7318..5a9af1bc91d 100644 --- a/src/current/_includes/releases/v23.1/v23.1.29.md +++ b/src/current/_includes/releases/v23.1/v23.1.29.md @@ -6,18 +6,18 @@ Release Date: November 18, 2024

General changes

-- Changed the license CockroachDB is distributed under to the new CockroachDB Software License (CSL). [#131704][#131704] [#131916][#131916] [#131917][#131917] [#131966][#131966] [#131971][#131971] [#131969][#131969] [#131980][#131980] [#131979][#131979] [#131974][#131974] [#131975][#131975] [#132052][#132052] [#132059][#132059] [#132804][#132804] [#132800][#132800] -- Attempting to install a second enterprise trial license on the same cluster will now fail. [#131977][#131977] -- The cluster setting `diagnostics.reporting.enabled` is now ignored if the cluster has a Trial or Free license, or if the reporting job is unable to load any license at all. [#132347][#132347] +- Changed the license CockroachDB is distributed under to the new CockroachDB Software License (CSL). #131704 #131916 #131917 #131966 #131971 #131969 #131980 #131979 #131974 #131975 #132052 #132059 #132804 #132800 +- Attempting to install a second enterprise trial license on the same cluster will now fail. #131977 +- The cluster setting `diagnostics.reporting.enabled` is now ignored if the cluster has a Trial or Free license, or if the reporting job is unable to load any license at all. #132347

{{ site.data.products.enterprise }} edition changes

-- A new `changefeed.total_ranges` metric has been added and can be used to monitor the number of ranges that are watched by changefeed aggregators. It shares the same polling interval as `changefeed.lagging_ranges`, which is controlled by the existing `changefeed.lagging_ranges_polling_interval` cluster setting. [#131246][#131246] -- Allowed access to DB console APIs via JWT, which can be supplied as a Bearer token in the Authorization header. [#133457][#133457] +- A new `changefeed.total_ranges` metric has been added and can be used to monitor the number of ranges that are watched by changefeed aggregators. It shares the same polling interval as `changefeed.lagging_ranges`, which is controlled by the existing `changefeed.lagging_ranges_polling_interval` cluster setting. #131246 +- Allowed access to DB console APIs via JWT, which can be supplied as a Bearer token in the Authorization header. #133457

DB Console changes

-- DB Console will reflect any throttling behavior from the cluster due to an expired license or missing telemetry data. Enterprise licenses are not affected. [#131858][#131858] +- DB Console will reflect any throttling behavior from the cluster due to an expired license or missing telemetry data. Enterprise licenses are not affected. #131858

Bug fixes

@@ -26,53 +26,22 @@ Release Date: November 18, 2024 2. The correlated subquery has a `GroupBy` or `DistinctOn` operator with an outer-column reference in its input. 3. The correlated subquery is in the input of a `Select` or `Join` operator 4. The `Select` or `Join` has a filter that sets the outer-column reference from (2) equal to a non-outer column in the input of the grouping operator. - 5. The grouping column set does not include the replacement column, and functionally determines the replacement column. [#130987][#130987] -- The AWS endpoint and cloud custom HTTP client configuration are now considered when implicit authentication is used, whereas previously these were only considered when using explicit credentials. [#131200][#131200] -- Fixed a bug that could cause spurious user permission errors when multiple databases shared a common schema with a routine referencing a table. The bug has existed since UDFs were introduced in v22.2. [#126414][#126414] -- Fixed a rare bug where a lease transfer could lead to a `side-transport update saw closed timestamp regression` panic. The bug could occur when a node was overloaded and failing to heartbeat its node liveness record. [#131838][#131838] -- Fixed an error that could happen if an aggregate function was used as the value in a `SET` command. [#131957][#131957] -- Fixed a bug where a span stats request on a mixed version cluster resulted in a null pointer exception (NPE). [#132685][#132685] + 5. The grouping column set does not include the replacement column, and functionally determines the replacement column. #130987 +- The AWS endpoint and cloud custom HTTP client configuration are now considered when implicit authentication is used, whereas previously these were only considered when using explicit credentials. #131200 +- Fixed a bug that could cause spurious user permission errors when multiple databases shared a common schema with a routine referencing a table. 
The bug has existed since UDFs were introduced in v22.2. #126414 +- Fixed a rare bug where a lease transfer could lead to a `side-transport update saw closed timestamp regression` panic. The bug could occur when a node was overloaded and failing to heartbeat its node liveness record. #131838 +- Fixed an error that could happen if an aggregate function was used as the value in a `SET` command. #131957 +- Fixed a bug where a span stats request on a mixed version cluster resulted in a null pointer exception (NPE). #132685 - Fixed a rare bug in which an update of a primary key column which is also the only column in a separate column family can sometimes fail to update the primary index. This bug has existed since v22.2. Requirements to hit the bug are: 1. A table with multiple column families. 2. A column family containing a single primary key column. 3. That column family is not the first column family. 4. That column family existed before its column was in the primary key. 5. That column must be of type `FLOAT4/8`, `DECIMAL`, `JSON`, collated `STRING`, or `ARRAY`. - 6. An update changes that column from a composite value to a non-composite value. [#132124][#132124] -- The `proretset` column of the `pg_catalog.pg_proc` table is now properly set to `true` for set-returning builtin functions. [#132873][#132873] -- Fixed a bug in the query optimizer which could cause CockroachDB nodes to crash in rare cases. The bug could occur when a query contained a filter of the form `col IN (elem0, elem1, ..., elemN)` such that `N` is very large, e.g. 1.6+ million, and when `col` exists in a hash-sharded index or exists in a table with an indexed, computed column dependent on `col`. [#133068][#133068] -- Users with the admin role can now run `ALTER DEFAULT PRIVILEGES FOR target_role ...` on any `target_role`. Previously, this could result in a privilege error, which is incorrect as admins are allowed to perform any operation. 
[#133067][#133067] -- `REASSIGN OWNED BY` will now transfer ownership of the public schema. Previously, it would always skip over the public schema even if it was owned by the target role. [#133067][#133067] -- Fixed a bug where backup schedules could advance a protected timestamp too early, which caused incremental backups to fail. [#131388][#131388] + 6. An update changes that column from a composite value to a non-composite value. #132124 +- The `proretset` column of the `pg_catalog.pg_proc` table is now properly set to `true` for set-returning builtin functions. #132873 +- Fixed a bug in the query optimizer which could cause CockroachDB nodes to crash in rare cases. The bug could occur when a query contained a filter of the form `col IN (elem0, elem1, ..., elemN)` such that `N` is very large, e.g. 1.6+ million, and when `col` exists in a hash-sharded index or exists in a table with an indexed, computed column dependent on `col`. #133068 +- Users with the admin role can now run `ALTER DEFAULT PRIVILEGES FOR target_role ...` on any `target_role`. Previously, this could result in a privilege error, which is incorrect as admins are allowed to perform any operation. #133067 +- `REASSIGN OWNED BY` will now transfer ownership of the public schema. Previously, it would always skip over the public schema even if it was owned by the target role. #133067 +- Fixed a bug where backup schedules could advance a protected timestamp too early, which caused incremental backups to fail. 
#131388 -[#126414]: https://github.com/cockroachdb/cockroach/pull/126414 -[#130987]: https://github.com/cockroachdb/cockroach/pull/130987 -[#131200]: https://github.com/cockroachdb/cockroach/pull/131200 -[#131246]: https://github.com/cockroachdb/cockroach/pull/131246 -[#131388]: https://github.com/cockroachdb/cockroach/pull/131388 -[#131704]: https://github.com/cockroachdb/cockroach/pull/131704 -[#131838]: https://github.com/cockroachdb/cockroach/pull/131838 -[#131858]: https://github.com/cockroachdb/cockroach/pull/131858 -[#131916]: https://github.com/cockroachdb/cockroach/pull/131916 -[#131917]: https://github.com/cockroachdb/cockroach/pull/131917 -[#131957]: https://github.com/cockroachdb/cockroach/pull/131957 -[#131966]: https://github.com/cockroachdb/cockroach/pull/131966 -[#131969]: https://github.com/cockroachdb/cockroach/pull/131969 -[#131971]: https://github.com/cockroachdb/cockroach/pull/131971 -[#131974]: https://github.com/cockroachdb/cockroach/pull/131974 -[#131975]: https://github.com/cockroachdb/cockroach/pull/131975 -[#131977]: https://github.com/cockroachdb/cockroach/pull/131977 -[#131979]: https://github.com/cockroachdb/cockroach/pull/131979 -[#131980]: https://github.com/cockroachdb/cockroach/pull/131980 -[#132052]: https://github.com/cockroachdb/cockroach/pull/132052 -[#132059]: https://github.com/cockroachdb/cockroach/pull/132059 -[#132124]: https://github.com/cockroachdb/cockroach/pull/132124 -[#132347]: https://github.com/cockroachdb/cockroach/pull/132347 -[#132452]: https://github.com/cockroachdb/cockroach/pull/132452 -[#132685]: https://github.com/cockroachdb/cockroach/pull/132685 -[#132800]: https://github.com/cockroachdb/cockroach/pull/132800 -[#132804]: https://github.com/cockroachdb/cockroach/pull/132804 -[#132873]: https://github.com/cockroachdb/cockroach/pull/132873 -[#133067]: https://github.com/cockroachdb/cockroach/pull/133067 -[#133068]: https://github.com/cockroachdb/cockroach/pull/133068 -[#133457]: 
https://github.com/cockroachdb/cockroach/pull/133457 diff --git a/src/current/_includes/releases/v23.1/v23.1.3.md b/src/current/_includes/releases/v23.1/v23.1.3.md index d6ea7009b3d..865268b9781 100644 --- a/src/current/_includes/releases/v23.1/v23.1.3.md +++ b/src/current/_includes/releases/v23.1/v23.1.3.md @@ -3,18 +3,18 @@ Release Date: June 13, 2023 {{site.data.alerts.callout_danger}} -A [bug](https://github.com/cockroachdb/cockroach/issues/104798) was discovered in a change included in v23.1.3 (this release). This bug can affect clusters upgrading to v23.1.3 from [v22.2.x]({% link releases/v22.2.md %}). In an affected cluster, jobs that were running during the upgrade could hang or fail to run after the upgrade is finalized. Users upgrading from v22.2.x are advised to use [v23.1.2](#v23-1-2) to upgrade, or to set the [`cluster.preserve_downgrade_option`]({% link v23.1/upgrade-cockroach-version.md %}#step-3-decide-how-the-upgrade-will-be-finalized) cluster setting to delay finalization of the upgrade until they can upgrade to v23.1.4. +A bug was discovered in a change included in v23.1.3 (this release). This bug can affect clusters upgrading to v23.1.3 from [v22.2.x]({% link releases/v22.2.md %}). In an affected cluster, jobs that were running during the upgrade could hang or fail to run after the upgrade is finalized. Users upgrading from v22.2.x are advised to use [v23.1.2](#v23-1-2) to upgrade, or to set the [`cluster.preserve_downgrade_option`]({% link v23.1/upgrade-cockroach-version.md %}#step-3-decide-how-the-upgrade-will-be-finalized) cluster setting to delay finalization of the upgrade until they can upgrade to v23.1.4. {{site.data.alerts.end}} {% include releases/new-release-downloads-docker-image.md release=include.release %}

Security updates

-- The new `server.client_cert_expiration_cache.capacity` [cluster setting]({% link v23.1/cluster-settings.md %}) allows you to configure the minimum time until a user's set of client certificates will expire. When `server.client_cert_expiration_cache.capacity` is set to a non-zero value, the new metric `security.certificate.expiration.client` tracks the number of client certificates that have expired. [#104165][#104165] +- The new `server.client_cert_expiration_cache.capacity` [cluster setting]({% link v23.1/cluster-settings.md %}) allows you to configure the minimum time until a user's set of client certificates will expire. When `server.client_cert_expiration_cache.capacity` is set to a non-zero value, the new metric `security.certificate.expiration.client` tracks the number of client certificates that have expired. #104165

{{ site.data.products.enterprise }} edition changes

-- Role-based audit logging is now an {{ site.data.products.enterprise }} feature. Only {{ site.data.products.enterprise }} users will be able to configure role-based audit logging using the `sql.log.user_audit` [cluster setting]({% link v23.1/cluster-settings.md %}). [#104453][#104453] +- Role-based audit logging is now an {{ site.data.products.enterprise }} feature. Only {{ site.data.products.enterprise }} users will be able to configure role-based audit logging using the `sql.log.user_audit` [cluster setting]({% link v23.1/cluster-settings.md %}). #104453 - The new `sql.log.user_audit` [cluster setting]({% link v23.1/cluster-settings.md %}) enables role-based auditing. When the setting is enabled, you can set an audit logging configuration using a table-like syntax. Each row in the configuration represents an _audit setting_ in the configuration. An audit setting is comprised of the following columns: `USER/ROLE` and `STATEMENT_FILTER`: ~~~sql @@ -25,123 +25,74 @@ A [bug](https://github.com/cockroachdb/cockroach/issues/104798) was discovered i '; ~~~ - [#104179][#104179] -- Introduced the `sql.log.user_audit.reduced_config.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}). When enabled, this cluster setting computes a "reduced" [audit configuration](https://www.cockroachlabs.com/docs/v23.1/eventlog#role_based_audit_event) based on the user's current role memberships and the current value for the `sql.log.user_audit` cluster setting. The "reduced" audit configuration is computed at the **first SQL event emit by the user, after the setting is enabled**. When the cluster setting is enabled, CockroachDB can compute the audit configuration once at session start, which provides around a 5% increase in throughput. However, changes to the audit configuration (user role memberships or cluster setting configuration) are not reflected within a session. A new session should be started to reflect the configuration changes in auditing behavior. 
[#104372][#104372] + #104179 +- Introduced the `sql.log.user_audit.reduced_config.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}). When enabled, this cluster setting computes a "reduced" [audit configuration](https://www.cockroachlabs.com/docs/v23.1/eventlog#role_based_audit_event) based on the user's current role memberships and the current value for the `sql.log.user_audit` cluster setting. The "reduced" audit configuration is computed at the **first SQL event emitted by the user, after the setting is enabled**. When the cluster setting is enabled, CockroachDB can compute the audit configuration once at session start, which provides around a 5% increase in throughput. However, changes to the audit configuration (user role memberships or cluster setting configuration) are not reflected within a session. A new session should be started to reflect the configuration changes in auditing behavior. #104372

SQL language changes

-- When the `sql.trace.stmt.enable_threshold` or `sql.trace.txn.enable_threshold` [cluster settings]({% link v23.1/cluster-settings.md %}) is enabled, the logging output is now emitted on the [`SQL_EXEC` channel](https://www.cockroachlabs.com/docs/v23.1/logging#sql_exec). Previously, this was emitted to the [`DEV` channel]({% link v23.1/logging.md %}#dev). [#104047][#104047] -- [`SHOW GRANTS`]({% link v23.1/show-grants.md %}) now lists privileges inherited by role membership. `SHOW GRANTS ON ROLE` statements no longer require any privileges and also lists implicit grantees. [#104587][#104587] +- When the `sql.trace.stmt.enable_threshold` or `sql.trace.txn.enable_threshold` [cluster settings]({% link v23.1/cluster-settings.md %}) is enabled, the logging output is now emitted on the [`SQL_EXEC` channel](https://www.cockroachlabs.com/docs/v23.1/logging#sql_exec). Previously, this was emitted to the [`DEV` channel]({% link v23.1/logging.md %}#dev). #104047 +- [`SHOW GRANTS`]({% link v23.1/show-grants.md %}) now lists privileges inherited by role membership. `SHOW GRANTS ON ROLE` statements no longer require any privileges and also lists implicit grantees. #104587

Operational changes

-- The new [metric]({% link v23.1/metrics.md %}) `leases.liveness` shows the number of [liveness range]({% link v23.1/configure-replication-zones.md %}#create-a-replication-zone-for-a-system-range) leases per node to track the liveness range leaseholder. [#104076][#104076] +- The new [metric]({% link v23.1/metrics.md %}) `leases.liveness` shows the number of [liveness range]({% link v23.1/configure-replication-zones.md %}#create-a-replication-zone-for-a-system-range) leases per node to track the liveness range leaseholder. #104076 - The new a gauge metric `sql.conns_waiting_to_hash` counts the number of connection attempts that are being limited due to the number of concurrent password hashing operations. This behavior has been present since v21.2 to [prevent password hashing from increasing CPU load]({% link v23.1/query-behavior-troubleshooting.md %}#high-client-cpu-load-connection-pool-exhaustion-or-increased-connection-latency-when-scram-password-based-authentication-is-enabled). The metric is expected to be `0`, or close to `0`, in a healthy setup. If the metric is consistently high and connection latencies are high, then an operator should do one or more of the following: - Ensure applications using the cluster have properly configured [connection pools]({% link v23.1/connection-pooling.md %}). - Add more vCPU or more nodes to the cluster. - - Increase the password hashing concurrency using the `COCKROACH_MAX_PW_HASH_COMPUTE_CONCURRENCY` [environment variable]({% link v23.1/cockroach-commands.md %}#environment-variables). [#104388][#104388] + - Increase the password hashing concurrency using the `COCKROACH_MAX_PW_HASH_COMPUTE_CONCURRENCY` [environment variable]({% link v23.1/cockroach-commands.md %}#environment-variables). #104388

DB Console changes

-- Added merge queue failure and merge queue processing time metrics to the [**Queue Processing Failures**]({% link v23.1/ui-queues-dashboard.md %}#queue-processing-failures) and [**Queue Processing Times**]({% link v23.1/ui-queues-dashboard.md %}#queue-processing-times) graphs respectively. [#104034][#104034] -- Removed the **Circuit Breaker Tripped Events** graph from the [**Replication**]({% link v23.1/ui-replication-dashboard.md %}) dashboard. [#104038][#104038] -- Added **Completed time** to [**Jobs**]({% link v23.1/ui-jobs-page.md %}) and [**Job details**]({% link v23.1/ui-jobs-page.md %}#job-details) pages, and updated the time format on those pages to include seconds and milliseconds. [#104063][#104063] -- Added a **Created SQL Connections** chart on the [**SQL**]({% link v23.1/ui-sql-dashboard.md %}) dashboard under **Metrics**. [#104072][#104072] -- Added **Ranges in Catchup Mode** and **Rangefeed Catchup Scans Duration** charts to the [**Changefeeds**]({% link v23.1/ui-cdc-dashboard.md %}) dashboard. [#104122][#104122] -- The sort setting on the [**Hot Ranges**]({% link v23.1/ui-hot-ranges-page.md %}) page is now persisted across page reloads and navigation. [#104112][#104112] -- The [**Databases**]({% link v23.1/ui-databases-page.md %}) page now supports a large number of tables for a single database. If more than 40 tables are present in a database, the sort on the page will be disabled; however, it is still possible to filter by table name. [#103854][#103854] -- On the **Statement Details** page, renamed the "Idle Latency" metric to "Client Wait Time" and separated it into its own chart. [#103915][#103915] +- Added merge queue failure and merge queue processing time metrics to the [**Queue Processing Failures**]({% link v23.1/ui-queues-dashboard.md %}#queue-processing-failures) and [**Queue Processing Times**]({% link v23.1/ui-queues-dashboard.md %}#queue-processing-times) graphs respectively. 
#104034 +- Removed the **Circuit Breaker Tripped Events** graph from the [**Replication**]({% link v23.1/ui-replication-dashboard.md %}) dashboard. #104038 +- Added **Completed time** to [**Jobs**]({% link v23.1/ui-jobs-page.md %}) and [**Job details**]({% link v23.1/ui-jobs-page.md %}#job-details) pages, and updated the time format on those pages to include seconds and milliseconds. #104063 +- Added a **Created SQL Connections** chart on the [**SQL**]({% link v23.1/ui-sql-dashboard.md %}) dashboard under **Metrics**. #104072 +- Added **Ranges in Catchup Mode** and **Rangefeed Catchup Scans Duration** charts to the [**Changefeeds**]({% link v23.1/ui-cdc-dashboard.md %}) dashboard. #104122 +- The sort setting on the [**Hot Ranges**]({% link v23.1/ui-hot-ranges-page.md %}) page is now persisted across page reloads and navigation. #104112 +- The [**Databases**]({% link v23.1/ui-databases-page.md %}) page now supports a large number of tables for a single database. If more than 40 tables are present in a database, the sort on the page will be disabled; however, it is still possible to filter by table name. #103854 +- On the **Statement Details** page, renamed the "Idle Latency" metric to "Client Wait Time" and separated it into its own chart. #103915 - Added more search criteria options on the [**SQL Activity**]({% link v23.1/ui-overview.md %}#sql-activity) page: - For the **Top** dropdown: `1000`, `5000`, and `10000`. - For the **By** dropdown on the **Statements** tab: Last Execution Time, Max Latency, Max Memory, Min Latency, Network, P50 Latency, P90 Latency, Retries, and Rows Processed. - - For the **By** dropdown on the **Transactions** tab: Max Memory, Network, Retries, and Rows Processed. [#104054][#104054] -- Added a new link to the **Range Status** page on the [**Replication**]({% link v23.1/ui-replication-dashboard.md %}) dashboard that opens the **Enqueue Ranges** page with the node ID already completed. 
[#104100][#104100] -- The DB Console will no longer show `DROP INDEX` recommendations for unique indexes. [#104287][#104287] -- On the **Transactions** tab of the [**SQL Activity**]({% link v23.1/ui-overview.md %}#sql-activity) page, the **Status** will be `Idle` if the executing transaction is not currently executing a statement. Previously, the status would be `Executing`. [#103925][#103925] -- Fixed the job that updates the `statement_activity` and `transaction_activity` tables on the [**SQL Activity**]({% link v23.1/ui-overview.md %}#sql-activity) page. The table size is now constrained when there are a lot of unique queries. This fix helps to prevent slowing down the **SQL Activity** page. [#104340][#104340] + - For the **By** dropdown on the **Transactions** tab: Max Memory, Network, Retries, and Rows Processed. #104054 +- Added a new link to the **Range Status** page on the [**Replication**]({% link v23.1/ui-replication-dashboard.md %}) dashboard that opens the **Enqueue Ranges** page with the node ID already completed. #104100 +- The DB Console will no longer show `DROP INDEX` recommendations for unique indexes. #104287 +- On the **Transactions** tab of the [**SQL Activity**]({% link v23.1/ui-overview.md %}#sql-activity) page, the **Status** will be `Idle` if the executing transaction is not currently executing a statement. Previously, the status would be `Executing`. #103925 +- Fixed the job that updates the `statement_activity` and `transaction_activity` tables on the [**SQL Activity**]({% link v23.1/ui-overview.md %}#sql-activity) page. The table size is now constrained when there are a lot of unique queries. This fix helps to prevent slowing down the **SQL Activity** page. #104340

Bug fixes

-- Fixed a panic that could occur while a [`COPY`]({% link v23.1/copy.md %}) statement is logged for telemetry purposes. [#103848][#103848] -- Fixed a bug where disk space used by deleted and [garbage collected](https://www.cockroachlabs.com/docs/v23.1/architecture/storage-layer#garbage-collection) data would not be reclaimed in a timely manner, especially when a store has low-write workload. [#103865][#103865] -- Fixed a problem that could lead to erroneously refused lease transfers, causing the following error message: `refusing to transfer lease to [...] because target may need a Raft snapshot: replica in StateProbe`. [#103849][#103849] -- Fixed a bug where [`COPY`]({% link v23.1/copy.md %}) in v23.1.0 and beta versions would incorrectly encode data with multiple [column families]({% link v23.1/column-families.md %}). The data must be dropped and re-imported to be encoded correctly. [#103393][#103393] -- Fixed a bug where running a [`debug` command]({% link v23.1/cockroach-commands.md %}#commands) that manipulates a store (e.g., `debug compact`) without first terminating the node using the store, could result in corruption of the node's store if encryption-at-rest was enabled. [#102877][#102877] -- Fixed a bug where [`SHOW DEFAULT PRIVILEGES`]({% link v23.1/show-default-privileges.md %}) did not work correctly if the database name or schema name being inspected had upper-case or special characters. [#103952][#103952] -- Fixed a bug that could cause queries with [joins]({% link v23.1/joins.md %}) or [subqueries]({% link v23.1/subqueries.md %}) to omit rows where column values are `NULL` in very rare cases. This bug was present since v20.2. [#103803][#103803] -- Fixed a bug that could cause goroutines to hang during [SCRAM authentication](https://www.cockroachlabs.com/docs/v23.1/security-reference/scram-authentication). 
[#104197][#104197] -- The `sys.cpu.combined.percent-normalized` metric now uses `GOMAXPROCS`, if lower than the number of CPU shares when calculating CPU utilization. [#104191][#104191] -- The warning message about a missing `--advertise-addr` flag is no longer displayed when the flag is specified on server start. [#104227][#104227] -- Fixed a rare bug where stale multi-column table [statistics]({% link v23.1/cost-based-optimizer.md %}) could cause table statistics forecasts to be inaccurate, leading to un-optimal query plans. [#104229][#104229] -- Fixed a bug in v23.1.0 where the `node_id` field would be omitted from logs. The `node_id` value has now been restored to the logs. [#103798][#103798] -- Fixed a bug where the collection of `KV bytes read` and `KV gRPC calls` execution statistics during [`EXPLAIN ANALYZE`]({% link v23.1/explain-analyze.md %}) could be incorrect (it would remain at zero) in some cases. The bug was introduced in the v23.1.0 release. [#104194][#104194] -- Fixed an incorrect results bug in v22.2.10 and v23.1.2 and above when the `optimizer_use_improved_computed_column_filters_derivation` [session setting]({% link v23.1/set-vars.md %}) is `true`. Predicates on [computed columns]({% link v23.1/computed-columns.md %}) are derived when an ORed predicate on a column in the computed column expression is present. [#104276][#104276] -- The [`keyvisualizer` job]({% link v23.1/ui-key-visualizer.md %}) no longer panics if an error is encountered while cleaning up stale samples. Instead, if the job encounters an error, the job will try again later. [#104374][#104374] -- Fixed a bug where it was possible for a SQL row to be split across two ranges. When this occurred, SQL queries could return unexpected errors. The real keys are now inspected rather than just request keys to determine load-based split points. 
[#103876][#103876] -- Fixed a bug which could cause a panic when a [`CREATE FUNCTION`]({% link v23.1/create-function.md %}) statement used the [`setval()` built-in function]({% link v23.1/functions-and-operators.md %}). [#104297][#104297] -- Fixed a rare race condition that could allow large [restore]({% link v23.1/restore.md %}) jobs to fail with an `unable to find store` error. [#100955][#100955] -- Fixed a bug where CockroachDB would not ignore the messages that it should, if there was an error while in the [PostgreSQL extended protocol]({% link v23.1/postgresql-compatibility.md %}). [#104459][#104459] -- Fixed a bug in which some [Google Cloud Platform]({% link v23.1/use-cloud-storage.md %})-related errors would be returned with an uninformative error. [#104065][#104065] -- Fixed a bug where in rare cases a panic could occur during shutdown in relation to the SQL activity computation. This bug was introduced in v23.1.0. [#104515][#104515] -- The backfill of `system.job_info` upgrade migration that runs during upgrades from v22.2 now processes rows in batches to avoid cases where it could become stuck due to [contention]({% link v23.1/performance-best-practices-overview.md %}#transaction-contention) and transaction retries. [#104574][#104574] -- Fixes a bug in which [`SHOW BACKUP`]({% link v23.1/show-backup.md %}) would fail to show a [locality-aware backup]({% link v23.1/take-and-restore-locality-aware-backups.md %}) that contained [incremental backups]({% link v23.1/take-full-and-incremental-backups.md %}). [#103761][#103761] +- Fixed a panic that could occur while a [`COPY`]({% link v23.1/copy.md %}) statement is logged for telemetry purposes. #103848 +- Fixed a bug where disk space used by deleted and [garbage collected](https://www.cockroachlabs.com/docs/v23.1/architecture/storage-layer#garbage-collection) data would not be reclaimed in a timely manner, especially when a store has low-write workload. 
#103865 +- Fixed a problem that could lead to erroneously refused lease transfers, causing the following error message: `refusing to transfer lease to [...] because target may need a Raft snapshot: replica in StateProbe`. #103849 +- Fixed a bug where [`COPY`]({% link v23.1/copy.md %}) in v23.1.0 and beta versions would incorrectly encode data with multiple [column families]({% link v23.1/column-families.md %}). The data must be dropped and re-imported to be encoded correctly. #103393 +- Fixed a bug where running a [`debug` command]({% link v23.1/cockroach-commands.md %}#commands) that manipulates a store (e.g., `debug compact`) without first terminating the node using the store, could result in corruption of the node's store if encryption-at-rest was enabled. #102877 +- Fixed a bug where [`SHOW DEFAULT PRIVILEGES`]({% link v23.1/show-default-privileges.md %}) did not work correctly if the database name or schema name being inspected had upper-case or special characters. #103952 +- Fixed a bug that could cause queries with [joins]({% link v23.1/joins.md %}) or [subqueries]({% link v23.1/subqueries.md %}) to omit rows where column values are `NULL` in very rare cases. This bug was present since v20.2. #103803 +- Fixed a bug that could cause goroutines to hang during [SCRAM authentication](https://www.cockroachlabs.com/docs/v23.1/security-reference/scram-authentication). #104197 +- The `sys.cpu.combined.percent-normalized` metric now uses `GOMAXPROCS`, if lower than the number of CPU shares when calculating CPU utilization. #104191 +- The warning message about a missing `--advertise-addr` flag is no longer displayed when the flag is specified on server start. #104227 +- Fixed a rare bug where stale multi-column table [statistics]({% link v23.1/cost-based-optimizer.md %}) could cause table statistics forecasts to be inaccurate, leading to un-optimal query plans. #104229 +- Fixed a bug in v23.1.0 where the `node_id` field would be omitted from logs. 
The `node_id` value has now been restored to the logs. #103798 +- Fixed a bug where the collection of `KV bytes read` and `KV gRPC calls` execution statistics during [`EXPLAIN ANALYZE`]({% link v23.1/explain-analyze.md %}) could be incorrect (it would remain at zero) in some cases. The bug was introduced in the v23.1.0 release. #104194 +- Fixed an incorrect results bug in v22.2.10 and v23.1.2 and above when the `optimizer_use_improved_computed_column_filters_derivation` [session setting]({% link v23.1/set-vars.md %}) is `true`. Predicates on [computed columns]({% link v23.1/computed-columns.md %}) are derived when an ORed predicate on a column in the computed column expression is present. #104276 +- The [`keyvisualizer` job]({% link v23.1/ui-key-visualizer.md %}) no longer panics if an error is encountered while cleaning up stale samples. Instead, if the job encounters an error, the job will try again later. #104374 +- Fixed a bug where it was possible for a SQL row to be split across two ranges. When this occurred, SQL queries could return unexpected errors. The real keys are now inspected rather than just request keys to determine load-based split points. #103876 +- Fixed a bug which could cause a panic when a [`CREATE FUNCTION`]({% link v23.1/create-function.md %}) statement used the [`setval()` built-in function]({% link v23.1/functions-and-operators.md %}). #104297 +- Fixed a rare race condition that could allow large [restore]({% link v23.1/restore.md %}) jobs to fail with an `unable to find store` error. #100955 +- Fixed a bug where CockroachDB would not ignore the messages that it should, if there was an error while in the [PostgreSQL extended protocol]({% link v23.1/postgresql-compatibility.md %}). #104459 +- Fixed a bug in which some [Google Cloud Platform]({% link v23.1/use-cloud-storage.md %})-related errors would be returned with an uninformative error. 
#104065 +- Fixed a bug where in rare cases a panic could occur during shutdown in relation to the SQL activity computation. This bug was introduced in v23.1.0. #104515 +- The backfill of `system.job_info` upgrade migration that runs during upgrades from v22.2 now processes rows in batches to avoid cases where it could become stuck due to [contention]({% link v23.1/performance-best-practices-overview.md %}#transaction-contention) and transaction retries. #104574 +- Fixed a bug in which [`SHOW BACKUP`]({% link v23.1/show-backup.md %}) would fail to show a [locality-aware backup]({% link v23.1/take-and-restore-locality-aware-backups.md %}) that contained [incremental backups]({% link v23.1/take-full-and-incremental-backups.md %}). #103761

Performance improvements

-- If [`transaction_rows_read_err`]({% link v23.1/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction) is set to a non-zero value, CockroachDB now ensures that any single scan never reads more than `transaction_rows_read_err` + 1 rows. This prevents transactions that would error due to the `transaction_rows_read_err` setting from causing a large performance overhead due to large scans. [#104364][#104364] -- Improved the efficiency of the an internal job that during upgrades deleted descriptors of dropped functions. Previously, the job would check every single ID until the max descriptor ID, which was particularly inefficient when this was large. Now, the job only queries the upper bound ID of each batch. [#104590][#104590] +- If [`transaction_rows_read_err`]({% link v23.1/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction) is set to a non-zero value, CockroachDB now ensures that any single scan never reads more than `transaction_rows_read_err` + 1 rows. This prevents transactions that would error due to the `transaction_rows_read_err` setting from causing a large performance overhead due to large scans. #104364 +- Improved the efficiency of an internal job that during upgrades deleted descriptors of dropped functions. Previously, the job would check every single ID until the max descriptor ID, which was particularly inefficient when this was large. Now, the job only queries the upper bound ID of each batch. #104590

Contributors

This release includes 111 merged PRs by 41 authors. -[#100955]: https://github.com/cockroachdb/cockroach/pull/100955 -[#102877]: https://github.com/cockroachdb/cockroach/pull/102877 -[#103393]: https://github.com/cockroachdb/cockroach/pull/103393 -[#103761]: https://github.com/cockroachdb/cockroach/pull/103761 -[#103798]: https://github.com/cockroachdb/cockroach/pull/103798 -[#103803]: https://github.com/cockroachdb/cockroach/pull/103803 -[#103841]: https://github.com/cockroachdb/cockroach/pull/103841 -[#103848]: https://github.com/cockroachdb/cockroach/pull/103848 -[#103849]: https://github.com/cockroachdb/cockroach/pull/103849 -[#103854]: https://github.com/cockroachdb/cockroach/pull/103854 -[#103865]: https://github.com/cockroachdb/cockroach/pull/103865 -[#103876]: https://github.com/cockroachdb/cockroach/pull/103876 -[#103915]: https://github.com/cockroachdb/cockroach/pull/103915 -[#103925]: https://github.com/cockroachdb/cockroach/pull/103925 -[#103952]: https://github.com/cockroachdb/cockroach/pull/103952 -[#104034]: https://github.com/cockroachdb/cockroach/pull/104034 -[#104038]: https://github.com/cockroachdb/cockroach/pull/104038 -[#104047]: https://github.com/cockroachdb/cockroach/pull/104047 -[#104054]: https://github.com/cockroachdb/cockroach/pull/104054 -[#104063]: https://github.com/cockroachdb/cockroach/pull/104063 -[#104065]: https://github.com/cockroachdb/cockroach/pull/104065 -[#104072]: https://github.com/cockroachdb/cockroach/pull/104072 -[#104076]: https://github.com/cockroachdb/cockroach/pull/104076 -[#104100]: https://github.com/cockroachdb/cockroach/pull/104100 -[#104112]: https://github.com/cockroachdb/cockroach/pull/104112 -[#104122]: https://github.com/cockroachdb/cockroach/pull/104122 -[#104165]: https://github.com/cockroachdb/cockroach/pull/104165 -[#104179]: https://github.com/cockroachdb/cockroach/pull/104179 -[#104191]: https://github.com/cockroachdb/cockroach/pull/104191 -[#104194]: 
https://github.com/cockroachdb/cockroach/pull/104194 -[#104197]: https://github.com/cockroachdb/cockroach/pull/104197 -[#104227]: https://github.com/cockroachdb/cockroach/pull/104227 -[#104229]: https://github.com/cockroachdb/cockroach/pull/104229 -[#104276]: https://github.com/cockroachdb/cockroach/pull/104276 -[#104287]: https://github.com/cockroachdb/cockroach/pull/104287 -[#104297]: https://github.com/cockroachdb/cockroach/pull/104297 -[#104340]: https://github.com/cockroachdb/cockroach/pull/104340 -[#104361]: https://github.com/cockroachdb/cockroach/pull/104361 -[#104364]: https://github.com/cockroachdb/cockroach/pull/104364 -[#104372]: https://github.com/cockroachdb/cockroach/pull/104372 -[#104374]: https://github.com/cockroachdb/cockroach/pull/104374 -[#104388]: https://github.com/cockroachdb/cockroach/pull/104388 -[#104416]: https://github.com/cockroachdb/cockroach/pull/104416 -[#104453]: https://github.com/cockroachdb/cockroach/pull/104453 -[#104459]: https://github.com/cockroachdb/cockroach/pull/104459 -[#104515]: https://github.com/cockroachdb/cockroach/pull/104515 -[#104574]: https://github.com/cockroachdb/cockroach/pull/104574 -[#104587]: https://github.com/cockroachdb/cockroach/pull/104587 -[#104590]: https://github.com/cockroachdb/cockroach/pull/104590 diff --git a/src/current/_includes/releases/v23.1/v23.1.30.md b/src/current/_includes/releases/v23.1/v23.1.30.md index 84ca49d3100..c2a513a440b 100644 --- a/src/current/_includes/releases/v23.1/v23.1.30.md +++ b/src/current/_includes/releases/v23.1/v23.1.30.md @@ -6,30 +6,19 @@ Release Date: December 12, 2024

Security updates

-- All cluster settings that accept strings are now fully redacted when transmitted as part of CockroachDB's diagnostics telemetry. The transmitted payload includes a record of modified cluster settings and their values when they are not strings. If you previously applied the mitigations in [Technical Advisory 133479]({% link advisories/a133479.md %}), you can safely turn on diagnostic reporting via the `diagnostics.reporting.enabled` cluster setting without leaking sensitive cluster settings values. [#134063][#134063] +- All cluster settings that accept strings are now fully redacted when transmitted as part of CockroachDB's diagnostics telemetry. The transmitted payload includes a record of modified cluster settings and their values when they are not strings. If you previously applied the mitigations in [Technical Advisory 133479]({% link advisories/a133479.md %}), you can safely turn on diagnostic reporting via the `diagnostics.reporting.enabled` cluster setting without leaking sensitive cluster settings values. #134063

General changes

-- `COCKROACH_SKIP_ENABLING_DIAGNOSTIC_REPORTING` is no longer mentioned in the `cockroach demo` command. [#134083][#134083] +- `COCKROACH_SKIP_ENABLING_DIAGNOSTIC_REPORTING` is no longer mentioned in the `cockroach demo` command. #134083

Bug fixes

-- Fixed a bug where CockroachDB could encounter an internal error `interface conversion: coldata.Column is` in an edge case. The bug was present in v22.2.13+, v23.1.9+, and v23.2+. [#133758][#133758] -- Fixed a bug that caused incorrect `NOT NULL` constraint violation errors on `UPSERT` and `INSERT ... ON CONFLICT ... DO UPDATE` statements when those statements updated an existing row and a subset of columns that did not include a `NOT NULL` column of the table. This bug had been present since at least v20.1.0. [#133824][#133824] -- Fixed an unhandled error that could occur when using `REVOKE ... ON SEQUENCE FROM ... user` on an object that is not a sequence. [#133706][#133706] -- Addressed a panic that could occur inside `CREATE TABLE AS` if sequence builtin expressions had invalid function overloads. [#133866][#133866] -- Previously, when executing queries with index / lookup joins where ordering needed to be maintained, CockroachDB's behavior could lead to increased query latency, potentially by several orders of magnitude. This bug was introduced in v22.2, and is now fixed. [#134363][#134363] -- Fixed a bug where `DROP CASCADE` would occasionally panic with an `un-dropped backref` message on partitioned tables. [#134524][#134524] -- Reduced the duration of partitions in the gossip network when a node crashes. This eliminates false positives in the `ranges.unavailable` metric. 
[#134603][#134603] - -[#133706]: https://github.com/cockroachdb/cockroach/pull/133706 -[#133758]: https://github.com/cockroachdb/cockroach/pull/133758 -[#133824]: https://github.com/cockroachdb/cockroach/pull/133824 -[#133866]: https://github.com/cockroachdb/cockroach/pull/133866 -[#134063]: https://github.com/cockroachdb/cockroach/pull/134063 -[#134083]: https://github.com/cockroachdb/cockroach/pull/134083 -[#134363]: https://github.com/cockroachdb/cockroach/pull/134363 -[#134524]: https://github.com/cockroachdb/cockroach/pull/134524 -[#134603]: https://github.com/cockroachdb/cockroach/pull/134603 -[#134649]: https://github.com/cockroachdb/cockroach/pull/134649 -[154e9f0e0]: https://github.com/cockroachdb/cockroach/commit/154e9f0e0 +- Fixed a bug where CockroachDB could encounter an internal error `interface conversion: coldata.Column is` in an edge case. The bug was present in v22.2.13+, v23.1.9+, and v23.2+. #133758 +- Fixed a bug that caused incorrect `NOT NULL` constraint violation errors on `UPSERT` and `INSERT ... ON CONFLICT ... DO UPDATE` statements when those statements updated an existing row and a subset of columns that did not include a `NOT NULL` column of the table. This bug had been present since at least v20.1.0. #133824 +- Fixed an unhandled error that could occur when using `REVOKE ... ON SEQUENCE FROM ... user` on an object that is not a sequence. #133706 +- Addressed a panic that could occur inside `CREATE TABLE AS` if sequence builtin expressions had invalid function overloads. #133866 +- Previously, when executing queries with index / lookup joins where ordering needed to be maintained, CockroachDB's behavior could lead to increased query latency, potentially by several orders of magnitude. This bug was introduced in v22.2, and is now fixed. #134363 +- Fixed a bug where `DROP CASCADE` would occasionally panic with an `un-dropped backref` message on partitioned tables. 
#134524 +- Reduced the duration of partitions in the gossip network when a node crashes. This eliminates false positives in the `ranges.unavailable` metric. #134603 + diff --git a/src/current/_includes/releases/v23.1/v23.1.4.md b/src/current/_includes/releases/v23.1/v23.1.4.md index 9f528b9918d..3be44b28a53 100644 --- a/src/current/_includes/releases/v23.1/v23.1.4.md +++ b/src/current/_includes/releases/v23.1/v23.1.4.md @@ -6,39 +6,39 @@ Release Date: June 20, 2023

Security updates

-- The new [`sql.auth.createrole_allows_grant_role_membership.enabled`]({% link v23.1/cluster-settings.md %}#setting-sql-auth-createrole-allows-grant-role-membership-enabled) cluster setting allows a user with the [`CREATEROLE` role option]({% link v23.1/create-user.md %}#role-options) to grant and revoke non-admin roles. This cluster setting defaults to `false`, but is expected to become the default behavior in the future. [#104445][#104445] +- The new [`sql.auth.createrole_allows_grant_role_membership.enabled`]({% link v23.1/cluster-settings.md %}#setting-sql-auth-createrole-allows-grant-role-membership-enabled) cluster setting allows a user with the [`CREATEROLE` role option]({% link v23.1/create-user.md %}#role-options) to grant and revoke non-admin roles. This cluster setting defaults to `false`, but is expected to become the default behavior in the future. #104445

{{ site.data.products.enterprise }} edition changes

-- Fixed an initialization race condition in [changefeed schema feeds]({% link v23.1/changefeed-examples.md %}) that could cause a node to crash with a null pointer exception. [#104934][#104934] +- Fixed an initialization race condition in [changefeed schema feeds]({% link v23.1/changefeed-examples.md %}) that could cause a node to crash with a null pointer exception. #104934

SQL language changes

-- Users with [`MODIFYSQLCLUSTERSETTING`](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#supported-privileges) can now view only `sql.defaults` [cluster settings]({% link v23.1/cluster-settings.md %}) instead of all cluster settings. [#104542][#104542] +- Users with [`MODIFYSQLCLUSTERSETTING`](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#supported-privileges) can now view only `sql.defaults` [cluster settings]({% link v23.1/cluster-settings.md %}) instead of all cluster settings. #104542

Operational changes

-- The [`debug.zip`]({% link v23.1/cockroach-debug-zip.md %}) archive now contains the files formerly located at `nodes/*/ranges/*.json` in one file per node, `nodes/*/ranges.json`. [#104248][#104248] -- The `http-defaults` and `http-servers` sections of the [log config]({% link v23.1/configure-logs.md %}) will now accept a `headers` field containing a map of key-value string pairs which will comprise custom HTTP headers appended to every request. Additionally, a `compression` value can now be set to `gzip` or `none` to select a compression method for the HTTP request body. By default, `gzip` is selected. Previous functionality did not compress by default. [#104814][#104814] +- The [`debug.zip`]({% link v23.1/cockroach-debug-zip.md %}) archive now contains the files formerly located at `nodes/*/ranges/*.json` in one file per node, `nodes/*/ranges.json`. #104248 +- The `http-defaults` and `http-servers` sections of the [log config]({% link v23.1/configure-logs.md %}) will now accept a `headers` field containing a map of key-value string pairs which will comprise custom HTTP headers appended to every request. Additionally, a `compression` value can now be set to `gzip` or `none` to select a compression method for the HTTP request body. By default, `gzip` is selected. Previous functionality did not compress by default. #104814

Command-line changes

-- The new log config option [`buffering`]({% link v23.1/configure-logs.md %}#log-buffering-for-network-sinks) allows you to [format buffer output as JSON arrays]({% link v23.1/configure-logs.md %}#file-logging-format). This is useful for APIs that consume JSON arrays, such as the Datadog logs API. [#104790][#104790] +- The new log config option [`buffering`]({% link v23.1/configure-logs.md %}#log-buffering-for-network-sinks) allows you to [format buffer output as JSON arrays]({% link v23.1/configure-logs.md %}#file-logging-format). This is useful for APIs that consume JSON arrays, such as the Datadog logs API. #104790

DB Console changes

-- The DB Console overview page now displays a warning when all nodes are running on a new version but the cluster upgrade has not been finalized. [#104874][#104874] -- The histogram window merge calculation now interpolates quantile values more accurately, and [**Metrics** charts]({% link v23.1/ui-overview.md %}#metrics) in the DB Console are smoother and more accurate. [#104815][#104815] +- The DB Console overview page now displays a warning when all nodes are running on a new version but the cluster upgrade has not been finalized. #104874 +- The histogram window merge calculation now interpolates quantile values more accurately, and [**Metrics** charts]({% link v23.1/ui-overview.md %}#metrics) in the DB Console are smoother and more accurate. #104815

Bug fixes

-- Fixed a bug where a transaction retry could miss job rows during the backfill of the [**Jobs** table]({% link v23.1/ui-jobs-page.md %}#jobs-table). [#104757][#104757] -- Fixed a bug where admin or root user privileges were erroneously required to use [`SHOW SYSTEM GRANTS`]({% link v23.1/show-system-grants.md %}). [#104732][#104732] -- Fixed a bug that prevented display of the column selector on the [**Jobs** page]({% link v23.1/ui-jobs-page.md %}). [#104738][#104738] -- Fixed a bug where an invalid split could crash and prevent restarts of nodes that hold a replica for the right-hand side. [#104850][#104850] -- Fixed the `debug recover make-plan` command to ignore partial range metadata when the metadata can't be fully read, and instead rely solely on replica info from storage to produce the recovery plan. [#104774][#104774] -- Fixed a metric bug that could cause volumes such as RAID logical volumes to be counted twice.[#104806][#104806] -- Fixed a bug in `upstream etcd-io/raft` which could cause an unlimited amount of log to be loaded into memory. This could cause a node to crash with an out-of-memory (OOM) exception. The log scan now has a limited memory footprint. [#104968][#104968] +- Fixed a bug where a transaction retry could miss job rows during the backfill of the [**Jobs** table]({% link v23.1/ui-jobs-page.md %}#jobs-table). #104757 +- Fixed a bug where admin or root user privileges were erroneously required to use [`SHOW SYSTEM GRANTS`]({% link v23.1/show-system-grants.md %}). #104732 +- Fixed a bug that prevented display of the column selector on the [**Jobs** page]({% link v23.1/ui-jobs-page.md %}). #104738 +- Fixed a bug where an invalid split could crash and prevent restarts of nodes that hold a replica for the right-hand side. #104850 +- Fixed the `debug recover make-plan` command to ignore partial range metadata when the metadata can't be fully read, and instead rely solely on replica info from storage to produce the recovery plan. 
#104774 +- Fixed a metric bug that could cause volumes such as RAID logical volumes to be counted twice. #104806 +- Fixed a bug in upstream `etcd-io/raft` which could cause an unlimited amount of log to be loaded into memory. This could cause a node to crash with an out-of-memory (OOM) exception. The log scan now has a limited memory footprint. #104968
@@ -48,19 +48,3 @@ This release includes 53 merged PRs by 30 authors.
-[#104248]: https://github.com/cockroachdb/cockroach/pull/104248 -[#104445]: https://github.com/cockroachdb/cockroach/pull/104445 -[#104542]: https://github.com/cockroachdb/cockroach/pull/104542 -[#104732]: https://github.com/cockroachdb/cockroach/pull/104732 -[#104738]: https://github.com/cockroachdb/cockroach/pull/104738 -[#104757]: https://github.com/cockroachdb/cockroach/pull/104757 -[#104774]: https://github.com/cockroachdb/cockroach/pull/104774 -[#104790]: https://github.com/cockroachdb/cockroach/pull/104790 -[#104806]: https://github.com/cockroachdb/cockroach/pull/104806 -[#104814]: https://github.com/cockroachdb/cockroach/pull/104814 -[#104815]: https://github.com/cockroachdb/cockroach/pull/104815 -[#104819]: https://github.com/cockroachdb/cockroach/pull/104819 -[#104850]: https://github.com/cockroachdb/cockroach/pull/104850 -[#104874]: https://github.com/cockroachdb/cockroach/pull/104874 -[#104934]: https://github.com/cockroachdb/cockroach/pull/104934 -[#104968]: https://github.com/cockroachdb/cockroach/pull/104968 diff --git a/src/current/_includes/releases/v23.1/v23.1.5.md b/src/current/_includes/releases/v23.1/v23.1.5.md index f737af77996..aa2bfaa92e0 100644 --- a/src/current/_includes/releases/v23.1/v23.1.5.md +++ b/src/current/_includes/releases/v23.1/v23.1.5.md @@ -6,53 +6,53 @@ Release Date: July 5, 2023

Security updates

-- The full set of TLS ciphers that was present in v22.1 have been included in the existing cipher suites list, which can be enabled with the `COCKROACH_TLS_ENABLE_OLD_CIPHER_SUITES` environment variable. [#105370][#105370] +- The full set of TLS ciphers that was present in v22.1 have been included in the existing cipher suites list, which can be enabled with the `COCKROACH_TLS_ENABLE_OLD_CIPHER_SUITES` environment variable. #105370

{{ site.data.products.enterprise }} edition changes

-- Added new [cluster settings]({% link v23.1/cluster-settings.md %}) in the `server.oidc_authentication.generate_cluster_sso_token` namespace that support using OIDC to generate a [JWT auth token for cluster SSO]({% link v23.1/sso-sql.md %}). [#105643][#105643] +- Added new [cluster settings]({% link v23.1/cluster-settings.md %}) in the `server.oidc_authentication.generate_cluster_sso_token` namespace that support using OIDC to generate a [JWT auth token for cluster SSO]({% link v23.1/sso-sql.md %}). #105643

SQL language changes

-- Improved the inline documentation and error messages related to [`SHOW RANGES`]({% link v23.1/show-ranges.md %}). [#105278][#105278] -- Inbound [foreign keys]({% link v23.1/foreign-key.md %}) on TTL tables are now allowed. [#105341][#105341] -- Added the columns `default_value` and `origin` ( with the values `default`, `override`, `external-override`) to the [`SHOW CLUSTER SETTING`]({% link v23.1/show-cluster-setting.md %}) command. [#105452][#105452] -- Added the `VIEWACTIVITY` and `VIEWACTIVITYREDACTED` [system privileges](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#supported-privileges) for the `crdb_internal.cluster_queries` and `crdb_internal.node_queries` [tables]({% link v23.1/crdb-internal.md %}). [#105467][#105467] -- Fixed the error message to be more understandable when attempting to create [UDFs]({% link v23.1/user-defined-functions.md %}) under a virtual or temporary schema. [#105627][#105627] +- Improved the inline documentation and error messages related to [`SHOW RANGES`]({% link v23.1/show-ranges.md %}). #105278 +- Inbound [foreign keys]({% link v23.1/foreign-key.md %}) on TTL tables are now allowed. #105341 +- Added the columns `default_value` and `origin` (with the values `default`, `override`, `external-override`) to the [`SHOW CLUSTER SETTING`]({% link v23.1/show-cluster-setting.md %}) command. #105452 +- Added the `VIEWACTIVITY` and `VIEWACTIVITYREDACTED` [system privileges](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#supported-privileges) for the `crdb_internal.cluster_queries` and `crdb_internal.node_queries` [tables]({% link v23.1/crdb-internal.md %}). #105467 +- Fixed the error message to be more understandable when attempting to create [UDFs]({% link v23.1/user-defined-functions.md %}) under a virtual or temporary schema. #105627

Command-line changes

-- Running [`cockroch node decommission `]({% link v23.1/cockroach-node.md %}#node-decommission) for a node that has already been decommissioned will now exit with code 0, as had been the case in CockroachDB versions prior to v23.1.0. [#103933][#103933] +- Running [`cockroach node decommission`]({% link v23.1/cockroach-node.md %}#node-decommission) for a node that has already been decommissioned will now exit with code 0, as had been the case in CockroachDB versions prior to v23.1.0. #103933

DB Console changes

-- The [**Range Report** page]({% link v23.1/ui-hot-ranges-page.md %}#range-report) (route `/reports/range/:rangeID`) shows the **Hot Ranges** menu item as highlighted in the left-side menu. The back button in the **Range Report** page redirects back to the **Hot Ranges** page. [#104980][#104980] -- Added description to the tooltip for the `Idle` status only for the Active Transactions view. Excluded `Idle` status filter option for the Active Statements view. [#105062][#105062] +- The [**Range Report** page]({% link v23.1/ui-hot-ranges-page.md %}#range-report) (route `/reports/range/:rangeID`) shows the **Hot Ranges** menu item as highlighted in the left-side menu. The back button in the **Range Report** page redirects back to the **Hot Ranges** page. #104980 +- Added description to the tooltip for the `Idle` status only for the Active Transactions view. Excluded `Idle` status filter option for the Active Statements view. #105062

Bug fixes

-- Added more precision to small percentage values on the percentage bars on the DB Console. [#105078][#105078] -- Fixed a crash when using `DurationToNumber` with empty duration object on SQL Activity tables. [#105152][#105152] -- `SpanStats` is no longer subject to stale information, and should be considered authoritative. [#105000][#105000] -- Fixed a bug in MuxRangefeed implementation that may cause MuxRangefeed to become stuck if enough ranges encountered certain error concurrently. [#105186][#105186] -- [`SHOW CHANGEFEED JOBS`]({% link v23.1/show-jobs.md %}) no longer fails on v22.2 and v23.1 mixed-version clusters. [#105008][#105008] -- [`GRANT SYSTEM ALL ...`]({% link v23.1/grant.md %}) no longer causes the grantee to be unable to log in. This was due to a bug where `ALL` would include the `NOSQLLOGIN` [system privilege](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#supported-privileges). Since `NOSQLLOGIN` is the only "negative" privilege, it is now excluded from the `ALL` shorthand, and must be granted explicitly in order to restrict logins. [#105080][#105080] -- CockroachDB previously could encounter `zero transaction timestamp in EvalContext` when evaluating the [`COPY FROM`]({% link v23.1/copy.md %}) command. The bug was introduced in v23.1.0 and is now fixed. [#105295][#105295] -- Fixed the data fetching for the **Database** and **Table Details** pages. Prior to this change, some databases/tables could be permanently stuck in a loading state, causing their corresponding page to permanently show a spinner. This change fixes the data fetching for these pages to ensure all databases and tables are loaded correctly. [#105315][#105315] -- Fixed a bug where [`INSERT .. ON CONFLICT .. DO UPDATE`]({% link v23.1/insert.md %}) queries incorrectly resulted in an "ambiguous column" error. The bug only presented if the target table had a computed column with an expression referencing a column with a `DEFAULT` value. 
[#105025][#105025] -- On the **SQL Statistics** pages, a transaction query is now available as long as the statement fingerprints associated with the transaction also exist in the payload. [#105318][#105318] -- Previously, CockroachDB would crash when evaluating [`CREATE TABLE .. AS or CREATE MATERIALIZED VIEW .. AS`]({% link v23.1/create-table.md %}) statements when the `AS` clause selected data from `crdb_internal.cluster_statement_statistics` or `crdb_internal.cluster_transaction_statistics` virtual tables. The bug has been present since at least v22.1 and is now fixed. [#105325][#105325] -- Fixed a source of mutex contention within the storage engine that could increase tail latencies on high-CPU, high-throughput deployments. [#105361][#105361] -- Fixed a bug where nodes could terminate with the following message: `server startup failed: cockroach server exited with error: ‹migration-job-find-already-completed›: key range id:X is unavailable: ‹failed to send RPC: no replica node information available via gossip for rX›`. [#104246][#104246] -- Previously, cross-database type references were allowed through `CREATE TABLE...AS` statements if the source table was from another database and any of its columns was of a user-defined type. This introduced a bug where the source table could be dropped and the type could not be found for the CTAS table. This commit disallows such CTAS as a fix. [#105621][#105621] -- CockroachDB can now automatically delete statistics for dropped tables from `system.table_statistics` table. However, out of caution, this mechanism is disabled by default on v23.1 releases. [#105479][#105479] -- Fixed a race condition that can occur when multiple SQL servers are created simultaneously, causing simultaneous writes to an unprotected global variable used to configure a CCL audit logging feature. 
[#105725][#105725] -- Previously, referencing a user-defined type in the body of a [user-defined function]({% link v23.1/user-defined-functions.md %}) would result in an error at the time of creating the function. This is now fixed. [#105617][#105617] -- CockroachDB now returns an error during [UDF]({% link v23.1/user-defined-functions.md %}) creation if an input argument has type `RECORD`. [#105735][#105735] +- Added more precision to small percentage values on the percentage bars on the DB Console. #105078 +- Fixed a crash when using `DurationToNumber` with empty duration object on SQL Activity tables. #105152 +- `SpanStats` is no longer subject to stale information, and should be considered authoritative. #105000 +- Fixed a bug in MuxRangefeed implementation that may cause MuxRangefeed to become stuck if enough ranges encountered certain error concurrently. #105186 +- [`SHOW CHANGEFEED JOBS`]({% link v23.1/show-jobs.md %}) no longer fails on v22.2 and v23.1 mixed-version clusters. #105008 +- [`GRANT SYSTEM ALL ...`]({% link v23.1/grant.md %}) no longer causes the grantee to be unable to log in. This was due to a bug where `ALL` would include the `NOSQLLOGIN` [system privilege](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#supported-privileges). Since `NOSQLLOGIN` is the only "negative" privilege, it is now excluded from the `ALL` shorthand, and must be granted explicitly in order to restrict logins. #105080 +- CockroachDB previously could encounter `zero transaction timestamp in EvalContext` when evaluating the [`COPY FROM`]({% link v23.1/copy.md %}) command. The bug was introduced in v23.1.0 and is now fixed. #105295 +- Fixed the data fetching for the **Database** and **Table Details** pages. Prior to this change, some databases/tables could be permanently stuck in a loading state, causing their corresponding page to permanently show a spinner. 
This change fixes the data fetching for these pages to ensure all databases and tables are loaded correctly. #105315 +- Fixed a bug where [`INSERT .. ON CONFLICT .. DO UPDATE`]({% link v23.1/insert.md %}) queries incorrectly resulted in an "ambiguous column" error. The bug only presented if the target table had a computed column with an expression referencing a column with a `DEFAULT` value. #105025 +- On the **SQL Statistics** pages, a transaction query is now available as long as the statement fingerprints associated with the transaction also exist in the payload. #105318 +- Previously, CockroachDB would crash when evaluating [`CREATE TABLE .. AS or CREATE MATERIALIZED VIEW .. AS`]({% link v23.1/create-table.md %}) statements when the `AS` clause selected data from `crdb_internal.cluster_statement_statistics` or `crdb_internal.cluster_transaction_statistics` virtual tables. The bug has been present since at least v22.1 and is now fixed. #105325 +- Fixed a source of mutex contention within the storage engine that could increase tail latencies on high-CPU, high-throughput deployments. #105361 +- Fixed a bug where nodes could terminate with the following message: `server startup failed: cockroach server exited with error: ‹migration-job-find-already-completed›: key range id:X is unavailable: ‹failed to send RPC: no replica node information available via gossip for rX›`. #104246 +- Previously, cross-database type references were allowed through `CREATE TABLE...AS` statements if the source table was from another database and any of its columns was of a user-defined type. This introduced a bug where the source table could be dropped and the type could not be found for the CTAS table. This commit disallows such CTAS as a fix. #105621 +- CockroachDB can now automatically delete statistics for dropped tables from `system.table_statistics` table. However, out of caution, this mechanism is disabled by default on v23.1 releases. 
#105479 +- Fixed a race condition that can occur when multiple SQL servers are created simultaneously, causing simultaneous writes to an unprotected global variable used to configure a CCL audit logging feature. #105725 +- Previously, referencing a user-defined type in the body of a [user-defined function]({% link v23.1/user-defined-functions.md %}) would result in an error at the time of creating the function. This is now fixed. #105617 +- CockroachDB now returns an error during [UDF]({% link v23.1/user-defined-functions.md %}) creation if an input argument has type `RECORD`. #105735

Performance improvements

-- If the `sql.optimizer.uniqueness_checks_for_gen_random_uuid.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}) is disabled, the optimizer can now eliminate uniqueness checks for `STRING` and `BYTES` columns when the value is set to `gen_random_uuid()` (with an implicit or explicit cast to `STRING` or `BYTES`). If you still want the checks, you can set `sql.optimizer.uniqueness_checks_for_gen_random_uuid.enabled` to `true` (the default is `false`). [#105446][#105446] +- If the `sql.optimizer.uniqueness_checks_for_gen_random_uuid.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}) is disabled, the optimizer can now eliminate uniqueness checks for `STRING` and `BYTES` columns when the value is set to `gen_random_uuid()` (with an implicit or explicit cast to `STRING` or `BYTES`). If you still want the checks, you can set `sql.optimizer.uniqueness_checks_for_gen_random_uuid.enabled` to `true` (the default is `false`). #105446
@@ -62,32 +62,3 @@ This release includes 115 merged PRs by 44 authors.
-[#103933]: https://github.com/cockroachdb/cockroach/pull/103933 -[#104246]: https://github.com/cockroachdb/cockroach/pull/104246 -[#104980]: https://github.com/cockroachdb/cockroach/pull/104980 -[#105000]: https://github.com/cockroachdb/cockroach/pull/105000 -[#105008]: https://github.com/cockroachdb/cockroach/pull/105008 -[#105025]: https://github.com/cockroachdb/cockroach/pull/105025 -[#105062]: https://github.com/cockroachdb/cockroach/pull/105062 -[#105078]: https://github.com/cockroachdb/cockroach/pull/105078 -[#105080]: https://github.com/cockroachdb/cockroach/pull/105080 -[#105152]: https://github.com/cockroachdb/cockroach/pull/105152 -[#105186]: https://github.com/cockroachdb/cockroach/pull/105186 -[#105278]: https://github.com/cockroachdb/cockroach/pull/105278 -[#105295]: https://github.com/cockroachdb/cockroach/pull/105295 -[#105315]: https://github.com/cockroachdb/cockroach/pull/105315 -[#105318]: https://github.com/cockroachdb/cockroach/pull/105318 -[#105325]: https://github.com/cockroachdb/cockroach/pull/105325 -[#105341]: https://github.com/cockroachdb/cockroach/pull/105341 -[#105361]: https://github.com/cockroachdb/cockroach/pull/105361 -[#105370]: https://github.com/cockroachdb/cockroach/pull/105370 -[#105446]: https://github.com/cockroachdb/cockroach/pull/105446 -[#105452]: https://github.com/cockroachdb/cockroach/pull/105452 -[#105467]: https://github.com/cockroachdb/cockroach/pull/105467 -[#105479]: https://github.com/cockroachdb/cockroach/pull/105479 -[#105617]: https://github.com/cockroachdb/cockroach/pull/105617 -[#105621]: https://github.com/cockroachdb/cockroach/pull/105621 -[#105627]: https://github.com/cockroachdb/cockroach/pull/105627 -[#105643]: https://github.com/cockroachdb/cockroach/pull/105643 -[#105725]: https://github.com/cockroachdb/cockroach/pull/105725 -[#105735]: https://github.com/cockroachdb/cockroach/pull/105735 diff --git a/src/current/_includes/releases/v23.1/v23.1.6.md b/src/current/_includes/releases/v23.1/v23.1.6.md 
index 56c000b932a..f1952cf1291 100644 --- a/src/current/_includes/releases/v23.1/v23.1.6.md +++ b/src/current/_includes/releases/v23.1/v23.1.6.md @@ -6,9 +6,9 @@ Release Date: July 24, 2023

Bug fixes

-- Fixed a bug in v23.1.5 where [debug zips]({% link v23.1/cockroach-debug-zip.md %}) were empty in the `crdb_internal.cluster_settings.txt` file. Debug zips now properly show the information from `cluster_settings`. [#107105][#107105] -- Fixed a bug where some primary indexes would incorrectly be treated internally as secondary indexes, which could cause schema change operations to fail. The bug could occur if [`ALTER PRIMARY KEY`]({% link v23.1/alter-table.md %}#alter-primary-key) was used on v21.1 or earlier, and the cluster was upgraded. [#106426][#106426] -- Extended the `cockroach debug doctor` to detect [indexes]({% link v23.1/indexes.md %}) which could potentially lose data by being dropped when a column is stored inside them and added a check inside [`DROP INDEX`]({% link v23.1/drop-index.md %}) to prevent dropping indexes with this problem to avoid data loss. [#106863][#106863] +- Fixed a bug in v23.1.5 where [debug zips]({% link v23.1/cockroach-debug-zip.md %}) were empty in the `crdb_internal.cluster_settings.txt` file. Debug zips now properly show the information from `cluster_settings`. #107105 +- Fixed a bug where some primary indexes would incorrectly be treated internally as secondary indexes, which could cause schema change operations to fail. The bug could occur if [`ALTER PRIMARY KEY`]({% link v23.1/alter-table.md %}#alter-primary-key) was used on v21.1 or earlier, and the cluster was upgraded. #106426 +- Extended the `cockroach debug doctor` to detect [indexes]({% link v23.1/indexes.md %}) which could potentially lose data by being dropped when a column is stored inside them and added a check inside [`DROP INDEX`]({% link v23.1/drop-index.md %}) to prevent dropping indexes with this problem to avoid data loss. #106863
@@ -18,6 +18,3 @@ This release includes 3 merged PRs by 15 authors.
-[#106863]: https://github.com/cockroachdb/cockroach/pull/106863 -[#106426]: https://github.com/cockroachdb/cockroach/pull/106426 -[#107105]: https://github.com/cockroachdb/cockroach/pull/107105 diff --git a/src/current/_includes/releases/v23.1/v23.1.7.md b/src/current/_includes/releases/v23.1/v23.1.7.md index 116c3b86d81..22262a35aba 100644 --- a/src/current/_includes/releases/v23.1/v23.1.7.md +++ b/src/current/_includes/releases/v23.1/v23.1.7.md @@ -7,28 +7,28 @@ Release Date: July 31, 2023

SQL language changes

-- Added the `crdb_internal.reset_activity_tables` [built-in function]({% link v23.1/functions-and-operators.md %}) to allow users to reset the statistics in the `system.{statement|transaction}_activity` tables. Users require the [`admin` role](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#admin-role) to use this built-in function. [#106129][#106129] -- Added the `sql.telemetry.query_sampling.internal.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}), which is `false` by default. If set to `true`, internal app queries will be reported to [telemetry]({% link v23.1/logging.md %}#telemetry) when query sampling to telemetry is [enabled]({% link v23.1/configure-logs.md %}). [#107182][#107182] +- Added the `crdb_internal.reset_activity_tables` [built-in function]({% link v23.1/functions-and-operators.md %}) to allow users to reset the statistics in the `system.{statement|transaction}_activity` tables. Users require the [`admin` role](https://www.cockroachlabs.com/docs/v23.1/security-reference/authorization#admin-role) to use this built-in function. #106129 +- Added the `sql.telemetry.query_sampling.internal.enabled` [cluster setting]({% link v23.1/cluster-settings.md %}), which is `false` by default. If set to `true`, internal app queries will be reported to [telemetry]({% link v23.1/logging.md %}#telemetry) when query sampling to telemetry is [enabled]({% link v23.1/configure-logs.md %}). #107182

DB Console changes

-- Added a timescale label to the [**Diagnostics** tab]({% link v23.1/ui-statements-page.md %}#diagnostics) of the [**Statement Details** page]({% link v23.1/ui-statements-page.md %}). The time window for the statement diagnostics is now displayed. [#107082][#107082] -- Increased the timeout duration for loading the [**Hot Ranges** page]({% link v23.1/ui-hot-ranges-page.md %}) to 30 minutes. [#107497][#107497] +- Added a timescale label to the [**Diagnostics** tab]({% link v23.1/ui-statements-page.md %}#diagnostics) of the [**Statement Details** page]({% link v23.1/ui-statements-page.md %}). The time window for the statement diagnostics is now displayed. #107082 +- Increased the timeout duration for loading the [**Hot Ranges** page]({% link v23.1/ui-hot-ranges-page.md %}) to 30 minutes. #107497

Bug fixes

-- The **Regions/Nodes** column on the [**Database** and **Database Details** pages]({% link v23.1/ui-databases-page.md %}) will now render properly. This column displays for clusters with more than 1 node. [#105824][#105824] -- Fixed a bug where the [**Job Details** page]({% link v23.1/ui-jobs-page.md %}#job-details) would flicker between the job details and a loading animation while a job is still executing. [#106153][#106153] -- Fixed a bug where the [**Key Visualizer** page]({% link v23.1/ui-key-visualizer.md %}) would crash due to an invalid conversion to `int` error. [#106357][#106357] -- Fixed a bug that caused an infinite re-render on the [**Key Visualizer** page]({% link v23.1/ui-key-visualizer.md %}) when a custom time period was selected. [#106430][#106430] -- Fixed a bug on the **SQL Activity**, [**Statements** page]({% link v23.1/ui-statements-page.md %}) where the database filter would not return results even when statements existed for the selected database. This is now fixed so that selecting a database filter from the filters menu on the [**Statements** page]({% link v23.1/ui-statements-page.md %}) will function as expected. [#106434][#106434] -- Fixed a bug where the [**Metrics** page]({% link v23.1/ui-overview-dashboard.md %}) was not updating automatically on rolling window options. [#106766][#106766] -- Fixed a bug where [statement diagnostics]({% link v23.1/ui-statements-page.md %}#diagnostics) on the [**Statements** page]({% link v23.1/ui-statements-page.md %}) were not always showing. This is now fixed with statement diagnostics displaying for the correct time period. [#106766][#106766] -- Fixed a bug where the **Sort** dropdown on the [**Network Latency** page]({% link v23.1/ui-network-latency-page.md %}) would not persist the selected value because a page reload was triggered. 
[#107213][#107213] -- Fixed a bug where the index recommendation on the [**Insights** page]({% link v23.1/ui-insights-page.md %}) would use the fully qualified table name to create an index name, which would cause an error due to the invalid syntax. Indexes can now be created directly from the DB Console without encountering this error. [#107218][#107218] -- Fixed a bug where [changefeeds]({% link v23.1/change-data-capture-overview.md %}) would fail when upgrading to version [v23.1.5](#v23-1-5) because the job record did not have a `clusterID` field set. [#106399][#106399] -- Fixed a bug where `UPDATE`, `UPSERT`, `DELETE` statements running concurrently with [`ALTER TABLE..ADD COLUMN`]({% link v23.1/alter-table.md %}#add-column) of a virtual computed column on the same table would fail. [#107403][#107403] -- Fixed a bug where [SQL statistics compaction jobs]({% link v23.1/show-jobs.md %}#show-automatic-jobs) would fail. This is now fixed by reducing the contention on the `system.statement_statistics` table. [#107573][#107573] +- The **Regions/Nodes** column on the [**Database** and **Database Details** pages]({% link v23.1/ui-databases-page.md %}) will now render properly. This column displays for clusters with more than 1 node. #105824 +- Fixed a bug where the [**Job Details** page]({% link v23.1/ui-jobs-page.md %}#job-details) would flicker between the job details and a loading animation while a job is still executing. #106153 +- Fixed a bug where the [**Key Visualizer** page]({% link v23.1/ui-key-visualizer.md %}) would crash due to an invalid conversion to `int` error. #106357 +- Fixed a bug that caused an infinite re-render on the [**Key Visualizer** page]({% link v23.1/ui-key-visualizer.md %}) when a custom time period was selected. #106430 +- Fixed a bug on the **SQL Activity**, [**Statements** page]({% link v23.1/ui-statements-page.md %}) where the database filter would not return results even when statements existed for the selected database. 
This is now fixed so that selecting a database filter from the filters menu on the [**Statements** page]({% link v23.1/ui-statements-page.md %}) will function as expected. #106434 +- Fixed a bug where the [**Metrics** page]({% link v23.1/ui-overview-dashboard.md %}) was not updating automatically on rolling window options. #106766 +- Fixed a bug where [statement diagnostics]({% link v23.1/ui-statements-page.md %}#diagnostics) on the [**Statements** page]({% link v23.1/ui-statements-page.md %}) were not always showing. This is now fixed with statement diagnostics displaying for the correct time period. #106766 +- Fixed a bug where the **Sort** dropdown on the [**Network Latency** page]({% link v23.1/ui-network-latency-page.md %}) would not persist the selected value because a page reload was triggered. #107213 +- Fixed a bug where the index recommendation on the [**Insights** page]({% link v23.1/ui-insights-page.md %}) would use the fully qualified table name to create an index name, which would cause an error due to the invalid syntax. Indexes can now be created directly from the DB Console without encountering this error. #107218 +- Fixed a bug where [changefeeds]({% link v23.1/change-data-capture-overview.md %}) would fail when upgrading to version [v23.1.5](#v23-1-5) because the job record did not have a `clusterID` field set. #106399 +- Fixed a bug where `UPDATE`, `UPSERT`, `DELETE` statements running concurrently with [`ALTER TABLE..ADD COLUMN`]({% link v23.1/alter-table.md %}#add-column) of a virtual computed column on the same table would fail. #107403 +- Fixed a bug where [SQL statistics compaction jobs]({% link v23.1/show-jobs.md %}#show-automatic-jobs) would fail. This is now fixed by reducing the contention on the `system.statement_statistics` table. 
#107573 - Fixed a bug where running a [changefeed]({% link v23.2/change-data-capture-overview.md %}) that targets a table with a user-defined type column and with the [`envelope` option]({% link v23.2/create-changefeed.md %}#envelope) set to any value other than `wrapped` would cause a node panic due to a nil dereference. #119746
@@ -39,20 +39,3 @@ This release includes 16 merged PRs by 11 authors.
-[#105824]: https://github.com/cockroachdb/cockroach/pull/105824 -[#105947]: https://github.com/cockroachdb/cockroach/pull/105947 -[#106129]: https://github.com/cockroachdb/cockroach/pull/106129 -[#106153]: https://github.com/cockroachdb/cockroach/pull/106153 -[#106357]: https://github.com/cockroachdb/cockroach/pull/106357 -[#106434]: https://github.com/cockroachdb/cockroach/pull/106434 -[#106430]: https://github.com/cockroachdb/cockroach/pull/106430 -[#106766]: https://github.com/cockroachdb/cockroach/pull/106766 -[#107082]: https://github.com/cockroachdb/cockroach/pull/107082 -[#107182]: https://github.com/cockroachdb/cockroach/pull/107182 -[#107213]: https://github.com/cockroachdb/cockroach/pull/107213 -[#107218]: https://github.com/cockroachdb/cockroach/pull/107213 -[#106399]: https://github.com/cockroachdb/cockroach/pull/106399 -[#107403]: https://github.com/cockroachdb/cockroach/pull/107403 -[#107497]: https://github.com/cockroachdb/cockroach/pull/107497 -[#107573]: https://github.com/cockroachdb/cockroach/pull/107573 -[#119746]: https://github.com/cockroachdb/cockroach/pull/119746 diff --git a/src/current/_includes/releases/v23.1/v23.1.8.md b/src/current/_includes/releases/v23.1/v23.1.8.md index 9bb4f784fdd..dc6caa28741 100644 --- a/src/current/_includes/releases/v23.1/v23.1.8.md +++ b/src/current/_includes/releases/v23.1/v23.1.8.md @@ -15,12 +15,12 @@ Release Date: August 7, 2023 azure_client_secret: {MY_CLIENT_SECRET} ~~~ - Set the environment variable `COCKROACH_AZURE_APPLICATION_CREDENTIALS_FILE` to the path of the file. The implicit authentication method first attempts to authenticate using credentials in the file before attempting to authenticate using any other credentials specified in the [`DefaultAzureCredential` class](https://learn.microsoft.com/dotnet/api/azure.identity.defaultazurecredential?view=azure-dotnet), such as environment variables and Managed Identity. 
These credentials can also be used for [encrypted backups]({% link v23.1/take-and-restore-encrypted-backups.md %}#use-key-management-service). [#106688][#106688] + Set the environment variable `COCKROACH_AZURE_APPLICATION_CREDENTIALS_FILE` to the path of the file. The implicit authentication method first attempts to authenticate using credentials in the file before attempting to authenticate using any other credentials specified in the [`DefaultAzureCredential` class](https://learn.microsoft.com/dotnet/api/azure.identity.defaultazurecredential?view=azure-dotnet), such as environment variables and Managed Identity. These credentials can also be used for [encrypted backups]({% link v23.1/take-and-restore-encrypted-backups.md %}#use-key-management-service). #106688

Bug fixes

-- Fixed a bug where [`DROP INDEX ... CASCADE`](../v23.1/drop-index.html) could drop unrelated [foreign key constraints](../v23.1/foreign-key.html). For example, if `DROP INDEX ... CASCADE` was executed on a unique index for a `Table A`, and if a `Table B` had a foreign key reference to `Table A`, unrelated outbound foreign key constraints on `Table B` would be dropped. [#107793][#107793] -- Fixed a bug that would result in corruption of [encrypted data at rest on a cluster node](../v23.1/security-reference/encryption.html#encryption-at-rest). If a node with this corrupted state was restarted, the node could fail to rejoin the cluster. If multiple nodes encountered this bug at the same time during roll out, the cluster could lose [quorum](../v23.1/architecture/replication-layer.html#overview). For more information, refer to [Technical Advisory 106617](https://www.cockroachlabs.com/docs/advisories/a106617). [#107277][#107277] +- Fixed a bug where [`DROP INDEX ... CASCADE`](../v23.1/drop-index.html) could drop unrelated [foreign key constraints](../v23.1/foreign-key.html). For example, if `DROP INDEX ... CASCADE` was executed on a unique index for a `Table A`, and if a `Table B` had a foreign key reference to `Table A`, unrelated outbound foreign key constraints on `Table B` would be dropped. #107793 +- Fixed a bug that would result in corruption of [encrypted data at rest on a cluster node](../v23.1/security-reference/encryption.html#encryption-at-rest). If a node with this corrupted state was restarted, the node could fail to rejoin the cluster. If multiple nodes encountered this bug at the same time during roll out, the cluster could lose [quorum](../v23.1/architecture/replication-layer.html#overview). For more information, refer to [Technical Advisory 106617](https://www.cockroachlabs.com/docs/advisories/a106617). #107277
@@ -30,6 +30,3 @@ This release includes 5 merged PRs by 5 authors.
-[#106688]: https://github.com/cockroachdb/cockroach/pull/106688 -[#107793]: https://github.com/cockroachdb/cockroach/pull/107793 -[#107277]: https://github.com/cockroachdb/cockroach/pull/107277 diff --git a/src/current/_includes/releases/v23.1/v23.1.9.md b/src/current/_includes/releases/v23.1/v23.1.9.md index 0c93cdc147f..6b150ccd7a0 100644 --- a/src/current/_includes/releases/v23.1/v23.1.9.md +++ b/src/current/_includes/releases/v23.1/v23.1.9.md @@ -6,123 +6,123 @@ Release Date: September 7, 2023

General changes

-- All [cluster version upgrades](../v23.1/upgrade-cockroach-version.html) perform a [catalog](../v23.1/system-catalogs.html) health check and repair upgrade step before proceeding to other upgrade steps. [#105826][#105826] -- [Upgrading the cluster version](../v23.1/upgrade-cockroach-version.html) to a new release will not only check for descriptor and other [catalog](../v23.1/system-catalogs.html) corruptions, but will attempt to repair some of them on a best-effort basis. This should seamlessly get rid of all longstanding descriptor back-reference corruptions, which typically don't manifest themselves until a [schema change](../v23.1/online-schema-changes.html) or an upgrade are performed. [#106217][#106217] +- All [cluster version upgrades](../v23.1/upgrade-cockroach-version.html) perform a [catalog](../v23.1/system-catalogs.html) health check and repair upgrade step before proceeding to other upgrade steps. #105826 +- [Upgrading the cluster version](../v23.1/upgrade-cockroach-version.html) to a new release will not only check for descriptor and other [catalog](../v23.1/system-catalogs.html) corruptions, but will attempt to repair some of them on a best-effort basis. This should seamlessly get rid of all longstanding descriptor back-reference corruptions, which typically don't manifest themselves until a [schema change](../v23.1/online-schema-changes.html) or an upgrade are performed. #106217

{{ site.data.products.enterprise }} edition changes

-- [Changefeeds](../v23.1/create-and-configure-changefeeds.html) should no longer fail when upgrading to [v23.1.5](../releases/v23.1.html#v23-1-5). [#106399][#106399] -- Fixed an issue where [changefeeds](../v23.1/create-changefeed.html) emitting to a cloud sink with compression could experience resource leakage (memory and goroutines) when experiencing transient errors. [#106857][#106857] -- Augmented the credentials lookup chain for the implicit authentication method in Azure storage and KMS to first look for credentials in a file before proceeding on to the rest of the chain specified in the `DefaultAzureCredential` class (env vars, Managed Identity, etc.). The path of this file is specified by the environment variable `COCKROACH_AZURE_APPLICATION_CREDENTIALS_FILE` and, if not empty, is expected to point to a file that provides the Azure Tenant ID, Client ID, and Client Secret that will be used to access the storage or KMS resource in the following YAML format: `azure_tenant_id: MY_TENANT_ID, azure_client_id: MY_CLIENT_ID, azure_client_secret: MY_CLIENT_SECRET ` [#106688][#106688] -- Fixed a rare [changefeed](../v23.1/create-changefeed.html) issue that was triggered when the parent database or types were dropped, and instead of exiting with a descriptive error message, the changefeed would observe the following opaque error instead: `value type is not BYTES: UNKNOWN`. [#107933][#107933] -- Fixed a potential deadlock when running [changefeeds](../v23.1/create-changefeed.html) with the `end_time` option set. [#108074][#108074] +- [Changefeeds](../v23.1/create-and-configure-changefeeds.html) should no longer fail when upgrading to [v23.1.5](../releases/v23.1.html#v23-1-5). #106399 +- Fixed an issue where [changefeeds](../v23.1/create-changefeed.html) emitting to a cloud sink with compression could experience resource leakage (memory and goroutines) when experiencing transient errors. 
#106857 +- Augmented the credentials lookup chain for the implicit authentication method in Azure storage and KMS to first look for credentials in a file before proceeding on to the rest of the chain specified in the `DefaultAzureCredential` class (env vars, Managed Identity, etc.). The path of this file is specified by the environment variable `COCKROACH_AZURE_APPLICATION_CREDENTIALS_FILE` and, if not empty, is expected to point to a file that provides the Azure Tenant ID, Client ID, and Client Secret that will be used to access the storage or KMS resource in the following YAML format: `azure_tenant_id: MY_TENANT_ID, azure_client_id: MY_CLIENT_ID, azure_client_secret: MY_CLIENT_SECRET ` #106688 +- Fixed a rare [changefeed](../v23.1/create-changefeed.html) issue that was triggered when the parent database or types were dropped, and instead of exiting with a descriptive error message, the changefeed would observe the following opaque error instead: `value type is not BYTES: UNKNOWN`. #107933 +- Fixed a potential deadlock when running [changefeeds](../v23.1/create-changefeed.html) with the `end_time` option set. #108074

SQL language changes

-- Added a `crdb_internal.reset_activity_tables` [builtin function](../v23.1/functions-and-operators.html) to allow users to reset the [statistics](../v23.1/cost-based-optimizer.html#table-statistics) in the `system.statement_activity` and `system.transaction_activity` tables. Users require [`admin`](../v23.1/security-reference/authorization.html#admin-role) to use this builtin. [#106129][#106129] -- CockroachDB now reports [contention](../v23.1/performance-best-practices-overview.html#transaction-contention) time encountered while executing the mutation statements ([`INSERT`](../v23.1/insert.html), [`UPSERT`](../v23.1/upsert.html), [`UPDATE`](../v23.1/update.html), and [`DELETE`](../v23.1/delete.html)) when run via [`EXPLAIN ANALYZE`](../v23.1/explain-analyze.html). [#106432][#106432] -- Fixed a bug in [`COPY CSV WITH HEADER`](../v23.1/copy.html) that was first introduced in [v23.1](../releases/v23.1.html). On previous versions of CockroachDB, a workaround is to disable [vectorized](../v23.1/vectorized-execution.html) `COPY` by setting the `vectorize` [session variable](../v23.1/set-vars.html#supported-variables) to `false` or to remove the header row from the `COPY` data. [#106967][#106967] -- Added a new boolean [cluster setting ](../v23.1/cluster-settings.html) `sql.telemetry.query_sampling.internal.enabled`. If `true`, internal app queries will be reported to telemetry when query sampling to telemetry is enabled. [#107182][#107182] -- The `SHOW QUERIES` and [`SHOW SESSIONS`](../v23.1/show-sessions.html) commands will now display timestamps using the session's timezone setting. [#108144][#108144] -- Added the [cluster setting](../v23.1/cluster-settings.html) `sql.stats.limit_table_size.enabled`, which controls whether or not we enforce the row limit set by the `sql.stats.persisted_rows.max` cluster setting in the `system.statement_statistics` and `system.transaction_statistics` tables. 
[#108913][#108913] -- Optimized the `sql-stats-compaction` [job](../v23.1/show-jobs.html)'s [delete query](../v23.1/delete.html) to avoid a [full scan](../v23.1/sql-tuning-with-explain.html#issue-full-table-scans). This helps avoid a [transaction retry error](../v23.1/common-errors.html#restart-transaction) which can cause the job to fail. [#108990][#108990] -- Introspection queries will now show the internal `node` user as the owner of tables in [`pg_catalog`](../v23.1/pg-catalog.html) and [`information_schema`](../v23.1/information-schema.html). Previously, the owner was shown as `admin`, but that was inaccurate since users with [the `admin` role](../v23.1/security-reference/authorization.html#admin-role) could not modify these tables in any way. [#109736][#109736] +- Added a `crdb_internal.reset_activity_tables` [builtin function](../v23.1/functions-and-operators.html) to allow users to reset the [statistics](../v23.1/cost-based-optimizer.html#table-statistics) in the `system.statement_activity` and `system.transaction_activity` tables. Users require [`admin`](../v23.1/security-reference/authorization.html#admin-role) to use this builtin. #106129 +- CockroachDB now reports [contention](../v23.1/performance-best-practices-overview.html#transaction-contention) time encountered while executing the mutation statements ([`INSERT`](../v23.1/insert.html), [`UPSERT`](../v23.1/upsert.html), [`UPDATE`](../v23.1/update.html), and [`DELETE`](../v23.1/delete.html)) when run via [`EXPLAIN ANALYZE`](../v23.1/explain-analyze.html). #106432 +- Fixed a bug in [`COPY CSV WITH HEADER`](../v23.1/copy.html) that was first introduced in [v23.1](../releases/v23.1.html). On previous versions of CockroachDB, a workaround is to disable [vectorized](../v23.1/vectorized-execution.html) `COPY` by setting the `vectorize` [session variable](../v23.1/set-vars.html#supported-variables) to `false` or to remove the header row from the `COPY` data. 
#106967 +- Added a new boolean [cluster setting ](../v23.1/cluster-settings.html) `sql.telemetry.query_sampling.internal.enabled`. If `true`, internal app queries will be reported to telemetry when query sampling to telemetry is enabled. #107182 +- The `SHOW QUERIES` and [`SHOW SESSIONS`](../v23.1/show-sessions.html) commands will now display timestamps using the session's timezone setting. #108144 +- Added the [cluster setting](../v23.1/cluster-settings.html) `sql.stats.limit_table_size.enabled`, which controls whether or not we enforce the row limit set by the `sql.stats.persisted_rows.max` cluster setting in the `system.statement_statistics` and `system.transaction_statistics` tables. #108913 +- Optimized the `sql-stats-compaction` [job](../v23.1/show-jobs.html)'s [delete query](../v23.1/delete.html) to avoid a [full scan](../v23.1/sql-tuning-with-explain.html#issue-full-table-scans). This helps avoid a [transaction retry error](../v23.1/common-errors.html#restart-transaction) which can cause the job to fail. #108990 +- Introspection queries will now show the internal `node` user as the owner of tables in [`pg_catalog`](../v23.1/pg-catalog.html) and [`information_schema`](../v23.1/information-schema.html). Previously, the owner was shown as `admin`, but that was inaccurate since users with [the `admin` role](../v23.1/security-reference/authorization.html#admin-role) could not modify these tables in any way. #109736

Operational changes

-- Added two new [metrics](../v23.1/metrics.html) to monitor [lease range preference](../v23.1/configure-replication-zones.html#lease_preferences) conformance: `leases.preferences.violating` indicates the number of valid leases a [store](../v23.1/cockroach-start.html#store) owns that satisfy none of the preferences applied; `leases.preferences.less-preferred` indicates the number of valid leases a store owns that satisfy some of the preferences applied, but not the first preference. [#107622][#107622] -- Added the `kv.enqueue_in_replicate_queue_on_span_config_update.enabled` [cluster setting.](../v23.1/cluster-settings.html). When set to `true`, [stores](../v23.1/cockroach-start.html#store) in the cluster will enqueue [replicas](../v23.1/architecture/overview.html#architecture-replica) for [replication changes](../v23.1/architecture/replication-layer.html) upon receiving config updates which could affect the replica. This setting is off by default. Enabling this setting speeds up how quickly config-triggered replication changes begin, but adds additional CPU overhead. The overhead scales with the number of leaseholders. [#108816][#108816] -- The RPC dial and heartbeat timeouts can now be configured via the following environment variables: `COCKROACH_RPC_DIAL_TIMEOUT`, which defaults to 2x the value of `COCKROACH_NETWORK_TIMEOUT`; `COCKROACH_RPC_HEARTBEAT_TIMEOUT`, which defaults to 3x `COCKROACH_NETWORK_TIMEOUT`. This allows configuring these values independently of [`COCKROACH_NETWORK_TIMEOUT`](../v23.1/architecture/distribution-layer.html#grpc), which defaults to {{site.data.constants.cockroach_network_timeout}}. [#109359][#109359] -- The default gRPC server-side send timeout has been increased from 2 seconds to 4 seconds (that is, from 1x to 2x the default value of [`COCKROACH_NETWORK_TIMEOUT`](../v23.1/architecture/distribution-layer.html#grpc)), to avoid spurious connection failures in certain scenarios. 
This can be controlled via the new environment variable `COCKROACH_RPC_SERVER_TIMEOUT`. [#109621][#109621] -- Added a new gauge [metric](../v23.1/metrics.html) `sql.schema.invalid_objects`. This gauge is periodically updated based on the schedule set by the `sql.schema.telemetry.recurrence` [cluster setting](../v23.1/cluster-settings.html). When the metric is updated, it counts the number of schema objects ([tables](../v23.1/create-table.html), [types](../v23.1/create-type.html), [schemas](../v23.1/create-schema.html), [databases](../v23.1/create-database.html), and [functions](../v23.1/user-defined-functions.html)) that are in an invalid state according to CockroachDB's internal validation checks. This metric is expected to be zero (`0`) in a healthy cluster. If it is not zero, it indicates that there is a problem that must be repaired. [#109739][#109739] -- The [cluster setting](../v23.1/cluster-settings.html) `kv.allocator.lease_rebalance_threshold` can now be used to control the minimum fraction away from the mean a [store's](../v23.1/cockroach-start.html#store) lease count before it is considered for lease transfers. The default setting is 0.05. [#105950][#105950] -- On Linux/ARM64, 16k page sizes are now used in jemalloc. This allows Linux/ARM64 users with 16k pages to run `cockroach`. [#107131][#107131] +- Added two new [metrics](../v23.1/metrics.html) to monitor [lease range preference](../v23.1/configure-replication-zones.html#lease_preferences) conformance: `leases.preferences.violating` indicates the number of valid leases a [store](../v23.1/cockroach-start.html#store) owns that satisfy none of the preferences applied; `leases.preferences.less-preferred` indicates the number of valid leases a store owns that satisfy some of the preferences applied, but not the first preference. #107622 +- Added the `kv.enqueue_in_replicate_queue_on_span_config_update.enabled` [cluster setting.](../v23.1/cluster-settings.html). 
When set to `true`, [stores](../v23.1/cockroach-start.html#store) in the cluster will enqueue [replicas](../v23.1/architecture/overview.html#architecture-replica) for [replication changes](../v23.1/architecture/replication-layer.html) upon receiving config updates which could affect the replica. This setting is off by default. Enabling this setting speeds up how quickly config-triggered replication changes begin, but adds additional CPU overhead. The overhead scales with the number of leaseholders. #108816 +- The RPC dial and heartbeat timeouts can now be configured via the following environment variables: `COCKROACH_RPC_DIAL_TIMEOUT`, which defaults to 2x the value of `COCKROACH_NETWORK_TIMEOUT`; `COCKROACH_RPC_HEARTBEAT_TIMEOUT`, which defaults to 3x `COCKROACH_NETWORK_TIMEOUT`. This allows configuring these values independently of [`COCKROACH_NETWORK_TIMEOUT`](../v23.1/architecture/distribution-layer.html#grpc), which defaults to {{site.data.constants.cockroach_network_timeout}}. #109359 +- The default gRPC server-side send timeout has been increased from 2 seconds to 4 seconds (that is, from 1x to 2x the default value of [`COCKROACH_NETWORK_TIMEOUT`](../v23.1/architecture/distribution-layer.html#grpc)), to avoid spurious connection failures in certain scenarios. This can be controlled via the new environment variable `COCKROACH_RPC_SERVER_TIMEOUT`. #109621 +- Added a new gauge [metric](../v23.1/metrics.html) `sql.schema.invalid_objects`. This gauge is periodically updated based on the schedule set by the `sql.schema.telemetry.recurrence` [cluster setting](../v23.1/cluster-settings.html). When the metric is updated, it counts the number of schema objects ([tables](../v23.1/create-table.html), [types](../v23.1/create-type.html), [schemas](../v23.1/create-schema.html), [databases](../v23.1/create-database.html), and [functions](../v23.1/user-defined-functions.html)) that are in an invalid state according to CockroachDB's internal validation checks. 
This metric is expected to be zero (`0`) in a healthy cluster. If it is not zero, it indicates that there is a problem that must be repaired. #109739 +- The [cluster setting](../v23.1/cluster-settings.html) `kv.allocator.lease_rebalance_threshold` can now be used to control the minimum fraction away from the mean a [store's](../v23.1/cockroach-start.html#store) lease count before it is considered for lease transfers. The default setting is 0.05. #105950 +- On Linux/ARM64, 16k page sizes are now used in jemalloc. This allows Linux/ARM64 users with 16k pages to run `cockroach`. #107131

DB Console changes

-- The generic DB Console "unexpected error" message now includes details about the actual error, along with other context to make it easier to root cause. [#106404][#106404] -- Fixed a bug where the [job details page](../v23.1/ui-jobs-page.html) would flicker between the job details and a loading animation when a job was still executing. [#106153][#106153] -- Added a timescale label to the **Diagnostics** tab of the [Statement Details page](../v23.1/ui-statements-page.html). Users are now able to see the time window for which the [statement diagnostics](../v23.1/explain-analyze.html#explain-analyze-debug) are displayed. [#107082][#107082] -- Fixed the options selection in the **Sort** dropdown on the [Network Latency page](../v23.1/ui-network-latency-page.html). [#107213][#107213] -- Search is performed on all ID fields of the **Transaction Executions** and **Statement Executions** views on the [Insights Page](../v23.1/ui-insights-page.html). [#107762][#107762] -- Enabled the `Now` option on the time picker that appears on several pages of the [DB Console](../v23.1/ui-overview.html). [#107837][#107837] +- The generic DB Console "unexpected error" message now includes details about the actual error, along with other context to make it easier to root cause. #106404 +- Fixed a bug where the [job details page](../v23.1/ui-jobs-page.html) would flicker between the job details and a loading animation when a job was still executing. #106153 +- Added a timescale label to the **Diagnostics** tab of the [Statement Details page](../v23.1/ui-statements-page.html). Users are now able to see the time window for which the [statement diagnostics](../v23.1/explain-analyze.html#explain-analyze-debug) are displayed. #107082 +- Fixed the options selection in the **Sort** dropdown on the [Network Latency page](../v23.1/ui-network-latency-page.html). 
#107213 +- Search is performed on all ID fields of the **Transaction Executions** and **Statement Executions** views on the [Insights Page](../v23.1/ui-insights-page.html). #107762 +- Enabled the `Now` option on the time picker that appears on several pages of the [DB Console](../v23.1/ui-overview.html). #107837

Bug fixes

-- Fixed a bug where the **node** and **regions** columns on the [Databases Page](../v23.1/ui-databases-page.html) did not properly render. This column is shown for clusters with more than 1 node. [#105824][#105824] -- Fixed an internal error when using [`EXPLAIN (TYPES)`](../v23.1/explain.html#types-option) on a [`DELETE FROM ... USING ... RETURNING`](../v23.1/delete.html) statement. This error was introduced in v23.1.0. [#106196][#106196] -- Fixed a bug which manifested itself in error messages containing "failed to drop all of the relevant elements" when executing DDL statements with the declarative [schema changer](../v23.1/online-schema-changes.html). What this really means is that there's a concurrent schema change that is ongoing. Instead we now behave as expected and wait for it to finish. [#106286][#106286] -- Fixed a panic executing [`CREATE AS`](../v23.1/create-table-as.html) with `pg_catalog.pg_prepared_statements` as a source. For example: [`CREATE TABLE t AS SELECT * FROM pg_catalog.pg_prepared_statements`](../v23.1/create-table-as.html); [`CREATE MATERIALIZED VIEW v AS SELECT * FROM pg_catalog.pg_prepared_statements`](../v23.1/create-view.html). [#106274][#106274] -- Fixed a panic executing [`CREATE AS`](../v23.1/create-table-as.html) with `pg_catalog.cursors` as a source. For example: [`CREATE TABLE t AS SELECT * FROM pg_catalog.pg_cursors`](../v23.1/create-table-as.html); [`CREATE MATERIALIZED VIEW v AS SELECT * FROM pg_catalog.pg_cursors`](../v23.1/create-view.html). [#106274][#106274] -- Fixed a panic executing [`CREATE AS`](../v23.1/create-table-as.html) with `crdb_internal.create_statements` as a source. For example: [`CREATE TABLE t AS SELECT * FROM crdb_internal.create_statements`](../v23.1/create-table-as.html); [`CREATE MATERIALIZED VIEW v AS SELECT * FROM crdb_internal.create_statements`](../v23.1/create-view.html). 
[#106274][#106274] -- Fixed a bug where the [Key Visualizer](../v23.1/ui-key-visualizer.html) was crashing with an "invalid conversion to int" message. [#106357][#106357] -- Fixed a bug in the declarative [schema changer](../v23.1/online-schema-changes.html) where adding a [foreign key](../v23.1/foreign-key.html) or setting a column to `NOT NULL` with a non-existent column produced an assertion error instead of the proper pgcode. [#105719][#105719] -- [`SHOW SCHEMAS FROM db_name`](../v23.1/show-schemas.html) will no longer incorrectly show schemas from the current database when the current database has a schema named `db_name`. [#106199][#106199] -- Fixed a bug in [optimizer](../v23.1/cost-based-optimizer.html) row count estimates of a scan from a partitioned [partial index](../v23.1/partial-indexes.html), which may cause an underestimated row count or a panic if the row count goes to zero. [#106309][#106309] -- Fixed a bug where a [transaction retry](../v23.1/transactions.html#transaction-retries) during the backfill of the `job_type` column in the [jobs table](../v23.1/show-jobs.html) could result in some job records with no `job_type` value. [#106412][#106412] -- Fixed a bug where some [secondary indexes](../v23.1/indexes.html) would incorrectly be treated internally as [primary indexes](../v23.1/primary-key.html), which could cause some [schema change](../v23.1/online-schema-changes.html) operations to fail. The bug could occur if [`ALTER TABLE ... ALTER PRIMARY KEY`](../v23.1/alter-table.html#alter-primary-key) was used on CockroachDB [v21.1](../releases/v21.1.html) or earlier, and the cluster was upgraded. [#106426][#106426] -- In the [DB Console](../v23.1/ui-overview.html), selecting a database filter from the filters menu in the [SQL Activity Page](../v23.1/ui-overview.html#sql-activity) should function as expected. 
This fixes a bug where the filter would break and not show any results when the results were retrieved from the statement activity table instead of the persisted table. [#106434][#106434] -- The statement tag for [`SHOW SEQUENCES`](../v23.1/show-sequences.html) is now corrected to be `SHOW SEQUENCES` instead of [`SHOW SCHEMAS`](../v23.1/show-schemas.html). [#106797][#106797] -- Fixed a rare bug in which some uploads via [`cockroach userfile upload`](../v23.1/cockroach-userfile-upload.html) would silently upload incorrect data. [#106698][#106698] -- Under prolonged unavailability (such as loss of [quorum](../v23.1/architecture/overview.html#architecture-overview-consensus)), affected [ranges](../v23.1/architecture/overview.html#architecture-range) would exhibit [Raft log](../v23.1/architecture/replication-layer.html#raft-logs) growth that was quadratic as a function of the duration of the outage. Now this growth is approximately linear instead. [#106054][#106054] -- Fixed a bug on the [**Metrics**](../v23.1/metrics.html) page that was causing it to not update automatically on rolling window options. [#106766][#106766] -- Blocked dropping of [indexes](../v23.1/indexes.html) impacted by [Technical Advisory 99561](../advisories/a99561.html) if dropping those indexes could cause data loss to occur. [#106863][#106863] -- [Debug zips](../v23.1/cockroach-debug-zip.html) are now properly showing the information from [`crdb_internal.cluster_settings`](../v23.1/crdb-internal.html). The file `crdb_internal.cluster_settings.txt` in debug zips was empty due to this bug on CockroachDB [v23.1.5](../releases/v23.1.html#v23-1-5) (which was the only version affected). [#107105][#107105] -- A bug has been fixed that caused internal errors instead of user errors when queries contained labelled [tuples](../v23.1/scalar-expressions.html#tuple-constructors) with a different number of elements and labels, e.g., `(ROW(1, 2) AS a)`. 
This bug had been present since CockroachDB [v23.1.0](../releases/v23.1.html). [#106959][#106959] -- Fixed a failing [schema change](../v23.1/online-schema-changes.html) [job](../v23.1/show-jobs.html) when [CREATE TABLE AS](../v23.1/create-table-as.html) or [`CREATE MATERIALIZED VIEW AS`](../v23.1/create-view.html) sources from a `SHOW` command as in the following examples [#107226][#107226]: +- Fixed a bug where the **node** and **regions** columns on the [Databases Page](../v23.1/ui-databases-page.html) did not properly render. This column is shown for clusters with more than 1 node. #105824 +- Fixed an internal error when using [`EXPLAIN (TYPES)`](../v23.1/explain.html#types-option) on a [`DELETE FROM ... USING ... RETURNING`](../v23.1/delete.html) statement. This error was introduced in v23.1.0. #106196 +- Fixed a bug which manifested itself in error messages containing "failed to drop all of the relevant elements" when executing DDL statements with the declarative [schema changer](../v23.1/online-schema-changes.html). What this really means is that there's a concurrent schema change that is ongoing. Instead we now behave as expected and wait for it to finish. #106286 +- Fixed a panic executing [`CREATE AS`](../v23.1/create-table-as.html) with `pg_catalog.pg_prepared_statements` as a source. For example: [`CREATE TABLE t AS SELECT * FROM pg_catalog.pg_prepared_statements`](../v23.1/create-table-as.html); [`CREATE MATERIALIZED VIEW v AS SELECT * FROM pg_catalog.pg_prepared_statements`](../v23.1/create-view.html). #106274 +- Fixed a panic executing [`CREATE AS`](../v23.1/create-table-as.html) with `pg_catalog.cursors` as a source. For example: [`CREATE TABLE t AS SELECT * FROM pg_catalog.pg_cursors`](../v23.1/create-table-as.html); [`CREATE MATERIALIZED VIEW v AS SELECT * FROM pg_catalog.pg_cursors`](../v23.1/create-view.html). #106274 +- Fixed a panic executing [`CREATE AS`](../v23.1/create-table-as.html) with `crdb_internal.create_statements` as a source. 
For example: [`CREATE TABLE t AS SELECT * FROM crdb_internal.create_statements`](../v23.1/create-table-as.html); [`CREATE MATERIALIZED VIEW v AS SELECT * FROM crdb_internal.create_statements`](../v23.1/create-view.html). #106274 +- Fixed a bug where the [Key Visualizer](../v23.1/ui-key-visualizer.html) was crashing with an "invalid conversion to int" message. #106357 +- Fixed a bug in the declarative [schema changer](../v23.1/online-schema-changes.html) where adding a [foreign key](../v23.1/foreign-key.html) or setting a column to `NOT NULL` with a non-existent column produced an assertion error instead of the proper pgcode. #105719 +- [`SHOW SCHEMAS FROM db_name`](../v23.1/show-schemas.html) will no longer incorrectly show schemas from the current database when the current database has a schema named `db_name`. #106199 +- Fixed a bug in [optimizer](../v23.1/cost-based-optimizer.html) row count estimates of a scan from a partitioned [partial index](../v23.1/partial-indexes.html), which may cause an underestimated row count or a panic if the row count goes to zero. #106309 +- Fixed a bug where a [transaction retry](../v23.1/transactions.html#transaction-retries) during the backfill of the `job_type` column in the [jobs table](../v23.1/show-jobs.html) could result in some job records with no `job_type` value. #106412 +- Fixed a bug where some [secondary indexes](../v23.1/indexes.html) would incorrectly be treated internally as [primary indexes](../v23.1/primary-key.html), which could cause some [schema change](../v23.1/online-schema-changes.html) operations to fail. The bug could occur if [`ALTER TABLE ... ALTER PRIMARY KEY`](../v23.1/alter-table.html#alter-primary-key) was used on CockroachDB [v21.1](../releases/v21.1.html) or earlier, and the cluster was upgraded. #106426 +- In the [DB Console](../v23.1/ui-overview.html), selecting a database filter from the filters menu in the [SQL Activity Page](../v23.1/ui-overview.html#sql-activity) should function as expected. 
This fixes a bug where the filter would break and not show any results when the results were retrieved from the statement activity table instead of the persisted table. #106434 +- The statement tag for [`SHOW SEQUENCES`](../v23.1/show-sequences.html) is now corrected to be `SHOW SEQUENCES` instead of [`SHOW SCHEMAS`](../v23.1/show-schemas.html). #106797 +- Fixed a rare bug in which some uploads via [`cockroach userfile upload`](../v23.1/cockroach-userfile-upload.html) would silently upload incorrect data. #106698 +- Under prolonged unavailability (such as loss of [quorum](../v23.1/architecture/overview.html#architecture-overview-consensus)), affected [ranges](../v23.1/architecture/overview.html#architecture-range) would exhibit [Raft log](../v23.1/architecture/replication-layer.html#raft-logs) growth that was quadratic as a function of the duration of the outage. Now this growth is approximately linear instead. #106054 +- Fixed a bug on the [**Metrics**](../v23.1/metrics.html) page that was causing it to not update automatically on rolling window options. #106766 +- Blocked dropping of [indexes](../v23.1/indexes.html) impacted by [Technical Advisory 99561](../advisories/a99561.html) if dropping those indexes could cause data loss to occur. #106863 +- [Debug zips](../v23.1/cockroach-debug-zip.html) are now properly showing the information from [`crdb_internal.cluster_settings`](../v23.1/crdb-internal.html). The file `crdb_internal.cluster_settings.txt` in debug zips was empty due to this bug on CockroachDB [v23.1.5](../releases/v23.1.html#v23-1-5) (which was the only version affected). #107105 +- A bug has been fixed that caused internal errors instead of user errors when queries contained labelled [tuples](../v23.1/scalar-expressions.html#tuple-constructors) with a different number of elements and labels, e.g., `(ROW(1, 2) AS a)`. This bug had been present since CockroachDB [v23.1.0](../releases/v23.1.html). 
#106959 +- Fixed a failing [schema change](../v23.1/online-schema-changes.html) [job](../v23.1/show-jobs.html) when [CREATE TABLE AS](../v23.1/create-table-as.html) or [`CREATE MATERIALIZED VIEW AS`](../v23.1/create-view.html) sources from a `SHOW` command as in the following examples #107226: - `CREATE TABLE t AS SELECT * FROM [SHOW CREATE TABLE tbl];` - `CREATE TABLE t AS SELECT * FROM [SHOW INDEXES FROM tbl];` - `CREATE TABLE t AS SELECT * FROM [SHOW COLUMNS FROM tbl];` - `CREATE TABLE t AS SELECT * FROM [SHOW CONSTRAINTS FROM tbl];` - `CREATE TABLE t AS SELECT * FROM [SHOW PARTITIONS FROM TABLE tbl];` - `CREATE TABLE t AS SELECT * FROM [SHOW PARTITIONS FROM INDEX tbl@tbl_pkey];` -- Index recommendations in the [DB Console](../v23.1/ui-overview.html) no longer use the fully qualified name of a table to create an index name, allowing the creating of [indexes](../v23.1/indexes.html) directly from the DB Console to work. [#107218][#107218] -- Fixed a bug where [`SHOW GRANTS`](../v23.1/show-grants.html) could fail if any objects were offline, which can happen during a [`RESTORE`](../v23.1/restore.html). [#107238][#107238] -- The `pg_get_serial_sequence` [builtin function](../v23.1/functions-and-operators.html) can now handle mixed-case names correctly. [#107372][#107372] -- Fixed a bug with the "SQL statement diagnostic request" [HTTP API](../v23.1/cluster-api.html) that would affect CockroachDB {{ site.data.products.serverless }} clusters was fixed. This bug had existed since CockroachDB [v22.1]({% link releases/v22.1.md %}). [#107385][#107385] -- Fixed a bug where under rare circumstances, a [replication](../v23.1/architecture/replication-layer.html) change could get stuck when proposed near lease/leadership changes (and likely under overload), and the [replica circuit breakers](https://www.cockroachlabs.com/docs/v23.1/architecture/replication-layer#per-replica-circuit-breakers) could trip. 
[#107059][#107059] -- Fixed a bug in upstream `etcd-io/raft` which could result in pulling unlimited amount of logs into memory, and lead to out-of-memory errors. Now the log scan has a limited memory footprint. [#104483][#104483] -- Fixed a bug where, in rare circumstances, a [replication](https://cockroachlabs.com/docs/v23.1/architecture/replication-layer) could get stuck when proposed near lease or leadership changes, especially under overload, and the [replica circuit breakers]([../v23.1](https://cockroachlabs.com/docs/v23.1/architecture/replication-layer#per-replica-circuit-breakers) could trip. A previous attempt to fix this issue has been reverted in favor of this fix. [#106515][#106515] -- Fixed a bug in the SQL syntax for [`CREATE TABLE AS`](../v23.1/create-table-as.html) [schema change](../v23.1/online-schema-changes.html) [job](../v23.1/show-jobs.html) description. [#107404][#107404] -- Fixed an internal error in [`UPDATE`](../v23.1/update.html), [`UPSERT`](../v23.1/upsert.html), [`INSERT`](../v23.1/insert.html), or [`DELETE`](../v23.1/delete.html) statements run concurrently with [`ALTER TABLE ... ADD COLUMN`](../v23.1/alter-table.html#add-column) of a [virtual computed column](../v23.1/computed-columns.html#virtual-computed-columns) on the same table. [#107403][#107403] -- Fixed a bug that caused internal errors when using [user-defined types](../v23.1/create-type.html) in [views](../v23.1/views.html) and [user-defined functions](../v23.1/user-defined-functions.html) that have [subqueries](../v23.1/subqueries.html). This bug was present when using views since version v21.2. It was present when using user-defined functions since [v23.1](../releases/v23.1.html). [#106955][#106955] -- The timeout duration when loading the [**Hot Ranges** page](../v23.1/ui-hot-ranges-page.html) has been increased to 30 minutes. 
[#107497][#107497] -- Fixed the SQL syntax for [`CREATE MATERIALIZED VIEW AS`](../v23.1/views.html#materialized-views) [schema change](../v23.1/online-schema-changes.html) [job](../v23.1/show-jobs.html) descriptions. [#107471][#107471] -- Reduced [contention](../v23.1/performance-best-practices-overview.html#transaction-contention) on the `system.statement_statistics` table which has caused the [SQL statistics](../v23.1/cost-based-optimizer.html#table-statistics) compaction [job](../v23.1/show-jobs.html) to fail. [#107573][#107573] -- The `ST_ClosestPoint` [spatial function](../v23.1/functions-and-operators.html#spatial-functions) previously did not preserve the correct [SRID](../v23.1/srid-4326.html) when comparing two different [points](../v23.1/point.html). This is now resolved. [#107595][#107595] -- CockroachDB would previously crash when evaluating the `ST_AsEncodedPolyline` [builtin function](../v23.1/functions-and-operators.html#spatial-functions) on a [`GEOMETRYCOLLECTION` geometry type](../v23.1/geometrycollection.html). The bug was introduced before CockroachDB [v22.1](../releases/v22.1.html) and is now fixed. [#107901][#107901] -- Fixed a bug where dropping an [index](../v23.1/indexes.html) could end up failing or cleaning [foreign keys](../v23.1/foreign-key.html) (when `CASCADE` is specified) on other tables referencing the target table with this index. [#107793][#107793] -- Fixed a bug where CockroachDB would return an error when using [`SHOW RANGE ... FOR ROW ...`](../v23.1/show-range-for-row.html) in a [`CREATE TABLE AS`](../v23.1/create-table-as.html) construct. [#107603][#107603] -- Cloud buckets containing [backups](../v23.1/backup.html) can now be copied via AWS DataSync and other third-party services which insert empty objects with a trailing `/`. Previously, [restore](../v23.1/restore.html) would fail with the following error message: `read LATEST path: path does not contain a completed latest backup: NoSuchKey`. 
[#106235][#106235] -- Fixed a bug where [`DROP COLUMN CASCADE`](../v23.1/alter-table.html#drop-column) involving a [primary key](../v23.1/primary-key.html) column could end up hanging. [#107987][#107987] -- Fixed a nil pointer dereference caused by a race condition when using the `to_char` [builtin function](../v23.1/functions-and-operators.html). [#108079][#108079] -- Since [v22.2.0](../releases/v22.2.html), using a PTP clock device (enabled by the [`--clock-device` flag](../v23.1/cockroach-start.html#flags-clock-device)) would generate timestamps in the far future. It now generates the correct time. This could cause nodes to crash due to incorrect timestamps, or in the worst case irreversibly advance the cluster's [HLC clock](../v23.1/architecture/transaction-layer.html#time-and-hybrid-logical-clocks) into the far future. [#108113][#108113] -- Previously, CockroachDB, when planning expressions containing many sub-expressions (e.g., deeply-nested `AND` / `OR` structures), would use memory quadratic in the number of sub-expressions. In the worst cases (thousands of sub-expressions), this could lead to [OOMs](../v23.1/cluster-setup-troubleshooting.html#out-of-memory-oom-crash). The bug had been present since at least [v22.1](../releases/v22.1.html) and has now been fixed. [#107368][#107368] -- When losing a [leaseholder](../v23.1/architecture/overview.html#architecture-leaseholder) and using [lease preferences](../v23.1/configure-replication-zones.html#lease_preferences), the lease can be acquired by any other [replica](../v23.1/architecture/overview.html#architecture-replica) (regardless of lease preferences) in order to restore availability as soon as possible. The new leaseholder will now immediately check if it violates the lease preferences, and attempt to transfer the lease to a replica that satisfies the preferences if possible. 
[#107625][#107625] -- Fixed the [schema changer](../v23.1/online-schema-changes.html) [job](../v23.1/show-jobs.html) when [`CREATE AS`](../v23.1/create-table-as.html) sources from [`SHOW CREATE FUNCTION`](../v23.1/show-create.html#show-the-create-function-statement-for-a-function), e.g.: `CREATE TABLE t AS SELECT * FROM [SHOW CREATE FUNCTION f]`. [#108195][#108195] -- Added cancel checking to [index](../v23.1/indexes.html) constraint initialization code to allow queries to timeout during [query optimization](../v23.1/cost-based-optimizer.html) if analyzing predicates to constrain an index starts using too many resources. Example of setting a timeout using the [`statement_timeout` session setting](../v23.1/set-vars.html#statement-timeout): `SET statement_timeout='5.0s';` [#106942][#106942] -- Previously, CockroachDB could encounter an internal error `unexpected non-zero bytes limit for txnKVStreamer` when evaluating [locality-optimized](../v23.1/cost-based-optimizer.html#locality-optimized-search-in-multi-region-clusters) lookup [joins](../v23.1/joins.html) in case it had to perform the remote regions' lookup. The bug was introduced in [v22.2](../releases/v22.2.html) and is now fixed. A temporary workaround can be accomplished without upgrading by setting the `streamer_enabled` [session variable](../v23.1/session-variables.html): `SET streamer_enabled = false;`. [#108251][#108251] -- Fixed a spurious error `no data source matches prefix` that could occur during [planning for a query](../v23.1/cost-based-optimizer.html) with `DISTINCT ON` and [`ORDER BY ASC NULLS LAST`](../v23.1/order-by.html) or `ORDER BY DESC NULLS FIRST`. 
[#108303][#108303] -- Fixed an [optimizer](../v23.1/cost-based-optimizer.html) costing bug introduced in [v23.1](../releases/v23.1.html) that could cause a query involving two or more [joins](../v23.1/joins.html) with [`REGIONAL BY ROW`](../v23.1/table-localities.html#regional-by-row-tables) tables to not pick the most optimal [lookup joins](../v23.1/joins.html#lookup-joins). [#108308][#108308] -- Fixed an [optimizer](../v23.1/cost-based-optimizer.html) costing bug introduced in [v23.1](../releases/v23.1.html) that could cause a query whose best-cost query plan is a string of [lookup joins](../v23.1/joins.html#lookup-joins) with [`REGIONAL BY ROW`](../v23.1/table-localities.html#regional-by-row-tables) tables, one after the other in sequence, to not pick the most optimal join plan. [#108308][#108308] -- Previously, using [`IMPORT INTO`](../v23.1/import-into.html) for `DELIMITED DATA` or MySQL imports would error with `column ... does not exist` if it was importing into a [collated string](../v23.1/collate.html) column. This is now fixed. [#108286][#108286] -- Fixed a bug introduced in [v23.1](../releases/v23.1.html) that could cause the precision of some values to be incorrectly truncated for a query with a correlated [subquery](../v23.1/subqueries.html) and an equality between a column from the subquery and the outer query. This applies to types that are "equivalent" but have different precision levels, e.g., [`DECIMAL(10, 0)`](../v23.1/decimal.html) vs `DECIMAL(10, 2)` or `NAME` vs [`CHAR`](../v23.1/string.html). [#108198][#108198] -- Fixed a bug where [`BEGIN`](../v23.1/begin-transaction.html), [`COMMIT`](../v23.1/commit-transaction.html), [`SET`](../v23.1/set-vars.html), [`ROLLBACK`](../v23.1/rollback-transaction.html), and [`SAVEPOINT`](../v23.1/savepoint.html) statements would not be written to the [execution](../v23.1/logging-overview.html#logging-destinations) or [audit logs](../v23.1/sql-audit-logging.html). 
[#108411][#108411] -- Fixed a bug where a session migration performed by `SHOW TRANSFER STATE` would not handle prepared statements that used the [`AS OF SYSTEM TIME` clause](../v23.1/as-of-system-time.html). Users who encountered this bug would see errors such as `expected 1 or 0 for number of format codes, got N`. This bug was present since [v22.2.0](../releases/v22.2.html). [#108548][#108548] -- Fixed errors on the [**Sessions** page](../v23.1/ui-sessions-page.html) in the [DB Console](../v23.1/ui-overview.html) when a session's memory usage is zero bytes. [#108733][#108733] -- Fixed a bug introduced in [v22.1](../releases/v22.1.html) that could cause a [join](../v23.1/joins.html) to infinite-loop in rare cases when (1) the join filter is not an equality and (2) no columns from the left input are returned. [#108821][#108821] -- Fixed an issue with the full scan filter on the [**Statements** page](../v23.1/ui-statements-page.html) where the filter was always evaluating to false, even if a full scan had occurred. [#109254][#109254] -- Fixed a bug that could cause CPU usage to increase over time. [#109298][#109298] -- Fixed a bug that could cause some rows to be silently skipped during [`IMPORT`](../v23.1/import.html) when a node's import worker failed. [#109664][#109664] -- Fixed a bug in [geospatial](../v23.1/spatial-data-overview.html) queries, where a query filter of the form `ST_Distance(geog1, geog2) > constant`, or `ST_MaxDistance(geom1, geom2) > constant`, where the operator is one of `>`, `<`, `>=`, `<=`, or a filter of the form `ST_Distance(geog1, geog2, false) = 0` may mistakenly evaluate to `true` when one or both of the inputs is `NULL` or an empty [geography](../v23.1/architecture/glossary.html#geography) or [geometry](../v23.1/architecture/glossary.html#geometry). More rows could be returned by the query than expected. 
[#109395][#109395] -- Fixed an issue where a [split](../v23.1/architecture/distribution-layer.html#range-splits) can be called on an invalid key that's in the form of `someValidKey.Next()` during [restore](../v23.1/restore.html). This split key can land in the middle of a row with [column families](../v23.1/column-families.html), and thus result in failing SQL queries when querying the restored table. [#109778][#109778] -- Fixed an issue where a [split](../v23.1/architecture/distribution-layer.html#range-splits) can be called on an invalid key that's in the form of `someValidKey.Next()` during [restore](../v23.1/restore.html) with the [cluster setting](../v23.1/cluster-settings.html) `bulkio.restore.use_simple_import_spans=true`. This split key can land in the middle of a row with [column families](../v23.1/column-families.html), and thus result in failing SQL queries when querying the restored table. [#109939][#109939] -- Fixed a bug that caused a flood of requests to refresh [cluster settings](../v23.1/cluster-settings.html) on the [Advanced Debug page](../v23.1/ui-debug-pages.html#configuration). If a user would like to see the effect of a modified cluster setting in [DB Console](../v23.1/ui-overview.html), a page reload is now required. [#108125][#108125] +- Index recommendations in the [DB Console](../v23.1/ui-overview.html) no longer use the fully qualified name of a table to create an index name, allowing the creating of [indexes](../v23.1/indexes.html) directly from the DB Console to work. #107218 +- Fixed a bug where [`SHOW GRANTS`](../v23.1/show-grants.html) could fail if any objects were offline, which can happen during a [`RESTORE`](../v23.1/restore.html). #107238 +- The `pg_get_serial_sequence` [builtin function](../v23.1/functions-and-operators.html) can now handle mixed-case names correctly. 
#107372 +- Fixed a bug with the "SQL statement diagnostic request" [HTTP API](../v23.1/cluster-api.html) that would affect CockroachDB {{ site.data.products.serverless }} clusters. This bug had existed since CockroachDB [v22.1]({% link releases/v22.1.md %}). #107385 +- Fixed a bug where under rare circumstances, a [replication](../v23.1/architecture/replication-layer.html) change could get stuck when proposed near lease/leadership changes (and likely under overload), and the [replica circuit breakers](https://www.cockroachlabs.com/docs/v23.1/architecture/replication-layer#per-replica-circuit-breakers) could trip. #107059 +- Fixed a bug in upstream `etcd-io/raft` which could result in pulling an unlimited amount of logs into memory, and lead to out-of-memory errors. Now the log scan has a limited memory footprint. #104483 +- Fixed a bug where, in rare circumstances, a [replication](https://cockroachlabs.com/docs/v23.1/architecture/replication-layer) change could get stuck when proposed near lease or leadership changes, especially under overload, and the [replica circuit breakers](https://cockroachlabs.com/docs/v23.1/architecture/replication-layer#per-replica-circuit-breakers) could trip. A previous attempt to fix this issue has been reverted in favor of this fix. #106515 +- Fixed a bug in the SQL syntax for [`CREATE TABLE AS`](../v23.1/create-table-as.html) [schema change](../v23.1/online-schema-changes.html) [job](../v23.1/show-jobs.html) description. #107404 +- Fixed an internal error in [`UPDATE`](../v23.1/update.html), [`UPSERT`](../v23.1/upsert.html), [`INSERT`](../v23.1/insert.html), or [`DELETE`](../v23.1/delete.html) statements run concurrently with [`ALTER TABLE ... ADD COLUMN`](../v23.1/alter-table.html#add-column) of a [virtual computed column](../v23.1/computed-columns.html#virtual-computed-columns) on the same table. 
#107403 +- Fixed a bug that caused internal errors when using [user-defined types](../v23.1/create-type.html) in [views](../v23.1/views.html) and [user-defined functions](../v23.1/user-defined-functions.html) that have [subqueries](../v23.1/subqueries.html). This bug was present when using views since version v21.2. It was present when using user-defined functions since [v23.1](../releases/v23.1.html). #106955 +- The timeout duration when loading the [**Hot Ranges** page](../v23.1/ui-hot-ranges-page.html) has been increased to 30 minutes. #107497 +- Fixed the SQL syntax for [`CREATE MATERIALIZED VIEW AS`](../v23.1/views.html#materialized-views) [schema change](../v23.1/online-schema-changes.html) [job](../v23.1/show-jobs.html) descriptions. #107471 +- Reduced [contention](../v23.1/performance-best-practices-overview.html#transaction-contention) on the `system.statement_statistics` table which has caused the [SQL statistics](../v23.1/cost-based-optimizer.html#table-statistics) compaction [job](../v23.1/show-jobs.html) to fail. #107573 +- The `ST_ClosestPoint` [spatial function](../v23.1/functions-and-operators.html#spatial-functions) previously did not preserve the correct [SRID](../v23.1/srid-4326.html) when comparing two different [points](../v23.1/point.html). This is now resolved. #107595 +- CockroachDB would previously crash when evaluating the `ST_AsEncodedPolyline` [builtin function](../v23.1/functions-and-operators.html#spatial-functions) on a [`GEOMETRYCOLLECTION` geometry type](../v23.1/geometrycollection.html). The bug was introduced before CockroachDB [v22.1](../releases/v22.1.html) and is now fixed. #107901 +- Fixed a bug where dropping an [index](../v23.1/indexes.html) could end up failing or cleaning [foreign keys](../v23.1/foreign-key.html) (when `CASCADE` is specified) on other tables referencing the target table with this index. #107793 +- Fixed a bug where CockroachDB would return an error when using [`SHOW RANGE ... 
FOR ROW ...`](../v23.1/show-range-for-row.html) in a [`CREATE TABLE AS`](../v23.1/create-table-as.html) construct. #107603 +- Cloud buckets containing [backups](../v23.1/backup.html) can now be copied via AWS DataSync and other third-party services which insert empty objects with a trailing `/`. Previously, [restore](../v23.1/restore.html) would fail with the following error message: `read LATEST path: path does not contain a completed latest backup: NoSuchKey`. #106235 +- Fixed a bug where [`DROP COLUMN CASCADE`](../v23.1/alter-table.html#drop-column) involving a [primary key](../v23.1/primary-key.html) column could end up hanging. #107987 +- Fixed a nil pointer dereference caused by a race condition when using the `to_char` [builtin function](../v23.1/functions-and-operators.html). #108079 +- Since [v22.2.0](../releases/v22.2.html), using a PTP clock device (enabled by the [`--clock-device` flag](../v23.1/cockroach-start.html#flags-clock-device)) would generate timestamps in the far future. It now generates the correct time. This could cause nodes to crash due to incorrect timestamps, or in the worst case irreversibly advance the cluster's [HLC clock](../v23.1/architecture/transaction-layer.html#time-and-hybrid-logical-clocks) into the far future. #108113 +- Previously, CockroachDB, when planning expressions containing many sub-expressions (e.g., deeply-nested `AND` / `OR` structures), would use memory quadratic in the number of sub-expressions. In the worst cases (thousands of sub-expressions), this could lead to [OOMs](../v23.1/cluster-setup-troubleshooting.html#out-of-memory-oom-crash). The bug had been present since at least [v22.1](../releases/v22.1.html) and has now been fixed. 
#107368 +- When losing a [leaseholder](../v23.1/architecture/overview.html#architecture-leaseholder) and using [lease preferences](../v23.1/configure-replication-zones.html#lease_preferences), the lease can be acquired by any other [replica](../v23.1/architecture/overview.html#architecture-replica) (regardless of lease preferences) in order to restore availability as soon as possible. The new leaseholder will now immediately check if it violates the lease preferences, and attempt to transfer the lease to a replica that satisfies the preferences if possible. #107625 +- Fixed the [schema changer](../v23.1/online-schema-changes.html) [job](../v23.1/show-jobs.html) when [`CREATE AS`](../v23.1/create-table-as.html) sources from [`SHOW CREATE FUNCTION`](../v23.1/show-create.html#show-the-create-function-statement-for-a-function), e.g.: `CREATE TABLE t AS SELECT * FROM [SHOW CREATE FUNCTION f]`. #108195 +- Added cancel checking to [index](../v23.1/indexes.html) constraint initialization code to allow queries to timeout during [query optimization](../v23.1/cost-based-optimizer.html) if analyzing predicates to constrain an index starts using too many resources. Example of setting a timeout using the [`statement_timeout` session setting](../v23.1/set-vars.html#statement-timeout): `SET statement_timeout='5.0s';` #106942 +- Previously, CockroachDB could encounter an internal error `unexpected non-zero bytes limit for txnKVStreamer` when evaluating [locality-optimized](../v23.1/cost-based-optimizer.html#locality-optimized-search-in-multi-region-clusters) lookup [joins](../v23.1/joins.html) in case it had to perform the remote regions' lookup. The bug was introduced in [v22.2](../releases/v22.2.html) and is now fixed. A temporary workaround can be accomplished without upgrading by setting the `streamer_enabled` [session variable](../v23.1/session-variables.html): `SET streamer_enabled = false;`. 
#108251 +- Fixed a spurious error `no data source matches prefix` that could occur during [planning for a query](../v23.1/cost-based-optimizer.html) with `DISTINCT ON` and [`ORDER BY ASC NULLS LAST`](../v23.1/order-by.html) or `ORDER BY DESC NULLS FIRST`. #108303 +- Fixed an [optimizer](../v23.1/cost-based-optimizer.html) costing bug introduced in [v23.1](../releases/v23.1.html) that could cause a query involving two or more [joins](../v23.1/joins.html) with [`REGIONAL BY ROW`](../v23.1/table-localities.html#regional-by-row-tables) tables to not pick the most optimal [lookup joins](../v23.1/joins.html#lookup-joins). #108308 +- Fixed an [optimizer](../v23.1/cost-based-optimizer.html) costing bug introduced in [v23.1](../releases/v23.1.html) that could cause a query whose best-cost query plan is a string of [lookup joins](../v23.1/joins.html#lookup-joins) with [`REGIONAL BY ROW`](../v23.1/table-localities.html#regional-by-row-tables) tables, one after the other in sequence, to not pick the most optimal join plan. #108308 +- Previously, using [`IMPORT INTO`](../v23.1/import-into.html) for `DELIMITED DATA` or MySQL imports would error with `column ... does not exist` if it was importing into a [collated string](../v23.1/collate.html) column. This is now fixed. #108286 +- Fixed a bug introduced in [v23.1](../releases/v23.1.html) that could cause the precision of some values to be incorrectly truncated for a query with a correlated [subquery](../v23.1/subqueries.html) and an equality between a column from the subquery and the outer query. This applies to types that are "equivalent" but have different precision levels, e.g., [`DECIMAL(10, 0)`](../v23.1/decimal.html) vs `DECIMAL(10, 2)` or `NAME` vs [`CHAR`](../v23.1/string.html). 
#108198 +- Fixed a bug where [`BEGIN`](../v23.1/begin-transaction.html), [`COMMIT`](../v23.1/commit-transaction.html), [`SET`](../v23.1/set-vars.html), [`ROLLBACK`](../v23.1/rollback-transaction.html), and [`SAVEPOINT`](../v23.1/savepoint.html) statements would not be written to the [execution](../v23.1/logging-overview.html#logging-destinations) or [audit logs](../v23.1/sql-audit-logging.html). #108411 +- Fixed a bug where a session migration performed by `SHOW TRANSFER STATE` would not handle prepared statements that used the [`AS OF SYSTEM TIME` clause](../v23.1/as-of-system-time.html). Users who encountered this bug would see errors such as `expected 1 or 0 for number of format codes, got N`. This bug was present since [v22.2.0](../releases/v22.2.html). #108548 +- Fixed errors on the [**Sessions** page](../v23.1/ui-sessions-page.html) in the [DB Console](../v23.1/ui-overview.html) when a session's memory usage is zero bytes. #108733 +- Fixed a bug introduced in [v22.1](../releases/v22.1.html) that could cause a [join](../v23.1/joins.html) to infinite-loop in rare cases when (1) the join filter is not an equality and (2) no columns from the left input are returned. #108821 +- Fixed an issue with the full scan filter on the [**Statements** page](../v23.1/ui-statements-page.html) where the filter was always evaluating to false, even if a full scan had occurred. #109254 +- Fixed a bug that could cause CPU usage to increase over time. #109298 +- Fixed a bug that could cause some rows to be silently skipped during [`IMPORT`](../v23.1/import.html) when a node's import worker failed. 
#109664 +- Fixed a bug in [geospatial](../v23.1/spatial-data-overview.html) queries, where a query filter of the form `ST_Distance(geog1, geog2) > constant`, or `ST_MaxDistance(geom1, geom2) > constant`, where the operator is one of `>`, `<`, `>=`, `<=`, or a filter of the form `ST_Distance(geog1, geog2, false) = 0` may mistakenly evaluate to `true` when one or both of the inputs is `NULL` or an empty [geography](../v23.1/architecture/glossary.html#geography) or [geometry](../v23.1/architecture/glossary.html#geometry). More rows could be returned by the query than expected. #109395 +- Fixed an issue where a [split](../v23.1/architecture/distribution-layer.html#range-splits) can be called on an invalid key that's in the form of `someValidKey.Next()` during [restore](../v23.1/restore.html). This split key can land in the middle of a row with [column families](../v23.1/column-families.html), and thus result in failing SQL queries when querying the restored table. #109778 +- Fixed an issue where a [split](../v23.1/architecture/distribution-layer.html#range-splits) can be called on an invalid key that's in the form of `someValidKey.Next()` during [restore](../v23.1/restore.html) with the [cluster setting](../v23.1/cluster-settings.html) `bulkio.restore.use_simple_import_spans=true`. This split key can land in the middle of a row with [column families](../v23.1/column-families.html), and thus result in failing SQL queries when querying the restored table. #109939 +- Fixed a bug that caused a flood of requests to refresh [cluster settings](../v23.1/cluster-settings.html) on the [Advanced Debug page](../v23.1/ui-debug-pages.html#configuration). If a user would like to see the effect of a modified cluster setting in [DB Console](../v23.1/ui-overview.html), a page reload is now required. #108125

Performance improvements

-- [Backups](../v23.1/backup-and-restore-overview.html) no longer perform work proportional to the number of pending [write intents](../v23.1/architecture/transaction-layer.html#write-intents) that they encounter, so they are more than 100x faster when encountering long-running, bulk writing transactions. [#105526][#105526] -- Queries that access [`pg_catalog`](../v23.1/pg-catalog.html) and [`information_schema`](../v23.1/information-schema.html) that perform introspection on other tables in those schemas are now significantly faster. [#109736][#109736] +- [Backups](../v23.1/backup-and-restore-overview.html) no longer perform work proportional to the number of pending [write intents](../v23.1/architecture/transaction-layer.html#write-intents) that they encounter, so they are more than 100x faster when encountering long-running, bulk writing transactions. #105526 +- Queries that access [`pg_catalog`](../v23.1/pg-catalog.html) and [`information_schema`](../v23.1/information-schema.html) that perform introspection on other tables in those schemas are now significantly faster. #109736
@@ -132,103 +132,5 @@ This release includes 378 merged PRs by 67 authors.
-[#105526]: https://github.com/cockroachdb/cockroach/pull/105526 -[#105719]: https://github.com/cockroachdb/cockroach/pull/105719 -[#105824]: https://github.com/cockroachdb/cockroach/pull/105824 -[#105826]: https://github.com/cockroachdb/cockroach/pull/105826 -[#105950]: https://github.com/cockroachdb/cockroach/pull/105950 -[#106054]: https://github.com/cockroachdb/cockroach/pull/106054 -[#106129]: https://github.com/cockroachdb/cockroach/pull/106129 -[#106153]: https://github.com/cockroachdb/cockroach/pull/106153 -[#106179]: https://github.com/cockroachdb/cockroach/pull/106179 -[#106184]: https://github.com/cockroachdb/cockroach/pull/106184 -[#106196]: https://github.com/cockroachdb/cockroach/pull/106196 -[#106199]: https://github.com/cockroachdb/cockroach/pull/106199 -[#106217]: https://github.com/cockroachdb/cockroach/pull/106217 -[#106235]: https://github.com/cockroachdb/cockroach/pull/106235 -[#106274]: https://github.com/cockroachdb/cockroach/pull/106274 -[#106286]: https://github.com/cockroachdb/cockroach/pull/106286 -[#106309]: https://github.com/cockroachdb/cockroach/pull/106309 -[#106357]: https://github.com/cockroachdb/cockroach/pull/106357 -[#106376]: https://github.com/cockroachdb/cockroach/pull/106376 -[#106399]: https://github.com/cockroachdb/cockroach/pull/106399 -[#106404]: https://github.com/cockroachdb/cockroach/pull/106404 -[#106412]: https://github.com/cockroachdb/cockroach/pull/106412 -[#106426]: https://github.com/cockroachdb/cockroach/pull/106426 -[#106432]: https://github.com/cockroachdb/cockroach/pull/106432 -[#106434]: https://github.com/cockroachdb/cockroach/pull/106434 -[#106465]: https://github.com/cockroachdb/cockroach/pull/106465 -[#106688]: https://github.com/cockroachdb/cockroach/pull/106688 -[#106698]: https://github.com/cockroachdb/cockroach/pull/106698 -[#106766]: https://github.com/cockroachdb/cockroach/pull/106766 -[#106776]: https://github.com/cockroachdb/cockroach/pull/106776 -[#106797]: 
https://github.com/cockroachdb/cockroach/pull/106797 -[#106806]: https://github.com/cockroachdb/cockroach/pull/106806 -[#106807]: https://github.com/cockroachdb/cockroach/pull/106807 -[#106857]: https://github.com/cockroachdb/cockroach/pull/106857 -[#106863]: https://github.com/cockroachdb/cockroach/pull/106863 -[#106942]: https://github.com/cockroachdb/cockroach/pull/106942 -[#106955]: https://github.com/cockroachdb/cockroach/pull/106955 -[#106959]: https://github.com/cockroachdb/cockroach/pull/106959 -[#106967]: https://github.com/cockroachdb/cockroach/pull/106967 -[#107059]: https://github.com/cockroachdb/cockroach/pull/107059 -[#107082]: https://github.com/cockroachdb/cockroach/pull/107082 -[#107105]: https://github.com/cockroachdb/cockroach/pull/107105 -[#107131]: https://github.com/cockroachdb/cockroach/pull/107131 -[#107182]: https://github.com/cockroachdb/cockroach/pull/107182 -[#107213]: https://github.com/cockroachdb/cockroach/pull/107213 -[#107218]: https://github.com/cockroachdb/cockroach/pull/107218 -[#107226]: https://github.com/cockroachdb/cockroach/pull/107226 -[#107238]: https://github.com/cockroachdb/cockroach/pull/107238 -[#107368]: https://github.com/cockroachdb/cockroach/pull/107368 -[#107372]: https://github.com/cockroachdb/cockroach/pull/107372 -[#107385]: https://github.com/cockroachdb/cockroach/pull/107385 -[#107403]: https://github.com/cockroachdb/cockroach/pull/107403 -[#107404]: https://github.com/cockroachdb/cockroach/pull/107404 -[#107471]: https://github.com/cockroachdb/cockroach/pull/107471 -[#107497]: https://github.com/cockroachdb/cockroach/pull/107497 -[#107573]: https://github.com/cockroachdb/cockroach/pull/107573 -[#107575]: https://github.com/cockroachdb/cockroach/pull/107575 -[#107595]: https://github.com/cockroachdb/cockroach/pull/107595 -[#107603]: https://github.com/cockroachdb/cockroach/pull/107603 -[#107622]: https://github.com/cockroachdb/cockroach/pull/107622 -[#107625]: 
https://github.com/cockroachdb/cockroach/pull/107625 -[#107762]: https://github.com/cockroachdb/cockroach/pull/107762 -[#107793]: https://github.com/cockroachdb/cockroach/pull/107793 -[#107837]: https://github.com/cockroachdb/cockroach/pull/107837 -[#107901]: https://github.com/cockroachdb/cockroach/pull/107901 -[#107933]: https://github.com/cockroachdb/cockroach/pull/107933 -[#107987]: https://github.com/cockroachdb/cockroach/pull/107987 -[#108074]: https://github.com/cockroachdb/cockroach/pull/108074 -[#108079]: https://github.com/cockroachdb/cockroach/pull/108079 -[#108113]: https://github.com/cockroachdb/cockroach/pull/108113 -[#108125]: https://github.com/cockroachdb/cockroach/pull/108125 -[#108144]: https://github.com/cockroachdb/cockroach/pull/108144 -[#108195]: https://github.com/cockroachdb/cockroach/pull/108195 -[#108198]: https://github.com/cockroachdb/cockroach/pull/108198 -[#108251]: https://github.com/cockroachdb/cockroach/pull/108251 -[#108286]: https://github.com/cockroachdb/cockroach/pull/108286 -[#108303]: https://github.com/cockroachdb/cockroach/pull/108303 -[#108308]: https://github.com/cockroachdb/cockroach/pull/108308 -[#108411]: https://github.com/cockroachdb/cockroach/pull/108411 -[#108457]: https://github.com/cockroachdb/cockroach/pull/108457 -[#108548]: https://github.com/cockroachdb/cockroach/pull/108548 -[#108733]: https://github.com/cockroachdb/cockroach/pull/108733 -[#108816]: https://github.com/cockroachdb/cockroach/pull/108816 -[#108821]: https://github.com/cockroachdb/cockroach/pull/108821 -[#108913]: https://github.com/cockroachdb/cockroach/pull/108913 -[#108990]: https://github.com/cockroachdb/cockroach/pull/108990 -[#109019]: https://github.com/cockroachdb/cockroach/pull/109019 -[#109254]: https://github.com/cockroachdb/cockroach/pull/109254 -[#109298]: https://github.com/cockroachdb/cockroach/pull/109298 -[#109359]: https://github.com/cockroachdb/cockroach/pull/109359 -[#109395]: 
https://github.com/cockroachdb/cockroach/pull/109395 -[#109621]: https://github.com/cockroachdb/cockroach/pull/109621 -[#109664]: https://github.com/cockroachdb/cockroach/pull/109664 -[#109736]: https://github.com/cockroachdb/cockroach/pull/109736 -[#109739]: https://github.com/cockroachdb/cockroach/pull/109739 -[#109778]: https://github.com/cockroachdb/cockroach/pull/109778 -[#109860]: https://github.com/cockroachdb/cockroach/pull/109860 -[#109939]: https://github.com/cockroachdb/cockroach/pull/109939 diff --git a/src/current/_includes/releases/v23.2/v23.2.0-alpha.1.md b/src/current/_includes/releases/v23.2/v23.2.0-alpha.1.md index d329f7f0319..bd5e5fe653e 100644 --- a/src/current/_includes/releases/v23.2/v23.2.0-alpha.1.md +++ b/src/current/_includes/releases/v23.2/v23.2.0-alpha.1.md @@ -6,43 +6,43 @@ Release Date: September 26, 2023

Backward-incompatible changes

-- The pre-v23.1 output produced by `SHOW RANGES`, `crdb_internal.ranges`, and `crdb_internal.ranges_no_leases` was deprecated in 23.1 and is now replaced by default with output that's compatible with coalesced ranges (i.e., ranges that pack multiple tables/indexes/partitions into individual ranges). See the [v23.1 release notes]({% link releases/v23.1.md %}) for `SHOW RANGES` for more details. [#102961][#102961] -- When a deployment is configured to use a time zone (new feature) for log file output using formats `crdb-v1` or `crdb-v2`, it becomes impossible to process the new output log files using the [`cockroach debug merge-logs` command]({% link v23.2/cockroach-debug-merge-logs.md %}) from a previous version. The newest `cockroach debug merge-logs` code must be used instead. [#104265][#104265] -- When customizing the [SQL shell's interactive prompt]({% link v23.2/cockroach-sql.md %}), the special sequence `%M` now expands to the full host name instead of the combination of host name and port number. To include the port number explicitly, use `%>`. The special sequence `%m` now expands to the host name up to the first period. [#105137][#105137] -- The [`cockroach debug zip`]({% link v23.2/cockroach-debug-zip.md %}) command stores data retrieved from SQL tables in the remote cluster using the TSV format by default. [#107474][#107474] -- The [`changefeed.protect_timestamp.max_age` cluster setting]({% link v23.2/protect-changefeed-data.md %}) will only apply to newly created changefeeds in v23.2. For existing changefeeds, you can set the [`protect_data_from_gc_on_pause`]({% link v23.2/create-changefeed.md %}#protect-pause) option so that changefeeds do not experience infinite retries and accumulate protected change data. You can use the [`ALTER CHANGEFEED`]({% link v23.2/alter-changefeed.md %}) statement to add `protect_data_from_gc_on_pause` to existing changefeeds. 
[#103539][#103539] +- The pre-v23.1 output produced by `SHOW RANGES`, `crdb_internal.ranges`, and `crdb_internal.ranges_no_leases` was deprecated in 23.1 and is now replaced by default with output that's compatible with coalesced ranges (i.e., ranges that pack multiple tables/indexes/partitions into individual ranges). See the [v23.1 release notes]({% link releases/v23.1.md %}) for `SHOW RANGES` for more details. #102961 +- When a deployment is configured to use a time zone (new feature) for log file output using formats `crdb-v1` or `crdb-v2`, it becomes impossible to process the new output log files using the [`cockroach debug merge-logs` command]({% link v23.2/cockroach-debug-merge-logs.md %}) from a previous version. The newest `cockroach debug merge-logs` code must be used instead. #104265 +- When customizing the [SQL shell's interactive prompt]({% link v23.2/cockroach-sql.md %}), the special sequence `%M` now expands to the full host name instead of the combination of host name and port number. To include the port number explicitly, use `%>`. The special sequence `%m` now expands to the host name up to the first period. #105137 +- The [`cockroach debug zip`]({% link v23.2/cockroach-debug-zip.md %}) command stores data retrieved from SQL tables in the remote cluster using the TSV format by default. #107474 +- The [`changefeed.protect_timestamp.max_age` cluster setting]({% link v23.2/protect-changefeed-data.md %}) will only apply to newly created changefeeds in v23.2. For existing changefeeds, you can set the [`protect_data_from_gc_on_pause`]({% link v23.2/create-changefeed.md %}#protect-pause) option so that changefeeds do not experience infinite retries and accumulate protected change data. You can use the [`ALTER CHANGEFEED`]({% link v23.2/alter-changefeed.md %}) statement to add `protect_data_from_gc_on_pause` to existing changefeeds. 
#103539 - The `changefeed.new_pubsub_sink_enabled` cluster setting is now enabled by default, which improves changefeed throughput. With this setting enabled, the top-level fields in JSON-encoded messages are capitalized: `{Key: ..., Value: ..., Topic: ...}`. After upgrading to CockroachDB v23.2, you may need to reconfigure downstream systems to parse the new message format. If you disable this setting, changefeeds emitting to Pub/Sub sinks with JSON-encoded events have the top-level message fields all lowercase: `{key: ..., value: ..., topic: ...}`.

Security updates

-- Users who have the [`CREATEROLE` role option]({% link v23.2/grant.md %}) can now grant and revoke role membership in any non-admin role. This change also removes the [`sql.auth.createrole_allows_grant_role_membership.enabled` cluster setting]({% link v23.2/cluster-settings.md %}), which was added in v23.1. In v23.2, the cluster setting is effectively always true. [#104376][#104376] +- Users who have the [`CREATEROLE` role option]({% link v23.2/grant.md %}) can now grant and revoke role membership in any non-admin role. This change also removes the [`sql.auth.createrole_allows_grant_role_membership.enabled` cluster setting]({% link v23.2/cluster-settings.md %}), which was added in v23.1. In v23.2, the cluster setting is effectively always true. #104376

General changes

-- You can now set [Docker]({% link v23.2/start-a-local-cluster-in-docker-mac.md %}) command arguments using the `COCKROACH_ARGS` environment variable. [#98899][#98899] -- Extended the [`/api/v2/nodes` API endpoint](https://www.cockroachlabs.com/docs/api/cluster/v2.html) with a `storeMetrics` field. [#98208][#98208] -- CockroachDB would previously use separate [ranges]({% link v23.2/architecture/distribution-layer.md %}) for each table, index, or partition. This is no longer true. It is possible now to have multiple tables, indexes, and partitions get packed into the same range. For users with many of these schema objects, this will reduce the total range count in their clusters. This is especially true if individual tables, indexes, or partitions are smaller than the default configured maximum range size (controlled using [zone configs]({% link v23.2/zone-config-extensions.md %}), specifically the `range_max_bytes` parameter). We made this change to improve scalability with respect to the number of schema objects, since the underlying range count is now no longer a bottleneck. Users upgrading from v22.2, when finalizing their upgrade, may observe a round of range merges and snapshot transfers (to power said range merges) as a result of this change. If users want to opt-out of this optimization, they can configure the following cluster setting: `SET CLUSTER SETTING spanconfig.storage_coalesce_adjacent.enabled = false;` [#98820][#98820] -- [`EXPORT INTO PARQUET`]({% link v23.2/export.md %}) will now use a new internal implementation for writing Parquet files using the Parquet spec version 2.6. There should be no significant impact to the structure of files being written. There is one minor change: all columns written to Parquet files will be nullable (i.e., the Parquet repetition type is `OPTIONAL`). [#104234][#104234] -- [Spatial libraries]({% link v23.2/spatial-data-overview.md %}) for CockroachDB now rely on GEOS 3.11 instead of GEOS 3.8. 
[#106642][#106642] -- CockroachDB no longer distributes `libgeos` for the experimental [Windows build]({% link v23.2/install-cockroachdb-windows.md %}). Users can instead install GEOS directly from the source: . [#106642][#106642] -- The Formatting of byte figures in Pebble logs has been improved. Tools that parse these logs might need updating. [#107392][#107392] -- CockroachDB now has a new [CLI option]({% link v23.2/cockroach-commands.md %}), `--experimental-shared-storage` to rebalance data faster from node to node. [#105839][#105839] -- Fixed a bug where, internally, if we print a 0 decimal with a very low exponent we use excessive memory. This is not possible when using the [DECIMAL]({% link v23.2/decimal.md %}) type, but may be possible when using `crdb_internal` functions. [#110527][#110527] +- You can now set [Docker]({% link v23.2/start-a-local-cluster-in-docker-mac.md %}) command arguments using the `COCKROACH_ARGS` environment variable. #98899 +- Extended the [`/api/v2/nodes` API endpoint](https://www.cockroachlabs.com/docs/api/cluster/v2.html) with a `storeMetrics` field. #98208 +- CockroachDB would previously use separate [ranges]({% link v23.2/architecture/distribution-layer.md %}) for each table, index, or partition. This is no longer true. It is possible now to have multiple tables, indexes, and partitions get packed into the same range. For users with many of these schema objects, this will reduce the total range count in their clusters. This is especially true if individual tables, indexes, or partitions are smaller than the default configured maximum range size (controlled using [zone configs]({% link v23.2/zone-config-extensions.md %}), specifically the `range_max_bytes` parameter). We made this change to improve scalability with respect to the number of schema objects, since the underlying range count is now no longer a bottleneck. 
Users upgrading from v22.2, when finalizing their upgrade, may observe a round of range merges and snapshot transfers (to power said range merges) as a result of this change. If users want to opt-out of this optimization, they can configure the following cluster setting: `SET CLUSTER SETTING spanconfig.storage_coalesce_adjacent.enabled = false;` #98820 +- [`EXPORT INTO PARQUET`]({% link v23.2/export.md %}) will now use a new internal implementation for writing Parquet files using the Parquet spec version 2.6. There should be no significant impact to the structure of files being written. There is one minor change: all columns written to Parquet files will be nullable (i.e., the Parquet repetition type is `OPTIONAL`). #104234 +- [Spatial libraries]({% link v23.2/spatial-data-overview.md %}) for CockroachDB now rely on GEOS 3.11 instead of GEOS 3.8. #106642 +- CockroachDB no longer distributes `libgeos` for the experimental [Windows build]({% link v23.2/install-cockroachdb-windows.md %}). Users can instead install GEOS directly from the source: . #106642 +- The formatting of byte figures in Pebble logs has been improved. Tools that parse these logs might need updating. #107392 +- CockroachDB now has a new [CLI option]({% link v23.2/cockroach-commands.md %}), `--experimental-shared-storage` to rebalance data faster from node to node. #105839 +- Fixed a bug where, internally, if we print a 0 decimal with a very low exponent we use excessive memory. This is not possible when using the [DECIMAL]({% link v23.2/decimal.md %}) type, but may be possible when using `crdb_internal` functions. #110527

{{ site.data.products.enterprise }} edition changes

-- The [`kafka_sink_config`]({% link v23.2/advanced-changefeed-configuration.md %}) `Compression` and `RequiredAcks` options are now case-insensitive. [#100929][#100929] -- [Changefeeds]({% link v23.2/change-data-capture-overview.md %}) emit significantly fewer duplicate messages during node and cluster restarts. [#102717][#102717] -- CockroachDB has a new `changefeed.protect_timestamp.max_age` setting (by default 4 days), which will cancel running changefeed jobs if they fail to make forward progress for a period of time. This setting is used if the explicit `gc_protect_expires_after` option is not set. In addition, the `protect_data_from_gc_on_pause` option has been deprecated. This option is no longer needed since changefeed jobs always protect data. [#103539][#103539] -- Changefeeds now officially support the Parquet format using specification version 2.6. It is only usable with the [cloud storage sink]({% link v23.2/changefeed-sinks.md %}#cloud-storage-sink). The syntax to use Parquet is: `CREATE CHANGEFEED FOR foo INTO ... WITH format=parquet`. It supports all standard changefeed options and features including CDC transformations, except it does not support the `topic_in_value` option. [#104528][#104528] -- Changefeeds that create files over an HTTP connection may now be specified using `INTO 'file-https://'` to disambiguate with `webhook-https`. [#107572][#107572] -- The `pgcrypto` [functions]({% link v23.2/functions-and-operators.md %}) `encrypt`, `encrypt_iv`, `decrypt`, and `decrypt_iv` are now implemented. These functions require an enterprise license on a CCL distribution. [#105654][#105654] -- CockroachDB now paces the rangefeed goroutine creation rate to improve scheduler latency. This improves observability by adding an additional column in the `crdb_internal.active_rangefeed` table to indicate if the range is currently in catchup scan mode. 
[#109346][#109346] +- The [`kafka_sink_config`]({% link v23.2/advanced-changefeed-configuration.md %}) `Compression` and `RequiredAcks` options are now case-insensitive. #100929 +- [Changefeeds]({% link v23.2/change-data-capture-overview.md %}) emit significantly fewer duplicate messages during node and cluster restarts. #102717 +- CockroachDB has a new `changefeed.protect_timestamp.max_age` setting (by default 4 days), which will cancel running changefeed jobs if they fail to make forward progress for a period of time. This setting is used if the explicit `gc_protect_expires_after` option is not set. In addition, the `protect_data_from_gc_on_pause` option has been deprecated. This option is no longer needed since changefeed jobs always protect data. #103539 +- Changefeeds now officially support the Parquet format using specification version 2.6. It is only usable with the [cloud storage sink]({% link v23.2/changefeed-sinks.md %}#cloud-storage-sink). The syntax to use Parquet is: `CREATE CHANGEFEED FOR foo INTO ... WITH format=parquet`. It supports all standard changefeed options and features including CDC transformations, except it does not support the `topic_in_value` option. #104528 +- Changefeeds that create files over an HTTP connection may now be specified using `INTO 'file-https://'` to disambiguate with `webhook-https`. #107572 +- The `pgcrypto` [functions]({% link v23.2/functions-and-operators.md %}) `encrypt`, `encrypt_iv`, `decrypt`, and `decrypt_iv` are now implemented. These functions require an enterprise license on a CCL distribution. #105654 +- CockroachDB now paces the rangefeed goroutine creation rate to improve scheduler latency. This improves observability by adding an additional column in the `crdb_internal.active_rangefeed` table to indicate if the range is currently in catchup scan mode. #109346

SQL language changes

-- Fixed the helper message on [UPDATE]({% link v23.2/update.md %}) SQL statements to include the optional FROM cause. [#98709][#98709] -- CockroachDB now supports enabling forward [indexes]({% link v23.2/indexes.md %}) and ordering on [JSON]({% link v23.2/jsonb.md %}) values. [#99275][#99275] +- Fixed the helper message on [UPDATE]({% link v23.2/update.md %}) SQL statements to include the optional FROM clause. #98709 +- CockroachDB now supports enabling forward [indexes]({% link v23.2/indexes.md %}) and ordering on [JSON]({% link v23.2/jsonb.md %}) values. #99275 - Added a new column `visibility` to `crdb_internal.table_indexes` and `information_schema.statistics`. Also added a new column `visibility` to the output of following SQL statements: {% include_cached copy-clipboard.html %} @@ -70,35 +70,35 @@ Release Date: September 26, 2023 SHOW KEYS FROM DATABASE (database_name); ~~~ - This new column contains a floating point number specifying the level of visibility of the index, from 0 (not visible) to 1 (fully visible). If the value is between 0 and 1, the index will be visible to the corresponding fraction of queries. [#101334][#101334] - -- `ALTER INDEX ... VISIBILITY ...` is now supported. It can change an index visibility to any visibility between 0.0 and 1.0. Visibility 0.0 means the index is not visible to the [optimizer]({% link v23.2/cost-based-optimizer.md %}), while visibility 1.0 means the index is fully visible. A value in the range between 0.0 and 1.0 means the index will be visible to the corresponding fraction of queries. [#87301][#87301] -- CockroachDB now has support for non-aggregate expressions involving columns outside of the grouping columns when the grouping columns include all key columns of a unique index and those key columns are not nullable. [#101675][#101675] -- CockroachDB now supports [`CREATE INDEX ... VISIBILITY ...`]({% link v23.2/create-index.md %}) and [`CREATE TABLE ... (... INDEX (...) 
VISIBILITY ...)`]({% link v23.2/create-table.md %}). This allows users to set the index visibility to any visibility between 0.0 and 1.0. Visibility 0.0 means the index is not visible to the optimizer, while visibility 1.0 means the index is fully visible. A value in the range between 0.0 and 1.0 means the index will be visible to the corresponding fraction of queries. [#101812][#101812] -- [Row level TTL]({% link v23.2/row-level-ttl.md %}) now supports `DESC` order primary key columns. [#101869][#101869] -- Added the `ST_BdPolyFromText` [built-in]({% link v23.2/functions-and-operators.md %}#spatial-functions) which copies the behavior of the PostGIS function. Takes in only a multilinestring geometry and returns a polygon. It will return an error if anything other than a multilinestring is input, and will return an error if internally a multipolygon is created for some reason. `NULL` inputs also return `NULL`. [#102708][#102708] -- [`SHOW SCHEDULES`]({% link v23.2/show-schedules.md %}) now shows the schedule options with which the schedules were created. `SHOW SCHEDULES FOR BACKUP` additionally shows if the schedule is a full or incremental backup schedule. [#102890][#102890] -- You can no longer use `PREPARE` with [`EXPLAIN ANALYZE`]({% link v23.2/explain-analyze.md %}) statements. Previously, this was allowed, but attempts to `EXECUTE` the prepared `EXPLAIN ANALYZE` statements would result in an error. [#103259][#103259] -- `ttl_expiration_expression` now allows stable operators and functions. This allows intervals to be directly added to `TIMESTAMPTZ` expressions. See . [#102974][#102974] -- CockroachDB now allows `INSERT` commands in [UDF]({% link v23.2/user-defined-functions.md %}) statement bodies. [#102773][#102773] -- CockroachDB now allows `UPDATE` and `UPSERT` commands in UDF statement bodies. [#102773][#102773] + This new column contains a floating point number specifying the level of visibility of the index, from 0 (not visible) to 1 (fully visible). 
If the value is between 0 and 1, the index will be visible to the corresponding fraction of queries. #101334 + +- `ALTER INDEX ... VISIBILITY ...` is now supported. It can change an index visibility to any visibility between 0.0 and 1.0. Visibility 0.0 means the index is not visible to the [optimizer]({% link v23.2/cost-based-optimizer.md %}), while visibility 1.0 means the index is fully visible. A value in the range between 0.0 and 1.0 means the index will be visible to the corresponding fraction of queries. #87301 +- CockroachDB now has support for non-aggregate expressions involving columns outside of the grouping columns when the grouping columns include all key columns of a unique index and those key columns are not nullable. #101675 +- CockroachDB now supports [`CREATE INDEX ... VISIBILITY ...`]({% link v23.2/create-index.md %}) and [`CREATE TABLE ... (... INDEX (...) VISIBILITY ...)`]({% link v23.2/create-table.md %}). This allows users to set the index visibility to any visibility between 0.0 and 1.0. Visibility 0.0 means the index is not visible to the optimizer, while visibility 1.0 means the index is fully visible. A value in the range between 0.0 and 1.0 means the index will be visible to the corresponding fraction of queries. #101812 +- [Row level TTL]({% link v23.2/row-level-ttl.md %}) now supports `DESC` order primary key columns. #101869 +- Added the `ST_BdPolyFromText` [built-in]({% link v23.2/functions-and-operators.md %}#spatial-functions) which copies the behavior of the PostGIS function. Takes in only a multilinestring geometry and returns a polygon. It will return an error if anything other than a multilinestring is input, and will return an error if internally a multipolygon is created for some reason. `NULL` inputs also return `NULL`. #102708 +- [`SHOW SCHEDULES`]({% link v23.2/show-schedules.md %}) now shows the schedule options with which the schedules were created. 
`SHOW SCHEDULES FOR BACKUP` additionally shows if the schedule is a full or incremental backup schedule. #102890 +- You can no longer use `PREPARE` with [`EXPLAIN ANALYZE`]({% link v23.2/explain-analyze.md %}) statements. Previously, this was allowed, but attempts to `EXECUTE` the prepared `EXPLAIN ANALYZE` statements would result in an error. #103259 +- `ttl_expiration_expression` now allows stable operators and functions. This allows intervals to be directly added to `TIMESTAMPTZ` expressions. #102974 +- CockroachDB now allows `INSERT` commands in [UDF]({% link v23.2/user-defined-functions.md %}) statement bodies. #102773 +- CockroachDB now allows `UPDATE` and `UPSERT` commands in UDF statement bodies. #102773 - The `READ COMMITTED` [isolation level]({% link v23.2/transactions.md %}#isolation-levels) is now supported. It can be used in the following ways: - When starting a transaction, use `BEGIN ISOLATION LEVEL READ COMMITTED`. - After starting a transaction, but before performing reads or writes, use `SET TRANSACTION ISOLATION READ COMMITTED`. - Configure it as the default isolation level using the `default_transaction_isolation` session variable. To see the isolation level of the currently running transaction, use either `SHOW TRANSACTION ISOLATION LEVEL` or `SHOW transaction_isolation`. - [#103482][#103482] - -- Added version gates which require all nodes in a given cluster to have a minimum binary version number, which in turn is required for creating forward indexes on JSON columns and for ordering JSON columns. [#101932][#101932] -- CockroachDB now allows `DELETE` commands in UDF statement bodies. [#103531][#103531] -- Added a new cluster setting `sql.auth.public_schema_create_privilege.enabled` which controls whether users receive [`CREATE` privileges]({% link v23.2/grant.md %}) on the public schema or not. The setting applies at the time that the public schema is created, which happens whenever a database is created. 
The setting is `true` by default. [#103598][#103598] -- [`EXPLAIN (DDL)`]({% link v23.2/explain.md %}) statements now have descriptor, index, column, constraint, and other ID values decorated with names when available. There is now also a new `EXPLAIN (DDL, SHAPE)` statement that provides information on costly operations planned by the declarative schema changer, like which index backfills and validations will get performed. [#103930][#103930] -- A new statistic `KV pairs read` is now exposed on [`EXPLAIN ANALYZE`]({% link v23.2/explain-analyze.md %}) output in some cases (when this number is different from the `KV rows read` statistic or when the `VERBOSE` option is requested). This new statistic is also added to the telemetry sampled query events. [#104079][#104079] -- The `KV rows read` statistic in `EXPLAIN ANALYZE` output has been renamed to `KV rows decoded` to better reflect its meaning. [#104079][#104079] -- Table names are now allowed in `SELECT` lists inside [view]({% link v23.2/views.md %}) and UDF definitions. [#104929][#104929] -- [`SHOW JOB WITH EXECUTION DETAILS`]({% link v23.2/show-jobs.md %}) for a backup job will regenerate the DistSQL plan diagram with per-node and per-processor progress information. This will help users better understand the state of a running backup job. [#103145][#103145] -- The `crdb_internal.node_transactions` and `crdb_internal.cluster_transactions` tables now have columns for `isolation_level`, `priority`, and `quality_of_service`. [#105009][#105009] + #103482 + +- Added version gates which require all nodes in a given cluster to have a minimum binary version number, which in turn is required for creating forward indexes on JSON columns and for ordering JSON columns. #101932 +- CockroachDB now allows `DELETE` commands in UDF statement bodies. 
#103531 +- Added a new cluster setting `sql.auth.public_schema_create_privilege.enabled` which controls whether users receive [`CREATE` privileges]({% link v23.2/grant.md %}) on the public schema or not. The setting applies at the time that the public schema is created, which happens whenever a database is created. The setting is `true` by default. #103598 +- [`EXPLAIN (DDL)`]({% link v23.2/explain.md %}) statements now have descriptor, index, column, constraint, and other ID values decorated with names when available. There is now also a new `EXPLAIN (DDL, SHAPE)` statement that provides information on costly operations planned by the declarative schema changer, like which index backfills and validations will get performed. #103930 +- A new statistic `KV pairs read` is now exposed on [`EXPLAIN ANALYZE`]({% link v23.2/explain-analyze.md %}) output in some cases (when this number is different from the `KV rows read` statistic or when the `VERBOSE` option is requested). This new statistic is also added to the telemetry sampled query events. #104079 +- The `KV rows read` statistic in `EXPLAIN ANALYZE` output has been renamed to `KV rows decoded` to better reflect its meaning. #104079 +- Table names are now allowed in `SELECT` lists inside [view]({% link v23.2/views.md %}) and UDF definitions. #104929 +- [`SHOW JOB WITH EXECUTION DETAILS`]({% link v23.2/show-jobs.md %}) for a backup job will regenerate the DistSQL plan diagram with per-node and per-processor progress information. This will help users better understand the state of a running backup job. #103145 +- The `crdb_internal.node_transactions` and `crdb_internal.cluster_transactions` tables now have columns for `isolation_level`, `priority`, and `quality_of_service`. #105009 - The [`SHOW RANGES`]({% link v23.2/show-ranges.md %}) command will now emit span statistics when the `DETAILS` option is specified. The statistics are included in a new column named `span_stats`, as a `JSON` object. 
The statistics are calculated for the identifier of each row. `SHOW RANGES WITH DETAILS` will compute span statistics for each range. `SHOW RANGES WITH TABLES, DETAILS` will compute span statistics for each table, and so on. The `span_stats` `JSON` object has the following keys: - `approximate_disk_bytes` @@ -117,9 +117,9 @@ Release Date: September 26, 2023 `intent_count` is the number of keys tracked under `intent_bytes`. It is equal to the number of meta keys in the system with a non-empty Transaction proto. `intent_bytes` is the number of bytes in intent key-value pairs (without their meta keys). - [#103128][#103128] + #103128 -- Introduced the `pg_lsn` data type, which is used to store the `lsn` associated with replication. [#105031][#105031] +- Introduced the `pg_lsn` data type, which is used to store the `lsn` associated with replication. #105031 - Users now can issue one [`ALTER TABLE` statement]({% link v23.2/alter-table.md %}) with a combination of any number of `ADD COLUMN`, any number of `DROP COLUMN`, one `ALTER PRIMARY KEY`, and any number of `ADD CONSTRAINT` clauses. For example, with this PR, we now support statements like: {% include_cached copy-clipboard.html %} @@ -127,70 +127,70 @@ Release Date: September 26, 2023 CREATE TABLE t (i INT PRIMARY KEY, j INT NOT NULL, k INT NOT NULL); ALTER TABLE t ADD COLUMN p INT DEFAULT 30, ALTER PRIMARY KEY USING COLUMNS (j), DROP COLUMN k, ADD CHECK (i > 0); ~~~ - [#99526][#99526] - -- Added the ability to add numeric values to LSNs, or sub a decimal value from a LSN. [#105326][#105326] -- Implemented the `pg_lsn - pg_lsn = decimal` built-in function, which subtracts 2 LSNs to return a decimal. [#105326][#105326] -- Added limited support for scalar PL/pgSQL functions. Supported statements are variable declarations, variable assignments, `IF` statements, simple `LOOP` statements (with no conditions), `EXIT` and `CONTINUE` statements, and `RETURN` statements. 
[#104755][#104755] -- Implemented the spatial built-in `ST_AsMVTGeom`. [#105530][#105530] -- `Pg_class`'s `relreplident` field was previously unpopulated. It is now populated with `d` for all tables (as each table has a primary key) and `n` otherwise. [#106242][#106242] -- Added the `pg_sequence_last_value` built-in function, which returns the last value generated by the sequence. [#106445][#106445] -- [`RESTORE`]({% link v23.2/restore.md %}) can now be passed a `WITH EXECUTION LOCALITY` option similar to [`BACKUP`]({% link v23.2/backup.md %}), to restrict execution of the job to nodes with matching localities. [#104439][#104439] -- Added the `REPLICATION` user role option, which allows a user to use the streaming replication protocol. There is a corresponding [`REPLICATION` system privilege]({% link v23.2/grant.md %}). [#106082][#106082] -- A new view-only [session variable]({% link v23.2/session-variables.md %}), `max_connections` was added. This can be used with `SHOW` to view the maximum amount of non-superuser SQL connections allowed at a given time. [#106952][#106952] -- Added the `nameconcatoid` built-in function, which concatenates a name with an OID. [#105944][#105944] -- The `pg_catalog.pg_language` table is now populated with data about the languages used to define functions. [#105944][#105944] -- The `information_schema.routines` view is now populated with information about functions. [#105944][#105944] -- The `information_schema.parameters` table is now populated with information about function parameters. [#105944][#105944] -- Added support for the PLpgSQL `RAISE` statement, which allows sending notices to the client and raising errors. Currently the notice is only sent to the client. Support for logging notices will be added in a future release. [#106351][#106351] -- The `public` pseudo-role now receives the `EXECUTE` privilege by default for all user-defined functions that are created. This can be adjusted by using `ALTER DEFAULT PRIVILEGES`. 
[#107317][#107317] -- The `crdb_interanal.node_statement_statistics` table redacts the error message if the user has the `VIEWACTIVITYREDACTED` privilege, and does not redact the error message if the user has `VIEWACTIVITY`. If the user has both, `VIEWACTIVITYREDACTED` takes precedence and the last error is redacted. [#107076][#107076] -- The `crdb_internal.cluster_locks` table now has a `isolation_level` column indicating the isolation level. [#107309][#107309] -- In `CommonSQLExecDetails`, which is emitted as part of the SQL audit logs, SQL exec logs, and telemetry events, there is a new field: `StmtPosInTxn`. It represents the statement's index in the transaction, starting at 1. [#107081][#107081] -- `cluster_logical_timestamp` now returns an error when called at isolation levels lower than `SERIALIZABLE`. [#107090][#107090] + #99526 + +- Added the ability to add numeric values to LSNs, or sub a decimal value from a LSN. #105326 +- Implemented the `pg_lsn - pg_lsn = decimal` built-in function, which subtracts 2 LSNs to return a decimal. #105326 +- Added limited support for scalar PL/pgSQL functions. Supported statements are variable declarations, variable assignments, `IF` statements, simple `LOOP` statements (with no conditions), `EXIT` and `CONTINUE` statements, and `RETURN` statements. #104755 +- Implemented the spatial built-in `ST_AsMVTGeom`. #105530 +- `Pg_class`'s `relreplident` field was previously unpopulated. It is now populated with `d` for all tables (as each table has a primary key) and `n` otherwise. #106242 +- Added the `pg_sequence_last_value` built-in function, which returns the last value generated by the sequence. #106445 +- [`RESTORE`]({% link v23.2/restore.md %}) can now be passed a `WITH EXECUTION LOCALITY` option similar to [`BACKUP`]({% link v23.2/backup.md %}), to restrict execution of the job to nodes with matching localities. 
#104439 +- Added the `REPLICATION` user role option, which allows a user to use the streaming replication protocol. There is a corresponding [`REPLICATION` system privilege]({% link v23.2/grant.md %}). #106082 +- A new view-only [session variable]({% link v23.2/session-variables.md %}), `max_connections` was added. This can be used with `SHOW` to view the maximum amount of non-superuser SQL connections allowed at a given time. #106952 +- Added the `nameconcatoid` built-in function, which concatenates a name with an OID. #105944 +- The `pg_catalog.pg_language` table is now populated with data about the languages used to define functions. #105944 +- The `information_schema.routines` view is now populated with information about functions. #105944 +- The `information_schema.parameters` table is now populated with information about function parameters. #105944 +- Added support for the PLpgSQL `RAISE` statement, which allows sending notices to the client and raising errors. Currently the notice is only sent to the client. Support for logging notices will be added in a future release. #106351 +- The `public` pseudo-role now receives the `EXECUTE` privilege by default for all user-defined functions that are created. This can be adjusted by using `ALTER DEFAULT PRIVILEGES`. #107317 +- The `crdb_internal.node_statement_statistics` table redacts the error message if the user has the `VIEWACTIVITYREDACTED` privilege, and does not redact the error message if the user has `VIEWACTIVITY`. If the user has both, `VIEWACTIVITYREDACTED` takes precedence and the last error is redacted. #107076 +- The `crdb_internal.cluster_locks` table now has an `isolation_level` column indicating the isolation level. #107309 +- In `CommonSQLExecDetails`, which is emitted as part of the SQL audit logs, SQL exec logs, and telemetry events, there is a new field: `StmtPosInTxn`. It represents the statement's index in the transaction, starting at 1. 
#107081 +- `cluster_logical_timestamp` now returns an error when called at isolation levels lower than `SERIALIZABLE`. #107090 - `EXPLAIN ANALYZE` output now includes: - The isolation level of the statement's transaction. - The priority of the statement's transaction. - The quality of service level of the statement's transaction. - [#105857][#105857] - -- Added a new session variable, `enable_implicit_fk_locking_for_serializable`, which controls locking during foreign key checks under `SERIALIZABLE` isolation. With this set to `true`, foreign key checks of the referenced (parent) table, such as those performed during an `INSERT` or `UPDATE` of the referencing (child) table, will lock the referenced row using `SELECT FOR SHARE` locking. This is somewhat analogous to the existing `enable_implicit_select_for_update` variable but applies to the foreign key checks of a mutation statement instead of the initial row fetch. Under weaker isolation levels such as read committed, `SELECT FOR SHARE` locking will always be used to ensure the database maintains the foreign key constraint, regardless of the current setting of `enable_implicit_fk_locking_for_serializable`. [#105857][#105857] -- Add a new session variable, `enable_durable_locking_for_serializable`, which controls locking durability under `SERIALIZABLE` isolation. With this set to true, `SELECT FOR UPDATE` locks, `SELECT FOR SHARED` locks, and constraint check locks (e.g., locks acquired during foreign key checks if `enable_implicit_fk_locking_for_serializable` is set to `true`) will be guaranteed-durable under serializable isolation, meaning they will always be held to transaction commit. These locks are always guaranteed-durable under weaker isolation levels. By default, under serializable isolation these locks are best-effort rather than guaranteed-durable, meaning in some cases (e.g., leaseholder transfer, node loss, etc.) they could be released before the transaction commits. 
Serializable isolation does not rely on locking for correctness, only using it to improve performance under contention, so this default is a deliberate choice to avoid the performance overhead of lock replication. [#107749][#107749] -- The cluster setting `server.cpu_profile.enabled` has been removed. `server.cpu_profile.cpu_usage_combined_threshold` can enable and disable CPU profiling. [#107717][#107717] -- Added support for `CONSTANT` variable declarations in PLpgSQL routines. Any assignment to a variable declared with the `CONSTANT` keyword will raise a compile-time error. [#107682][#107682] -- Added a new syntax to `SHOW DEFAULT PRIVILEGES`, `SHOW DEFAULT PRIVILEGES FOR GRANTEE `, that shows the default privileges that a grantee received. [#107953][#107953] -- The Statement diagnostics feature has been extended to support collecting a bundle for a particular plan. Namely, the existing fingerprint-based matching has been extended to also include plan-gist-based matching. Such bundles will miss a couple of things: `plan.txt` file as well as the tracing of the optimizer. At the moment, the feature is only exposed via an overload to the `crdb_internal.request_statement_bundle` built-in function. We now also support "anti-match": collecting a bundle for any plan other than the provided plan gist. [#105477][#105477] -- [`SHOW BACKUP`]({% link v23.2/show-backup.md %})'s timestamp columns are now `TIMESTAMPTZ`, meaning they render in the session offset. [#108290][#108290] -- Attempting to [drop a column]({% link v23.2/alter-table.md %}#drop-column) when safe updates are enabled (`sql_safe_updates = on`) now additionally warns users that indexes referencing that column will be automatically dropped. [#108047][#108047] -- `NOTICE`s are now emitted for each index dropped by an `ALTER TABLE ... DROP COLUMN ...` statement. 
[#108047][#108047] -- `SHOW JOBS` now returns times (`created`, `last_run`, and so on) using the `TIMESTAMPTZ` column type instead of the `TIMESTAMP` type, meaning they are now rendered using the session offset. [#108353][#108353] -- Added a cluster setting `sql.schema.force_declarative_statements` to enable/disable DDL in the [declarative schema changer]({% link v23.2/online-schema-changes.md %}). [#107815][#107815] -- Added the new built-in functions `workload_index_recs()` and `workload_index_recs(TIMESTAMPTZ)`, which return workload level index recommendations (columns of string, each string represent an index recommendation) from statement level index recommendations (as candidates) in `system.statement_statistics`. If the `TIMESTAMPTZ` is given, it will only consider those candidates generated after that `TIMESTAMPTZ` value. [#106525][#106525] -- Added support for specifying PLpgSQL `IF` statements with `ELSIF` branches. [#108211][#108211] -- The admin API database details endpoint now returns authoritative range statistics. [#108037][#108037] -- Added the `max_retries_for_read_committed` session variable. It defaults to 10, and determines the number of times an individual statement in an explicit `READ COMMITTED` transaction will be retried if it encounters a retryable transaction error. [#107044][#107044] -- Added support for the execution of PLpgSQL functions with exception blocks. This allows a PLpgSQL function to catch and handle arbitrary errors it encounters during its execution. [#107601][#107601] -- Added the built-in functions `bitmask_or`, `bitmask_and` and `bitmask_xor` for variable-length input bitwise `OR`, `AND`, and `XOR` operations, respectively. [#107863][#107863] -- The `oidvectortypes` built-in has been implemented, which can format `oidvector`. [#108467][#108467] -- Added support for executing SQL statements directly within PLpgSQL routines. 
Note that this currently only applies to the subset of statements that can be executed within SQL UDFs, so `CREATE TABLE` is not supported, for example. `INTO` syntax is also supported. For example, `SELECT * INTO a, b FROM xy;`. [#107920][#107920] -- A SQL client can now request strict atomicity for mixed DDL/DML transactions with the new session variable `strict_ddl_atomicity`, which defaults to `false`. When this variable is set to `true`, CockroachDB will refuse to accept processing those specific DDL statements inside `BEGIN...COMMIT` for which it cannot guarantee atomic processing (other DDL statements are still allowed). Note that schema changes implicit in certain operations (e.g., `IMPORT`) are not protected via the new mechanism and can still fail with `XXA00` errors. [#42063][#42063] -- Fixed an issue where the UI was missing query text and details on the SQL Activity [Transactions page]({% link v23.2/ui-transactions-page.md %}) if there were more than 500 transactions or statements. The `statement_activity` table now includes all statements for a transaction that are in the `transaction_activity` table. [#109424][#109424] -- Added the [`VIEWSYSTEMTABLE` system privilege]({% link v23.2/grant.md %}). Users with this privilege have `SELECT` privileges for all tables in the system database. [#109474][#109474] -- The `statement_activity` and `transaction_activity` tables column `execution_total_cluster_seconds` is now accurate. The `combinedstmts` endpoint returns the correct value for the `StmtsTotalRuntimeSecs` and `TxnsTotalRuntimeSecs` properties. [#109592][#109592] -- The `persistedsqlstats` table maximum size check is now done once an hour instead of every 10 minutes. This reduces the risk of serialization errors on the statistics tables. [#109696][#109696] -- The deprecated session variable `idle_in_session_timeout` is now hidden from introspection. It was previously changed to `idle_session_timeout`. 
[#109872][#109872] -- The session variable `ssl` is now visible through introspection for better compatibility with PostgreSQL. [#109872][#109872] -- The session variable `session_user` is now invisible through introspection, in a way consistent with `session_authorization` and PostgreSQL. [#109872][#109872] -- There is now a `CREATEROLE` system privilege, which is analogous to the existing `CREATEROLE` role option, but can also be inherited by role membership. [#109258][#109258] -- Added the `gen_random_bytes` built-in function, which generates cryptographically secure random bytes. [#110107][#110107] -- The hash function used by [hash-sharded indexes]({% link v23.2/hash-sharded-indexes.md %}) was changed to `mod(fnv32(md5(crdb_internal.datums_to_bytes(columns))), bucket_count)`. Previously, it did not use `md5`. This change was made to enhance the uniformity of bucket distribution in cases when the bucket count is a power of 2, and the columns being sharded have numerical properties that make the `fnv32` function return values with a non-uniformly distributed modulus. [#109374][#109374] -- New datetime built-ins (`make_date`, `make_timestamp`, and `make_timestamptz`) have been added, allowing for the creation of timestamps, timestamps with time zones, and dates. In addition, `date_trunc` now allows for a timestamp to be truncated in a specified timezone (to a specified precision). [#108824][#108824] -- There is now a `CREATELOGIN` system privilege, which is analogous to the existing `CREATELOGIN` role option, but can also be inherited by role membership. [#110220][#110220] -- There is now a `CREATEDB` system privilege, which is analogous to the existing `CREATEDB` role option, but can also be inherited by role membership. [#110220][#110220] -- There is now a `CONTROLJOB` system privilege, which is analogous to the existing `CONTROLJOB` role option, but can also be inherited by role membership. 
[#110220][#110220] -- The `persistedsqlstats` table maximum size check is now done once an hour instead of every 10 minutes. This reduces the risk of serialization errors on the statistics tables. [#110173][#110173] + #105857 + +- Added a new session variable, `enable_implicit_fk_locking_for_serializable`, which controls locking during foreign key checks under `SERIALIZABLE` isolation. With this set to `true`, foreign key checks of the referenced (parent) table, such as those performed during an `INSERT` or `UPDATE` of the referencing (child) table, will lock the referenced row using `SELECT FOR SHARE` locking. This is somewhat analogous to the existing `enable_implicit_select_for_update` variable but applies to the foreign key checks of a mutation statement instead of the initial row fetch. Under weaker isolation levels such as read committed, `SELECT FOR SHARE` locking will always be used to ensure the database maintains the foreign key constraint, regardless of the current setting of `enable_implicit_fk_locking_for_serializable`. #105857 +- Add a new session variable, `enable_durable_locking_for_serializable`, which controls locking durability under `SERIALIZABLE` isolation. With this set to true, `SELECT FOR UPDATE` locks, `SELECT FOR SHARED` locks, and constraint check locks (e.g., locks acquired during foreign key checks if `enable_implicit_fk_locking_for_serializable` is set to `true`) will be guaranteed-durable under serializable isolation, meaning they will always be held to transaction commit. These locks are always guaranteed-durable under weaker isolation levels. By default, under serializable isolation these locks are best-effort rather than guaranteed-durable, meaning in some cases (e.g., leaseholder transfer, node loss, etc.) they could be released before the transaction commits. 
Serializable isolation does not rely on locking for correctness, only using it to improve performance under contention, so this default is a deliberate choice to avoid the performance overhead of lock replication. #107749 +- The cluster setting `server.cpu_profile.enabled` has been removed. `server.cpu_profile.cpu_usage_combined_threshold` can enable and disable CPU profiling. #107717 +- Added support for `CONSTANT` variable declarations in PLpgSQL routines. Any assignment to a variable declared with the `CONSTANT` keyword will raise a compile-time error. #107682 +- Added a new syntax to `SHOW DEFAULT PRIVILEGES`, `SHOW DEFAULT PRIVILEGES FOR GRANTEE `, that shows the default privileges that a grantee received. #107953 +- The Statement diagnostics feature has been extended to support collecting a bundle for a particular plan. Namely, the existing fingerprint-based matching has been extended to also include plan-gist-based matching. Such bundles will miss a couple of things: `plan.txt` file as well as the tracing of the optimizer. At the moment, the feature is only exposed via an overload to the `crdb_internal.request_statement_bundle` built-in function. We now also support "anti-match": collecting a bundle for any plan other than the provided plan gist. #105477 +- [`SHOW BACKUP`]({% link v23.2/show-backup.md %})'s timestamp columns are now `TIMESTAMPTZ`, meaning they render in the session offset. #108290 +- Attempting to [drop a column]({% link v23.2/alter-table.md %}#drop-column) when safe updates are enabled (`sql_safe_updates = on`) now additionally warns users that indexes referencing that column will be automatically dropped. #108047 +- `NOTICE`s are now emitted for each index dropped by an `ALTER TABLE ... DROP COLUMN ...` statement. #108047 +- `SHOW JOBS` now returns times (`created`, `last_run`, and so on) using the `TIMESTAMPTZ` column type instead of the `TIMESTAMP` type, meaning they are now rendered using the session offset. 
#108353 +- Added a cluster setting `sql.schema.force_declarative_statements` to enable/disable DDL in the [declarative schema changer]({% link v23.2/online-schema-changes.md %}). #107815 +- Added the new built-in functions `workload_index_recs()` and `workload_index_recs(TIMESTAMPTZ)`, which return workload level index recommendations (columns of string, each string represent an index recommendation) from statement level index recommendations (as candidates) in `system.statement_statistics`. If the `TIMESTAMPTZ` is given, it will only consider those candidates generated after that `TIMESTAMPTZ` value. #106525 +- Added support for specifying PLpgSQL `IF` statements with `ELSIF` branches. #108211 +- The admin API database details endpoint now returns authoritative range statistics. #108037 +- Added the `max_retries_for_read_committed` session variable. It defaults to 10, and determines the number of times an individual statement in an explicit `READ COMMITTED` transaction will be retried if it encounters a retryable transaction error. #107044 +- Added support for the execution of PLpgSQL functions with exception blocks. This allows a PLpgSQL function to catch and handle arbitrary errors it encounters during its execution. #107601 +- Added the built-in functions `bitmask_or`, `bitmask_and` and `bitmask_xor` for variable-length input bitwise `OR`, `AND`, and `XOR` operations, respectively. #107863 +- The `oidvectortypes` built-in has been implemented, which can format `oidvector`. #108467 +- Added support for executing SQL statements directly within PLpgSQL routines. Note that this currently only applies to the subset of statements that can be executed within SQL UDFs, so `CREATE TABLE` is not supported, for example. `INTO` syntax is also supported. For example, `SELECT * INTO a, b FROM xy;`. #107920 +- A SQL client can now request strict atomicity for mixed DDL/DML transactions with the new session variable `strict_ddl_atomicity`, which defaults to `false`. 
When this variable is set to `true`, CockroachDB will refuse to accept processing those specific DDL statements inside `BEGIN...COMMIT` for which it cannot guarantee atomic processing (other DDL statements are still allowed). Note that schema changes implicit in certain operations (e.g., `IMPORT`) are not protected via the new mechanism and can still fail with `XXA00` errors. #42063 +- Fixed an issue where the UI was missing query text and details on the SQL Activity [Transactions page]({% link v23.2/ui-transactions-page.md %}) if there were more than 500 transactions or statements. The `statement_activity` table now includes all statements for a transaction that are in the `transaction_activity` table. #109424 +- Added the [`VIEWSYSTEMTABLE` system privilege]({% link v23.2/grant.md %}). Users with this privilege have `SELECT` privileges for all tables in the system database. #109474 +- The `statement_activity` and `transaction_activity` tables column `execution_total_cluster_seconds` is now accurate. The `combinedstmts` endpoint returns the correct value for the `StmtsTotalRuntimeSecs` and `TxnsTotalRuntimeSecs` properties. #109592 +- The `persistedsqlstats` table maximum size check is now done once an hour instead of every 10 minutes. This reduces the risk of serialization errors on the statistics tables. #109696 +- The deprecated session variable `idle_in_session_timeout` is now hidden from introspection. It was previously changed to `idle_session_timeout`. #109872 +- The session variable `ssl` is now visible through introspection for better compatibility with PostgreSQL. #109872 +- The session variable `session_user` is now invisible through introspection, in a way consistent with `session_authorization` and PostgreSQL. #109872 +- There is now a `CREATEROLE` system privilege, which is analogous to the existing `CREATEROLE` role option, but can also be inherited by role membership. 
#109258 +- Added the `gen_random_bytes` built-in function, which generates cryptographically secure random bytes. #110107 +- The hash function used by [hash-sharded indexes]({% link v23.2/hash-sharded-indexes.md %}) was changed to `mod(fnv32(md5(crdb_internal.datums_to_bytes(columns))), bucket_count)`. Previously, it did not use `md5`. This change was made to enhance the uniformity of bucket distribution in cases when the bucket count is a power of 2, and the columns being sharded have numerical properties that make the `fnv32` function return values with a non-uniformly distributed modulus. #109374 +- New datetime built-ins (`make_date`, `make_timestamp`, and `make_timestamptz`) have been added, allowing for the creation of timestamps, timestamps with time zones, and dates. In addition, `date_trunc` now allows for a timestamp to be truncated in a specified timezone (to a specified precision). #108824 +- There is now a `CREATELOGIN` system privilege, which is analogous to the existing `CREATELOGIN` role option, but can also be inherited by role membership. #110220 +- There is now a `CREATEDB` system privilege, which is analogous to the existing `CREATEDB` role option, but can also be inherited by role membership. #110220 +- There is now a `CONTROLJOB` system privilege, which is analogous to the existing `CONTROLJOB` role option, but can also be inherited by role membership. #110220 +- The `persistedsqlstats` table maximum size check is now done once an hour instead of every 10 minutes. This reduces the risk of serialization errors on the statistics tables. #110173 - The new [cluster setting]({% link v23.2/cluster-settings.md %}) `sql.txn.read_committed_syntax.enabled`, controls whether transactions run under `READ COMMITTED` or `SERIALIZABLE` isolation. It defaults to `false`. 
When set to `true`, the following statements will configure transactions to run under `READ COMMITTED` isolation: - `BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED` @@ -198,17 +198,17 @@ Release Date: September 26, 2023 - `SET default_transaction_isolation = 'read committed'` - `SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL READ COMMITTED` - [#110624][#110624] + #110624 -- The [cluster setting]({% link v23.2/cluster-settings.md %}) `sql.metrics.statement_details.gateway_node.enabled` now defaults to false, to reduce the number of rows generated in SQL Statistics pages. [#107788][#107788] -- The default value for the [`ttl_job_cron`](https://cockroachlabs.com/docs/v23.2/row-level-ttl) table storage parameter is now `@daily` rather than `@hourly`. This parameter controls the default recurrence of the row-level TTL job. As part of this change, the output of the `SHOW CREATE TABLE` statements now include the `ttl_cron_job` parameter only if it is explicitly set. [#110623][#110623] +- The [cluster setting]({% link v23.2/cluster-settings.md %}) `sql.metrics.statement_details.gateway_node.enabled` now defaults to false, to reduce the number of rows generated in SQL Statistics pages. #107788 +- The default value for the [`ttl_job_cron`](https://cockroachlabs.com/docs/v23.2/row-level-ttl) table storage parameter is now `@daily` rather than `@hourly`. This parameter controls the default recurrence of the row-level TTL job. As part of this change, the output of the `SHOW CREATE TABLE` statements now includes the `ttl_job_cron` parameter only if it is explicitly set. #110623

Operational changes

-- Removed a timeseries metric that has not been reported for several versions. [#100524][#100524] -- Added two new metrics, `range.snapshots.(send|recv)-queue-bytes`, to track the total size of all snapshots waiting in the snapshot queue. [#100942][#100942] -- Exposed a new metric `storage.compactions.duration`, computed by the storage engine, that provides the cumulative time the storage engine has spent in compactions. This duration may exceed time elapsed, because of concurrent compactions, and may be useful in monitoring compaction concurrency. [#103670][#103670] -- Two new store metrics, `range.snapshots.cross-region.sent-bytes` and `range.snapshots.cross-region.rcvd-bytes`, were added to track the aggregate of snapshot bytes sent from and received at a store across different regions. Note that these metrics require the nodes' localities to include a “region” tier key. If a node lacks this key but is involved in cross-region batch activities, an error message will be logged. [#104111][#104111] +- Removed a timeseries metric that has not been reported for several versions. #100524 +- Added two new metrics, `range.snapshots.(send|recv)-queue-bytes`, to track the total size of all snapshots waiting in the snapshot queue. #100942 +- Exposed a new metric `storage.compactions.duration`, computed by the storage engine, that provides the cumulative time the storage engine has spent in compactions. This duration may exceed time elapsed, because of concurrent compactions, and may be useful in monitoring compaction concurrency. #103670 +- Two new store metrics, `range.snapshots.cross-region.sent-bytes` and `range.snapshots.cross-region.rcvd-bytes`, were added to track the aggregate of snapshot bytes sent from and received at a store across different regions. Note that these metrics require the nodes' localities to include a “region” tier key. If a node lacks this key but is involved in cross-region batch activities, an error message will be logged. 
#104111 - Added new store metrics to track the aggregate of snapshot bytes sent from and received at a store across different zones. - `range.snapshots.cross-zone.sent-bytes` @@ -220,7 +220,7 @@ Release Date: September 26, 2023 - Within a node locality, ensure unique region and zone tier keys. - Maintain consistent configuration of region and zone tiers across nodes. - [#104417][#104417] + #104417 - Added new store metrics: @@ -231,7 +231,7 @@ Release Date: September 26, 2023 - `raft.rcvd.cross_zone.bytes` - `raft.sent.cross_zone.bytes` - [#105122][#105122] + #105122 - Added new DistSender metrics: @@ -242,7 +242,7 @@ Release Date: September 26, 2023 - `distsender.batch_requests.cross_zone.bytes` - `distsender.batch_responses.cross_zone.bytes`. - [#103963][#103963] + #103963 - Added new Node metrics: @@ -253,7 +253,7 @@ Release Date: September 26, 2023 - `batch_requests.cross_zone.bytes` - `batch_responses.cross_zone.bytes` - [#104585][#104585] + #104585 - Added new RPC metrics to help you to diagnose RPC connection issues: @@ -265,41 +265,41 @@ Release Date: September 26, 2023 - `rpc.connection.unhealthy` - `rpc.connection.unhealthy_nanos` - [#99191][#99191] - -- Added a new metric `changefeed.lagging_ranges` that shows the number of ranges which are behind in changefeeds. This metric can be used with the `metrics_label` changefeed option. Added a new [changefeed option](https://cockroachlabs.com/docs/v23.2/create-changefeed) `lagging_ranges_threshold`, which is the amount of time a range needs to be behind to be considered lagging. By default this is 3 minutes. Added a new option `lagging_ranges_polling_interval`, which controls how often the lagging ranges calculation is done. This setting defaults to polling every 1 minute. Note that polling adds latency to the metric being updated. For example, if a range falls behind by 3 minutes, the metric may not update for an additional minute afterwards. 
Also note that ranges undergoing an initial scan for longer than the threshold are considered to be lagging. Starting a changefeed with an initial scan on a large table will likely increment the metric for each range in the table. However, as ranges complete the initial scan, the number of ranges will decrease. [#109835][#109835] -- A histogram metric `raft.replication.latency` was added. It tracks the time between evaluation and application of the command. This includes time spent in the quota pool, in replication (including re-proposals) as well as log application, but notably *not* sequencing latency (i.e., contention and latch acquisition). [#106094][#106094] -- The default Raft scheduler concurrency cap has been increased from 96 to 128 workers, scaling with 8 workers per CPU up to the cap. The scheduler concurrency can be controlled using the `COCKROACH_SCHEDULER_CONCURRENCY` environment variable. [#105521][#105521] -- The new cluster setting `server.hot_ranges_request.node.timeout` controls the maximum amount of time that a hot ranges request will spend waiting for a node to provide a response. It defaults to 5 minutes. To disable timeouts, set it to `0`. [#107796][#107796] -- Two new cluster settings control whether intent resolution is subject to admission control: `kv.intent_resolver.send_immediately.bypass_admission_control.enabled` and `kv.intent_resolver.batch.bypass_admission_control.enabled`. [#109932][#109932] -- The new cluster setting `admission.l0_min_size_per_sub_level` reduces the probability of [admission control]({% link v23.2/admission-control.md %}) throttling when there is a sequence of small `memtable` flushes or small files ingested into L0. [#109332][#109332] -- The new cluster setting `kv.intent_resolver.batcher.in_flight_backpressure_limit.enabled` controls whether an in-flight RPC limit is enforced on intent resolution RPCs. It defaults to `false`. 
[#109899][#109899] -- [`BACKUP`]({% link v23.2/backup.md %}) now skips contacting the ranges for tables on which `exclude_data_from_backup` is set, and can thus succeed even if an excluded table is unavailable. [#108627][#108627] -- Span stats requests will return a partial result if the request encounters any errors. Errors that would have previously terminated the request are now included in the response. [#108456][#108456] -- The rangefeed closed timestamp interval controlled by `kv.rangefeed.closed_timestamp_refresh_interval` now defaults to 3 seconds. This affects how often rangefeeds emit resolved timestamps, and thus how often changefeeds can emit checkpoints. Previously, its default value of 0 would fall back to `kv.closed_timestamp.side_transport_interval`, which defaults to 200 milliseconds. Users who rely on the setting `kv.closed_timestamp.side_transport_interval` to control the rangefeed closed timestamp interval should make sure they either set `kv.rangefeed.closed_timestamp_refresh_interval` to 0 to retain the old behavior (preferably before upgrading), or to an appropriate value. [#108667][#108667] -- The default value of `timeout` for `http-servers` [logging sinks]({% link v23.2/configure-logs.md %}#configure-log-sinks) has been changed from `0` (i.e., "no timeout") to `2s`. This is reflected in the `http-defaults` section of the log configuration. Users still maintain the ability to override the timeout, or disable it by explicitly setting it to `0` (e.g. `timeout: 0`). [#109264][#109264] -- [Changefeed]({% link v23.2/change-data-capture-overview.md %}) metrics now include a `changefeed.checkpoint_progress` metric which is similar to `changefeed.max_behind_nanos` but supports metrics labels, as well as a `changefeed.aggregator_progress` metric which can track the progress of individual aggregators (the lowest timestamp for which all aggregators with the label have emitted all values they're responsible for). 
[#108757][#108757] -- Added support for Prometheus native histograms behind an environment variable flag. [#104302][#104302] -- Requests for database details or table details from the UI, or usages of [`SHOW RANGES WITH DETAILS`]{% link v23.2/show-ranges.md %} are no longer subject to errors if the number of requested spans is too large. [#109464][#109464] -- The [`cockroach debug zip`]({% link v23.2/cockroach-debug-zip.md %}) command now has an option to omit goroutine stack dumps. This impacts the creation of `nodes/*/stacks.txt` and `nodes/*/stacks_with_labels.txt` within debug ZIP bundles. Users can opt to exclude these goroutine stacks by using the `--include-goroutine-stacks=false` flag. Note that fetching stack traces for all goroutines is a "stop-the-world" operation, which can momentarily have negative impacts on SQL service latency. Note also that any periodic goroutine dumps previously taken on the node will still be included in `nodes/*/goroutines/*.txt.gz`, as these would have already been generated and don't require any stop-the-world operations. [#110177][#110177] -- New rangefeed metrics help to troubleshoot rangefeed restarts. The metric names have the format `distsender.rangefeed.retry.{reason}`. [#109346][#109346] -- Rangefeeds regularly attempt to push long-running transactions to a future timestamp in order to emit checkpoints. The interval at which this is attempted has been increased from 250 milliseconds to 1 seconds. This is now configurable via the environment variable `COCKROACH_RANGEFEED_PUSH_TXNS_INTERVAL`. [#110332][#110332] + #99191 + +- Added a new metric `changefeed.lagging_ranges` that shows the number of ranges which are behind in changefeeds. This metric can be used with the `metrics_label` changefeed option. Added a new [changefeed option](https://cockroachlabs.com/docs/v23.2/create-changefeed) `lagging_ranges_threshold`, which is the amount of time a range needs to be behind to be considered lagging. 
By default this is 3 minutes. Added a new option `lagging_ranges_polling_interval`, which controls how often the lagging ranges calculation is done. This setting defaults to polling every 1 minute. Note that polling adds latency to the metric being updated. For example, if a range falls behind by 3 minutes, the metric may not update for an additional minute afterwards. Also note that ranges undergoing an initial scan for longer than the threshold are considered to be lagging. Starting a changefeed with an initial scan on a large table will likely increment the metric for each range in the table. However, as ranges complete the initial scan, the number of ranges will decrease. #109835 +- A histogram metric `raft.replication.latency` was added. It tracks the time between evaluation and application of the command. This includes time spent in the quota pool, in replication (including re-proposals) as well as log application, but notably *not* sequencing latency (i.e., contention and latch acquisition). #106094 +- The default Raft scheduler concurrency cap has been increased from 96 to 128 workers, scaling with 8 workers per CPU up to the cap. The scheduler concurrency can be controlled using the `COCKROACH_SCHEDULER_CONCURRENCY` environment variable. #105521 +- The new cluster setting `server.hot_ranges_request.node.timeout` controls the maximum amount of time that a hot ranges request will spend waiting for a node to provide a response. It defaults to 5 minutes. To disable timeouts, set it to `0`. #107796 +- Two new cluster settings control whether intent resolution is subject to admission control: `kv.intent_resolver.send_immediately.bypass_admission_control.enabled` and `kv.intent_resolver.batch.bypass_admission_control.enabled`. 
#109932 +- The new cluster setting `admission.l0_min_size_per_sub_level` reduces the probability of [admission control]({% link v23.2/admission-control.md %}) throttling when there is a sequence of small `memtable` flushes or small files ingested into L0. #109332 +- The new cluster setting `kv.intent_resolver.batcher.in_flight_backpressure_limit.enabled` controls whether an in-flight RPC limit is enforced on intent resolution RPCs. It defaults to `false`. #109899 +- [`BACKUP`]({% link v23.2/backup.md %}) now skips contacting the ranges for tables on which `exclude_data_from_backup` is set, and can thus succeed even if an excluded table is unavailable. #108627 +- Span stats requests will return a partial result if the request encounters any errors. Errors that would have previously terminated the request are now included in the response. #108456 +- The rangefeed closed timestamp interval controlled by `kv.rangefeed.closed_timestamp_refresh_interval` now defaults to 3 seconds. This affects how often rangefeeds emit resolved timestamps, and thus how often changefeeds can emit checkpoints. Previously, its default value of 0 would fall back to `kv.closed_timestamp.side_transport_interval`, which defaults to 200 milliseconds. Users who rely on the setting `kv.closed_timestamp.side_transport_interval` to control the rangefeed closed timestamp interval should make sure they either set `kv.rangefeed.closed_timestamp_refresh_interval` to 0 to retain the old behavior (preferably before upgrading), or to an appropriate value. #108667 +- The default value of `timeout` for `http-servers` [logging sinks]({% link v23.2/configure-logs.md %}#configure-log-sinks) has been changed from `0` (i.e., "no timeout") to `2s`. This is reflected in the `http-defaults` section of the log configuration. Users still maintain the ability to override the timeout, or disable it by explicitly setting it to `0` (e.g. `timeout: 0`). 
#109264 +- [Changefeed]({% link v23.2/change-data-capture-overview.md %}) metrics now include a `changefeed.checkpoint_progress` metric which is similar to `changefeed.max_behind_nanos` but supports metrics labels, as well as a `changefeed.aggregator_progress` metric which can track the progress of individual aggregators (the lowest timestamp for which all aggregators with the label have emitted all values they're responsible for). #108757 +- Added support for Prometheus native histograms behind an environment variable flag. #104302 +- Requests for database details or table details from the UI, or usages of [`SHOW RANGES WITH DETAILS`]({% link v23.2/show-ranges.md %}) are no longer subject to errors if the number of requested spans is too large. #109464 +- The [`cockroach debug zip`]({% link v23.2/cockroach-debug-zip.md %}) command now has an option to omit goroutine stack dumps. This impacts the creation of `nodes/*/stacks.txt` and `nodes/*/stacks_with_labels.txt` within debug ZIP bundles. Users can opt to exclude these goroutine stacks by using the `--include-goroutine-stacks=false` flag. Note that fetching stack traces for all goroutines is a "stop-the-world" operation, which can momentarily have negative impacts on SQL service latency. Note also that any periodic goroutine dumps previously taken on the node will still be included in `nodes/*/goroutines/*.txt.gz`, as these would have already been generated and don't require any stop-the-world operations. #110177 +- New rangefeed metrics help to troubleshoot rangefeed restarts. The metric names have the format `distsender.rangefeed.retry.{reason}`. #109346 +- Rangefeeds regularly attempt to push long-running transactions to a future timestamp in order to emit checkpoints. The interval at which this is attempted has been increased from 250 milliseconds to 1 second. This is now configurable via the environment variable `COCKROACH_RANGEFEED_PUSH_TXNS_INTERVAL`. #110332

Cluster virtualization

When cluster virtualization is enabled: -- A selection box displays in DB Console Metrics pages when you are connected to the system virtual cluster, and allows you to view metrics for a specific virtual cluster. [#103308][#103308] -- A "no data" empty graph state has been added when switching to a virtual cluster with no data. [#103971][#103971] -- A selection box displays on custom charts in the DB Console and allows you to select a specific virtual cluster. [#103780][#103780] -- The name of the virtual cluster, when known, is now reported in logging events. [#108807][#108807] -- When `cockroach debug zip` is run for a cluster with virtualization enabled, data about virtual clusters is now stored in a `virtual` subdirectory rather than a `tenants` subdirectory. [#106117][#106117] -- When cluster virtualization is enabled, the following closed timestamp side-transport settings can be set only from the system virtual cluster: `kv.closed_timestamp.target_duration`, `kv.closed_timestamp.side_transport_interval`, and `kv.closed_timestamp.lead_for_global_reads_override`. [#108678][#108678] +- A selection box displays in DB Console Metrics pages when you are connected to the system virtual cluster, and allows you to view metrics for a specific virtual cluster. #103308 +- A "no data" empty graph state has been added when switching to a virtual cluster with no data. #103971 +- A selection box displays on custom charts in the DB Console and allows you to select a specific virtual cluster. #103780 +- The name of the virtual cluster, when known, is now reported in logging events. #108807 +- When `cockroach debug zip` is run for a cluster with virtualization enabled, data about virtual clusters is now stored in a `virtual` subdirectory rather than a `tenants` subdirectory. 
#106117 +- When cluster virtualization is enabled, the following closed timestamp side-transport settings can be set only from the system virtual cluster: `kv.closed_timestamp.target_duration`, `kv.closed_timestamp.side_transport_interval`, and `kv.closed_timestamp.lead_for_global_reads_override`. #108678

Command-line changes

-- The CLI commands that output SQL data now support the JSON output format (`--format=json`), in addition to newline-delimited JSON (ND-JSON, `--format=ndjson`) that had been supported since v22.2. [#102595][#102595] -- [`cockroach debug zip`]({% link v23.2/cockroach-debug-zip.md %}) now supports the command-line flag `--format` to select the format used to store SQL table data, in the same way as [`cockroach sql`]({% link v23.2/cockroach-sql.md %}). In contrast to `cockroach sql` however, its default value is `json` (resulting in files named `.json`) and the default is not dependent on whether the terminal is interactive. [#102607][#102607] +- The CLI commands that output SQL data now support the JSON output format (`--format=json`), in addition to newline-delimited JSON (ND-JSON, `--format=ndjson`) that had been supported since v22.2. #102595 +- [`cockroach debug zip`]({% link v23.2/cockroach-debug-zip.md %}) now supports the command-line flag `--format` to select the format used to store SQL table data, in the same way as [`cockroach sql`]({% link v23.2/cockroach-sql.md %}). In contrast to `cockroach sql` however, its default value is `json` (resulting in files named `.json`) and the default is not dependent on whether the terminal is interactive. #102607 - The SQL shell now supports argument quoting for client-side commands in a similar way to `psql`: inside single quotes, `\` can escape characters and recognize octal/hexadecimal sequences; and inside double quotes characters are passed through. The quote characters themselves, when doubled, result in themselves as part of the string. For example, the following commands both result in a SQL prompt that says `go "world"`: @@ -319,7 +319,7 @@ When cluster virtualization is enabled: These quoting rules are similar to PostgreSQL, but are different from the rules used by POSIX shells and of other programming languages like Python or Go. 
For example, octal and hex escape sequences support a variable number of digits, and double quoted strings preserve the surrounding quotes. When in doubt, refer to the PostgreSQL documentation. - [#104610][#104610] + #104610 - The configuration for log output sinks now accepts a new `format-options` field. This can be used to customize the output of a given format. Each format accepts different options. One available option for the `json` output format is `datetime-format`. @@ -341,7 +341,7 @@ When cluster virtualization is enabled: - `iso8601` / `rfc3339`: format the time stamp like "2006-01-02T15:04:05.999999999Z". - `rfc1123`: format the time stamp like "Mon, 02 Jan 2006 15:04:05 +0000". - Enabling the `datetime` field introduces CPU overhead and is not recommended. When using output to a log collector such as Fluent or Datadog, the log collector can be configured to transform the timestamp provided by CockroachDB without requiring participation from CockroachDB itself. When inspecting a log file containing JSON output produced by CockroachDB, the command `cockroach debug merge-log` can consume the JSON data and reformat it using the `crdb-v2` format which also includes the date and time using the RFC3339 format. [#104265][#104265] + Enabling the `datetime` field introduces CPU overhead and is not recommended. When using output to a log collector such as Fluent or Datadog, the log collector can be configured to transform the timestamp provided by CockroachDB without requiring participation from CockroachDB itself. When inspecting a log file containing JSON output produced by CockroachDB, the command `cockroach debug merge-log` can consume the JSON data and reformat it using the `crdb-v2` format which also includes the date and time using the RFC3339 format. #104265 - The `json` log output format now recognizes the extra format option `datetime-timezone` which selects which timezone to use when formatting the `datetime` field. 
`datetime-timezone` must be combined with `datetime-format` because the default value for the latter option is `none` (i.e., `datetime` is not produced by default). For example: @@ -354,11 +354,11 @@ When cluster virtualization is enabled: format-options: {datetime-format: rfc3339, datetime-timezone: America/New_York} ~~~ - [#104265][#104265] + #104265 -- The `json` log format now recognizes the format options `tag-style` and `fluent-tag`. The existing formats `json-compact`, `json-fluent`, `json-fluent-compact` have been redefined to become aliases for `json` with different defaults for the two new options. [#104265][#104265] -- The `crdb-v1` log format now recognizes the format options `show-counter` and `colors`. The existing formats `crdb-v1-tty`, `crdb-v1-count`, `crdb-v1-tty-count` have been redefined to become aliases for `crdb-v1` with different defaults for the two new options. [#104265][#104265] -- The `crdb-v2` log format now recognizes the format option `colors`. The existing formats `crdb-v2-tty` has been redefined to become aliases for `crdb-v2` with a different default for the new option. [#104265][#104265] +- The `json` log format now recognizes the format options `tag-style` and `fluent-tag`. The existing formats `json-compact`, `json-fluent`, `json-fluent-compact` have been redefined to become aliases for `json` with different defaults for the two new options. #104265 +- The `crdb-v1` log format now recognizes the format options `show-counter` and `colors`. The existing formats `crdb-v1-tty`, `crdb-v1-count`, `crdb-v1-tty-count` have been redefined to become aliases for `crdb-v1` with different defaults for the two new options. #104265 +- The `crdb-v2` log format now recognizes the format option `colors`. The existing formats `crdb-v2-tty` has been redefined to become aliases for `crdb-v2` with a different default for the new option. #104265 - The log output formats `crdb-v1` and `crdb-v2` now support the format option `timezone`. 
When specified, the corresponding time zone is used to produce the timestamp column. For example: {% include_cached copy-clipboard.html %} @@ -374,15 +374,15 @@ When cluster virtualization is enabled: I230606 12:43:01.553407-040000 1 1@cli/start.go:575 ⋮ [n?] 4 soft memory limit of Go runtime is set to 35 GiB ^^^^^^^ indicates GMT-4 was used ~~~ - The timezone offset is also always included in the format if it is not zero (e.g., for non-UTC time zones). This is necessary to ensure that the times can be read back precisely. [#104265][#104265] + The timezone offset is also always included in the format if it is not zero (e.g., for non-UTC time zones). This is necessary to ensure that the times can be read back precisely. #104265 -- The command `cockroach debug merge-log` was adapted to understand time zones in input files read with format `crdb-v1` or `crdb-v2`. [#104265][#104265] -- When customizing the SQL interactive prompt, `%M` and `%m` now behave more like `psql` when connecting over a Unix datagram socket. [#105137][#105137] -- The default value of the `--format` parameter to `cockroach debug zip` is `tsv`, like other CLI commands that can extract SQL data. [#107474][#107474] -- The `debug.zip` now includes the `crdb_internal.probe_range` table with a limit of 100 rows to prevent the query from taking too long. [#107720][#107720] -- The default value for the `--max-sql-memory` parameter of the [`cockroach demo` command]({% link v23.2/cockroach-demo.md %}) has been increased from 128 MiB to 256 MiB. [#103642][#103642] -- The command `\demo recommission` has been removed from [`cockroach demo`]({% link v23.2/cockroach-demo.md %}). It had been obsolete and non-functional ever since v20.2. [#108566][#108566] -- Added limited `statement_statistics` to the debug ZIP file. [#108210][#108210] +- The command `cockroach debug merge-log` was adapted to understand time zones in input files read with format `crdb-v1` or `crdb-v2`. 
#104265 +- When customizing the SQL interactive prompt, `%M` and `%m` now behave more like `psql` when connecting over a Unix datagram socket. #105137 +- The default value of the `--format` parameter to `cockroach debug zip` is `tsv`, like other CLI commands that can extract SQL data. #107474 +- The `debug.zip` now includes the `crdb_internal.probe_range` table with a limit of 100 rows to prevent the query from taking too long. #107720 +- The default value for the `--max-sql-memory` parameter of the [`cockroach demo` command]({% link v23.2/cockroach-demo.md %}) has been increased from 128 MiB to 256 MiB. #103642 +- The command `\demo recommission` has been removed from [`cockroach demo`]({% link v23.2/cockroach-demo.md %}). It had been obsolete and non-functional ever since v20.2. #108566 +- Added limited `statement_statistics` to the debug ZIP file. #108210 - The following user-visible cluster settings have been renamed. The previous name is still available for backward compatibility. | Previous name | New name | @@ -398,7 +398,7 @@ When cluster virtualization is enabled: | `sql.trace.log_statement_execute` | `sql.log.all_statements.enabled` | | `trace.debug.enable`| `trace.http_debug_endpoint.enabled`| - [#109074][#109074] + #109074 - The following cluster settings have been renamed. The previous names are available for backward-compatibility. @@ -407,106 +407,106 @@ When cluster virtualization is enabled: | `spanconfig.tenant_coalesce_adjacent.enabled` | `spanconfig.range_coalescing.application.enabled` | | `spanconfig.storage_coalesce_adjacent.enabled` | `spanconfig.range_coalescing.system.enabled`| - [#109077][#109077] + #109077 -- The new `cockroach gen metric-list` command generates metadata that describes the various metrics collected by an idle server. The list does not include dynamic metric names whose names are generated based on the workload. 
[#109042][#109042] +- The new `cockroach gen metric-list` command generates metadata that describes the various metrics collected by an idle server. The list does not include dynamic metric names whose names are generated based on the workload. #109042

DB Console changes

-- The time window selection for metrics charts is now encoded in the URL via query params. [#101258][#101258] -- The [Job Details page]({% link v23.2/ui-jobs-page.md %}#job-details) now has a tabbed UI that will allow users to toggle between the Overview and other future views for advanced debugging and observability. [#102737][#102737] -- Renamed "recent executions" to "active executions" in the UI. [#103784][#103784] -- The [Changefeed Dashboard]({% link v23.2/ui-cdc-dashboard.md %}) has been updated with new graphs to track backfill progress, protected timestamps age, and the number of schema registry registrations. The updates include renaming the **Sink Byte Traffic** graph to **Emitted Bytes** and the **Max Changefeed Latency** graph to **Max Checkpoint Latency**. [#101790][#101790] +- The time window selection for metrics charts is now encoded in the URL via query params. #101258 +- The [Job Details page]({% link v23.2/ui-jobs-page.md %}#job-details) now has a tabbed UI that will allow users to toggle between the Overview and other future views for advanced debugging and observability. #102737 +- Renamed "recent executions" to "active executions" in the UI. #103784 +- The [Changefeed Dashboard]({% link v23.2/ui-cdc-dashboard.md %}) has been updated with new graphs to track backfill progress, protected timestamps age, and the number of schema registry registrations. The updates include renaming the **Sink Byte Traffic** graph to **Emitted Bytes** and the **Max Changefeed Latency** graph to **Max Checkpoint Latency**. #101790 - A new **Networking** tab has been added to the DB Console metrics dashboard. Metrics for network bytes sent and received are now displayed in the **Networking** tab rather than the **Hardware** tab. 
In addition, the following metrics have been added: - `cr.node.round-trip-latency-p50` - `cr.node.round-trip-latency-p99` - `cr.node.rpc.connection.unhealthy` - [#104394][#104394] + #104394 -- The Job Details page now has a profiler tab for more advanced observability into a job's execution. Currently, we support collecting a cluster-wide CPU profile of the job. [#103945][#103945] -- The active executions views in the SQL Activity pages now support toggling between automatic and manual refresh. A manual refresh button was also added along with a timestamp indicating when the last refresh was performed. [#103786][#103786] -- The visibility of the cluster setting `ui.display_timezone` has been set to public. Documentation of the cluster setting has been added. No functionality has been changed. [#106530][#106530] -- Added a table in the Profiler job details page that lists all the available files describing a job's execution details [#106879][#106879] -- Add columns for p50, p90, p99 percentiles and latency min and max on Explain Plan tab on the [Statement Execution Details page]({% link v23.2/ui-statements-page.md %}#statement-execution-details-page). [#107719][#107719] -- Fixed a broken query for the database details page that was causing an infinite loading state. [#107893][#107893] -- Added summary cards with total/average values for statistics on the Statement Execution Details page. [#109056][#109056] -- The DB Console now Shows a warning when the time period selected on SQL Activity pages is older than the oldest data available. [#109164][#109164] -- Users without the `VIEWCLUSTERSETTINGS` permission but with `VIEWACTIVITY` or `VIEWACTIVITYREDACTED` can now see index recommendations. [#109047][#109047] -- The DB Console now allows non-admin users to view the [Databases page]({% link v23.2/ui-databases-page.md %}). [#109245][#109245] -- Non-admin users are able to use the Database Details page. 
[#109432][#109432] -- Non-admin users are able to use the Database Table page. [#109521][#109521] -- The "SQL Connection Rate" metric on the [SQL Dashboard]({% link v23.2/ui-sql-dashboard.md %}) is downsampled using the MAX function instead of SUM. This improves situations where zooming out would cause the connection rate to increase for downsampled data. [#110391][#110391] +- The Job Details page now has a profiler tab for more advanced observability into a job's execution. Currently, we support collecting a cluster-wide CPU profile of the job. #103945 +- The active executions views in the SQL Activity pages now support toggling between automatic and manual refresh. A manual refresh button was also added along with a timestamp indicating when the last refresh was performed. #103786 +- The visibility of the cluster setting `ui.display_timezone` has been set to public. Documentation of the cluster setting has been added. No functionality has been changed. #106530 +- Added a table in the Profiler job details page that lists all the available files describing a job's execution details #106879 +- Add columns for p50, p90, p99 percentiles and latency min and max on Explain Plan tab on the [Statement Execution Details page]({% link v23.2/ui-statements-page.md %}#statement-execution-details-page). #107719 +- Fixed a broken query for the database details page that was causing an infinite loading state. #107893 +- Added summary cards with total/average values for statistics on the Statement Execution Details page. #109056 +- The DB Console now Shows a warning when the time period selected on SQL Activity pages is older than the oldest data available. #109164 +- Users without the `VIEWCLUSTERSETTINGS` permission but with `VIEWACTIVITY` or `VIEWACTIVITYREDACTED` can now see index recommendations. #109047 +- The DB Console now allows non-admin users to view the [Databases page]({% link v23.2/ui-databases-page.md %}). 
#109245 +- Non-admin users are able to use the Database Details page. #109432 +- Non-admin users are able to use the Database Table page. #109521 +- The "SQL Connection Rate" metric on the [SQL Dashboard]({% link v23.2/ui-sql-dashboard.md %}) is downsampled using the MAX function instead of SUM. This improves situations where zooming out would cause the connection rate to increase for downsampled data. #110391

Bug fixes

-- Fixed an internal error that can occur when [`CREATE OR REPLACE VIEW`]({% link v23.2/create-view.md %}) replaces a view with fewer columns and another entity depended on the view. [#99057][#99057] -- If views are created with circular dependencies, CockroachDB now returns an error (`cyclic view dependency for relation`) instead of crashing the node. This bug was present since at least 21.1. [#99174][#99174] -- Fixed a potential bug whereby a failed or cancelled [IMPORT]({% link v23.2/import.md %}) could in some cases leave some of the imported rows behind after it was cancelled, in the rare event that the writing processes were slow enough to continue writing after the cleanup process started. [#97071][#97071] -- Fixed a very rare bug that could cause keys to get unexpectedly deleted when rebalances occurred in a write-heavy workload. [#102164][#102164] -- It is now possible to properly redirect the output of SQL queries using the `ndjson` output table format in [`cockroach sql`]({% link v23.2/cockroach-sql.md %}). This bug had been introduced in v22.2. [#102595][#102595] -- The `unaccent` built-in [function]({% link v23.2/functions-and-operators.md %}) no longer removes spaces. [#103819][#103819] -- The details of errors pertaining to invalid descriptors are not included any more in redacted debug ZIP files. [#104050][#104050] -- Fixed a bug where join expressions were processed incorrectly. [#103782][#103782] -- Fixed a bug that could cause a [UDF]({% link v23.2/user-defined-functions.md %}) to return a value that does not conform to the return type of the UDF. This bug was only present for UDFs that return user-defined types. The bug was present since v23.1. [#104151][#104151] -- Fixed a bug where if a user was logged in while a different session dropped that user, the dropped user would still inherit privileges from the `public` role. Now, CockroachDB checks that the user exists before allowing it to inherit privileges from the `public` role. 
In addition, any active web sessions are now revoked when a user is dropped. [#104215][#104215] -- Fixed a bug in upstream `etcd-io/raft` which could result in pulling unlimited amount of logs into memory, and lead to out-of-memory errors. Now the log scan has a limited memory footprint. [#104483][#104483] -- Fixed a bug where, in rare circumstances, a [replication](https://cockroachlabs.com/docs/v23.2/architecture/replication-layer) could get stuck when proposed near lease or leadership changes, especially under overload, and the [replica circuit breakers]([../v23.2](https://cockroachlabs.com/docs/v23.2/architecture/replication-layer#per-replica-circuit-breakers) could trip. A previous attempt to fix this issue has been reverted in favor of this fix. [#106515][#106515] -- CockroachDB now automatically deletes statistics for dropped tables from the `system.table_statistics` table. [#105364][#105364] -- Fixed a rare internal error which occurs when a query uses a "project set" operation involving simple column expressions. [#104756][#104756] -- The [Raft]({% link v23.2/architecture/replication-layer.md %}#raft) `PreVote` and `CheckQuorum` mechanisms are now fully enabled. These prevent spurious elections when followers already have an active leader, and cause leaders to step down if they don't hear back from a quorum of followers. This improves reliability under partial and asymmetric network partitions, by avoiding spurious elections and preventing unavailability where a partially partitioned node could steal leadership away from an established leaseholder who would then no longer be able to reach the leader and submit writes. [#104042][#104042] -- Fixed a bug that could produce incorrect values for [virtual computed columns]({% link v23.2/computed-columns.md %}) in rare cases. The bug only occurred when the virtual column expression's type did not match the type of the virtual column. 
[#105736][#105736] -- Fixed a rounding error that could cause distributed execution for some decimal aggregate functions to return slightly inaccurate results in rare cases. [#105694][#105694] -- Fixed the `StatementStatistics.Nodes` to contain all of the nodes involved in the query. Fixed the region info in [`EXPLAIN ANALYZE (DISTSQL)`]({% link v23.2/explain-analyze.md %}) for virtual clusters. [#106587][#106587] -- Fixed a bug that caused [backups]({% link v23.2/take-full-and-incremental-backups.md %}) to fail if there are tables and functions of the same name. [#106626][#106626] -- Fixed edge cases in decimal and float evaluation for division operators. `'NaN'::DECIMAL / 0` will now return `NaN` instead of a division-by-zero error, and `0 / 'inf'::DECIMAL` will return `0` instead of `0E-2019`. [#106472][#106472] -- Fixed a bug present since before v22.2 that could cause a query with `LIMIT` and `ORDER BY` to return results in the wrong order. This bug could cause incorrect results as well if the `LIMIT` was nested within an outer query (e.g., under another `LIMIT`). [#106717][#106717] -- Added missing `SQLInstanceIDs` used to execute the statement to the telemetry `SampledQuery` event. [#106753][#106753] -- Fixed a bug where inserting geometries into a table with an inverted index involving a NaN coordinate could result in a panic. This now produces errors instead. [#106671][#106671] -- Avoid displaying `undefined` regions on the [Databases page]({% link v23.2/ui-databases-page.md %}). [#106778][#106778] -- The [`cockroach userfile upload` command]({% link v23.2/cockroach-userfile-upload.md %}) uses less memory when uploading a file. [#106056][#106056] -- `CASE`, `IF`, `COALESCE`, and `IFNULL` expressions now return an error when passed a generator function as an argument. This mirrors the behavior of PostgreSQL. 
[#105582][#105582] -- Fixed a bug that allowed views created with `CREATE OR REPLACE VIEW` to reference user-defined types in other databases, even with `sql.cross_db_views.enabled` set to `false`. This bug was present since user-defined types were introduced in v20.1. [#106869][#106869] -- Removed a source of unnecessary Raft snapshots during replica movement. [#106793][#106793] -- Fixed a bug where in rare situations nodes would get stuck during start-up. It would manifest itself through a stack frame sitting on a select in `waitForAdditionalStoreInit` for extended periods of time (i.e., minutes). [#107124][#107124] -- Fixed a bug that caused internal errors when using an aggregate function in an `ORDER BY` clause of a [`DELETE`]({% link v23.2/delete.md %}) or [`UPDATE`]({% link v23.2/update.md %}) statement. Aggregate functions are no longer allowed in these contexts. The bug has been present since at least v20.2. [#107641][#107641] -- The filter on the [Statements page]({% link v23.2/ui-statements-page.md %}) works when application name is an empty string. [#107750][#107750] -- The [Transaction Details page]({% link v23.2/ui-transactions-page.md %}#transaction-details-page) now loads with the fingerprint details even if no application is specified in the URL. [#107742][#107742] -- The [Schema Insights page]({% link v23.2/ui-insights-page.md %}#schema-insights-tab) no longer times out. [#107292][#107292] -- The last SQL statement in a user-defined function with a `VOID` return type can now produce any number of columns of any type. This bug was present since UDFs were introduced in v22.2. [#108299][#108299] -- Fixed a bug that caused nodes to crash when attempting to `EXECUTE` a prepared statement with an argument that referenced a user-defined function. This bug was present since user-defined functions were introduced in v22.2. 
[#108213][#108213] -- Fixed a bug where a release [save point]({% link v23.2/savepoint.md %}) could incorrectly emit a "cannot publish new versions for descriptors" error instead of a retryable error. [#108133][#108133] -- Users with the `VIEWACTIVITY` privilege now are able to see other users sessions from both the CLI and the DB Console. [#106590][#106590] -- Fixed a bug in [`cockroach demo`]({% link v23.2/cockroach-demo.md %}) whereby `\demo add` could sometimes crash with an error "`index out of range [...] with length ...`". This bug had been introduced in v19.x. [#108566][#108566] -- Fixed a bug introduced in v20.2 where the command `\demo decommission` in `cockroach demo` could leave the demo cluster in a broken state. [#108566][#108566] -- Fixed a bug where [`cockroach start`]({% link v23.2/cockroach-start.md %}) would sometimes incorrectly hang upon shutting down a server after encountering an internal error. This bug had been introduced some time in v22.x. [#108612][#108612] -- Fixed a bug in the index recommendations provided in the [`EXPLAIN`]({% link v23.2/explain.md %}) output where `ALTER INDEX ... VISIBLE` index recommendations may suggest making the wrong index visible when there are multiple invisible indexes in a table. [#108576][#108576] -- Users with the [`VIEWACTIVITY` privilege]({% link v23.2/grant.md %}) can now view correct values for timezones. [#108486][#108486] -- Fixed a bug present since v23.1.0 that would cause queries on the `pg_catalog.pg_statistic_ext` table to fail if a table was dropped recently. This bug also caused the `\d` CLI shortcut to encounter errors. [#108818][#108818] -- Fixed a bug where `pg_attribute` and `pg_attrdef` did not properly return results for generated columns. [#108964][#108964] -- Fixed a bug where a `SpanStatsRequest` would return post-replicated MVCC stats. Now, a `SpanStatsRequest` returns the logical MVCC stats for the requested span. 
[#108852][#108852] -- Fixed the column name on the selects on the tables `crdb_internal.node_txn_execution_insights` and `crdb_internal.cluster_txn_execution_insights` upon the creation of `debug.zip`. [#109444][#109444] -- Fixed the type resolution logic for `CASE` statements to more closely match Postgres' logic. In particular, we now adhere to rule 5 listed in the [PostgreSQL documentation](https://www.postgresql.org/docs/current/typeconv-union-case.html), which requires that we select the first non-unknown input type as the candidate type, then consider each other non-unknown input type, left to right (`CASE` treats its `ELSE` clause (if any) as the "first" input, with the `THEN` clauses(s) considered after that). If the candidate type can be implicitly converted to the other type, but not vice-versa, select the other type as the new candidate type. Then continue considering the remaining inputs. If, at any stage of this process, a preferred type is selected, stop considering additional inputs (note that CockroachDB does not yet support the concept of a "preferred type"). [#108387][#108387] -- Fixed an issue on the [Metrics page]({% link v23.2/ui-overview-dashboard.md %}) where no metrics would load when viewing metrics for a virtual cluster with a hyphenated name in a global context. [#109174][#109174] -- Fixed a potential livelock between a high-priority transactional read and a normal-priority write. The read pushes the timestamp of the write, but if the read gets pushed as well, it may repeatedly fail to refresh because it keeps encountering the intent of the write. [#108190][#108190] -- Fixed a nil dereference panic during node startup that could be caused by an incorrect initialization order. [#109659][#109659] -- The `difference` built-in had its return type incorrectly set to a string instead of an integer. 
[#109731][#109731] -- Fixed a bug that could cause a transaction performing multiple parallel foreign key checks to return a `concurrent txn use detected` error. [#109510][#109510] -- Fixed a bug causing performance regression when disabling `sql.metrics.statement_details.enabled` which caused execution stats to be collected for all queries instead of the default one percent. [#109785][#109785] -- Fixed a bug where certain SQL session variables meant to be hidden from introspection were showing up in `information_schema.session_variables`, which was incoherent with the handling in `pg_catalog.pg_settings`. [#109872][#109872] -- CockroachDB now properly handles RPC failures on writes using the parallel commit protocol that execute in parallel to the commit operation, avoiding incorrect retryable failures and `transaction unexpectedly committed` assertions by detecting when writes cannot be retried idempotently, instead returning an `AmbiguousResultError`. [#107658][#107658] -- Fixed a bug where dependencies on sequences from tables would be reported with the wrong value for the `classid` column in the `pg_catalog.pg_depend` table. [#110144][#110144] -- Two `ALTER RANGE default CONFIGURE ZONE` statements on the same line no longer displays an error. [#109774][#109774] -- Fixed a DB Console issue where the `DROP_UNUSED` index recommendations produced by the table details page produced an invalid `DROP INDEX` statement. [#110429][#110429] -- Removed buggy [TTL]({% link v23.2/row-level-ttl.md %}) descriptor repair. Previously, upgrading from v22.2.X to v23.1.9 incorrectly removed TTL storage parameters from tables (visible by running a `SHOW CREATE TABLE ;` statement) while attempting to repair table descriptors. This resulted in the node that attempted to run the TTL job crashing due to a panic caused by the missing TTL storage parameters. 
[#110364][#110364] -- `cockroach debug pebble` commands now work correctly with encrypted stores which don't use the default `cockroach-data` path without having to also pass `--store`. [#110150][#110150] -- Fixed a bug where `CREATE INDEX` for [partial indexes]({% link v23.2/partial-indexes.md %}) could fail with `ERROR: duplicate key value violates unique constraint` if concurrent inserts happened simultaneously. [#110216][#110216] -- Observability pages no longer crash when they encounter zeros (e.g., a session with no memory allocated). [#108752][#108752] +- Fixed an internal error that can occur when [`CREATE OR REPLACE VIEW`]({% link v23.2/create-view.md %}) replaces a view with fewer columns and another entity depended on the view. #99057 +- If views are created with circular dependencies, CockroachDB now returns an error (`cyclic view dependency for relation`) instead of crashing the node. This bug was present since at least 21.1. #99174 +- Fixed a potential bug whereby a failed or cancelled [IMPORT]({% link v23.2/import.md %}) could in some cases leave some of the imported rows behind after it was cancelled, in the rare event that the writing processes were slow enough to continue writing after the cleanup process started. #97071 +- Fixed a very rare bug that could cause keys to get unexpectedly deleted when rebalances occurred in a write-heavy workload. #102164 +- It is now possible to properly redirect the output of SQL queries using the `ndjson` output table format in [`cockroach sql`]({% link v23.2/cockroach-sql.md %}). This bug had been introduced in v22.2. #102595 +- The `unaccent` built-in [function]({% link v23.2/functions-and-operators.md %}) no longer removes spaces. #103819 +- The details of errors pertaining to invalid descriptors are not included any more in redacted debug ZIP files. #104050 +- Fixed a bug where join expressions were processed incorrectly. 
#103782 +- Fixed a bug that could cause a [UDF]({% link v23.2/user-defined-functions.md %}) to return a value that does not conform to the return type of the UDF. This bug was only present for UDFs that return user-defined types. The bug was present since v23.1. #104151 +- Fixed a bug where if a user was logged in while a different session dropped that user, the dropped user would still inherit privileges from the `public` role. Now, CockroachDB checks that the user exists before allowing it to inherit privileges from the `public` role. In addition, any active web sessions are now revoked when a user is dropped. #104215 +- Fixed a bug in upstream `etcd-io/raft` which could result in pulling unlimited amount of logs into memory, and lead to out-of-memory errors. Now the log scan has a limited memory footprint. #104483 +- Fixed a bug where, in rare circumstances, a [replication](https://cockroachlabs.com/docs/v23.2/architecture/replication-layer) could get stuck when proposed near lease or leadership changes, especially under overload, and the [replica circuit breakers]([../v23.2](https://cockroachlabs.com/docs/v23.2/architecture/replication-layer#per-replica-circuit-breakers) could trip. A previous attempt to fix this issue has been reverted in favor of this fix. #106515 +- CockroachDB now automatically deletes statistics for dropped tables from the `system.table_statistics` table. #105364 +- Fixed a rare internal error which occurs when a query uses a "project set" operation involving simple column expressions. #104756 +- The [Raft]({% link v23.2/architecture/replication-layer.md %}#raft) `PreVote` and `CheckQuorum` mechanisms are now fully enabled. These prevent spurious elections when followers already have an active leader, and cause leaders to step down if they don't hear back from a quorum of followers. 
This improves reliability under partial and asymmetric network partitions, by avoiding spurious elections and preventing unavailability where a partially partitioned node could steal leadership away from an established leaseholder who would then no longer be able to reach the leader and submit writes. #104042 +- Fixed a bug that could produce incorrect values for [virtual computed columns]({% link v23.2/computed-columns.md %}) in rare cases. The bug only occurred when the virtual column expression's type did not match the type of the virtual column. #105736 +- Fixed a rounding error that could cause distributed execution for some decimal aggregate functions to return slightly inaccurate results in rare cases. #105694 +- Fixed the `StatementStatistics.Nodes` to contain all of the nodes involved in the query. Fixed the region info in [`EXPLAIN ANALYZE (DISTSQL)`]({% link v23.2/explain-analyze.md %}) for virtual clusters. #106587 +- Fixed a bug that caused [backups]({% link v23.2/take-full-and-incremental-backups.md %}) to fail if there are tables and functions of the same name. #106626 +- Fixed edge cases in decimal and float evaluation for division operators. `'NaN'::DECIMAL / 0` will now return `NaN` instead of a division-by-zero error, and `0 / 'inf'::DECIMAL` will return `0` instead of `0E-2019`. #106472 +- Fixed a bug present since before v22.2 that could cause a query with `LIMIT` and `ORDER BY` to return results in the wrong order. This bug could cause incorrect results as well if the `LIMIT` was nested within an outer query (e.g., under another `LIMIT`). #106717 +- Added missing `SQLInstanceIDs` used to execute the statement to the telemetry `SampledQuery` event. #106753 +- Fixed a bug where inserting geometries into a table with an inverted index involving a NaN coordinate could result in a panic. This now produces errors instead. #106671 +- Avoid displaying `undefined` regions on the [Databases page]({% link v23.2/ui-databases-page.md %}). 
#106778 +- The [`cockroach userfile upload` command]({% link v23.2/cockroach-userfile-upload.md %}) uses less memory when uploading a file. #106056 +- `CASE`, `IF`, `COALESCE`, and `IFNULL` expressions now return an error when passed a generator function as an argument. This mirrors the behavior of PostgreSQL. #105582 +- Fixed a bug that allowed views created with `CREATE OR REPLACE VIEW` to reference user-defined types in other databases, even with `sql.cross_db_views.enabled` set to `false`. This bug was present since user-defined types were introduced in v20.1. #106869 +- Removed a source of unnecessary Raft snapshots during replica movement. #106793 +- Fixed a bug where in rare situations nodes would get stuck during start-up. It would manifest itself through a stack frame sitting on a select in `waitForAdditionalStoreInit` for extended periods of time (i.e., minutes). #107124 +- Fixed a bug that caused internal errors when using an aggregate function in an `ORDER BY` clause of a [`DELETE`]({% link v23.2/delete.md %}) or [`UPDATE`]({% link v23.2/update.md %}) statement. Aggregate functions are no longer allowed in these contexts. The bug has been present since at least v20.2. #107641 +- The filter on the [Statements page]({% link v23.2/ui-statements-page.md %}) works when application name is an empty string. #107750 +- The [Transaction Details page]({% link v23.2/ui-transactions-page.md %}#transaction-details-page) now loads with the fingerprint details even if no application is specified in the URL. #107742 +- The [Schema Insights page]({% link v23.2/ui-insights-page.md %}#schema-insights-tab) no longer times out. #107292 +- The last SQL statement in a user-defined function with a `VOID` return type can now produce any number of columns of any type. This bug was present since UDFs were introduced in v22.2. 
#108299 +- Fixed a bug that caused nodes to crash when attempting to `EXECUTE` a prepared statement with an argument that referenced a user-defined function. This bug was present since user-defined functions were introduced in v22.2. #108213 +- Fixed a bug where a release [save point]({% link v23.2/savepoint.md %}) could incorrectly emit a "cannot publish new versions for descriptors" error instead of a retryable error. #108133 +- Users with the `VIEWACTIVITY` privilege now are able to see other users sessions from both the CLI and the DB Console. #106590 +- Fixed a bug in [`cockroach demo`]({% link v23.2/cockroach-demo.md %}) whereby `\demo add` could sometimes crash with an error "`index out of range [...] with length ...`". This bug had been introduced in v19.x. #108566 +- Fixed a bug introduced in v20.2 where the command `\demo decommission` in `cockroach demo` could leave the demo cluster in a broken state. #108566 +- Fixed a bug where [`cockroach start`]({% link v23.2/cockroach-start.md %}) would sometimes incorrectly hang upon shutting down a server after encountering an internal error. This bug had been introduced some time in v22.x. #108612 +- Fixed a bug in the index recommendations provided in the [`EXPLAIN`]({% link v23.2/explain.md %}) output where `ALTER INDEX ... VISIBLE` index recommendations may suggest making the wrong index visible when there are multiple invisible indexes in a table. #108576 +- Users with the [`VIEWACTIVITY` privilege]({% link v23.2/grant.md %}) can now view correct values for timezones. #108486 +- Fixed a bug present since v23.1.0 that would cause queries on the `pg_catalog.pg_statistic_ext` table to fail if a table was dropped recently. This bug also caused the `\d` CLI shortcut to encounter errors. #108818 +- Fixed a bug where `pg_attribute` and `pg_attrdef` did not properly return results for generated columns. #108964 +- Fixed a bug where a `SpanStatsRequest` would return post-replicated MVCC stats. 
Now, a `SpanStatsRequest` returns the logical MVCC stats for the requested span. #108852 +- Fixed the column name on the selects on the tables `crdb_internal.node_txn_execution_insights` and `crdb_internal.cluster_txn_execution_insights` upon the creation of `debug.zip`. #109444 +- Fixed the type resolution logic for `CASE` statements to more closely match Postgres' logic. In particular, we now adhere to rule 5 listed in the [PostgreSQL documentation](https://www.postgresql.org/docs/current/typeconv-union-case.html), which requires that we select the first non-unknown input type as the candidate type, then consider each other non-unknown input type, left to right (`CASE` treats its `ELSE` clause (if any) as the "first" input, with the `THEN` clauses(s) considered after that). If the candidate type can be implicitly converted to the other type, but not vice-versa, select the other type as the new candidate type. Then continue considering the remaining inputs. If, at any stage of this process, a preferred type is selected, stop considering additional inputs (note that CockroachDB does not yet support the concept of a "preferred type"). #108387 +- Fixed an issue on the [Metrics page]({% link v23.2/ui-overview-dashboard.md %}) where no metrics would load when viewing metrics for a virtual cluster with a hyphenated name in a global context. #109174 +- Fixed a potential livelock between a high-priority transactional read and a normal-priority write. The read pushes the timestamp of the write, but if the read gets pushed as well, it may repeatedly fail to refresh because it keeps encountering the intent of the write. #108190 +- Fixed a nil dereference panic during node startup that could be caused by an incorrect initialization order. #109659 +- The `difference` built-in had its return type incorrectly set to a string instead of an integer. 
#109731 +- Fixed a bug that could cause a transaction performing multiple parallel foreign key checks to return a `concurrent txn use detected` error. #109510 +- Fixed a bug causing performance regression when disabling `sql.metrics.statement_details.enabled` which caused execution stats to be collected for all queries instead of the default one percent. #109785 +- Fixed a bug where certain SQL session variables meant to be hidden from introspection were showing up in `information_schema.session_variables`, which was incoherent with the handling in `pg_catalog.pg_settings`. #109872 +- CockroachDB now properly handles RPC failures on writes using the parallel commit protocol that execute in parallel to the commit operation, avoiding incorrect retryable failures and `transaction unexpectedly committed` assertions by detecting when writes cannot be retried idempotently, instead returning an `AmbiguousResultError`. #107658 +- Fixed a bug where dependencies on sequences from tables would be reported with the wrong value for the `classid` column in the `pg_catalog.pg_depend` table. #110144 +- Two `ALTER RANGE default CONFIGURE ZONE` statements on the same line no longer displays an error. #109774 +- Fixed a DB Console issue where the `DROP_UNUSED` index recommendations produced by the table details page produced an invalid `DROP INDEX` statement. #110429 +- Removed buggy [TTL]({% link v23.2/row-level-ttl.md %}) descriptor repair. Previously, upgrading from v22.2.X to v23.1.9 incorrectly removed TTL storage parameters from tables (visible by running a `SHOW CREATE TABLE ;` statement) while attempting to repair table descriptors. This resulted in the node that attempted to run the TTL job crashing due to a panic caused by the missing TTL storage parameters. #110364 +- `cockroach debug pebble` commands now work correctly with encrypted stores which don't use the default `cockroach-data` path without having to also pass `--store`. 
#110150 +- Fixed a bug where `CREATE INDEX` for [partial indexes]({% link v23.2/partial-indexes.md %}) could fail with `ERROR: duplicate key value violates unique constraint` if concurrent inserts happened simultaneously. #110216 +- Observability pages no longer crash when they encounter zeros (e.g., a session with no memory allocated). #108752 - Removed the [cluster setting]({% link v23.2/cluster-settings.md %}) `kv.snapshot_recovery.max_rate`: - In v23.2, this setting is disabled; it is a no-op. If you previously set `kv.snapshot_recovery.max_rate` on a cluster running v23.1 and upgraded to v23.2, the setting is ignored, and the [`kv.snapshot_rebalance.max_rate`]({% link v23.2/cluster-settings.md %}#setting-kv-snapshot-rebalance-max-rate ) setting is used instead. - - In v24.1 and later, this setting is removed entirely. If you had previously set `kv.snapshot_recovery.max_rate` prior to upgrade, it will be cleared, and any attempts to set it will fail with the error message: `ERROR: unknown cluster setting 'kv.snapshot_recovery.max_rate'`. [#102596][#102596] + - In v24.1 and later, this setting is removed entirely. If you had previously set `kv.snapshot_recovery.max_rate` prior to upgrade, it will be cleared, and any attempts to set it will fail with the error message: `ERROR: unknown cluster setting 'kv.snapshot_recovery.max_rate'`. #102596 - Fixed a bug in which a `CREATE FUNCTION` may produce a syntax error if the UDF body wrapped in tagged dollar quotes (e.g., `$func$`), contains two consecutive dollar signs `$$`. If the UDF body is known to contain dollar signs, then the caller should use tagged dollar quotes or single quotes when defining the UDF. 
For example: {% include_cached copy-clipboard.html %} @@ -514,46 +514,46 @@ When cluster virtualization is enabled: CREATE FUNCTION f(a STRING) RETURNS STRING LANGUAGE SQL AS $func$ SELECT concat('$$', a); $func$ ~~~ - [#101352][#101352] + #101352 -- CockroachDB now prevents setting `max_range_size` below the `COCKROACH_MIN_RANGE_MAX_BYTES` environment variable, which defaults to 64 MiB (half of the default minimum range size). [#96725][#96725] -- Fixed a bug that could occasionally cause schema change jobs, such as table or index drops, to appear stuck in state "waiting for MVCC GC" for much longer than expected. The fix only applies to future schema changes. To process existing stuck jobs, manually force-enqueue the relevant ranges in the MVCC GC queue from the DB Console's [Advanced Debug](https://cockroachlabs.com/docs/v23.2/ui-debug-pages) page. [#110078][#110078] -- Fixed a bug introduced when the `ChartCatalog` API endpoint was introduced, where the endpoint did not correctly report the unit of metrics. [#109042][#109042] -- Fixed a bug that could occur when the "multiple active portals" execution mode (Preview) was enabled to evaluate queries such as lookup joins. The bug could result in an internal error like `unexpected 40960 leftover bytes` if the portal was not fully consumed. [#110625][#110625] -- Fixed a bug where an `ALTER TABLE ... ADD CONSTRAINT CHECK ...` statement that utilized a user-defined function in the `CHECK` could cause a validation error. [#110130][#110130] -- Fixed a bug where `RESET (ttl_expire_after)` could incorrectly remove `ttl_expiration_expression`. [#110252][#110252] -- Fixed a bug where the `format_type` built-in did not honor `typemod` information for array types, leading to incorrect output. [#110900][#110900] -- Fixed a bug introduced in v22.2 that incorrectly allowed users without the `EXECUTE` privilege to execute a user-defined function. 
[#107587][#107587] +- CockroachDB now prevents setting `max_range_size` below the `COCKROACH_MIN_RANGE_MAX_BYTES` environment variable, which defaults to 64 MiB (half of the default minimum range size). #96725 +- Fixed a bug that could occasionally cause schema change jobs, such as table or index drops, to appear stuck in state "waiting for MVCC GC" for much longer than expected. The fix only applies to future schema changes. To process existing stuck jobs, manually force-enqueue the relevant ranges in the MVCC GC queue from the DB Console's [Advanced Debug](https://cockroachlabs.com/docs/v23.2/ui-debug-pages) page. #110078 +- Fixed a bug introduced when the `ChartCatalog` API endpoint was introduced, where the endpoint did not correctly report the unit of metrics. #109042 +- Fixed a bug that could occur when the "multiple active portals" execution mode (Preview) was enabled to evaluate queries such as lookup joins. The bug could result in an internal error like `unexpected 40960 leftover bytes` if the portal was not fully consumed. #110625 +- Fixed a bug where an `ALTER TABLE ... ADD CONSTRAINT CHECK ...` statement that utilized a user-defined function in the `CHECK` could cause a validation error. #110130 +- Fixed a bug where `RESET (ttl_expire_after)` could incorrectly remove `ttl_expiration_expression`. #110252 +- Fixed a bug where the `format_type` built-in did not honor `typemod` information for array types, leading to incorrect output. #110900 +- Fixed a bug introduced in v22.2 that incorrectly allowed users without the `EXECUTE` privilege to execute a user-defined function. #107587

Performance improvements

-- The [optimizer]({% link v23.2/cost-based-optimizer.md %}) now plans inverted index scans for queries using `IN` or the `=` operators without the fetch val (`->`) operator. For example: `json_col = '{"b":"c"}' OR json_col IN ('"a"', '1')` [#101178][#101178] -- Queries that have subqueries in equality expressions are now more efficiently planned by the optimizer. [#100881][#100881] -- Query planning time has been reduced for some queries with multiple [joins]({% link v23.2/joins.md %}). [#102011][#102011] -- CockroachDB now enables the pacing mechanism in rangefeed closed timestamp notifications, by setting the default `kv.rangefeed.closed_timestamp_smear_interval` cluster setting to 1ms. This makes rangefeed closed timestamp delivery more uniform and less spikey, which reduces its impact on the Go scheduler and, ultimately, foreground SQL latencies. [#103006][#103006] -- Some large, long-running [`INSERT`]({% link v23.2/insert.md %}) statements now perform less work during their commit phase and can run faster. [#103241][#103241] -- Ranges now only quiesce after 3 seconds without proposals, to avoid frequent unquiescence which incurs an additional Raft proposal. This is configurable via the `COCKROACH_QUIESCE_AFTER_TICKS` environment variable, which defaults to 6. [#103266][#103266] -- SQL statements that must clean up intents from many different previously abandoned transactions now do so moderately more efficiently. [#103265][#103265] -- The optimizer can now avoid a grouping stage in more cases when de-duplicating the input to an [`UPSERT`]({% link v23.2/upsert.md %}) or `INSERT ... ON CONFLICT` statement. [#105206][#105206] -- The optimizer can now eliminate joins in more cases. [#105214][#105214] -- CockroachDB now improves the time to disk space reclamation when deleting rows. Previously, in scenarios where rows had large variations in row size, it was possible for disk space to not be reclaimed after MVCC garbage collection deleted the rows. 
[#104539][#104539] -- CockroachDB now has improved disk space reclamation heuristics, making disk space reclamation more timely. [#106177][#106177] -- `bool_and` and `bool_or` aggregates will now scale linearly instead of quadratically when used as a window function with a non-shrinking window, [#106477][#106477] -- CockroachDB now has reduced lock contention on `ssmemstorage.RecordStatement`. This is useful for workloads that execute the same statement concurrently on the same SQL instance. [#106860][#106860] -- The optimizer now produces more efficient query plans in some cases for queries with subqueries and user-defined functions. [#107133][#107133] -- The default Raft entry cache size has been increased from 16 MB to 1/256 of system memory with a minimum of 32 MB, divided evenly between all stores. This can be configured using the `COCKROACH_RAFT_ENTRY_CACHE_SIZE` environment variable. [#107424][#107424] -- CockroachDB now automatically collects table statistics on the `system.jobs` table, which will enable the optimizer to produce better query plans for internal queries that access the `system.jobs` table. This may result in better performance of the system. [#108139][#108139] -- The impact of high concurrency blind writes to the same key on goroutine scheduling latency was reduced. [#109349][#109349] -- [Changefeeds]({% link v23.2/change-data-capture-overview.md %}) to Webhook or Pub/Sub endpoints now support much higher throughput [#109351][#109351] -- This release improved the cost of resolving a user-defined enum type that has many values. [#109394][#109394] -- Queries that compare collated strings now use less memory and may execute faster. [#110066][#110066] -- Added a scheduler based rangefeed processor which improves rangefeed and changefeed performance for very large tables. The new processor is disabled by default, but can be enabled by setting `kv.rangefeed.scheduler.enabled` cluster setting to `true`. 
[#107553][#107553] -- This release disables `sql.defaults.zigzag_join.enabled` by default. [#110214][#110214] +- The [optimizer]({% link v23.2/cost-based-optimizer.md %}) now plans inverted index scans for queries using `IN` or the `=` operators without the fetch val (`->`) operator. For example: `json_col = '{"b":"c"}' OR json_col IN ('"a"', '1')` #101178 +- Queries that have subqueries in equality expressions are now more efficiently planned by the optimizer. #100881 +- Query planning time has been reduced for some queries with multiple [joins]({% link v23.2/joins.md %}). #102011 +- CockroachDB now enables the pacing mechanism in rangefeed closed timestamp notifications, by setting the default `kv.rangefeed.closed_timestamp_smear_interval` cluster setting to 1ms. This makes rangefeed closed timestamp delivery more uniform and less spikey, which reduces its impact on the Go scheduler and, ultimately, foreground SQL latencies. #103006 +- Some large, long-running [`INSERT`]({% link v23.2/insert.md %}) statements now perform less work during their commit phase and can run faster. #103241 +- Ranges now only quiesce after 3 seconds without proposals, to avoid frequent unquiescence which incurs an additional Raft proposal. This is configurable via the `COCKROACH_QUIESCE_AFTER_TICKS` environment variable, which defaults to 6. #103266 +- SQL statements that must clean up intents from many different previously abandoned transactions now do so moderately more efficiently. #103265 +- The optimizer can now avoid a grouping stage in more cases when de-duplicating the input to an [`UPSERT`]({% link v23.2/upsert.md %}) or `INSERT ... ON CONFLICT` statement. #105206 +- The optimizer can now eliminate joins in more cases. #105214 +- CockroachDB now improves the time to disk space reclamation when deleting rows. Previously, in scenarios where rows had large variations in row size, it was possible for disk space to not be reclaimed after MVCC garbage collection deleted the rows. 
#104539 +- CockroachDB now has improved disk space reclamation heuristics, making disk space reclamation more timely. #106177 +- `bool_and` and `bool_or` aggregates will now scale linearly instead of quadratically when used as a window function with a non-shrinking window. #106477 +- CockroachDB now has reduced lock contention on `ssmemstorage.RecordStatement`. This is useful for workloads that execute the same statement concurrently on the same SQL instance. #106860 +- The optimizer now produces more efficient query plans in some cases for queries with subqueries and user-defined functions. #107133 +- The default Raft entry cache size has been increased from 16 MB to 1/256 of system memory with a minimum of 32 MB, divided evenly between all stores. This can be configured using the `COCKROACH_RAFT_ENTRY_CACHE_SIZE` environment variable. #107424 +- CockroachDB now automatically collects table statistics on the `system.jobs` table, which will enable the optimizer to produce better query plans for internal queries that access the `system.jobs` table. This may result in better performance of the system. #108139 +- The impact of high concurrency blind writes to the same key on goroutine scheduling latency was reduced. #109349 +- [Changefeeds]({% link v23.2/change-data-capture-overview.md %}) to Webhook or Pub/Sub endpoints now support much higher throughput. #109351 +- This release improved the cost of resolving a user-defined enum type that has many values. #109394 +- Queries that compare collated strings now use less memory and may execute faster. #110066 +- Added a scheduler based rangefeed processor which improves rangefeed and changefeed performance for very large tables. The new processor is disabled by default, but can be enabled by setting `kv.rangefeed.scheduler.enabled` cluster setting to `true`. #107553 +- This release disables `sql.defaults.zigzag_join.enabled` by default. #110214

Build changes

-- Go has been upgraded to 1.20.8. [#109773][#109773] -- The top-level `Makefile` was replaced by a stub `GNUmakefile` which defers its behavior to `dev`. The common targets `make [all]`, `make test`, and `make install` remain for compatibility with most UNIX installation guides. The previous `make` rules remain available via `make -C build/GNUmakefile.obsolete`. [#84565][#84565] +- Go has been upgraded to 1.20.8. #109773 +- The top-level `Makefile` was replaced by a stub `GNUmakefile` which defers its behavior to `dev`. The common targets `make [all]`, `make test`, and `make install` remain for compatibility with most UNIX installation guides. The previous `make` rules remain available via `make -C build/GNUmakefile.obsolete`. #84565
@@ -563,467 +563,3 @@ This release includes 3208 merged PRs by 124 authors.
-[#100113]: https://github.com/cockroachdb/cockroach/pull/100113 -[#100199]: https://github.com/cockroachdb/cockroach/pull/100199 -[#100524]: https://github.com/cockroachdb/cockroach/pull/100524 -[#100612]: https://github.com/cockroachdb/cockroach/pull/100612 -[#100742]: https://github.com/cockroachdb/cockroach/pull/100742 -[#100858]: https://github.com/cockroachdb/cockroach/pull/100858 -[#100881]: https://github.com/cockroachdb/cockroach/pull/100881 -[#100898]: https://github.com/cockroachdb/cockroach/pull/100898 -[#100929]: https://github.com/cockroachdb/cockroach/pull/100929 -[#100942]: https://github.com/cockroachdb/cockroach/pull/100942 -[#101095]: https://github.com/cockroachdb/cockroach/pull/101095 -[#101178]: https://github.com/cockroachdb/cockroach/pull/101178 -[#101258]: https://github.com/cockroachdb/cockroach/pull/101258 -[#101334]: https://github.com/cockroachdb/cockroach/pull/101334 -[#101352]: https://github.com/cockroachdb/cockroach/pull/101352 -[#101388]: https://github.com/cockroachdb/cockroach/pull/101388 -[#101525]: https://github.com/cockroachdb/cockroach/pull/101525 -[#101675]: https://github.com/cockroachdb/cockroach/pull/101675 -[#101787]: https://github.com/cockroachdb/cockroach/pull/101787 -[#101790]: https://github.com/cockroachdb/cockroach/pull/101790 -[#101812]: https://github.com/cockroachdb/cockroach/pull/101812 -[#101869]: https://github.com/cockroachdb/cockroach/pull/101869 -[#101931]: https://github.com/cockroachdb/cockroach/pull/101931 -[#101932]: https://github.com/cockroachdb/cockroach/pull/101932 -[#102011]: https://github.com/cockroachdb/cockroach/pull/102011 -[#102033]: https://github.com/cockroachdb/cockroach/pull/102033 -[#102164]: https://github.com/cockroachdb/cockroach/pull/102164 -[#102595]: https://github.com/cockroachdb/cockroach/pull/102595 -[#102596]: https://github.com/cockroachdb/cockroach/pull/102596 -[#102607]: https://github.com/cockroachdb/cockroach/pull/102607 -[#102708]: 
https://github.com/cockroachdb/cockroach/pull/102708 -[#102717]: https://github.com/cockroachdb/cockroach/pull/102717 -[#102737]: https://github.com/cockroachdb/cockroach/pull/102737 -[#102772]: https://github.com/cockroachdb/cockroach/pull/102772 -[#102773]: https://github.com/cockroachdb/cockroach/pull/102773 -[#102890]: https://github.com/cockroachdb/cockroach/pull/102890 -[#102961]: https://github.com/cockroachdb/cockroach/pull/102961 -[#102974]: https://github.com/cockroachdb/cockroach/pull/102974 -[#103006]: https://github.com/cockroachdb/cockroach/pull/103006 -[#103051]: https://github.com/cockroachdb/cockroach/pull/103051 -[#103077]: https://github.com/cockroachdb/cockroach/pull/103077 -[#103115]: https://github.com/cockroachdb/cockroach/pull/103115 -[#103128]: https://github.com/cockroachdb/cockroach/pull/103128 -[#103138]: https://github.com/cockroachdb/cockroach/pull/103138 -[#103145]: https://github.com/cockroachdb/cockroach/pull/103145 -[#103241]: https://github.com/cockroachdb/cockroach/pull/103241 -[#103259]: https://github.com/cockroachdb/cockroach/pull/103259 -[#103265]: https://github.com/cockroachdb/cockroach/pull/103265 -[#103266]: https://github.com/cockroachdb/cockroach/pull/103266 -[#103285]: https://github.com/cockroachdb/cockroach/pull/103285 -[#103301]: https://github.com/cockroachdb/cockroach/pull/103301 -[#103308]: https://github.com/cockroachdb/cockroach/pull/103308 -[#103436]: https://github.com/cockroachdb/cockroach/pull/103436 -[#103476]: https://github.com/cockroachdb/cockroach/pull/103476 -[#103482]: https://github.com/cockroachdb/cockroach/pull/103482 -[#103525]: https://github.com/cockroachdb/cockroach/pull/103525 -[#103531]: https://github.com/cockroachdb/cockroach/pull/103531 -[#103539]: https://github.com/cockroachdb/cockroach/pull/103539 -[#103590]: https://github.com/cockroachdb/cockroach/pull/103590 -[#103598]: https://github.com/cockroachdb/cockroach/pull/103598 -[#103642]: 
https://github.com/cockroachdb/cockroach/pull/103642 -[#103670]: https://github.com/cockroachdb/cockroach/pull/103670 -[#103780]: https://github.com/cockroachdb/cockroach/pull/103780 -[#103782]: https://github.com/cockroachdb/cockroach/pull/103782 -[#103784]: https://github.com/cockroachdb/cockroach/pull/103784 -[#103786]: https://github.com/cockroachdb/cockroach/pull/103786 -[#103819]: https://github.com/cockroachdb/cockroach/pull/103819 -[#103921]: https://github.com/cockroachdb/cockroach/pull/103921 -[#103923]: https://github.com/cockroachdb/cockroach/pull/103923 -[#103930]: https://github.com/cockroachdb/cockroach/pull/103930 -[#103945]: https://github.com/cockroachdb/cockroach/pull/103945 -[#103963]: https://github.com/cockroachdb/cockroach/pull/103963 -[#103971]: https://github.com/cockroachdb/cockroach/pull/103971 -[#104042]: https://github.com/cockroachdb/cockroach/pull/104042 -[#104050]: https://github.com/cockroachdb/cockroach/pull/104050 -[#104079]: https://github.com/cockroachdb/cockroach/pull/104079 -[#104111]: https://github.com/cockroachdb/cockroach/pull/104111 -[#104151]: https://github.com/cockroachdb/cockroach/pull/104151 -[#104215]: https://github.com/cockroachdb/cockroach/pull/104215 -[#104234]: https://github.com/cockroachdb/cockroach/pull/104234 -[#104265]: https://github.com/cockroachdb/cockroach/pull/104265 -[#104302]: https://github.com/cockroachdb/cockroach/pull/104302 -[#104375]: https://github.com/cockroachdb/cockroach/pull/104375 -[#104376]: https://github.com/cockroachdb/cockroach/pull/104376 -[#104394]: https://github.com/cockroachdb/cockroach/pull/104394 -[#104410]: https://github.com/cockroachdb/cockroach/pull/104410 -[#104417]: https://github.com/cockroachdb/cockroach/pull/104417 -[#104439]: https://github.com/cockroachdb/cockroach/pull/104439 -[#104483]: https://github.com/cockroachdb/cockroach/pull/104483 -[#104528]: https://github.com/cockroachdb/cockroach/pull/104528 -[#104539]: 
https://github.com/cockroachdb/cockroach/pull/104539 -[#104585]: https://github.com/cockroachdb/cockroach/pull/104585 -[#104610]: https://github.com/cockroachdb/cockroach/pull/104610 -[#104755]: https://github.com/cockroachdb/cockroach/pull/104755 -[#104756]: https://github.com/cockroachdb/cockroach/pull/104756 -[#104772]: https://github.com/cockroachdb/cockroach/pull/104772 -[#104777]: https://github.com/cockroachdb/cockroach/pull/104777 -[#104915]: https://github.com/cockroachdb/cockroach/pull/104915 -[#104929]: https://github.com/cockroachdb/cockroach/pull/104929 -[#104945]: https://github.com/cockroachdb/cockroach/pull/104945 -[#105006]: https://github.com/cockroachdb/cockroach/pull/105006 -[#105009]: https://github.com/cockroachdb/cockroach/pull/105009 -[#105031]: https://github.com/cockroachdb/cockroach/pull/105031 -[#105122]: https://github.com/cockroachdb/cockroach/pull/105122 -[#105137]: https://github.com/cockroachdb/cockroach/pull/105137 -[#105206]: https://github.com/cockroachdb/cockroach/pull/105206 -[#105214]: https://github.com/cockroachdb/cockroach/pull/105214 -[#105270]: https://github.com/cockroachdb/cockroach/pull/105270 -[#105326]: https://github.com/cockroachdb/cockroach/pull/105326 -[#105364]: https://github.com/cockroachdb/cockroach/pull/105364 -[#105456]: https://github.com/cockroachdb/cockroach/pull/105456 -[#105477]: https://github.com/cockroachdb/cockroach/pull/105477 -[#105521]: https://github.com/cockroachdb/cockroach/pull/105521 -[#105530]: https://github.com/cockroachdb/cockroach/pull/105530 -[#105582]: https://github.com/cockroachdb/cockroach/pull/105582 -[#105654]: https://github.com/cockroachdb/cockroach/pull/105654 -[#105694]: https://github.com/cockroachdb/cockroach/pull/105694 -[#105716]: https://github.com/cockroachdb/cockroach/pull/105716 -[#105736]: https://github.com/cockroachdb/cockroach/pull/105736 -[#105750]: https://github.com/cockroachdb/cockroach/pull/105750 -[#105839]: 
https://github.com/cockroachdb/cockroach/pull/105839 -[#105842]: https://github.com/cockroachdb/cockroach/pull/105842 -[#105857]: https://github.com/cockroachdb/cockroach/pull/105857 -[#105876]: https://github.com/cockroachdb/cockroach/pull/105876 -[#105877]: https://github.com/cockroachdb/cockroach/pull/105877 -[#105881]: https://github.com/cockroachdb/cockroach/pull/105881 -[#105944]: https://github.com/cockroachdb/cockroach/pull/105944 -[#106056]: https://github.com/cockroachdb/cockroach/pull/106056 -[#106082]: https://github.com/cockroachdb/cockroach/pull/106082 -[#106094]: https://github.com/cockroachdb/cockroach/pull/106094 -[#106103]: https://github.com/cockroachdb/cockroach/pull/106103 -[#106104]: https://github.com/cockroachdb/cockroach/pull/106104 -[#106117]: https://github.com/cockroachdb/cockroach/pull/106117 -[#106145]: https://github.com/cockroachdb/cockroach/pull/106145 -[#106177]: https://github.com/cockroachdb/cockroach/pull/106177 -[#106242]: https://github.com/cockroachdb/cockroach/pull/106242 -[#106267]: https://github.com/cockroachdb/cockroach/pull/106267 -[#106270]: https://github.com/cockroachdb/cockroach/pull/106270 -[#106351]: https://github.com/cockroachdb/cockroach/pull/106351 -[#106402]: https://github.com/cockroachdb/cockroach/pull/106402 -[#106438]: https://github.com/cockroachdb/cockroach/pull/106438 -[#106445]: https://github.com/cockroachdb/cockroach/pull/106445 -[#106472]: https://github.com/cockroachdb/cockroach/pull/106472 -[#106477]: https://github.com/cockroachdb/cockroach/pull/106477 -[#106525]: https://github.com/cockroachdb/cockroach/pull/106525 -[#106530]: https://github.com/cockroachdb/cockroach/pull/106530 -[#106549]: https://github.com/cockroachdb/cockroach/pull/106549 -[#106574]: https://github.com/cockroachdb/cockroach/pull/106574 -[#106587]: https://github.com/cockroachdb/cockroach/pull/106587 -[#106590]: https://github.com/cockroachdb/cockroach/pull/106590 -[#106595]: 
https://github.com/cockroachdb/cockroach/pull/106595 -[#106607]: https://github.com/cockroachdb/cockroach/pull/106607 -[#106626]: https://github.com/cockroachdb/cockroach/pull/106626 -[#106642]: https://github.com/cockroachdb/cockroach/pull/106642 -[#106671]: https://github.com/cockroachdb/cockroach/pull/106671 -[#106717]: https://github.com/cockroachdb/cockroach/pull/106717 -[#106750]: https://github.com/cockroachdb/cockroach/pull/106750 -[#106753]: https://github.com/cockroachdb/cockroach/pull/106753 -[#106778]: https://github.com/cockroachdb/cockroach/pull/106778 -[#106793]: https://github.com/cockroachdb/cockroach/pull/106793 -[#106860]: https://github.com/cockroachdb/cockroach/pull/106860 -[#106869]: https://github.com/cockroachdb/cockroach/pull/106869 -[#106879]: https://github.com/cockroachdb/cockroach/pull/106879 -[#106952]: https://github.com/cockroachdb/cockroach/pull/106952 -[#107044]: https://github.com/cockroachdb/cockroach/pull/107044 -[#107076]: https://github.com/cockroachdb/cockroach/pull/107076 -[#107081]: https://github.com/cockroachdb/cockroach/pull/107081 -[#107090]: https://github.com/cockroachdb/cockroach/pull/107090 -[#107124]: https://github.com/cockroachdb/cockroach/pull/107124 -[#107128]: https://github.com/cockroachdb/cockroach/pull/107128 -[#107133]: https://github.com/cockroachdb/cockroach/pull/107133 -[#107178]: https://github.com/cockroachdb/cockroach/pull/107178 -[#107211]: https://github.com/cockroachdb/cockroach/pull/107211 -[#107292]: https://github.com/cockroachdb/cockroach/pull/107292 -[#107294]: https://github.com/cockroachdb/cockroach/pull/107294 -[#107296]: https://github.com/cockroachdb/cockroach/pull/107296 -[#107299]: https://github.com/cockroachdb/cockroach/pull/107299 -[#107303]: https://github.com/cockroachdb/cockroach/pull/107303 -[#107309]: https://github.com/cockroachdb/cockroach/pull/107309 -[#107317]: https://github.com/cockroachdb/cockroach/pull/107317 -[#107392]: 
https://github.com/cockroachdb/cockroach/pull/107392 -[#107424]: https://github.com/cockroachdb/cockroach/pull/107424 -[#107474]: https://github.com/cockroachdb/cockroach/pull/107474 -[#107533]: https://github.com/cockroachdb/cockroach/pull/107533 -[#107553]: https://github.com/cockroachdb/cockroach/pull/107553 -[#107563]: https://github.com/cockroachdb/cockroach/pull/107563 -[#107572]: https://github.com/cockroachdb/cockroach/pull/107572 -[#107601]: https://github.com/cockroachdb/cockroach/pull/107601 -[#107613]: https://github.com/cockroachdb/cockroach/pull/107613 -[#107641]: https://github.com/cockroachdb/cockroach/pull/107641 -[#107658]: https://github.com/cockroachdb/cockroach/pull/107658 -[#107682]: https://github.com/cockroachdb/cockroach/pull/107682 -[#107717]: https://github.com/cockroachdb/cockroach/pull/107717 -[#107719]: https://github.com/cockroachdb/cockroach/pull/107719 -[#107720]: https://github.com/cockroachdb/cockroach/pull/107720 -[#107742]: https://github.com/cockroachdb/cockroach/pull/107742 -[#107749]: https://github.com/cockroachdb/cockroach/pull/107749 -[#107750]: https://github.com/cockroachdb/cockroach/pull/107750 -[#107796]: https://github.com/cockroachdb/cockroach/pull/107796 -[#107815]: https://github.com/cockroachdb/cockroach/pull/107815 -[#107863]: https://github.com/cockroachdb/cockroach/pull/107863 -[#107893]: https://github.com/cockroachdb/cockroach/pull/107893 -[#107920]: https://github.com/cockroachdb/cockroach/pull/107920 -[#107953]: https://github.com/cockroachdb/cockroach/pull/107953 -[#107984]: https://github.com/cockroachdb/cockroach/pull/107984 -[#108037]: https://github.com/cockroachdb/cockroach/pull/108037 -[#108047]: https://github.com/cockroachdb/cockroach/pull/108047 -[#108133]: https://github.com/cockroachdb/cockroach/pull/108133 -[#108139]: https://github.com/cockroachdb/cockroach/pull/108139 -[#108190]: https://github.com/cockroachdb/cockroach/pull/108190 -[#108210]: 
https://github.com/cockroachdb/cockroach/pull/108210 -[#108211]: https://github.com/cockroachdb/cockroach/pull/108211 -[#108213]: https://github.com/cockroachdb/cockroach/pull/108213 -[#108289]: https://github.com/cockroachdb/cockroach/pull/108289 -[#108290]: https://github.com/cockroachdb/cockroach/pull/108290 -[#108299]: https://github.com/cockroachdb/cockroach/pull/108299 -[#108353]: https://github.com/cockroachdb/cockroach/pull/108353 -[#108387]: https://github.com/cockroachdb/cockroach/pull/108387 -[#108401]: https://github.com/cockroachdb/cockroach/pull/108401 -[#108456]: https://github.com/cockroachdb/cockroach/pull/108456 -[#108467]: https://github.com/cockroachdb/cockroach/pull/108467 -[#108486]: https://github.com/cockroachdb/cockroach/pull/108486 -[#108566]: https://github.com/cockroachdb/cockroach/pull/108566 -[#108576]: https://github.com/cockroachdb/cockroach/pull/108576 -[#108597]: https://github.com/cockroachdb/cockroach/pull/108597 -[#108612]: https://github.com/cockroachdb/cockroach/pull/108612 -[#108627]: https://github.com/cockroachdb/cockroach/pull/108627 -[#108667]: https://github.com/cockroachdb/cockroach/pull/108667 -[#108678]: https://github.com/cockroachdb/cockroach/pull/108678 -[#108752]: https://github.com/cockroachdb/cockroach/pull/108752 -[#108757]: https://github.com/cockroachdb/cockroach/pull/108757 -[#108807]: https://github.com/cockroachdb/cockroach/pull/108807 -[#108818]: https://github.com/cockroachdb/cockroach/pull/108818 -[#108824]: https://github.com/cockroachdb/cockroach/pull/108824 -[#108852]: https://github.com/cockroachdb/cockroach/pull/108852 -[#108883]: https://github.com/cockroachdb/cockroach/pull/108883 -[#108923]: https://github.com/cockroachdb/cockroach/pull/108923 -[#108964]: https://github.com/cockroachdb/cockroach/pull/108964 -[#109047]: https://github.com/cockroachdb/cockroach/pull/109047 -[#109056]: https://github.com/cockroachdb/cockroach/pull/109056 -[#109074]: 
https://github.com/cockroachdb/cockroach/pull/109074 -[#109077]: https://github.com/cockroachdb/cockroach/pull/109077 -[#109164]: https://github.com/cockroachdb/cockroach/pull/109164 -[#109165]: https://github.com/cockroachdb/cockroach/pull/109165 -[#109166]: https://github.com/cockroachdb/cockroach/pull/109166 -[#109171]: https://github.com/cockroachdb/cockroach/pull/109171 -[#109174]: https://github.com/cockroachdb/cockroach/pull/109174 -[#109226]: https://github.com/cockroachdb/cockroach/pull/109226 -[#109229]: https://github.com/cockroachdb/cockroach/pull/109229 -[#109245]: https://github.com/cockroachdb/cockroach/pull/109245 -[#109258]: https://github.com/cockroachdb/cockroach/pull/109258 -[#109264]: https://github.com/cockroachdb/cockroach/pull/109264 -[#109332]: https://github.com/cockroachdb/cockroach/pull/109332 -[#109346]: https://github.com/cockroachdb/cockroach/pull/109346 -[#109349]: https://github.com/cockroachdb/cockroach/pull/109349 -[#109351]: https://github.com/cockroachdb/cockroach/pull/109351 -[#109374]: https://github.com/cockroachdb/cockroach/pull/109374 -[#109394]: https://github.com/cockroachdb/cockroach/pull/109394 -[#109424]: https://github.com/cockroachdb/cockroach/pull/109424 -[#109432]: https://github.com/cockroachdb/cockroach/pull/109432 -[#109444]: https://github.com/cockroachdb/cockroach/pull/109444 -[#109464]: https://github.com/cockroachdb/cockroach/pull/109464 -[#109474]: https://github.com/cockroachdb/cockroach/pull/109474 -[#109510]: https://github.com/cockroachdb/cockroach/pull/109510 -[#109530]: https://github.com/cockroachdb/cockroach/pull/109530 -[#109592]: https://github.com/cockroachdb/cockroach/pull/109592 -[#109659]: https://github.com/cockroachdb/cockroach/pull/109659 -[#109694]: https://github.com/cockroachdb/cockroach/pull/109694 -[#109696]: https://github.com/cockroachdb/cockroach/pull/109696 -[#109720]: https://github.com/cockroachdb/cockroach/pull/109720 -[#109727]: 
https://github.com/cockroachdb/cockroach/pull/109727 -[#109731]: https://github.com/cockroachdb/cockroach/pull/109731 -[#109773]: https://github.com/cockroachdb/cockroach/pull/109773 -[#109774]: https://github.com/cockroachdb/cockroach/pull/109774 -[#109785]: https://github.com/cockroachdb/cockroach/pull/109785 -[#109790]: https://github.com/cockroachdb/cockroach/pull/109790 -[#109835]: https://github.com/cockroachdb/cockroach/pull/109835 -[#109844]: https://github.com/cockroachdb/cockroach/pull/109844 -[#109872]: https://github.com/cockroachdb/cockroach/pull/109872 -[#109899]: https://github.com/cockroachdb/cockroach/pull/109899 -[#109932]: https://github.com/cockroachdb/cockroach/pull/109932 -[#110066]: https://github.com/cockroachdb/cockroach/pull/110066 -[#110107]: https://github.com/cockroachdb/cockroach/pull/110107 -[#110144]: https://github.com/cockroachdb/cockroach/pull/110144 -[#110150]: https://github.com/cockroachdb/cockroach/pull/110150 -[#110173]: https://github.com/cockroachdb/cockroach/pull/110173 -[#110174]: https://github.com/cockroachdb/cockroach/pull/110174 -[#110177]: https://github.com/cockroachdb/cockroach/pull/110177 -[#110214]: https://github.com/cockroachdb/cockroach/pull/110214 -[#110216]: https://github.com/cockroachdb/cockroach/pull/110216 -[#110220]: https://github.com/cockroachdb/cockroach/pull/110220 -[#110280]: https://github.com/cockroachdb/cockroach/pull/110280 -[#110332]: https://github.com/cockroachdb/cockroach/pull/110332 -[#110364]: https://github.com/cockroachdb/cockroach/pull/110364 -[#110391]: https://github.com/cockroachdb/cockroach/pull/110391 -[#110429]: https://github.com/cockroachdb/cockroach/pull/110429 -[#110527]: https://github.com/cockroachdb/cockroach/pull/110527 -[#42063]: https://github.com/cockroachdb/cockroach/pull/42063 -[#87301]: https://github.com/cockroachdb/cockroach/pull/87301 -[#96725]: https://github.com/cockroachdb/cockroach/pull/96725 -[#97071]: https://github.com/cockroachdb/cockroach/pull/97071 
-[#97728]: https://github.com/cockroachdb/cockroach/pull/97728 -[#98203]: https://github.com/cockroachdb/cockroach/pull/98203 -[#98208]: https://github.com/cockroachdb/cockroach/pull/98208 -[#98382]: https://github.com/cockroachdb/cockroach/pull/98382 -[#98562]: https://github.com/cockroachdb/cockroach/pull/98562 -[#98709]: https://github.com/cockroachdb/cockroach/pull/98709 -[#98820]: https://github.com/cockroachdb/cockroach/pull/98820 -[#98848]: https://github.com/cockroachdb/cockroach/pull/98848 -[#98899]: https://github.com/cockroachdb/cockroach/pull/98899 -[#99057]: https://github.com/cockroachdb/cockroach/pull/99057 -[#99069]: https://github.com/cockroachdb/cockroach/pull/99069 -[#99126]: https://github.com/cockroachdb/cockroach/pull/99126 -[#99174]: https://github.com/cockroachdb/cockroach/pull/99174 -[#99191]: https://github.com/cockroachdb/cockroach/pull/99191 -[#99275]: https://github.com/cockroachdb/cockroach/pull/99275 -[#99348]: https://github.com/cockroachdb/cockroach/pull/99348 -[#99526]: https://github.com/cockroachdb/cockroach/pull/99526 -[#99789]: https://github.com/cockroachdb/cockroach/pull/99789 -[#99839]: https://github.com/cockroachdb/cockroach/pull/99839 -[#99842]: https://github.com/cockroachdb/cockroach/pull/99842 -[#99847]: https://github.com/cockroachdb/cockroach/pull/99847 -[#99876]: https://github.com/cockroachdb/cockroach/pull/99876 -[06cd54a4e]: https://github.com/cockroachdb/cockroach/commit/06cd54a4e -[09506add4]: https://github.com/cockroachdb/cockroach/commit/09506add4 -[0c660fa0d]: https://github.com/cockroachdb/cockroach/commit/0c660fa0d -[0de1c8aaf]: https://github.com/cockroachdb/cockroach/commit/0de1c8aaf -[1629e11a6]: https://github.com/cockroachdb/cockroach/commit/1629e11a6 -[1b8d40fbc]: https://github.com/cockroachdb/cockroach/commit/1b8d40fbc -[208a7eb6c]: https://github.com/cockroachdb/cockroach/commit/208a7eb6c -[20e033cf2]: https://github.com/cockroachdb/cockroach/commit/20e033cf2 -[2132ebd00]: 
https://github.com/cockroachdb/cockroach/commit/2132ebd00 -[22bbef16d]: https://github.com/cockroachdb/cockroach/commit/22bbef16d -[26d007b7e]: https://github.com/cockroachdb/cockroach/commit/26d007b7e -[27af3713a]: https://github.com/cockroachdb/cockroach/commit/27af3713a -[28fe10f03]: https://github.com/cockroachdb/cockroach/commit/28fe10f03 -[29edf817d]: https://github.com/cockroachdb/cockroach/commit/29edf817d -[2b1553ce6]: https://github.com/cockroachdb/cockroach/commit/2b1553ce6 -[2b39f1af4]: https://github.com/cockroachdb/cockroach/commit/2b39f1af4 -[2bbd66e03]: https://github.com/cockroachdb/cockroach/commit/2bbd66e03 -[2f782d32a]: https://github.com/cockroachdb/cockroach/commit/2f782d32a -[311fdc318]: https://github.com/cockroachdb/cockroach/commit/311fdc318 -[338e99bb4]: https://github.com/cockroachdb/cockroach/commit/338e99bb4 -[346bbc9a9]: https://github.com/cockroachdb/cockroach/commit/346bbc9a9 -[36d18daf8]: https://github.com/cockroachdb/cockroach/commit/36d18daf8 -[36e098f6b]: https://github.com/cockroachdb/cockroach/commit/36e098f6b -[3b35d3b0d]: https://github.com/cockroachdb/cockroach/commit/3b35d3b0d -[3c12e0d20]: https://github.com/cockroachdb/cockroach/commit/3c12e0d20 -[3c64af25d]: https://github.com/cockroachdb/cockroach/commit/3c64af25d -[3faf1e1eb]: https://github.com/cockroachdb/cockroach/commit/3faf1e1eb -[401cc12f8]: https://github.com/cockroachdb/cockroach/commit/401cc12f8 -[43cc87f80]: https://github.com/cockroachdb/cockroach/commit/43cc87f80 -[4741147f7]: https://github.com/cockroachdb/cockroach/commit/4741147f7 -[488e5a3ae]: https://github.com/cockroachdb/cockroach/commit/488e5a3ae -[49855990f]: https://github.com/cockroachdb/cockroach/commit/49855990f -[4b5fa7ce6]: https://github.com/cockroachdb/cockroach/commit/4b5fa7ce6 -[4e8a998bc]: https://github.com/cockroachdb/cockroach/commit/4e8a998bc -[4f02d6c09]: https://github.com/cockroachdb/cockroach/commit/4f02d6c09 -[5320237ce]: 
https://github.com/cockroachdb/cockroach/commit/5320237ce -[53f2fcd9a]: https://github.com/cockroachdb/cockroach/commit/53f2fcd9a -[55745a86e]: https://github.com/cockroachdb/cockroach/commit/55745a86e -[5b3e4e311]: https://github.com/cockroachdb/cockroach/commit/5b3e4e311 -[5bf79409d]: https://github.com/cockroachdb/cockroach/commit/5bf79409d -[5c1909bde]: https://github.com/cockroachdb/cockroach/commit/5c1909bde -[5c2920177]: https://github.com/cockroachdb/cockroach/commit/5c2920177 -[5ef1e3442]: https://github.com/cockroachdb/cockroach/commit/5ef1e3442 -[5f4496b89]: https://github.com/cockroachdb/cockroach/commit/5f4496b89 -[5fe941557]: https://github.com/cockroachdb/cockroach/commit/5fe941557 -[62665c54d]: https://github.com/cockroachdb/cockroach/commit/62665c54d -[655588aa2]: https://github.com/cockroachdb/cockroach/commit/655588aa2 -[6886ce4ce]: https://github.com/cockroachdb/cockroach/commit/6886ce4ce -[69c682c17]: https://github.com/cockroachdb/cockroach/commit/69c682c17 -[6f1a0761a]: https://github.com/cockroachdb/cockroach/commit/6f1a0761a -[70b00643b]: https://github.com/cockroachdb/cockroach/commit/70b00643b -[70c51c0a3]: https://github.com/cockroachdb/cockroach/commit/70c51c0a3 -[70dce126e]: https://github.com/cockroachdb/cockroach/commit/70dce126e -[746d4ff92]: https://github.com/cockroachdb/cockroach/commit/746d4ff92 -[759e606b8]: https://github.com/cockroachdb/cockroach/commit/759e606b8 -[75ec8b7cd]: https://github.com/cockroachdb/cockroach/commit/75ec8b7cd -[76860d69b]: https://github.com/cockroachdb/cockroach/commit/76860d69b -[7846c54ee]: https://github.com/cockroachdb/cockroach/commit/7846c54ee -[788decfef]: https://github.com/cockroachdb/cockroach/commit/788decfef -[7a6dbc31b]: https://github.com/cockroachdb/cockroach/commit/7a6dbc31b -[7c307a8e6]: https://github.com/cockroachdb/cockroach/commit/7c307a8e6 -[7f433bb0f]: https://github.com/cockroachdb/cockroach/commit/7f433bb0f -[7fa8b09c9]: 
https://github.com/cockroachdb/cockroach/commit/7fa8b09c9 -[836ca4355]: https://github.com/cockroachdb/cockroach/commit/836ca4355 -[85c5ff414]: https://github.com/cockroachdb/cockroach/commit/85c5ff414 -[89639fc5d]: https://github.com/cockroachdb/cockroach/commit/89639fc5d -[8a54f4d3c]: https://github.com/cockroachdb/cockroach/commit/8a54f4d3c -[8eecff7cf]: https://github.com/cockroachdb/cockroach/commit/8eecff7cf -[8fc9cf3bf]: https://github.com/cockroachdb/cockroach/commit/8fc9cf3bf -[90862db03]: https://github.com/cockroachdb/cockroach/commit/90862db03 -[9194c4fb3]: https://github.com/cockroachdb/cockroach/commit/9194c4fb3 -[92def9e4e]: https://github.com/cockroachdb/cockroach/commit/92def9e4e -[9332af775]: https://github.com/cockroachdb/cockroach/commit/9332af775 -[9397945fd]: https://github.com/cockroachdb/cockroach/commit/9397945fd -[968870b00]: https://github.com/cockroachdb/cockroach/commit/968870b00 -[9816f30fc]: https://github.com/cockroachdb/cockroach/commit/9816f30fc -[98744d378]: https://github.com/cockroachdb/cockroach/commit/98744d378 -[9885b80f3]: https://github.com/cockroachdb/cockroach/commit/9885b80f3 -[99c8020da]: https://github.com/cockroachdb/cockroach/commit/99c8020da -[9b5991d4d]: https://github.com/cockroachdb/cockroach/commit/9b5991d4d -[9d96bb69d]: https://github.com/cockroachdb/cockroach/commit/9d96bb69d -[9faf13771]: https://github.com/cockroachdb/cockroach/commit/9faf13771 -[a2545b4f8]: https://github.com/cockroachdb/cockroach/commit/a2545b4f8 -[a65c98636]: https://github.com/cockroachdb/cockroach/commit/a65c98636 -[a732de028]: https://github.com/cockroachdb/cockroach/commit/a732de028 -[a83c0e1e8]: https://github.com/cockroachdb/cockroach/commit/a83c0e1e8 -[a9358f9cb]: https://github.com/cockroachdb/cockroach/commit/a9358f9cb -[ab9f165ef]: https://github.com/cockroachdb/cockroach/commit/ab9f165ef -[af4df7927]: https://github.com/cockroachdb/cockroach/commit/af4df7927 -[b0360e59b]: 
https://github.com/cockroachdb/cockroach/commit/b0360e59b -[b1a554371]: https://github.com/cockroachdb/cockroach/commit/b1a554371 -[b2add06c5]: https://github.com/cockroachdb/cockroach/commit/b2add06c5 -[b40009496]: https://github.com/cockroachdb/cockroach/commit/b40009496 -[b4dead6ac]: https://github.com/cockroachdb/cockroach/commit/b4dead6ac -[b56d4660f]: https://github.com/cockroachdb/cockroach/commit/b56d4660f -[ba3b8a4b8]: https://github.com/cockroachdb/cockroach/commit/ba3b8a4b8 -[bb5d813e5]: https://github.com/cockroachdb/cockroach/commit/bb5d813e5 -[bc8c500f8]: https://github.com/cockroachdb/cockroach/commit/bc8c500f8 -[bea148291]: https://github.com/cockroachdb/cockroach/commit/bea148291 -[bed253e8c]: https://github.com/cockroachdb/cockroach/commit/bed253e8c -[c096bd7f3]: https://github.com/cockroachdb/cockroach/commit/c096bd7f3 -[c2e4b695b]: https://github.com/cockroachdb/cockroach/commit/c2e4b695b -[c42294531]: https://github.com/cockroachdb/cockroach/commit/c42294531 -[c968a33a5]: https://github.com/cockroachdb/cockroach/commit/c968a33a5 -[caaa7d8c2]: https://github.com/cockroachdb/cockroach/commit/caaa7d8c2 -[cc232325e]: https://github.com/cockroachdb/cockroach/commit/cc232325e -[ceb5cd52d]: https://github.com/cockroachdb/cockroach/commit/ceb5cd52d -[cfa437560]: https://github.com/cockroachdb/cockroach/commit/cfa437560 -[d14d7b678]: https://github.com/cockroachdb/cockroach/commit/d14d7b678 -[d1c7fcd34]: https://github.com/cockroachdb/cockroach/commit/d1c7fcd34 -[d21226daa]: https://github.com/cockroachdb/cockroach/commit/d21226daa -[d795e6d12]: https://github.com/cockroachdb/cockroach/commit/d795e6d12 -[db60a0718]: https://github.com/cockroachdb/cockroach/commit/db60a0718 -[df30a451b]: https://github.com/cockroachdb/cockroach/commit/df30a451b -[e3c07672d]: https://github.com/cockroachdb/cockroach/commit/e3c07672d -[e3e9b77b9]: https://github.com/cockroachdb/cockroach/commit/e3e9b77b9 -[e9f52e5fb]: 
https://github.com/cockroachdb/cockroach/commit/e9f52e5fb -[ea5a2ff15]: https://github.com/cockroachdb/cockroach/commit/ea5a2ff15 -[ec0ca0730]: https://github.com/cockroachdb/cockroach/commit/ec0ca0730 -[f0da5e8ec]: https://github.com/cockroachdb/cockroach/commit/f0da5e8ec -[f2ab387b9]: https://github.com/cockroachdb/cockroach/commit/f2ab387b9 -[f3619c1e1]: https://github.com/cockroachdb/cockroach/commit/f3619c1e1 -[f5800498f]: https://github.com/cockroachdb/cockroach/commit/f5800498f -[f9aca5d95]: https://github.com/cockroachdb/cockroach/commit/f9aca5d95 -[fb8f99b15]: https://github.com/cockroachdb/cockroach/commit/fb8f99b15 -[fc5920459]: https://github.com/cockroachdb/cockroach/commit/fc5920459 -[fedb692ae]: https://github.com/cockroachdb/cockroach/commit/fedb692ae -[fef97d93c]: https://github.com/cockroachdb/cockroach/commit/fef97d93c -[#110624]: https://github.com/cockroachdb/cockroach/pull/110624 -[#110623]: https://github.com/cockroachdb/cockroach/pull/110623 -[#107788]: https://github.com/cockroachdb/cockroach/pull/107788 -[#109042]: https://github.com/cockroachdb/cockroach/pull/109042 -[#109521]: https://github.com/cockroachdb/cockroach/pull/109521 -[#110078]: https://github.com/cockroachdb/cockroach/pull/110078 -[#110625]: https://github.com/cockroachdb/cockroach/pull/110625 -[#110130]: https://github.com/cockroachdb/cockroach/pull/110130 -[#110252]: https://github.com/cockroachdb/cockroach/pull/110252 -[#110900]: https://github.com/cockroachdb/cockroach/pull/110900 -[#107587]: https://github.com/cockroachdb/cockroach/pull/107587 -[#106515]: https://github.com/cockroachdb/cockroach/pull/106515 -[#84565]: https://github.com/cockroachdb/cockroach/pull/84565 diff --git a/src/current/_includes/releases/v23.2/v23.2.0-alpha.2.md b/src/current/_includes/releases/v23.2/v23.2.0-alpha.2.md index 67f37ef82b7..8d35f9df9b3 100644 --- a/src/current/_includes/releases/v23.2/v23.2.0-alpha.2.md +++ b/src/current/_includes/releases/v23.2/v23.2.0-alpha.2.md @@ -6,19 +6,19 @@ 
Release Date: October 2, 2023

Security updates

-- The `SIGHUP` signal now clears the cached expiration times for [client certificates]({% link v23.2/cockroach-cert.md %}#how-security-certificates-work) that are reported by the `security.certificate.expiration.client` metric. [#110726][#110726] +- The `SIGHUP` signal now clears the cached expiration times for [client certificates]({% link v23.2/cockroach-cert.md %}#how-security-certificates-work) that are reported by the `security.certificate.expiration.client` metric. #110726

General changes

-- Increased the maximum permitted value of the `COCKROACH_RPC_INITIAL_WINDOW_SIZE` environment variable to 64MB. In conjunction with tuning your operating system's maximum TCP window size, this can increase the throughput that Raft replication can sustain over high latency network links. [#111255][#111255] +- Increased the maximum permitted value of the `COCKROACH_RPC_INITIAL_WINDOW_SIZE` environment variable to 64MB. In conjunction with tuning your operating system's maximum TCP window size, this can increase the throughput that Raft replication can sustain over high latency network links. #111255

SQL language changes

-- The `discard` [log message]({% link v23.2/logging-overview.md %}) is now limited to once per minute by default. The message now includes both the number of transactions and the number of statements that were discarded. [#110805][#110805] -- The [cluster setting]({% link v23.2/cluster-settings.md %}) `kv.rangefeed.enabled` no longer controls access to `RANGEFEED SQL` commands. Instead, use `feature.changefeed.enabled`. [#110676][#110676] -- SQL commands that were previously limited to the `admin` [system privilege](https://www.cockroachlabs.com/docs/v23.2/security-reference/authorization#supported-privileges) can now be used by users with the `VIEWCLUSTERMETADATA` or `REPAIRCLUSTERMETADATA` system privilege, depending on whether the operation is read-only or modifies state. [#110084][#110084] -- Added a `last_error` column to the `cluster_execution_insights`, `node_execution_insights`, `cluster_txn_execution_insights`, and `node_txn_execution_insights` tables. These columns contain error messages for failed executions. [#110565][#110565] -- The new backup option `updates_cluster_monitoring_metrics` tracks the timestamp of the last backup failure due to a KMS error. This option is disabled by default. [#104634][#104634] +- The `discard` [log message]({% link v23.2/logging-overview.md %}) is now limited to once per minute by default. The message now includes both the number of transactions and the number of statements that were discarded. #110805 +- The [cluster setting]({% link v23.2/cluster-settings.md %}) `kv.rangefeed.enabled` no longer controls access to `RANGEFEED SQL` commands. Instead, use `feature.changefeed.enabled`. 
#110676 +- SQL commands that were previously limited to the `admin` [system privilege](https://www.cockroachlabs.com/docs/v23.2/security-reference/authorization#supported-privileges) can now be used by users with the `VIEWCLUSTERMETADATA` or `REPAIRCLUSTERMETADATA` system privilege, depending on whether the operation is read-only or modifies state. #110084 +- Added a `last_error` column to the `cluster_execution_insights`, `node_execution_insights`, `cluster_txn_execution_insights`, and `node_txn_execution_insights` tables. These columns contain error messages for failed executions. #110565 +- The new backup option `updates_cluster_monitoring_metrics` tracks the timestamp of the last backup failure due to a KMS error. This option is disabled by default. #104634 - The new restore option `strip_localities` optionally strips the locality information from a backup when restoring to a cluster with different regions than the source cluster. Restoring a cluster or database that contains regional-by-row tables, or restoring a regional-by-row table, requires you to modify the database: @@ -30,7 +30,7 @@ Release Date: October 2, 2023 This change is part of a larger effort, and this feature is subject to change. - [#110606][#110606] + #110606 - Added a check to disallow queries that use predicate locking, since explicit uniqueness checks are not yet supported under Read Committed isolation. `INSERT`, `UPDATE`, and `UPSERT` statements against some `REGIONAL BY ROW` tables will fail under Read Committed isolation with the following error: @@ -38,82 +38,51 @@ Release Date: October 2, 2023 unimplemented: explicit unique checks are not yet supported under read committed isolation SQLSTATE: 0A000 ~~~ - For more details about which `REGIONAL BY ROW` tables are affected, refer to [Issue #110873](https://github.com/cockroachdb/cockroach/issues/110873). + For more details about which `REGIONAL BY ROW` tables are affected, refer to Issue #110873. 
- [#110879][#110879] -- The `created` field produced by `SHOW STATISTICS` has been updated from `TIMESTAMP` to `TIMESTAMPTZ`. Statistic creation times are now displayed in the session time zone if it is set. [#110753][#110753] + #110879 +- The `created` field produced by `SHOW STATISTICS` has been updated from `TIMESTAMP` to `TIMESTAMPTZ`. Statistic creation times are now displayed in the session time zone if it is set. #110753

Operational changes

-- Removed the node-level `engine.stalls` timeseries metric. This metric has not been updated for several releases. [#110936][#110936] +- Removed the node-level `engine.stalls` timeseries metric. This metric has not been updated for several releases. #110936

DB Console changes

-- The legend is now always displayed on charts in DB Console Metrics pages. In addition, when you select an item from the legend that represents a single line in the chart, that line is selected in the chart. [#110809][#110809] -- When collecting a [statement bundle]({% link v23.2/cockroach-statement-diag.md %}), you can now filter by a specific [plan gist]({% link v23.2/ui-statements-page.md %}#explain-plans) or collect diagnostics for all plan gists. [#110931][#110931] -- [Statement]({% link v23.2/ui-statements-page.md %}) and [Transaction]({% link v23.2/ui-transactions-page.md %}) detail pages now include an **Error Message** row. Users with the `VIEWACTIVITY` [system privilege](https://www.cockroachlabs.com/docs/v23.2/security-reference/authorization#supported-privileges) can view the full error message, and users with the `VIEWACTIVTYREDACTED` system privilege can view the redacted error message. If a user has both privileges, `VIEWACTIVITYTREDACTED`` takes precedence. [#110849][#110849] -- A new dashboard in the [SQL Dashboard page]({% link v23.2/ui-sql-dashboard.md %}) tracks how often distributed queries with errors were rerun using the "rerun as local" mechanism, as well as how often those reruns failed. the number of times distributed queries that resulted in errors were rerun as local as well as when those reruns failed. The "rerun as local" mechanism is new in v23.2 and is enabled by default. For more information, contact your Cockroach Labs account representative. [#110619][#110619] -- The DB Console [Insights page](https://cockroachlabs.com/docs/v23.2/ui-insights-page) now shows the error message when a transaction fails at the `COMMIT` stage. [#110898][#110898] +- The legend is now always displayed on charts in DB Console Metrics pages. In addition, when you select an item from the legend that represents a single line in the chart, that line is selected in the chart. 
#110809 +- When collecting a [statement bundle]({% link v23.2/cockroach-statement-diag.md %}), you can now filter by a specific [plan gist]({% link v23.2/ui-statements-page.md %}#explain-plans) or collect diagnostics for all plan gists. #110931 +- [Statement]({% link v23.2/ui-statements-page.md %}) and [Transaction]({% link v23.2/ui-transactions-page.md %}) detail pages now include an **Error Message** row. Users with the `VIEWACTIVITY` [system privilege](https://www.cockroachlabs.com/docs/v23.2/security-reference/authorization#supported-privileges) can view the full error message, and users with the `VIEWACTIVITYREDACTED` system privilege can view the redacted error message. If a user has both privileges, `VIEWACTIVITYREDACTED` takes precedence. #110849 +- A new dashboard in the [SQL Dashboard page]({% link v23.2/ui-sql-dashboard.md %}) tracks how often distributed queries with errors were rerun using the "rerun as local" mechanism, as well as how often those reruns failed. The "rerun as local" mechanism is new in v23.2 and is enabled by default. For more information, contact your Cockroach Labs account representative. #110619 +- The DB Console [Insights page](https://cockroachlabs.com/docs/v23.2/ui-insights-page) now shows the error message when a transaction fails at the `COMMIT` stage. #110898 - The [Overload Dashboard page]({% link v23.2/ui-overload-dashboard.md %}) now includes the following graphs to monitor [admission control](https://www.cockroachlabs.com/docs/v23.2/admission-control.html): - **IO Overload** - Charts normalized metric based on admission control target thresholds. Replaces **LSM L0 Health** graph which used raw metrics. - **KV Admission Slots Exhausted** - Replaces **KV Admission Slots** graph. - **Flow Tokens Wait Time: 75th percentile** - Use to monitor the new replication admission control feature.
- **Requests Waiting For Flow Tokens** - Use to monitor the new replication admission control feature. - **Blocked Replication Streams** - Use to monitor the new replication admission control feature. -[#110135][#110135] +#110135

Bug fixes

-- Fixed a race condition in the [Replica lifecycle](https://www.cockroachlabs.com/docs/v23.2/architecture/replication-layer) that could result in a failed SQL request when the request could have been successfully retried. [#110806][#110806] -- Fixed a bug where a [`CREATE TABLE`]({% link v23.2/create-table.md %}) command with an `IDENTITY` column did not properly propagate the type of the column into the sequence. [#110621][#110621] -- Fixed a panic when decoding a gist in a foreign database that does not contain a table referred to by the gist. [#110966][#110966] -- A synthetic `dropped` column have been added to the `pg_attribute` table. This column tracks the attribution numbers for dropped attributions, to work around issues with ORMs that are not designed to handle gaps in attribution numbering in the `pg_attribute` table. [#111019][#111019] -- Fixed a rare internal error in the `unnest` and `information_schema._pg_expandarray` [built-in functions]({% link v23.2/functions-and-operators.md %}) where passed string arguments could be cast to an array. [#110956][#110956] -- External connection URLs now accept the scheme `azure-blob` for connections to Azure Blob Storage and the scheme `azure-kms` for connections to Azure KMS. For backward compatibility, schemes `azure` and `azure-storage` schemes continue to work for connections to Azure Blob Storage. [#111217][#111217] -- Fixed a bug where vectorized `COPY FROM` could produce a plan with more than one RenderNodes, when only zero or one should be allowed. This could result in multiple render nodes in a table with a hash sharded primary key. [#111284][#111284] -- Fixed a bug in DB Console's Statement Diagnostic page that could cause the page to crash if the response was larger than 50 KB. The page now keeps pulling results until no maximum size errors are encountered. 
[#111128][#111128] -- Fixed a bug where DB Console instances proxied at different subpaths that use OIDC pointed to an incorrect relative OIDC login path. [#111240][#111240] -- Fixed a bug where changing the setting `server.telemetry.hot_ranges_stats.interval` had no effect. [#111305][#111305] +- Fixed a race condition in the [Replica lifecycle](https://www.cockroachlabs.com/docs/v23.2/architecture/replication-layer) that could result in a failed SQL request when the request could have been successfully retried. #110806 +- Fixed a bug where a [`CREATE TABLE`]({% link v23.2/create-table.md %}) command with an `IDENTITY` column did not properly propagate the type of the column into the sequence. #110621 +- Fixed a panic when decoding a gist in a foreign database that does not contain a table referred to by the gist. #110966 +- A synthetic `dropped` column has been added to the `pg_attribute` table. This column tracks the attribution numbers for dropped attributions, to work around issues with ORMs that are not designed to handle gaps in attribution numbering in the `pg_attribute` table. #111019 +- Fixed a rare internal error in the `unnest` and `information_schema._pg_expandarray` [built-in functions]({% link v23.2/functions-and-operators.md %}) where passed string arguments could be cast to an array. #110956 +- External connection URLs now accept the scheme `azure-blob` for connections to Azure Blob Storage and the scheme `azure-kms` for connections to Azure KMS. For backward compatibility, the `azure` and `azure-storage` schemes continue to work for connections to Azure Blob Storage. #111217 +- Fixed a bug where vectorized `COPY FROM` could produce a plan with more than one RenderNode, when only zero or one should be allowed. This could result in multiple render nodes in a table with a hash sharded primary key. #111284 +- Fixed a bug in DB Console's Statement Diagnostic page that could cause the page to crash if the response was larger than 50 KB.
The page now keeps pulling results until no maximum size errors are encountered. #111128 +- Fixed a bug where DB Console instances proxied at different subpaths that use OIDC pointed to an incorrect relative OIDC login path. #111240 +- Fixed a bug where changing the setting `server.telemetry.hot_ranges_stats.interval` had no effect. #111305

Performance improvements

-- Fixed a performance bug that could result in rewriting a 128-MB file each time a store file is created, renamed, or removed when [Encryption At Rest](https://www.cockroachlabs.com/docs/v23.2/security-reference/encryption#encryption-at-rest) is enabled on a large store with many small files. [#111069][#111069] -- Improved compaction heuristics to mitigate read amplification growth and admission control throttling when processing large deletes, such as during node decommissioning, replica rebalancing, or when dropping tables. [#111277][#111277] +- Fixed a performance bug that could result in rewriting a 128-MB file each time a store file is created, renamed, or removed when [Encryption At Rest](https://www.cockroachlabs.com/docs/v23.2/security-reference/encryption#encryption-at-rest) is enabled on a large store with many small files. #111069 +- Improved compaction heuristics to mitigate read amplification growth and admission control throttling when processing large deletes, such as during node decommissioning, replica rebalancing, or when dropping tables. #111277

Contributors

-This release includes [157 merged PRs by 54 authors](https://github.com/cockroachdb/cockroach/compare/v23.2.0-alpha.1...2111b61b2d7c789bc03b1e9392062df80c779075). +This release includes 157 merged PRs by 54 authors. -[#104634]: https://github.com/cockroachdb/cockroach/pull/104634 -[#110084]: https://github.com/cockroachdb/cockroach/pull/110084 -[#110135]: https://github.com/cockroachdb/cockroach/pull/110135 -[#110565]: https://github.com/cockroachdb/cockroach/pull/110565 -[#110606]: https://github.com/cockroachdb/cockroach/pull/110606 -[#110609]: https://github.com/cockroachdb/cockroach/pull/110609 -[#110619]: https://github.com/cockroachdb/cockroach/pull/110619 -[#110621]: https://github.com/cockroachdb/cockroach/pull/110621 -[#110676]: https://github.com/cockroachdb/cockroach/pull/110676 -[#110726]: https://github.com/cockroachdb/cockroach/pull/110726 -[#110753]: https://github.com/cockroachdb/cockroach/pull/110753 -[#110805]: https://github.com/cockroachdb/cockroach/pull/110805 -[#110806]: https://github.com/cockroachdb/cockroach/pull/110806 -[#110809]: https://github.com/cockroachdb/cockroach/pull/110809 -[#110849]: https://github.com/cockroachdb/cockroach/pull/110849 -[#110879]: https://github.com/cockroachdb/cockroach/pull/110879 -[#110898]: https://github.com/cockroachdb/cockroach/pull/110898 -[#110931]: https://github.com/cockroachdb/cockroach/pull/110931 -[#110936]: https://github.com/cockroachdb/cockroach/pull/110936 -[#110956]: https://github.com/cockroachdb/cockroach/pull/110956 -[#110966]: https://github.com/cockroachdb/cockroach/pull/110966 -[#111019]: https://github.com/cockroachdb/cockroach/pull/111019 -[#111069]: https://github.com/cockroachdb/cockroach/pull/111069 -[#111115]: https://github.com/cockroachdb/cockroach/pull/111115 -[#111128]: https://github.com/cockroachdb/cockroach/pull/111128 -[#111217]: https://github.com/cockroachdb/cockroach/pull/111217 -[#111240]: https://github.com/cockroachdb/cockroach/pull/111240 -[#111255]: 
https://github.com/cockroachdb/cockroach/pull/111255 -[#111277]: https://github.com/cockroachdb/cockroach/pull/111277 -[#111284]: https://github.com/cockroachdb/cockroach/pull/111284 -[#111305]: https://github.com/cockroachdb/cockroach/pull/111305 diff --git a/src/current/_includes/releases/v23.2/v23.2.0-alpha.3.md b/src/current/_includes/releases/v23.2/v23.2.0-alpha.3.md index 62a75fb043d..b7af3a7740f 100644 --- a/src/current/_includes/releases/v23.2/v23.2.0-alpha.3.md +++ b/src/current/_includes/releases/v23.2/v23.2.0-alpha.3.md @@ -69,37 +69,37 @@ Release Date: October 10, 2023

{{ site.data.products.enterprise }} edition changes

-- [Changefeeds](../v23.2/create-and-configure-changefeeds.html) now support the `confluent-cloud://` sink scheme. This scheme can be used to connect to Kafka hosted on Confluent Cloud. The scheme functions identically to Kafka, but it has it's own authentication parameters. Namely, it requires `api_key` and `api_secret` to be passed as parameters in the sink URI. They must be URL encoded. An example URI is: `'confluent-cloud://pkc-lzvrd.us-west4.gcp.confluent.cloud:9092?api_key=&api_secret='`. By default, the options `tls_enabled=true`, `sasl_handshake=true`, `sasl_enabled=true`, and `sasl_mechanism=PLAIN` are applied. For more information about authenticating with Confluent Cloud, see https://docs.confluent.io/platform/current/security/security_tutorial.html#overview. The sink scheme still supports non-authentication parameters such as `topic_name` and `topic_prefix`. It also supports the standard Kafka changefeed options (ex. `kafka_sink_config`). [#111368][#111368] +- [Changefeeds](../v23.2/create-and-configure-changefeeds.html) now support the `confluent-cloud://` sink scheme. This scheme can be used to connect to Kafka hosted on Confluent Cloud. The scheme functions identically to Kafka, but it has its own authentication parameters. Namely, it requires `api_key` and `api_secret` to be passed as parameters in the sink URI. They must be URL encoded. An example URI is: `'confluent-cloud://pkc-lzvrd.us-west4.gcp.confluent.cloud:9092?api_key=&api_secret='`. By default, the options `tls_enabled=true`, `sasl_handshake=true`, `sasl_enabled=true`, and `sasl_mechanism=PLAIN` are applied. For more information about authenticating with Confluent Cloud, see https://docs.confluent.io/platform/current/security/security_tutorial.html#overview. The sink scheme still supports non-authentication parameters such as `topic_name` and `topic_prefix`. It also supports the standard Kafka changefeed options (ex. `kafka_sink_config`). #111368

SQL language changes

-- The [`RESTORE`](../v23.2/restore.html) option `strip_localities`, which was added in [#110606](https://github.com/cockroachdb/cockroach/pull/110606), has been renamed to `remove_regions`. This option will lead to a "region-less restore"; it is used to strip the locality and region information from a backup when there are mismatched cluster regions between the backup's cluster and the target cluster. Note that a restore using this option will fail if the backup's cluster had [`REGIONAL BY ROW`](../v23.2/multiregion-overview.html#table-locality) table localities. This is because the `RESTORE` statement has a contract that all tables must be available to serve writes once it finishes. [#111356][#111356] -- Added initial support for executing the PLpgSQL `OPEN` statement, which allows a PLpgSQL routine to create a [cursor](../v23.2/cursors.html). Currently, opening bound or unnamed cursors is not supported. In addition, `OPEN` statements cannot be used in a routine with an exception block. [#110709][#110709] -- Added support for declaring bound [cursors](../v23.2/cursors.html), which associate a query with a cursor in a PLpgSQL routine before it is opened. [#111092][#111092] -- The `SELECT FOR SHARE` and `SELECT FOR KEY SHARE` statements previously did not acquire any locks. Users issuing these statements would expect them to acquire shared locks (multiple readers allowed, but no writers). This patch switches over the behavior to acquire such read locks when the user has selected the [`READ COMMITTED` isolation level](../v23.2/transactions.html#isolation-levels). For serializable transactions, we default to the previous behavior, unless the `enable_shared_locking_for_serializable` [session setting](../v23.2/set-vars.html) is set to `true`. [#109638][#109638] -- When a PLpgSQL exception handler catches an error, it now rolls back any changes to database state that occurred within the block. 
Exception blocks are not currently permitted to catch [`40001`](../v23.2/common-errors.html#restart-transaction) and [`40003`](../v23.2/common-errors.html#result-is-ambiguous) errors. [#110998][#110998] -- Added support for unnamed PLpgSQL [cursors](../v23.2/cursors.html), which generate a unique name when no cursor name was specified. [#111329][#111329] -- Fixed a bug that caused CockroachDB to stop collecting new statistics about [Statement fingerprints](../v23.2/ui-statements-page.html#statement-fingerprint-page) and [Transaction fingerprints](../v23.2/ui-transactions-page.html). [#111613][#111613] -- Make the `max_event_frequency` [metric](../v23.2/metrics.html) visible for public documentation and usage. This is the maximum event frequency at which we sample executions for telemetry. [#111594][#111594] +- The [`RESTORE`](../v23.2/restore.html) option `strip_localities`, which was added in #110606, has been renamed to `remove_regions`. This option will lead to a "region-less restore"; it is used to strip the locality and region information from a backup when there are mismatched cluster regions between the backup's cluster and the target cluster. Note that a restore using this option will fail if the backup's cluster had [`REGIONAL BY ROW`](../v23.2/multiregion-overview.html#table-locality) table localities. This is because the `RESTORE` statement has a contract that all tables must be available to serve writes once it finishes. #111356 +- Added initial support for executing the PLpgSQL `OPEN` statement, which allows a PLpgSQL routine to create a [cursor](../v23.2/cursors.html). Currently, opening bound or unnamed cursors is not supported. In addition, `OPEN` statements cannot be used in a routine with an exception block. #110709 +- Added support for declaring bound [cursors](../v23.2/cursors.html), which associate a query with a cursor in a PLpgSQL routine before it is opened. 
#111092 +- The `SELECT FOR SHARE` and `SELECT FOR KEY SHARE` statements previously did not acquire any locks. Users issuing these statements would expect them to acquire shared locks (multiple readers allowed, but no writers). This patch switches over the behavior to acquire such read locks when the user has selected the [`READ COMMITTED` isolation level](../v23.2/transactions.html#isolation-levels). For serializable transactions, we default to the previous behavior, unless the `enable_shared_locking_for_serializable` [session setting](../v23.2/set-vars.html) is set to `true`. #109638 +- When a PLpgSQL exception handler catches an error, it now rolls back any changes to database state that occurred within the block. Exception blocks are not currently permitted to catch [`40001`](../v23.2/common-errors.html#restart-transaction) and [`40003`](../v23.2/common-errors.html#result-is-ambiguous) errors. #110998 +- Added support for unnamed PLpgSQL [cursors](../v23.2/cursors.html), which generate a unique name when no cursor name was specified. #111329 +- Fixed a bug that caused CockroachDB to stop collecting new statistics about [Statement fingerprints](../v23.2/ui-statements-page.html#statement-fingerprint-page) and [Transaction fingerprints](../v23.2/ui-transactions-page.html). #111613 +- Make the `max_event_frequency` [metric](../v23.2/metrics.html) visible for public documentation and usage. This is the maximum event frequency at which we sample executions for telemetry. #111594

Operational changes

-- Added the following [metrics](../v23.2/metrics.html) for [Raft](../v23.2/architecture/replication-layer.html#raft) proposals and reproposals: `raft.commands.proposed`, `raft.commands.reproposed.unchanged`, and `raft.commands.reproposed.new-lai`. [#111272][#111272] -- Removed the [cluster setting](../v23.2/cluster-settings.html) `spanconfig.store.enabled` and the ability to use the `COCKROACH_DISABLE_SPAN_CONFIGS` environment variable. [#110253][#110253] -- Renamed the [metric](../v23.2/metrics.html) `fluent.sink.conn.errors` to `log.fluent.sink.conn.errors`. The addition of the `log.` prefix was to better group together logging-related metrics. The behavior and purpose of the metric remains unchanged. [#111126][#111126] -- Set the Metric Type metadata on the [metric](../v23.2/metrics.html) `log.fluent.sink.conn.errors`. Previously, the Metric Type was incorrectly left unset. Note that this is an update to the metric's metadata; the behavior and purpose of the metric remains unchanged. [#111126][#111126] -- Added a new [metric](../v23.2/metrics.html) `log.buffered.messages.dropped`. Buffered network logging sinks have a `max-buffer-size` attribute, which determines, in bytes, how many log messages can be buffered. Any `fluent-server` or `http-server` log sink that makes use of a `buffering` attribute in its configuration (enabled by default) qualifies as a buffered network logging sink. If this buffer becomes full, and an additional log message is sent to the buffered log sink, the buffer would exceed this `max-buffer-size`. Therefore, the buffered log sink drops older messages in the buffer to handle, in order to make room for the new. `log.buffered.messages.dropped` counts the number of messages dropped from the buffer. Note that the count is shared across all buffered logging sinks. [#111126][#111126] -- Added the [metric](../v23.2/metrics.html) `log.messages.count`. This metric measures the count of messages logged on the node since startup. 
Note that this does not measure the fan-out of single log messages to the various configured [logging sinks](../v23.2/configure-logs.html#set-logging-levels). This metric can be helpful in understanding log rates and volumes. [#111126][#111126] -- Added the `file-based-headers` field found in the `http-defaults` section of the log config, which accepts 'key-filepath' pairs. This allows values found at filepaths to be updated without restarting the cluster by sending `SIGHUP` to notify that values need to be refreshed. [#111235][#111235] -- Added the [cluster setting](../v23.2/cluster-settings.html) `kv.snapshot.ingest_as_write_threshold`, which controls the size threshold below which snapshots are converted to regular writes. It defaults to `100KiB`. [#110943][#110943] +- Added the following [metrics](../v23.2/metrics.html) for [Raft](../v23.2/architecture/replication-layer.html#raft) proposals and reproposals: `raft.commands.proposed`, `raft.commands.reproposed.unchanged`, and `raft.commands.reproposed.new-lai`. #111272 +- Removed the [cluster setting](../v23.2/cluster-settings.html) `spanconfig.store.enabled` and the ability to use the `COCKROACH_DISABLE_SPAN_CONFIGS` environment variable. #110253 +- Renamed the [metric](../v23.2/metrics.html) `fluent.sink.conn.errors` to `log.fluent.sink.conn.errors`. The addition of the `log.` prefix was to better group together logging-related metrics. The behavior and purpose of the metric remains unchanged. #111126 +- Set the Metric Type metadata on the [metric](../v23.2/metrics.html) `log.fluent.sink.conn.errors`. Previously, the Metric Type was incorrectly left unset. Note that this is an update to the metric's metadata; the behavior and purpose of the metric remains unchanged. #111126 +- Added a new [metric](../v23.2/metrics.html) `log.buffered.messages.dropped`. Buffered network logging sinks have a `max-buffer-size` attribute, which determines, in bytes, how many log messages can be buffered. 
Any `fluent-server` or `http-server` log sink that makes use of a `buffering` attribute in its configuration (enabled by default) qualifies as a buffered network logging sink. If this buffer becomes full, and an additional log message is sent to the buffered log sink, the buffer would exceed this `max-buffer-size`. Therefore, the buffered log sink drops older messages in the buffer to handle, in order to make room for the new. `log.buffered.messages.dropped` counts the number of messages dropped from the buffer. Note that the count is shared across all buffered logging sinks. #111126 +- Added the [metric](../v23.2/metrics.html) `log.messages.count`. This metric measures the count of messages logged on the node since startup. Note that this does not measure the fan-out of single log messages to the various configured [logging sinks](../v23.2/configure-logs.html#set-logging-levels). This metric can be helpful in understanding log rates and volumes. #111126 +- Added the `file-based-headers` field found in the `http-defaults` section of the log config, which accepts 'key-filepath' pairs. This allows values found at filepaths to be updated without restarting the cluster by sending `SIGHUP` to notify that values need to be refreshed. #111235 +- Added the [cluster setting](../v23.2/cluster-settings.html) `kv.snapshot.ingest_as_write_threshold`, which controls the size threshold below which snapshots are converted to regular writes. It defaults to `100KiB`. #110943

Cluster virtualization

-- The name of the virtual cluster that the SQL client is connected to can now be inspected via the SQL [session variable](../v23.2/set-vars.html) `virtual_cluster_name`. [#111565][#111565] +- The name of the virtual cluster that the SQL client is connected to can now be inspected via the SQL [session variable](../v23.2/set-vars.html) `virtual_cluster_name`. #111565

Command-line changes

-- The following [cluster settings](../v23.2/cluster-settings.html) have been renamed; the previous names remain available for backward-compatibility. [#109415][#109415] +- The following [cluster settings](../v23.2/cluster-settings.html) have been renamed; the previous names remain available for backward-compatibility. #109415 | Previous name | New Name | |---------------------------------------|----------------------------------------------------| @@ -111,16 +111,16 @@ Release Date: October 10, 2023

DB Console changes

-- Fixed an error on the [SQL Activity page](../v23.2/ui-overview.html#sql-activity) when there was a workload, and then the workload stopped so that no queries ran against the database in the last hour. [#111420][#111420] -- On the [Metrics page](../v23.2/ui-overview.html#metrics), now the information about which metric is used to create each chart is available on the chart's tooltip. [#111469][#111469] +- Fixed an error on the [SQL Activity page](../v23.2/ui-overview.html#sql-activity) when there was a workload, and then the workload stopped so that no queries ran against the database in the last hour. #111420 +- On the [Metrics page](../v23.2/ui-overview.html#metrics), now the information about which metric is used to create each chart is available on the chart's tooltip. #111469

Bug fixes

-- Fixed the error message that is returned when the user attempts to drop an [`ENUM`](../v23.2/enum.html) value that is used at least twice in an [`ARRAY`](../v23.2/array.html) column. [#111354][#111354] -- Added a check for values before using `mean` on the [Plan Details page](../v23.2/ui-statements-page.html), fixing a crash. [#111472][#111472] -- Fixed the metric name for `Schema Registry Registrations` on the [Metrics page](../v23.2/ui-overview.html#metrics). [#111469][#111469] -- Fixed a panic that could occur if a query used a [string](../v23.2/string.html) larger than 2^31-1 bytes. This was triggered by attempting to [import](../v23.2/import.html) a 2.7 GiB CSV file. [#111627][#111627] -- Fixed a bug where `atttypmod` in `pg_attribute` was not populated for [`TIMESTAMP`](../v23.2/timestamp.html) / [`INTERVAL`](../v23.2/interval.html) types, which meant that ORMs could not know the precision of these types properly. [#111400][#111400] +- Fixed the error message that is returned when the user attempts to drop an [`ENUM`](../v23.2/enum.html) value that is used at least twice in an [`ARRAY`](../v23.2/array.html) column. #111354 +- Added a check for values before using `mean` on the [Plan Details page](../v23.2/ui-statements-page.html), fixing a crash. #111472 +- Fixed the metric name for `Schema Registry Registrations` on the [Metrics page](../v23.2/ui-overview.html#metrics). #111469 +- Fixed a panic that could occur if a query used a [string](../v23.2/string.html) larger than 2^31-1 bytes. This was triggered by attempting to [import](../v23.2/import.html) a 2.7 GiB CSV file. #111627 +- Fixed a bug where `atttypmod` in `pg_attribute` was not populated for [`TIMESTAMP`](../v23.2/timestamp.html) / [`INTERVAL`](../v23.2/interval.html) types, which meant that ORMs could not know the precision of these types properly. #111400
@@ -130,26 +130,3 @@ This release includes 130 merged PRs by 43 authors.
-[#109415]: https://github.com/cockroachdb/cockroach/pull/109415 -[#109638]: https://github.com/cockroachdb/cockroach/pull/109638 -[#110253]: https://github.com/cockroachdb/cockroach/pull/110253 -[#110709]: https://github.com/cockroachdb/cockroach/pull/110709 -[#110943]: https://github.com/cockroachdb/cockroach/pull/110943 -[#110998]: https://github.com/cockroachdb/cockroach/pull/110998 -[#111092]: https://github.com/cockroachdb/cockroach/pull/111092 -[#111126]: https://github.com/cockroachdb/cockroach/pull/111126 -[#111235]: https://github.com/cockroachdb/cockroach/pull/111235 -[#111272]: https://github.com/cockroachdb/cockroach/pull/111272 -[#111329]: https://github.com/cockroachdb/cockroach/pull/111329 -[#111342]: https://github.com/cockroachdb/cockroach/pull/111342 -[#111354]: https://github.com/cockroachdb/cockroach/pull/111354 -[#111356]: https://github.com/cockroachdb/cockroach/pull/111356 -[#111368]: https://github.com/cockroachdb/cockroach/pull/111368 -[#111400]: https://github.com/cockroachdb/cockroach/pull/111400 -[#111420]: https://github.com/cockroachdb/cockroach/pull/111420 -[#111469]: https://github.com/cockroachdb/cockroach/pull/111469 -[#111472]: https://github.com/cockroachdb/cockroach/pull/111472 -[#111565]: https://github.com/cockroachdb/cockroach/pull/111565 -[#111594]: https://github.com/cockroachdb/cockroach/pull/111594 -[#111613]: https://github.com/cockroachdb/cockroach/pull/111613 -[#111627]: https://github.com/cockroachdb/cockroach/pull/111627 diff --git a/src/current/_includes/releases/v23.2/v23.2.0-alpha.4.md b/src/current/_includes/releases/v23.2/v23.2.0-alpha.4.md index cf51f02ae8e..88d155c2a52 100644 --- a/src/current/_includes/releases/v23.2/v23.2.0-alpha.4.md +++ b/src/current/_includes/releases/v23.2/v23.2.0-alpha.4.md @@ -6,21 +6,21 @@ Release Date: October 23, 2023

General changes

-- Updated the `licenses/CCT.txt` file to reflect the latest [Cockroachdb Community License](https://www.cockroachlabs.com/cockroachdb-community-license/). [#112494][#112494] +- Updated the `licenses/CCT.txt` file to reflect the latest [CockroachDB Community License](https://www.cockroachlabs.com/cockroachdb-community-license/). #112494

{{ site.data.products.enterprise }} edition changes

-- Renamed [cluster settings](../v23.2/cluster-settings.html) related to [physical cluster replication](../v23.2/physical-cluster-replication-overview.html) for consistency. For example, `bulkio.stream_ingestion.minimum_flush_interval` is now `physical_replication.consumer.minimum_flush_interval`. [#111197][#111197] +- Renamed [cluster settings](../v23.2/cluster-settings.html) related to [physical cluster replication](../v23.2/physical-cluster-replication-overview.html) for consistency. For example, `bulkio.stream_ingestion.minimum_flush_interval` is now `physical_replication.consumer.minimum_flush_interval`. #111197

SQL language changes

-- [`SHOW SCHEDULES`](../v23.2/show-schedules.html) has two columns that surface the schedule options. These columns have been renamed to align with the documented option names: `on_previous_running` and `on_execution_failure`. [#111759][#111759] -- Added support for the [PLpgSQL `CLOSE` statement](https://www.postgresql.org/docs/current/plpgsql-cursors.html#PLPGSQL-CURSOR-USING-CLOSE), which allows a PLpgSQL routine to close a cursor with the name specified by a cursor variable. [#111330][#111330] -- When a [`RESTORE`](../v23.2/restore.html) with `remove_regions` is performed, the restore job will now fail if the object contains a [`REGIONAL BY ROW`](../v23.2/table-localities.html#regional-by-row-tables) table. [#111443][#111443] -- It is now possible to open a [cursor](../v23.2/cursors.html) within a PLpgSQL function or procedure with an exception block. If an error occurs, creation of the cursor is rolled back before control reaches the exception handler. [#111735][#111735] -- If a [scheduled backup](../v23.2/create-schedule-for-backup.html) resumes on a new cluster (e.g., after [physical cluster replication cutover](../v23.2/cutover-replication.html) or a cluster restore), the backup schedule will pause. The user may [resume the schedule](../v23.2/resume-schedules.html) without changing it, but should take special care to ensure no other schedule is backing up to the same [collection](../v23.2/take-full-and-incremental-backups.html#backup-collections). The user may also want to cancel the paused schedule and start a new one. [#111578][#111578] -- Added support for PLpgSQL [`FETCH`](https://www.postgresql.org/docs/current/plpgsql-cursors.html#PLPGSQL-CURSOR-USING-FETCH) and [`MOVE`](https://www.postgresql.org/docs/current/plpgsql-cursors.html#PLPGSQL-CURSOR-USING-MOVE) statements. Similar to SQL `FETCH`/`MOVE` statements, commands that would seek the [cursor](../v23.2/cursors.html) backward will fail. 
In addition, expressions other than constant integers are not yet supported for the `count` option. [#111318][#111318] -- Added support for the [`REFCURSOR`](https://www.postgresql.org/docs/current/plpgsql-cursors.html#PLPGSQL-CURSOR-DECLARATIONS) data type. `REFCURSOR` is a special string type that is used to handle cursors. PLpgSQL cursor declarations are required to use a variable of type `REFCURSOR`, and the name of a cursor can be passed to and from a PLpgSQL function or procedure. [#111392][#111392] +- [`SHOW SCHEDULES`](../v23.2/show-schedules.html) has two columns that surface the schedule options. These columns have been renamed to align with the documented option names: `on_previous_running` and `on_execution_failure`. #111759 +- Added support for the [PLpgSQL `CLOSE` statement](https://www.postgresql.org/docs/current/plpgsql-cursors.html#PLPGSQL-CURSOR-USING-CLOSE), which allows a PLpgSQL routine to close a cursor with the name specified by a cursor variable. #111330 +- When a [`RESTORE`](../v23.2/restore.html) with `remove_regions` is performed, the restore job will now fail if the object contains a [`REGIONAL BY ROW`](../v23.2/table-localities.html#regional-by-row-tables) table. #111443 +- It is now possible to open a [cursor](../v23.2/cursors.html) within a PLpgSQL function or procedure with an exception block. If an error occurs, creation of the cursor is rolled back before control reaches the exception handler. #111735 +- If a [scheduled backup](../v23.2/create-schedule-for-backup.html) resumes on a new cluster (e.g., after [physical cluster replication cutover](../v23.2/cutover-replication.html) or a cluster restore), the backup schedule will pause. The user may [resume the schedule](../v23.2/resume-schedules.html) without changing it, but should take special care to ensure no other schedule is backing up to the same [collection](../v23.2/take-full-and-incremental-backups.html#backup-collections). 
The user may also want to cancel the paused schedule and start a new one. #111578 +- Added support for PLpgSQL [`FETCH`](https://www.postgresql.org/docs/current/plpgsql-cursors.html#PLPGSQL-CURSOR-USING-FETCH) and [`MOVE`](https://www.postgresql.org/docs/current/plpgsql-cursors.html#PLPGSQL-CURSOR-USING-MOVE) statements. Similar to SQL `FETCH`/`MOVE` statements, commands that would seek the [cursor](../v23.2/cursors.html) backward will fail. In addition, expressions other than constant integers are not yet supported for the `count` option. #111318 +- Added support for the [`REFCURSOR`](https://www.postgresql.org/docs/current/plpgsql-cursors.html#PLPGSQL-CURSOR-DECLARATIONS) data type. `REFCURSOR` is a special string type that is used to handle cursors. PLpgSQL cursor declarations are required to use a variable of type `REFCURSOR`, and the name of a cursor can be passed to and from a PLpgSQL function or procedure. #111392 - Added two changes to [`FOR UPDATE`](../v23.2/select-for-update.html): - Multiple `FOR UPDATE` clauses on fully parenthesized queries are now disallowed. For example, the following statements are now disallowed: @@ -55,7 +55,7 @@ Release Date: October 23, 2023 INSERT INTO t VALUES (1) FOR UPDATE; ~~~ - This matches PostgreSQL. [#111258][#111258] + This matches PostgreSQL. #111258 - `FOR UPDATE` is now permitted on some queries that were previously disallowed. Queries that use the following operations are now allowed to have `FOR UPDATE OF` as long as the prohibited operation is in a subquery not locked by the `FOR UPDATE OF`: - `UNION` - `INTERSECT` @@ -73,15 +73,15 @@ Release Date: October 23, 2023 SELECT * FROM t, (SELECT DISTINCT 0, 0 UNION SELECT a, count(*) FROM t GROUP BY a HAVING a > 0) AS u FOR UPDATE OF t; ~~~ - This matches PostgreSQL. [#111258][#111258] -- Identifiers after numeric constants that are not separated by whitespace are now disallowed to match PostgreSQL 15 behavior. 
[#112021][#112021] -- Added the new column `contention_type` to the [`crdb_internal.transaction_contention_events`](../v23.2/crdb-internal.html) table. This column indicates the type of [transaction contention](../v23.2/performance-best-practices-overview.html#transaction-contention) encountered. Current values are `LOCK_WAIT` and `SERIALIZATION_CONFLICT`. [#111685][#111685] -- Changed the error message: `statement error cannot execute FOR UPDATE in a read-only transaction` to `statement error cannot execute SELECT FOR UPDATE in a read-only transaction` to match PostgreSQL. [#112138][#112138] -- Added a new [session variable](../v23.2/set-vars.html) `optimizer_use_lock_op_for_serializable`, which when set enables a new implementation of `SELECT FOR UPDATE`. This new implementation of `SELECT FOR UPDATE` acquires row locks **after** any joins and filtering, and always acquires row locks on the primary index of the table being locked. This more closely matches [`SELECT FOR UPDATE`](../v23.2/select-for-update.html) behavior in PostgreSQL, but at the cost of more round trips from gateway node to replica leaseholder. Under read-committed isolation (and other isolation levels weaker than serializable), CockroachDB will always use this new implementation of `SELECT FOR UPDATE` regardless of the value of `optimizer_use_lock_op_for_serializable` to ensure correctness. [#112138][#112138] + This matches PostgreSQL. #111258 +- Identifiers after numeric constants that are not separated by whitespace are now disallowed to match PostgreSQL 15 behavior. #112021 +- Added the new column `contention_type` to the [`crdb_internal.transaction_contention_events`](../v23.2/crdb-internal.html) table. This column indicates the type of [transaction contention](../v23.2/performance-best-practices-overview.html#transaction-contention) encountered. Current values are `LOCK_WAIT` and `SERIALIZATION_CONFLICT`. 
#111685 +- Changed the error message: `statement error cannot execute FOR UPDATE in a read-only transaction` to `statement error cannot execute SELECT FOR UPDATE in a read-only transaction` to match PostgreSQL. #112138 +- Added a new [session variable](../v23.2/set-vars.html) `optimizer_use_lock_op_for_serializable`, which when set enables a new implementation of `SELECT FOR UPDATE`. This new implementation of `SELECT FOR UPDATE` acquires row locks **after** any joins and filtering, and always acquires row locks on the primary index of the table being locked. This more closely matches [`SELECT FOR UPDATE`](../v23.2/select-for-update.html) behavior in PostgreSQL, but at the cost of more round trips from gateway node to replica leaseholder. Under read-committed isolation (and other isolation levels weaker than serializable), CockroachDB will always use this new implementation of `SELECT FOR UPDATE` regardless of the value of `optimizer_use_lock_op_for_serializable` to ensure correctness. #112138

Operational changes

-- Added a new [cluster setting](../v23.2/cluster-settings.html) `server.http.base_path` that controls the redirection of the browser after successful login with [OIDC SSO](../v23.2/sso-db-console.html). It is unlikely that this setting would need adjustment. However, it is helpful in cases where CockroachDB is running behind a load balancer or proxy that serves CockroachDB under a subpath, such as `https:// /crdb/ `. In those cases, it is necessary for the browser to redirect to `/ crdb` after login instead of `/`, which has always been the hard-coded default. [#111283][#111283] +- Added a new [cluster setting](../v23.2/cluster-settings.html) `server.http.base_path` that controls the redirection of the browser after successful login with [OIDC SSO](../v23.2/sso-db-console.html). It is unlikely that this setting would need adjustment. However, it is helpful in cases where CockroachDB is running behind a load balancer or proxy that serves CockroachDB under a subpath, such as `https://<server>/crdb/`. In those cases, it is necessary for the browser to redirect to `/crdb` after login instead of `/`, which has always been the hard-coded default. #111283

Cluster virtualization

@@ -90,9 +90,9 @@ Release Date: October 23, 2023 - `server.rangelog.ttl` - `timeseries.storage.*` - [#111769][#111769] -- The [cluster settings](../v23.2/cluster-settings.html) `cluster.organization` and `enterprise.license` can now only be set via the system virtual cluster. Attempting to set them from any other virtual cluster results in an error. [#111788][#111788] -- A new flag `--internal-rpc-port-range` allows operators to specify the port range used by virtual clusters for node-to-node communication. Users implementing [physical cluster replication](../v23.2/physical-cluster-replication-overview.html) or cluster virtualization public preview features should use this flag if they require the `cockroach` processes to only communicate using ports in a known port range. [#111798][#111798] + #111769 +- The [cluster settings](../v23.2/cluster-settings.html) `cluster.organization` and `enterprise.license` can now only be set via the system virtual cluster. Attempting to set them from any other virtual cluster results in an error. #111788 +- A new flag `--internal-rpc-port-range` allows operators to specify the port range used by virtual clusters for node-to-node communication. Users implementing [physical cluster replication](../v23.2/physical-cluster-replication-overview.html) or cluster virtualization public preview features should use this flag if they require the `cockroach` processes to only communicate using ports in a known port range. #111798 - Two guardrails are available to system operators to help with users upgrading from a deployment without cluster virtualization enabled to a deployment using cluster virtualization. This is intended to help in cases where the user is not connected to the correct SQL interface to perform certain configuration operations. There are two guardrails included: - The `sql.restrict_system_interface.enabled` cluster setting encourages users to use a virtual cluster for their application workload. 
When set, certain common operations that end users may execute to set up an application workload are disallowed, such as running DDL statements or modifying an application level cluster setting. Users will receive an error similar to: @@ -115,38 +115,38 @@ Release Date: October 23, 2023 HINT: Connect to the system interface and modify the cluster setting from there. ~~~ - [#111568][#111568] -- The predefined config profiles related to cluster virtualization now automatically set the new [cluster settings](../v23.2/cluster-settings.html) `sql.restrict_system_interface.enabled` and `sql.error_tip_system_interface.enabled`. [#111568][#111568] -- The hidden `--secondary-tenant-port-offset` option has been removed. Users who were previously using this option should use `--internal-rpc-port-range` instead. [#112050][#112050] -- Added support for automatic finalization of a virtual cluster's version upgrade. A new setting `cluster.auto_upgrade.enabled` was added to enable and disable automatic cluster version upgrade (finalization). It will be used in automatic upgrade of both the storage cluster and its virtual clusters. [#102427][#102427] + #111568 +- The predefined config profiles related to cluster virtualization now automatically set the new [cluster settings](../v23.2/cluster-settings.html) `sql.restrict_system_interface.enabled` and `sql.error_tip_system_interface.enabled`. #111568 +- The hidden `--secondary-tenant-port-offset` option has been removed. Users who were previously using this option should use `--internal-rpc-port-range` instead. #112050 +- Added support for automatic finalization of a virtual cluster's version upgrade. A new setting `cluster.auto_upgrade.enabled` was added to enable and disable automatic cluster version upgrade (finalization). It will be used in automatic upgrade of both the storage cluster and its virtual clusters. #102427

Command-line changes

-- [`cockroach debug zip`](../v23.2/cockroach-debug-zip.html) has an additional flag that is default off `include-running-job-traces` that will enable collecting the in-flight traces of traceable jobs, such as [backup](../v23.2/backup.html), [restore](../v23.2/restore.html), [import](../v23.2/import-into.html), [physical cluster replication](../v23.2/physical-cluster-replication-overview.html) and dump them in a `jobs/` subdirectory in the zip. [#112644][#112644] +- [`cockroach debug zip`](../v23.2/cockroach-debug-zip.html) has an additional flag that is default off `include-running-job-traces` that will enable collecting the in-flight traces of traceable jobs, such as [backup](../v23.2/backup.html), [restore](../v23.2/restore.html), [import](../v23.2/import-into.html), [physical cluster replication](../v23.2/physical-cluster-replication-overview.html) and dump them in a `jobs/` subdirectory in the zip. #112644

DB Console changes

-- The [**Jobs** table](../v23.2/ui-jobs-page.html) will now correctly display timestamps for creation, last modified, and the completed time fields. [#110366][#110366] +- The [**Jobs** table](../v23.2/ui-jobs-page.html) will now correctly display timestamps for creation, last modified, and the completed time fields. #110366 - The [transaction insight details](../v23.2/ui-insights-page.html) will show the following details when CockroachDB has information on a transaction execution with a `40001` error code and it has captured the conflicting transaction meta details (only available if the transaction had not yet committed at the time of execution). A section called `Failed Execution` will appear when this information is available and it will contain: - Blocking transaction execution ID - Blocking transaction fingerprint ID - Conflict location - - Database, table, and index names [#111873][#111873] -- Added progressive loading functionality to the [Databases page](../v23.2/ui-databases-page.html). [#110901][#110901] + - Database, table, and index names #111873 +- Added progressive loading functionality to the [Databases page](../v23.2/ui-databases-page.html). #110901

Bug fixes

-- Fixed a bug in [physical cluster replication](../v23.2/physical-cluster-replication-overview.html) where the primary cluster would not be able to take [backups](../v23.2/take-full-and-incremental-backups.html) when a primary cluster node was unavailable. [#111337][#111337] -- Fixed a bug in [transaction insight details](../v23.2/ui-insights-page.html) where it was possible to see the contention details of other transactions. Now, CockroachDB will only surface contention details for the current transaction. [#111867][#111867] -- [Voter constraints](../v23.2/configure-replication-zones.html) will now be satisfied by promoting existing non-voters. Previously, there was a bug where voter constraints were never satisfied due to all existing replicas being considered necessary to satisfy a replica constraint. [#111609][#111609] -- Fixed a bug where `indoption` inside `pg_index` was not properly encoded causing clients to be unable to decode it as `int2vector`. [#111911][#111911] -- This patch fixes an issue where the [optimizer](../v23.2/cost-based-optimizer.html) fails to honor the `statement_timeout` session setting when generating constrained index scans for queries with large `IN` lists or `= ANY` predicates on multiple index key columns, which may lead to an out of memory condition on the node. [#111979][#111979] -- This patch fixes a performance issue in join queries with a `LIMIT` clause, where the [optimizer](../v23.2/cost-based-optimizer.html) may fail to push a `WHERE` clause filter into a join due to how the `LIMIT` operation is internally rewritten. This causes a full scan of the table referenced in the filter. [#110593][#110593] -- Fixed a bug that caused internal errors during query optimization in rare cases. The bug has been present since version v2.1.11, but it is more likely to occur in version v21.2.0 and later, though it is still rare. 
The bug only presents when a query contains `min` and `max` [aggregate functions](../v23.2/functions-and-operators.html#aggregate-functions). [#112255][#112255] +- Fixed a bug in [physical cluster replication](../v23.2/physical-cluster-replication-overview.html) where the primary cluster would not be able to take [backups](../v23.2/take-full-and-incremental-backups.html) when a primary cluster node was unavailable. #111337 +- Fixed a bug in [transaction insight details](../v23.2/ui-insights-page.html) where it was possible to see the contention details of other transactions. Now, CockroachDB will only surface contention details for the current transaction. #111867 +- [Voter constraints](../v23.2/configure-replication-zones.html) will now be satisfied by promoting existing non-voters. Previously, there was a bug where voter constraints were never satisfied due to all existing replicas being considered necessary to satisfy a replica constraint. #111609 +- Fixed a bug where `indoption` inside `pg_index` was not properly encoded causing clients to be unable to decode it as `int2vector`. #111911 +- This patch fixes an issue where the [optimizer](../v23.2/cost-based-optimizer.html) fails to honor the `statement_timeout` session setting when generating constrained index scans for queries with large `IN` lists or `= ANY` predicates on multiple index key columns, which may lead to an out of memory condition on the node. #111979 +- This patch fixes a performance issue in join queries with a `LIMIT` clause, where the [optimizer](../v23.2/cost-based-optimizer.html) may fail to push a `WHERE` clause filter into a join due to how the `LIMIT` operation is internally rewritten. This causes a full scan of the table referenced in the filter. #110593 +- Fixed a bug that caused internal errors during query optimization in rare cases. The bug has been present since version v2.1.11, but it is more likely to occur in version v21.2.0 and later, though it is still rare. 
The bug only presents when a query contains `min` and `max` [aggregate functions](../v23.2/functions-and-operators.html#aggregate-functions). #112255

Performance improvements

-- This patch adds support for insert fast-path uniqueness checks on [`REGIONAL BY ROW`](../v23.2/table-localities.html#regional-by-row-tables) tables where the source is a `VALUES` clause with a single row. This results in a reduction in latency for single-row inserts to `REGIONAL BY ROW` tables and hash-sharded `REGIONAL BY ROW` tables with unique indexes. [#111822][#111822] +- This patch adds support for insert fast-path uniqueness checks on [`REGIONAL BY ROW`](../v23.2/table-localities.html#regional-by-row-tables) tables where the source is a `VALUES` clause with a single row. This results in a reduction in latency for single-row inserts to `REGIONAL BY ROW` tables and hash-sharded `REGIONAL BY ROW` tables with unique indexes. #111822

Contributors

@@ -158,40 +158,3 @@ We would like to thank the following contributors from the CockroachDB community -[#102427]: https://github.com/cockroachdb/cockroach/pull/102427 -[#110366]: https://github.com/cockroachdb/cockroach/pull/110366 -[#110593]: https://github.com/cockroachdb/cockroach/pull/110593 -[#110870]: https://github.com/cockroachdb/cockroach/pull/110870 -[#110901]: https://github.com/cockroachdb/cockroach/pull/110901 -[#111197]: https://github.com/cockroachdb/cockroach/pull/111197 -[#111258]: https://github.com/cockroachdb/cockroach/pull/111258 -[#111283]: https://github.com/cockroachdb/cockroach/pull/111283 -[#111303]: https://github.com/cockroachdb/cockroach/pull/111303 -[#111318]: https://github.com/cockroachdb/cockroach/pull/111318 -[#111330]: https://github.com/cockroachdb/cockroach/pull/111330 -[#111337]: https://github.com/cockroachdb/cockroach/pull/111337 -[#111392]: https://github.com/cockroachdb/cockroach/pull/111392 -[#111443]: https://github.com/cockroachdb/cockroach/pull/111443 -[#111568]: https://github.com/cockroachdb/cockroach/pull/111568 -[#111578]: https://github.com/cockroachdb/cockroach/pull/111578 -[#111609]: https://github.com/cockroachdb/cockroach/pull/111609 -[#111685]: https://github.com/cockroachdb/cockroach/pull/111685 -[#111735]: https://github.com/cockroachdb/cockroach/pull/111735 -[#111759]: https://github.com/cockroachdb/cockroach/pull/111759 -[#111769]: https://github.com/cockroachdb/cockroach/pull/111769 -[#111788]: https://github.com/cockroachdb/cockroach/pull/111788 -[#111798]: https://github.com/cockroachdb/cockroach/pull/111798 -[#111822]: https://github.com/cockroachdb/cockroach/pull/111822 -[#111833]: https://github.com/cockroachdb/cockroach/pull/111833 -[#111867]: https://github.com/cockroachdb/cockroach/pull/111867 -[#111873]: https://github.com/cockroachdb/cockroach/pull/111873 -[#111911]: https://github.com/cockroachdb/cockroach/pull/111911 -[#111979]: https://github.com/cockroachdb/cockroach/pull/111979 -[#112021]: 
https://github.com/cockroachdb/cockroach/pull/112021 -[#112050]: https://github.com/cockroachdb/cockroach/pull/112050 -[#112138]: https://github.com/cockroachdb/cockroach/pull/112138 -[#112255]: https://github.com/cockroachdb/cockroach/pull/112255 -[#112457]: https://github.com/cockroachdb/cockroach/pull/112457 -[#112494]: https://github.com/cockroachdb/cockroach/pull/112494 -[#112589]: https://github.com/cockroachdb/cockroach/pull/112589 -[#112644]: https://github.com/cockroachdb/cockroach/pull/112644 diff --git a/src/current/_includes/releases/v23.2/v23.2.0-alpha.5.md b/src/current/_includes/releases/v23.2/v23.2.0-alpha.5.md index 4a6d73c0c02..69d2e3704e9 100644 --- a/src/current/_includes/releases/v23.2/v23.2.0-alpha.5.md +++ b/src/current/_includes/releases/v23.2/v23.2.0-alpha.5.md @@ -6,11 +6,11 @@ Release Date: October 30, 2023

SQL language changes

-- Added support for the special `OTHERS` condition in PL/pgSQL exception blocks, which allows matching any error code apart from `query_canceled` and `assert_failure`. Note that Class 40 errors (`40000`, `40001`, `40003`, `40002`, and `40P01`) cannot be caught either. This is tracked in [#111446](https://github.com/cockroachdb/cockroach/pull/111446). [#112817][#112817] +- Added support for the special `OTHERS` condition in PL/pgSQL exception blocks, which allows matching any error code apart from `query_canceled` and `assert_failure`. Note that Class 40 errors (`40000`, `40001`, `40003`, `40002`, and `40P01`) cannot be caught either. This is tracked in #111446. #112817

Bug fixes

-- Previously, queries with the [`ST_Union`](../v23.2/st_union.html) aggregate function could produce incorrect results in some cases due to the query optimizer performing invalid optimizations. This is now fixed. This bug had been present since the `ST_Union` function was introduced in v20.2.0. [#112780][#112780] +- Previously, queries with the [`ST_Union`](../v23.2/st_union.html) aggregate function could produce incorrect results in some cases due to the query optimizer performing invalid optimizations. This is now fixed. This bug had been present since the `ST_Union` function was introduced in v20.2.0. #112780
@@ -20,5 +20,3 @@ This release includes 27 merged PRs by 17 authors.
-[#112780]: https://github.com/cockroachdb/cockroach/pull/112780 -[#112817]: https://github.com/cockroachdb/cockroach/pull/112817 diff --git a/src/current/_includes/releases/v23.2/v23.2.0-alpha.6.md b/src/current/_includes/releases/v23.2/v23.2.0-alpha.6.md index 3a62d428f4f..af84e14e6b6 100644 --- a/src/current/_includes/releases/v23.2/v23.2.0-alpha.6.md +++ b/src/current/_includes/releases/v23.2/v23.2.0-alpha.6.md @@ -6,62 +6,43 @@ Release Date: November 7, 2023

General changes

-- The CockroachDB Docker image is now based on [Red Hat's ubi9/ubi-minimal image](https://catalog.redhat.com/software/containers/ubi9/ubi-minimal/615bd9b4075b022acc111bf5?architecture=amd64&image=652fc5a903899c8ddcf105be) instead of the ubi8/ubi-minimal image. [#112967][#112967] +- The CockroachDB Docker image is now based on [Red Hat's ubi9/ubi-minimal image](https://catalog.redhat.com/software/containers/ubi9/ubi-minimal/615bd9b4075b022acc111bf5?architecture=amd64&image=652fc5a903899c8ddcf105be) instead of the ubi8/ubi-minimal image. #112967

SQL language changes

-- Added the built-in [function](../v23.2/functions-and-operators.html) `jsonb_array_to_string_array` that converts [`JSONB`](../v23.2/jsonb.html) array to [`STRING`](../v23.2/string.html) array. [#112865][#112865] -- The built-in [function](../v23.2/functions-and-operators.html) `jsonb_array_to_string_array` can now return [`NULL`](../v23.2/null-handling.html) objects. [#112865][#112865] +- Added the built-in [function](../v23.2/functions-and-operators.html) `jsonb_array_to_string_array` that converts [`JSONB`](../v23.2/jsonb.html) array to [`STRING`](../v23.2/string.html) array. #112865 +- The built-in [function](../v23.2/functions-and-operators.html) `jsonb_array_to_string_array` can now return [`NULL`](../v23.2/null-handling.html) objects. #112865

Operational changes

-- Introduced the [cluster setting](../v23.2/cluster-settings.html) `kv.gc.sticky_hint.enabled` that helps expediting [garbage collection](../v23.2/architecture/storage-layer.html#garbage-collection) after range deletions. For example, when a SQL table or index is dropped. `kv.gc.sticky_hint.enabled` is enabled by default in v23.2. The setting has been deprecated in v23.2. [#113040][#113040] -- Introduced a new [environment variable](../v23.2/set-vars.html) that allows an operator to configure the [compaction](../v23.2/architecture/storage-layer.html#compaction) concurrency. [#113313][#113313] -- [Debug zip](../v23.2/cockroach-debug-zip.html) will now collect the active traces of all running or reverting traceable jobs. This includes [restores](../v23.2/restore.html), [imports](../v23.2/import.html), [backups](../v23.2/backup.html), and [physical cluster replication](../v23.2/physical-cluster-replication-overview.html). [#113172][#113172] +- Introduced the [cluster setting](../v23.2/cluster-settings.html) `kv.gc.sticky_hint.enabled` that helps expedite [garbage collection](../v23.2/architecture/storage-layer.html#garbage-collection) after range deletions. For example, when a SQL table or index is dropped. `kv.gc.sticky_hint.enabled` is enabled by default in v23.2. The setting has been deprecated in v23.2. #113040 +- Introduced a new [environment variable](../v23.2/set-vars.html) that allows an operator to configure the [compaction](../v23.2/architecture/storage-layer.html#compaction) concurrency. #113313 +- [Debug zip](../v23.2/cockroach-debug-zip.html) will now collect the active traces of all running or reverting traceable jobs. This includes [restores](../v23.2/restore.html), [imports](../v23.2/import.html), [backups](../v23.2/backup.html), and [physical cluster replication](../v23.2/physical-cluster-replication-overview.html). #113172

Cluster virtualization

-- The [privilege](../v23.2/security-reference/authorization.html#supported-privileges) that controls access to `CREATE VIRTUAL CLUSTER` and other virtual cluster management syntax is now called `MANAGEVIRTUALCLUSTER`. [#113076][#113076] +- The [privilege](../v23.2/security-reference/authorization.html#supported-privileges) that controls access to `CREATE VIRTUAL CLUSTER` and other virtual cluster management syntax is now called `MANAGEVIRTUALCLUSTER`. #113076

Bug fixes

-- Fixed a bug that could prevent [`RESTORE`](../v23.2/restore.html) from working if it was performed during a cluster upgrade. [#112759][#112759] -- Fixed a bug where the opclass for a [trigram index](../v23.2/trigram-indexes.html) is not shown if CockroachDB creates a trigram index and later displays it via [`SHOW CREATE TABLE`](../v23.2/show-create.html). [#113071][#113071] -- Fixed a bug where CockroachDB could incorrectly evaluate [lookup](../v23.2/joins.html#lookup-joins) and index [joins](../v23.2/joins.html) into tables with at least three [column families](../v23.2/column-families.html). This would result in either the `non-nullable column with no value` internal error, or the query would return incorrect results. This bug was introduced in v22.2. [#113105][#113105] -- Fixed a bug where [`ALTER PRIMARY KEY`](../v23.2/alter-table.html#alter-primary-key) would incorrectly disable [secondary indexes](../v23.2/schema-design-indexes.html) while new secondary indexes were being backfilled when using the [declarative schema changer](../v23.2/online-schema-changes.html). [#112627][#112627] -- Fixed a bug where the `unique_constraint_catalog` and `unique_constraint_schema` columns in [`information_schema.referential_constraints`](../v23.2/information-schema.html#referential_constraints) could be incorrect for cross schema or cross database references. [#112739][#112739] -- Fixed a bug in a method that was used by some of the [jobs](../v23.2/show-jobs.html) observability infrastructure. This method could be triggered if a file was overwritten with a different chunking strategy. [#113290][#113290] -- Fixed a bug where the result of [`SHOW CREATE TABLE`](../v23.2/show-create.html) for a table that had a [collated string column](../v23.2/collate.html) with a default expression was incorrect because the statement could not be parsed. 
[#113119][#113119] -- Fixed the SQL activity update job to: avoid conflicts on update, reduce the amount of data cached to only what the overview page requires, and fix the correctness of the top queries. [#112865][#112865] -- Fixed a bug that could prevent [physical cluster replication](../v23.2/physical-cluster-replication-overview.html) from advancing in the face of some range deletion operations. [#113041][#113041] -- Fixed a bug where [`ALTER TYPE`](../v23.2/alter-type.html) could get stuck if [`DROP TYPE`](../v23.2/drop-type.html) was executed concurrently. [#113644][#113644] -- Fixed a bug that could cause internal errors or panics while attempting to forecast [statistics](../v23.2/show-statistics.html) on a numeric column. [#113797][#113797] -- Rolled back deletes no longer cause a discrepancy between computed statistics and the actual stored values. [#113766][#113766] +- Fixed a bug that could prevent [`RESTORE`](../v23.2/restore.html) from working if it was performed during a cluster upgrade. #112759 +- Fixed a bug where the opclass for a [trigram index](../v23.2/trigram-indexes.html) is not shown if CockroachDB creates a trigram index and later displays it via [`SHOW CREATE TABLE`](../v23.2/show-create.html). #113071 +- Fixed a bug where CockroachDB could incorrectly evaluate [lookup](../v23.2/joins.html#lookup-joins) and index [joins](../v23.2/joins.html) into tables with at least three [column families](../v23.2/column-families.html). This would result in either the `non-nullable column with no value` internal error, or the query would return incorrect results. This bug was introduced in v22.2. #113105 +- Fixed a bug where [`ALTER PRIMARY KEY`](../v23.2/alter-table.html#alter-primary-key) would incorrectly disable [secondary indexes](../v23.2/schema-design-indexes.html) while new secondary indexes were being backfilled when using the [declarative schema changer](../v23.2/online-schema-changes.html). 
#112627 +- Fixed a bug where the `unique_constraint_catalog` and `unique_constraint_schema` columns in [`information_schema.referential_constraints`](../v23.2/information-schema.html#referential_constraints) could be incorrect for cross schema or cross database references. #112739 +- Fixed a bug in a method that was used by some of the [jobs](../v23.2/show-jobs.html) observability infrastructure. This method could be triggered if a file was overwritten with a different chunking strategy. #113290 +- Fixed a bug where the result of [`SHOW CREATE TABLE`](../v23.2/show-create.html) for a table that had a [collated string column](../v23.2/collate.html) with a default expression was incorrect because the statement could not be parsed. #113119 +- Fixed the SQL activity update job to: avoid conflicts on update, reduce the amount of data cached to only what the overview page requires, and fix the correctness of the top queries. #112865 +- Fixed a bug that could prevent [physical cluster replication](../v23.2/physical-cluster-replication-overview.html) from advancing in the face of some range deletion operations. #113041 +- Fixed a bug where [`ALTER TYPE`](../v23.2/alter-type.html) could get stuck if [`DROP TYPE`](../v23.2/drop-type.html) was executed concurrently. #113644 +- Fixed a bug that could cause internal errors or panics while attempting to forecast [statistics](../v23.2/show-statistics.html) on a numeric column. #113797 +- Rolled back deletes no longer cause a discrepancy between computed statistics and the actual stored values. #113766

Performance improvements

-- Addressed a performance regression that can happen when the declarative [schema changer](../v23.2/online-schema-changes.html) is used to create an index with a concurrent workload. [#113725][#113725] +- Addressed a performance regression that can happen when the declarative [schema changer](../v23.2/online-schema-changes.html) is used to create an index with a concurrent workload. #113725

Contributors

This release includes 117 merged PRs by 49 authors. -[#112627]: https://github.com/cockroachdb/cockroach/pull/112627 -[#112739]: https://github.com/cockroachdb/cockroach/pull/112739 -[#112759]: https://github.com/cockroachdb/cockroach/pull/112759 -[#112865]: https://github.com/cockroachdb/cockroach/pull/112865 -[#112967]: https://github.com/cockroachdb/cockroach/pull/112967 -[#113040]: https://github.com/cockroachdb/cockroach/pull/113040 -[#113041]: https://github.com/cockroachdb/cockroach/pull/113041 -[#113071]: https://github.com/cockroachdb/cockroach/pull/113071 -[#113076]: https://github.com/cockroachdb/cockroach/pull/113076 -[#113105]: https://github.com/cockroachdb/cockroach/pull/113105 -[#113119]: https://github.com/cockroachdb/cockroach/pull/113119 -[#113172]: https://github.com/cockroachdb/cockroach/pull/113172 -[#113284]: https://github.com/cockroachdb/cockroach/pull/113284 -[#113290]: https://github.com/cockroachdb/cockroach/pull/113290 -[#113313]: https://github.com/cockroachdb/cockroach/pull/113313 -[#113644]: https://github.com/cockroachdb/cockroach/pull/113644 -[#113725]: https://github.com/cockroachdb/cockroach/pull/113725 -[#113766]: https://github.com/cockroachdb/cockroach/pull/113766 -[#113797]: https://github.com/cockroachdb/cockroach/pull/113797 diff --git a/src/current/_includes/releases/v23.2/v23.2.0-alpha.7.md b/src/current/_includes/releases/v23.2/v23.2.0-alpha.7.md index 521e43cd835..eb848b557d1 100644 --- a/src/current/_includes/releases/v23.2/v23.2.0-alpha.7.md +++ b/src/current/_includes/releases/v23.2/v23.2.0-alpha.7.md @@ -6,29 +6,29 @@ Release Date: November 20, 2023

SQL language changes

-- Previously, if [session variable `use_declarative_schema_changer`](../v23.2/session-variables.html#use_declarative_schema_changer) was set to `off`, then [`ALTER TABLE ... ALTER COLUMN ... SET NOT NULL`](../v23.2/alter-table.html#alter-column) was run on a column which contained a NULL value, an error with code `23514` (`check_violation`) would be returned. Now in this scenario the error returned will have code 23502 (`not_null_violation`) to match [PostgreSQL](https://www.postgresql.org/docs/8.4/errcodes-appendix.html). [#113970][#113970] -- The `sql.txn.read_committed_syntax.enabled` [cluster setting](../v23.2/cluster-settings.html) was renamed to [`sql.txn.read_committed_isolation.enabled`](../v23.2/cluster-settings.html#setting-sql-txn-read-committed-isolation-enabled). [#113833][#113833] +- Previously, if [session variable `use_declarative_schema_changer`](../v23.2/session-variables.html#use_declarative_schema_changer) was set to `off`, then [`ALTER TABLE ... ALTER COLUMN ... SET NOT NULL`](../v23.2/alter-table.html#alter-column) was run on a column which contained a NULL value, an error with code `23514` (`check_violation`) would be returned. Now in this scenario the error returned will have code 23502 (`not_null_violation`) to match [PostgreSQL](https://www.postgresql.org/docs/8.4/errcodes-appendix.html). #113970 +- The `sql.txn.read_committed_syntax.enabled` [cluster setting](../v23.2/cluster-settings.html) was renamed to [`sql.txn.read_committed_isolation.enabled`](../v23.2/cluster-settings.html#setting-sql-txn-read-committed-isolation-enabled). #113833

Command-line changes

-- The `cockroach connect` functionality has been deprecated. [#114241][#114241] +- The `cockroach connect` functionality has been deprecated. #114241

DB Console changes

-- Previously, the forward arrow button on the [time selector](../v23.2/ui-statements-page.html#time-interval) would not move the time window forward if the current end time was less than "Now() - time window". For example, with a 10 minute time window, it was not possible to move forward if current end time is less that "Now() - 10 minutes". This caused the forward arrow button to become disabled even though there was more data to display. Now this scenario is handled by the forward arrow button selecting the latest available time window (similar to the **Now** button). [#113907][#113907] +- Previously, the forward arrow button on the [time selector](../v23.2/ui-statements-page.html#time-interval) would not move the time window forward if the current end time was less than "Now() - time window". For example, with a 10 minute time window, it was not possible to move forward if current end time is less than "Now() - 10 minutes". This caused the forward arrow button to become disabled even though there was more data to display. Now this scenario is handled by the forward arrow button selecting the latest available time window (similar to the **Now** button). #113907

Bug fixes

-- Removed duplication of metrics names on [DB Console Metrics](../v23.2/ui-overview.html#metrics) charts' tooltips. [#113728][#113728] -- Fixed a bug that could cause [ALTER DATABASE ... ADD/DROP REGION](../v23.2/alter-database.html#add-region) to hang if [node localities](../v23.2/cockroach-start.html#locality) were changed after regions were added. [#114102][#114102] -- A bug in the [log configuration](../v23.2/configure-logs.html) code prevented users from setting the [`datetime-format` and `datetime-timezone` log format options](../v23.2/configure-logs.html#datetime-field-for-json-format) (set via the `format-options` structure) within their log configuration. Specifically, when users tried to use these options in `file-defaults` with any [`json`](../v23.2/log-formats.html#format-json) type log format, the log configuration was previously unable to be parsed due to validation errors. This was because the `file-defaults.format-options` were propagated to the `sinks.stderr.format-options`. `sinks.stderr` only supports a format of [`crdb-v2-tty`](../v23.2/log-formats.html#format-crdb-v2-tty). Therefore, the incorrectly propagated `format-options`, which are only supported by the `json` log format, were identified as not being supported when validating `sinks.stderr`. This bug is now fixed and the `file-defaults.format-options` are only propagated to `sinks.stderr.format-options` if both of these conditions are true: 1. `file-defaults.format` is one of [`crdb-v2`](../v23.2/log-formats.html#format-crdb-v2) or `crdb-v2-tty`. 2. `sinks.stderr.format-options` are not explicitly set in the log configuration. 
[#113684][#113684] -- Previously, when executing queries with [index joins](../v23.2/indexes.html#storing-columns) or [lookup joins](../v23.2/joins.html#lookup-joins) or both when the ordering needs to be maintained, CockroachDB in some cases would get into a pathological behavior which would lead to increased query latency, possibly by one or two orders of magnitude. This bug was introduced in v22.2 and is now fixed. [#114117][#114117] -- Previously, the [SHOW STATISTICS command](../v23.2/show-statistics.html) incorrectly required the user to have the admin role. Now, it correctly only requires the user to have any [privilege](../v23.2/security-reference/authorization.html#privileges) on the table being inspected. [#114449][#114449] -- Fixed a bug that could cause a [query plan](../v23.2/cost-based-optimizer.html) to skip scanning rows from the local region when performing a [lookup join](../v23.2/joins.html#lookup-joins) with a [`REGIONAL BY ROW` table](../v23.2/regional-tables.html#regional-by-row-tables) as the input. [#114458][#114458] +- Removed duplication of metrics names on [DB Console Metrics](../v23.2/ui-overview.html#metrics) charts' tooltips. #113728 +- Fixed a bug that could cause [ALTER DATABASE ... ADD/DROP REGION](../v23.2/alter-database.html#add-region) to hang if [node localities](../v23.2/cockroach-start.html#locality) were changed after regions were added. #114102 +- A bug in the [log configuration](../v23.2/configure-logs.html) code prevented users from setting the [`datetime-format` and `datetime-timezone` log format options](../v23.2/configure-logs.html#datetime-field-for-json-format) (set via the `format-options` structure) within their log configuration. Specifically, when users tried to use these options in `file-defaults` with any [`json`](../v23.2/log-formats.html#format-json) type log format, the log configuration was previously unable to be parsed due to validation errors. 
This was because the `file-defaults.format-options` were propagated to the `sinks.stderr.format-options`. `sinks.stderr` only supports a format of [`crdb-v2-tty`](../v23.2/log-formats.html#format-crdb-v2-tty). Therefore, the incorrectly propagated `format-options`, which are only supported by the `json` log format, were identified as not being supported when validating `sinks.stderr`. This bug is now fixed and the `file-defaults.format-options` are only propagated to `sinks.stderr.format-options` if both of these conditions are true: 1. `file-defaults.format` is one of [`crdb-v2`](../v23.2/log-formats.html#format-crdb-v2) or `crdb-v2-tty`. 2. `sinks.stderr.format-options` are not explicitly set in the log configuration. #113684 +- Previously, when executing queries with [index joins](../v23.2/indexes.html#storing-columns) or [lookup joins](../v23.2/joins.html#lookup-joins) or both when the ordering needs to be maintained, CockroachDB in some cases would get into a pathological behavior which would lead to increased query latency, possibly by one or two orders of magnitude. This bug was introduced in v22.2 and is now fixed. #114117 +- Previously, the [SHOW STATISTICS command](../v23.2/show-statistics.html) incorrectly required the user to have the admin role. Now, it correctly only requires the user to have any [privilege](../v23.2/security-reference/authorization.html#privileges) on the table being inspected. #114449 +- Fixed a bug that could cause a [query plan](../v23.2/cost-based-optimizer.html) to skip scanning rows from the local region when performing a [lookup join](../v23.2/joins.html#lookup-joins) with a [`REGIONAL BY ROW` table](../v23.2/regional-tables.html#regional-by-row-tables) as the input. #114458

Performance improvements

-- This change prevents failed requests from being issued on follower nodes that are [draining, decommissioning](../v23.2/node-shutdown.html) or unhealthy which prevents latency spikes if those nodes later go offline. [#114259][#114259] +- This change prevents failed requests from being issued on follower nodes that are [draining, decommissioning](../v23.2/node-shutdown.html) or unhealthy which prevents latency spikes if those nodes later go offline. #114259
@@ -38,19 +38,3 @@ This release includes 95 merged PRs by 33 authors.
-[#113684]: https://github.com/cockroachdb/cockroach/pull/113684 -[#113728]: https://github.com/cockroachdb/cockroach/pull/113728 -[#113833]: https://github.com/cockroachdb/cockroach/pull/113833 -[#113850]: https://github.com/cockroachdb/cockroach/pull/113850 -[#113876]: https://github.com/cockroachdb/cockroach/pull/113876 -[#113880]: https://github.com/cockroachdb/cockroach/pull/113880 -[#113907]: https://github.com/cockroachdb/cockroach/pull/113907 -[#113970]: https://github.com/cockroachdb/cockroach/pull/113970 -[#114102]: https://github.com/cockroachdb/cockroach/pull/114102 -[#114117]: https://github.com/cockroachdb/cockroach/pull/114117 -[#114241]: https://github.com/cockroachdb/cockroach/pull/114241 -[#114259]: https://github.com/cockroachdb/cockroach/pull/114259 -[#114449]: https://github.com/cockroachdb/cockroach/pull/114449 -[#114458]: https://github.com/cockroachdb/cockroach/pull/114458 -[1c1ed20ea]: https://github.com/cockroachdb/cockroach/commit/1c1ed20ea -[f3e3583fb]: https://github.com/cockroachdb/cockroach/commit/f3e3583fb diff --git a/src/current/_includes/releases/v23.2/v23.2.0-beta.1.md b/src/current/_includes/releases/v23.2/v23.2.0-beta.1.md index 649e1f40687..0852e2a0da4 100644 --- a/src/current/_includes/releases/v23.2/v23.2.0-beta.1.md +++ b/src/current/_includes/releases/v23.2/v23.2.0-beta.1.md @@ -6,17 +6,17 @@ Release Date: November 27, 2023

SQL language changes

-- [`COPY`](../v23.2/copy.html) commands now use the [`background` quality-of-service level](../v23.2/admission-control.html#set-quality-of-service-level-for-a-session) by default, which makes `COPY` commands subject to [admission control](../v23.2/admission-control.html). The new session variable `copy_transaction_quality_of_service` controls the quality-of-service level for `COPY` commands. Previously, `COPY` used the same level as other commands, determined by the `default_transaction_quality_of_service` session variable, which is set to `regular` by default. `regular` is not subject to admission control. [#114535][#114535] +- [`COPY`](../v23.2/copy.html) commands now use the [`background` quality-of-service level](../v23.2/admission-control.html#set-quality-of-service-level-for-a-session) by default, which makes `COPY` commands subject to [admission control](../v23.2/admission-control.html). The new session variable `copy_transaction_quality_of_service` controls the quality-of-service level for `COPY` commands. Previously, `COPY` used the same level as other commands, determined by the `default_transaction_quality_of_service` session variable, which is set to `regular` by default. `regular` is not subject to admission control. #114535

DB Console changes

-- The [Overview page](../v23.2/ui-overview.html) now correctly renders the background color for the email signup, which fixes an issue where it was difficult to read the text. [#114547][#114547] -- Fixed a bug where selecting the internal application name prefix `$ internal` from the **Application Name** dropdown on the [**SQL Activity Statements** page](../v23.2/ui-statements-page.html) was not showing internal queries. The filtering logic will now show if there are statements with the `$ internal` application name prefix. [#114517][#114517] +- The [Overview page](../v23.2/ui-overview.html) now correctly renders the background color for the email signup, which fixes an issue where it was difficult to read the text. #114547 +- Fixed a bug where selecting the internal application name prefix `$ internal` from the **Application Name** dropdown on the [**SQL Activity Statements** page](../v23.2/ui-statements-page.html) was not showing internal queries. The filtering logic will now show if there are statements with the `$ internal` application name prefix. #114517

Bug fixes

-- Fixed a bug where an empty [range](../v23.2/architecture/overview.html#architecture-range) corresponding to a [`DROP TABLE`](../v23.2/drop-table.html) did not respect system-level span configurations such as [protected timestamps](../v23.2/architecture/storage-layer.html#protected-timestamps), which potentially caused reads above the protected timestamp to fail. [#114833][#114833] -- Fixed error handling for `GetFiles` so that it does not cause a nil pointer dereference. [#114830][#114830] +- Fixed a bug where an empty [range](../v23.2/architecture/overview.html#architecture-range) corresponding to a [`DROP TABLE`](../v23.2/drop-table.html) did not respect system-level span configurations such as [protected timestamps](../v23.2/architecture/storage-layer.html#protected-timestamps), which potentially caused reads above the protected timestamp to fail. #114833 +- Fixed error handling for `GetFiles` so that it does not cause a nil pointer dereference. #114830

Contributors

@@ -24,8 +24,3 @@ This release includes 33 merged PRs by 21 authors. -[#114517]: https://github.com/cockroachdb/cockroach/pull/114517 -[#114535]: https://github.com/cockroachdb/cockroach/pull/114535 -[#114547]: https://github.com/cockroachdb/cockroach/pull/114547 -[#114830]: https://github.com/cockroachdb/cockroach/pull/114830 -[#114833]: https://github.com/cockroachdb/cockroach/pull/114833 diff --git a/src/current/_includes/releases/v23.2/v23.2.0-beta.2.md b/src/current/_includes/releases/v23.2/v23.2.0-beta.2.md index a73679e6a57..bf2c583f6c9 100644 --- a/src/current/_includes/releases/v23.2/v23.2.0-beta.2.md +++ b/src/current/_includes/releases/v23.2/v23.2.0-beta.2.md @@ -6,38 +6,38 @@ Release Date: December 5, 2023

General changes

-- CockroachDB now periodically dumps the state of its internal memory accounting system into the `heap_profiler/` directory when a heap profile is taken. To disable this behavior, set the `diagnostics.memory_monitoring_dumps.enabled` [cluster setting](https://cockroachlabs.com/docs/v23.2/cluster-settings) to `false`. [#114998][#114998] -- Multi-level compactions have been disabled to investigate possible performance issues with foreground throughput and latency. [#115481][#115481] +- CockroachDB now periodically dumps the state of its internal memory accounting system into the `heap_profiler/` directory when a heap profile is taken. To disable this behavior, set the `diagnostics.memory_monitoring_dumps.enabled` [cluster setting](https://cockroachlabs.com/docs/v23.2/cluster-settings) to `false`. #114998 +- Multi-level compactions have been disabled to investigate possible performance issues with foreground throughput and latency. #115481

Enterprise Edition changes

-- When using [Physical Cluster Replication](https://cockroachlabs.com/docs/v23.2/physical-cluster-replication-overview), you can now [initiate a cutover](https://cockroachlabs.com/docs/v23.2/cutover-replication) as of `LATEST` before the initial scan completes. [#115101][#115101] -- Sensitive information such as `api_secret`, `sasl_password`, `client_cert`, and `ca_cert`, is now redacted in output from commands `SHOW CHANGEFEED JOB`, `SHOW CHANGEFEED JOBS`, and [`SHOW JOBS`](https://cockroachlabs.com/docs/v23.2/show-jobs). [#115567][#115567] -- The `physical_replication.frontier_lag_nanos` metric and the related DB Console graph have been removed because they sometimes display incorrect information. For [alerting](https://cockroachlabs.com/docs/v23.2/physical-cluster-replication-monitoring#prometheus), it is recommended to use the new metric `physical_replication.replicated_time_seconds` metric instead. [#115234][#115234] -- Fixed a bug in [physical cluster replication](https://cockroachlabs.com/docs/v23.2/physical-cluster-replication-overview) where replicating from a primary cluster that is on a version prior to v23.2.x to a standby cluster running on v23.2.x could fail because of an undefined builtin function in the primary cluster. [#114257][#114257] +- When using [Physical Cluster Replication](https://cockroachlabs.com/docs/v23.2/physical-cluster-replication-overview), you can now [initiate a cutover](https://cockroachlabs.com/docs/v23.2/cutover-replication) as of `LATEST` before the initial scan completes. #115101 +- Sensitive information such as `api_secret`, `sasl_password`, `client_cert`, and `ca_cert`, is now redacted in output from commands `SHOW CHANGEFEED JOB`, `SHOW CHANGEFEED JOBS`, and [`SHOW JOBS`](https://cockroachlabs.com/docs/v23.2/show-jobs). #115567 +- The `physical_replication.frontier_lag_nanos` metric and the related DB Console graph have been removed because they sometimes display incorrect information. 
For [alerting](https://cockroachlabs.com/docs/v23.2/physical-cluster-replication-monitoring#prometheus), it is recommended to use the new `physical_replication.replicated_time_seconds` metric instead. #115234 +- Fixed a bug in [physical cluster replication](https://cockroachlabs.com/docs/v23.2/physical-cluster-replication-overview) where replicating from a primary cluster that is on a version prior to v23.2.x to a standby cluster running on v23.2.x could fail because of an undefined builtin function in the primary cluster. #114257

DB Console changes

-- In the [Changeeds dashboard]({% link v23.2/ui-cdc-dashboard.md %}), the **Max Checkpoint Latency** chart title now refers to "Lag" rather than "Latency", to better reflect the intention of the underlying metric, which measures how recently the changefeed was last checkpointed. [#115003][#115003] -- Times on the X-Axis of bar charts in **Statement details** pages are now correctly formatted in UTC. [#115220][#115220] -- In the **SQL Activity** **Transaction Details** page, you can now view a transaction fingerprint ID across multiple applications by specifying the application name in the `appNames` URL `GET` parameter using a comma-separated encoded string of transaction fingerprint IDs. [#115204][#115204] +- In the [Changefeeds dashboard]({% link v23.2/ui-cdc-dashboard.md %}), the **Max Checkpoint Latency** chart title now refers to "Lag" rather than "Latency", to better reflect the intention of the underlying metric, which measures how recently the changefeed was last checkpointed. #115003 +- Times on the X-Axis of bar charts in **Statement details** pages are now correctly formatted in UTC. #115220 +- In the **SQL Activity** **Transaction Details** page, you can now view a transaction fingerprint ID across multiple applications by specifying the application name in the `appNames` URL `GET` parameter using a comma-separated encoded string of transaction fingerprint IDs. #115204

Bug fixes

-- Fixed a bug that prevented the **Now** button on time range selectors in the DB Console from working as expected when a custom time period was previously selected. [#115514][#115514] -- Fixed a bug that prevented the **SQL Activity** page from showing internal statements when the `sql.stats.response.show_internal.enabled` [cluster setting](https://cockroachlabs.com/docs/v23.2/cluster-settings) was set to `true`. [#114824][#114824] -- Fixed a bug where an active replication report update could get stuck in a retry loop on clusters with over 10000 ranges. This could prevent a node from shutting down cleanly. [#114178][#114178] -- Fixed a bug introduced in v23.1 that could cause an internal error when using the text format (as opposed to binary) when [preparing a statement](https://www.cockroachlabs.com/docs/v23.2/sql-grammar#prepare_stmt) with a user-defined composite type. [#115064][#115064] -- Fixed a bug that could cause a replica to be stuck processing in a queue's replica set when the replica had recently been removed from purgatory for processing but was destroyed, or the replica's ID changed before being processed. These replicas are now removed from the queue when they are encountered. [#115037][#115037] -- Fixed a bug that could cause a [prepared statement](https://www.cockroachlabs.com/docs/v23.2/sql-grammar#prepare_stmt) to fail if it references both an `enum` and a table that has undergone a schema change. [#115132][#115132] -- Fixed a bug that could cause cluster version finalization to contend with descriptor lease renewals on large clusters. Descriptor lease renewals previously had a higher priority than cluster upgrade finalization. Finalization now always has a higher priority than descriptor lease renewal. [#115034][#115034] -- Fixed a bug that prevented [backups](https://cockroachlabs.com/docs/v23.2/backup) from distributing work evenly across all replicas, including followers, regardless of leaseholder placement. 
[#115019][#115019] -- Fixed a bug introduced in v23.2.0-beta.1 that could cause a single composite-typed variable to be incorrectly handled as the target of a PostgreSQL `INTO` clause. [#115404][#115404] -- Fixed a bug that could cause a `BEGIN` statement log to record incorrect information in the `Age` field, which could also cause them to appear erroneously in slow-query logs. [#115259][#115259] +- Fixed a bug that prevented the **Now** button on time range selectors in the DB Console from working as expected when a custom time period was previously selected. #115514 +- Fixed a bug that prevented the **SQL Activity** page from showing internal statements when the `sql.stats.response.show_internal.enabled` [cluster setting](https://cockroachlabs.com/docs/v23.2/cluster-settings) was set to `true`. #114824 +- Fixed a bug where an active replication report update could get stuck in a retry loop on clusters with over 10000 ranges. This could prevent a node from shutting down cleanly. #114178 +- Fixed a bug introduced in v23.1 that could cause an internal error when using the text format (as opposed to binary) when [preparing a statement](https://www.cockroachlabs.com/docs/v23.2/sql-grammar#prepare_stmt) with a user-defined composite type. #115064 +- Fixed a bug that could cause a replica to be stuck processing in a queue's replica set when the replica had recently been removed from purgatory for processing but was destroyed, or the replica's ID changed before being processed. These replicas are now removed from the queue when they are encountered. #115037 +- Fixed a bug that could cause a [prepared statement](https://www.cockroachlabs.com/docs/v23.2/sql-grammar#prepare_stmt) to fail if it references both an `enum` and a table that has undergone a schema change. #115132 +- Fixed a bug that could cause cluster version finalization to contend with descriptor lease renewals on large clusters. 
Descriptor lease renewals previously had a higher priority than cluster upgrade finalization. Finalization now always has a higher priority than descriptor lease renewal. #115034 +- Fixed a bug that prevented [backups](https://cockroachlabs.com/docs/v23.2/backup) from distributing work evenly across all replicas, including followers, regardless of leaseholder placement. #115019 +- Fixed a bug introduced in v23.2.0-beta.1 that could cause a single composite-typed variable to be incorrectly handled as the target of a PostgreSQL `INTO` clause. #115404 +- Fixed a bug that could cause a `BEGIN` statement log to record incorrect information in the `Age` field, which could also cause them to appear erroneously in slow-query logs. #115259

Performance improvements

-- Query planning time has been reduced significantly for some queries in which many tables are joined. [#114445][#114445] +- Query planning time has been reduced significantly for some queries in which many tables are joined. #114445
@@ -47,26 +47,3 @@ This release includes 91 merged PRs by 35 authors.
-[#114178]: https://github.com/cockroachdb/cockroach/pull/114178 -[#114445]: https://github.com/cockroachdb/cockroach/pull/114445 -[#114824]: https://github.com/cockroachdb/cockroach/pull/114824 -[#114998]: https://github.com/cockroachdb/cockroach/pull/114998 -[#115003]: https://github.com/cockroachdb/cockroach/pull/115003 -[#115037]: https://github.com/cockroachdb/cockroach/pull/115037 -[#115064]: https://github.com/cockroachdb/cockroach/pull/115064 -[#115101]: https://github.com/cockroachdb/cockroach/pull/115101 -[#115132]: https://github.com/cockroachdb/cockroach/pull/115132 -[#115145]: https://github.com/cockroachdb/cockroach/pull/115145 -[#115181]: https://github.com/cockroachdb/cockroach/pull/115181 -[#114257]: https://github.com/cockroachdb/cockroach/pull/114257 -[#115019]: https://github.com/cockroachdb/cockroach/pull/115019 -[#115034]: https://github.com/cockroachdb/cockroach/pull/115034 -[#115204]: https://github.com/cockroachdb/cockroach/pull/115204 -[#115220]: https://github.com/cockroachdb/cockroach/pull/115220 -[#115234]: https://github.com/cockroachdb/cockroach/pull/115234 -[#115259]: https://github.com/cockroachdb/cockroach/pull/115259 -[#115404]: https://github.com/cockroachdb/cockroach/pull/115404 -[#115481]: https://github.com/cockroachdb/cockroach/pull/115481 -[#115514]: https://github.com/cockroachdb/cockroach/pull/115514 -[#115567]: https://github.com/cockroachdb/cockroach/pull/115567 -[#115604]: https://github.com/cockroachdb/cockroach/pull/115604 diff --git a/src/current/_includes/releases/v23.2/v23.2.0-beta.3.md b/src/current/_includes/releases/v23.2/v23.2.0-beta.3.md index b99be77d2a6..511d9c707e8 100644 --- a/src/current/_includes/releases/v23.2/v23.2.0-beta.3.md +++ b/src/current/_includes/releases/v23.2/v23.2.0-beta.3.md @@ -6,24 +6,24 @@ Release Date: December 13, 2023

General changes

-- Updated Go version to 1.21.3. [#116098][#116098] +- Updated Go version to 1.21.3. #116098

SQL language changes

-- Added the `sql.ttl.default_select_rate_limit` [cluster setting](../v23.2/cluster-settings.html) and the `ttl_select_rate_limit` [table storage parameter](../v23.2/with-storage-parameter.html#table-parameters) to set the TTL select rate limit. This sets the number of records per table per second per node that can be selected by the TTL job. [#115802][#115802] +- Added the `sql.ttl.default_select_rate_limit` [cluster setting](../v23.2/cluster-settings.html) and the `ttl_select_rate_limit` [table storage parameter](../v23.2/with-storage-parameter.html#table-parameters) to set the TTL select rate limit. This sets the number of records per table per second per node that can be selected by the TTL job. #115802

Bug fixes

-- Fixed a bug that could result in an incorrect `too few columns` error for queries that use `ANY ` syntax with a subquery. [#115592][#115592] -- Fixed a bug that could cause `too few columns`/`too many columns` errors for queries that used `IN` or `NOT IN` with a non-trivial right operand, such as a subquery (rather than a constant tuple). [#115592][#115592] -- Fixed a bug where [`CREATE INDEX`](../v23.2/create-index.html) with expressions could fail on materialized [views](../v23.2/views.html) when the declarative schema changer was used. [#115522][#115522] -- Fixed a bug that could cause PL/pgSQL routines with `SELECT INTO` syntax to return early. This bug existed only in pre-release versions v23.2.0-beta.1 and v23.2.0-beta.2. [#115676][#115676] -- Fixed a bug that could cause side effects to happen out of order for PL/pgSQL routines in rare cases. This bug existed only in v23.2 alpha versions and previous v23.2 beta versions. [#115840][#115840] -- Previously, in rare cases, CockroachDB could incorrectly evaluate queries with lookup [joins](../v23.2/joins.html) where `equality cols are key` when performing lookups on multiple ranges. This could either manifest as a stuck query or result in incorrect output. The bug was introduced in v22.2 and is now fixed. [#115580][#115580] -- Fixed a durability bug in Raft log storage that was caused by incorrect syncing of filesystem metadata. It was possible to lose writes of a particular kind (`AddSSTable`) used by (e.g.) `RESTORE`. This loss was possible only under power-off or OS crash conditions. As a result, CockroachDB could enter a crash loop on restart. In the worst case of a coordinated power-off/crash across multiple nodes, this could lead to an unrecoverable loss of quorum. 
[#115841][#115841] -- Fixed a bug where large jobs running with [`execution locality`](../v23.2/take-locality-restricted-backups.html) option could result in the [gateway node](../v23.2/architecture/sql-layer.html#gateway-node) being assigned most of the work causing performance degradation and cluster instability. [#115876][#115876] -- Fixed a bug that prevented naming UDT parameters when [dropping a user-defined function](../v23.2/drop-function.html) (or procedure). This bug has existed since v23.1. [#115905][#115905] -- Locking tables (e.g., with [SELECT ... FOR UPDATE](../v23.2/select-for-update.html)) on the null-extended side of outer joins (e.g., the right side of a `LEFT JOIN`) is now disallowed and returns an error. This improves compatibility with PostgreSQL and prevents ambiguity in locking semantics. This bug has existed since locking with `FOR UPDATE` was introduced. [#115879][#115879] +- Fixed a bug that could result in an incorrect `too few columns` error for queries that use `ANY ` syntax with a subquery. #115592 +- Fixed a bug that could cause `too few columns`/`too many columns` errors for queries that used `IN` or `NOT IN` with a non-trivial right operand, such as a subquery (rather than a constant tuple). #115592 +- Fixed a bug where [`CREATE INDEX`](../v23.2/create-index.html) with expressions could fail on materialized [views](../v23.2/views.html) when the declarative schema changer was used. #115522 +- Fixed a bug that could cause PL/pgSQL routines with `SELECT INTO` syntax to return early. This bug existed only in pre-release versions v23.2.0-beta.1 and v23.2.0-beta.2. #115676 +- Fixed a bug that could cause side effects to happen out of order for PL/pgSQL routines in rare cases. This bug existed only in v23.2 alpha versions and previous v23.2 beta versions. 
#115840 +- Previously, in rare cases, CockroachDB could incorrectly evaluate queries with lookup [joins](../v23.2/joins.html) where `equality cols are key` when performing lookups on multiple ranges. This could either manifest as a stuck query or result in incorrect output. The bug was introduced in v22.2 and is now fixed. #115580 +- Fixed a durability bug in Raft log storage that was caused by incorrect syncing of filesystem metadata. It was possible to lose writes of a particular kind (`AddSSTable`) used by (e.g.) `RESTORE`. This loss was possible only under power-off or OS crash conditions. As a result, CockroachDB could enter a crash loop on restart. In the worst case of a coordinated power-off/crash across multiple nodes, this could lead to an unrecoverable loss of quorum. #115841 +- Fixed a bug where large jobs running with [`execution locality`](../v23.2/take-locality-restricted-backups.html) option could result in the [gateway node](../v23.2/architecture/sql-layer.html#gateway-node) being assigned most of the work causing performance degradation and cluster instability. #115876 +- Fixed a bug that prevented naming UDT parameters when [dropping a user-defined function](../v23.2/drop-function.html) (or procedure). This bug has existed since v23.1. #115905 +- Locking tables (e.g., with [SELECT ... FOR UPDATE](../v23.2/select-for-update.html)) on the null-extended side of outer joins (e.g., the right side of a `LEFT JOIN`) is now disallowed and returns an error. This improves compatibility with PostgreSQL and prevents ambiguity in locking semantics. This bug has existed since locking with `FOR UPDATE` was introduced. #115879
@@ -33,14 +33,3 @@ This release includes 26 merged PRs by 20 authors.
-[#115522]: https://github.com/cockroachdb/cockroach/pull/115522 -[#115580]: https://github.com/cockroachdb/cockroach/pull/115580 -[#115592]: https://github.com/cockroachdb/cockroach/pull/115592 -[#115676]: https://github.com/cockroachdb/cockroach/pull/115676 -[#115802]: https://github.com/cockroachdb/cockroach/pull/115802 -[#115840]: https://github.com/cockroachdb/cockroach/pull/115840 -[#115841]: https://github.com/cockroachdb/cockroach/pull/115841 -[#115876]: https://github.com/cockroachdb/cockroach/pull/115876 -[#115879]: https://github.com/cockroachdb/cockroach/pull/115879 -[#115905]: https://github.com/cockroachdb/cockroach/pull/115905 -[#116098]: https://github.com/cockroachdb/cockroach/pull/116098 diff --git a/src/current/_includes/releases/v23.2/v23.2.0-rc.1.md b/src/current/_includes/releases/v23.2/v23.2.0-rc.1.md index be9223ac6d7..57462f2fea6 100644 --- a/src/current/_includes/releases/v23.2/v23.2.0-rc.1.md +++ b/src/current/_includes/releases/v23.2/v23.2.0-rc.1.md @@ -6,35 +6,35 @@ Release Date: December 21, 2023

{{ site.data.products.enterprise }} edition changes

-- Added a SQL function `crdb_internal.fips_ready()` that can be used to verify the [FIPS](../v23.2/fips.html) readiness of the gateway node. [#116281][#116281] -- [Physical cluster replication](../v23.2/physical-cluster-replication-overview.html) now retries for just over 3 minutes before failing. [#116404][#116404] +- Added a SQL function `crdb_internal.fips_ready()` that can be used to verify the [FIPS](../v23.2/fips.html) readiness of the gateway node. #116281 +- [Physical cluster replication](../v23.2/physical-cluster-replication-overview.html) now retries for just over 3 minutes before failing. #116404

SQL language changes

-- `CALL` statements can now be run with [`EXPLAIN`](../v23.2/explain.html). The `EXPLAIN (OPT)` variant will show the body of the procedure, while other variants will show only the procedure name and arguments. [#116273][#116273] -- Added support for [`IMPORT INTO`](../v23.2/import-into.html) a table that has columns typed as arrays of user-defined types (like [`ENUM`](../v23.2/enum.html)). Tables that use multiple user-defined types with the same name but different schemas are still unsupported. [#116360][#116360] -- The [`SELECT FOR UPDATE`](../v23.2/select-for-update.html) implementation used under Read Committed isolation (and under [Serializable isolation](../v23.2/demo-serializable.html) when [`optimizer_use_lock_op_for_serializable`](../v23.2/set-vars.html) is set to `true`) now locks all [column families](../v23.2/column-families.html) instead of only the first column family. [#116828][#116828] +- `CALL` statements can now be run with [`EXPLAIN`](../v23.2/explain.html). The `EXPLAIN (OPT)` variant will show the body of the procedure, while other variants will show only the procedure name and arguments. #116273 +- Added support for [`IMPORT INTO`](../v23.2/import-into.html) a table that has columns typed as arrays of user-defined types (like [`ENUM`](../v23.2/enum.html)). Tables that use multiple user-defined types with the same name but different schemas are still unsupported. #116360 +- The [`SELECT FOR UPDATE`](../v23.2/select-for-update.html) implementation used under Read Committed isolation (and under [Serializable isolation](../v23.2/demo-serializable.html) when [`optimizer_use_lock_op_for_serializable`](../v23.2/set-vars.html) is set to `true`) now locks all [column families](../v23.2/column-families.html) instead of only the first column family. #116828

Command-line changes

-- Added the command [`cockroach debug enterprise-check-fips`](../v23.2/cockroach-commands.html) that diagnoses errors in [FIPS](../v23.2/fips.html) deployments. [#116281][#116281] -- Added the flag `--enterprise-require-fips-ready` that can be run with any [CockroachDB command](../v23.2/cockroach-commands.html) to prevent startup if certain prerequisites for [FIPS](../v23.2/fips.html) compliance are not met. [#116281][#116281] +- Added the command [`cockroach debug enterprise-check-fips`](../v23.2/cockroach-commands.html) that diagnoses errors in [FIPS](../v23.2/fips.html) deployments. #116281 +- Added the flag `--enterprise-require-fips-ready` that can be run with any [CockroachDB command](../v23.2/cockroach-commands.html) to prevent startup if certain prerequisites for [FIPS](../v23.2/fips.html) compliance are not met. #116281

DB Console changes

-- Updated the **CPU Time** label to **SQL CPU Time** and added clarification to its tooltip on the [SQL Activity](../v23.2/ui-overview.html#sql-activity) and [Insights](../v23.2/ui-insights-page.html) pages. [#116450][#116450] -- Removed the ID when it is `undefined` from the event description in the [Metrics Events Panel](../v23.2/ui-overview-dashboard.html#events-panel). [#116519][#116519] +- Updated the **CPU Time** label to **SQL CPU Time** and added clarification to its tooltip on the [SQL Activity](../v23.2/ui-overview.html#sql-activity) and [Insights](../v23.2/ui-insights-page.html) pages. #116450 +- Removed the ID when it is `undefined` from the event description in the [Metrics Events Panel](../v23.2/ui-overview-dashboard.html#events-panel). #116519

Bug fixes

-- Fixed a bug that caused node crashes and panics when running [`INSERT`](../v23.2/insert.html) queries on [`REGIONAL BY ROW`](../v23.2/table-localities.html#regional-by-row-tables) tables with `UNIQUE` constraints or indexes. The bug is only present in version v23.2.0-beta.1. [#116343][#116343] -- [`UPDATE`](../v23.2/update.html), [`UPSERT`](../v23.2/upsert.html), and [`INSERT ON CONFLICT`](../v23.2/insert.html#on-conflict-clause) queries are now disallowed under Read Committed isolation when the table contains a [`CHECK` constraint](../v23.2/check.html) involving a [column family](../v23.2/column-families.html) that is updated, and that `CHECK` constraint also involves a column family that is **not** updated, but **is** read. This restriction is a temporary fix to prevent possible violation of the `CHECK` constraint. However, it is important to note that this restriction will be lifted in the future. [#116429][#116429] -- Fixed a bug where [scheduled jobs](../v23.2/show-schedules.html) using [external storage providers](../v23.2/use-cloud-storage.html) may fail shortly after node startup. [#116205][#116205] -- Fixed the formatting for `plpgsql` routines, which could prevent the creation of a routine with loop labels and could prevent some expressions from being redacted correctly. The bug only existed in alpha and beta versions of v23.2. [#116711][#116711] -- Fixed a bug that would cause a syntax error during redaction of a PL/pgSQL routine. The bug existed only in alpha and beta versions of the v23.2 release. [#116711][#116711] -- Fixed a bug that would cause syntax errors when attempting to [restore a database](../v23.2/restore.html#restore-a-database) with [PL/pgSQL UDFs](https://www.postgresql.org/docs/current/sql-createfunction.html) or stored [procedures](https://www.postgresql.org/docs/16/sql-createprocedure.html). This bug only affected alpha and beta versions of v23.2. 
[#116711][#116711] -- Fixed a bug in PL/pgSQL where altering the name of a [sequence](../v23.2/create-sequence.html) or UDT that was used in a [PL/pgSQL function](https://www.postgresql.org/docs/current/sql-createfunction.html) or [procedure](https://www.postgresql.org/docs/16/sql-createprocedure.html) could break them. This is only present in v23.2 alpha and beta releases. [#116420][#116420] -- Fixed a bug where [`SELECT FOR UPDATE`](../v23.2/select-for-update.html) under Read Committed isolation on multi-column-family tables was not locking [column families](../v23.2/column-families.html) containing only key columns. [#116828][#116828] +- Fixed a bug that caused node crashes and panics when running [`INSERT`](../v23.2/insert.html) queries on [`REGIONAL BY ROW`](../v23.2/table-localities.html#regional-by-row-tables) tables with `UNIQUE` constraints or indexes. The bug is only present in version v23.2.0-beta.1. #116343 +- [`UPDATE`](../v23.2/update.html), [`UPSERT`](../v23.2/upsert.html), and [`INSERT ON CONFLICT`](../v23.2/insert.html#on-conflict-clause) queries are now disallowed under Read Committed isolation when the table contains a [`CHECK` constraint](../v23.2/check.html) involving a [column family](../v23.2/column-families.html) that is updated, and that `CHECK` constraint also involves a column family that is **not** updated, but **is** read. This restriction is a temporary fix to prevent possible violation of the `CHECK` constraint. However, it is important to note that this restriction will be lifted in the future. #116429 +- Fixed a bug where [scheduled jobs](../v23.2/show-schedules.html) using [external storage providers](../v23.2/use-cloud-storage.html) may fail shortly after node startup. #116205 +- Fixed the formatting for `plpgsql` routines, which could prevent the creation of a routine with loop labels and could prevent some expressions from being redacted correctly. The bug only existed in alpha and beta versions of v23.2. 
#116711 +- Fixed a bug that would cause a syntax error during redaction of a PL/pgSQL routine. The bug existed only in alpha and beta versions of the v23.2 release. #116711 +- Fixed a bug that would cause syntax errors when attempting to [restore a database](../v23.2/restore.html#restore-a-database) with [PL/pgSQL UDFs](https://www.postgresql.org/docs/current/sql-createfunction.html) or stored [procedures](https://www.postgresql.org/docs/16/sql-createprocedure.html). This bug only affected alpha and beta versions of v23.2. #116711 +- Fixed a bug in PL/pgSQL where altering the name of a [sequence](../v23.2/create-sequence.html) or UDT that was used in a [PL/pgSQL function](https://www.postgresql.org/docs/current/sql-createfunction.html) or [procedure](https://www.postgresql.org/docs/16/sql-createprocedure.html) could break them. This is only present in v23.2 alpha and beta releases. #116420 +- Fixed a bug where [`SELECT FOR UPDATE`](../v23.2/select-for-update.html) under Read Committed isolation on multi-column-family tables was not locking [column families](../v23.2/column-families.html) containing only key columns. #116828 - Fixed a bug where all `AggHistogram`-powered metrics were not reporting quantiles properly in the [DB Console](../v23.2/ui-overview.html). The quantiles in the DB Console are now reported correctly. This bug was only present in histograms in the [DB Console metrics](../v23.2/ui-overview-dashboard.html) features, and did **not** affect metrics reporting in the [Prometheus-compatible](../v23.2/monitor-cockroachdb-with-prometheus.html) endpoint, `/_status/vars`. The affected metrics were: - `changefeed.message_size_hist` - `changefeed.parallel_io_queue_nanos` @@ -46,24 +46,9 @@ Release Date: December 21, 2023 - `jobs.row_level_ttl.select_duration` - `jobs.row_level_ttl.delete_duration` - [#116871][#116871] + #116871

Contributors

This release includes 49 merged PRs by 26 authors. -[#116205]: https://github.com/cockroachdb/cockroach/pull/116205 -[#116273]: https://github.com/cockroachdb/cockroach/pull/116273 -[#116281]: https://github.com/cockroachdb/cockroach/pull/116281 -[#116343]: https://github.com/cockroachdb/cockroach/pull/116343 -[#116360]: https://github.com/cockroachdb/cockroach/pull/116360 -[#116392]: https://github.com/cockroachdb/cockroach/pull/116392 -[#116404]: https://github.com/cockroachdb/cockroach/pull/116404 -[#116420]: https://github.com/cockroachdb/cockroach/pull/116420 -[#116429]: https://github.com/cockroachdb/cockroach/pull/116429 -[#116450]: https://github.com/cockroachdb/cockroach/pull/116450 -[#116519]: https://github.com/cockroachdb/cockroach/pull/116519 -[#116711]: https://github.com/cockroachdb/cockroach/pull/116711 -[#116828]: https://github.com/cockroachdb/cockroach/pull/116828 -[#116871]: https://github.com/cockroachdb/cockroach/pull/116871 -[#116897]: https://github.com/cockroachdb/cockroach/pull/116897 diff --git a/src/current/_includes/releases/v23.2/v23.2.0-rc.2.md b/src/current/_includes/releases/v23.2/v23.2.0-rc.2.md index 7cdcddc0a66..c9254d31a21 100644 --- a/src/current/_includes/releases/v23.2/v23.2.0-rc.2.md +++ b/src/current/_includes/releases/v23.2/v23.2.0-rc.2.md @@ -6,12 +6,10 @@ Release Date: January 9, 2024

Bug fixes

-- Fixed a bug introduced in v23.2 that caused internal errors and panics when certain queries ran with automatic [index recommendation collection enabled](../v23.2/cluster-settings.html#setting-sql-metrics-statement-details-index-recommendation-collection-enabled). [#117454][#117454] -- Fixed a bug where mixed-version clusters with both v23.1 and v23.2 nodes could detect a false-positive replica inconsistency in [`GLOBAL` tables](../v23.2/global-tables.html). [#117341][#117341] +- Fixed a bug introduced in v23.2 that caused internal errors and panics when certain queries ran with automatic [index recommendation collection enabled](../v23.2/cluster-settings.html#setting-sql-metrics-statement-details-index-recommendation-collection-enabled). #117454 +- Fixed a bug where mixed-version clusters with both v23.1 and v23.2 nodes could detect a false-positive replica inconsistency in [`GLOBAL` tables](../v23.2/global-tables.html). #117341

Contributors

This release includes 12 merged PRs by 9 authors. -[#117341]: https://github.com/cockroachdb/cockroach/pull/117341 -[#117454]: https://github.com/cockroachdb/cockroach/pull/117454 diff --git a/src/current/_includes/releases/v23.2/v23.2.0.md b/src/current/_includes/releases/v23.2/v23.2.0.md index 9e550e06224..f54e3d07f91 100644 --- a/src/current/_includes/releases/v23.2/v23.2.0.md +++ b/src/current/_includes/releases/v23.2/v23.2.0.md @@ -339,12 +339,12 @@ In CockroachDB Self-Hosted, all available features are free to use unless their Before [upgrading to CockroachDB v23.2]({% link v23.2/upgrade-cockroach-version.md %}), be sure to review the following backward-incompatible changes, as well as [key cluster setting changes](#v23-2-0-cluster-settings), and adjust your deployment as necessary. -- The pre-v23.1 output produced by `SHOW RANGES`, `crdb_internal.ranges`, and `crdb_internal.ranges_no_leases` was deprecated in v23.1 and is now replaced by default with output that's compatible with coalesced ranges (anges that pack multiple tables/indexes/partitions into individual ranges). See the [v23.1 release notes]({% link releases/v23.1.md %}) for `SHOW RANGES` for more details. [#102961][#102961] -- When a deployment is configured to use a time zone for log file output using formats `crdb-v1` or `crdb-v2`, new output log entries cannot be processed by nodes that have not been upgraded to v23.2. [#104265][#104265] -- When customizing the [SQL shell's interactive prompt]({% link v23.2/cockroach-sql.md %}), the special sequence `%M` now expands to the full host name instead of the combination of host name and port number. To include the port number explicitly, use `%>`. The special sequence `%m` now expands to the host name up to the first period. [#105137][#105137] -- The [`cockroach debug zip`]({% link v23.2/cockroach-debug-zip.md %}) command stores data retrieved from SQL tables in the remote cluster using the TSV format by default. 
[#107474][#107474] -- The [`changefeed.protect_timestamp.max_age` cluster setting]({% link v23.2/protect-changefeed-data.md %}) will only apply to newly created changefeeds in v23.2. For existing changefeeds, you can set the [`protect_data_from_gc_on_pause`]({% link v23.2/create-changefeed.md %}#protect-pause) option so that changefeeds do not experience infinite retries and accumulate protected change data. You can use the [`ALTER CHANGEFEED`]({% link v23.2/alter-changefeed.md %}) statement to add `protect_data_from_gc_on_pause` to existing changefeeds. [#103539][#103539] -- The direct export of traces to Jaeger and the [cluster setting](../v23.2/cluster-settings.html) `trace.jaeger.agent` have been removed. The direct export functionality had been obsoleted since 2022; it stopped working altogether sometime in 2023 with the following error: `data does not fit within one UDP packet; size 65006, max 65000, spans NN`. Since 2022, Jaeger supports ingestion of traces using OTLP; and CockroachDB has supported emitting traces using OTLP since v22.1. Operators and developers who want to inspect traces are thus invited to use the OTLP protocol instead. The corresponding cluster setting is `trace.opentelemetry.collector`. For a successful deployment, an intermediate OTLP collector/forwarder should be configured. For an example of how to orchestrate the OpenTelemetry collector and Jaeger together using Docker Compose, or how to configure the `otel-collector`, see the more-detailed entry in [v23.2-alpha.3 backward-incompatible changes](#v23-2-0-alpha-3-backward-incompatible-changes). [#111342][#111342] +- The pre-v23.1 output produced by `SHOW RANGES`, `crdb_internal.ranges`, and `crdb_internal.ranges_no_leases` was deprecated in v23.1 and is now replaced by default with output that's compatible with coalesced ranges (ranges that pack multiple tables/indexes/partitions into individual ranges). 
See the [v23.1 release notes]({% link releases/v23.1.md %}) for `SHOW RANGES` for more details. #102961 +- When a deployment is configured to use a time zone for log file output using formats `crdb-v1` or `crdb-v2`, new output log entries cannot be processed by nodes that have not been upgraded to v23.2. #104265 +- When customizing the [SQL shell's interactive prompt]({% link v23.2/cockroach-sql.md %}), the special sequence `%M` now expands to the full host name instead of the combination of host name and port number. To include the port number explicitly, use `%>`. The special sequence `%m` now expands to the host name up to the first period. #105137 +- The [`cockroach debug zip`]({% link v23.2/cockroach-debug-zip.md %}) command stores data retrieved from SQL tables in the remote cluster using the TSV format by default. #107474 +- The [`changefeed.protect_timestamp.max_age` cluster setting]({% link v23.2/protect-changefeed-data.md %}) will only apply to newly created changefeeds in v23.2. For existing changefeeds, you can set the [`protect_data_from_gc_on_pause`]({% link v23.2/create-changefeed.md %}#protect-pause) option so that changefeeds do not experience infinite retries and accumulate protected change data. You can use the [`ALTER CHANGEFEED`]({% link v23.2/alter-changefeed.md %}) statement to add `protect_data_from_gc_on_pause` to existing changefeeds. #103539 +- The direct export of traces to Jaeger and the [cluster setting](../v23.2/cluster-settings.html) `trace.jaeger.agent` have been removed. The direct export functionality had been obsoleted since 2022; it stopped working altogether sometime in 2023 with the following error: `data does not fit within one UDP packet; size 65006, max 65000, spans NN`. Since 2022, Jaeger supports ingestion of traces using OTLP; and CockroachDB has supported emitting traces using OTLP since v22.1. Operators and developers who want to inspect traces are thus invited to use the OTLP protocol instead. 
The corresponding cluster setting is `trace.opentelemetry.collector`. For a successful deployment, an intermediate OTLP collector/forwarder should be configured. For an example of how to orchestrate the OpenTelemetry collector and Jaeger together using Docker Compose, or how to configure the `otel-collector`, see the more-detailed entry in [v23.2-alpha.3 backward-incompatible changes](#v23-2-0-alpha-3-backward-incompatible-changes). #111342

Key Cluster Setting Changes

@@ -357,20 +357,20 @@ The following changes should be reviewed prior to upgrading. Default cluster set - `SET default_transaction_isolation = 'read committed'` - `SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL READ COMMITTED` - [#110624][#110624] -- The `sql.txn.read_committed_syntax.enabled` [cluster setting](../v23.2/cluster-settings.html) was renamed to [`sql.txn.read_committed_isolation.enabled`](../v23.2/cluster-settings.html#setting-sql-txn-read-committed-isolation-enabled). [#113833][#113833] -- Users who have the [`CREATEROLE` role option]({% link v23.2/grant.md %}) can now grant and revoke role membership in any non-admin role. This change also removes the [`sql.auth.createrole_allows_grant_role_membership.enabled` cluster setting]({% link v23.2/cluster-settings.md %}), which was added in v23.1. In v23.2, the cluster setting is effectively always true. [#104376][#104376] -- The [cluster setting]({% link v23.2/cluster-settings.md %}) `sql.metrics.statement_details.gateway_node.enabled` now defaults to false to reduce the number of rows generated in SQL Statistics pages. [#107788][#107788] -- The [cluster setting]({% link v23.2/cluster-settings.md %}) `kv.rangefeed.enabled` no longer controls access to `RANGEFEED SQL` commands. Instead, use `feature.changefeed.enabled`. [#110676][#110676] -- The [cluster settings]({% link v23.2/cluster-settings.md %}) related to [physical cluster replication](../v23.2/physical-cluster-replication-overview.html) have been renamed for consistency. For example, `bulkio.stream_ingestion.minimum_flush_interval` is now named `physical_replication.consumer.minimum_flush_interval`. [#111197][#111197] -- CockroachDB now periodically dumps the state of its internal memory accounting system into the `heap_profiler/` directory when a heap profile is taken. To disable this behavior, set the `diagnostics.memory_monitoring_dumps.enabled` [cluster setting]({% link v23.2/cluster-settings.md %}) to `false`. 
[#114998][#114998] -- Introduced the [cluster setting](../v23.2/cluster-settings.html) `kv.gc.sticky_hint.enabled` in v23.1.13. This setting helps expedite [garbage collection](../v23.2/architecture/storage-layer.html#garbage-collection) after range deletions. For example, when a SQL table or index is dropped. `kv.gc.sticky_hint.enabled` is enabled by default and deprecated in v23.2. [#113040][#113040] -- CockroachDB now enables the pacing mechanism in rangefeed closed timestamp notifications, by setting the default `kv.rangefeed.closed_timestamp_smear_interval` cluster setting to 1ms. This makes rangefeed closed timestamp delivery more uniform and less spikey, which reduces its impact on the Go scheduler and, ultimately, foreground SQL latencies. [#103006][#103006] + #110624 +- The `sql.txn.read_committed_syntax.enabled` [cluster setting](../v23.2/cluster-settings.html) was renamed to [`sql.txn.read_committed_isolation.enabled`](../v23.2/cluster-settings.html#setting-sql-txn-read-committed-isolation-enabled). #113833 +- Users who have the [`CREATEROLE` role option]({% link v23.2/grant.md %}) can now grant and revoke role membership in any non-admin role. This change also removes the [`sql.auth.createrole_allows_grant_role_membership.enabled` cluster setting]({% link v23.2/cluster-settings.md %}), which was added in v23.1. In v23.2, the cluster setting is effectively always true. #104376 +- The [cluster setting]({% link v23.2/cluster-settings.md %}) `sql.metrics.statement_details.gateway_node.enabled` now defaults to false to reduce the number of rows generated in SQL Statistics pages. #107788 +- The [cluster setting]({% link v23.2/cluster-settings.md %}) `kv.rangefeed.enabled` no longer controls access to `RANGEFEED SQL` commands. Instead, use `feature.changefeed.enabled`. 
#110676 +- The [cluster settings]({% link v23.2/cluster-settings.md %}) related to [physical cluster replication](../v23.2/physical-cluster-replication-overview.html) have been renamed for consistency. For example, `bulkio.stream_ingestion.minimum_flush_interval` is now named `physical_replication.consumer.minimum_flush_interval`. #111197 +- CockroachDB now periodically dumps the state of its internal memory accounting system into the `heap_profiler/` directory when a heap profile is taken. To disable this behavior, set the `diagnostics.memory_monitoring_dumps.enabled` [cluster setting]({% link v23.2/cluster-settings.md %}) to `false`. #114998 +- Introduced the [cluster setting](../v23.2/cluster-settings.html) `kv.gc.sticky_hint.enabled` in v23.1.13. This setting helps expedite [garbage collection](../v23.2/architecture/storage-layer.html#garbage-collection) after range deletions. For example, when a SQL table or index is dropped. `kv.gc.sticky_hint.enabled` is enabled by default and deprecated in v23.2. #113040 +- CockroachDB now enables the pacing mechanism in rangefeed closed timestamp notifications, by setting the default `kv.rangefeed.closed_timestamp_smear_interval` cluster setting to 1ms. This makes rangefeed closed timestamp delivery more uniform and less spikey, which reduces its impact on the Go scheduler and, ultimately, foreground SQL latencies. #103006

Deprecations

-- The `protect_data_from_gc_on_pause` option has been deprecated. This option is no longer needed since changefeed jobs always protect data. [#103539][#103539] -- The `cockroach connect` functionality has been deprecated. [#114241][#114241] +- The `protect_data_from_gc_on_pause` option has been deprecated. This option is no longer needed since changefeed jobs always protect data. #103539 +- The `cockroach connect` functionality has been deprecated. #114241

Known limitations

@@ -390,9 +390,3 @@ Docs | [SQL Feature Support]({% link v23.2/sql-feature-support.m Docs | [Change Data Capture Overview]({% link v23.2/change-data-capture-overview.md %}) | This page summarizes CockroachDB's data streaming capabilities. Change data capture (CDC) provides efficient, distributed, row-level changefeeds into a configurable sink for downstream processing such as reporting, caching, or full-text indexing. Docs | [Backup Architecture]({% link v23.2/backup-architecture.md %}) | This page describes the backup job workflow with a high-level overview, diagrams, and more details on each phase of the job. -[#102961]: https://github.com/cockroachdb/cockroach/pull/102961 -[#104265]: https://github.com/cockroachdb/cockroach/pull/104265 -[#107474]: https://github.com/cockroachdb/cockroach/pull/107474 -[#103539]: https://github.com/cockroachdb/cockroach/pull/103539 -[#104265]: https://github.com/cockroachdb/cockroach/pull/104265 -[#111342]: https://github.com/cockroachdb/cockroach/pull/111342 diff --git a/src/current/_includes/releases/v23.2/v23.2.1.md b/src/current/_includes/releases/v23.2/v23.2.1.md index c8cff79259f..2a3a5068351 100644 --- a/src/current/_includes/releases/v23.2/v23.2.1.md +++ b/src/current/_includes/releases/v23.2/v23.2.1.md @@ -6,66 +6,66 @@ Release Date: February 20, 2024

Security updates

-- Introduced the `server.redact_sensitive_settings.enabled` [cluster setting]({% link v23.2/cluster-settings.md %}), which is false by default. If set to `true`, then the values of the following settings will be redacted when accessed through `SHOW` commands or other introspection interfaces. In the future, any other sensitive cluster settings that are added will be redacted as well. Users who have the `MODIFYCLUSTERSETTING` [privilege](https://www.cockroachlabs.com/docs/v23.2/security-reference/authorization#managing-privileges) can always view the unredacted settings. [#117729][#117729] +- Introduced the `server.redact_sensitive_settings.enabled` [cluster setting]({% link v23.2/cluster-settings.md %}), which is false by default. If set to `true`, then the values of the following settings will be redacted when accessed through `SHOW` commands or other introspection interfaces. In the future, any other sensitive cluster settings that are added will be redacted as well. Users who have the `MODIFYCLUSTERSETTING` [privilege](https://www.cockroachlabs.com/docs/v23.2/security-reference/authorization#managing-privileges) can always view the unredacted settings. #117729 - `server.oidc_authentication.client_id` - `server.oidc_authentication.client_secret` -- If the `server.redact_sensitive_settings.enabled` [cluster setting]({% link v23.2/cluster-settings.md %}) is set to `true`, then the `MANAGEVIRTUALCLUSTER` [privilege](https://www.cockroachlabs.com/docs/v23.2/security-reference/authorization#managing-privileges) is required to view the values of the per-virtual-cluster overrides for sensitive cluster settings. [#117729][#117729] -- The [DB Console]({% link v23.2/ui-overview.md %}) `session` cookie is now marked `HttpOnly` to prevent it from being read by any JavaScript code. [#119249][#119249] -- [DB Console]({% link v23.2/ui-overview.md %}) cookies are marked `Secure` for the browser when the cluster is running in secure mode. 
[#119249][#119249] +- If the `server.redact_sensitive_settings.enabled` [cluster setting]({% link v23.2/cluster-settings.md %}) is set to `true`, then the `MANAGEVIRTUALCLUSTER` [privilege](https://www.cockroachlabs.com/docs/v23.2/security-reference/authorization#managing-privileges) is required to view the values of the per-virtual-cluster overrides for sensitive cluster settings. #117729 +- The [DB Console]({% link v23.2/ui-overview.md %}) `session` cookie is now marked `HttpOnly` to prevent it from being read by any JavaScript code. #119249 +- [DB Console]({% link v23.2/ui-overview.md %}) cookies are marked `Secure` for the browser when the cluster is running in secure mode. #119249

General changes

-- Updated Go version to 1.21.3. [#115339][#115339] +- Updated Go version to 1.21.3. #115339

{{ site.data.products.enterprise }} edition changes

-- Added a new [SQL function]({% link v23.2/functions-and-operators.md %}) `fips_ready`, which can be used to verify the [FIPS]({% link v23.2/fips.md %}) readiness of the [gateway node](https://www.cockroachlabs.com/docs/v23.2/architecture/life-of-a-distributed-transaction#gateway). [#115202][#115202] -- [Physical Cluster Replication (PCR)]({% link v23.2/physical-cluster-replication-overview.md %}) now retries for approximately 3 minutes before failing. This is increased from 20 µs. [#116402][#116402] -- Fixed a bug where [changefeeds]({% link v23.2/create-and-configure-changefeeds.md %}) that targeted schema-locked tables could fail due to a very old highwater timestamp being incorrectly persisted. [#117961][#117961] -- Fixed a bug where creating a [changefeed]({% link v23.2/create-and-configure-changefeeds.md %}) that targeted tables with a [`DECIMAL(n)`]({% link v23.2/decimal.md %}) column (i.e., zero-scale `DECIMAL` column), `format='avro'`, and `diff` would cause a panic. [#118895][#118895] +- Added a new [SQL function]({% link v23.2/functions-and-operators.md %}) `fips_ready`, which can be used to verify the [FIPS]({% link v23.2/fips.md %}) readiness of the [gateway node](https://www.cockroachlabs.com/docs/v23.2/architecture/life-of-a-distributed-transaction#gateway). #115202 +- [Physical Cluster Replication (PCR)]({% link v23.2/physical-cluster-replication-overview.md %}) now retries for approximately 3 minutes before failing. This is increased from 20 µs. #116402 +- Fixed a bug where [changefeeds]({% link v23.2/create-and-configure-changefeeds.md %}) that targeted schema-locked tables could fail due to a very old highwater timestamp being incorrectly persisted. #117961 +- Fixed a bug where creating a [changefeed]({% link v23.2/create-and-configure-changefeeds.md %}) that targeted tables with a [`DECIMAL(n)`]({% link v23.2/decimal.md %}) column (i.e., zero-scale `DECIMAL` column), `format='avro'`, and `diff` would cause a panic. #118895

SQL language changes

-- Added the `sql.ttl.default_select_rate_limit` [cluster setting]({% link v23.2/cluster-settings.md %}) and the `ttl_select_rate_limit` table [storage parameter]({% link v23.2/row-level-ttl.md %}#ttl-storage-parameters) to set the [TTL]({% link v23.2/row-level-ttl.md %}) select rate limit. This sets the number of records per table per second per node that can be selected by the [TTL job]({% link v23.2/row-level-ttl.md %}#view-scheduled-ttl-jobs). [#115801][#115801] -- Fixed a bug in [PL/pgSQL]({% link v23.2/plpgsql.md %}) where altering the name of a [sequence]({% link v23.2/create-sequence.md %}) or [user-defined type (UDT)]({% link v23.2/create-type.md %}) that was used in a PL/pgSQL [function]({% link v23.2/user-defined-functions.md %}) or [procedure]({% link v23.2/stored-procedures.md %}) could break them. This bug was only present in v23.2 alpha and beta releases. [#116419][#116419] -- Added support for [`IMPORT INTO`]({% link v23.2/import-into.md %}) on a table that has columns typed as [arrays]({% link v23.2/array.md %}) of [user-defined types]({% link v23.2/create-type.md %}) (like [enums]({% link v23.2/enum.md %})). Tables that use multiple user-defined types with the same name but different [schemas]({% link v23.2/create-schema.md %}) are still unsupported. [#116359][#116359] -- The new [`SELECT FOR UPDATE`]({% link v23.2/select-for-update.md %}) implementation used under [Read Committed isolation]({% link v23.2/read-committed.md %}) (and under [Serializable isolation]({% link v23.2/demo-serializable.md %}) when the `optimizer_use_lock_op_for_serializable` [session variable]({% link v23.2/session-variables.md %}) is `true`) now locks all [column families]({% link v23.2/column-families.md %}) instead of only the first column family. 
[#116826][#116826] -- Fixed a bug where [`SELECT FOR UPDATE`]({% link v23.2/select-for-update.md %}) under [Read Committed isolation]({% link v23.2/read-committed.md %}) on multi-column-family tables was not locking [column families]({% link v23.2/column-families.md %}) containing only key columns. [#116826][#116826] -- It is now possible to run [`CALL`]({% link v23.2/call.md %}) statements with [`EXPLAIN`]({% link v23.2/explain.md %}). The `EXPLAIN (OPT)` variant will show the body of the procedure, while other variants will only show the procedure name and arguments. [#116274][#116274] -- [`EXPLAIN`]({% link v23.2/explain.md %}) output now contains detailed information about the plans for `CASCADE` actions. [#117719][#117719] +- Added the `sql.ttl.default_select_rate_limit` [cluster setting]({% link v23.2/cluster-settings.md %}) and the `ttl_select_rate_limit` table [storage parameter]({% link v23.2/row-level-ttl.md %}#ttl-storage-parameters) to set the [TTL]({% link v23.2/row-level-ttl.md %}) select rate limit. This sets the number of records per table per second per node that can be selected by the [TTL job]({% link v23.2/row-level-ttl.md %}#view-scheduled-ttl-jobs). #115801 +- Fixed a bug in [PL/pgSQL]({% link v23.2/plpgsql.md %}) where altering the name of a [sequence]({% link v23.2/create-sequence.md %}) or [user-defined type (UDT)]({% link v23.2/create-type.md %}) that was used in a PL/pgSQL [function]({% link v23.2/user-defined-functions.md %}) or [procedure]({% link v23.2/stored-procedures.md %}) could break them. This bug was only present in v23.2 alpha and beta releases. #116419 +- Added support for [`IMPORT INTO`]({% link v23.2/import-into.md %}) on a table that has columns typed as [arrays]({% link v23.2/array.md %}) of [user-defined types]({% link v23.2/create-type.md %}) (like [enums]({% link v23.2/enum.md %})). 
Tables that use multiple user-defined types with the same name but different [schemas]({% link v23.2/create-schema.md %}) are still unsupported. #116359 +- The new [`SELECT FOR UPDATE`]({% link v23.2/select-for-update.md %}) implementation used under [Read Committed isolation]({% link v23.2/read-committed.md %}) (and under [Serializable isolation]({% link v23.2/demo-serializable.md %}) when the `optimizer_use_lock_op_for_serializable` [session variable]({% link v23.2/session-variables.md %}) is `true`) now locks all [column families]({% link v23.2/column-families.md %}) instead of only the first column family. #116826 +- Fixed a bug where [`SELECT FOR UPDATE`]({% link v23.2/select-for-update.md %}) under [Read Committed isolation]({% link v23.2/read-committed.md %}) on multi-column-family tables was not locking [column families]({% link v23.2/column-families.md %}) containing only key columns. #116826 +- It is now possible to run [`CALL`]({% link v23.2/call.md %}) statements with [`EXPLAIN`]({% link v23.2/explain.md %}). The `EXPLAIN (OPT)` variant will show the body of the procedure, while other variants will only show the procedure name and arguments. #116274 +- [`EXPLAIN`]({% link v23.2/explain.md %}) output now contains detailed information about the plans for `CASCADE` actions. #117719

Operational changes

-- Per-node [hot ranges]({% link v23.2/ui-hot-ranges-page.md %}) logging now logs the top 5 hot ranges on the local node instead of the top 5 hot ranges cluster-wide. [#118334][#118334] +- Per-node [hot ranges]({% link v23.2/ui-hot-ranges-page.md %}) logging now logs the top 5 hot ranges on the local node instead of the top 5 hot ranges cluster-wide. #118334

Command-line changes

-- Added a new command `cockroach debug enterprise-check-fips`, which diagnoses errors in [FIPS]({% link v23.2/fips.md %}) deployments. [#115202][#115202] -- The new flag `--enterprise-require-fips-ready` can be added to any [`cockroach` command]({% link v23.2/cockroach-commands.md %}) to prevent startup if certain prerequisites for [FIPS]({% link v23.2/fips.md %}) compliance are not met. [#115202][#115202] -- [`cockroach workload`]({% link v23.2/cockroach-workload.md %}) commands now appropriately invoke `.Close` in the case of an error. [#116487][#116487] +- Added a new command `cockroach debug enterprise-check-fips`, which diagnoses errors in [FIPS]({% link v23.2/fips.md %}) deployments. #115202 +- The new flag `--enterprise-require-fips-ready` can be added to any [`cockroach` command]({% link v23.2/cockroach-commands.md %}) to prevent startup if certain prerequisites for [FIPS]({% link v23.2/fips.md %}) compliance are not met. #115202 +- [`cockroach workload`]({% link v23.2/cockroach-workload.md %}) commands now appropriately invoke `.Close` in the case of an error. #116487

DB Console changes

-- Updated the "CPU Time" label on the [Runtime Dashboard]({% link v23.2/ui-runtime-dashboard.md %}) to "SQL CPU Time" and added clarifications to its tooltip. [#116449][#116449] -- [Statement bundles]({% link v23.2/ui-statements-page.md %}#diagnostics) are now enabled for Serverless clusters. [#117529][#117529] -- The [Networking Dashboard]({% link v23.2/ui-networking-dashboard.md %}) is enhanced with charts that visualize number of packets received, number of receiving packets with error, number of receiving packets that got dropped, number of packets sent, number of sending packets with error, and number of sending packets that got dropped. [#116712][#116712] -- The [Explain Plans]({% link v23.2/ui-statements-page.md %}#explain-plans) tab is now shown for the [Statements]({% link v23.2/ui-statements-page.md %}) and [Insights]({% link v23.2/ui-insights-page.md %}) pages, for Serverless clusters. [#118169][#118169] +- Updated the "CPU Time" label on the [Runtime Dashboard]({% link v23.2/ui-runtime-dashboard.md %}) to "SQL CPU Time" and added clarifications to its tooltip. #116449 +- [Statement bundles]({% link v23.2/ui-statements-page.md %}#diagnostics) are now enabled for Serverless clusters. #117529 +- The [Networking Dashboard]({% link v23.2/ui-networking-dashboard.md %}) is enhanced with charts that visualize number of packets received, number of receiving packets with error, number of receiving packets that got dropped, number of packets sent, number of sending packets with error, and number of sending packets that got dropped. #116712 +- The [Explain Plans]({% link v23.2/ui-statements-page.md %}#explain-plans) tab is now shown for the [Statements]({% link v23.2/ui-statements-page.md %}) and [Insights]({% link v23.2/ui-insights-page.md %}) pages, for Serverless clusters. #118169

Bug fixes

-- Fixed a durability bug in [Raft log](https://www.cockroachlabs.com/docs/v23.2/architecture/replication-layer#raft) storage, caused by incorrect syncing of filesystem metadata. Previously, it was possible to lose writes of a particular kind (`AddSSTable`) that were used by e.g. [`RESTORE`]({% link v23.2/restore.md %}). This loss was possible only under power-off or operating system crash conditions. Under such conditions, CockroachDB could enter a crash loop on node restart. In the worst case of a coordinated power-off/crash across multiple nodes this could lead to an unrecoverable loss of [Raft quorum](https://www.cockroachlabs.com/docs/v23.2/architecture/replication-layer#raft). [#115709][#115709] -- Fixed a bug where large [jobs]({% link v23.2/show-jobs.md %}) running with execution locality (such as some [changefeeds]({% link v23.2/changefeeds-in-multi-region-deployments.md %}#run-a-changefeed-job-by-locality)) could result in the [gateway](https://www.cockroachlabs.com/docs/v23.2/architecture/life-of-a-distributed-transaction#gateway) node being assigned most of the work, causing performance degradation and cluster instability. [#115388][#115388] -- Fixed a bug that caused node crashes and panics when running [`INSERT`]({% link v23.2/insert.md %}) queries on [`REGIONAL BY ROW` tables with `UNIQUE` constraints or indexes]({% link v23.2/alter-table.md %}#add-a-unique-index-to-a-regional-by-row-table). The bug was only present in v23.2.0-beta.1. [#115668][#115668] -- Fixed a bug that existed only in v23.2 alpha and beta versions that could have caused side effects to happen out of order for [PL/pgSQL]({% link v23.2/plpgsql.md %}) routines in rare cases. [#115839][#115839] -- Fixed a bug that existed since v23.1 that prevented naming [user-defined type (UDT)]({% link v23.2/create-type.md %}) parameters when dropping a [user-defined function]({% link v23.2/user-defined-functions.md %}) (or procedure). 
[#115904][#115904] -- Fixed a bug where [scheduled jobs]({% link v23.2/show-schedules.md %}) using [external storage providers]({% link v23.2/create-external-connection.md %}) could fail shortly after node startup. [#115693][#115693] -- Locking tables (e.g., with [`SELECT FOR UPDATE`]({% link v23.2/select-for-update.md %})) on the null-extended side of [outer joins]({% link v23.2/joins.md %}) (e.g., the right side of a `LEFT JOIN`) is now disallowed and returns an error. This improves compatibility with PostgreSQL and prevents ambiguity in [locking semantics](https://www.cockroachlabs.com/docs/v23.2/architecture/transaction-layer#concurrency-control). This bug has existed since locking with `FOR UPDATE` was introduced. [#115878][#115878] -- Fixed a display bug in the [DB Console]({% link v23.2/ui-overview.md %}) where because not all types of [schema changes]({% link v23.2/online-schema-changes.md %}) are setting the value for the mutation ID, the value of the ID could previously show as "with ID undefined" on the [Events panel]({% link v23.2/ui-overview-dashboard.md %}#events-panel). Now, the notification omits the undefined value (the rest of the event notification is still displayed). [#116518][#116518] -- Fixed the formatting for [PL/pgSQL]({% link v23.2/plpgsql.md %}) routines, which could prevent creating a routine with [loop labels]({% link v23.2/create-procedure.md %}#create-a-stored-procedure-that-uses-a-while-loop), and could prevent some expressions from being [redacted]({% link v23.2/configure-logs.md %}#redact-logs) correctly. The bug only existed in v23.2 alpha and beta releases. [#116713][#116713] -- Fixed a bug that would cause a syntax error during [redaction]({% link v23.2/configure-logs.md %}#redact-logs) of a [PL/pgSQL]({% link v23.2/plpgsql.md %}) routine. The bug existed only in v23.2 alpha and beta releases. 
[#116713][#116713] -- Fixed a bug that would cause syntax errors when attempting to [`RESTORE`]({% link v23.2/restore.md %}) a database with [PL/pgSQL]({% link v23.2/plpgsql.md %}) [user-defined functions (UDFs)]({% link v23.2/user-defined-functions.md %}) or [stored procedures]({% link v23.2/stored-procedures.md %}). This bug only affected v23.2 alpha and beta releases. [#116713][#116713] -- [`UPDATE`]({% link v23.2/update.md %}), [`UPSERT`]({% link v23.2/upsert.md %}), and [`INSERT ON CONFLICT`]({% link v23.2/insert.md %}#on-conflict-clause) queries are now disallowed under [Read Committed isolation]({% link v23.2/read-committed.md %}) when the table contains a [check constraint]({% link v23.2/check.md %}) involving a [column family]({% link v23.2/column-families.md %}) that is updated, and the check constraint also involves a column family that is **not** updated, but **is** read. This is a temporary fix to prevent possible violation of the check constraint, and the restriction will be lifted in the future. [#116428][#116428] -- Previously, all `AggHistogram`-powered metrics were not reporting quantiles properly in the [DB Console]({% link v23.2/ui-overview.md %}). This patch fixes the histograms so that the quantiles in DB Console are reported correctly. these histograms were only broken in the [DB Console metrics dashboards]({% link v23.2/ui-overview-dashboard.md %}), but were **not** broken in the [Prometheus-compatible endpoint]({% link v23.2/monitoring-and-alerting.md %}#prometheus-endpoint), `/_status/vars`. The list of affected metrics is shown below. [#114506][#114506] +- Fixed a durability bug in [Raft log](https://www.cockroachlabs.com/docs/v23.2/architecture/replication-layer#raft) storage, caused by incorrect syncing of filesystem metadata. Previously, it was possible to lose writes of a particular kind (`AddSSTable`) that were used by e.g. [`RESTORE`]({% link v23.2/restore.md %}). 
This loss was possible only under power-off or operating system crash conditions. Under such conditions, CockroachDB could enter a crash loop on node restart. In the worst case of a coordinated power-off/crash across multiple nodes this could lead to an unrecoverable loss of [Raft quorum](https://www.cockroachlabs.com/docs/v23.2/architecture/replication-layer#raft). #115709 +- Fixed a bug where large [jobs]({% link v23.2/show-jobs.md %}) running with execution locality (such as some [changefeeds]({% link v23.2/changefeeds-in-multi-region-deployments.md %}#run-a-changefeed-job-by-locality)) could result in the [gateway](https://www.cockroachlabs.com/docs/v23.2/architecture/life-of-a-distributed-transaction#gateway) node being assigned most of the work, causing performance degradation and cluster instability. #115388 +- Fixed a bug that caused node crashes and panics when running [`INSERT`]({% link v23.2/insert.md %}) queries on [`REGIONAL BY ROW` tables with `UNIQUE` constraints or indexes]({% link v23.2/alter-table.md %}#add-a-unique-index-to-a-regional-by-row-table). The bug was only present in v23.2.0-beta.1. #115668 +- Fixed a bug that existed only in v23.2 alpha and beta versions that could have caused side effects to happen out of order for [PL/pgSQL]({% link v23.2/plpgsql.md %}) routines in rare cases. #115839 +- Fixed a bug that existed since v23.1 that prevented naming [user-defined type (UDT)]({% link v23.2/create-type.md %}) parameters when dropping a [user-defined function]({% link v23.2/user-defined-functions.md %}) (or procedure). #115904 +- Fixed a bug where [scheduled jobs]({% link v23.2/show-schedules.md %}) using [external storage providers]({% link v23.2/create-external-connection.md %}) could fail shortly after node startup. 
#115693 +- Locking tables (e.g., with [`SELECT FOR UPDATE`]({% link v23.2/select-for-update.md %})) on the null-extended side of [outer joins]({% link v23.2/joins.md %}) (e.g., the right side of a `LEFT JOIN`) is now disallowed and returns an error. This improves compatibility with PostgreSQL and prevents ambiguity in [locking semantics](https://www.cockroachlabs.com/docs/v23.2/architecture/transaction-layer#concurrency-control). This bug has existed since locking with `FOR UPDATE` was introduced. #115878 +- Fixed a display bug in the [DB Console]({% link v23.2/ui-overview.md %}) where because not all types of [schema changes]({% link v23.2/online-schema-changes.md %}) are setting the value for the mutation ID, the value of the ID could previously show as "with ID undefined" on the [Events panel]({% link v23.2/ui-overview-dashboard.md %}#events-panel). Now, the notification omits the undefined value (the rest of the event notification is still displayed). #116518 +- Fixed the formatting for [PL/pgSQL]({% link v23.2/plpgsql.md %}) routines, which could prevent creating a routine with [loop labels]({% link v23.2/create-procedure.md %}#create-a-stored-procedure-that-uses-a-while-loop), and could prevent some expressions from being [redacted]({% link v23.2/configure-logs.md %}#redact-logs) correctly. The bug only existed in v23.2 alpha and beta releases. #116713 +- Fixed a bug that would cause a syntax error during [redaction]({% link v23.2/configure-logs.md %}#redact-logs) of a [PL/pgSQL]({% link v23.2/plpgsql.md %}) routine. The bug existed only in v23.2 alpha and beta releases. #116713 +- Fixed a bug that would cause syntax errors when attempting to [`RESTORE`]({% link v23.2/restore.md %}) a database with [PL/pgSQL]({% link v23.2/plpgsql.md %}) [user-defined functions (UDFs)]({% link v23.2/user-defined-functions.md %}) or [stored procedures]({% link v23.2/stored-procedures.md %}). This bug only affected v23.2 alpha and beta releases. 
#116713 +- [`UPDATE`]({% link v23.2/update.md %}), [`UPSERT`]({% link v23.2/upsert.md %}), and [`INSERT ON CONFLICT`]({% link v23.2/insert.md %}#on-conflict-clause) queries are now disallowed under [Read Committed isolation]({% link v23.2/read-committed.md %}) when the table contains a [check constraint]({% link v23.2/check.md %}) involving a [column family]({% link v23.2/column-families.md %}) that is updated, and the check constraint also involves a column family that is **not** updated, but **is** read. This is a temporary fix to prevent possible violation of the check constraint, and the restriction will be lifted in the future. #116428 +- Previously, all `AggHistogram`-powered metrics were not reporting quantiles properly in the [DB Console]({% link v23.2/ui-overview.md %}). This patch fixes the histograms so that the quantiles in DB Console are reported correctly. these histograms were only broken in the [DB Console metrics dashboards]({% link v23.2/ui-overview-dashboard.md %}), but were **not** broken in the [Prometheus-compatible endpoint]({% link v23.2/monitoring-and-alerting.md %}#prometheus-endpoint), `/_status/vars`. The list of affected metrics is shown below. #114506 - `changefeed.message_size_hist` - `changefeed.parallel_io_queue_nanos` - `changefeed.sink_batch_hist_nanos` @@ -75,30 +75,30 @@ Release Date: February 20, 2024 - `jobs.row_level_ttl.span_total_duration` - `jobs.row_level_ttl.select_duration` - `jobs.row_level_ttl.delete_duration` -- Fixed a bug introduced in v23.2 that caused internal errors and panics when certain SQL queries were run with automatic [index recommendation]({% link v23.2/ui-databases-page.md %}#index-recommendations) collection enabled. [#117453][#117453] -- [Standard indexes]({% link v23.2/indexes.md %}) and [inverted indexes]({% link v23.2/inverted-indexes.md %}) may no longer be created on [PL/pgSQL]({% link v23.2/plpgsql.md %}) `REFCURSOR[]`s columns. `REFCURSOR` columns themselves are not indexable. 
[#116071][#116071] -- Fixed a bug that prevented database [`RESTORE`]({% link v23.2/restore.md %}) when the database contained a [view]({% link v23.2/views.md %}) or [routine]({% link v23.2/create-procedure.md %}) that referenced a [user-defined type (UDT)]({% link v23.2/create-type.md %}) in the body string. For views, this bug was introduced in v20.2, when [user-defined types (UDTs)]({% link v23.2/create-type.md %}) were introduced. For routines, this bug was introduced in v22.2, when user-defined functions (UDFs) were introduced. [#116841][#116841] -- Fixed a bug that could cause a function resolution error when attempting to use a [builtin function]({% link v23.2/functions-and-operators.md %}) like `now()` as a formatting argument to a [PL/pgSQL]({% link v23.2/plpgsql.md %}) `RAISE` statement. [#116825][#116825] -- Fixed a bug where CDC custom key columns did not function correctly with [CDC queries]({% link v23.2/create-and-configure-changefeeds.md %}). For example, `CREATE CHANGEFEED WITH key_column=..., unordered AS SELECT * FROM table` now works correctly instead of retrying forever. Note that some functionalities with CDC custom keys are not fully supported, see [#115267][#115267] for more details. [#116967][#116967] -- Fixed a bug in [Raft log](https://www.cockroachlabs.com/docs/v23.2/architecture/replication-layer#raft) truncation that could lead to crash loops, and unrecoverable loss of [quorum](https://www.cockroachlabs.com/docs/v23.2/architecture/replication-layer#raft) in the unlikely worst case that all [replicas](https://www.cockroachlabs.com/docs/v23.2/architecture/overview#architecture-replica) enter this crash loop. 
The bug manifested when a few things coincided: The cluster was running a bulk write workload (e.g., [schema change]({% link v23.2/online-schema-changes.md %}), [import]({% link v23.2/copy.md %}), [`RESTORE`]({% link v23.2/restore.md %})); a log truncation command was running; and the process crashed at an unfortunate moment (e.g., the process was killed, or killed itself for reasons like detecting a [disk stall]({% link v23.2/cluster-setup-troubleshooting.md %}#disk-stalls)). [#116574][#116574] -- Fixed the value used for the total runtime on SQL statistics. This was using the wrong value previously, causing the [SQL Activity]({% link v23.2/ui-overview.md %}#sql-activity) page to display values with more than 100%. [#117426][#117426] -- Fixed a bug where trying to set an empty `search_path` [session variable]({% link v23.2/session-variables.md %}) resulted in an error. [#117557][#117557] -- It is now possible to assign to the parameter of a [PL/pgSQL]({% link v23.2/plpgsql.md %}) [routine]({% link v23.2/create-procedure.md %}). Previously, attempts to do this would result in a "variable not found" error at routine creation time. In addition, variable shadowing is now explicitly disabled, where previously it would cause an internal error. These bugs existed in the v23.2.0 release and the v23.2 pre-release versions. [#117715][#117715] -- Fixed a bug in the [row-level TTL]({% link v23.2/row-level-ttl.md %}) [job]({% link v23.2/show-schedules.md %}) that would cause it to skip expired rows if the [primary key]({% link v23.2/primary-key.md %}) of the table included columns of the [collated string]({% link v23.2/collate.md %}) type. This bug was present since the initial release of row-level TTL in v22.2.0. [#117512][#117512] -- Fixed a bug where concurrent [`GRANT`]({% link v23.2/grant.md %}) statements can cause deadlocks. 
[#117713][#117713] -- CockroachDB can now transparently retry more retryable errors when performing a non-atomic [`COPY`]({% link v23.2/copy.md %}) command. [#117895][#117895] -- Fixed a bug that caused [DML statements]({% link v23.2/performance-best-practices-overview.md %}#dml-best-practices) to fail while a [hash-sharded index]({% link v23.2/hash-sharded-indexes.md %}) was being created. The symptom of this bug was an error like `column "crdb_internal_val_shard_16" does not exist`. This bug was present since v23.1.0. [#118215][#118215] -- Previously, CockroachDB could encounter the error `unable to encode table key: *tree.DTSQuery` when operating on columns with the internal `TSQuery` type in some contexts (e.g., when collecting [table statistics]({% link v23.2/cost-based-optimizer.md %}#table-statistics) or when performing a [`DISTINCT` operation]({% link v23.2/select-clause.md %}#eliminate-duplicate-rows)). This is now fixed. The bug had been present since v23.1 when support for the internal `TSQuery` type was added. [#118321][#118321] -- Previously, in some cases CockroachDB could incorrectly evaluate queries that scanned an [inverted index]({% link v23.2/inverted-indexes.md %}) and had a [`WHERE` filter]({% link v23.2/select-clause.md %}#where-clause) in which two sides of the `AND` expression had "similar" expressions (e.g., `ARRAY['str1'] <@ col AND (ARRAY['str1'] && col OR ...)`); this is now fixed. The bug had been present since prior to v22.2. [#118360][#118360] -- Fixed a bug that could cause [`DELETE`]({% link v23.2/delete.md %}) queries sent by the [row-level TTL]({% link v23.2/row-level-ttl.md %}) [job]({% link v23.2/show-schedules.md %}) to use a [secondary index]({% link v23.2/indexes.md %}) rather than the [primary index]({% link v23.2/primary-key.md %}) to find the rows to delete. This could lead to some `DELETE` operations taking a much longer time than they should. This bug was present since v22.2.0. 
[#118337][#118337] -- Fixed an issue with missing data on SQL statistics, and consequently missing data on the [SQL Activity page]({% link v23.2/ui-overview.md %}#sql-activity), by properly recalculating the value from the current and past hour on the top activity table. [#118378][#118378] -- Internal queries issued by the [row-level TTL]({% link v23.2/row-level-ttl.md %}) [jobs]({% link v23.2/show-schedules.md %}) should now use optimal plans. The bug has been present since at least v22.2. [#118494][#118494] +- Fixed a bug introduced in v23.2 that caused internal errors and panics when certain SQL queries were run with automatic [index recommendation]({% link v23.2/ui-databases-page.md %}#index-recommendations) collection enabled. #117453 +- [Standard indexes]({% link v23.2/indexes.md %}) and [inverted indexes]({% link v23.2/inverted-indexes.md %}) may no longer be created on [PL/pgSQL]({% link v23.2/plpgsql.md %}) `REFCURSOR[]`s columns. `REFCURSOR` columns themselves are not indexable. #116071 +- Fixed a bug that prevented database [`RESTORE`]({% link v23.2/restore.md %}) when the database contained a [view]({% link v23.2/views.md %}) or [routine]({% link v23.2/create-procedure.md %}) that referenced a [user-defined type (UDT)]({% link v23.2/create-type.md %}) in the body string. For views, this bug was introduced in v20.2, when [user-defined types (UDTs)]({% link v23.2/create-type.md %}) were introduced. For routines, this bug was introduced in v22.2, when user-defined functions (UDFs) were introduced. #116841 +- Fixed a bug that could cause a function resolution error when attempting to use a [builtin function]({% link v23.2/functions-and-operators.md %}) like `now()` as a formatting argument to a [PL/pgSQL]({% link v23.2/plpgsql.md %}) `RAISE` statement. #116825 +- Fixed a bug where CDC custom key columns did not function correctly with [CDC queries]({% link v23.2/create-and-configure-changefeeds.md %}). 
For example, `CREATE CHANGEFEED WITH key_column=..., unordered AS SELECT * FROM table` now works correctly instead of retrying forever. Note that some functionalities with CDC custom keys are not fully supported, see #115267 for more details. #116967 +- Fixed a bug in [Raft log](https://www.cockroachlabs.com/docs/v23.2/architecture/replication-layer#raft) truncation that could lead to crash loops, and unrecoverable loss of [quorum](https://www.cockroachlabs.com/docs/v23.2/architecture/replication-layer#raft) in the unlikely worst case that all [replicas](https://www.cockroachlabs.com/docs/v23.2/architecture/overview#architecture-replica) enter this crash loop. The bug manifested when a few things coincided: The cluster was running a bulk write workload (e.g., [schema change]({% link v23.2/online-schema-changes.md %}), [import]({% link v23.2/copy.md %}), [`RESTORE`]({% link v23.2/restore.md %})); a log truncation command was running; and the process crashed at an unfortunate moment (e.g., the process was killed, or killed itself for reasons like detecting a [disk stall]({% link v23.2/cluster-setup-troubleshooting.md %}#disk-stalls)). #116574 +- Fixed the value used for the total runtime on SQL statistics. This was using the wrong value previously, causing the [SQL Activity]({% link v23.2/ui-overview.md %}#sql-activity) page to display values with more than 100%. #117426 +- Fixed a bug where trying to set an empty `search_path` [session variable]({% link v23.2/session-variables.md %}) resulted in an error. #117557 +- It is now possible to assign to the parameter of a [PL/pgSQL]({% link v23.2/plpgsql.md %}) [routine]({% link v23.2/create-procedure.md %}). Previously, attempts to do this would result in a "variable not found" error at routine creation time. In addition, variable shadowing is now explicitly disabled, where previously it would cause an internal error. These bugs existed in the v23.2.0 release and the v23.2 pre-release versions. 
#117715 +- Fixed a bug in the [row-level TTL]({% link v23.2/row-level-ttl.md %}) [job]({% link v23.2/show-schedules.md %}) that would cause it to skip expired rows if the [primary key]({% link v23.2/primary-key.md %}) of the table included columns of the [collated string]({% link v23.2/collate.md %}) type. This bug was present since the initial release of row-level TTL in v22.2.0. #117512 +- Fixed a bug where concurrent [`GRANT`]({% link v23.2/grant.md %}) statements can cause deadlocks. #117713 +- CockroachDB can now transparently retry more retryable errors when performing a non-atomic [`COPY`]({% link v23.2/copy.md %}) command. #117895 +- Fixed a bug that caused [DML statements]({% link v23.2/performance-best-practices-overview.md %}#dml-best-practices) to fail while a [hash-sharded index]({% link v23.2/hash-sharded-indexes.md %}) was being created. The symptom of this bug was an error like `column "crdb_internal_val_shard_16" does not exist`. This bug was present since v23.1.0. #118215 +- Previously, CockroachDB could encounter the error `unable to encode table key: *tree.DTSQuery` when operating on columns with the internal `TSQuery` type in some contexts (e.g., when collecting [table statistics]({% link v23.2/cost-based-optimizer.md %}#table-statistics) or when performing a [`DISTINCT` operation]({% link v23.2/select-clause.md %}#eliminate-duplicate-rows)). This is now fixed. The bug had been present since v23.1 when support for the internal `TSQuery` type was added. #118321 +- Previously, in some cases CockroachDB could incorrectly evaluate queries that scanned an [inverted index]({% link v23.2/inverted-indexes.md %}) and had a [`WHERE` filter]({% link v23.2/select-clause.md %}#where-clause) in which two sides of the `AND` expression had "similar" expressions (e.g., `ARRAY['str1'] <@ col AND (ARRAY['str1'] && col OR ...)`); this is now fixed. The bug had been present since prior to v22.2. 
#118360 +- Fixed a bug that could cause [`DELETE`]({% link v23.2/delete.md %}) queries sent by the [row-level TTL]({% link v23.2/row-level-ttl.md %}) [job]({% link v23.2/show-schedules.md %}) to use a [secondary index]({% link v23.2/indexes.md %}) rather than the [primary index]({% link v23.2/primary-key.md %}) to find the rows to delete. This could lead to some `DELETE` operations taking a much longer time than they should. This bug was present since v22.2.0. #118337 +- Fixed an issue with missing data on SQL statistics, and consequently missing data on the [SQL Activity page]({% link v23.2/ui-overview.md %}#sql-activity), by properly recalculating the value from the current and past hour on the top activity table. #118378 +- Internal queries issued by the [row-level TTL]({% link v23.2/row-level-ttl.md %}) [jobs]({% link v23.2/show-schedules.md %}) should now use optimal plans. The bug has been present since at least v22.2. #118494 - Fixed a bug where a [changefeed](https://www.cockroachlabs.com/docs/v23.2/change-data-capture-overview.html) could omit events in rare cases, logging the error `cdc ux violation: detected timestamp ... that is less or equal to the local frontier`. This could happen in the following scenario: 1. A [rangefeed](https://www.cockroachlabs.com/docs/v23.2/create-and-configure-changefeeds.html#enable-rangefeeds) runs on a follower [replica](https://www.cockroachlabs.com/docs/v23.2/architecture/glossary#cockroachdb-architecture-terms) that lags significantly behind the [leaseholder](https://www.cockroachlabs.com/docs/v23.2/architecture/glossary#cockroachdb-architecture-terms). 1. A transaction commits and removes its [transaction record](https://www.cockroachlabs.com/docs/v23.2/architecture/transaction-layer#transaction-records) before its [intent](https://www.cockroachlabs.com/docs/v23.2/architecture/transaction-layer#writing) resolution is applied on the follower. 1. 
The follower's [closed timestamp](https://www.cockroachlabs.com/docs/v23.2/architecture/transaction-layer#closed-timestamps) has advanced past the transaction commit timestamp. 1. The rangefeed attempts to push the transaction to a new timestamp (at least 10 seconds after the transaction began). - 1. This may cause the rangefeed to prematurely emit a checkpoint before emitting writes at lower timestamps, which in turn may cause the [changefeed]({% link v23.2/change-data-capture-overview.md %}) to drop these events entirely, never emitting them. [#118981][#118981] + 1. This may cause the rangefeed to prematurely emit a checkpoint before emitting writes at lower timestamps, which in turn may cause the [changefeed]({% link v23.2/change-data-capture-overview.md %}) to drop these events entirely, never emitting them. #118981
@@ -108,61 +108,3 @@ This release includes 252 merged PRs by 60 authors.
-[#114506]: https://github.com/cockroachdb/cockroach/pull/114506 -[#115202]: https://github.com/cockroachdb/cockroach/pull/115202 -[#115339]: https://github.com/cockroachdb/cockroach/pull/115339 -[#115388]: https://github.com/cockroachdb/cockroach/pull/115388 -[#115668]: https://github.com/cockroachdb/cockroach/pull/115668 -[#115693]: https://github.com/cockroachdb/cockroach/pull/115693 -[#115709]: https://github.com/cockroachdb/cockroach/pull/115709 -[#115801]: https://github.com/cockroachdb/cockroach/pull/115801 -[#115839]: https://github.com/cockroachdb/cockroach/pull/115839 -[#115878]: https://github.com/cockroachdb/cockroach/pull/115878 -[#115904]: https://github.com/cockroachdb/cockroach/pull/115904 -[#116071]: https://github.com/cockroachdb/cockroach/pull/116071 -[#116274]: https://github.com/cockroachdb/cockroach/pull/116274 -[#116359]: https://github.com/cockroachdb/cockroach/pull/116359 -[#116402]: https://github.com/cockroachdb/cockroach/pull/116402 -[#116419]: https://github.com/cockroachdb/cockroach/pull/116419 -[#116428]: https://github.com/cockroachdb/cockroach/pull/116428 -[#116449]: https://github.com/cockroachdb/cockroach/pull/116449 -[#116487]: https://github.com/cockroachdb/cockroach/pull/116487 -[#116518]: https://github.com/cockroachdb/cockroach/pull/116518 -[#116574]: https://github.com/cockroachdb/cockroach/pull/116574 -[#116599]: https://github.com/cockroachdb/cockroach/pull/116599 -[#116712]: https://github.com/cockroachdb/cockroach/pull/116712 -[#116713]: https://github.com/cockroachdb/cockroach/pull/116713 -[#116825]: https://github.com/cockroachdb/cockroach/pull/116825 -[#116826]: https://github.com/cockroachdb/cockroach/pull/116826 -[#116841]: https://github.com/cockroachdb/cockroach/pull/116841 -[#116945]: https://github.com/cockroachdb/cockroach/pull/116945 -[#116967]: https://github.com/cockroachdb/cockroach/pull/116967 -[#117324]: https://github.com/cockroachdb/cockroach/pull/117324 -[#117325]: 
https://github.com/cockroachdb/cockroach/pull/117325 -[#117330]: https://github.com/cockroachdb/cockroach/pull/117330 -[#117426]: https://github.com/cockroachdb/cockroach/pull/117426 -[#117453]: https://github.com/cockroachdb/cockroach/pull/117453 -[#117512]: https://github.com/cockroachdb/cockroach/pull/117512 -[#117529]: https://github.com/cockroachdb/cockroach/pull/117529 -[#117557]: https://github.com/cockroachdb/cockroach/pull/117557 -[#117594]: https://github.com/cockroachdb/cockroach/pull/117594 -[#117713]: https://github.com/cockroachdb/cockroach/pull/117713 -[#117715]: https://github.com/cockroachdb/cockroach/pull/117715 -[#117719]: https://github.com/cockroachdb/cockroach/pull/117719 -[#117729]: https://github.com/cockroachdb/cockroach/pull/117729 -[#117895]: https://github.com/cockroachdb/cockroach/pull/117895 -[#117961]: https://github.com/cockroachdb/cockroach/pull/117961 -[#118154]: https://github.com/cockroachdb/cockroach/pull/118154 -[#118169]: https://github.com/cockroachdb/cockroach/pull/118169 -[#118215]: https://github.com/cockroachdb/cockroach/pull/118215 -[#118257]: https://github.com/cockroachdb/cockroach/pull/118257 -[#118321]: https://github.com/cockroachdb/cockroach/pull/118321 -[#118334]: https://github.com/cockroachdb/cockroach/pull/118334 -[#118337]: https://github.com/cockroachdb/cockroach/pull/118337 -[#118360]: https://github.com/cockroachdb/cockroach/pull/118360 -[#118378]: https://github.com/cockroachdb/cockroach/pull/118378 -[#118494]: https://github.com/cockroachdb/cockroach/pull/118494 -[#118895]: https://github.com/cockroachdb/cockroach/pull/118895 -[#118981]: https://github.com/cockroachdb/cockroach/pull/118981 -[#115267]: https://github.com/cockroachdb/cockroach/pull/115267 -[#119249]: https://github.com/cockroachdb/cockroach/pull/119249 diff --git a/src/current/_includes/releases/v23.2/v23.2.10.md b/src/current/_includes/releases/v23.2/v23.2.10.md index a84bc9ca3db..8d58bfd14f5 100644 --- 
a/src/current/_includes/releases/v23.2/v23.2.10.md +++ b/src/current/_includes/releases/v23.2/v23.2.10.md @@ -20,41 +20,41 @@ Release Date: August 29, 2024 - [`IMPORT INTO`]({% link v23.2/import-into.md %}) - [`RESTORE`]({% link v23.2/restore.md %}) - [`SHOW BACKUPS`]({% link v23.2/show-backup.md %}) - - [`SHOW BACKUP`]({% link v23.2/show-backup.md %}) [#127509][#127509] + - [`SHOW BACKUP`]({% link v23.2/show-backup.md %}) #127509

{{ site.data.products.enterprise }} edition changes

-- Added a new [Kafka sink]({% link v23.2/changefeed-sinks.md %}#kafka) utilizing the `franz-go` library and our own `batching_sink` behind a [cluster setting]({% link v23.2/cluster-settings.md %}) (`changefeed.new_kafka_sink_enabled`, disabled by default). [#128048][#128048] -- The v2 Kafka and Google Cloud Pub/Sub [changefeed sinks]({% link v23.2/changefeed-sinks.md %}) now display notices indicating the topics they will emit to. [#128459][#128459] +- Added a new [Kafka sink]({% link v23.2/changefeed-sinks.md %}#kafka) utilizing the `franz-go` library and our own `batching_sink` behind a [cluster setting]({% link v23.2/cluster-settings.md %}) (`changefeed.new_kafka_sink_enabled`, disabled by default). #128048 +- The v2 Kafka and Google Cloud Pub/Sub [changefeed sinks]({% link v23.2/changefeed-sinks.md %}) now display notices indicating the topics they will emit to. #128459

SQL language changes

-- Added the [`sql.auth.grant_option_for_owner.enabled` cluster setting]({% link v23.2/cluster-settings.md %}#setting-sql-auth-grant-option-for-owner-enabled). The default value is `true`, which results in behavior that matches the existing behavior of CockroachDB. When set to `false`, then the `GRANT OPTION` is not implcitly given to the owner of an object. The object owner still implicitly has all privileges on the object, just not the ability to grant them to other users. [#126958][#126958] -- Fixed a bug where the `DISCARD` statement was disallowed when the [`default_transaction_read_only`]({% link v23.2/session-variables.md %}#default-transaction-read-only) session setting was set to `on`. [#127548][#127548] +- Added the [`sql.auth.grant_option_for_owner.enabled` cluster setting]({% link v23.2/cluster-settings.md %}#setting-sql-auth-grant-option-for-owner-enabled). The default value is `true`, which results in behavior that matches the existing behavior of CockroachDB. When set to `false`, then the `GRANT OPTION` is not implicitly given to the owner of an object. The object owner still implicitly has all privileges on the object, just not the ability to grant them to other users. #126958 +- Fixed a bug where the `DISCARD` statement was disallowed when the [`default_transaction_read_only`]({% link v23.2/session-variables.md %}#default-transaction-read-only) session setting was set to `on`. #127548

DB Console changes

-- The [**Databases** and **Tables** pages]({% link v23.2/ui-databases-page.md %}) in the DB Console now show a loading state while loading information for databases and tables, including size and range counts. [#127709][#127709] -- On the [**Databases** page]({% link v23.2/ui-databases-page.md %}) in the DB Console, table names will no longer appear with quotes around the schema and table name. [#127765][#127765] +- The [**Databases** and **Tables** pages]({% link v23.2/ui-databases-page.md %}) in the DB Console now show a loading state while loading information for databases and tables, including size and range counts. #127709 +- On the [**Databases** page]({% link v23.2/ui-databases-page.md %}) in the DB Console, table names will no longer appear with quotes around the schema and table name. #127765

Bug fixes

-- Fixed a bug causing gateway nodes to crash while executing [`INSERT`]({% link v23.2/insert.md %}) statements in [`REGIONAL BY ROW`]({% link v23.2/table-localities.md %}#regional-by-row-tables) tables. This bug had been present since v23.2. [#127276][#127276] -- Fixed a bug where [dropping `ENUM` values]({% link v23.2/alter-type.md %}#drop-a-value-in-a-user-defined-type) that were referenced by [index expressions]({% link v23.2/expression-indexes.md %}) could fail with an error. [#127453][#127453] -- Fixed a bug that caused a memory leak when executing SQL statements with comments, e.g., [`SELECT /* comment */ 1;`]({% link v23.2/select-clause.md %}). Memory owned by a SQL session would continue to grow as these types of statements were executed. The memory would only be released when closing the SQL session. This bug had been present since v23.1. [#127758][#127758] -- Fixed a memory leak that could occur when specifying a non-existent [virtual cluster]({% link v23.2/cluster-virtualization-overview.md %}) name in the connection string. [#128104][#128104] -- Fixed a bug where [`CREATE INDEX IF NOT EXISTS`]({% link v23.2/create-index.md %}) would not correctly short-circuit if the given index already existed. [#128312][#128312] -- Fixed a bug in overly eager syntax validation, which did not allow the `DESCENDING` clause for non-terminal columns of an [inverted index]({% link v23.2/inverted-indexes.md %}). Only the last column of an inverted index should be prevented from being `DESCENDING`, and this is now properly checked. [#128312][#128312] -- Fixed a bug where an [index]({% link v23.2/indexes.md %}) could store a column in the primary index if that column had a mixed-case name. [#128312][#128312] -- Fixed small memory leaks that would occur during [changefeed creation]({% link v23.2/create-changefeed.md %}). 
[#128048][#128048] -- Setting or dropping a default value on a [computed column]({% link v23.2/computed-columns.md %}) is now blocked -- even for null defaults. Previously, setting or dropping a default value on a computed column was a no-op; now there will be an error message. [#128467][#128467] -- Fixed a bug that could cause spurious user permission errors when multiple databases shared a common schema with a routine referencing a table. The bug had existed since [user-defined functions]({% link v23.2/user-defined-functions.md %}) were introduced in v22.2. [#126413][#126413] -- Fixed a bug where a hash-sharded constraint could not be created if it referred to columns that had a backslash in the name. [#128676][#128676] -- Fixed a bug where `TYPEDESC SCHEMA CHANGE` jobs could end up retrying forever if the descriptor targeted by them was already dropped. [#128461][#128461] +- Fixed a bug causing gateway nodes to crash while executing [`INSERT`]({% link v23.2/insert.md %}) statements in [`REGIONAL BY ROW`]({% link v23.2/table-localities.md %}#regional-by-row-tables) tables. This bug had been present since v23.2. #127276 +- Fixed a bug where [dropping `ENUM` values]({% link v23.2/alter-type.md %}#drop-a-value-in-a-user-defined-type) that were referenced by [index expressions]({% link v23.2/expression-indexes.md %}) could fail with an error. #127453 +- Fixed a bug that caused a memory leak when executing SQL statements with comments, e.g., [`SELECT /* comment */ 1;`]({% link v23.2/select-clause.md %}). Memory owned by a SQL session would continue to grow as these types of statements were executed. The memory would only be released when closing the SQL session. This bug had been present since v23.1. #127758 +- Fixed a memory leak that could occur when specifying a non-existent [virtual cluster]({% link v23.2/cluster-virtualization-overview.md %}) name in the connection string. 
#128104 +- Fixed a bug where [`CREATE INDEX IF NOT EXISTS`]({% link v23.2/create-index.md %}) would not correctly short-circuit if the given index already existed. #128312 +- Fixed a bug in overly eager syntax validation, which did not allow the `DESCENDING` clause for non-terminal columns of an [inverted index]({% link v23.2/inverted-indexes.md %}). Only the last column of an inverted index should be prevented from being `DESCENDING`, and this is now properly checked. #128312 +- Fixed a bug where an [index]({% link v23.2/indexes.md %}) could store a column in the primary index if that column had a mixed-case name. #128312 +- Fixed small memory leaks that would occur during [changefeed creation]({% link v23.2/create-changefeed.md %}). #128048 +- Setting or dropping a default value on a [computed column]({% link v23.2/computed-columns.md %}) is now blocked -- even for null defaults. Previously, setting or dropping a default value on a computed column was a no-op; now there will be an error message. #128467 +- Fixed a bug that could cause spurious user permission errors when multiple databases shared a common schema with a routine referencing a table. The bug had existed since [user-defined functions]({% link v23.2/user-defined-functions.md %}) were introduced in v22.2. #126413 +- Fixed a bug where a hash-sharded constraint could not be created if it referred to columns that had a backslash in the name. #128676 +- Fixed a bug where `TYPEDESC SCHEMA CHANGE` jobs could end up retrying forever if the descriptor targeted by them was already dropped. #128461 - Fixed a bug in which the output of [`EXPLAIN (OPT, REDACT)`]({% link v23.2/explain.md %}) for various `CREATE` statements was not redacted. 
This bug had existed since [`EXPLAIN (REDACT)`]({% link v23.2/explain.md %}#parameters) was introduced in v23.1 and affects the following statements: - `EXPLAIN (OPT, REDACT) CREATE TABLE` - `EXPLAIN (OPT, REDACT) CREATE VIEW` - - `EXPLAIN (OPT, REDACT) CREATE FUNCTION` [#128488][#128488] + - `EXPLAIN (OPT, REDACT) CREATE FUNCTION` #128488
@@ -64,27 +64,3 @@ This release includes 80 merged PRs by 28 authors.
-[#126413]: https://github.com/cockroachdb/cockroach/pull/126413 -[#126958]: https://github.com/cockroachdb/cockroach/pull/126958 -[#127276]: https://github.com/cockroachdb/cockroach/pull/127276 -[#127389]: https://github.com/cockroachdb/cockroach/pull/127389 -[#127453]: https://github.com/cockroachdb/cockroach/pull/127453 -[#127509]: https://github.com/cockroachdb/cockroach/pull/127509 -[#127548]: https://github.com/cockroachdb/cockroach/pull/127548 -[#127607]: https://github.com/cockroachdb/cockroach/pull/127607 -[#127709]: https://github.com/cockroachdb/cockroach/pull/127709 -[#127758]: https://github.com/cockroachdb/cockroach/pull/127758 -[#127765]: https://github.com/cockroachdb/cockroach/pull/127765 -[#127854]: https://github.com/cockroachdb/cockroach/pull/127854 -[#128048]: https://github.com/cockroachdb/cockroach/pull/128048 -[#128104]: https://github.com/cockroachdb/cockroach/pull/128104 -[#128312]: https://github.com/cockroachdb/cockroach/pull/128312 -[#128459]: https://github.com/cockroachdb/cockroach/pull/128459 -[#128461]: https://github.com/cockroachdb/cockroach/pull/128461 -[#128467]: https://github.com/cockroachdb/cockroach/pull/128467 -[#128488]: https://github.com/cockroachdb/cockroach/pull/128488 -[#128582]: https://github.com/cockroachdb/cockroach/pull/128582 -[#128676]: https://github.com/cockroachdb/cockroach/pull/128676 -[45ad2e9aa]: https://github.com/cockroachdb/cockroach/commit/45ad2e9aa -[dd9b455ab]: https://github.com/cockroachdb/cockroach/commit/dd9b455ab -[e7e3f836d]: https://github.com/cockroachdb/cockroach/commit/e7e3f836d diff --git a/src/current/_includes/releases/v23.2/v23.2.11.md b/src/current/_includes/releases/v23.2/v23.2.11.md index a706d42583e..bad1de11d2e 100644 --- a/src/current/_includes/releases/v23.2/v23.2.11.md +++ b/src/current/_includes/releases/v23.2/v23.2.11.md @@ -5,9 +5,6 @@ Release Date: September 16, 2024 {% include releases/new-release-downloads-docker-image.md release=include.release %}

Bug fixes

-- Internally issued queries that are not initiated within a [SQL session]({% link v23.2/show-sessions.md %}) no longer respect a [statement timeout]({% link v23.2/session-variables.md %}#statement-timeout). This includes: background [jobs]({% link v23.2/show-jobs.md %}), queries issued by the [DB Console]({% link v23.2/ui-overview.md %}) that perform introspection, and the [Cloud SQL shell]({% link cockroachcloud/sql-shell.md %}). [#130525][#130525] -- Fixed a rare bug where a [lease transfer]({% link v23.2/architecture/replication-layer.md %}#leases) could lead to a `side-transport update saw closed timestamp regression` panic. The bug could occur when a node was [overloaded]({% link v23.2/ui-overload-dashboard.md %}) and failing to heartbeat its [node liveness]({% link v23.2/cluster-setup-troubleshooting.md %}#node-liveness-issues) record. [#130523][#130523] -- Resolved a concerning [log]({% link v23.2/logging-overview.md %}) message: `expiration of liveness record ... is not greater than expiration of the previous lease ... after liveness heartbeat`. This message is no longer possible. [#130523][#130523] - -[#130523]: https://github.com/cockroachdb/cockroach/pull/130523 -[#130525]: https://github.com/cockroachdb/cockroach/pull/130525 \ No newline at end of file +- Internally issued queries that are not initiated within a [SQL session]({% link v23.2/show-sessions.md %}) no longer respect a [statement timeout]({% link v23.2/session-variables.md %}#statement-timeout). This includes: background [jobs]({% link v23.2/show-jobs.md %}), queries issued by the [DB Console]({% link v23.2/ui-overview.md %}) that perform introspection, and the [Cloud SQL shell]({% link cockroachcloud/sql-shell.md %}). #130525 +- Fixed a rare bug where a [lease transfer]({% link v23.2/architecture/replication-layer.md %}#leases) could lead to a `side-transport update saw closed timestamp regression` panic. 
The bug could occur when a node was [overloaded]({% link v23.2/ui-overload-dashboard.md %}) and failing to heartbeat its [node liveness]({% link v23.2/cluster-setup-troubleshooting.md %}#node-liveness-issues) record. #130523 +- Resolved a concerning [log]({% link v23.2/logging-overview.md %}) message: `expiration of liveness record ... is not greater than expiration of the previous lease ... after liveness heartbeat`. This message is no longer possible. #130523 diff --git a/src/current/_includes/releases/v23.2/v23.2.12.md b/src/current/_includes/releases/v23.2/v23.2.12.md index 81aef686289..b8361745658 100644 --- a/src/current/_includes/releases/v23.2/v23.2.12.md +++ b/src/current/_includes/releases/v23.2/v23.2.12.md @@ -6,72 +6,50 @@ Release Date: September 25, 2024

{{ site.data.products.enterprise }} edition changes

-- Added a `changefeed.protect_timestamp.lag` metric, which controls how much the changefeed [protected timestamp (PTS)]({% link v23.2/protect-changefeed-data.md %}) should lag behind the [high-water mark]({% link v23.2/how-does-an-enterprise-changefeed-work.md %}). A changefeed now only updates its PTS if `changefeed.protect_timestamp.lag` has passed between the last PTS and the changefeed high-water mark. [#129685][#129685] -- [`SHOW CHANGEFEED JOB`]({% link v23.2/show-jobs.md %}#show-changefeed-jobs), [`SHOW CHANGEFEED JOBS`]({% link v23.2/show-jobs.md %}#show-changefeed-jobs), and [`SHOW JOBS`]({% link v23.2/show-jobs.md %}) no longer expose user sensitive information like `client_key`. [#122681][#122681] +- Added a `changefeed.protect_timestamp.lag` metric, which controls how much the changefeed [protected timestamp (PTS)]({% link v23.2/protect-changefeed-data.md %}) should lag behind the [high-water mark]({% link v23.2/how-does-an-enterprise-changefeed-work.md %}). A changefeed now only updates its PTS if `changefeed.protect_timestamp.lag` has passed between the last PTS and the changefeed high-water mark. #129685 +- [`SHOW CHANGEFEED JOB`]({% link v23.2/show-jobs.md %}#show-changefeed-jobs), [`SHOW CHANGEFEED JOBS`]({% link v23.2/show-jobs.md %}#show-changefeed-jobs), and [`SHOW JOBS`]({% link v23.2/show-jobs.md %}) no longer expose user sensitive information like `client_key`. #122681

SQL language changes

-- The [session setting]({% link v23.2/session-variables.md %}) `plan_cache_mode=force_generic_plan` can now be used to force prepared statements to use a query plan that is [optimized]({% link v23.2/cost-based-optimizer.md %}) once and reused in future executions without re-optimization, as long as the plan does not become stale due to [schema changes]({% link v23.2/online-schema-changes.md %}) or a collection of new [table statistics]({% link v23.2/show-statistics.md %}). The setting takes effect during [`EXECUTE`]({% link v23.2/sql-grammar.md %}#execute_stmt) commands. [`EXPLAIN ANALYZE`]({% link v23.2/explain-analyze.md %}) now includes a `plan type` field. If a generic query plan is optimized for the current execution, the `plan type` will be `generic, re-optimized`. If a generic query plan is reused for the current execution without performing optimization, the `plan type` will be `generic, reused`. Otherwise, the `plan type` will be `custom`. [#128100][#128100] -- The [session setting]({% link v23.2/session-variables.md %}) `plan_cache_mode=auto` can now be used to instruct the [cost-based optimizer]({% link v23.2/cost-based-optimizer.md %}) to automatically determine whether to use "custom" or "generic" query plans for the execution of a prepared statement. Custom query plans are optimized on every execution, while generic plans are optimized once and reused on future executions as-is. Generic query plans are beneficial in cases where query optimization contributes significant overhead to the total cost of executing a query. 
[#128100][#128100] +- The [session setting]({% link v23.2/session-variables.md %}) `plan_cache_mode=force_generic_plan` can now be used to force prepared statements to use a query plan that is [optimized]({% link v23.2/cost-based-optimizer.md %}) once and reused in future executions without re-optimization, as long as the plan does not become stale due to [schema changes]({% link v23.2/online-schema-changes.md %}) or a collection of new [table statistics]({% link v23.2/show-statistics.md %}). The setting takes effect during [`EXECUTE`]({% link v23.2/sql-grammar.md %}#execute_stmt) commands. [`EXPLAIN ANALYZE`]({% link v23.2/explain-analyze.md %}) now includes a `plan type` field. If a generic query plan is optimized for the current execution, the `plan type` will be `generic, re-optimized`. If a generic query plan is reused for the current execution without performing optimization, the `plan type` will be `generic, reused`. Otherwise, the `plan type` will be `custom`. #128100 +- The [session setting]({% link v23.2/session-variables.md %}) `plan_cache_mode=auto` can now be used to instruct the [cost-based optimizer]({% link v23.2/cost-based-optimizer.md %}) to automatically determine whether to use "custom" or "generic" query plans for the execution of a prepared statement. Custom query plans are optimized on every execution, while generic plans are optimized once and reused on future executions as-is. Generic query plans are beneficial in cases where query optimization contributes significant overhead to the total cost of executing a query. #128100

Operational changes

- There are now structured logging events that report connection breakage during [node shutdown]({% link v23.2/node-shutdown.md %}). Previously, these logs existed but were unstructured. These logs appear in the `OPS` [logging channel]({% link v23.2/logging-overview.md %}#logging-channels). There are two new events: - The `node_shutdown_connection_timeout` event is logged after the timeout defined by the [cluster setting `server.shutdown.connections.timeout`]({% link v23.2/cluster-settings.md %}#setting-server-shutdown-connection-wait) transpires, if there are still open SQL connections. - - The `node_shutdown_transaction_timeout` event is logged after the timeout defined by the [cluster setting `server.shutdown.transactions.timeout` transpires]({% link v23.2/cluster-settings.md %}#setting-server-shutdown-query-wait), if there are still open [transactions]({% link v23.2/transactions.md %}) on those SQL connections. [#128710][#128710] -- Added the `ranges.decommissioning` metric, representing the number of [ranges]({% link v23.2/architecture/overview.md %}#architecture-range) which have a [replica]({% link v23.2/architecture/overview.md %}#architecture-replica) on a [decommissioning node]({% link v23.2/cockroach-node.md %}#decommission-nodes). [#130413][#130413] + - The `node_shutdown_transaction_timeout` event is logged after the timeout defined by the [cluster setting `server.shutdown.transactions.timeout` transpires]({% link v23.2/cluster-settings.md %}#setting-server-shutdown-query-wait), if there are still open [transactions]({% link v23.2/transactions.md %}) on those SQL connections. #128710 +- Added the `ranges.decommissioning` metric, representing the number of [ranges]({% link v23.2/architecture/overview.md %}#architecture-range) which have a [replica]({% link v23.2/architecture/overview.md %}#architecture-replica) on a [decommissioning node]({% link v23.2/cockroach-node.md %}#decommission-nodes). 
#130413 - Added three new network tracking metrics: - `rpc.connection.connected` is the number of rRPC TCP-level connections established to remote nodes. - `rpc.client.bytes.egress` is the number of TCP bytes sent via gRPC on connections we initiated. - - `rpc.client.bytes.ingress` is the number of TCP bytes received via gRPC on connections we initiated. [cockroachdb/cockroach#130712][#130712] -- Added a new configuration parameter `server.cidr_mapping_url`, which maps IPv4 CIDR blocks to arbitrary tag names. [#130712][#130712] -- Modified metrics `sql.bytesin` and `sql.bytesout` to be aggregation metrics If the [`server.child_metrics.enabled`]({% link v23.2/cluster-settings.md %}#setting-server-child-metrics-enabled) cluster setting is enabled. [#130712][#130712] + - `rpc.client.bytes.ingress` is the number of TCP bytes received via gRPC on connections we initiated. #130712 +- Added a new configuration parameter `server.cidr_mapping_url`, which maps IPv4 CIDR blocks to arbitrary tag names. #130712 +- Modified metrics `sql.bytesin` and `sql.bytesout` to be aggregation metrics if the [`server.child_metrics.enabled`]({% link v23.2/cluster-settings.md %}#setting-server-child-metrics-enabled) cluster setting is enabled. #130712 - Added two network metrics, `changefeed.network.bytes_in` and `changefeed.network.bytes_out`. These metrics track the number of bytes sent by individual [changefeeds]({% link v23.2/create-and-configure-changefeeds.md %}) to the following sinks: - [Kafka sinks]({% link v23.2/changefeed-sinks.md %}#kafka). If the [`server.child_metrics.enabled`]({% link v23.2/cluster-settings.md %}#setting-server-child-metrics-enabled) cluster setting is enabled, the metric will have a `kafka` label. - [Webhook sinks]({% link v23.2/changefeed-sinks.md %}#webhook-sink). 
If the [`server.child_metrics.enabled`]({% link v23.2/cluster-settings.md %}#setting-server-child-metrics-enabled) cluster setting is enabled, the metric will have a `webhook` label. - [Pub/Sub sinks]({% link v23.2/changefeed-sinks.md %}#google-cloud-pub-sub). If the [`server.child_metrics.enabled`]({% link v23.2/cluster-settings.md %}#setting-server-child-metrics-enabled) cluster setting is enabled, the metric will have a `pubsub` label. - - [SQL sink]({% link v23.2/changefeed-for.md %}). If the [`server.child_metrics.enabled`]({% link v23.2/cluster-settings.md %}#setting-server-child-metrics-enabled) cluster setting is enabled, the metric will have a `sql` label. [#130712][#130712] + - [SQL sink]({% link v23.2/changefeed-for.md %}). If the [`server.child_metrics.enabled`]({% link v23.2/cluster-settings.md %}#setting-server-child-metrics-enabled) cluster setting is enabled, the metric will have a `sql` label. #130712

DB Console changes

-- The [DB Console]({% link v23.2/ui-overview-dashboard.md %}) time-series graphs now have hover behavior that focuses on individual lines and shows values under the mouse pointer. [cockroachdb/cockroach#128864][#128864] -- Users with the [`VIEWACTIVITY` privilege]({% link v23.2/security-reference/authorization.md %}#viewactivity) can download [statement bundles]({% link v23.2/explain-analyze.md %}) from [DB Console]({% link v23.2/ui-overview.md %}). [#129502][#129502] -- The [DB Console]({% link v23.2/ui-overview.md %}) now displays an alert message when the [license is expired]({% link v23.2/licensing-faqs.md %}#monitor-for-license-expiry) or if there are fewer than 15 days left before the license expires. [#130509][#130509] -- The [DB Console]({% link v23.2/ui-overview.md %}) will now show a notification alerting customers without an Enterprise license to [upcoming license changes](https://www.cockroachlabs.com/enterprise-license-update/) with a link to more information. [#130509][#130509] +- The [DB Console]({% link v23.2/ui-overview-dashboard.md %}) time-series graphs now have hover behavior that focuses on individual lines and shows values under the mouse pointer. #128864 +- Users with the [`VIEWACTIVITY` privilege]({% link v23.2/security-reference/authorization.md %}#viewactivity) can download [statement bundles]({% link v23.2/explain-analyze.md %}) from [DB Console]({% link v23.2/ui-overview.md %}). #129502 +- The [DB Console]({% link v23.2/ui-overview.md %}) now displays an alert message when the [license is expired]({% link v23.2/licensing-faqs.md %}#monitor-for-license-expiry) or if there are fewer than 15 days left before the license expires. #130509 +- The [DB Console]({% link v23.2/ui-overview.md %}) will now show a notification alerting customers without an Enterprise license to [upcoming license changes](https://www.cockroachlabs.com/enterprise-license-update/) with a link to more information. #130509

Bug fixes

-- Fixed a bug where declarative and legacy [schema changes]({% link v23.2/online-schema-changes.md %}) were incorrectly allowed to be executed concurrently, which could lead to failing or hung schema change jobs. [#128838][#128838] -- Fixed a bug that caused errors like `ERROR: column 'crdb_internal_idx_expr' does not exist` when accessing a table with an [expression index]({% link v23.2/expression-indexes.md %}) where the expression evaluates to an [`ENUM`]({% link v23.2/enum.md %}) type, e.g., `CREATE INDEX ON t ((col::an_enum))`. [#129092][#129092] -- [Function]({% link v23.2/user-defined-functions.md %}) input parameters can no longer have the `VOID` type. [#129281][#129281] -- Internally issued queries that are not initiated within a [SQL session]({% link v23.2/show-sessions.md %}) no longer respect a statement timeout. This includes: [background jobs]({% link v23.2/show-jobs.md %}), queries issued by the [DB Console]({% link v23.2/ui-overview.md %}) that perform introspection, and the {{ site.data.products.cloud }} [SQL shell]({% link cockroachcloud/sql-shell.md %}). [#129517][#129517] -- Fixed a bug where the `schema_locked` [storage parameter]({% link v23.2/with-storage-parameter.md %}) did not prevent a table from being referenced by a [foreign key]({% link v23.2/foreign-key.md %}). [#129753][#129753] -- Users with the [`VIEWACTIVITY` SQL privilege]({% link v23.2/security-reference/authorization.md %}#viewactivity) can now request, view, and cancel [statement bundles]({% link v23.2/explain-analyze.md %}) in the [DB Console]({% link v23.2/ui-overview.md %}). [#129803][#129803] -- Fixed a bug where a [lease transfer]({% link v23.2/architecture/replication-layer.md %}#epoch-based-leases-table-data) could lead to a panic with the message `side-transport update saw closed timestamp regression`. 
The bug could occur when a node was overloaded and failing to [heartbeat its node liveness record]({% link v23.2/cluster-setup-troubleshooting.md %}#node-liveness-issues). [#129808][#129808] -- The [log message]({% link v23.2/logging-overview.md %}) `expiration of liveness record ... is not greater than expiration of the previous lease ... after liveness heartbeat` is no longer generated. [#129808][#129808] -- Fixed a bug where the `require_explicit_primary_keys` [session variable]({% link v23.2/session-variables.md %}) would prevent all [`CREATE TABLE`]({% link v23.2/create-table.md %}) statements from working. [#129906][#129906] -- Fixed a slow-building memory leak when using [Kerberos authentication]({% link v23.2/gssapi_authentication.md %}). [#130317][#130317] -- Fixed a potential memory leak in [changefeeds using a cloud storage sink]({% link v23.2/changefeed-examples.md %}#create-a-changefeed-connected-to-a-cloud-storage-sink). The memory leak could occur if both [`changefeed.fast_gzip.enabled`]({% link v23.2/cluster-settings.md %}#setting-changefeed-fast-gzip-enabled) and `changefeed.cloudstorage.async_flush.enabled` were `true`, and the changefeed received an error while attempting to write to the cloud storage sink. [#130624][#130624] +- Fixed a bug where declarative and legacy [schema changes]({% link v23.2/online-schema-changes.md %}) were incorrectly allowed to be executed concurrently, which could lead to failing or hung schema change jobs. #128838 +- Fixed a bug that caused errors like `ERROR: column 'crdb_internal_idx_expr' does not exist` when accessing a table with an [expression index]({% link v23.2/expression-indexes.md %}) where the expression evaluates to an [`ENUM`]({% link v23.2/enum.md %}) type, e.g., `CREATE INDEX ON t ((col::an_enum))`. #129092 +- [Function]({% link v23.2/user-defined-functions.md %}) input parameters can no longer have the `VOID` type. 
#129281 +- Internally issued queries that are not initiated within a [SQL session]({% link v23.2/show-sessions.md %}) no longer respect a statement timeout. This includes: [background jobs]({% link v23.2/show-jobs.md %}), queries issued by the [DB Console]({% link v23.2/ui-overview.md %}) that perform introspection, and the {{ site.data.products.cloud }} [SQL shell]({% link cockroachcloud/sql-shell.md %}). #129517 +- Fixed a bug where the `schema_locked` [storage parameter]({% link v23.2/with-storage-parameter.md %}) did not prevent a table from being referenced by a [foreign key]({% link v23.2/foreign-key.md %}). #129753 +- Users with the [`VIEWACTIVITY` SQL privilege]({% link v23.2/security-reference/authorization.md %}#viewactivity) can now request, view, and cancel [statement bundles]({% link v23.2/explain-analyze.md %}) in the [DB Console]({% link v23.2/ui-overview.md %}). #129803 +- Fixed a bug where a [lease transfer]({% link v23.2/architecture/replication-layer.md %}#epoch-based-leases-table-data) could lead to a panic with the message `side-transport update saw closed timestamp regression`. The bug could occur when a node was overloaded and failing to [heartbeat its node liveness record]({% link v23.2/cluster-setup-troubleshooting.md %}#node-liveness-issues). #129808 +- The [log message]({% link v23.2/logging-overview.md %}) `expiration of liveness record ... is not greater than expiration of the previous lease ... after liveness heartbeat` is no longer generated. #129808 +- Fixed a bug where the `require_explicit_primary_keys` [session variable]({% link v23.2/session-variables.md %}) would prevent all [`CREATE TABLE`]({% link v23.2/create-table.md %}) statements from working. #129906 +- Fixed a slow-building memory leak when using [Kerberos authentication]({% link v23.2/gssapi_authentication.md %}). 
#130317 +- Fixed a potential memory leak in [changefeeds using a cloud storage sink]({% link v23.2/changefeed-examples.md %}#create-a-changefeed-connected-to-a-cloud-storage-sink). The memory leak could occur if both [`changefeed.fast_gzip.enabled`]({% link v23.2/cluster-settings.md %}#setting-changefeed-fast-gzip-enabled) and `changefeed.cloudstorage.async_flush.enabled` were `true`, and the changefeed received an error while attempting to write to the cloud storage sink. #130624 -[#122681]: https://github.com/cockroachdb/cockroach/pull/122681 -[#128100]: https://github.com/cockroachdb/cockroach/pull/128100 -[#128382]: https://github.com/cockroachdb/cockroach/pull/128382 -[#128710]: https://github.com/cockroachdb/cockroach/pull/128710 -[#128835]: https://github.com/cockroachdb/cockroach/pull/128835 -[#128838]: https://github.com/cockroachdb/cockroach/pull/128838 -[#128864]: https://github.com/cockroachdb/cockroach/pull/128864 -[#129092]: https://github.com/cockroachdb/cockroach/pull/129092 -[#129281]: https://github.com/cockroachdb/cockroach/pull/129281 -[#129502]: https://github.com/cockroachdb/cockroach/pull/129502 -[#129517]: https://github.com/cockroachdb/cockroach/pull/129517 -[#129685]: https://github.com/cockroachdb/cockroach/pull/129685 -[#129753]: https://github.com/cockroachdb/cockroach/pull/129753 -[#129803]: https://github.com/cockroachdb/cockroach/pull/129803 -[#129808]: https://github.com/cockroachdb/cockroach/pull/129808 -[#129906]: https://github.com/cockroachdb/cockroach/pull/129906 -[#130045]: https://github.com/cockroachdb/cockroach/pull/130045 -[#130317]: https://github.com/cockroachdb/cockroach/pull/130317 -[#130413]: https://github.com/cockroachdb/cockroach/pull/130413 -[#130509]: https://github.com/cockroachdb/cockroach/pull/130509 -[#130624]: https://github.com/cockroachdb/cockroach/pull/130624 -[#130712]: https://github.com/cockroachdb/cockroach/pull/130712 diff --git a/src/current/_includes/releases/v23.2/v23.2.13.md 
b/src/current/_includes/releases/v23.2/v23.2.13.md index 26f4a561790..d4e7e5c6a60 100644 --- a/src/current/_includes/releases/v23.2/v23.2.13.md +++ b/src/current/_includes/releases/v23.2/v23.2.13.md @@ -6,7 +6,7 @@ Release Date: October 17, 2024

{{ site.data.products.enterprise }} edition changes

-- The description for the [cluster setting]({% link v23.2/cluster-settings.md %}) `changefeed.sink_io_workers` now lists all [changefeed sinks]({% link v23.2/changefeed-sinks.md %}) that support the setting. [#130372][#130372] +- The description for the [cluster setting]({% link v23.2/cluster-settings.md %}) `changefeed.sink_io_workers` now lists all [changefeed sinks]({% link v23.2/changefeed-sinks.md %}) that support the setting. #130372 - Network metrics have been added for the following [changefeed sinks]({% link v23.2/changefeed-sinks.md %}): - Added two network metrics, `changefeed.network.bytes_in` and `changefeed.network.bytes_out`. These metrics track the number of bytes sent by individual [changefeeds]({% link v23.2/change-data-capture-overview.md %}) to the following sinks: - [Kafka sinks]({% link v23.2/changefeed-sinks.md %}#kafka). If child metrics are enabled, the metric will have a `kafka` label. @@ -14,78 +14,58 @@ Release Date: October 17, 2024 - [Pub/Sub sinks]({% link v23.2/changefeed-sinks.md %}#google-cloud-pub-sub). If child metrics are enabled, the metric will have a `pubsub` label. - [SQL sink]({% link v23.2/changefeed-for.md %}). If child metrics are enabled, the metric will have a `sql` label. - [#130664][#130664] + #130664 -- The new [metric]({% link v23.2/metrics.md %}) `changefeed.total_ranges` allows observation of the number of ranges that are watched by a changefeed aggregator. It uses the same polling interval as `changefeed.lagging_ranges`, which is controlled by the changefeed option `lagging_ranges_polling_interval`. [#130984][#130984] +- The new [metric]({% link v23.2/metrics.md %}) `changefeed.total_ranges` allows observation of the number of ranges that are watched by a changefeed aggregator. It uses the same polling interval as `changefeed.lagging_ranges`, which is controlled by the changefeed option `lagging_ranges_polling_interval`. 
#130984 - The following groups of [metrics]({% link v23.2/metrics.md %}) and [logs]({% link v23.2/logging.md %}) have been renamed to include the buffer they are associated with. The previous metrics are still maintained for backward compatibility. - `changefeed.buffer_entries.*` - `changefeed.buffer_entries_mem.*` - `changefeed.buffer_pushback_nanos.*` - [#131417][#131417] -- Added timers and corresponding [metrics]({% link v24.2/metrics.md %} for key parts of the [changefeed]({% link v24.2/change-data-capture-overview.md %}) pipeline to help debug issues with feeds. The `changefeed.stage.{stage}.latency` metrics now emit latency histograms for each stage. The metrics respect the changefeed `scope` label to debug a specific feed. [#131428][#131428] + #131417 +- Added timers and corresponding [metrics]({% link v24.2/metrics.md %}) for key parts of the [changefeed]({% link v24.2/change-data-capture-overview.md %}) pipeline to help debug issues with feeds. The `changefeed.stage.{stage}.latency` metrics now emit latency histograms for each stage. The metrics respect the changefeed `scope` label to debug a specific feed. #131428

Operational changes

-- The new [metric]({% link v23.2/metrics.md %}) `ranges.decommissioning` shows the number of ranges with a replica on a [decommissioning node]({% link v23.2/node-shutdown.md %}). [#130251][#130251] +- The new [metric]({% link v23.2/metrics.md %}) `ranges.decommissioning` shows the number of ranges with a replica on a [decommissioning node]({% link v23.2/node-shutdown.md %}). #130251 - The following new [metrics]({% link v23.2/metrics.md %}) show the number of RPC TCP connections established to remote nodes: - `rpc.connection.connected`: the number of gRPC TCP level connections established to remote nodes. - `rpc.client.bytes.egress`: the number of TCP bytes sent over gRPC on connections initiated by the cluster. - `rpc.client.bytes.ingress`: the number of TCP bytes received over gRPC on connections initiated by the cluster. - [#130521][#130521] -- Added a new configuration parameter, `server.cidr_mapping_url`, which maps IPv4 CIDR blocks to arbitrary tag names. [#130528][#130528] -- The [metrics]({% link v23.2/metrics.md %}) `sql.bytesin` and `sql.bytesout` are now aggregate metrics if child metrics are enabled. [#130528][#130528] + #130521 +- Added a new configuration parameter, `server.cidr_mapping_url`, which maps IPv4 CIDR blocks to arbitrary tag names. #130528 +- The [metrics]({% link v23.2/metrics.md %}) `sql.bytesin` and `sql.bytesout` are now aggregate metrics if child metrics are enabled. #130528 - The following new [metrics]({% link v23.2/metrics.md %}) track the number of bytes sent by an individual [changefeed]({% link v23.2/change-data-capture-overview.md %}) to each sink: - `changefeed.network.bytes_in` - `changefeed.network.bytes_out` - [#130664][#130664] -- You can now set the log format for the `STDERR` changefeed sink using the `format` field in the `stderr` sink section of the [logging]({% link v23.2/logging.md %}) configuration. 
[#131533][#131533] + #130664 +- You can now set the log format for the `STDERR` changefeed sink using the `format` field in the `stderr` sink section of the [logging]({% link v23.2/logging.md %}) configuration. #131533

DB Console changes

-- The [DB Console]({% link v24.2/ui-overview.md %}) now shows a notification if the cluster has no Enterprise license set. Refer to [upcoming license changes](https://www.cockroachlabs.com/enterprise-license-update/) for more information. [#130425][#130425] +- The [DB Console]({% link v24.2/ui-overview.md %}) now shows a notification if the cluster has no Enterprise license set. Refer to [upcoming license changes](https://www.cockroachlabs.com/enterprise-license-update/) for more information. #130425

Bug fixes

-- Fixed a bug where the command `SHOW CLUSTER SETTING FOR VIRTUAL CLUSTER` would erroneously return `NULL` for some settings. [#128782][#128782] -- Fixed a bug where a node could fail to start with the error `could not insert session ...: unexpected value` if an ambiguous result error occurred while inserting data into the `sqlliveness` table. [#130343][#130343] -- Fixed a bug that could prevent [upgrade finalization]({% link v23.2/upgrade-cockroach-version.md %}) due to the upgrade pre-condition for repairing descriptor corruption. [#130519][#130519] -- Fixed a rare bug where a lease transfer could lead to a `side-transport update saw closed timestamp regression` panic. The bug could occur when a node was overloaded and failing to heartbeat its node liveness record. [#130790][#130790] -- Fixed a bug that could result in the erroneous log message `expiration of liveness record ... is not greater than expiration of the previous lease ... after liveness heartbeat`. [#130790][#130790] -- Fixed a bug where queries that are not initiated within a SQL session could fail to respect a statement timeout, including [background jobs]({% link v23.2/show-jobs.md %}), queries issued by the [DB Console]({% link v23.2/ui-overview.md %}) that perform introspection, and the [CockroachDB {{ site.data.products.cloud }} SQL Shell]({% link cockroachcloud/sql-shell.md %}). [#130790][#130790] -- Fixed a bug where a connection could be incorrectly dropped if the client was attempting to change a schema at the same time that the same schema's objects were being dropped. [#130964][#130964] -- Fixed a bug that could cause the following error to be logged when executing a query under [READ COMMITTED]({% link v23.2/read-committed.md %}) isolation if it involved a table with `NOT NULL` virtual columns: `internal error: Non-nullable column ...`. [#131065][#131065] -- Fixed a potential memory leak in [changefeeds]({% link v23.2/change-data-capture-overview.md %}) that use a cloud storage sink. 
The memory leak could occur if both of the [cluster settings]({% link v23.2/cluster-settings.md %}) `changefeed.fast_gzip.enabled` and `changefeed.cloudstorage.async_flush.enabled` were `true` **and** if the changefeed received an error while attempting to write to the sink. [#130614][#130614] -- Fixed a bug introduced in v23.2.6, where [statistics]({% link v23.2/create-statistics.md %}) forecasting could predict a result of zero rows for a downward-trending statistic when `sql.stats.forecasts.max_decrease` is `false`. The setting is now enabled (set to `1/3` by default). [#131128][#131128] +- Fixed a bug where the command `SHOW CLUSTER SETTING FOR VIRTUAL CLUSTER` would erroneously return `NULL` for some settings. #128782 +- Fixed a bug where a node could fail to start with the error `could not insert session ...: unexpected value` if an ambiguous result error occurred while inserting data into the `sqlliveness` table. #130343 +- Fixed a bug that could prevent [upgrade finalization]({% link v23.2/upgrade-cockroach-version.md %}) due to the upgrade pre-condition for repairing descriptor corruption. #130519 +- Fixed a rare bug where a lease transfer could lead to a `side-transport update saw closed timestamp regression` panic. The bug could occur when a node was overloaded and failing to heartbeat its node liveness record. #130790 +- Fixed a bug that could result in the erroneous log message `expiration of liveness record ... is not greater than expiration of the previous lease ... after liveness heartbeat`. #130790 +- Fixed a bug where queries that are not initiated within a SQL session could fail to respect a statement timeout, including [background jobs]({% link v23.2/show-jobs.md %}), queries issued by the [DB Console]({% link v23.2/ui-overview.md %}) that perform introspection, and the [CockroachDB {{ site.data.products.cloud }} SQL Shell]({% link cockroachcloud/sql-shell.md %}). 
#130790 +- Fixed a bug where a connection could be incorrectly dropped if the client was attempting to change a schema at the same time that the same schema's objects were being dropped. #130964 +- Fixed a bug that could cause the following error to be logged when executing a query under [READ COMMITTED]({% link v23.2/read-committed.md %}) isolation if it involved a table with `NOT NULL` virtual columns: `internal error: Non-nullable column ...`. #131065 +- Fixed a potential memory leak in [changefeeds]({% link v23.2/change-data-capture-overview.md %}) that use a cloud storage sink. The memory leak could occur if both of the [cluster settings]({% link v23.2/cluster-settings.md %}) `changefeed.fast_gzip.enabled` and `changefeed.cloudstorage.async_flush.enabled` were `true` **and** if the changefeed received an error while attempting to write to the sink. #130614 +- Fixed a bug introduced in v23.2.6, where [statistics]({% link v23.2/create-statistics.md %}) forecasting could predict a result of zero rows for a downward-trending statistic when `sql.stats.forecasts.max_decrease` is `false`. The setting is now enabled (set to `1/3` by default). #131128 - Fixed a bug introduced in v23.1 that can cause incorrect results in the following scenario: 1. The query contains a correlated subquery. 1. The correlated subquery has a `GroupBy` or `DistinctOn` operator with an outer-column reference in its input. 1. The correlated subquery is in the input of a `SELECT` or `JOIN` clause that has a filter that sets the outer-column reference equal to an inner column that is in the input of the grouping operator. 1. The set of grouping columns does not include the replacement column explicitly. - [#130988][#130988] -- Fixed a bug where [AWS S3 and HTTP client configurations]({% link v23.2/cloud-storage-authentication.md %}) were not considered when implicit authentication was used. 
[#131201][#131201] -- Fixed a bug that could prevent a [changefeed]({% link v23.2/change-data-capture-overview.md %}) from resuming from a prolonged paused state. [#130919][#130919] - -[#128782]: https://github.com/cockroachdb/cockroach/pull/128782 -[#130251]: https://github.com/cockroachdb/cockroach/pull/130251 -[#130343]: https://github.com/cockroachdb/cockroach/pull/130343 -[#130372]: https://github.com/cockroachdb/cockroach/pull/130372 -[#130425]: https://github.com/cockroachdb/cockroach/pull/130425 -[#130519]: https://github.com/cockroachdb/cockroach/pull/130519 -[#130521]: https://github.com/cockroachdb/cockroach/pull/130521 -[#130528]: https://github.com/cockroachdb/cockroach/pull/130528 -[#130614]: https://github.com/cockroachdb/cockroach/pull/130614 -[#130664]: https://github.com/cockroachdb/cockroach/pull/130664 -[#130790]: https://github.com/cockroachdb/cockroach/pull/130790 -[#130919]: https://github.com/cockroachdb/cockroach/pull/130919 -[#130984]: https://github.com/cockroachdb/cockroach/pull/130984 -[#130988]: https://github.com/cockroachdb/cockroach/pull/130988 -[#131065]: https://github.com/cockroachdb/cockroach/pull/131065 -[#131128]: https://github.com/cockroachdb/cockroach/pull/131128 -[#131201]: https://github.com/cockroachdb/cockroach/pull/131201 -[#131417]: https://github.com/cockroachdb/cockroach/pull/131417 -[#131428]: https://github.com/cockroachdb/cockroach/pull/131428 -[#131533]: https://github.com/cockroachdb/cockroach/pull/131533 + #130988 +- Fixed a bug where [AWS S3 and HTTP client configurations]({% link v23.2/cloud-storage-authentication.md %}) were not considered when implicit authentication was used. #131201 +- Fixed a bug that could prevent a [changefeed]({% link v23.2/change-data-capture-overview.md %}) from resuming from a prolonged paused state. 
#130919 + diff --git a/src/current/_includes/releases/v23.2/v23.2.14.md b/src/current/_includes/releases/v23.2/v23.2.14.md index d4164b123f7..5cf6566506e 100644 --- a/src/current/_includes/releases/v23.2/v23.2.14.md +++ b/src/current/_includes/releases/v23.2/v23.2.14.md @@ -6,6 +6,5 @@ Release Date: October 31, 2024

General changes

-- Added internal client name options to distinguish backup data transfer bytes from those of other clients, such as changefeeds, for updated CockroachDB Cloud [billing metrics](https://www.cockroachlabs.com/docs/cockroachcloud/costs). [#133753][#133753] +- Added internal client name options to distinguish backup data transfer bytes from those of other clients, such as changefeeds, for updated CockroachDB Cloud [billing metrics](https://www.cockroachlabs.com/docs/cockroachcloud/costs). #133753 -[#133753]: https://github.com/cockroachdb/cockroach/pull/133753 diff --git a/src/current/_includes/releases/v23.2/v23.2.15.md b/src/current/_includes/releases/v23.2/v23.2.15.md index 2c9b45cbc19..ea6011a5d23 100644 --- a/src/current/_includes/releases/v23.2/v23.2.15.md +++ b/src/current/_includes/releases/v23.2/v23.2.15.md @@ -6,6 +6,4 @@ Release Date: November 15, 2024

Performance improvements

-- Reduced the write-amplification impact of rebalances by splitting snapshot `sstable` files before ingesting them into Pebble. [#135123][#135123] - -[#135123]: https://github.com/cockroachdb/cockroach/pull/135123 \ No newline at end of file +- Reduced the write-amplification impact of rebalances by splitting snapshot `sstable` files before ingesting them into Pebble. #135123 diff --git a/src/current/_includes/releases/v23.2/v23.2.16.md b/src/current/_includes/releases/v23.2/v23.2.16.md index 25346b7c0df..81ce473af88 100644 --- a/src/current/_includes/releases/v23.2/v23.2.16.md +++ b/src/current/_includes/releases/v23.2/v23.2.16.md @@ -6,83 +6,44 @@ Release Date: November 18, 2024

General changes

-- Changed the license `cockroach` is distributed under to the new CockroachDB Software License (CSL). [#131705][#131705] [#131927][#131927] [#131933][#131933] [#131981][#131981] [#131988][#131988] [#131994][#131994] [#131992][#131992] [#132000][#132000] [#132001][#132001] [#131999][#131999] [#132053][#132053] [#132803][#132803] [#132781][#132781] -- The cluster setting `diagnostics.reporting.enabled` is now ignored if the cluster has a Enterprise Trial or Enterprise Free license, or if the reporting job is unable to load any license at all. [#132461][#132461] -- Added the sink error metric (`changefeed.sink_errors`) and expanded the reporting of the internal retries metric (`changefeed.internal_retry_message_count`) to all changefeed sinks that perform internal retries. [#132569][#132569] -- Allowed access to DB console APIs via JWT, which can be supplied as a Bearer token in the Authorization header. [#133240][#133240] +- Changed the license `cockroach` is distributed under to the new CockroachDB Software License (CSL). #131705 #131927 #131933 #131981 #131988 #131994 #131992 #132000 #132001 #131999 #132053 #132803 #132781 +- The cluster setting `diagnostics.reporting.enabled` is now ignored if the cluster has an Enterprise Trial or Enterprise Free license, or if the reporting job is unable to load any license at all. #132461 +- Added the sink error metric (`changefeed.sink_errors`) and expanded the reporting of the internal retries metric (`changefeed.internal_retry_message_count`) to all changefeed sinks that perform internal retries. #132569 +- Allowed access to DB console APIs via JWT, which can be supplied as a Bearer token in the Authorization header. #133240

DB Console changes

-- DB Console will reflect any throttling behavior from the cluster due to an expired license or missing telemetry data. Enterprise licenses are not affected. [#131859][#131859] -- Due to the inaccuracy of the **Range Count** column on the **Databases** page, and the cost incurred to fetch the correct range count for every database in a cluster, this data will no longer be visible. This data is still available via a `SHOW RANGES` query. [#133271][#133271] +- DB Console will reflect any throttling behavior from the cluster due to an expired license or missing telemetry data. Enterprise licenses are not affected. #131859 +- Due to the inaccuracy of the **Range Count** column on the **Databases** page, and the cost incurred to fetch the correct range count for every database in a cluster, this data will no longer be visible. This data is still available via a `SHOW RANGES` query. #133271

Bug fixes

-- Fixed an error that could happen if an aggregate function was used as the value in a `SET` command. [#131958][#131958] -- Added automated clean-up/validation for dropped roles inside of default privileges. [#132169][#132169] -- Fixed a bug that could cause `RESTORE` to hang after encountering transient errors from the storage layer. [#132260][#132260] -- Fixed a bug that caused incorrect evaluation of `CASE`, `COALESCE`, and `IF` expressions with branches producing fixed-width string-like types, such as `CHAR`. In addition, the `BPCHAR` type has been fixed so that it no longer incorrectly imposes a length limit of `1`. [#130898][#130898] -- Fixed a bug that could lead to incorrect results in rare cases. The bug requires a `JOIN` between two tables, with an equality between columns with equivalent, but not identical types (e.g., `OID` and `REGCLASS`). In addition, the `JOIN` must lookup an index that includes a computed column that references one of the equivalent columns. This bug has existed since before v23.1. [#132510][#132510] -- Fixed a bug that could lead to incorrect results in rare cases. The bug requires a lookup join into a table with a computed index column, where the computed column expression is composite sensitive. A composite sensitive expression can compare differently if supplied non-identical, but equivalent input values (e.g. `2.0::DECIMAL` vs `2.00::DECIMAL`). This bug has existed since before v23.1. [#132510][#132510] -- Fixed a bug where a span stats request on a mixed-version cluster resulted in an NPE. [#132683][#132683] -- The `franz-go` library has been updated to fix a potential deadlock on changefeed restarts. [#132784][#132784] -- Fixed an issue where changefeeds would fail to update protected timestamp records in the face of retryable errors. [#132774][#132774] -- Fixed a bug that could result in changefeeds using CDC queries failing due to a system table being garbage collected. 
[#131655][#131655] +- Fixed an error that could happen if an aggregate function was used as the value in a `SET` command. #131958 +- Added automated clean-up/validation for dropped roles inside of default privileges. #132169 +- Fixed a bug that could cause `RESTORE` to hang after encountering transient errors from the storage layer. #132260 +- Fixed a bug that caused incorrect evaluation of `CASE`, `COALESCE`, and `IF` expressions with branches producing fixed-width string-like types, such as `CHAR`. In addition, the `BPCHAR` type has been fixed so that it no longer incorrectly imposes a length limit of `1`. #130898 +- Fixed a bug that could lead to incorrect results in rare cases. The bug requires a `JOIN` between two tables, with an equality between columns with equivalent, but not identical types (e.g., `OID` and `REGCLASS`). In addition, the `JOIN` must lookup an index that includes a computed column that references one of the equivalent columns. This bug has existed since before v23.1. #132510 +- Fixed a bug that could lead to incorrect results in rare cases. The bug requires a lookup join into a table with a computed index column, where the computed column expression is composite sensitive. A composite sensitive expression can compare differently if supplied non-identical, but equivalent input values (e.g. `2.0::DECIMAL` vs `2.00::DECIMAL`). This bug has existed since before v23.1. #132510 +- Fixed a bug where a span stats request on a mixed-version cluster resulted in an NPE. #132683 +- The `franz-go` library has been updated to fix a potential deadlock on changefeed restarts. #132784 +- Fixed an issue where changefeeds would fail to update protected timestamp records in the face of retryable errors. #132774 +- Fixed a bug that could result in changefeeds using CDC queries failing due to a system table being garbage collected. 
#131655 - Fixed a rare bug in which an update of a primary key column that is also the only column in a separate column family can sometimes fail to update the primary index. This bug has existed since v22.2. Requirements to hit the bug are: 1. A table with multiple column families. 2. A column family containing a single primary key column. 3. That column family is not the first column family. 4. That column family existed before its column was in the primary key. 5. That column must be of type `FLOAT4/8`, `DECIMAL`, `JSON`, collated string type, or array. - 6. An update that changes that column from a composite value to a non-composite value. [#132123][#132123] -- The `proretset` column of the `pg_catalog.pg_proc` table is now properly set to `true` for set-returning builtin functions. [#132874][#132874] -- Fixed a bug in the query optimizer that could cause CockroachDB nodes to crash in rare cases. The bug could occur when a query contains a filter in the form `col IN (elem0, elem1, ..., elemN)` only when `N` is very large, e.g., 1.6+ million, and when `col` exists in a hash-sharded index or, exists in a table with an indexed, computed column dependent on `col`. [#133066][#133066] -- Users with the `admin` role can now run `ALTER DEFAULT PRIVILEGES FOR target_role ...` on any `target_role`. Previously, this could result in a privilege error, which is incorrect as admins are allowed to perform any operation. [#133069][#133069] -- `REASSIGN OWNED BY` will now transfer ownership of the `public` schema. Previously, it would always skip over the `public` schema even if it was owned by the target role. [#133069][#133069] -- Added a timer for inner changefeed sink client flushes. Fixed a bug where timers were not correctly registered with the metric system. [#133255][#133255] -- Fixed an error that could be caused by using an `AS OF SYSTEM TIME` expression that references a user-defined (or unknown) type name. 
These kinds of expressions are invalid, but previously the error was not handled properly. This will now return the correct error message. [#132453][#132453] -- Fixed a bug where backup schedules could advance a protected timestamp too early, which caused incremental backups to fail. [#131389][#131389] + 6. An update that changes that column from a composite value to a non-composite value. #132123 +- The `proretset` column of the `pg_catalog.pg_proc` table is now properly set to `true` for set-returning builtin functions. #132874 +- Fixed a bug in the query optimizer that could cause CockroachDB nodes to crash in rare cases. The bug could occur when a query contains a filter in the form `col IN (elem0, elem1, ..., elemN)` only when `N` is very large, e.g., 1.6+ million, and when `col` exists in a hash-sharded index or, exists in a table with an indexed, computed column dependent on `col`. #133066 +- Users with the `admin` role can now run `ALTER DEFAULT PRIVILEGES FOR target_role ...` on any `target_role`. Previously, this could result in a privilege error, which is incorrect as admins are allowed to perform any operation. #133069 +- `REASSIGN OWNED BY` will now transfer ownership of the `public` schema. Previously, it would always skip over the `public` schema even if it was owned by the target role. #133069 +- Added a timer for inner changefeed sink client flushes. Fixed a bug where timers were not correctly registered with the metric system. #133255 +- Fixed an error that could be caused by using an `AS OF SYSTEM TIME` expression that references a user-defined (or unknown) type name. These kinds of expressions are invalid, but previously the error was not handled properly. This will now return the correct error message. #132453 +- Fixed a bug where backup schedules could advance a protected timestamp too early, which caused incremental backups to fail. #131389

Performance improvements

-- Performance has been improved during periodic polling of table history when the `schema_locked` table storage parameter is not enabled. [#132240][#132240] -- Reduced the write-amplification impact of rebalances by splitting snapshot SSTable files into smaller ones before ingesting them into Pebble. [#134526][#134526] - -[#130898]: https://github.com/cockroachdb/cockroach/pull/130898 -[#131389]: https://github.com/cockroachdb/cockroach/pull/131389 -[#131655]: https://github.com/cockroachdb/cockroach/pull/131655 -[#131705]: https://github.com/cockroachdb/cockroach/pull/131705 -[#131859]: https://github.com/cockroachdb/cockroach/pull/131859 -[#131927]: https://github.com/cockroachdb/cockroach/pull/131927 -[#131933]: https://github.com/cockroachdb/cockroach/pull/131933 -[#131958]: https://github.com/cockroachdb/cockroach/pull/131958 -[#131973]: https://github.com/cockroachdb/cockroach/pull/131973 -[#131981]: https://github.com/cockroachdb/cockroach/pull/131981 -[#131988]: https://github.com/cockroachdb/cockroach/pull/131988 -[#131992]: https://github.com/cockroachdb/cockroach/pull/131992 -[#131994]: https://github.com/cockroachdb/cockroach/pull/131994 -[#131999]: https://github.com/cockroachdb/cockroach/pull/131999 -[#132000]: https://github.com/cockroachdb/cockroach/pull/132000 -[#132001]: https://github.com/cockroachdb/cockroach/pull/132001 -[#132053]: https://github.com/cockroachdb/cockroach/pull/132053 -[#132058]: https://github.com/cockroachdb/cockroach/pull/132058 -[#132123]: https://github.com/cockroachdb/cockroach/pull/132123 -[#132169]: https://github.com/cockroachdb/cockroach/pull/132169 -[#132240]: https://github.com/cockroachdb/cockroach/pull/132240 -[#132260]: https://github.com/cockroachdb/cockroach/pull/132260 -[#132453]: https://github.com/cockroachdb/cockroach/pull/132453 -[#132461]: https://github.com/cockroachdb/cockroach/pull/132461 -[#132510]: https://github.com/cockroachdb/cockroach/pull/132510 -[#132569]: 
https://github.com/cockroachdb/cockroach/pull/132569 -[#132683]: https://github.com/cockroachdb/cockroach/pull/132683 -[#132774]: https://github.com/cockroachdb/cockroach/pull/132774 -[#132781]: https://github.com/cockroachdb/cockroach/pull/132781 -[#132784]: https://github.com/cockroachdb/cockroach/pull/132784 -[#132803]: https://github.com/cockroachdb/cockroach/pull/132803 -[#132874]: https://github.com/cockroachdb/cockroach/pull/132874 -[#133066]: https://github.com/cockroachdb/cockroach/pull/133066 -[#133069]: https://github.com/cockroachdb/cockroach/pull/133069 -[#133240]: https://github.com/cockroachdb/cockroach/pull/133240 -[#133255]: https://github.com/cockroachdb/cockroach/pull/133255 -[#133271]: https://github.com/cockroachdb/cockroach/pull/133271 -[#134526]: https://github.com/cockroachdb/cockroach/pull/134526 \ No newline at end of file +- Performance has been improved during periodic polling of table history when the `schema_locked` table storage parameter is not enabled. #132240 +- Reduced the write-amplification impact of rebalances by splitting snapshot SSTable files into smaller ones before ingesting them into Pebble. #134526 diff --git a/src/current/_includes/releases/v23.2/v23.2.17.md b/src/current/_includes/releases/v23.2/v23.2.17.md index 29855bc2638..5822f494db5 100644 --- a/src/current/_includes/releases/v23.2/v23.2.17.md +++ b/src/current/_includes/releases/v23.2/v23.2.17.md @@ -6,58 +6,37 @@ Release Date: December 12, 2024

Security updates

-- All cluster settings that accept strings are now fully redacted when transmitted as part of Cockroach Labs' diagnostics telemetry. The payload includes a record of modified cluster settings and their values when they are not strings. If you previously applied the mitigations in Technical Advisory 133479, you can safely set the value of cluster setting `server.redact_sensitive_settings.enabled` to `false` and turn on diagnostic reporting via the `diagnostics.reporting.enabled` cluster setting without leaking sensitive cluster setting values. [#134015][#134015]
+- All cluster settings that accept strings are now fully redacted when transmitted as part of Cockroach Labs' diagnostics telemetry. The payload includes a record of modified cluster settings and their values when they are not strings. If you previously applied the mitigations in Technical Advisory 133479, you can safely set the value of cluster setting `server.redact_sensitive_settings.enabled` to `false` and turn on diagnostic reporting via the `diagnostics.reporting.enabled` cluster setting without leaking sensitive cluster setting values. #134015

General changes

-- `COCKROACH_SKIP_ENABLING_DIAGNOSTIC_REPORTING` is no longer mentioned in the `cockroach demo` command. [#134085][#134085]
+- `COCKROACH_SKIP_ENABLING_DIAGNOSTIC_REPORTING` is no longer mentioned in the `cockroach demo` command. #134085

{{ site.data.products.enterprise }} edition changes

-- Added `system.users` to the list of system tables that changefeeds protect with protected timestamps (PTS). This table is required for CDC queries. [#134233][#134233]
+- Added `system.users` to the list of system tables that changefeeds protect with protected timestamps (PTS). This table is required for CDC queries. #134233

Operational changes

-- Added a new cluster setting `ui.database_locality_metadata.enabled`, which allows operators to disable loading extended database and table region information in the DB Console's Databases and Table Details pages. This information can cause significant CPU load on large clusters with many ranges. Versions of this page from v24.3 onwards do not have this problem. If you require this data, you can use the `SHOW RANGES FROM {DATABASE| TABLE}` query via SQL to compute on-demand. [#134093][#134093]
+- Added a new cluster setting `ui.database_locality_metadata.enabled`, which allows operators to disable loading extended database and table region information in the DB Console's Databases and Table Details pages. This information can cause significant CPU load on large clusters with many ranges. Versions of this page from v24.3 onwards do not have this problem. If you require this data, you can use the `SHOW RANGES FROM {DATABASE | TABLE}` query via SQL to compute on demand. #134093

Bug fixes

-- Previously, CockroachDB could encounter an internal error of the form `interface conversion: coldata.Column is` in an edge case. This is now fixed. The bug was present in versions v22.2.13 and later, v23.1.9 and later, and v23.2 and later. [#133759][#133759] -- Fixed a bug that caused incorrect `NOT NULL` constraint violation errors on `UPSERT` and `INSERT ... ON CONFLICT ... DO UPDATE` statements when those statements updated an existing row and a subset of columns that did not include a `NOT NULL` column of the table. This bug had been present since at least v20.1.0. [#133823][#133823] -- Fixed an unhandled error that could occur when using `REVOKE ... ON SEQUENCE ... FROM user` on an object that was not a sequence. [#133707][#133707] -- Addressed a panic that could occur inside `CREATE TABLE AS` that occurred if sequence builtin expressions had invalid function overloads. [#133867][#133867] -- String constants can now be compared against collated strings. [#134114][#134114] -- Previously, when executing queries with index or lookup joins when the ordering needed to be maintained, CockroachDB in some cases could get into a pathological state which would lead to increased query latency, possibly by several orders of magnitude. This bug was introduced in v22.2 and is now fixed. [#134364][#134364] -- Addressed a bug with `DROP CASCADE` that would occasionally panic with an `un-dropped backref` message on partitioned tables. [#134523][#134523] -- Reduced the duration of partitions in the gossip network when a node crashes. This eliminates false positives in the `ranges.unavailable` metric. [#134602][#134602] -- An error message is no longer returned when a non-admin user runs `DROP ROLE IF EXISTS` on a user that does not exist. 
[#134967][#134967] -- Fixed a bug that could cause incorrect query results when the optimizer planned a lookup join on an index containing a column of type `CHAR(N)`, `VARCHAR(N)`, `BIT(N)`, `VARBIT(N)`, or `DECIMAL(M, N)`, and the query held that column constant to a single value (e.g., with an equality filter). [#135113][#135113] -- Fixed an unhandled error that would occur if `DROP SCHEMA` was executed on the `public` schema of the `system` database, or on an internal schema like `pg_catalog` or `information_schema`. [#135196][#135196] -- Fixed a bug that caused incorrect evaluation of some binary expressions involving `CHAR(N)` values and untyped string literals with trailing whitespace characters. For example, the expression `'f'::CHAR = 'f '` now correctly evaluates to `true`. [#135691][#135691] +- Previously, CockroachDB could encounter an internal error of the form `interface conversion: coldata.Column is` in an edge case. This is now fixed. The bug was present in versions v22.2.13 and later, v23.1.9 and later, and v23.2 and later. #133759 +- Fixed a bug that caused incorrect `NOT NULL` constraint violation errors on `UPSERT` and `INSERT ... ON CONFLICT ... DO UPDATE` statements when those statements updated an existing row and a subset of columns that did not include a `NOT NULL` column of the table. This bug had been present since at least v20.1.0. #133823 +- Fixed an unhandled error that could occur when using `REVOKE ... ON SEQUENCE ... FROM user` on an object that was not a sequence. #133707 +- Addressed a panic that could occur inside `CREATE TABLE AS` that occurred if sequence builtin expressions had invalid function overloads. #133867 +- String constants can now be compared against collated strings. 
#134114 +- Previously, when executing queries with index or lookup joins when the ordering needed to be maintained, CockroachDB in some cases could get into a pathological state which would lead to increased query latency, possibly by several orders of magnitude. This bug was introduced in v22.2 and is now fixed. #134364 +- Addressed a bug with `DROP CASCADE` that would occasionally panic with an `un-dropped backref` message on partitioned tables. #134523 +- Reduced the duration of partitions in the gossip network when a node crashes. This eliminates false positives in the `ranges.unavailable` metric. #134602 +- An error message is no longer returned when a non-admin user runs `DROP ROLE IF EXISTS` on a user that does not exist. #134967 +- Fixed a bug that could cause incorrect query results when the optimizer planned a lookup join on an index containing a column of type `CHAR(N)`, `VARCHAR(N)`, `BIT(N)`, `VARBIT(N)`, or `DECIMAL(M, N)`, and the query held that column constant to a single value (e.g., with an equality filter). #135113 +- Fixed an unhandled error that would occur if `DROP SCHEMA` was executed on the `public` schema of the `system` database, or on an internal schema like `pg_catalog` or `information_schema`. #135196 +- Fixed a bug that caused incorrect evaluation of some binary expressions involving `CHAR(N)` values and untyped string literals with trailing whitespace characters. For example, the expression `'f'::CHAR = 'f '` now correctly evaluates to `true`. #135691

Performance improvements

-- CockroachDB now avoids loading unnecessary file blocks shortly after a rebalance in a rare case. [#134526][#134526] [#135303][#135303] [#135577][#135577] -- Reduced the write-amplification impact of rebalances by splitting snapshot sstable files into smaller ones before ingesting them into Pebble. [#134526][#134526] [#135303][#135303] [#135577][#135577] - -[#133707]: https://github.com/cockroachdb/cockroach/pull/133707 -[#133759]: https://github.com/cockroachdb/cockroach/pull/133759 -[#133823]: https://github.com/cockroachdb/cockroach/pull/133823 -[#133867]: https://github.com/cockroachdb/cockroach/pull/133867 -[#134015]: https://github.com/cockroachdb/cockroach/pull/134015 -[#134085]: https://github.com/cockroachdb/cockroach/pull/134085 -[#134093]: https://github.com/cockroachdb/cockroach/pull/134093 -[#134114]: https://github.com/cockroachdb/cockroach/pull/134114 -[#134233]: https://github.com/cockroachdb/cockroach/pull/134233 -[#134364]: https://github.com/cockroachdb/cockroach/pull/134364 -[#134523]: https://github.com/cockroachdb/cockroach/pull/134523 -[#134526]: https://github.com/cockroachdb/cockroach/pull/134526 -[#134602]: https://github.com/cockroachdb/cockroach/pull/134602 -[#134647]: https://github.com/cockroachdb/cockroach/pull/134647 -[#134967]: https://github.com/cockroachdb/cockroach/pull/134967 -[#135113]: https://github.com/cockroachdb/cockroach/pull/135113 -[#135196]: https://github.com/cockroachdb/cockroach/pull/135196 -[#135303]: https://github.com/cockroachdb/cockroach/pull/135303 -[#135577]: https://github.com/cockroachdb/cockroach/pull/135577 -[#135691]: https://github.com/cockroachdb/cockroach/pull/135691 -[#136006]: https://github.com/cockroachdb/cockroach/pull/136006 +- CockroachDB now avoids loading unnecessary file blocks shortly after a rebalance in a rare case. #134526 #135303 #135577 +- Reduced the write-amplification impact of rebalances by splitting snapshot sstable files into smaller ones before ingesting them into Pebble. 
#134526 #135303 #135577 + diff --git a/src/current/_includes/releases/v23.2/v23.2.18.md b/src/current/_includes/releases/v23.2/v23.2.18.md index f4f9ac2546c..f9f28cf1d47 100644 --- a/src/current/_includes/releases/v23.2/v23.2.18.md +++ b/src/current/_includes/releases/v23.2/v23.2.18.md @@ -6,7 +6,6 @@ Release Date: December 26, 2024

SQL language changes

-- Added the `legacy_varchar_typing` session setting. When set to `on`, type-checking comparisons involving `VARCHAR` columns behave as they did in all previous versions. When set to `off`, type-checking of these comparisons is more strict and queries that previously succeeded may now error with the message `unsupported comparison operator`. These errors can be fixed by adding explicit type casts. The `legacy_varchar_typing` session setting is on by default. [#137945][#137945] +- Added the `legacy_varchar_typing` session setting. When set to `on`, type-checking comparisons involving `VARCHAR` columns behave as they did in all previous versions. When set to `off`, type-checking of these comparisons is more strict and queries that previously succeeded may now error with the message `unsupported comparison operator`. These errors can be fixed by adding explicit type casts. The `legacy_varchar_typing` session setting is on by default. #137945 -[#137945]: https://github.com/cockroachdb/cockroach/pull/137945 diff --git a/src/current/_includes/releases/v23.2/v23.2.19.md b/src/current/_includes/releases/v23.2/v23.2.19.md index 436072ce485..1f892457ef4 100644 --- a/src/current/_includes/releases/v23.2/v23.2.19.md +++ b/src/current/_includes/releases/v23.2/v23.2.19.md @@ -9,61 +9,37 @@ Release Date: January 9, 2025 - The cluster setting `server.jwt_authentication.issuers` now takes the issuers configuration value from the URI. This can be set to one of the following values: 1. Simple string that can be parsed as a valid issuer URL. For example: `'https://accounts.google.com'`. 1. String that can be parsed as a valid JSON array of issuer URLs list. For example: `['example.com/adfs','https://accounts.google.com']`. - 1. String that can be parsed as valid JSON and deserialized into a map of issuer URLs to corresponding JWKS URIs. In this case, the JWKS URI present in the issuer's well-known endpoint will be overridden. 
For example: `'{"issuer_jwks_map": {"https://accounts.google.com": "https://www.googleapis.com/oauth2/v3/certs", "example.com/adfs": "https://example.com/adfs/discovery/keys"}}'`. When `issuer_jwks_map` is set, the JWKS URI is directly used to get the key set. In all other cases when `JWKSAutoFetchEnabled` is set, the JWKS URI is obtained first from the issuer's well-known endpoint and then this endpoint is used. [#138188][#138188] + 1. String that can be parsed as valid JSON and deserialized into a map of issuer URLs to corresponding JWKS URIs. In this case, the JWKS URI present in the issuer's well-known endpoint will be overridden. For example: `'{"issuer_jwks_map": {"https://accounts.google.com": "https://www.googleapis.com/oauth2/v3/certs", "example.com/adfs": "https://example.com/adfs/discovery/keys"}}'`. When `issuer_jwks_map` is set, the JWKS URI is directly used to get the key set. In all other cases when `JWKSAutoFetchEnabled` is set, the JWKS URI is obtained first from the issuer's well-known endpoint and then this endpoint is used. #138188

General changes

-- In order to improve the granularity of changefeed pipeline metrics, the changefeed metrics `changefeed.admit_latency` and `changefeed.commit_latency` now have histogram buckets from `5ms` to `60m` (previously `500ms` to `5m`). The changefeed metrics `changefeed.parallel_io_queue_nanos`, `changefeed.parallel_io_result_queue_nanos`, `changefeed.sink_batch_hist_nanos`, `changefeed.flush_hist_nanos`, and `changefeed.kafka_throttling_hist_nanos` have histogram buckets from `5ms` to `10m` (previously `500ms` to `5m`). [#136618][#136618] -- Added support for multiple seed brokers in the new Kafka sink. [#136745][#136745] -- Added a metric `distsender.rangefeed.catchup_ranges_waiting_client_side` that counts how many rangefeeds are waiting on the client-side limiter to start performing catchup scans. [#136837][#136837] -- Added changefeed support for the `mvcc_timestamp` option with the `avro` format. If both options are specified, the Avro schema includes an `mvcc_timestamp` metadata field and emits the row's MVCC timestamp with the row data. [#136018][#136018] +- In order to improve the granularity of changefeed pipeline metrics, the changefeed metrics `changefeed.admit_latency` and `changefeed.commit_latency` now have histogram buckets from `5ms` to `60m` (previously `500ms` to `5m`). The changefeed metrics `changefeed.parallel_io_queue_nanos`, `changefeed.parallel_io_result_queue_nanos`, `changefeed.sink_batch_hist_nanos`, `changefeed.flush_hist_nanos`, and `changefeed.kafka_throttling_hist_nanos` have histogram buckets from `5ms` to `10m` (previously `500ms` to `5m`). #136618 +- Added support for multiple seed brokers in the new Kafka sink. #136745 +- Added a metric `distsender.rangefeed.catchup_ranges_waiting_client_side` that counts how many rangefeeds are waiting on the client-side limiter to start performing catchup scans. #136837 +- Added changefeed support for the `mvcc_timestamp` option with the `avro` format. 
If both options are specified, the Avro schema includes an `mvcc_timestamp` metadata field and emits the row's MVCC timestamp with the row data. #136018

SQL language changes

-- Added the `legacy_varchar_typing` session setting that reverts the changes of [#133037][#133037] that caused the change in typing behavior described in [#137837][#137837]. Specifically, the `legacy_varchar_typing` session setting makes type-checking and overload resolution ignore the newly added "unpreferred" overloads. This setting defaults to `on`. [#137922][#137922]
+- Added the `legacy_varchar_typing` session setting that reverts the changes of #133037 that caused the change in typing behavior described in #137837. Specifically, the `legacy_varchar_typing` session setting makes type-checking and overload resolution ignore the newly added "unpreferred" overloads. This setting defaults to `on`. #137922

Operational changes

-- Telemetry delivery is now considered successful even in cases where we experience a network timeout. This will prevent throttling in cases outside an operator's control. [#136477][#136477]
-- When a schema change job completes, rolls back, or encounters a failure, the time taken since the job began is now logged in a structured log in the `SQL_SCHEMA` log channel. [#136952][#136952]
+- Telemetry delivery is now considered successful even in cases where we experience a network timeout. This will prevent throttling in cases outside an operator's control. #136477
+- When a schema change job completes, rolls back, or encounters a failure, the time taken since the job began is now logged in a structured log in the `SQL_SCHEMA` log channel. #136952

Bug fixes

-- `CREATE SCHEMA` now returns the correct error if the schema name is missing. [#135925][#135925] -- Fixed an issue where corrupted table statistics could cause the CockroachDB process to crash. [#136043][#136043] -- The `idle_in_session_timeout` setting now excludes the time spent waiting for schema changer jobs to complete, preventing unintended session termination during schema change operations. [#136508][#136508] -- Fixed a bug that caused the optimizer to use stale table statistics after altering an enum type used in the table. [#136832][#136832] -- CockroachDB now better respects `statement_timeout` limit on queries involving the top K sort and merge join operations. [#136650][#136650] -- Fixed an issue where license enforcement was not consistently disabled for single-node clusters started with `start-single-node`. Now, cluster restarts correctly disable licensing. [#137009][#137009] -- Fixed a bug that caused queries against tables with user-defined types to sometimes fail with errors after restoring those tables. [#137356][#137356] -- Fixed a bug that could cause an internal error if a table with an implicit (`rowid`) primary key was locked from within a subquery, for example, ` SELECT * FROM (SELECT * FROM foo WHERE x = 2) FOR UPDATE;` . The error could occur either under read-committed isolation, or with `optimizer_use_lock_op_for_serializable` enabled. [#137130][#137130] -- Fixed an issue where adding an existing column with the `IF NOT EXISTS` option could exit too early, skipping necessary handling of the abstract syntax tree (AST). This could lead to failure of the `ALTER` statement. [#137678][#137678] -- `CLOSE CURSOR` statements are now allowed in read-only transactions, similar to PostgreSQL. The bug has been present since at least v23.1. [#137788][#137788] -- Fixed an issue where a schema change could incorrectly cause a changefeed to fail with an assertion error like `received boundary timestamp ... of type ... 
before reaching existing boundary of type ...`. [#137703][#137703] -- Internal scans are now exempt from the `sql.defaults.disallow_full_table_scans.enabled` setting, allowing index creation even when the cluster setting is enabled. [#137720][#137720] -- A new column of type `JSON` or `JSONB` that has a `UNIQUE` constraint will now be blocked from being added to a table if the cluster has not yet finalized the upgrade to v23.2. [#137864][#137864] +- `CREATE SCHEMA` now returns the correct error if the schema name is missing. #135925 +- Fixed an issue where corrupted table statistics could cause the CockroachDB process to crash. #136043 +- The `idle_in_session_timeout` setting now excludes the time spent waiting for schema changer jobs to complete, preventing unintended session termination during schema change operations. #136508 +- Fixed a bug that caused the optimizer to use stale table statistics after altering an enum type used in the table. #136832 +- CockroachDB now better respects `statement_timeout` limit on queries involving the top K sort and merge join operations. #136650 +- Fixed an issue where license enforcement was not consistently disabled for single-node clusters started with `start-single-node`. Now, cluster restarts correctly disable licensing. #137009 +- Fixed a bug that caused queries against tables with user-defined types to sometimes fail with errors after restoring those tables. #137356 +- Fixed a bug that could cause an internal error if a table with an implicit (`rowid`) primary key was locked from within a subquery, for example, ` SELECT * FROM (SELECT * FROM foo WHERE x = 2) FOR UPDATE;` . The error could occur either under read-committed isolation, or with `optimizer_use_lock_op_for_serializable` enabled. #137130 +- Fixed an issue where adding an existing column with the `IF NOT EXISTS` option could exit too early, skipping necessary handling of the abstract syntax tree (AST). This could lead to failure of the `ALTER` statement. 
#137678 +- `CLOSE CURSOR` statements are now allowed in read-only transactions, similar to PostgreSQL. The bug has been present since at least v23.1. #137788 +- Fixed an issue where a schema change could incorrectly cause a changefeed to fail with an assertion error like `received boundary timestamp ... of type ... before reaching existing boundary of type ...`. #137703 +- Internal scans are now exempt from the `sql.defaults.disallow_full_table_scans.enabled` setting, allowing index creation even when the cluster setting is enabled. #137720 +- A new column of type `JSON` or `JSONB` that has a `UNIQUE` constraint will now be blocked from being added to a table if the cluster has not yet finalized the upgrade to v23.2. #137864 -[#133037]: https://github.com/cockroachdb/cockroach/pull/133037 -[#137837]: https://github.com/cockroachdb/cockroach/pull/137837 -[#135925]: https://github.com/cockroachdb/cockroach/pull/135925 -[#136018]: https://github.com/cockroachdb/cockroach/pull/136018 -[#136043]: https://github.com/cockroachdb/cockroach/pull/136043 -[#136477]: https://github.com/cockroachdb/cockroach/pull/136477 -[#136508]: https://github.com/cockroachdb/cockroach/pull/136508 -[#136618]: https://github.com/cockroachdb/cockroach/pull/136618 -[#136650]: https://github.com/cockroachdb/cockroach/pull/136650 -[#136745]: https://github.com/cockroachdb/cockroach/pull/136745 -[#136832]: https://github.com/cockroachdb/cockroach/pull/136832 -[#136837]: https://github.com/cockroachdb/cockroach/pull/136837 -[#136952]: https://github.com/cockroachdb/cockroach/pull/136952 -[#137009]: https://github.com/cockroachdb/cockroach/pull/137009 -[#137111]: https://github.com/cockroachdb/cockroach/pull/137111 -[#137130]: https://github.com/cockroachdb/cockroach/pull/137130 -[#137356]: https://github.com/cockroachdb/cockroach/pull/137356 -[#137678]: https://github.com/cockroachdb/cockroach/pull/137678 -[#137703]: https://github.com/cockroachdb/cockroach/pull/137703 -[#137720]: 
https://github.com/cockroachdb/cockroach/pull/137720 -[#137788]: https://github.com/cockroachdb/cockroach/pull/137788 -[#137864]: https://github.com/cockroachdb/cockroach/pull/137864 -[#137922]: https://github.com/cockroachdb/cockroach/pull/137922 -[#138188]: https://github.com/cockroachdb/cockroach/pull/138188 diff --git a/src/current/_includes/releases/v23.2/v23.2.2.md b/src/current/_includes/releases/v23.2/v23.2.2.md index b4ebca52c21..701aaf16a24 100644 --- a/src/current/_includes/releases/v23.2/v23.2.2.md +++ b/src/current/_includes/releases/v23.2/v23.2.2.md @@ -6,7 +6,7 @@ Release Date: February 27, 2024

Bug fixes

-- Fixed a bug where [rangefeed]({% link v23.2/create-and-configure-changefeeds.md %}#enable-rangefeeds) resolved timestamps could get stuck, continually emitting the log message `pushing old intents failed: range barrier failed, range split`, typically following a [range merge](https://www.cockroachlabs.com/docs/v23.2/architecture/distribution-layer#range-merges). This bug was introduced in v23.2.1. [#119558][#119558]
+- Fixed a bug where [rangefeed]({% link v23.2/create-and-configure-changefeeds.md %}#enable-rangefeeds) resolved timestamps could get stuck, continually emitting the log message `pushing old intents failed: range barrier failed, range split`, typically following a [range merge](https://www.cockroachlabs.com/docs/v23.2/architecture/distribution-layer#range-merges). This bug was introduced in v23.2.1. #119558
@@ -16,4 +16,3 @@ This release includes 2 merged PRs by 2 authors.
-[#119558]: https://github.com/cockroachdb/cockroach/pull/119558 diff --git a/src/current/_includes/releases/v23.2/v23.2.20.md b/src/current/_includes/releases/v23.2/v23.2.20.md index 4a38c631382..b1e7d7b7672 100644 --- a/src/current/_includes/releases/v23.2/v23.2.20.md +++ b/src/current/_includes/releases/v23.2/v23.2.20.md @@ -6,27 +6,17 @@ Release Date: February 6, 2025

SQL language changes

-- Since v23.2, table statistics histograms have been collected for non-indexed JSON columns. Histograms are no longer collected for these columns if the `sql.stats.non_indexed_json_histograms.enabled` cluster setting is set to `false`. This reduces memory usage during table statistics collection, for both automatic and manual collection via `ANALYZE` and `CREATE STATISTICS`. [#140269][#140269] +- Since v23.2, table statistics histograms have been collected for non-indexed JSON columns. Histograms are no longer collected for these columns if the `sql.stats.non_indexed_json_histograms.enabled` cluster setting is set to `false`. This reduces memory usage during table statistics collection, for both automatic and manual collection via `ANALYZE` and `CREATE STATISTICS`. #140269

Operational changes

-- The `changefeed.max_behind_nanos` metric now supports scoping with metrics labels. [#139241][#139241] +- The `changefeed.max_behind_nanos` metric now supports scoping with metrics labels. #139241

Bug fixes

-- Previously, `SHOW CREATE TABLE` was showing incorrect data with regards to inverted indexes. It now shows the correct data that can be repeatedly entered back into CockroachDB to recreate the same table. [#138167][#138167] -- Fixed a bug where querying the `pg_catalog.pg_constraint` table while the schema changer was dropping a constraint could result in a query error. [#138286][#138286] -- Fixed a bounded memory leak that could occur when collecting table statistics on a table that had both very wide (10KiB or more) and relatively small (under 400B) `BYTES`-like values within the same row. This leak had been present since before v19.2. [#139177][#139177] -- Fixed a bug where changefeeds using CDC queries could have duplicate columns in the Parquet output. [#140155][#140155] -- Fixed a bug that prevented the `CREATE` statement for a routine from being included in a statement bundle when the routine was created on a schema other than `public`. The bug had existed since v23.1. [#140260][#140260] - -[#137998]: https://github.com/cockroachdb/cockroach/pull/137998 -[#138152]: https://github.com/cockroachdb/cockroach/pull/138152 -[#138167]: https://github.com/cockroachdb/cockroach/pull/138167 -[#138286]: https://github.com/cockroachdb/cockroach/pull/138286 -[#138952]: https://github.com/cockroachdb/cockroach/pull/138952 -[#139177]: https://github.com/cockroachdb/cockroach/pull/139177 -[#139241]: https://github.com/cockroachdb/cockroach/pull/139241 -[#140155]: https://github.com/cockroachdb/cockroach/pull/140155 -[#140260]: https://github.com/cockroachdb/cockroach/pull/140260 -[#140269]: https://github.com/cockroachdb/cockroach/pull/140269 +- Previously, `SHOW CREATE TABLE` was showing incorrect data with regards to inverted indexes. It now shows the correct data that can be repeatedly entered back into CockroachDB to recreate the same table. 
#138167 +- Fixed a bug where querying the `pg_catalog.pg_constraint` table while the schema changer was dropping a constraint could result in a query error. #138286 +- Fixed a bounded memory leak that could occur when collecting table statistics on a table that had both very wide (10KiB or more) and relatively small (under 400B) `BYTES`-like values within the same row. This leak had been present since before v19.2. #139177 +- Fixed a bug where changefeeds using CDC queries could have duplicate columns in the Parquet output. #140155 +- Fixed a bug that prevented the `CREATE` statement for a routine from being included in a statement bundle when the routine was created on a schema other than `public`. The bug had existed since v23.1. #140260 + diff --git a/src/current/_includes/releases/v23.2/v23.2.21.md b/src/current/_includes/releases/v23.2/v23.2.21.md index 076839e443d..c407d938614 100644 --- a/src/current/_includes/releases/v23.2/v23.2.21.md +++ b/src/current/_includes/releases/v23.2/v23.2.21.md @@ -6,17 +6,12 @@ Release Date: March 6, 2025

SQL language changes

-- Since v23.2, table statistics histograms have been collected for non-indexed `JSONB` columns. Histograms are no longer collected for these columns if the `sql.stats.non_indexed_json_histograms.enabled` cluster setting is set to `false`. This reduces memory usage during table statistics collection, for both automatic and manual collection via `ANALYZE` and `CREATE STATISTICS`. [#140144][#140144] +- Since v23.2, table statistics histograms have been collected for non-indexed `JSONB` columns. Histograms are no longer collected for these columns if the `sql.stats.non_indexed_json_histograms.enabled` cluster setting is set to `false`. This reduces memory usage during table statistics collection, for both automatic and manual collection via `ANALYZE` and `CREATE STATISTICS`. #140144

Bug fixes

-- Fixed a bug where under rare circumstances draining a node could fail with `some sessions did not respond to cancellation within 1s`. [#139479][#139479] -- Fixed a bug that prevented the `CREATE` statement for a routine from being shown in a statement bundle. This happened when the routine was created on a schema other than `public`. The bug has existed since v23.1. [#136131][#136131] -- Fixed a bounded memory leak that could previously occur when evaluating some memory-intensive queries via the vectorized engine. The leak has been present since v20.2. [#139098][#139098] -- Previously, in changefeeds using CDC queries and the Parquet format, the output would include duplicate columns when it contained a user-defined primary key. Now, the columns are de-duplicated in Parquet changefeed messages. [#140380][#140380] +- Fixed a bug where under rare circumstances draining a node could fail with `some sessions did not respond to cancellation within 1s`. #139479 +- Fixed a bug that prevented the `CREATE` statement for a routine from being shown in a statement bundle. This happened when the routine was created on a schema other than `public`. The bug has existed since v23.1. #136131 +- Fixed a bounded memory leak that could previously occur when evaluating some memory-intensive queries via the vectorized engine. The leak has been present since v20.2. #139098 +- Previously, in changefeeds using CDC queries and the Parquet format, the output would include duplicate columns when it contained a user-defined primary key. Now, the columns are de-duplicated in Parquet changefeed messages. 
#140380 -[#136131]: https://github.com/cockroachdb/cockroach/pull/136131 -[#139098]: https://github.com/cockroachdb/cockroach/pull/139098 -[#139479]: https://github.com/cockroachdb/cockroach/pull/139479 -[#140144]: https://github.com/cockroachdb/cockroach/pull/140144 -[#140380]: https://github.com/cockroachdb/cockroach/pull/140380 diff --git a/src/current/_includes/releases/v23.2/v23.2.22.md b/src/current/_includes/releases/v23.2/v23.2.22.md index 84926de9231..6039ba05acd 100644 --- a/src/current/_includes/releases/v23.2/v23.2.22.md +++ b/src/current/_includes/releases/v23.2/v23.2.22.md @@ -7,25 +7,20 @@ Release Date: April 2, 2025

General changes

- The protected timestamp (PTS) records of running changefeeds are now updated when the set of targets changes, such as when system tables are added to the protected tables list. - [#141157][#141157] + #141157

Bug fixes

- Fixed a bug that could cause gateway nodes to panic when performing an `UPSERT` on a table with a `BOOL` primary key column and a partial index that used the primary key column as the predicate expression. - [#141825][#141825] + #141825 - Fixed a rare bug in which a query could fail with the error `could not find computed column expression for column in table` while dropping a virtual computed column from a table. This bug was introduced in v23.2.4. - [#139874][#139874] + #139874 - Fixed a bug that could cause `nil pointer dereference` errors when executing statements with user-defined functions (UDFs). The error could also occur when executing statements with some built-in functions, like `obj_description`. - [#141666][#141666] + #141666

Miscellaneous

- When configuring the `sql.ttl.default_delete_rate_limit` cluster setting, a notice is displayed informing the user that the TTL rate limit is per leaseholder per table with a link to the docs. - [#142831][#142831] + #142831 -[#141666]: https://github.com/cockroachdb/cockroach/pull/141666 -[#142831]: https://github.com/cockroachdb/cockroach/pull/142831 -[#141157]: https://github.com/cockroachdb/cockroach/pull/141157 -[#141825]: https://github.com/cockroachdb/cockroach/pull/141825 -[#139874]: https://github.com/cockroachdb/cockroach/pull/139874 diff --git a/src/current/_includes/releases/v23.2/v23.2.23.md b/src/current/_includes/releases/v23.2/v23.2.23.md index a7544bc961a..0a63dd4a458 100644 --- a/src/current/_includes/releases/v23.2/v23.2.23.md +++ b/src/current/_includes/releases/v23.2/v23.2.23.md @@ -7,7 +7,6 @@ Release Date: April 9, 2025

Bug fixes

- Fixed a bug that could cause a stack overflow during execution of a prepared statement that invoked a PL/pgSQL routine with a loop. The bug existed in versions v23.2.22, v24.1.15, v24.3.9, v25.1.2, v25.1.3, and pre-release versions of v25.2 prior to v25.2.0-alpha.3. - [#144062][#144062] + #144062 -[#144062]: https://github.com/cockroachdb/cockroach/pull/144062 diff --git a/src/current/_includes/releases/v23.2/v23.2.24.md b/src/current/_includes/releases/v23.2/v23.2.24.md index 0a33fefc949..0f9c6a252e2 100644 --- a/src/current/_includes/releases/v23.2/v23.2.24.md +++ b/src/current/_includes/releases/v23.2/v23.2.24.md @@ -6,6 +6,4 @@ Release Date: April 28, 2025

Bug fixes

-- Fixed a rare corruption bug that impacts import and materialized views. [#144659][#144659] - -[#144659]: https://github.com/cockroachdb/cockroach/pull/144659 \ No newline at end of file +- Fixed a rare corruption bug that impacts import and materialized views. #144659 diff --git a/src/current/_includes/releases/v23.2/v23.2.25.md b/src/current/_includes/releases/v23.2/v23.2.25.md index 0404f16fb12..c9a0d7300df 100644 --- a/src/current/_includes/releases/v23.2/v23.2.25.md +++ b/src/current/_includes/releases/v23.2/v23.2.25.md @@ -7,25 +7,19 @@ Release Date: April 30, 2025

SQL language changes

- Added the `WITH IGNORE_FOREIGN_KEYS` option to `SHOW CREATE TABLE` which omits foreign key constraints from the output schema. This option is also allowed in `SHOW CREATE VIEW`, but has no effect. It cannot be combined with the `WITH REDACT` option. - [#142165][#142165] + #142165

Bug fixes

- Fixed a bug where CockroachDB would encounter an internal error when decoding the gists of plans with `CALL` statements. The bug had been present since v23.2. - [#143312][#143312] + #143312 - Fixed a bug that caused changefeeds to fail on startup when scanning a single key. - [#143147][#143147] + #143147 - Fixed a bug that could cause a stack overflow during execution of a prepared statement that invoked a PL/pgSQL routine with a loop. The bug existed in versions v23.2.22, v24.1.15, v24.3.9, v25.1.2, v25.1.3, and pre-release versions of v25.2 prior to v25.2.0-alpha.3. - [#144029][#144029] + #144029 - Fixed a bug that could leave behind a dangling reference to a dropped role if that role had default privileges granted to itself. The bug was caused by defining privileges such as: `ALTER DEFAULT PRIVILEGES FOR ROLE self_referencing_role GRANT INSERT ON TABLES TO self_referencing_role`. - [#143291][#143291] + #143291 - MVCC garbage collection is now fully subject to IO admission control. Previously, it was possible for MVCC GC to cause store overload (such as LSM inversion) when a large amount of data would become eligible for garbage collection. Should any issues arise from subjecting MVCC GC to admission control, the `kv.mvcc_gc.queue_kv_admission_control.enabled` cluster setting can be set to `false` to restore the previous behavior. 
- [#143274][#143274] + #143274 -[#142165]: https://github.com/cockroachdb/cockroach/pull/142165 -[#143312]: https://github.com/cockroachdb/cockroach/pull/143312 -[#143147]: https://github.com/cockroachdb/cockroach/pull/143147 -[#144029]: https://github.com/cockroachdb/cockroach/pull/144029 -[#143291]: https://github.com/cockroachdb/cockroach/pull/143291 -[#143274]: https://github.com/cockroachdb/cockroach/pull/143274 diff --git a/src/current/_includes/releases/v23.2/v23.2.26.md b/src/current/_includes/releases/v23.2/v23.2.26.md index 06f2f27f45d..34b8a87fb48 100644 --- a/src/current/_includes/releases/v23.2/v23.2.26.md +++ b/src/current/_includes/releases/v23.2/v23.2.26.md @@ -6,14 +6,9 @@ Release Date: May 28, 2025

Bug fixes

-- Fixed a bug where using values for the cluster setting `changefeed.aggregator.flush_jitter` and the changefeed option `min_checkpoint_frequency` resulting in `changefeed.aggregator.flush_jitter * min_checkpoint_frequency < 1` would cause a panic. Jitter will now be disabled in this case. [#144423][#144423] -- Improved the performance of `SHOW CREATE TABLE` on multi-region databases with a large numbers of objects. [#145081][#145081] -- Fixed an internal assertion failure that could occur during operations like `ALTER TYPE` or `ALTER DATABASE ... ADD REGION` when temporary tables were present. [#146200][#146200] -- Fixed a bug that prevented `TRUNCATE` from succeeding if any indexes on the table had back-reference dependencies, such as from a view or function referencing the index. [#146322][#146322] -- Fixed a rare corruption bug that impacts import and materialized views. [#144659][#144659] +- Fixed a bug where using values for the cluster setting `changefeed.aggregator.flush_jitter` and the changefeed option `min_checkpoint_frequency` resulting in `changefeed.aggregator.flush_jitter * min_checkpoint_frequency < 1` would cause a panic. Jitter will now be disabled in this case. #144423 +- Improved the performance of `SHOW CREATE TABLE` on multi-region databases with a large numbers of objects. #145081 +- Fixed an internal assertion failure that could occur during operations like `ALTER TYPE` or `ALTER DATABASE ... ADD REGION` when temporary tables were present. #146200 +- Fixed a bug that prevented `TRUNCATE` from succeeding if any indexes on the table had back-reference dependencies, such as from a view or function referencing the index. #146322 +- Fixed a rare corruption bug that impacts import and materialized views. 
#144659 -[#144423]: https://github.com/cockroachdb/cockroach/pull/144423 -[#145081]: https://github.com/cockroachdb/cockroach/pull/145081 -[#146200]: https://github.com/cockroachdb/cockroach/pull/146200 -[#146322]: https://github.com/cockroachdb/cockroach/pull/146322 -[#144659]: https://github.com/cockroachdb/cockroach/pull/144659 diff --git a/src/current/_includes/releases/v23.2/v23.2.27.md b/src/current/_includes/releases/v23.2/v23.2.27.md index f3fe0298191..827083e5eea 100644 --- a/src/current/_includes/releases/v23.2/v23.2.27.md +++ b/src/current/_includes/releases/v23.2/v23.2.27.md @@ -6,22 +6,16 @@ Release Date: June 25, 2025

Operational changes

-- The `goschedstats.always_use_short_sample_period.enabled` cluster setting should be set to `true` for any serious production cluster; this will prevent unnecessary queuing in admission control CPU queues. [#146742][#146742] +- The `goschedstats.always_use_short_sample_period.enabled` cluster setting should be set to `true` for any serious production cluster; this will prevent unnecessary queuing in admission control CPU queues. #146742

Bug fixes

-- Fixed a bug that could potentially cause a changefeed to complete erroneously when one of its watched tables encounters a schema change. [#147040][#147040] -- Fixed a bug that caused the SQL Activity > Statement Fingerprint page to fail to load details for statements run with application names containing a `#` character. [#147219][#147219] -- Fixed a bug that could cause the `cockroach` process to `segfault` when collecting runtime execution traces (typically collected via the **Advanced Debug** page in the Console). [#147339][#147339] -- Fixed a bug that could cause stable expressions to be folded in cached query plans. The bug could cause stable expressions like `current_setting` to return the wrong result if used in a prepared statement. The bug was introduced in v23.2.22, v24.1.14, v24.3.9, v25.1.2, and the v25.2 alpha. [#147456][#147456] +- Fixed a bug that could potentially cause a changefeed to complete erroneously when one of its watched tables encounters a schema change. #147040 +- Fixed a bug that caused the SQL Activity > Statement Fingerprint page to fail to load details for statements run with application names containing a `#` character. #147219 +- Fixed a bug that could cause the `cockroach` process to `segfault` when collecting runtime execution traces (typically collected via the **Advanced Debug** page in the Console). #147339 +- Fixed a bug that could cause stable expressions to be folded in cached query plans. The bug could cause stable expressions like `current_setting` to return the wrong result if used in a prepared statement. The bug was introduced in v23.2.22, v24.1.14, v24.3.9, v25.1.2, and the v25.2 alpha. #147456

Performance improvements

-- TTL jobs now respond to cluster topology changes by restarting and rebalancing across available nodes. [#147224][#147224] +- TTL jobs now respond to cluster topology changes by restarting and rebalancing across available nodes. #147224 -[#147219]: https://github.com/cockroachdb/cockroach/pull/147219 -[#147339]: https://github.com/cockroachdb/cockroach/pull/147339 -[#147456]: https://github.com/cockroachdb/cockroach/pull/147456 -[#147224]: https://github.com/cockroachdb/cockroach/pull/147224 -[#146742]: https://github.com/cockroachdb/cockroach/pull/146742 -[#147040]: https://github.com/cockroachdb/cockroach/pull/147040 diff --git a/src/current/_includes/releases/v23.2/v23.2.28.md b/src/current/_includes/releases/v23.2/v23.2.28.md index c180ffa9f22..b022fd9609d 100644 --- a/src/current/_includes/releases/v23.2/v23.2.28.md +++ b/src/current/_includes/releases/v23.2/v23.2.28.md @@ -6,6 +6,5 @@ Release Date: September 4, 2025

Performance improvements

-- Lookup joins can now be used on tables with virtual columns even if the type of the search argument is not identical to the column type referenced in the virtual column. [#152926][#152926] +- Lookup joins can now be used on tables with virtual columns even if the type of the search argument is not identical to the column type referenced in the virtual column. #152926 -[#152926]: https://github.com/cockroachdb/cockroach/pull/152926 diff --git a/src/current/_includes/releases/v23.2/v23.2.29.md b/src/current/_includes/releases/v23.2/v23.2.29.md index 4b4f99026ca..a8a87a1dd4c 100644 --- a/src/current/_includes/releases/v23.2/v23.2.29.md +++ b/src/current/_includes/releases/v23.2/v23.2.29.md @@ -6,7 +6,6 @@ Release Date: February 19, 2026

Bug fixes

-- Fixed a rare race condition between range splits and MVCC garbage collection where a GCRequest could target keys outside its declared span. In rare cases, this could result in data on the post-split right-hand side (RHS) being incorrectly garbage collected, potentially leading to lost writes. The system now detects and rejects such malformed GC requests. [#163775][#163775] +- Fixed a rare race condition between range splits and MVCC garbage collection where a GCRequest could target keys outside its declared span. In rare cases, this could result in data on the post-split right-hand side (RHS) being incorrectly garbage collected, potentially leading to lost writes. The system now detects and rejects such malformed GC requests. #163775 -[#163775]: https://github.com/cockroachdb/cockroach/pull/163775 diff --git a/src/current/_includes/releases/v23.2/v23.2.3.md b/src/current/_includes/releases/v23.2/v23.2.3.md index a03ac245ca0..99c3ed75e39 100644 --- a/src/current/_includes/releases/v23.2/v23.2.3.md +++ b/src/current/_includes/releases/v23.2/v23.2.3.md @@ -6,40 +6,40 @@ Release Date: March 20, 2024

Security updates

-- The [DB Console](../v23.2/ui-overview.html) `session` cookie is now marked `HttpOnly` to prevent it from being read by any Javascript code. Cookies are also marked `Secure` for the browser when the cluster is running in secure mode. [#119259][#119259] -- Clusters using [Cluster Single Sign-on (SSO) with JSON web tokens (JWTs)](../v23.2/sso-sql.html) can now optionally fetch signing keys from configured issuers instead of configuring static signing keys for each issuer. When the new cluster setting `server.jwt_authentication.jwks_auto_fetch.enabled` is set to `true`, signing keys are automatically fetched from the issuer using metadata published in its OpenID configuration. In this case, static signing keys in `server.jwt_authentication.jwks` are ignored. When automatic fetching is enabled, there may be a slight increase in network latency for each JWT authentication request, proportional to the latency between the cluster and the issuer's endpoint. [#119768][#119768] +- The [DB Console](../v23.2/ui-overview.html) `session` cookie is now marked `HttpOnly` to prevent it from being read by any Javascript code. Cookies are also marked `Secure` for the browser when the cluster is running in secure mode. #119259 +- Clusters using [Cluster Single Sign-on (SSO) with JSON web tokens (JWTs)](../v23.2/sso-sql.html) can now optionally fetch signing keys from configured issuers instead of configuring static signing keys for each issuer. When the new cluster setting `server.jwt_authentication.jwks_auto_fetch.enabled` is set to `true`, signing keys are automatically fetched from the issuer using metadata published in its OpenID configuration. In this case, static signing keys in `server.jwt_authentication.jwks` are ignored. When automatic fetching is enabled, there may be a slight increase in network latency for each JWT authentication request, proportional to the latency between the cluster and the issuer's endpoint. #119768

{{ site.data.products.enterprise }} edition changes

-- Fixed a bug where creating a changefeed with the [`format='avro'`](../v23.2/create-changefeed.html#format) and [`diff`](../v23.2/create-changefeed.html#diff-opt) options that targeted tables with a `DECIMAL(n)` column (i.e., zero-scale `DECIMAL` column) would cause a panic. [#118847][#118847] +- Fixed a bug where creating a changefeed with the [`format='avro'`](../v23.2/create-changefeed.html#format) and [`diff`](../v23.2/create-changefeed.html#diff-opt) options that targeted tables with a `DECIMAL(n)` column (i.e., zero-scale `DECIMAL` column) would cause a panic. #118847

SQL language changes

-- Changed the `sql.index_recommendation.drop_unused_duration` cluster setting to `public` so that it is documented on the [Cluster Settings](../v23.2/cluster-settings.html) page. [#118764][#118764] -- Added the `server.max_open_transactions_per_gateway` [cluster setting](../v23.2/cluster-settings.html). When set to a non-negative value, non-admin users cannot execute a query if the number of transactions open on the current gateway node is already at the configured limit. [#118933][#118933] -- Out-of-process SQL servers will now start exporting a new `sql.aggregated_livebytes` [metric](../v23.2/metrics.html). This metric gets updated once every 60 seconds by default, and its update interval can be configured via the `tenant_global_metrics_exporter_interval` [cluster setting](../v23.2/cluster-settings.html). [#119371][#119371] -- Added support for index hints with [`INSERT`](../v23.2/insert.html) and [`UPSERT`](../v23.2/upsert.html) statements. This allows `INSERT ... ON CONFLICT` and `UPSERT` queries to use index hints in the same way they are already supported for [`UPDATE`](../v23.2/update.html) and [`DELETE`](../v23.2/delete.html) statements. [#119601][#119601] +- Changed the `sql.index_recommendation.drop_unused_duration` cluster setting to `public` so that it is documented on the [Cluster Settings](../v23.2/cluster-settings.html) page. #118764 +- Added the `server.max_open_transactions_per_gateway` [cluster setting](../v23.2/cluster-settings.html). When set to a non-negative value, non-admin users cannot execute a query if the number of transactions open on the current gateway node is already at the configured limit. #118933 +- Out-of-process SQL servers will now start exporting a new `sql.aggregated_livebytes` [metric](../v23.2/metrics.html). This metric gets updated once every 60 seconds by default, and its update interval can be configured via the `tenant_global_metrics_exporter_interval` [cluster setting](../v23.2/cluster-settings.html). 
#119371 +- Added support for index hints with [`INSERT`](../v23.2/insert.html) and [`UPSERT`](../v23.2/upsert.html) statements. This allows `INSERT ... ON CONFLICT` and `UPSERT` queries to use index hints in the same way they are already supported for [`UPDATE`](../v23.2/update.html) and [`DELETE`](../v23.2/delete.html) statements. #119601

Operational changes

-- Expanded the [`--include-range-info`](../v23.2/cockroach-debug-zip.html) flag to include problem ranges. This flag still defaults to `true`. [#119234][#119234] +- Expanded the [`--include-range-info`](../v23.2/cockroach-debug-zip.html) flag to include problem ranges. This flag still defaults to `true`. #119234 - In unredacted [debug zips](../v23.2/cockroach-debug-zip.html), the `crdb_internal.transaction_contention_events` table file has two new columns: - `waiting_stmt_query`: the query of the waiting statement. - - `blocking_txn_queries_unordered`: the unordered list of the blocking transaction's queries. [#118831][#118831] + - `blocking_txn_queries_unordered`: the unordered list of the blocking transaction's queries. #118831

Command-line changes

-- Updated the SQL shell help URL to point to the [`cockroach sql`](../v23.2/cockroach-sql.html) page. [#118994][#118994] -- [`cockroach debug tsdump`](../v23.2/cockroach-debug-tsdump.html) creates a `tsdump.yaml` file. The `tsdump` raw format automatically creates the YAML file in the default location `/tmp/tsdump.yaml`. Added a new flag `--yaml` that allows users to specify the path to create `tsdump.yaml` instead of using the default location. For example, `cockroach debug tsdump --host : \ --format raw --yaml=/some_path/tsdump.yaml > /some_path/tsdump.gob`.[#117741][#117741] +- Updated the SQL shell help URL to point to the [`cockroach sql`](../v23.2/cockroach-sql.html) page. #118994 +- [`cockroach debug tsdump`](../v23.2/cockroach-debug-tsdump.html) creates a `tsdump.yaml` file. The `tsdump` raw format automatically creates the YAML file in the default location `/tmp/tsdump.yaml`. Added a new flag `--yaml` that allows users to specify the path to create `tsdump.yaml` instead of using the default location. For example, `cockroach debug tsdump --host : \ --format raw --yaml=/some_path/tsdump.yaml > /some_path/tsdump.gob`.#117741

DB Console changes

-- Fixed a bug where a warning about the need to refresh data would remain displayed on the Active Executions view of the [Statements](../v23.2/ui-statements-page.html#active-executions-view) and [Transactions](../v23.2/ui-transactions-page.html#active-executions-view) pages despite enabling **Auto Refresh**. [#118703][#118703] -- Updated the [Statement Details page](../v23.2/ui-statements-page.html) to always show the entire selected period, instead of just the period that had data. [#118805][#118805] +- Fixed a bug where a warning about the need to refresh data would remain displayed on the Active Executions view of the [Statements](../v23.2/ui-statements-page.html#active-executions-view) and [Transactions](../v23.2/ui-transactions-page.html#active-executions-view) pages despite enabling **Auto Refresh**. #118703 +- Updated the [Statement Details page](../v23.2/ui-statements-page.html) to always show the entire selected period, instead of just the period that had data. #118805 - The [Overload dashboard](../v23.2/ui-overload-dashboard.html) now includes two additional graphs: - **Elastic CPU Utilization**: displays the CPU utilization by elastic work, compared to the limit set for elastic work. - - **Elastic CPU Exhausted Duration Per Second**: displays the duration of CPU exhaustion by elastic work, in microseconds. [#118896][#118896] -- The **Full Table/Index Scans** chart in the [SQL Metrics dashboard](../v23.2/ui-sql-dashboard.html) now shows the non-negative derivative of the number of full scans tracked. [#118860][#118860] + - **Elastic CPU Exhausted Duration Per Second**: displays the duration of CPU exhaustion by elastic work, in microseconds. #118896 +- The **Full Table/Index Scans** chart in the [SQL Metrics dashboard](../v23.2/ui-sql-dashboard.html) now shows the non-negative derivative of the number of full scans tracked. #118860

Bug fixes

@@ -48,26 +48,26 @@ Release Date: March 20, 2024 1. A transaction commits and removes its transaction record before its [intent](../v23.2/architecture/transaction-layer.html#writing) resolution is applied on the follower. 1. The follower's [closed timestamp](../v23.2/architecture/transaction-layer.html#closed-timestamps) has advanced past the transaction commit timestamp. 1. The rangefeed attempts to push the transaction to a new timestamp (at least 10 seconds after the transaction began). - 1. This may cause the rangefeed to prematurely emit a checkpoint before emitting writes at lower timestamps, which in turn may cause the [changefeed](../v23.2/how-does-an-enterprise-changefeed-work.html) to drop these events entirely, never emitting them. [#118413][#118413] -- Decommissioning replicas that are part of a mis-replicated range will no longer get stuck on a rebalance operation that was falsely determined to be unsafe. This bug was introduced in v23.1.0. [#118343][#118343] -- CockroachDB will no longer spam the logs with `unable to get CPU capacity` errors every 10 seconds when running outside of a CPU cgroup. [#118672][#118672] -- [`AUTO CREATE STATS`](../v23.2/cost-based-optimizer.html#table-statistics) jobs could previously lead to growth in an internal system table resulting in slower job-system related queries. [#118942][#118942] -- Fixed a bug that caused an inscrutable error when a [sequence](../v23.2/create-sequence.html) name allocated by `SERIAL` conflicted with an existing type name. [#118947][#118947] -- Fixed an internal error with a message like: `LeafTxn ... incompatible with locking request` that occurs when performing an update under [`READ COMMITTED` isolation](../v23.2/read-committed.html) that cascades to a table with multiple other foreign keys. [#118931][#118931] -- Fixed a bug where [`ALTER PRIMARY KEY`](../v23.2/alter-table.html#alter-primary-key) could fail with an error `non-nullable column with no value! 
Index scanned ..` when validating recreated [secondary indexes]({% link v23.2/schema-design-indexes.md %}). [#118974][#118974] -- Fixed a bug where [`COMMENT ON`](../v23.2/comment-on.html) statements could fail with an `unexpected value` error if multiple `COMMENT` statements were running concurrently. [#119020][#119020] -- Previously, in certain cases, using virtual tables such as `crdb_internal.system_jobs` could result in the internal error `attempting to append refresh spans after the tracked timestamp has moved forward`. This is now fixed. The bug was introduced in CockroachDB v23.1. [#119184][#119184] -- Fixed a bug where operations on the `crdb_internal.leases` table could cause a node to become unavailable due to a deadlock in the leasing subsystem. [#119341][#119341] -- Fixed a bug where rangefeed resolved timestamps could get stuck, continually emitting the log message `pushing old intents failed: range barrier failed, range split`, typically following a [range merge](../v23.2/architecture/distribution-layer.html#range-merges). [#119541][#119541] -- Fixed a bug where running a [changefeed]({% link v23.2/change-data-capture-overview.md %}) that targets a table with a user-defined type column and with the [`envelope` option]({% link v23.2/create-changefeed.md %}#envelope) set to any value other than `wrapped` would cause a node panic due to a nil dereference. [#119738][#119738] -- Fixed a rare panic that could happen during a `pg_dump` import that contains a function that has a subquery in one of its arguments, like `SELECT addgeometrycolumn(...)`. Now, attempting to import a `pg_dump` with such a function results in an expected error. [#118612][#118612] -- Users with the [VIEWACTIVITY](../v23.2/security-reference/authorization.html#supported-privileges) privilege can now request statement bundles using `crdb_internal.request_statement_bundle` or through the DB Console [SQL Activity](../v23.2/security-reference/authorization.html#supported-privileges) page. 
[#118809][#118809] -- Fixed a bug in changefeed [webhook sinks](../v23.2/changefeed-sinks.html#webhook-sink) where the HTTP request body may not be initialized on retries, resulting in the error `http: ContentLength=... with Body length 0`. [#119496][#119496] -- Fixed a bug that caused internal errors when executing an [`EXPORT`](../v23.2/export.html) statement. [#119711][#119711] -- Fixed a bug that could lead to schema changes with a large number of descriptors doing full table scans on `system.leases`. [#119464][#119464] -- Fixed a bug where rangefeed resolved timestamps could get stuck, continually emitting the log message `pushing old intents failed: range barrier failed, range split`, typically following a range merge. This bug was introduced in v23.2.1. [#119702][#119702] -- Fixed a bug that occurred when using [`ALTER TABLE`](../v23.2/alter-table.html) to drop and re-add a [`CHECK`](../v23.2/check.html) constraint with the same name. [#120076][#120076] -- Fixed a bug that caused a slow memory leak that can accumulate when opening many new connections. The bug was present in v22.2.9+ and v23.1+ versions. [#120245][#120245] + 1. This may cause the rangefeed to prematurely emit a checkpoint before emitting writes at lower timestamps, which in turn may cause the [changefeed](../v23.2/how-does-an-enterprise-changefeed-work.html) to drop these events entirely, never emitting them. #118413 +- Decommissioning replicas that are part of a mis-replicated range will no longer get stuck on a rebalance operation that was falsely determined to be unsafe. This bug was introduced in v23.1.0. #118343 +- CockroachDB will no longer spam the logs with `unable to get CPU capacity` errors every 10 seconds when running outside of a CPU cgroup. #118672 +- [`AUTO CREATE STATS`](../v23.2/cost-based-optimizer.html#table-statistics) jobs could previously lead to growth in an internal system table resulting in slower job-system related queries. 
#118942 +- Fixed a bug that caused an inscrutable error when a [sequence](../v23.2/create-sequence.html) name allocated by `SERIAL` conflicted with an existing type name. #118947 +- Fixed an internal error with a message like: `LeafTxn ... incompatible with locking request` that occurs when performing an update under [`READ COMMITTED` isolation](../v23.2/read-committed.html) that cascades to a table with multiple other foreign keys. #118931 +- Fixed a bug where [`ALTER PRIMARY KEY`](../v23.2/alter-table.html#alter-primary-key) could fail with an error `non-nullable column with no value! Index scanned ..` when validating recreated [secondary indexes]({% link v23.2/schema-design-indexes.md %}). #118974 +- Fixed a bug where [`COMMENT ON`](../v23.2/comment-on.html) statements could fail with an `unexpected value` error if multiple `COMMENT` statements were running concurrently. #119020 +- Previously, in certain cases, using virtual tables such as `crdb_internal.system_jobs` could result in the internal error `attempting to append refresh spans after the tracked timestamp has moved forward`. This is now fixed. The bug was introduced in CockroachDB v23.1. #119184 +- Fixed a bug where operations on the `crdb_internal.leases` table could cause a node to become unavailable due to a deadlock in the leasing subsystem. #119341 +- Fixed a bug where rangefeed resolved timestamps could get stuck, continually emitting the log message `pushing old intents failed: range barrier failed, range split`, typically following a [range merge](../v23.2/architecture/distribution-layer.html#range-merges). #119541 +- Fixed a bug where running a [changefeed]({% link v23.2/change-data-capture-overview.md %}) that targets a table with a user-defined type column and with the [`envelope` option]({% link v23.2/create-changefeed.md %}#envelope) set to any value other than `wrapped` would cause a node panic due to a nil dereference. 
#119738 +- Fixed a rare panic that could happen during a `pg_dump` import that contains a function that has a subquery in one of its arguments, like `SELECT addgeometrycolumn(...)`. Now, attempting to import a `pg_dump` with such a function results in an expected error. #118612 +- Users with the [VIEWACTIVITY](../v23.2/security-reference/authorization.html#supported-privileges) privilege can now request statement bundles using `crdb_internal.request_statement_bundle` or through the DB Console [SQL Activity](../v23.2/security-reference/authorization.html#supported-privileges) page. #118809 +- Fixed a bug in changefeed [webhook sinks](../v23.2/changefeed-sinks.html#webhook-sink) where the HTTP request body may not be initialized on retries, resulting in the error `http: ContentLength=... with Body length 0`. #119496 +- Fixed a bug that caused internal errors when executing an [`EXPORT`](../v23.2/export.html) statement. #119711 +- Fixed a bug that could lead to schema changes with a large number of descriptors doing full table scans on `system.leases`. #119464 +- Fixed a bug where rangefeed resolved timestamps could get stuck, continually emitting the log message `pushing old intents failed: range barrier failed, range split`, typically following a range merge. This bug was introduced in v23.2.1. #119702 +- Fixed a bug that occurred when using [`ALTER TABLE`](../v23.2/alter-table.html) to drop and re-add a [`CHECK`](../v23.2/check.html) constraint with the same name. #120076 +- Fixed a bug that caused a slow memory leak that can accumulate when opening many new connections. The bug was present in v22.2.9+ and v23.1+ versions. #120245

Contributors

@@ -75,39 +75,3 @@ This release includes 118 merged PRs by 42 authors. -[#117741]: https://github.com/cockroachdb/cockroach/pull/117741 -[#118343]: https://github.com/cockroachdb/cockroach/pull/118343 -[#118413]: https://github.com/cockroachdb/cockroach/pull/118413 -[#118612]: https://github.com/cockroachdb/cockroach/pull/118612 -[#118672]: https://github.com/cockroachdb/cockroach/pull/118672 -[#118703]: https://github.com/cockroachdb/cockroach/pull/118703 -[#118764]: https://github.com/cockroachdb/cockroach/pull/118764 -[#118805]: https://github.com/cockroachdb/cockroach/pull/118805 -[#118809]: https://github.com/cockroachdb/cockroach/pull/118809 -[#118831]: https://github.com/cockroachdb/cockroach/pull/118831 -[#118847]: https://github.com/cockroachdb/cockroach/pull/118847 -[#118860]: https://github.com/cockroachdb/cockroach/pull/118860 -[#118896]: https://github.com/cockroachdb/cockroach/pull/118896 -[#118931]: https://github.com/cockroachdb/cockroach/pull/118931 -[#118933]: https://github.com/cockroachdb/cockroach/pull/118933 -[#118942]: https://github.com/cockroachdb/cockroach/pull/118942 -[#118947]: https://github.com/cockroachdb/cockroach/pull/118947 -[#118974]: https://github.com/cockroachdb/cockroach/pull/118974 -[#118994]: https://github.com/cockroachdb/cockroach/pull/118994 -[#119020]: https://github.com/cockroachdb/cockroach/pull/119020 -[#119184]: https://github.com/cockroachdb/cockroach/pull/119184 -[#119234]: https://github.com/cockroachdb/cockroach/pull/119234 -[#119259]: https://github.com/cockroachdb/cockroach/pull/119259 -[#119341]: https://github.com/cockroachdb/cockroach/pull/119341 -[#119371]: https://github.com/cockroachdb/cockroach/pull/119371 -[#119399]: https://github.com/cockroachdb/cockroach/pull/119399 -[#119464]: https://github.com/cockroachdb/cockroach/pull/119464 -[#119496]: https://github.com/cockroachdb/cockroach/pull/119496 -[#119541]: https://github.com/cockroachdb/cockroach/pull/119541 -[#119601]: 
https://github.com/cockroachdb/cockroach/pull/119601 -[#119702]: https://github.com/cockroachdb/cockroach/pull/119702 -[#119711]: https://github.com/cockroachdb/cockroach/pull/119711 -[#119738]: https://github.com/cockroachdb/cockroach/pull/119738 -[#119768]: https://github.com/cockroachdb/cockroach/pull/119768 -[#120076]: https://github.com/cockroachdb/cockroach/pull/120076 -[#120245]: https://github.com/cockroachdb/cockroach/pull/120245 diff --git a/src/current/_includes/releases/v23.2/v23.2.4.md b/src/current/_includes/releases/v23.2/v23.2.4.md index f6b48661f8d..e1721bc9f7f 100644 --- a/src/current/_includes/releases/v23.2/v23.2.4.md +++ b/src/current/_includes/releases/v23.2/v23.2.4.md @@ -6,26 +6,26 @@ Release Date: April 11, 2024

SQL language changes

-- Mutation statements such as [`UPDATE`](../v23.2/update.html) and [`DELETE`](../v23.2/delete.html) as well as locking statements such as [`SELECT FOR UPDATE`](../v23.2/select-for-update.html) are not allowed in [read-only transactions](../v23.2/set-transaction.html#parameters) or [`AS OF SYSTEM TIME` transactions](../v23.2/set-transaction.html#parameters). Previously, a bug existed where mutation statements and locking statements in implicit single-statement transactions using AS OF SYSTEM TIME were allowed. [#120158][#120158] -- The new cluster setting [`sql.stats.virtual_computed_columns.enabled`](../v23.2/cluster-settings.html#setting-sql-stats-virtual-computed-columns-enabled), when enabled, allows the collection of [table statistics](../v23.2/show-statistics.html) on [virtual computed columns](../v23.2/computed-columns.html). [#120933][#120933] -- The new [session variable](../v23.2/session-variables.html) `optimizer_use_virtual_computed_column_stats`, when enabled, configures the [cost-based optimizer](../v23.2/cost-based-optimizer.html) to use [table statistics](../v23.2/show-statistics.html) on [virtual computed columns](../v23.2/computed-columns.html). [#121329] +- Mutation statements such as [`UPDATE`](../v23.2/update.html) and [`DELETE`](../v23.2/delete.html) as well as locking statements such as [`SELECT FOR UPDATE`](../v23.2/select-for-update.html) are not allowed in [read-only transactions](../v23.2/set-transaction.html#parameters) or [`AS OF SYSTEM TIME` transactions](../v23.2/set-transaction.html#parameters). Previously, a bug existed where mutation statements and locking statements in implicit single-statement transactions using AS OF SYSTEM TIME were allowed. 
#120158 +- The new cluster setting [`sql.stats.virtual_computed_columns.enabled`](../v23.2/cluster-settings.html#setting-sql-stats-virtual-computed-columns-enabled), when enabled, allows the collection of [table statistics](../v23.2/show-statistics.html) on [virtual computed columns](../v23.2/computed-columns.html). #120933 +- The new [session variable](../v23.2/session-variables.html) `optimizer_use_virtual_computed_column_stats`, when enabled, configures the [cost-based optimizer](../v23.2/cost-based-optimizer.html) to use [table statistics](../v23.2/show-statistics.html) on [virtual computed columns](../v23.2/computed-columns.html). #121329

DB Console changes

-- Fixed an issue where clusters with multiple [stores](../v23.2/cockroach-start.html#store) per node could list inaccurate region and node information on the [**Databases** page](../v23.2/ui-databases-page.html#databases). [#120212][#120212] -- Users will no longer see [views](../v23.2/views.html) displayed on the [**Databases** page](../v23.2/ui-databases-page.html#databases). Previously views would be listed with no information, only displaying errors. [#120214][#120214] +- Fixed an issue where clusters with multiple [stores](../v23.2/cockroach-start.html#store) per node could list inaccurate region and node information on the [**Databases** page](../v23.2/ui-databases-page.html#databases). #120212 +- Users will no longer see [views](../v23.2/views.html) displayed on the [**Databases** page](../v23.2/ui-databases-page.html#databases). Previously views would be listed with no information, only displaying errors. #120214

Bug fixes

-- Previously, on long-running [sessions](../v23.2/show-sessions.html) that issue many (hundreds of thousands or more) [transactions](../v23.2/transactions.html), CockroachDB's internal memory accounting system, the limit for which is configured via the [`--max-sql-memory` flag](../v23.2/cockroach-start.html#general)) could leak. This bug, in turn, could result in the error message `"root: memory budget exceeded"` for other queries. The bug was present in v23.2.3 and is now fixed. [#121875][#121875] -- Previously, [altering a table's locality](../v23.2/alter-table.html#set-locality) from [`REGIONAL BY ROW`](../v23.2/alter-table.html#set-the-table-locality-to-regional-by-row) to [`REGIONAL BY TABLE`](../v23.2/alter-table.html#regional-by-table) could cause [leaseholders](../v23.2/architecture/replication-layer.html#leases) to never move to the [database's primary region](../v23.2/alter-database.html#set-primary-region). This is now fixed. [#118794][#118794] -- A user with the [`VIEWACTIVITYREDACTED` privilege](../v23.2/security-reference/authorization.html#privileges) can no longer see constants inside of queries that originate from other users in the [`SHOW SESSIONS` result](../v23.2/show-sessions.html#response). Previously, this redaction did not occur. [#119884][#119884] -- Previously, the [`SHOW STATEMENTS`](../v23.2/show-statements.html) and the [`SHOW QUERIES`](../v23.2/show-statements.html#aliases) commands incorrectly required the user to have the [`VIEWACTIVITY` or `VIEWACTIVITYREDACTED` privilege](../v23.2/security-reference/authorization.html#privileges). However, a user always should be able to view their own queries, even without these privileges. This is now fixed. [#119884][#119884] -- Fixed a bug where [`RESTORE`](../v23.2/restore.html) on certain [`BACKUP`s](../v23.2/backup.html) would open a very large number of connections to the backup storage provider. 
[#119883][#119883] -- Fixed a bug that occurred when using [`ALTER TABLE`](../v23.2/alter-table.html) to [drop](../v23.2/alter-table.html#drop-constraint) and [add](../v23.2/alter-table.html#add-constraint) back a [`CHECK` constraint](../v23.2/check.html) with the same name. [#120075][#120075] -- Fixed a bug in which it was possible to set [session variable](../v23.2/session-variables.html) `transaction_read_only` to `false` during an [`AS OF SYSTEM TIME` transaction](../v23.2/set-transaction.html#parameters). [#120158][#120158] -- Fixed a bug where some files were not closed when inspecting [backup metadata](../v23.2/backup-architecture.html#metadata-writing-phase) during [`BACKUP`](../v23.2/backup.html) and [`RESTORE`](../v23.2/restore.html). [#119635][#119635] -- Fixed an intermittent page crash on the [**Schema Insights**](../cockroachcloud/insights-page.html#schema-insights-tab) view. [#120210][#120210] +- Previously, on long-running [sessions](../v23.2/show-sessions.html) that issue many (hundreds of thousands or more) [transactions](../v23.2/transactions.html), CockroachDB's internal memory accounting system, the limit for which is configured via the [`--max-sql-memory` flag](../v23.2/cockroach-start.html#general)) could leak. This bug, in turn, could result in the error message `"root: memory budget exceeded"` for other queries. The bug was present in v23.2.3 and is now fixed. #121875 +- Previously, [altering a table's locality](../v23.2/alter-table.html#set-locality) from [`REGIONAL BY ROW`](../v23.2/alter-table.html#set-the-table-locality-to-regional-by-row) to [`REGIONAL BY TABLE`](../v23.2/alter-table.html#regional-by-table) could cause [leaseholders](../v23.2/architecture/replication-layer.html#leases) to never move to the [database's primary region](../v23.2/alter-database.html#set-primary-region). This is now fixed. 
#118794 +- A user with the [`VIEWACTIVITYREDACTED` privilege](../v23.2/security-reference/authorization.html#privileges) can no longer see constants inside of queries that originate from other users in the [`SHOW SESSIONS` result](../v23.2/show-sessions.html#response). Previously, this redaction did not occur. #119884 +- Previously, the [`SHOW STATEMENTS`](../v23.2/show-statements.html) and the [`SHOW QUERIES`](../v23.2/show-statements.html#aliases) commands incorrectly required the user to have the [`VIEWACTIVITY` or `VIEWACTIVITYREDACTED` privilege](../v23.2/security-reference/authorization.html#privileges). However, a user always should be able to view their own queries, even without these privileges. This is now fixed. #119884 +- Fixed a bug where [`RESTORE`](../v23.2/restore.html) on certain [`BACKUP`s](../v23.2/backup.html) would open a very large number of connections to the backup storage provider. #119883 +- Fixed a bug that occurred when using [`ALTER TABLE`](../v23.2/alter-table.html) to [drop](../v23.2/alter-table.html#drop-constraint) and [add](../v23.2/alter-table.html#add-constraint) back a [`CHECK` constraint](../v23.2/check.html) with the same name. #120075 +- Fixed a bug in which it was possible to set [session variable](../v23.2/session-variables.html) `transaction_read_only` to `false` during an [`AS OF SYSTEM TIME` transaction](../v23.2/set-transaction.html#parameters). #120158 +- Fixed a bug where some files were not closed when inspecting [backup metadata](../v23.2/backup-architecture.html#metadata-writing-phase) during [`BACKUP`](../v23.2/backup.html) and [`RESTORE`](../v23.2/restore.html). #119635 +- Fixed an intermittent page crash on the [**Schema Insights**](../cockroachcloud/insights-page.html#schema-insights-tab) view. #120210
@@ -35,22 +35,3 @@ This release includes 65 merged PRs by 37 authors
-[#118794]: https://github.com/cockroachdb/cockroach/pull/118794 -[#119635]: https://github.com/cockroachdb/cockroach/pull/119635 -[#119768]: https://github.com/cockroachdb/cockroach/pull/119768 -[#119828]: https://github.com/cockroachdb/cockroach/pull/119828 -[#119854]: https://github.com/cockroachdb/cockroach/pull/119854 -[#119883]: https://github.com/cockroachdb/cockroach/pull/119883 -[#119884]: https://github.com/cockroachdb/cockroach/pull/119884 -[#119946]: https://github.com/cockroachdb/cockroach/pull/119946 -[#120011]: https://github.com/cockroachdb/cockroach/pull/120011 -[#120075]: https://github.com/cockroachdb/cockroach/pull/120075 -[#120158]: https://github.com/cockroachdb/cockroach/pull/120158 -[#120210]: https://github.com/cockroachdb/cockroach/pull/120210 -[#120212]: https://github.com/cockroachdb/cockroach/pull/120212 -[#120214]: https://github.com/cockroachdb/cockroach/pull/120214 -[#120243]: https://github.com/cockroachdb/cockroach/pull/120243 -[#120396]: https://github.com/cockroachdb/cockroach/pull/120396 -[#120933]: https://github.com/cockroachdb/cockroach/pull/120933 -[#121329]: https://github.com/cockroachdb/cockroach/pull/121329 -[#121875]: https://github.com/cockroachdb/cockroach/pull/121875 diff --git a/src/current/_includes/releases/v23.2/v23.2.5.md b/src/current/_includes/releases/v23.2/v23.2.5.md index a5fc11c5696..5d7c78dd149 100644 --- a/src/current/_includes/releases/v23.2/v23.2.5.md +++ b/src/current/_includes/releases/v23.2/v23.2.5.md @@ -6,35 +6,35 @@ Release Date: May 7, 2024

SQL language changes

-- The new [cluster setting](../v23.2/cluster-settings.html) [`sql.stats.virtual_computed_columns.enabled`](../v23.2/cluster-settings.html#setting-sql-stats-virtual-computed-columns-enabled) enables collection of [table statistics](../v23.2/cost-based-optimizer.html#table-statistics) on virtual [computed columns](../v23.2/computed-columns.html). [#120923][#120923] -- The new [session variable](../v23.2/session-variables.html) `optimizer_use_virtual_computed_column_stats` configures the [optimizer](../v23.2/cost-based-optimizer.html) to consider table statistics on virtual computed columns. [#121179][#121179] -- The new `FORCE_INVERTED_INDEX` [hint](../v23.2/indexes.html#selection) configures the [optimizer](../v23.2/cost-based-optimizer.html) to prefer a query plan scan over any inverted index of the hinted table. If no such query plan can be generated, an error is logged. [#122300][#122300] -- The [optimizer](../v23.2/cost-based-optimizer.html) can now plan constrained scans over [partial indexes](../v23.2/partial-indexes.html) in more cases, particularly on partial indexes with predicates referencing virtual [computed columns](../v23.2/cluster-settings.html#setting-sql-stats-virtual-computed-columns-enabled). [#123408][#123408] +- The new [cluster setting](../v23.2/cluster-settings.html) [`sql.stats.virtual_computed_columns.enabled`](../v23.2/cluster-settings.html#setting-sql-stats-virtual-computed-columns-enabled) enables collection of [table statistics](../v23.2/cost-based-optimizer.html#table-statistics) on virtual [computed columns](../v23.2/computed-columns.html). #120923 +- The new [session variable](../v23.2/session-variables.html) `optimizer_use_virtual_computed_column_stats` configures the [optimizer](../v23.2/cost-based-optimizer.html) to consider table statistics on virtual computed columns. 
#121179 +- The new `FORCE_INVERTED_INDEX` [hint](../v23.2/indexes.html#selection) configures the [optimizer](../v23.2/cost-based-optimizer.html) to prefer a query plan scan over any inverted index of the hinted table. If no such query plan can be generated, an error is logged. #122300 +- The [optimizer](../v23.2/cost-based-optimizer.html) can now plan constrained scans over [partial indexes](../v23.2/partial-indexes.html) in more cases, particularly on partial indexes with predicates referencing virtual [computed columns](../v23.2/cluster-settings.html#setting-sql-stats-virtual-computed-columns-enabled). #123408

Operational changes

-- A minimum [Raft](../v23.2/architecture/replication-layer.html#raft) scheduler concurrency is now enforced per [store](../v23.2/cockroach-start.html#storage) so that a node with many stores does not spread workers too thinly. This avoids high scheduler latency across [replicas](../v23.2/architecture/glossary.html#replica) on a store when load is imbalanced. [#120798][#120798] -- A [changefeed](../v23.2/change-data-capture-overview.html) optimization to reduce duplicates during aggregator restarts has been disabled due to poor performance. [#123596][#123596] +- A minimum [Raft](../v23.2/architecture/replication-layer.html#raft) scheduler concurrency is now enforced per [store](../v23.2/cockroach-start.html#storage) so that a node with many stores does not spread workers too thinly. This avoids high scheduler latency across [replicas](../v23.2/architecture/glossary.html#replica) on a store when load is imbalanced. #120798 +- A [changefeed](../v23.2/change-data-capture-overview.html) optimization to reduce duplicates during aggregator restarts has been disabled due to poor performance. #123596

DB Console changes

-- The **Commit Latency** chart in the [Changefeed Dashboard](../v23.2/ui-cdc-dashboard.html) now aggregates by max instead of by sum for multi-node changefeeds. This more accurately reflects the amount of time for events to be acknowledged by the downstream sink. [#121235][#121235] +- The **Commit Latency** chart in the [Changefeed Dashboard](../v23.2/ui-cdc-dashboard.html) now aggregates by max instead of by sum for multi-node changefeeds. This more accurately reflects the amount of time for events to be acknowledged by the downstream sink. #121235

Bug fixes

-- Fixed a slow memory leak when opening many new [connections](../v23.2/connect-to-the-database.html). This bug was introduced in v22.2.9 and v23.1.0. [#121055][#121055] -- Fixed a bug that occurred when using [`ALTER TABLE`](../v23.2/alter-table.html) to drop and re-add a [`CHECK` constraint](../v23.2/check.html) with the same name. [#121055][#121055] -- [Sequence](../v23.2/create-sequence.html) options `MINVALUE` and `MAXVALUE` automatically adjust to new types bounds. This mirrors the behavior of PostgreSQL. [#121309][#121309] -- Fixed a bug that could prevent timeseries graphs shown on the DB Console SQL Activity [Statement Details](../v23.2/ui-statements-page.html) page from rendering correctly when specifying a custom time range. [#121383][#121383] +- Fixed a slow memory leak when opening many new [connections](../v23.2/connect-to-the-database.html). This bug was introduced in v22.2.9 and v23.1.0. #121055 +- Fixed a bug that occurred when using [`ALTER TABLE`](../v23.2/alter-table.html) to drop and re-add a [`CHECK` constraint](../v23.2/check.html) with the same name. #121055 +- [Sequence](../v23.2/create-sequence.html) options `MINVALUE` and `MAXVALUE` automatically adjust to new types bounds. This mirrors the behavior of PostgreSQL. #121309 +- Fixed a bug that could prevent timeseries graphs shown on the DB Console SQL Activity [Statement Details](../v23.2/ui-statements-page.html) page from rendering correctly when specifying a custom time range. #121383 - Fixed a bug present since at least v21.1 that could lead to incorrect evaluation of an `IN` expression with: - [`INT2` or `INT4`](../v23.2/int.html) type on the left side, and - Values on the right side that are outside of the range of the left side. 
- [#121953][#121953] -- Fixed a leak in reported memory usage (not the actual memory usage) by the internal memory accounting system, the limit for which is configured via the [`--max-sql-memory`](../v23.2/cockroach-start.html#flags) flag when a long-running sessions issues hundreds of thousands or more [transactions](../v23.2/transactions.html). This reporting bug could cause `root: memory budget exceeded` errors for other queries. The bug was introduced in v23.1.17 and v23.2.3. [#121950][#121950] -- Fixed a bug introduced in v23.2.4 that could prevent collection of [table statistics](../v23.2/cost-based-optimizer.html#table-statistics) on tables that have on virtual [computed columns](../v23.2/computed-columns.html) of [user-defined type](../v23.2/create-type.html) when the newly-introduced [cluster setting](../v23.2/cluster-settings.html) [`sql.stats.virtual_computed_columns.enabled`](../v23.2/cluster-settings.html#setting-sql-stats-virtual-computed-columns-enabled) is set to `true` (defaults to `false`). The setting was introduced in v23.2.4 and is disabled by default. [#122319][#122319] -- Fixed a bug where a [`GRANT ... ON ALL TABLES`](../v23.2/grant.html) statement could fail if a sequence existed that did not support the [privilege](../v23.2/security-reference/authorization.html#privileges) being granted. [#122034][#122034] -- Fixed an existing bug where an unused value cannot be dropped from an [`ENUM`](../v23.2/enum.html) if the`ENUM` itself is referenced by a [user-defined function](../v23.2/user-defined-functions.html). A value can now be dropped from an`ENUM` as long as the value itself is not being referenced by any other data element, including a user-defined function. 
[#121237][#121237] + #121953 +- Fixed a leak in reported memory usage (not the actual memory usage) by the internal memory accounting system, the limit for which is configured via the [`--max-sql-memory`](../v23.2/cockroach-start.html#flags) flag when a long-running session issues hundreds of thousands or more [transactions](../v23.2/transactions.html). This reporting bug could cause `root: memory budget exceeded` errors for other queries. The bug was introduced in v23.1.17 and v23.2.3. #121950 +- Fixed a bug introduced in v23.2.4 that could prevent collection of [table statistics](../v23.2/cost-based-optimizer.html#table-statistics) on tables that have virtual [computed columns](../v23.2/computed-columns.html) of [user-defined type](../v23.2/create-type.html) when the newly-introduced [cluster setting](../v23.2/cluster-settings.html) [`sql.stats.virtual_computed_columns.enabled`](../v23.2/cluster-settings.html#setting-sql-stats-virtual-computed-columns-enabled) is set to `true` (defaults to `false`). The setting was introduced in v23.2.4 and is disabled by default. #122319 +- Fixed a bug where a [`GRANT ... ON ALL TABLES`](../v23.2/grant.html) statement could fail if a sequence existed that did not support the [privilege](../v23.2/security-reference/authorization.html#privileges) being granted. #122034 +- Fixed an existing bug where an unused value cannot be dropped from an [`ENUM`](../v23.2/enum.html) if the `ENUM` itself is referenced by a [user-defined function](../v23.2/user-defined-functions.html). A value can now be dropped from an `ENUM` as long as the value itself is not being referenced by any other data element, including a user-defined function. #121237
@@ -44,22 +44,3 @@ This release includes 79 merged PRs by 33 authors.
-[#120798]: https://github.com/cockroachdb/cockroach/pull/120798 -[#120923]: https://github.com/cockroachdb/cockroach/pull/120923 -[#121055]: https://github.com/cockroachdb/cockroach/pull/121055 -[#121179]: https://github.com/cockroachdb/cockroach/pull/121179 -[#121235]: https://github.com/cockroachdb/cockroach/pull/121235 -[#121237]: https://github.com/cockroachdb/cockroach/pull/121237 -[#121309]: https://github.com/cockroachdb/cockroach/pull/121309 -[#121383]: https://github.com/cockroachdb/cockroach/pull/121383 -[#121950]: https://github.com/cockroachdb/cockroach/pull/121950 -[#121953]: https://github.com/cockroachdb/cockroach/pull/121953 -[#122034]: https://github.com/cockroachdb/cockroach/pull/122034 -[#122162]: https://github.com/cockroachdb/cockroach/pull/122162 -[#122229]: https://github.com/cockroachdb/cockroach/pull/122229 -[#122300]: https://github.com/cockroachdb/cockroach/pull/122300 -[#122319]: https://github.com/cockroachdb/cockroach/pull/122319 -[#123408]: https://github.com/cockroachdb/cockroach/pull/123408 -[#123596]: https://github.com/cockroachdb/cockroach/pull/123596 -[622cd1c76]: https://github.com/cockroachdb/cockroach/commit/622cd1c76 -[894f152fa]: https://github.com/cockroachdb/cockroach/commit/894f152fa diff --git a/src/current/_includes/releases/v23.2/v23.2.6.md b/src/current/_includes/releases/v23.2/v23.2.6.md index 36dd1927e23..d230c848387 100644 --- a/src/current/_includes/releases/v23.2/v23.2.6.md +++ b/src/current/_includes/releases/v23.2/v23.2.6.md @@ -11,67 +11,67 @@ Release Date: June 11, 2024 - The cluster settings `changefeed.frontier_checkpoint_frequency` and `low changefeed.frontier_highwater_lag_checkpoint_threshold` were set low, which resulted in the initial scan taking many multiples of the configured frequency to complete. - There were multiple target tables with significant differences in row counts in one changefeed. - The changefeed target tables were large with many ranges. 
- - The initial scan took a long time to complete (an hour or longer). [#123966][#123966] + - The initial scan took a long time to complete (an hour or longer). #123966

SQL language changes

-- Updated the [`SHOW GRANTS`]({% link v23.2/show-grants.md %}) responses to display the `object_type` and `object_name`, which has replaced the `relation_name` column. [#122822][#122822] -- Added [external connection]({% link v23.2/create-external-connection.md %}) granted privileges to the [`SHOW GRANTS`]({% link v23.2/show-grants.md %}) command. [#122822][#122822] +- Updated the [`SHOW GRANTS`]({% link v23.2/show-grants.md %}) responses to display the `object_type` and `object_name`, which has replaced the `relation_name` column. #122822 +- Added [external connection]({% link v23.2/create-external-connection.md %}) granted privileges to the [`SHOW GRANTS`]({% link v23.2/show-grants.md %}) command. #122822 - Introduced three new [cluster settings]({% link v23.2/cluster-settings.md %}) for controlling table statistics forecasting: - [`sql.stats.forecasts.min_observations`]({% link v23.2/cluster-settings.md %}) is the minimum number of observed statistics required to produce a forecast. - [`sql.stats.forecasts.min_goodness_of_fit`]({% link v23.2/cluster-settings.md %}) is the minimum R² (goodness of fit) measurement required from all predictive models to use a forecast. - - [`sql.stats.forecasts.max_decrease`]({% link v23.2/cluster-settings.md %}) is the most a prediction can decrease, expressed as the minimum ratio of the prediction to the lowest prior observation. [#122458][#122458] -- Added a new [session setting]({% link v23.2/session-variables.md %}) `optimizer_use_improved_multi_column_selectivity_estimate`, which if enabled, causes the [optimizer]({% link v23.2/cost-based-optimizer.md %}) to use an improved selectivity estimate for multi-column predicates. This setting will default to `true` on v24.2 and later, and `false` on earlier versions. 
[#123100][#123100] -- The [optimizer]({% link v23.2/cost-based-optimizer.md %}) can now plan constrained scans over partial indexes in more cases, particularly on [partial indexes]({% link v23.2/partial-indexes.md %}) with predicates referencing [virtual computed columns]({% link v23.2/computed-columns.md %}). [#123469][#123469] -- The row-level TTL setting [`ttl_delete_rate_limit`]({% link v23.2/row-level-ttl.md %}) is now set to `100` by default, which sets the rate limit for deleting expired rows to `100`. [#124353][#124353] + - [`sql.stats.forecasts.max_decrease`]({% link v23.2/cluster-settings.md %}) is the most a prediction can decrease, expressed as the minimum ratio of the prediction to the lowest prior observation. #122458 +- Added a new [session setting]({% link v23.2/session-variables.md %}) `optimizer_use_improved_multi_column_selectivity_estimate`, which if enabled, causes the [optimizer]({% link v23.2/cost-based-optimizer.md %}) to use an improved selectivity estimate for multi-column predicates. This setting will default to `true` on v24.2 and later, and `false` on earlier versions. #123100 +- The [optimizer]({% link v23.2/cost-based-optimizer.md %}) can now plan constrained scans over partial indexes in more cases, particularly on [partial indexes]({% link v23.2/partial-indexes.md %}) with predicates referencing [virtual computed columns]({% link v23.2/computed-columns.md %}). #123469 +- The row-level TTL setting [`ttl_delete_rate_limit`]({% link v23.2/row-level-ttl.md %}) is now set to `100` by default, which sets the rate limit for deleting expired rows to `100`. #124353

Operational changes

- Two new metrics track the status of the SQL Activity Update job, which pre-aggregates top K information within the SQL statistics subsytem and writes the results to `system.statement_activity` and `system.transaction_activity`: - `sql.stats.activity.updates.successful`: Number of successful updates made by the SQL activity updater job. - - `sql.stats.activity.update.latency`: The latency of updates made by the SQL activity updater job. Includes failed update attempts. [#123960][#123960] -- Added a new counter [metric]({% link v23.2/metrics.md %}), `sql.stats.flush.done_signals.ignored`, that tracks the number of times the SQL activity update job has ignored the signal that indicates that a flush has completed. This metric may indicate that the SQL activity update job is taking longer than expected to complete. [#123960][#123960] -- Added a new counter [metric]({% link v23.2/metrics.md %}), `sql.stats.activity.updates.failed`, to measure the number of update attempts made by the SQL activity update job that failed with errors. The SQL activity update job is used to pre-aggregate top K information within the SQL stats subsystem and write the results to `system.statement_activity` and `system.transaction_activity`. [#123960][#123960] -- Added a new counter [metric]({% link v23.2/metrics.md %}), `sql.stats.flush.fingerprint.count`, that tracks the number of unique statement and transaction fingerprints included in the SQL stats flush. [#123960][#123960] -- Added the `sql.pgwire.pipeline.count` [metric]({% link v23.2/metrics.md %}), which measures how many wire protocol commands have been received by the server, but have not yet started processing. This metric will only grow if clients are using the [pipeline mode](https://www.postgresql.org/docs/current/libpq-pipeline-mode.html) of the PostgreSQL wire protocol. 
[#124260][#124260] -- The `client_authentication_ok` and `client_session_end` events are now logged to the [`SESSIONS` log channel]({% link v23.2/logging-use-cases.md %}#sessions) unconditionally. Previously, these would only be logged if the `server.auth_log.sql_sessions.enabled` [cluster setting]({% link v23.2/cluster-settings.md %}) was set to `true`. All other `SESSIONS` log messages are still only logged if `server.auth_log.sql_sessions.enabled` or `server.auth_log.sql_connections.enabled` are set to `true`. To not show `client_authentication_ok` and `client_session_end` events, disable the `SESSIONS` log channel entirely. [#124374][#124374] + - `sql.stats.activity.update.latency`: The latency of updates made by the SQL activity updater job. Includes failed update attempts. #123960 +- Added a new counter [metric]({% link v23.2/metrics.md %}), `sql.stats.flush.done_signals.ignored`, that tracks the number of times the SQL activity update job has ignored the signal that indicates that a flush has completed. This metric may indicate that the SQL activity update job is taking longer than expected to complete. #123960 +- Added a new counter [metric]({% link v23.2/metrics.md %}), `sql.stats.activity.updates.failed`, to measure the number of update attempts made by the SQL activity update job that failed with errors. The SQL activity update job is used to pre-aggregate top K information within the SQL stats subsystem and write the results to `system.statement_activity` and `system.transaction_activity`. #123960 +- Added a new counter [metric]({% link v23.2/metrics.md %}), `sql.stats.flush.fingerprint.count`, that tracks the number of unique statement and transaction fingerprints included in the SQL stats flush. #123960 +- Added the `sql.pgwire.pipeline.count` [metric]({% link v23.2/metrics.md %}), which measures how many wire protocol commands have been received by the server, but have not yet started processing. 
This metric will only grow if clients are using the [pipeline mode](https://www.postgresql.org/docs/current/libpq-pipeline-mode.html) of the PostgreSQL wire protocol. #124260 +- The `client_authentication_ok` and `client_session_end` events are now logged to the [`SESSIONS` log channel]({% link v23.2/logging-use-cases.md %}#sessions) unconditionally. Previously, these would only be logged if the `server.auth_log.sql_sessions.enabled` [cluster setting]({% link v23.2/cluster-settings.md %}) was set to `true`. All other `SESSIONS` log messages are still only logged if `server.auth_log.sql_sessions.enabled` or `server.auth_log.sql_connections.enabled` are set to `true`. To not show `client_authentication_ok` and `client_session_end` events, disable the `SESSIONS` log channel entirely. #124374

DB Console changes

-- The [**Database**]({% link v23.2/ui-databases-page.md %}) details and **Table** details pages now display the correct stats in the **Table Stats Last Updated**. [#122815][#122815] -- Viewing the [**SQL Activity**]({% link v23.2/ui-statements-page.md %}#active-executions-view) sorted by `% of Runtime` now correctly sorts entries by the runtime amount. [#123901][#123901] +- The [**Database**]({% link v23.2/ui-databases-page.md %}) details and **Table** details pages now display the correct stats in the **Table Stats Last Updated**. #122815 +- Viewing the [**SQL Activity**]({% link v23.2/ui-statements-page.md %}#active-executions-view) sorted by `% of Runtime` now correctly sorts entries by the runtime amount. #123901

Bug fixes

-- Fixed a bug where [client certificate authentication]({% link v23.2/authentication.md %}#client-authentication) combined with [identity maps]({% link v23.2/sso-sql.md %}#identity-map-configuration) (`server.identity_map.configuration`) did not work. For the feature to work correctly, the client must specify a valid database user in the [connection string]({% link v23.2/connection-parameters.md %}). [#122749][#122749] -- Fixed a bug where the [row-based execution engine]({% link v23.2/architecture/sql-layer.md %}#query-execution) could drop a [`LIMIT`]({% link v23.2/limit-offset.md %}) clause when there was an [`ORDER BY`]({% link v23.2/order-by.md %}) clause, and the ordering was partially provided by an input operator. For example, this bug could occur with an ordering such as `ORDER BY a, b` when the scanned index was only ordered on column `a`. The impact of this bug was that more rows may have been returned than specified by the `LIMIT` clause. This bug is only present when not using the [vectorized execution engine]({% link v23.2/architecture/sql-layer.md %}#vectorized-query-execution); that is, when running with `SET vectorize = off;`. This bug has existed since CockroachDB v22.1. [#122836][#122836] -- Fixed a bug in the DB Console's [**Custom Chart**]({% link v23.2/ui-custom-chart-debug-page.md %}) tool where store-level metrics were displayed only for the first store ID associated with the node. Now data is displayed for all stores present on a node, and a single time series is shown for each store, rather than an aggregated value for all of the node's stores. This allows finer-grained monitoring of store-level metrics. [#122703][#122703] -- Fixed a bug where privileges granted for [external connections]({% link v23.2/create-external-connection.md %}) were incorrectly showing up in [`SHOW SYSTEM GRANTS`]({% link v23.2/show-system-grants.md %}), but were not useful because there was no associated object name. 
The privileges no longer appear in `SHOW SYSTEM GRANTS`. Instead, the `SHOW GRANTS ON EXTERNAL CONNECTION` statement should be used. [#122905][#122905] -- Statistics forecasts of zero rows can cause suboptimal [query plans]({% link v23.2/cost-based-optimizer.md %}). Forecasting will now avoid predicting zero rows for most downward-trending statistics. [#122458][#122458] +- Fixed a bug where [client certificate authentication]({% link v23.2/authentication.md %}#client-authentication) combined with [identity maps]({% link v23.2/sso-sql.md %}#identity-map-configuration) (`server.identity_map.configuration`) did not work. For the feature to work correctly, the client must specify a valid database user in the [connection string]({% link v23.2/connection-parameters.md %}). #122749 +- Fixed a bug where the [row-based execution engine]({% link v23.2/architecture/sql-layer.md %}#query-execution) could drop a [`LIMIT`]({% link v23.2/limit-offset.md %}) clause when there was an [`ORDER BY`]({% link v23.2/order-by.md %}) clause, and the ordering was partially provided by an input operator. For example, this bug could occur with an ordering such as `ORDER BY a, b` when the scanned index was only ordered on column `a`. The impact of this bug was that more rows may have been returned than specified by the `LIMIT` clause. This bug is only present when not using the [vectorized execution engine]({% link v23.2/architecture/sql-layer.md %}#vectorized-query-execution); that is, when running with `SET vectorize = off;`. This bug has existed since CockroachDB v22.1. #122836 +- Fixed a bug in the DB Console's [**Custom Chart**]({% link v23.2/ui-custom-chart-debug-page.md %}) tool where store-level metrics were displayed only for the first store ID associated with the node. Now data is displayed for all stores present on a node, and a single time series is shown for each store, rather than an aggregated value for all of the node's stores. 
This allows finer-grained monitoring of store-level metrics. #122703 +- Fixed a bug where privileges granted for [external connections]({% link v23.2/create-external-connection.md %}) were incorrectly showing up in [`SHOW SYSTEM GRANTS`]({% link v23.2/show-system-grants.md %}), but were not useful because there was no associated object name. The privileges no longer appear in `SHOW SYSTEM GRANTS`. Instead, the `SHOW GRANTS ON EXTERNAL CONNECTION` statement should be used. #122905 +- Statistics forecasts of zero rows can cause suboptimal [query plans]({% link v23.2/cost-based-optimizer.md %}). Forecasting will now avoid predicting zero rows for most downward-trending statistics. #122458 - Fixed a bug introduced in v23.2 that could cause a [PL/pgSQL]({% link v23.2/plpgsql.md %}) routine to return incorrect results when the routine included: - At least one parameter. - - An `IF` statement with one leak-proof branch and one branch with side effects. [#120742][#120742] -- Fixed a bug that could result in an internal error when attempting to create a [PL/pgSQL]({% link v23.2/plpgsql.md %}) routine using the (currently unsupported) `%ROWTYPE` syntax for a variable declaration. [#123010][#123010] -- Fixed a bug where a [`RESTORE`]({% link v23.2/restore.md %}) of a backup that itself contained a table created by the `RESTORE` of a table with an in-progress [`IMPORT INTO`]({% link v23.2/import-into.md %}) would fail to restore all rows. [#120543][#120543] -- Fixed a bug introduced in v23.2 that could cause a [PL/pgSQL]({% link v23.2/plpgsql.md %}) variable assignment to not be executed if the variable was never referenced after the assignment. [#123116][#123116] -- Fixed a bug where CockroachDB could run into an `attempting to append refresh spans after the tracked timestamp has moved forward` internal error in some edge cases. The bug had been present since v22.2. 
[#123150][#123150] -- A [job]({% link v23.2/show-jobs.md %}) will now log rather than fail if it reports an out-of-bound progress fraction. [#122964][#122964] -- Fixed a bug that would occur when [`ALTER TYPE ... DROP VALUE`]({% link v23.2/alter-type.md %}) is followed by [`DROP SCHEMA CASCADE ...`]({% link v23.2/drop-schema.md %}) in the same transaction. Previously, the `ALTER TYPE` schema change would get queued up to run at commit time, but by that point, the type may have already been removed, so the commit could fail. [#123576][#123576] -- Fixed a bug that could lead to descriptors with self references that pointed to incorrect descriptor IDs. Now, tables that see the error `invalid inbound foreign key ... origin table ID should be` or `invalid outbound foreign key ... reference table ID should be` will automatically repair post deserialization. [#123681][#123681] -- Fixed a bug where a failed [restore]({% link v23.2/restore.md %}) job could leave the system in a state where re-attempting the restore was not possible without manual intervention. [#123463][#123463] -- [Index recommendations]({% link v23.2/ui-databases-page.md %}#index-recommendations) in the [DB Console]({% link v23.2/ui-overview.md %}) will now function properly for indexes on tables or columns whose names contain quotation marks or whitespace. For example: `CREATE INDEX ON "my table" ("my col");`. [#122119][#122119] -- Fixed a crash introduced in v23.2.5 that could occur when planning [statistics collection]({% link v23.2/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-clusters) on a table with a [virtual computed column]({% link v23.2/computed-columns.md %}) using a user-defined type when the newly introduced [cluster setting]({% link v23.2/cluster-settings.md %}) `sql.stats.virtual_computed_columns.enabled` is set to `true`. (The setting was introduced in v23.2.4 and set to `false` by default.) 
[#124080][#124080] -- Added automated clean up and validation for [dropped roles]({% link v23.2/drop-role.md %}) inside descriptors. [#124670][#124670] -- Fixed a bug where [`DROP ROLE`]({% link v23.2/drop-role.md %}) and [`DROP USER`]({% link v23.2/drop-user.md %}) could leave references behind inside types, which could prevent [`SHOW GRANTS`]({% link v23.2/show-grants.md %}) from working. [#124668][#124668] -- Fixed a bug where a change to a [user-defined type]({% link v23.2/create-type.md %}) could cause queries against tables using that type to fail with the error `histogram.go:694: span must be fully contained in the bucket`. The change to the user-defined type could come directly from an [`ALTER TYPE`]({% link v23.2/alter-type.md %}) statement, or indirectly from an [`ALTER DATABASE ADD REGION`]({% link v23.2/alter-database.md %}#add-region) or [`DROP REGION`]({% link v23.2/alter-database.md %}#drop-region) statement (which implicitly change the `crdb_internal_region` type). This was present since user-defined types were introduced in v20.2. [#124854][#124854] + - An `IF` statement with one leak-proof branch and one branch with side effects. #120742 +- Fixed a bug that could result in an internal error when attempting to create a [PL/pgSQL]({% link v23.2/plpgsql.md %}) routine using the (currently unsupported) `%ROWTYPE` syntax for a variable declaration. #123010 +- Fixed a bug where a [`RESTORE`]({% link v23.2/restore.md %}) of a backup that itself contained a table created by the `RESTORE` of a table with an in-progress [`IMPORT INTO`]({% link v23.2/import-into.md %}) would fail to restore all rows. #120543 +- Fixed a bug introduced in v23.2 that could cause a [PL/pgSQL]({% link v23.2/plpgsql.md %}) variable assignment to not be executed if the variable was never referenced after the assignment. 
#123116 +- Fixed a bug where CockroachDB could run into an `attempting to append refresh spans after the tracked timestamp has moved forward` internal error in some edge cases. The bug had been present since v22.2. #123150 +- A [job]({% link v23.2/show-jobs.md %}) will now log rather than fail if it reports an out-of-bound progress fraction. #122964 +- Fixed a bug that would occur when [`ALTER TYPE ... DROP VALUE`]({% link v23.2/alter-type.md %}) is followed by [`DROP SCHEMA CASCADE ...`]({% link v23.2/drop-schema.md %}) in the same transaction. Previously, the `ALTER TYPE` schema change would get queued up to run at commit time, but by that point, the type may have already been removed, so the commit could fail. #123576 +- Fixed a bug that could lead to descriptors with self references that pointed to incorrect descriptor IDs. Now, tables that see the error `invalid inbound foreign key ... origin table ID should be` or `invalid outbound foreign key ... reference table ID should be` will automatically repair post deserialization. #123681 +- Fixed a bug where a failed [restore]({% link v23.2/restore.md %}) job could leave the system in a state where re-attempting the restore was not possible without manual intervention. #123463 +- [Index recommendations]({% link v23.2/ui-databases-page.md %}#index-recommendations) in the [DB Console]({% link v23.2/ui-overview.md %}) will now function properly for indexes on tables or columns whose names contain quotation marks or whitespace. For example: `CREATE INDEX ON "my table" ("my col");`. 
#122119 +- Fixed a crash introduced in v23.2.5 that could occur when planning [statistics collection]({% link v23.2/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-clusters) on a table with a [virtual computed column]({% link v23.2/computed-columns.md %}) using a user-defined type when the newly introduced [cluster setting]({% link v23.2/cluster-settings.md %}) `sql.stats.virtual_computed_columns.enabled` is set to `true`. (The setting was introduced in v23.2.4 and set to `false` by default.) #124080 +- Added automated clean up and validation for [dropped roles]({% link v23.2/drop-role.md %}) inside descriptors. #124670 +- Fixed a bug where [`DROP ROLE`]({% link v23.2/drop-role.md %}) and [`DROP USER`]({% link v23.2/drop-user.md %}) could leave references behind inside types, which could prevent [`SHOW GRANTS`]({% link v23.2/show-grants.md %}) from working. #124668 +- Fixed a bug where a change to a [user-defined type]({% link v23.2/create-type.md %}) could cause queries against tables using that type to fail with the error `histogram.go:694: span must be fully contained in the bucket`. The change to the user-defined type could come directly from an [`ALTER TYPE`]({% link v23.2/alter-type.md %}) statement, or indirectly from an [`ALTER DATABASE ADD REGION`]({% link v23.2/alter-database.md %}#add-region) or [`DROP REGION`]({% link v23.2/alter-database.md %}#drop-region) statement (which implicitly change the `crdb_internal_region` type). This was present since user-defined types were introduced in v20.2. #124854

Performance improvements

-- More efficient [query plans]({% link v23.2/cost-based-optimizer.md %}) are now generated for queries with text similarity filters, for example, `text_col % 'foobar'`. These plans are generated if the `optimizer_use_trigram_similarity_optimization` [session setting]({% link v23.2/session-variables.md %}) is enabled. It is disabled by default. [#122753][#122753] -- The [optimizer]({% link v23.2/cost-based-optimizer.md %}) now costs [`distinct-on`]({% link v23.2/select-clause.md %}#eliminate-duplicate-rows) operators more accurately. It may produce more efficient query plans in some cases. [#122844][#122844] -- Added a new [session setting]({% link v23.2/session-variables.md %}) `optimizer_use_improved_zigzag_join_costing`. When enabled and when the [cluster setting]({% link v23.2/cluster-settings.md %}) `enable_zigzag_join` is also enabled, the cost of zigzag joins is updated such that a zigzag join will be chosen over a scan only if it produces fewer rows than a scan. [#123100][#123100] -- Improved the selectivity estimation of multi-column filters when the multi-column distinct count is high. This prevents the [optimizer]({% link v23.2/cost-based-optimizer.md %}) from choosing a bad query plan due to over-estimating the selectivity of a multi-column predicate. [#123100][#123100] -- Improved the efficiency of error handling in the [vectorized execution engine]({% link v23.2/vectorized-execution.md %}) to reduce the CPU overhead of statement timeout handling and reduce the potential for more statement timeouts. [#123502][#123502] +- More efficient [query plans]({% link v23.2/cost-based-optimizer.md %}) are now generated for queries with text similarity filters, for example, `text_col % 'foobar'`. These plans are generated if the `optimizer_use_trigram_similarity_optimization` [session setting]({% link v23.2/session-variables.md %}) is enabled. It is disabled by default. 
#122753 +- The [optimizer]({% link v23.2/cost-based-optimizer.md %}) now costs [`distinct-on`]({% link v23.2/select-clause.md %}#eliminate-duplicate-rows) operators more accurately. It may produce more efficient query plans in some cases. #122844 +- Added a new [session setting]({% link v23.2/session-variables.md %}) `optimizer_use_improved_zigzag_join_costing`. When enabled and when the [cluster setting]({% link v23.2/cluster-settings.md %}) `enable_zigzag_join` is also enabled, the cost of zigzag joins is updated such that a zigzag join will be chosen over a scan only if it produces fewer rows than a scan. #123100 +- Improved the selectivity estimation of multi-column filters when the multi-column distinct count is high. This prevents the [optimizer]({% link v23.2/cost-based-optimizer.md %}) from choosing a bad query plan due to over-estimating the selectivity of a multi-column predicate. #123100 +- Improved the efficiency of error handling in the [vectorized execution engine]({% link v23.2/vectorized-execution.md %}) to reduce the CPU overhead of statement timeout handling and reduce the potential for more statement timeouts. #123502
@@ -81,41 +81,3 @@ This release includes 115 merged PRs by 32 authors.
-[#120543]: https://github.com/cockroachdb/cockroach/pull/120543 -[#120742]: https://github.com/cockroachdb/cockroach/pull/120742 -[#122119]: https://github.com/cockroachdb/cockroach/pull/122119 -[#122458]: https://github.com/cockroachdb/cockroach/pull/122458 -[#122703]: https://github.com/cockroachdb/cockroach/pull/122703 -[#122749]: https://github.com/cockroachdb/cockroach/pull/122749 -[#122753]: https://github.com/cockroachdb/cockroach/pull/122753 -[#122815]: https://github.com/cockroachdb/cockroach/pull/122815 -[#122822]: https://github.com/cockroachdb/cockroach/pull/122822 -[#122836]: https://github.com/cockroachdb/cockroach/pull/122836 -[#122844]: https://github.com/cockroachdb/cockroach/pull/122844 -[#122905]: https://github.com/cockroachdb/cockroach/pull/122905 -[#122964]: https://github.com/cockroachdb/cockroach/pull/122964 -[#123010]: https://github.com/cockroachdb/cockroach/pull/123010 -[#123067]: https://github.com/cockroachdb/cockroach/pull/123067 -[#123100]: https://github.com/cockroachdb/cockroach/pull/123100 -[#123116]: https://github.com/cockroachdb/cockroach/pull/123116 -[#123150]: https://github.com/cockroachdb/cockroach/pull/123150 -[#123374]: https://github.com/cockroachdb/cockroach/pull/123374 -[#123463]: https://github.com/cockroachdb/cockroach/pull/123463 -[#123469]: https://github.com/cockroachdb/cockroach/pull/123469 -[#123502]: https://github.com/cockroachdb/cockroach/pull/123502 -[#123576]: https://github.com/cockroachdb/cockroach/pull/123576 -[#123594]: https://github.com/cockroachdb/cockroach/pull/123594 -[#123681]: https://github.com/cockroachdb/cockroach/pull/123681 -[#123901]: https://github.com/cockroachdb/cockroach/pull/123901 -[#123960]: https://github.com/cockroachdb/cockroach/pull/123960 -[#123966]: https://github.com/cockroachdb/cockroach/pull/123966 -[#124071]: https://github.com/cockroachdb/cockroach/pull/124071 -[#124080]: https://github.com/cockroachdb/cockroach/pull/124080 -[#124083]: 
https://github.com/cockroachdb/cockroach/pull/124083 -[#124260]: https://github.com/cockroachdb/cockroach/pull/124260 -[#124298]: https://github.com/cockroachdb/cockroach/pull/124298 -[#124353]: https://github.com/cockroachdb/cockroach/pull/124353 -[#124374]: https://github.com/cockroachdb/cockroach/pull/124374 -[#124668]: https://github.com/cockroachdb/cockroach/pull/124668 -[#124670]: https://github.com/cockroachdb/cockroach/pull/124670 -[#124854]: https://github.com/cockroachdb/cockroach/pull/124854 diff --git a/src/current/_includes/releases/v23.2/v23.2.7.md b/src/current/_includes/releases/v23.2/v23.2.7.md index 08dc368650d..69e0e8c9e31 100644 --- a/src/current/_includes/releases/v23.2/v23.2.7.md +++ b/src/current/_includes/releases/v23.2/v23.2.7.md @@ -6,13 +6,13 @@ Release Date: July 2, 2024

{{ site.data.products.enterprise }} edition changes

-- [Changefeeds]({% link v23.2/change-data-capture-overview.md %}) can use the bulk oracle for planning, which distributes work evenly across all [replicas]({% link v23.2/architecture/reads-and-writes-overview.md %}#replica) in the locality filter, including followers if enabled. Set the `changefeed.random_replica_selection.enabled` [cluster setting]({% link v23.2/cluster-settings.md %}) to `true` to enable this planning behavior. To use the previous bin-packing oracle, set the cluster setting `changefeed.random_replica_selection.enabled` to `false`. [#124925][#124925] -- [`ALTER CHANGEFEED`]({% link v23.2/alter-changefeed.md %}) no longer removes the [CDC query]({% link v23.2/cdc-queries.md %}) when modifying changefeed properties. [#125437][#125437] +- [Changefeeds]({% link v23.2/change-data-capture-overview.md %}) can use the bulk oracle for planning, which distributes work evenly across all [replicas]({% link v23.2/architecture/reads-and-writes-overview.md %}#replica) in the locality filter, including followers if enabled. Set the `changefeed.random_replica_selection.enabled` [cluster setting]({% link v23.2/cluster-settings.md %}) to `true` to enable this planning behavior. To use the previous bin-packing oracle, set the cluster setting `changefeed.random_replica_selection.enabled` to `false`. #124925 +- [`ALTER CHANGEFEED`]({% link v23.2/alter-changefeed.md %}) no longer removes the [CDC query]({% link v23.2/cdc-queries.md %}) when modifying changefeed properties. #125437

SQL language changes

-- Precision is no longer limited when encoding `geo` data types to JSON. [#124535][#124535] -- When the new `optimizer_push_offset_into_index_join` [session setting]({% link v23.2/set-vars.md %}) is enabled, the [optimizer]({% link v23.2/cost-based-optimizer.md %}) attempts to produce more efficient query plans by attempting to push offset expressions into index join expressions to produce more efficient query plans. [#124492][#124492] +- Precision is no longer limited when encoding `geo` data types to JSON. #124535 +- When the new `optimizer_push_offset_into_index_join` [session setting]({% link v23.2/set-vars.md %}) is enabled, the [optimizer]({% link v23.2/cost-based-optimizer.md %}) attempts to produce more efficient query plans by pushing offset expressions into index join expressions. #124492

General changes

@@ -20,35 +20,35 @@ Release Date: July 2, 2024

Operational changes

-- Improved metrics related to [disk usage]({% link v23.2/ui-storage-dashboard.md %}#capacity-metrics) reporting for volumes that dynamically change their size over time. [#125107][#125107] +- Improved metrics related to [disk usage]({% link v23.2/ui-storage-dashboard.md %}#capacity-metrics) reporting for volumes that dynamically change their size over time. #125107

Security changes

-- Improved the automated cleanup when dropping roles inside descriptors. [#124665][#124665] +- Improved the automated cleanup when dropping roles inside descriptors. #124665

Bug fixes

-- Fixed a bug where a range with a replication factor of `1` to be scaled up to a replication factor of `2`. [#124487][#124487] -- Fixed a bug that could cause leases to thrash between nodes when perturbed with a replication factor of `1`. [#124487][#124487] -- Fixed a bug where, when the `ttl_row_stats_poll_interval` storage parameter is non-zero, the job to update row statistics for a table with [row-level TTL]({% link v23.2/row-level-ttl.md %}) enabled could be blocked from completing by the queries issued to update the row statistics. Now, if the job completes, these queries are cancelled, and the `jobs.row_level_ttl.total_rows` and `jobs.row_level_ttl.total_expired_rows` metrics will report 0 if the job finishes before the queries to update the row statistics complete. [#124626][#124626] -- Fixed a bug where the `results_buffer_size` [session setting]({% link v23.2/set-vars.md %}) could not be configured using the `options` query parameter in the connection string, but only as a top-level query parameter. This variable cannot be changed by using the `SET` command after the session begins. [#124774][#124774] -- Fixed a bug where dropping a role or user could leave references behind inside types. This in turn could prevent the [`SHOW GRANTS`]({% link v23.2/show-grants.md %}) command from working correctly. [#124644][#124644] - -- Fixed a bug where the [`ALTER TABLE ... ALTER PRIMARY KEY`]({% link v23.2/alter-table.md %}#alter-primary-key) command could hang for a table if its indexes are referred to by views or functions using the `force syntax` syntax. [#124569][#124569] -- Fixed a bug where the [`SHOW TYPES`]({% link v23.2/show-types.md %}) command omitted user-defined composite types. This bug was introduced in v23.1. 
[#124816][#124816] -- Fixed a bug where if a column name that contains UTF-8 characters is referenced in the `STORING()` clause of the [`CREATE INDEX`]({% link v23.2/create-index.md %}) command, the [declarative schema changer]({% link v23.2/online-schema-changes.md %}) cannot detect whether the column is already handled by an existing index. [#125211][#125211] -- Fixed a bug where the [declarative schema changer]({% link v23.2/online-schema-changes.md %}) erroneously includes virtual columns that are referenced in the `STORING()` clause of the [`CREATE INDEX`]({% link v23.2/create-index.md %}) command. [#125211][#125211] -- Fixed a bug introduced in v20.2, where a change to a user-defined type could cause queries against tables using that type to fail with the error like `histogram.go: span must be fully contained in the bucket`. This bug could occur if the change was from an [`ALTER TABLE`]({% link v23.2/alter-table.md %}) command or from an [`ALTER DATABASE ... ADD REGION`]({% link v23.2/alter-database.md %}) or [`ALTER DATABASE ... DROP REGION`]({% link v23.2/alter-database.md %}) command, which implicitly change the non-public `crdb_internal_region` type. [#124853][#124853] -- Fixed a bug where telemetry logs could emit the same statement fingerprint ID for different SQL statements. [#125043][#125043] -- Fixed a bug where adding a column with a default value of an empty array could fail. [#125326][#125326] -- Fixed a bug where the [declarative schema changer]({% link v23.2/online-schema-changes.md %}) could erroneously succeed despite a violation of an `ALTER TABLE ... ADD CONSTRAINT UNIQUE` constraint. Now such a violation results in an error message with the error code `42601`. 
[#125418][#125418] -- Fixed a [changefeed]({% link v23.2/create-and-configure-changefeeds.md %}) panic in v24.1, v23.2, and v23.1 when the [cluster setting]({% link v23.2/cluster-settings.md %}) `changefeed.aggregator.flush_jitter` [cluster setting]({% link v23.2/cluster-settings.md %}) is configured and a changefeed's [`min_checkpoint_frequency`]({% link v23.2/create-changefeed.md %}#min-checkpoint-frequency) option is set to zero. [#125469][#125469] -- Fixed a bug where the public schema was erroneously created with its owner set to the `admin` role instead of the database owner. Ownership of the public schema can be altered after the schema is created. [#125535][#125535] -- Fixed a bug introduced in v23.2.0 where inserting rows into a [`REGIONAL BY ROW` table]({% link v23.2/table-localities.md %}#regional-by-row-tables) could cause an internal error if the source was a `VALUES` clause with a single row and at least one boolean expression. [#126208][#126208] +- Fixed a bug that could cause a range with a replication factor of `1` to be scaled up to a replication factor of `2`. #124487 +- Fixed a bug that could cause leases to thrash between nodes when perturbed with a replication factor of `1`. #124487 +- Fixed a bug where, when the `ttl_row_stats_poll_interval` storage parameter is non-zero, the job to update row statistics for a table with [row-level TTL]({% link v23.2/row-level-ttl.md %}) enabled could be blocked from completing by the queries issued to update the row statistics. Now, if the job completes, these queries are cancelled, and the `jobs.row_level_ttl.total_rows` and `jobs.row_level_ttl.total_expired_rows` metrics will report 0 if the job finishes before the queries to update the row statistics complete. #124626 +- Fixed a bug where the `results_buffer_size` [session setting]({% link v23.2/set-vars.md %}) could not be configured using the `options` query parameter in the connection string, but only as a top-level query parameter. 
This variable cannot be changed by using the `SET` command after the session begins. #124774 +- Fixed a bug where dropping a role or user could leave references behind inside types. This in turn could prevent the [`SHOW GRANTS`]({% link v23.2/show-grants.md %}) command from working correctly. #124644 + +- Fixed a bug where the [`ALTER TABLE ... ALTER PRIMARY KEY`]({% link v23.2/alter-table.md %}#alter-primary-key) command could hang for a table if its indexes are referred to by views or functions using the `force syntax` syntax. #124569 +- Fixed a bug where the [`SHOW TYPES`]({% link v23.2/show-types.md %}) command omitted user-defined composite types. This bug was introduced in v23.1. #124816 +- Fixed a bug where if a column name that contains UTF-8 characters is referenced in the `STORING()` clause of the [`CREATE INDEX`]({% link v23.2/create-index.md %}) command, the [declarative schema changer]({% link v23.2/online-schema-changes.md %}) cannot detect whether the column is already handled by an existing index. #125211 +- Fixed a bug where the [declarative schema changer]({% link v23.2/online-schema-changes.md %}) erroneously includes virtual columns that are referenced in the `STORING()` clause of the [`CREATE INDEX`]({% link v23.2/create-index.md %}) command. #125211 +- Fixed a bug introduced in v20.2, where a change to a user-defined type could cause queries against tables using that type to fail with an error like `histogram.go: span must be fully contained in the bucket`. This bug could occur if the change was from an [`ALTER TABLE`]({% link v23.2/alter-table.md %}) command or from an [`ALTER DATABASE ... ADD REGION`]({% link v23.2/alter-database.md %}) or [`ALTER DATABASE ... DROP REGION`]({% link v23.2/alter-database.md %}) command, which implicitly changes the non-public `crdb_internal_region` type. #124853 +- Fixed a bug where telemetry logs could emit the same statement fingerprint ID for different SQL statements. 
#125043 +- Fixed a bug where adding a column with a default value of an empty array could fail. #125326 +- Fixed a bug where the [declarative schema changer]({% link v23.2/online-schema-changes.md %}) could erroneously succeed despite a violation of an `ALTER TABLE ... ADD CONSTRAINT UNIQUE` constraint. Now such a violation results in an error message with the error code `42601`. #125418 +- Fixed a [changefeed]({% link v23.2/create-and-configure-changefeeds.md %}) panic in v24.1, v23.2, and v23.1 when the `changefeed.aggregator.flush_jitter` [cluster setting]({% link v23.2/cluster-settings.md %}) is configured and a changefeed's [`min_checkpoint_frequency`]({% link v23.2/create-changefeed.md %}#min-checkpoint-frequency) option is set to zero. #125469 +- Fixed a bug where the public schema was erroneously created with its owner set to the `admin` role instead of the database owner. Ownership of the public schema can be altered after the schema is created. #125535 +- Fixed a bug introduced in v23.2.0 where inserting rows into a [`REGIONAL BY ROW` table]({% link v23.2/table-localities.md %}#regional-by-row-tables) could cause an internal error if the source was a `VALUES` clause with a single row and at least one boolean expression. #126208

Performance improvements

-- The optimizer now generates more efficient query plans for some queries with [`OFFSET`]({% link v23.2/limit-offset.md %}#offset) clauses. [#124492][#124492] +- The optimizer now generates more efficient query plans for some queries with [`OFFSET`]({% link v23.2/limit-offset.md %}#offset) clauses. #124492
@@ -56,26 +56,3 @@ Release Date: July 2, 2024 This release includes 77 merged PRs by 30 authors.
-[#122704]: https://github.com/cockroachdb/cockroach/pull/122704 -[#124487]: https://github.com/cockroachdb/cockroach/pull/124487 -[#124492]: https://github.com/cockroachdb/cockroach/pull/124492 -[#124535]: https://github.com/cockroachdb/cockroach/pull/124535 -[#124569]: https://github.com/cockroachdb/cockroach/pull/124569 -[#124626]: https://github.com/cockroachdb/cockroach/pull/124626 -[#124644]: https://github.com/cockroachdb/cockroach/pull/124644 -[#124665]: https://github.com/cockroachdb/cockroach/pull/124665 -[#124774]: https://github.com/cockroachdb/cockroach/pull/124774 -[#124800]: https://github.com/cockroachdb/cockroach/pull/124800 -[#124816]: https://github.com/cockroachdb/cockroach/pull/124816 -[#124853]: https://github.com/cockroachdb/cockroach/pull/124853 -[#124925]: https://github.com/cockroachdb/cockroach/pull/124925 -[#125043]: https://github.com/cockroachdb/cockroach/pull/125043 -[#125107]: https://github.com/cockroachdb/cockroach/pull/125107 -[#125211]: https://github.com/cockroachdb/cockroach/pull/125211 -[#125326]: https://github.com/cockroachdb/cockroach/pull/125326 -[#125418]: https://github.com/cockroachdb/cockroach/pull/125418 -[#125437]: https://github.com/cockroachdb/cockroach/pull/125437 -[#125469]: https://github.com/cockroachdb/cockroach/pull/125469 -[#125535]: https://github.com/cockroachdb/cockroach/pull/125535 -[#125543]: https://github.com/cockroachdb/cockroach/pull/125543 -[#126208]: https://github.com/cockroachdb/cockroach/pull/126208 diff --git a/src/current/_includes/releases/v23.2/v23.2.8.md b/src/current/_includes/releases/v23.2/v23.2.8.md index f7fe6e684d3..b451bfd3fc5 100644 --- a/src/current/_includes/releases/v23.2/v23.2.8.md +++ b/src/current/_includes/releases/v23.2/v23.2.8.md @@ -6,12 +6,12 @@ Release Date: July 15, 2024

Performance improvements

-- Updated the [replica allocator]({% link v23.2/architecture/replication-layer.md %}) with a small performance win for very large clusters. [#126918](https://github.com/cockroachdb/cockroach/pull/#126918) -- Updated the [gossip layer]({% link v23.2/architecture/distribution-layer.md %}) to avoid unnecessary mutex contention. [#126919](https://github.com/cockroachdb/cockroach/pull/126919), [#126920](https://github.com/cockroachdb/cockroach/pull/126920) +- Updated the [replica allocator]({% link v23.2/architecture/replication-layer.md %}) with a small performance win for very large clusters. #126918 +- Updated the [gossip layer]({% link v23.2/architecture/distribution-layer.md %}) to avoid unnecessary mutex contention. #126919, #126920

Bug fixes

-- Fixed a bug where the `disallow_full_table_scans` [session variable]({% link v23.2/session-variables.md %}#disallow-full-table-scans) was not working for tables with [hash-sharded indexes]({% link v23.2/hash-sharded-indexes.md %}). [#126700](https://github.com/cockroachdb/cockroach/pull/126700) +- Fixed a bug where the `disallow_full_table_scans` [session variable]({% link v23.2/session-variables.md %}#disallow-full-table-scans) was not working for tables with [hash-sharded indexes]({% link v23.2/hash-sharded-indexes.md %}). #126700
diff --git a/src/current/_includes/releases/v23.2/v23.2.9.md b/src/current/_includes/releases/v23.2/v23.2.9.md index e6df91ec36e..0283b96088c 100644 --- a/src/current/_includes/releases/v23.2/v23.2.9.md +++ b/src/current/_includes/releases/v23.2/v23.2.9.md @@ -6,38 +6,38 @@ Release Date: August 1, 2024

SQL language changes

-- [`EXPLAIN ANALYZE`]({% link v23.2/explain-analyze.md %}) statements are now supported when executed via Cloud Console [SQL shell]({% link cockroachcloud/sql-shell.md %}). [#125562][#125562] -- Added the [`sql.auth.grant_option_inheritance.enabled` cluster setting]({% link v23.2/cluster-settings.md %}#setting-sql-auth-grant-option-inheritance-enabled). The default value is `true`, which maintains consistency with CockroachDB's previous behavior: users granted a privilege with [`WITH GRANT OPTION`]({% link v23.2/grant.md %}) can in turn grant that privilege to others. When `sql.auth.grant_option_inheritance.enabled` is set to `false`, the `GRANT OPTION` is not inherited through role membership, thereby preventing descendant roles from granting the privilege to others. However, the privilege itself continues to be inherited through role membership. [#126298][#126298] +- [`EXPLAIN ANALYZE`]({% link v23.2/explain-analyze.md %}) statements are now supported when executed via Cloud Console [SQL shell]({% link cockroachcloud/sql-shell.md %}). #125562 +- Added the [`sql.auth.grant_option_inheritance.enabled` cluster setting]({% link v23.2/cluster-settings.md %}#setting-sql-auth-grant-option-inheritance-enabled). The default value is `true`, which maintains consistency with CockroachDB's previous behavior: users granted a privilege with [`WITH GRANT OPTION`]({% link v23.2/grant.md %}) can in turn grant that privilege to others. When `sql.auth.grant_option_inheritance.enabled` is set to `false`, the `GRANT OPTION` is not inherited through role membership, thereby preventing descendant roles from granting the privilege to others. However, the privilege itself continues to be inherited through role membership. #126298

Operational changes

-- `crdb_internal.cluster_execution_insights.txt` and `crdb_internal.cluster_txn_execution_insights.txt` have been removed from the [debug zip]({% link v23.2/cockroach-debug-zip.md %}). These files contained cluster-wide insights for statements and transactions. Users can still rely on the [per-node execution]({% link v23.2/cockroach-debug-zip.md %}#files) insights in `crdb_internal.node_execution_insights.txt` and `crdb_internal.node_txn_execution_insights.txt`. [#125804][#125804] -- Some debugging-only information about physical plans is no longer collected in the `system.job_info` table for [changefeeds]({% link v23.2/change-data-capture-overview.md %}), because it has the potential to grow very large. [#126097][#126097] -- For the [TELEMETRY channel]({% link v23.2/logging.md %}#telemetry), TCL [`sampled_query`]({% link v23.2/eventlog.md %}#sampled_query) events will now be sampled at the rate specified by the setting [`sql.telemetry.query_sampling.max_event_frequency`]({% link v23.2/cluster-settings.md %}#setting-sql-telemetry-query-sampling-max-event-frequency), which is already used to limit the rate of sampling DML statements. [#126728][#126728] +- `crdb_internal.cluster_execution_insights.txt` and `crdb_internal.cluster_txn_execution_insights.txt` have been removed from the [debug zip]({% link v23.2/cockroach-debug-zip.md %}). These files contained cluster-wide insights for statements and transactions. Users can still rely on the [per-node execution]({% link v23.2/cockroach-debug-zip.md %}#files) insights in `crdb_internal.node_execution_insights.txt` and `crdb_internal.node_txn_execution_insights.txt`. #125804 +- Some debugging-only information about physical plans is no longer collected in the `system.job_info` table for [changefeeds]({% link v23.2/change-data-capture-overview.md %}), because it has the potential to grow very large. 
#126097 +- For the [TELEMETRY channel]({% link v23.2/logging.md %}#telemetry), TCL [`sampled_query`]({% link v23.2/eventlog.md %}#sampled_query) events will now be sampled at the rate specified by the setting [`sql.telemetry.query_sampling.max_event_frequency`]({% link v23.2/cluster-settings.md %}#setting-sql-telemetry-query-sampling-max-event-frequency), which is already used to limit the rate of sampling DML statements. #126728

Bug fixes

-- Fixed a bug introduced in v23.2.0 in which CockroachDB would hit an internal error when evaluating [`INSERT`s]({% link v23.2/insert.md %}) into [`REGIONAL BY ROW`]({% link v23.2/alter-table.md %}#set-the-table-locality-to-regional-by-row) tables where the source was a [`VALUES`]({% link v23.2/selection-queries.md %}#values-clause) clause with a single row and at least one boolean expression. [#125504][#125504] [#126839][#126839] -- Fixed a bug where a [`DROP ROLE`]({% link v23.2/drop-role.md %}) or [`DROP USER`]({% link v23.2/drop-user.md %}) command could leave references behind inside types, which could prevent subsequent [`SHOW GRANTS`]({% link v23.2/show-grants.md %}) commands from working. [#125806][#125806] -- Fixed a bug that could lead to descriptors having privileges to roles that no longer exist. Added an automated clean up for [dropped roles]({% link v23.2/drop-role.md %}) inside descriptors. [#125806][#125806] -- Fixed a bug where a change to a [user-defined type (UDT)]({% link v23.2/create-type.md %}) could cause queries against tables using that type to fail with an error message like: `histogram.go:694: span must be fully contained in the bucket`. The change to the user-defined type could occur either directly from an [`ALTER TYPE`]({% link v23.2/alter-type.md %}) statement or indirectly from an [`ALTER DATABASE ... ADD REGION`]({% link v23.2/alter-database.md %}#add-region) or [`ALTER DATABASE ... DROP REGION`]({% link v23.2/alter-database.md %}#drop-region) statement, which implicitly modifies the `crdb_internal_region` UDT. This bug had existed since UDTs were introduced in v20.2. [#125806][#125806] -- Fixed a bug in which constant `LIKE` patterns containing certain sequences of backslashes did not become constrained scans. This bug has been present since v21.1.13 when support for building constrained scans from `LIKE` patterns containing backslashes was added. 
[#125538][#125538] -- Fixed a bug introduced in alpha versions of v23.1 where calling a routine could result in an unexpected `function ... does not exist` error. The bug is triggered when the routine is called twice using the exact same SQL query, and either: (a) the routine has polymorphic arguments, or: (b) between the two calls, the routine is replaced by a routine with the same name and different parameters. [#123518][#123518] -- Fixed the statistics estimation code in the [optimizer]({% link v23.2/cost-based-optimizer.md %}) so it does not use the empty histograms produced if [histogram collection]({% link v23.2/cost-based-optimizer.md %}#control-histogram-collection) has been disabled during stats collection due to excessive memory utilization. Now the optimizer will rely on distinct counts instead of the empty histograms and should produce better plans as a result. This bug has existed since v22.1. [#126156][#126156] -- Fixed a bug in [`cockroach debug tsdump`]({% link v23.2/cockroach-debug-tsdump.md %}) where the command fails when a custom SQL port is used and the [`--format=raw`]({% link v23.2/cockroach-debug-tsdump.md %}#flags) flag is provided. [#126184][#126184] -- Fixed a bug where a [user-defined function (UDF)]({% link v23.2/user-defined-functions.md %}) that shared a name with a [built-in function]({% link v23.2/functions-and-operators.md %}#built-in-functions) would not be resolved, even if the UDF had higher precedence according to the [`search_path`]({% link v23.2/sql-name-resolution.md %}#search-path) variable. [#126295][#126295] -- Fixed a bug that caused [background jobs]({% link v23.2/show-jobs.md %}) to incorrectly respect a statement timeout. [#126819][#126819] -- Fixed a bug where [`ALTER DATABASE ... DROP REGION`]({% link v23.2/alter-database.md %}#drop-region) could fail if any tables under the given database have [indexes on expressions]({% link v23.2/expression-indexes.md %}). 
[#126598][#126598] -- Fixed a bug when [restoring]({% link v23.2/restore.md %}) a database with a [composite type]({% link v23.2/create-type.md %}#create-a-composite-data-type). [#126841][#126841] -- Fixed a bug when inputting `public` role as user name for [built-in compatibility functions]({% link v24.2/functions-and-operators.md %}#compatibility-functions), such as `has_database_privilege` and `has_schema_privilege`. [#126852][#126852] -- Fixed a bug where the [Database page]({% link v23.2/ui-databases-page.md %}) could crash if range information is not available. [#127091][#127091] -- Fixed a bug where CockroachDB could incorrectly evaluate an [`IS NOT NULL`]({% link v23.2/null-handling.md %}#nulls-and-simple-comparisons) filter if it was applied to non-`NULL` tuples that had `NULL` elements, such as `(1, NULL)` or `(NULL, NULL)`. This bug has existed since v20.2. [#126937][#126937] -- In the [DB Console event log]({% link v23.2/ui-overview-dashboard.md %}#events-panel), [`ALTER ROLE`]({% link v23.2/alter-role.md %}) events now display correctly even when no [role options]({% link v23.2/alter-role.md %}#role-options) are included in the `ALTER ROLE` statement. [#126565][#126565] -- Fixed a bug where [`CREATE TABLE`]({% link v23.2/create-table.md %}) with [index expressions]({% link v23.2/expression-indexes.md %}) could hit undefined column errors on [transaction retries]({% link v23.2/transactions.md %}#transaction-retries). [#126201][#126201] +- Fixed a bug introduced in v23.2.0 in which CockroachDB would hit an internal error when evaluating [`INSERT`s]({% link v23.2/insert.md %}) into [`REGIONAL BY ROW`]({% link v23.2/alter-table.md %}#set-the-table-locality-to-regional-by-row) tables where the source was a [`VALUES`]({% link v23.2/selection-queries.md %}#values-clause) clause with a single row and at least one boolean expression. 
#125504 #126839 +- Fixed a bug where a [`DROP ROLE`]({% link v23.2/drop-role.md %}) or [`DROP USER`]({% link v23.2/drop-user.md %}) command could leave references behind inside types, which could prevent subsequent [`SHOW GRANTS`]({% link v23.2/show-grants.md %}) commands from working. #125806 +- Fixed a bug that could lead to descriptors having privileges to roles that no longer exist. Added an automated clean up for [dropped roles]({% link v23.2/drop-role.md %}) inside descriptors. #125806 +- Fixed a bug where a change to a [user-defined type (UDT)]({% link v23.2/create-type.md %}) could cause queries against tables using that type to fail with an error message like: `histogram.go:694: span must be fully contained in the bucket`. The change to the user-defined type could occur either directly from an [`ALTER TYPE`]({% link v23.2/alter-type.md %}) statement or indirectly from an [`ALTER DATABASE ... ADD REGION`]({% link v23.2/alter-database.md %}#add-region) or [`ALTER DATABASE ... DROP REGION`]({% link v23.2/alter-database.md %}#drop-region) statement, which implicitly modifies the `crdb_internal_region` UDT. This bug had existed since UDTs were introduced in v20.2. #125806 +- Fixed a bug in which constant `LIKE` patterns containing certain sequences of backslashes did not become constrained scans. This bug has been present since v21.1.13 when support for building constrained scans from `LIKE` patterns containing backslashes was added. #125538 +- Fixed a bug introduced in alpha versions of v23.1 where calling a routine could result in an unexpected `function ... does not exist` error. The bug is triggered when the routine is called twice using the exact same SQL query, and either: (a) the routine has polymorphic arguments, or: (b) between the two calls, the routine is replaced by a routine with the same name and different parameters. 
#123518 +- Fixed the statistics estimation code in the [optimizer]({% link v23.2/cost-based-optimizer.md %}) so it does not use the empty histograms produced if [histogram collection]({% link v23.2/cost-based-optimizer.md %}#control-histogram-collection) has been disabled during stats collection due to excessive memory utilization. Now the optimizer will rely on distinct counts instead of the empty histograms and should produce better plans as a result. This bug has existed since v22.1. #126156 +- Fixed a bug in [`cockroach debug tsdump`]({% link v23.2/cockroach-debug-tsdump.md %}) where the command fails when a custom SQL port is used and the [`--format=raw`]({% link v23.2/cockroach-debug-tsdump.md %}#flags) flag is provided. #126184 +- Fixed a bug where a [user-defined function (UDF)]({% link v23.2/user-defined-functions.md %}) that shared a name with a [built-in function]({% link v23.2/functions-and-operators.md %}#built-in-functions) would not be resolved, even if the UDF had higher precedence according to the [`search_path`]({% link v23.2/sql-name-resolution.md %}#search-path) variable. #126295 +- Fixed a bug that caused [background jobs]({% link v23.2/show-jobs.md %}) to incorrectly respect a statement timeout. #126819 +- Fixed a bug where [`ALTER DATABASE ... DROP REGION`]({% link v23.2/alter-database.md %}#drop-region) could fail if any tables under the given database have [indexes on expressions]({% link v23.2/expression-indexes.md %}). #126598 +- Fixed a bug when [restoring]({% link v23.2/restore.md %}) a database with a [composite type]({% link v23.2/create-type.md %}#create-a-composite-data-type). #126841 +- Fixed a bug when inputting `public` role as user name for [built-in compatibility functions]({% link v23.2/functions-and-operators.md %}#compatibility-functions), such as `has_database_privilege` and `has_schema_privilege`. 
#126852 +- Fixed a bug where the [Database page]({% link v23.2/ui-databases-page.md %}) could crash if range information is not available. #127091 +- Fixed a bug where CockroachDB could incorrectly evaluate an [`IS NOT NULL`]({% link v23.2/null-handling.md %}#nulls-and-simple-comparisons) filter if it was applied to non-`NULL` tuples that had `NULL` elements, such as `(1, NULL)` or `(NULL, NULL)`. This bug has existed since v20.2. #126937 +- In the [DB Console event log]({% link v23.2/ui-overview-dashboard.md %}#events-panel), [`ALTER ROLE`]({% link v23.2/alter-role.md %}) events now display correctly even when no [role options]({% link v23.2/alter-role.md %}#role-options) are included in the `ALTER ROLE` statement. #126565 +- Fixed a bug where [`CREATE TABLE`]({% link v23.2/create-table.md %}) with [index expressions]({% link v23.2/expression-indexes.md %}) could hit undefined column errors on [transaction retries]({% link v23.2/transactions.md %}#transaction-retries). #126201

Performance improvements

-- [Schema changes]({% link v23.2/online-schema-changes.md %}) that cause a data backfill, such as adding a non-nullable column or changing the primary key, will now split and scatter the temporary indexes used to perform the change. This reduces the chance of causing a write hotspot that can slow down foreground traffic. [#126691][#126691] +- [Schema changes]({% link v23.2/online-schema-changes.md %}) that cause a data backfill, such as adding a non-nullable column or changing the primary key, will now split and scatter the temporary indexes used to perform the change. This reduces the chance of causing a write hotspot that can slow down foreground traffic. #126691
@@ -47,29 +47,3 @@ This release includes 100 merged PRs by 33 authors.
-[#123518]: https://github.com/cockroachdb/cockroach/pull/123518 -[#125504]: https://github.com/cockroachdb/cockroach/pull/125504 -[#125538]: https://github.com/cockroachdb/cockroach/pull/125538 -[#125562]: https://github.com/cockroachdb/cockroach/pull/125562 -[#125804]: https://github.com/cockroachdb/cockroach/pull/125804 -[#125806]: https://github.com/cockroachdb/cockroach/pull/125806 -[#126097]: https://github.com/cockroachdb/cockroach/pull/126097 -[#126156]: https://github.com/cockroachdb/cockroach/pull/126156 -[#126184]: https://github.com/cockroachdb/cockroach/pull/126184 -[#126201]: https://github.com/cockroachdb/cockroach/pull/126201 -[#126216]: https://github.com/cockroachdb/cockroach/pull/126216 -[#126295]: https://github.com/cockroachdb/cockroach/pull/126295 -[#126298]: https://github.com/cockroachdb/cockroach/pull/126298 -[#126565]: https://github.com/cockroachdb/cockroach/pull/126565 -[#126598]: https://github.com/cockroachdb/cockroach/pull/126598 -[#126691]: https://github.com/cockroachdb/cockroach/pull/126691 -[#126728]: https://github.com/cockroachdb/cockroach/pull/126728 -[#126819]: https://github.com/cockroachdb/cockroach/pull/126819 -[#126839]: https://github.com/cockroachdb/cockroach/pull/126839 -[#126841]: https://github.com/cockroachdb/cockroach/pull/126841 -[#126852]: https://github.com/cockroachdb/cockroach/pull/126852 -[#126937]: https://github.com/cockroachdb/cockroach/pull/126937 -[#127091]: https://github.com/cockroachdb/cockroach/pull/127091 -[#127608]: https://github.com/cockroachdb/cockroach/pull/127608 -[44d16f97a]: https://github.com/cockroachdb/cockroach/commit/44d16f97a -[7fb249aa1]: https://github.com/cockroachdb/cockroach/commit/7fb249aa1 diff --git a/src/current/_includes/releases/v24.1/v24.1.0-alpha.1.md b/src/current/_includes/releases/v24.1/v24.1.0-alpha.1.md index b8f53da691e..f751b46603b 100644 --- a/src/current/_includes/releases/v24.1/v24.1.0-alpha.1.md +++ b/src/current/_includes/releases/v24.1/v24.1.0-alpha.1.md @@ 
-6,169 +6,169 @@ Release Date: March 7, 2024

Backward-incompatible changes

-- [`AS OF SYSTEM TIME`]({% link v24.1/as-of-system-time.md %}) queries can no longer use a timestamp followed by a question mark to signify a future-time value. This was an undocumented syntax. [#116830][#116830] +- [`AS OF SYSTEM TIME`]({% link v24.1/as-of-system-time.md %}) queries can no longer use a timestamp followed by a question mark to signify a future-time value. This was an undocumented syntax. #116830

{{ site.data.products.enterprise }} edition changes

-- [`ALTER CHANGEFEED`]({% link v24.1/alter-changefeed.md %}) no longer removes a [CDC query]({% link v24.1/cdc-queries.md %}) when modifying changefeed properties. [#116498][#116498] -- `changefeed.balance_range_distribution.enable` is now deprecated. Instead, use the new [cluster setting]({% link v24.1/cluster-settings.md %}) `changefeed.default_range_distribution_strategy`. `changefeed.default_range_distribution_strategy='balanced_simple'` has the same effect as setting `changefeed.balance_range_distribution.enable=true`. It does not require `initial_scan='only'`, which was required by the old setting. [#115166][#115166] -- CDC queries now correctly handle the [`changefeed_creation_timestamp`]({% link v24.1/cdc-queries.md %}#cdc-query-function-support) function. [#117520][#117520] -- The new syntax `ALTER VIRTUAL CLUSTER virtual-cluster START REPLICATION OF virtual-cluster ON physical-cluster` can now be used to reconfigure virtual clusters previously serving as sources for [physical cluster replication]({% link v24.1/physical-cluster-replication-overview.md %}) to become standbys to a promoted standby. This reverses the direction of replication while maximizing data reuse. [#117656][#117656] -- [`BACKUP`]({% link v24.1/backup.md %})s now load range information that is used to avoid a spike in metadata lookups when backups begin. [#116520][#116520] -- Clusters created to run [physical cluster replication]({% link v24.1/physical-cluster-replication-overview.md %}) no longer automatically disable the [`spanconfig.range_coalescing.system.enabled`]({% link v24.1/cluster-settings.md %}#setting-spanconfig-storage-coalesce-adjacent-enabled) and [`spanconfig.range_coalescing.application.enabled`]({% link v24.1/cluster-settings.md %}#setting-spanconfig-tenant-coalesce-adjacent-enabled) cluster settings. Users who started using physical cluster replication on v23.1 or v23.2 may wish to manually reset these settings. 
[#119221][#119221] -- Physical cluster replication is now always enabled, and the `physical_replication.enabled` cluster setting has been removed. [#119149][#119149] +- [`ALTER CHANGEFEED`]({% link v24.1/alter-changefeed.md %}) no longer removes a [CDC query]({% link v24.1/cdc-queries.md %}) when modifying changefeed properties. #116498 +- `changefeed.balance_range_distribution.enable` is now deprecated. Instead, use the new [cluster setting]({% link v24.1/cluster-settings.md %}) `changefeed.default_range_distribution_strategy`. `changefeed.default_range_distribution_strategy='balanced_simple'` has the same effect as setting `changefeed.balance_range_distribution.enable=true`. It does not require `initial_scan='only'`, which was required by the old setting. #115166 +- CDC queries now correctly handle the [`changefeed_creation_timestamp`]({% link v24.1/cdc-queries.md %}#cdc-query-function-support) function. #117520 +- The new syntax `ALTER VIRTUAL CLUSTER virtual-cluster START REPLICATION OF virtual-cluster ON physical-cluster` can now be used to reconfigure virtual clusters previously serving as sources for [physical cluster replication]({% link v24.1/physical-cluster-replication-overview.md %}) to become standbys to a promoted standby. This reverses the direction of replication while maximizing data reuse. #117656 +- [`BACKUP`]({% link v24.1/backup.md %})s now load range information that is used to avoid a spike in metadata lookups when backups begin. #116520 +- Clusters created to run [physical cluster replication]({% link v24.1/physical-cluster-replication-overview.md %}) no longer automatically disable the [`spanconfig.range_coalescing.system.enabled`]({% link v24.1/cluster-settings.md %}#setting-spanconfig-storage-coalesce-adjacent-enabled) and [`spanconfig.range_coalescing.application.enabled`]({% link v24.1/cluster-settings.md %}#setting-spanconfig-tenant-coalesce-adjacent-enabled) cluster settings. 
Users who started using physical cluster replication on v23.1 or v23.2 may wish to manually reset these settings. #119221 +- Physical cluster replication is now always enabled, and the `physical_replication.enabled` cluster setting has been removed. #119149

SQL language changes

-- [`ALTER BACKUP SCHEDULE ... EXECUTE IMMEDIATELY`]({% link v24.1/alter-backup-schedule.md %}) can now be used to set the next scheduled execution of the backup schedule to the current time. [#112118][#112118] -- Fixed the [**SQL Activity**]({% link v24.1/ui-sql-dashboard.md %}) update job to avoid conflicts on update, reduced the amount of data cached to just what the overview page requires, and fixed the correctess of the top queries. [#112350][#112350] -- Previously, user-defined composite types were not populated in two `pg_catalog` tables: `pg_class` (whose row entries pertain to the type) and `pg_attribute` (whose row entries pertain to the "columns" of the type). This PostgreSQL-incompatible behavior is now fixed by populating the tables with user-defined composite types. In addition, the `typrelid` column in the `pg_type` table has the proper `oid` for composite types. [#111179][#111179] -- The newly added [built-in function]({% link v24.1/functions-and-operators.md %}#array-functions) `jsonb_array_to_string_array` no longer removes `NULL` objects. It now includes them in the resulting array. [#112975][#112975] -- Changed the display for RU estimates shown in [`EXPLAIN ANALYZE`]({% link v24.1/explain-analyze.md %}) from integer to float. This will prevent small estimates from being rounded to zero, which makes the estimate less confusing for cheap queries. [#111986][#111986] -- The `information_schema._pg_char_octet_length` [built-in function]({% link v24.1/functions-and-operators.md %}) is now supported, which improves compatibility with PostgreSQL. [#111401][#111401] -- The `pg_encoding_max_length` [built-in function]({% link v24.1/functions-and-operators.md %}) is now supported, which improves compatibility with PostgreSQL. [#111401][#111401] -- The `information_schema._pg_datetime_precision` [built-in function]({% link v24.1/functions-and-operators.md %}) is now supported, which improves compatibility with PostgreSQL. 
[#111401][#111401] -- The `information_schema._pg_interval_type` [built-in function]({% link v24.1/functions-and-operators.md %}) is now supported, which improves compatibility with PostgreSQL. [#111401][#111401] -- `information_schema.user_defined_types` is now populated with information about [user-defined types]({% link v24.1/create-type.md %}), and `information_schema.attributes` is now populated with information about the attributes of [composite data types]({% link v24.1/create-type.md %}#create-a-composite-data-type). [#111401][#111401] -- The [cost-based optimizer]({% link v24.1/cost-based-optimizer.md %}) will no longer generate a constrained scan that only uses filters from a [check]({% link v24.1/check.md %}) constraint. This prevents cases where a constrained scan actually scans the entire table because the constraints aren't selective. [#114332][#114332] -- Reads rolled back by savepoints are now refreshable, matching the PostgreSQL behavior and avoiding potential serializability violations. [#111424][#111424] -- Implemented the postgis `ST_TileEnvelope` [built-in function]({% link v24.1/functions-and-operators.md %}). [#112971][#112971] -- Added support for a third argument in the `array_position` [built-in function]({% link v24.1/functions-and-operators.md %}). If provided, it gives the index from which to begin searching in the array. [#112161][#112161] -- Added the `bit_count` [built-in function]({% link v24.1/functions-and-operators.md %}) for [`BIT`]({% link v24.1/bit.md %}) and [`BYTES`]({% link v24.1/bytes.md %}) types. [#115273][#115273] -- Added a `pg_backend_pid` column to `crdb_internal.node_sessions` and `crdb_internal.cluster_sessions`. This value corresponds to the numerical ID returned from `pg_backend_pid`. [#116673][#116673] -- Column type changes now require an explicit cast when automatic casting is not possible. This aligns with PostgreSQL's behavior. 
Previously, certain type conversions, such as [`BOOL`]({% link v24.1/bool.md %}) to [`INT`]({% link v24.1/int.md %}), were allowed without an explicit cast. [#115442][#115442] -- Added a new [session setting]({% link v24.1/session-variables.md %}), `optimizer_merge_joins_enabled` that, when true, instructs the [cost-based optimizer]({% link v24.1/cost-based-optimizer.md %}) to explore query plans with merge joins. The setting defaults to `true`. [#116410][#116410] -- CockroachDB now supports parsing queries like [`SELECT FROM t`]({% link v24.1/selection-queries.md %}) that only produce the row count and do not output any columns. [#116835][#116835] -- Added the `metaphone` [built-in function]({% link v24.1/functions-and-operators.md %}), which converts a string to its Metaphone code. [#110950][#110950] -- The new `EXPIRATION WINDOW` option for [`ALTER VIRTUAL CLUSTER`]({% link v24.1/alter-virtual-cluster.md %}) allows the user to override the default producer job expiration window of 24 hours. For example, `ALTER VIRTUAL CLUSTER appTenant SET REPLICATION EXPIRATION WINDOW ='100ms'`. The producer job expiration window determines how long the producer job stays alive without a heartbeat from the consumer job. [#117776][#117776] -- The [`SKIP LOCKED`]({% link v24.1/select-for-update.md %}#wait-policies) clause is now allowed with `SELECT ... FOR SHARE`. [#117560][#117560] -- Added configurable [cluster settings]({% link v24.1/cluster-settings.md %}) for total TCP keep alive probes (`server.sql_tcp_keep_alive.count`) and TCP probe intervals (`server.sql_tcp_keep_alive.interval`) for SQL connections. Removed the `COCKROACH_SQL_TCP_KEEP_ALIVE` environment variable subsuming it. [#115833][#115833] -- Removed the `sql.trace.session_eventlog.enabled` cluster setting and the associated event log tracing. 
The information in these traces is still available in the [`DEV` log channel]({% link v24.1/logging-overview.md %}#logging-channels) by enabling `--vmodule=conn_executor=2` with [`cockroach start`]({% link v24.1/cockroach-start.md %}). [#117928][#117928] -- The `array_agg` [aggregate function]({% link v24.1/functions-and-operators.md %}#aggregate-functions) can now support arrays as the input. Note that CockroachDB does not yet fully support nested arrays, and `array_agg` does not support nested arrays as inputs. [#117838][#117838] -- An execution statistic that measures "client time" is now included in `plan.txt` files of [statement diagnostics bundles]({% link v24.1/cockroach-statement-diag.md %}). Client time tracks how long the query execution was blocked on the client receiving the PGWire protocol messages. Note that when obtained via [`EXPLAIN ANALYZE (DEBUG)`]({% link v24.1/explain-analyze.md %}#debug-option), client time does not make sense because in this variant the output rows are discarded and not communicated to the client. [#117591][#117591] -- Added the `trace_id` column to the response of the [`SHOW SESSIONS`]({% link v24.1/show-sessions.md %}) command. [#118002][#118002] -- Added support for the `ENCODING` option of `COPY`, as long as the encoding of `'utf8'` is specified. [#118010][#118010] -- Added the `SHOW VARIABLES FOR ROLE` command, which allows the database administrator to easily view the default values for [session variables]({% link v24.1/session-variables.md %}) applied to a given user. [#117875][#117875] -- The `sql.txn.read_committed_isolation.enabled` [cluster setting]({% link v24.1/cluster-settings.md %}) is now `true` by default. This means that any syntax and settings that configure the [`READ COMMITTED`]({% link v24.1/read-committed.md %}) isolation level will now cause the transaction to use that isolation level, rather than automatically upgrading the transaction to `SERIALIZABLE`. 
[#118479][#118479] -- Added a new [cluster setting]({% link v24.1/cluster-settings.md %}), `sql.stats.virtual_computed_columns.enabled`, which when set enables collection of table statistics on [`VIRTUAL` computed columns]({% link v24.1/computed-columns.md %}). [#118241][#118241] -- Added the `autocommit_before_ddl` [session variable]({% link v24.1/session-variables.md %}). When set to `true`, any schema change statement that is sent during an explicit transaction will cause the transaction to commit before executing the schema change. [#118440][#118440] -- [`CREATE SEQUENCE`]({% link v24.1/create-sequence.md %}) is now enabled by default in the declarative schema changer. [#117793][#117793] -- [PL/pgSQL]({% link v24.1/plpgsql.md %}) now supports nested blocks, with the following limitations: variable shadowing is disallowed, and exception handlers cannot be used in a routine with nested blocks. [#117710][#117710] -- The [cluster setting]({% link v24.1/cluster-settings.md %}) `sql.index_recommendation.drop_unused_duration` is now public. [#118676][#118676] -- It is now possible to hint to the [cost-based optimizer]({% link v24.1/cost-based-optimizer.md %}) that it should plan a straight join by using the syntax `... INNER STRAIGHT JOIN ...`. If the hint is provided, the optimizer will now fix the join order as given in the query, even if it estimates that a different plan using join reordering would have a lower cost. [#116013][#116013] -- Add column `goroutine_id` to the response of the `SHOW SESSIONS` command. [#118644][#118644] -- Introduced a new [session setting]({% link v24.1/session-variables.md %}), `close_cursors_at_commit`, which causes a cursor to remain open even after its calling transaction commits. Note that transaction rollback still closes any cursor created in that transaction. [#117910][#117910] -- Added the `server.max_open_transactions_per_gateway` [cluster setting]({% link v24.1/cluster-settings.md %}). 
When set to a non-negative value, non-`admin` users cannot execute a query if the number of transactions open on the current gateway node is already at the configured limit. [#118781][#118781] -- Added the `setseed` [built-in function]({% link v24.1/functions-and-operators.md %}). It sets the seed for the random generator used by the `random` built-in function. [#119042][#119042] -- `OUT` and `INOUT` parameter classes are now supported in [user-defined functions]({% link v24.1/user-defined-functions.md %}). [#118610][#118610] -- Out-of-process SQL servers will now start exporting a new `sql.aggregated_livebytes` [metric]({% link v24.1/metrics.md %}). This metric gets updated once every 60 seconds by default, and its update interval can be configured via the `tenant_global_metrics_exporter_interval` [cluster setting]({% link v24.1/cluster-settings.md %}). [#119140][#119140] -- Added support for index hints with [`INSERT`]({% link v24.1/insert.md %}) and [`UPSERT`]({% link v24.1/upsert.md %}) statements. This allows `INSERT ... ON CONFLICT` and `UPSERT` queries to use index hints in the same way they are already supported for [`UPDATE`]({% link v24.1/update.md %}) and [`DELETE`]({% link v24.1/delete.md %}) statements. [#119104][#119104] -- Added a new [`ttl_disable_changefeed_replication`]({% link v24.1/row-level-ttl.md %}#filter-changefeeds-for-tables-using-row-level-ttl) table storage parameter that can be used to disable changefeed replication for [row-level TTL]({% link v24.1/row-level-ttl.md %}) on a per-table basis. [#119611][#119611] +- [`ALTER BACKUP SCHEDULE ... EXECUTE IMMEDIATELY`]({% link v24.1/alter-backup-schedule.md %}) can now be used to set the next scheduled execution of the backup schedule to the current time. #112118 +- Fixed the [**SQL Activity**]({% link v24.1/ui-sql-dashboard.md %}) update job to avoid conflicts on update, reduced the amount of data cached to just what the overview page requires, and fixed the correctness of the top queries. 
#112350 +- Previously, user-defined composite types were not populated in two `pg_catalog` tables: `pg_class` (whose row entries pertain to the type) and `pg_attribute` (whose row entries pertain to the "columns" of the type). This PostgreSQL-incompatible behavior is now fixed by populating the tables with user-defined composite types. In addition, the `typrelid` column in the `pg_type` table has the proper `oid` for composite types. #111179 +- The newly added [built-in function]({% link v24.1/functions-and-operators.md %}#array-functions) `jsonb_array_to_string_array` no longer removes `NULL` objects. It now includes them in the resulting array. #112975 +- Changed the display for RU estimates shown in [`EXPLAIN ANALYZE`]({% link v24.1/explain-analyze.md %}) from integer to float. This will prevent small estimates from being rounded to zero, which makes the estimate less confusing for cheap queries. #111986 +- The `information_schema._pg_char_octet_length` [built-in function]({% link v24.1/functions-and-operators.md %}) is now supported, which improves compatibility with PostgreSQL. #111401 +- The `pg_encoding_max_length` [built-in function]({% link v24.1/functions-and-operators.md %}) is now supported, which improves compatibility with PostgreSQL. #111401 +- The `information_schema._pg_datetime_precision` [built-in function]({% link v24.1/functions-and-operators.md %}) is now supported, which improves compatibility with PostgreSQL. #111401 +- The `information_schema._pg_interval_type` [built-in function]({% link v24.1/functions-and-operators.md %}) is now supported, which improves compatibility with PostgreSQL. #111401 +- `information_schema.user_defined_types` is now populated with information about [user-defined types]({% link v24.1/create-type.md %}), and `information_schema.attributes` is now populated with information about the attributes of [composite data types]({% link v24.1/create-type.md %}#create-a-composite-data-type). 
#111401 +- The [cost-based optimizer]({% link v24.1/cost-based-optimizer.md %}) will no longer generate a constrained scan that only uses filters from a [check]({% link v24.1/check.md %}) constraint. This prevents cases where a constrained scan actually scans the entire table because the constraints aren't selective. #114332 +- Reads rolled back by savepoints are now refreshable, matching the PostgreSQL behavior and avoiding potential serializability violations. #111424 +- Implemented the postgis `ST_TileEnvelope` [built-in function]({% link v24.1/functions-and-operators.md %}). #112971 +- Added support for a third argument in the `array_position` [built-in function]({% link v24.1/functions-and-operators.md %}). If provided, it gives the index from which to begin searching in the array. #112161 +- Added the `bit_count` [built-in function]({% link v24.1/functions-and-operators.md %}) for [`BIT`]({% link v24.1/bit.md %}) and [`BYTES`]({% link v24.1/bytes.md %}) types. #115273 +- Added a `pg_backend_pid` column to `crdb_internal.node_sessions` and `crdb_internal.cluster_sessions`. This value corresponds to the numerical ID returned from `pg_backend_pid`. #116673 +- Column type changes now require an explicit cast when automatic casting is not possible. This aligns with PostgreSQL's behavior. Previously, certain type conversions, such as [`BOOL`]({% link v24.1/bool.md %}) to [`INT`]({% link v24.1/int.md %}), were allowed without an explicit cast. #115442 +- Added a new [session setting]({% link v24.1/session-variables.md %}), `optimizer_merge_joins_enabled` that, when true, instructs the [cost-based optimizer]({% link v24.1/cost-based-optimizer.md %}) to explore query plans with merge joins. The setting defaults to `true`. #116410 +- CockroachDB now supports parsing queries like [`SELECT FROM t`]({% link v24.1/selection-queries.md %}) that only produce the row count and do not output any columns. 
#116835 +- Added the `metaphone` [built-in function]({% link v24.1/functions-and-operators.md %}), which converts a string to its Metaphone code. #110950 +- The new `EXPIRATION WINDOW` option for [`ALTER VIRTUAL CLUSTER`]({% link v24.1/alter-virtual-cluster.md %}) allows the user to override the default producer job expiration window of 24 hours. For example, `ALTER VIRTUAL CLUSTER appTenant SET REPLICATION EXPIRATION WINDOW ='100ms'`. The producer job expiration window determines how long the producer job stays alive without a heartbeat from the consumer job. #117776 +- The [`SKIP LOCKED`]({% link v24.1/select-for-update.md %}#wait-policies) clause is now allowed with `SELECT ... FOR SHARE`. #117560 +- Added configurable [cluster settings]({% link v24.1/cluster-settings.md %}) for total TCP keep alive probes (`server.sql_tcp_keep_alive.count`) and TCP probe intervals (`server.sql_tcp_keep_alive.interval`) for SQL connections. Removed the `COCKROACH_SQL_TCP_KEEP_ALIVE` environment variable subsuming it. #115833 +- Removed the `sql.trace.session_eventlog.enabled` cluster setting and the associated event log tracing. The information in these traces is still available in the [`DEV` log channel]({% link v24.1/logging-overview.md %}#logging-channels) by enabling `--vmodule=conn_executor=2` with [`cockroach start`]({% link v24.1/cockroach-start.md %}). #117928 +- The `array_agg` [aggregate function]({% link v24.1/functions-and-operators.md %}#aggregate-functions) can now support arrays as the input. Note that CockroachDB does not yet fully support nested arrays, and `array_agg` does not support nested arrays as inputs. #117838 +- An execution statistic that measures "client time" is now included in `plan.txt` files of [statement diagnostics bundles]({% link v24.1/cockroach-statement-diag.md %}). Client time tracks how long the query execution was blocked on the client receiving the PGWire protocol messages. 
Note that when obtained via [`EXPLAIN ANALYZE (DEBUG)`]({% link v24.1/explain-analyze.md %}#debug-option), client time does not make sense because in this variant the output rows are discarded and not communicated to the client. #117591 +- Added the `trace_id` column to the response of the [`SHOW SESSIONS`]({% link v24.1/show-sessions.md %}) command. #118002 +- Added support for the `ENCODING` option of `COPY`, as long as the encoding of `'utf8'` is specified. #118010 +- Added the `SHOW VARIABLES FOR ROLE` command, which allows the database administrator to easily view the default values for [session variables]({% link v24.1/session-variables.md %}) applied to a given user. #117875 +- The `sql.txn.read_committed_isolation.enabled` [cluster setting]({% link v24.1/cluster-settings.md %}) is now `true` by default. This means that any syntax and settings that configure the [`READ COMMITTED`]({% link v24.1/read-committed.md %}) isolation level will now cause the transaction to use that isolation level, rather than automatically upgrading the transaction to `SERIALIZABLE`. #118479 +- Added a new [cluster setting]({% link v24.1/cluster-settings.md %}), `sql.stats.virtual_computed_columns.enabled`, which when set enables collection of table statistics on [`VIRTUAL` computed columns]({% link v24.1/computed-columns.md %}). #118241 +- Added the `autocommit_before_ddl` [session variable]({% link v24.1/session-variables.md %}). When set to `true`, any schema change statement that is sent during an explicit transaction will cause the transaction to commit before executing the schema change. #118440 +- [`CREATE SEQUENCE`]({% link v24.1/create-sequence.md %}) is now enabled by default in the declarative schema changer. #117793 +- [PL/pgSQL]({% link v24.1/plpgsql.md %}) now supports nested blocks, with the following limitations: variable shadowing is disallowed, and exception handlers cannot be used in a routine with nested blocks. 
#117710 +- The [cluster setting]({% link v24.1/cluster-settings.md %}) `sql.index_recommendation.drop_unused_duration` is now public. #118676 +- It is now possible to hint to the [cost-based optimizer]({% link v24.1/cost-based-optimizer.md %}) that it should plan a straight join by using the syntax `... INNER STRAIGHT JOIN ...`. If the hint is provided, the optimizer will now fix the join order as given in the query, even if it estimates that a different plan using join reordering would have a lower cost. #116013 +- Add column `goroutine_id` to the response of the `SHOW SESSIONS` command. #118644 +- Introduced a new [session setting]({% link v24.1/session-variables.md %}), `close_cursors_at_commit`, which causes a cursor to remain open even after its calling transaction commits. Note that transaction rollback still closes any cursor created in that transaction. #117910 +- Added the `server.max_open_transactions_per_gateway` [cluster setting]({% link v24.1/cluster-settings.md %}). When set to a non-negative value, non-`admin` users cannot execute a query if the number of transactions open on the current gateway node is already at the configured limit. #118781 +- Added the `setseed` [built-in function]({% link v24.1/functions-and-operators.md %}). It sets the seed for the random generator used by the `random` built-in function. #119042 +- `OUT` and `INOUT` parameter classes are now supported in [user-defined functions]({% link v24.1/user-defined-functions.md %}). #118610 +- Out-of-process SQL servers will now start exporting a new `sql.aggregated_livebytes` [metric]({% link v24.1/metrics.md %}). This metric gets updated once every 60 seconds by default, and its update interval can be configured via the `tenant_global_metrics_exporter_interval` [cluster setting]({% link v24.1/cluster-settings.md %}). #119140 +- Added support for index hints with [`INSERT`]({% link v24.1/insert.md %}) and [`UPSERT`]({% link v24.1/upsert.md %}) statements. This allows `INSERT ... 
ON CONFLICT` and `UPSERT` queries to use index hints in the same way they are already supported for [`UPDATE`]({% link v24.1/update.md %}) and [`DELETE`]({% link v24.1/delete.md %}) statements. #119104 +- Added a new [`ttl_disable_changefeed_replication`]({% link v24.1/row-level-ttl.md %}#filter-changefeeds-for-tables-using-row-level-ttl) table storage parameter that can be used to disable changefeed replication for [row-level TTL]({% link v24.1/row-level-ttl.md %}) on a per-table basis. #119611

Operational changes

-- The internal versions that are reported during [cluster upgrades]({% link v24.1/upgrade-cockroach-version.md %}) have been renamed for clarity. For example, `23.2-8` is now named `23.2-upgrading-to-24.1-step-008`. [#115223][#115223] -- Introduced a new cluster setting, `server.jwt_authentication.jwks_auto_fetch.enabled`, enabling automatic fetching of [JSON Web Key Sets (JWKS)]({% link v24.1/sso-sql.md %}) from an issuer's remote endpoint. This prevents an administrator's need to update the JWKS specified in `server.jwt_authentication.jwks` - whether manually or by custom script - when the identity provider's keys rotate. That direct specification of JWKS remains the default, as the new cluster setting defaults to `false`. [#117054][#117054] -- Updated the error message logged in the case of stalled disks to use the appropriate term "disk stall", matching the term used in metrics and dashboards. This was previously "file write stall". [#114746][#114746] -- Introduced the `changefeed.emitted_batch_sizes` histogram metric that measures the batch sizes used when emitting data to [sinks]({% link v24.1/changefeed-sinks.md %}). This metric supports [metrics labels]({% link v24.1/monitor-and-debug-changefeeds.md %}#using-changefeed-metrics-labels). [#115537][#115537] -- Introduced metrics `log_fluent_sink_conn_attempts`, `log_fluent_sink_write_attempts`, and `log_fluent_sink_write_errors` to enable more precise tracking of connection and write operations when [logging to Fluentd-compatible network collectors]({% link v24.1/configure-logs.md %}#output-to-fluentd-compatible-network-collectors). [#116699][#116699] -- The [cluster setting]({% link v24.1/cluster-settings.md %}) `sql.contention.record_serialization_conflicts.enabled` is now `on` by default. This means any [`40001` errors]({% link v24.1/transaction-retry-error-reference.md %}) that are returned containing conflicting transaction information will be recorded by the contention registry. 
[#116664][#116664] -- Removed the `kv.rangefeed.scheduler.enabled` [cluster setting]({% link v24.1/cluster-settings.md %}) because the [rangefeed]({% link v24.1/create-and-configure-changefeeds.md %}#enable-rangefeeds) scheduler is now unconditionally enabled. [#114410][#114410] -- Removed the `kv.rangefeed.catchup_scan_concurrency` [cluster setting]({% link v24.1/cluster-settings.md %}). Catchup scans are throttled via [`kv.rangefeed.concurrent_catchup_iterators`]({% link v24.1/advanced-changefeed-configuration.md %}) on a per-node basis. [#114408][#114408] -- Removed the [`changefeed.mux_rangefeed.enabled`]({% link v24.1/advanced-changefeed-configuration.md %}#mux-rangefeeds) cluster setting because the functionality is enabled by default. [#114408][#114408] -- The gossip status [Advanced Debug page]({% link v24.1/ui-debug-pages.md %}) now includes information about the server's high water timestamps for every other node it knows about in the gossip cluster. [#117011][#117011] -- Removed the `cockroach_rangefeed_rpc_initial_window_size` environment variable. The rangefeed connection now uses the same window size as other RPC connections. [#117545][#117545] -- [Events]({% link v24.1/eventlog.md %}#miscellaneous-sql-events) for [cluster setting]({% link v24.1/cluster-settings.md %}) changes are now emitted to the `OPS` channel rather than the `DEV` channel. [#117923][#117923] -- The new environment variable `cockroach_rpc_use_default_connection_class` enables operators to switch back to the prior default behavior of sending most network/RPC workloads, except system traffic, through a single RPC/TCP connection, in case the environment does not tolerate multiple TCP connections. v24.1 defaults to using multiple connections, each dedicated to a particular types of traffic, specifically for [Raft]({% link v24.1/architecture/replication-layer.md %}#raft) or [rangefeed]({% link v24.1/change-data-capture-overview.md %}) data. 
For more information, see additional release notes that reference this variable name. [#117810][#117810] +- The internal versions that are reported during [cluster upgrades]({% link v24.1/upgrade-cockroach-version.md %}) have been renamed for clarity. For example, `23.2-8` is now named `23.2-upgrading-to-24.1-step-008`. #115223 +- Introduced a new cluster setting, `server.jwt_authentication.jwks_auto_fetch.enabled`, enabling automatic fetching of [JSON Web Key Sets (JWKS)]({% link v24.1/sso-sql.md %}) from an issuer's remote endpoint. This prevents an administrator's need to update the JWKS specified in `server.jwt_authentication.jwks` - whether manually or by custom script - when the identity provider's keys rotate. That direct specification of JWKS remains the default, as the new cluster setting defaults to `false`. #117054 +- Updated the error message logged in the case of stalled disks to use the appropriate term "disk stall", matching the term used in metrics and dashboards. This was previously "file write stall". #114746 +- Introduced the `changefeed.emitted_batch_sizes` histogram metric that measures the batch sizes used when emitting data to [sinks]({% link v24.1/changefeed-sinks.md %}). This metric supports [metrics labels]({% link v24.1/monitor-and-debug-changefeeds.md %}#using-changefeed-metrics-labels). #115537 +- Introduced metrics `log_fluent_sink_conn_attempts`, `log_fluent_sink_write_attempts`, and `log_fluent_sink_write_errors` to enable more precise tracking of connection and write operations when [logging to Fluentd-compatible network collectors]({% link v24.1/configure-logs.md %}#output-to-fluentd-compatible-network-collectors). #116699 +- The [cluster setting]({% link v24.1/cluster-settings.md %}) `sql.contention.record_serialization_conflicts.enabled` is now `on` by default. 
This means any [`40001` errors]({% link v24.1/transaction-retry-error-reference.md %}) that are returned containing conflicting transaction information will be recorded by the contention registry. #116664 +- Removed the `kv.rangefeed.scheduler.enabled` [cluster setting]({% link v24.1/cluster-settings.md %}) because the [rangefeed]({% link v24.1/create-and-configure-changefeeds.md %}#enable-rangefeeds) scheduler is now unconditionally enabled. #114410 +- Removed the `kv.rangefeed.catchup_scan_concurrency` [cluster setting]({% link v24.1/cluster-settings.md %}). Catchup scans are throttled via [`kv.rangefeed.concurrent_catchup_iterators`]({% link v24.1/advanced-changefeed-configuration.md %}) on a per-node basis. #114408 +- Removed the [`changefeed.mux_rangefeed.enabled`]({% link v24.1/advanced-changefeed-configuration.md %}#mux-rangefeeds) cluster setting because the functionality is enabled by default. #114408 +- The gossip status [Advanced Debug page]({% link v24.1/ui-debug-pages.md %}) now includes information about the server's high water timestamps for every other node it knows about in the gossip cluster. #117011 +- Removed the `cockroach_rangefeed_rpc_initial_window_size` environment variable. The rangefeed connection now uses the same window size as other RPC connections. #117545 +- [Events]({% link v24.1/eventlog.md %}#miscellaneous-sql-events) for [cluster setting]({% link v24.1/cluster-settings.md %}) changes are now emitted to the `OPS` channel rather than the `DEV` channel. #117923 +- The new environment variable `cockroach_rpc_use_default_connection_class` enables operators to switch back to the prior default behavior of sending most network/RPC workloads, except system traffic, through a single RPC/TCP connection, in case the environment does not tolerate multiple TCP connections. 
v24.1 defaults to using multiple connections, each dedicated to a particular type of traffic, specifically for [Raft]({% link v24.1/architecture/replication-layer.md %}#raft) or [rangefeed]({% link v24.1/change-data-capture-overview.md %}) data. For more information, see additional release notes that reference this variable name. #117810 - In unredacted [debug zips]({% link v24.1/cockroach-debug-zip.md %}), the `crdb_internal.transaction_contention_events` table file has two new columns: - `waiting_stmt_query`: the query of the waiting statement. - - `blocking_txn_queries_unordered`: the unordered list of the blocking transaction's queries. [#118478][#118478] -- Transaction replay protection state is now passed between the outgoing and incoming [leaseholder]({% link v24.1/architecture/replication-layer.md %}#leases) for a range during a [lease transfer]({% link v24.1/architecture/replication-layer.md %}#epoch-based-leases-table-data). This avoids cases where lease transfers can cause transactions to throw `TransactionAbortedError(ABORT_REASON_NEW_LEASE_PREVENTS_TXN)` errors. [#118300][#118300] -- CockroachDB will now automatically generate [CPU profiles]({% link v24.1/automatic-cpu-profiler.md %}) if there is an increase in CPU utilization. This can help inform investigations into possible issues. [#118850][#118850] -- Expanded the [`--include-range-info`]({% link v24.1/cockroach-debug-zip.md %}) flag to include problem ranges. This flag still defaults to `true`. [#119205][#119205] + - `blocking_txn_queries_unordered`: the unordered list of the blocking transaction's queries. #118478 +- Transaction replay protection state is now passed between the outgoing and incoming [leaseholder]({% link v24.1/architecture/replication-layer.md %}#leases) for a range during a [lease transfer]({% link v24.1/architecture/replication-layer.md %}#epoch-based-leases-table-data). 
This avoids cases where lease transfers can cause transactions to throw `TransactionAbortedError(ABORT_REASON_NEW_LEASE_PREVENTS_TXN)` errors. #118300 +- CockroachDB will now automatically generate [CPU profiles]({% link v24.1/automatic-cpu-profiler.md %}) if there is an increase in CPU utilization. This can help inform investigations into possible issues. #118850 +- Expanded the [`--include-range-info`]({% link v24.1/cockroach-debug-zip.md %}) flag to include problem ranges. This flag still defaults to `true`. #119205

Command-line changes

-- [Debug zips]({% link v24.1/cockroach-debug-zip.md %}) no longer include redundant `hex_` columns for system table `BYTES` columns. [#112033][#112033] -- Added the `--follower-read-percent` flag, which determines the percent (0-100) of read operations that are follower reads, to the [`cockroach workload kv run`]({% link v24.1/cockroach-workload.md %}) command. [#113094][#113094] -- The workload `schemachange` now writes a `.otlp.ndjson.gz` archive containing OTLP trace bundles for debugging purposes. [#114770][#114770] -- [`cockroach debug tsdump`]({% link v24.1/cockroach-debug-tsdump.md %}) creates a `tsdump.yaml` file. The `tsdump` raw format automatically creates the YAML file in the default location `/tmp/tsdump.yaml`. Added a new flag `--yaml` that allows users to specify the path to create `tsdump.yaml` instead of using the default location. For example, `cockroach debug tsdump --host : \ --format raw --yaml=/some_path/tsdump.yaml > /some_path/tsdump.gob`. [#114046][#114046] -- Removed the `cockroach connect` command functionality. This was [deprecated]({% link releases/v23.2.md %}#v23-2-0-deprecations) in CockroachDB v23.2. [#113893][#113893] -- Changed the SQL shell help URL to point to [`cockroach-sql`]({% link v24.1/cockroach-sql-binary.md %}). [#118960][#118960] -- Added a new `encode-uri` utility to make generating connection strings for use with [Physical Cluster Replication]({% link v24.1/physical-cluster-replication-overview.md %}) easier. [#119528][#119528] +- [Debug zips]({% link v24.1/cockroach-debug-zip.md %}) no longer include redundant `hex_` columns for system table `BYTES` columns. #112033 +- Added the `--follower-read-percent` flag, which determines the percent (0-100) of read operations that are follower reads, to the [`cockroach workload kv run`]({% link v24.1/cockroach-workload.md %}) command. #113094 +- The workload `schemachange` now writes a `.otlp.ndjson.gz` archive containing OTLP trace bundles for debugging purposes. 
#114770 +- [`cockroach debug tsdump`]({% link v24.1/cockroach-debug-tsdump.md %}) creates a `tsdump.yaml` file. The `tsdump` raw format automatically creates the YAML file in the default location `/tmp/tsdump.yaml`. Added a new flag `--yaml` that allows users to specify the path to create `tsdump.yaml` instead of using the default location. For example, `cockroach debug tsdump --host : \ --format raw --yaml=/some_path/tsdump.yaml > /some_path/tsdump.gob`. #114046 +- Removed the `cockroach connect` command functionality. This was [deprecated]({% link releases/v23.2.md %}#v23-2-0-deprecations) in CockroachDB v23.2. #113893 +- Changed the SQL shell help URL to point to [`cockroach-sql`]({% link v24.1/cockroach-sql-binary.md %}). #118960 +- Added a new `encode-uri` utility to make generating connection strings for use with [Physical Cluster Replication]({% link v24.1/physical-cluster-replication-overview.md %}) easier. #119528

DB Console changes

-- Store initialization now logs progress every 10 seconds showing the current and total number of [replicas]({% link v24.1/architecture/replication-layer.md %}) initialized. [#115760][#115760] -- Introduced a new **Lease Preferences** graph on the [Replication dashboard]({% link v24.1/ui-replication-dashboard.md %}). The **Lease Preferences** graph will indicate when the current [leaseholder]({% link v24.1/architecture/replication-layer.md %}#leases) is not the first lease preference and where the current leaseholder satisfies no applied lease preference. [#116709][#116709] -- Updated the [Statement Details page]({% link v24.1/ui-statements-page.md %}) to always show the entire selected period, instead of just the period that had data. [#118680][#118680] -- Error messages displayed upon failure to load DB Console views now include information about the HTTP response status code, if one is present. [#118782][#118782] -- The **Full Table/Index Scans** chart in the [SQL Metrics dashboard]({% link v24.1/ui-sql-dashboard.md %}) now shows the non-negative derivative of the number of full scans tracked. [#118787][#118787] +- Store initialization now logs progress every 10 seconds showing the current and total number of [replicas]({% link v24.1/architecture/replication-layer.md %}) initialized. #115760 +- Introduced a new **Lease Preferences** graph on the [Replication dashboard]({% link v24.1/ui-replication-dashboard.md %}). The **Lease Preferences** graph will indicate when the current [leaseholder]({% link v24.1/architecture/replication-layer.md %}#leases) is not the first lease preference and where the current leaseholder satisfies no applied lease preference. #116709 +- Updated the [Statement Details page]({% link v24.1/ui-statements-page.md %}) to always show the entire selected period, instead of just the period that had data. 
#118680 +- Error messages displayed upon failure to load DB Console views now include information about the HTTP response status code, if one is present. #118782 +- The **Full Table/Index Scans** chart in the [SQL Metrics dashboard]({% link v24.1/ui-sql-dashboard.md %}) now shows the non-negative derivative of the number of full scans tracked. #118787 - The [Overload dashboard]({% link v24.1/ui-overload-dashboard.md %}) now includes two additional graphs: - **Elastic CPU Utilization**: displays the CPU utilization by elastic work, compared to the limit set for elastic work. - - **Elastic CPU Exhausted Duration Per Second**: displays the duration of CPU exhaustion by elastic work, in microseconds. [#118763][#118763] -- The `txn.restarts.writetooold` metric in the **Transaction Restarts** graph under the [SQL Dashboard]({% link v24.1/ui-sql-dashboard.md %}) now includes all restarts previously categorized as `txn.restarts.writetoooldmulti`. The former is a now a superset of the latter. The `txn.restarts.writetoooldmulti` metric will be removed in a future release. [#119411][#119411] + - **Elastic CPU Exhausted Duration Per Second**: displays the duration of CPU exhaustion by elastic work, in microseconds. #118763 +- The `txn.restarts.writetooold` metric in the **Transaction Restarts** graph under the [SQL Dashboard]({% link v24.1/ui-sql-dashboard.md %}) now includes all restarts previously categorized as `txn.restarts.writetoooldmulti`. The former is now a superset of the latter. The `txn.restarts.writetoooldmulti` metric will be removed in a future release. #119411

Bug fixes

-- Fixed a bug that could cause an internal error during distributed execution for an expression like `CASE` that requires its inputs to be the same type with all `NULL` inputs. [#108892][#108892] -- Fixed `NULL` input handling for the geospatial [built-ins]({% link v24.1/functions-and-operators.md %}) `st_pointfromgeohash` and `st_geomfromgeohash`. [#113781][#113781] -- The geospatial `st_makeenvelope` [built-in]({% link v24.1/functions-and-operators.md %}) now correctly supports `xmin` or `ymin` to be greater than `xmax` or `ymax`, respectively. [#113781][#113781] -- Fixed a bug that could cause v23.1 nodes in clusters that had not [finalized the v23.1 version upgrade]({% link v24.1/upgrade-cockroach-version.md %}#step-3-decide-how-the-upgrade-will-be-finalized) to use excessive CPU retrying expected errors related to the incomplete upgrade state. [#113864][#113864] -- [Debug zip]({% link v24.1/cockroach-debug-zip.md %}) now does not fail on corrupted log files. [#113722][#113722] -- Placeholder arguments can now be used in [`SET TRANSACTION`]({% link v24.1/set-transaction.md %}) statements. [#113689][#113689] -- Previously, when the session variable `use_declarative_schema_changer` was set to `off`, [`ALTER PRIMARY KEY`]({% link v24.1/alter-table.md %}#alter-primary-key) would delete any comments associated with the old primary index and old primary key constraint. This is inconsistent with the behavior of `use_declarative_schema_changer=on`, which is the default setting, where those comments would be carried over to the new primary index. Furthermore, the old behavior also caused a bug that could prevent command `SHOW CREATE t` from working. 
[#114354][#114354] -- Previously, when the session variable was set to `use_declarative_schema_changer=off` and there was an attempt to [`ALTER PRIMARY KEY`]({% link v24.1/alter-table.md %}#alter-primary-key) on a table that has unique secondary indexes on new primary key columns, the unique secondary index would still incorrectly have old primary key columns as its `keySuffixColumn` after the `ALTER PRIMARY KEY`. This was problematic because a subsequent dropping of the old primary key columns would unexpectedly drop those unique secondary indexes as well, even without `CASCADE`. [#114622][#114622] -- [`ALTER BACKUP SCHEDULE`]({% link v24.1/alter-backup-schedule.md %}) can now be used to set `updates_cluster_last_backup_time_metric` without providing an explicit value, matching the behavior of the option when specified during [`CREATE SCHEDULE FOR BACKUP`]({% link v24.1/create-schedule-for-backup.md %}). [#113523][#113523] -- Previously, if a table had [secondary indexes]({% link v24.1/schema-design-indexes.md %}) that stored certain columns (`col`) using the [`STORING`]({% link v24.1/indexes.md %}#storing-columns) clause, followed by an [`ALTER PRIMARY KEY`]({% link v24.1/alter-table.md %}#alter-primary-key) to `col`, an incorrect secondary index would persist. The secondary index would continue to have the `STORING` clause, despite the column being part of the primary key and the fact that CockroachDB does not permit secondary indexes to store any primary key columns. Now, after the `ALTER PRIMARY KEY`, the `STORING` clause is dropped on those secondary indexes. [#115214][#115214] -- Fixed a bug that caused uploads to [object-locked buckets]({% link v24.1/use-cloud-storage.md %}) to fail because of the absence of an `MD5` hash. [#115713][#115713] -- [`ALTER PRIMARY KEY`]({% link v24.1/alter-table.md %}#alter-primary-key) now preserves the name of the original primary index when the session variable is `use_declarative_schema_changer=off`. 
[#115338][#115338] -- Fixed a bug where the [`unique-without-index-not-valid` constraint]({% link v24.1/unique.md %}) added to a table would cause the `create_statement` from `SHOW CREATE t` to not be executable and error with `unique constraint cannot be NOT VALID`. [#115354][#115354] -- Fixed a bug where an empty [full backup]({% link v24.1/take-full-and-incremental-backups.md %}) followed by non-empty incremental backups taken inside an application tenant might not allow a [restore]({% link v24.1/restore.md %}) due to the use of an incorrect SQL codec. [#116316][#116316] -- Fixed a bug in the [row-level TTL]({% link v24.1/row-level-ttl.md %}) job that would cause it to skip expired rows if the primary key of the table included columns of the collated `STRING` or `DECIMAL` type. [#116988][#116988] -- Incorrectly labeled [PL/pgSQL]({% link v24.1/plpgsql.md %}) blocks now return an expected syntax error. [#117608][#117608] -- [`CREATE EXTERNAL CONNECTION IF NOT EXISTS`]({% link v24.1/create-external-connection.md %}) no longer returns an error if the connection already exists. [#117312][#117312] -- CockroachDB now correctly uses the histograms on columns of collated [`STRING`]({% link v24.1/string.md %}) type. The bug has been present since before v22.1. [#117714][#117714] -- Improved an interaction during range [lease transfers]({% link v24.1/architecture/replication-layer.md %}#epoch-based-leases-table-data) that could cause `RETRY_ASYNC_WRITE_FAILURE` errors to be returned to clients. [#117840][#117840] -- Backfilling tables for [`CREATE TABLE AS`]({% link v24.1/create-table-as.md %}) or [`CREATE MATERIALIZED VIEW`]({% link v24.1/create-view.md %}) could get into a retry loop if data was deleted and those jobs took longer than the GC TTL. [#117877][#117877] -- [Decommissioning replicas]({% link v24.1/node-shutdown.md %}) that are part of a mis-replicated range will no longer get stuck on a rebalance operation that was falsely determined to be unsafe. 
[#117900][#117900] -- A memory leak within the insights system was found to occur when [`sql.metrics.transaction_details.enabled`]({% link v24.1/cluster-settings.md %}) was disabled, while leaving `sql.metrics.statement_details.enabled` enabled. This patch fixes the memory leak by preventing the collection of further statement and transaction insights when `sql.metrics.transaction_details.enabled` is disabled. [#117709][#117709] -- Fixed a rare panic that could happen during a `pg_dump` import that contains a function that has a subquery in one of its arguments, like `SELECT addgeometrycolumn(...)`. Now, attempting to import a `pg_dump` with such a function results in an expected error. [#118569][#118569] -- [`AUTO CREATE STATS`]({% link v24.1/show-jobs.md %}#show-automatic-jobs) jobs could previously lead to growth in an internal system table resulting in slower job-system related queries. [#118589][#118589] -- Fixed an issue in CockroachDB where, if operating on a Linux system outside of a CPU cgroup, the system would repeatedly log the error `unable to get CPU capacity` at 10-second intervals. [#118657][#118657] -- Fixed a bug where casts of [floats]({% link v24.1/float.md %}) to [integers]({% link v24.1/int.md %}) simply truncated the decimal portion. These casts now match the PostgreSQL behavior of rounding to the nearest integer, and in cases of a value halfway between two integers, rounding to the nearest even number. This aligns with the "round half to even" rule or "bankers' rounding", offering greater overall precision across a group of such cast operations. [#117798][#117798] -- Fixed a bug where statements like `ADD COLUMN j INT, ADD UNIQUE WITHOUT INDEX (j)`, which [add new columns]({% link v24.1/alter-table.md %}#add-column) with unique constraints without creating associated indexes, would fail with an internal error. 
[#118291][#118291] -- Previously, [altering]({% link v24.1/alter-table.md %}) from a [`REGIONAL BY ROW`]({% link v24.1/regional-tables.md %}#regional-by-row-tables) table to a [`REGIONAL BY TABLE`]({% link v24.1/regional-tables.md %}#regional-tables) table could cause leaseholders to never move to the database's primary region. This is now fixed. [#118001][#118001] -- Users with the [VIEWACTIVITY]({% link v24.1/security-reference/authorization.md %}#supported-privileges) privilege can now request statement bundles using `crdb_internal.request_statement_bundle` or through the DB Console [SQL Activity]({% link v24.1/security-reference/authorization.md %}#supported-privileges) page. [#118760][#118760] -- Fixed an internal error with a message like: `LeafTxn ... incompatible with locking request` that occurs when performing an update under [`READ COMMITTED` isolation]({% link v24.1/read-committed.md %}) which cascades to a table with multiple other foreign keys. [#118722][#118722] -- Fixed a bug where [`ALTER PRIMARY KEY`]({% link v24.1/alter-table.md %}#alter-primary-key) could fail with an error `non-nullable column with no value! Index scanned ..` when validating recreated [secondary indexes]({% link v24.1/schema-design-indexes.md %}). [#118843][#118843] -- Fixed a bug where a sequence name allocated by [`SERIAL`]({% link v24.1/serial.md %}) that conflicted with an existing type name would cause an error. [#118861][#118861] -- Fixed a bug where [`COMMENT ON`]({% link v24.1/comment-on.md %}) statements could fail with an "unexpected value" error if multiple `COMMENT` statements were running concurrently. [#119007][#119007] -- Previously, in certain cases, using virtual tables such as `crdb_internal.system_jobs` could result in the internal error `attempting to append refresh spans after the tracked timestamp has moved forward`. This is now fixed. The bug was introduced in CockroachDB v23.1. 
[#119176][#119176] -- Fixed a bug where operations on the `crdb_internal.leases` table could cause a node to become unavailable due to a deadlock in the leasing subsystem. [#119305][#119305] -- If an individual [replica]({% link v24.1/architecture/replication-layer.md %})'s circuit breaker had tripped but the range was otherwise functional, for example, because the replica was partially partitioned away from the [leaseholder]({% link v24.1/architecture/replication-layer.md %}#leases), it was possible for a gateway to persistently error when contacting this replica instead of retrying against a functional leaseholder elsewhere. The [gateway]({% link v24.1/architecture/life-of-a-distributed-transaction.md %}#gateway) will now retry such errors against other replicas once. [#118737][#118737] -- Fixed a bug in changefeed [webhook sinks]({% link v24.1/changefeed-sinks.md %}#webhook-sink) where the HTTP request body may not be initialized on retries, resulting in the error `http: ContentLength=... with Body length 0`. [#119326][#119326] -- Fixed a bug where rangefeed resolved timestamps could get stuck, continually emitting the log message `pushing old intents failed: range barrier failed, range split`, typically following a [range merge]({% link v24.1/architecture/distribution-layer.md %}#range-merges). [#119512][#119512] -- Fixed a condition where some files were not closed when inspecting backup metadata during BACKUP and RESTORE. Epic: none. [#119625][#119625] -- Fixed a bug where some backup metadata files opened during [`RESTORE`]({% link v24.1/restore.md %}) were not closed. [#119625][#119625] -- Fixed a bug that caused internal errors when executing an [`EXPORT`]({% link v24.1/export.md %}) statement where the query involved sorting by columns not explicitly included in the output, due to hidden columns in the input expression. 
[#119538][#119538] -- Fixed a bug where a warning about the need to refresh data would remain displayed on the Active Executions view of the [Statements]({% link v24.1/ui-statements-page.md %}#active-executions-view) and [Transactions]({% link v24.1/ui-transactions-page.md %}#active-executions-view) pages despite enabling **Auto Refresh**. [#118675][#118675] +- Fixed a bug that could cause an internal error during distributed execution for an expression like `CASE` that requires its inputs to be the same type with all `NULL` inputs. #108892 +- Fixed `NULL` input handling for the geospatial [built-ins]({% link v24.1/functions-and-operators.md %}) `st_pointfromgeohash` and `st_geomfromgeohash`. #113781 +- The geospatial `st_makeenvelope` [built-in]({% link v24.1/functions-and-operators.md %}) now correctly supports `xmin` or `ymin` to be greater than `xmax` or `ymax`, respectively. #113781 +- Fixed a bug that could cause v23.1 nodes in clusters that had not [finalized the v23.1 version upgrade]({% link v24.1/upgrade-cockroach-version.md %}#step-3-decide-how-the-upgrade-will-be-finalized) to use excessive CPU retrying expected errors related to the incomplete upgrade state. #113864 +- [Debug zip]({% link v24.1/cockroach-debug-zip.md %}) now does not fail on corrupted log files. #113722 +- Placeholder arguments can now be used in [`SET TRANSACTION`]({% link v24.1/set-transaction.md %}) statements. #113689 +- Previously, when the session variable `use_declarative_schema_changer` was set to `off`, [`ALTER PRIMARY KEY`]({% link v24.1/alter-table.md %}#alter-primary-key) would delete any comments associated with the old primary index and old primary key constraint. This is inconsistent with the behavior of `use_declarative_schema_changer=on`, which is the default setting, where those comments would be carried over to the new primary index. Furthermore, the old behavior also caused a bug that could prevent command `SHOW CREATE t` from working. 
#114354 +- Previously, when the session variable was set to `use_declarative_schema_changer=off` and there was an attempt to [`ALTER PRIMARY KEY`]({% link v24.1/alter-table.md %}#alter-primary-key) on a table that has unique secondary indexes on new primary key columns, the unique secondary index would still incorrectly have old primary key columns as its `keySuffixColumn` after the `ALTER PRIMARY KEY`. This was problematic because a subsequent dropping of the old primary key columns would unexpectedly drop those unique secondary indexes as well, even without `CASCADE`. #114622 +- [`ALTER BACKUP SCHEDULE`]({% link v24.1/alter-backup-schedule.md %}) can now be used to set `updates_cluster_last_backup_time_metric` without providing an explicit value, matching the behavior of the option when specified during [`CREATE SCHEDULE FOR BACKUP`]({% link v24.1/create-schedule-for-backup.md %}). #113523 +- Previously, if a table had [secondary indexes]({% link v24.1/schema-design-indexes.md %}) that stored certain columns (`col`) using the [`STORING`]({% link v24.1/indexes.md %}#storing-columns) clause, followed by an [`ALTER PRIMARY KEY`]({% link v24.1/alter-table.md %}#alter-primary-key) to `col`, an incorrect secondary index would persist. The secondary index would continue to have the `STORING` clause, despite the column being part of the primary key and the fact that CockroachDB does not permit secondary indexes to store any primary key columns. Now, after the `ALTER PRIMARY KEY`, the `STORING` clause is dropped on those secondary indexes. #115214 +- Fixed a bug that caused uploads to [object-locked buckets]({% link v24.1/use-cloud-storage.md %}) to fail because of the absence of an `MD5` hash. #115713 +- [`ALTER PRIMARY KEY`]({% link v24.1/alter-table.md %}#alter-primary-key) now preserves the name of the original primary index when the session variable is `use_declarative_schema_changer=off`. 
#115338 +- Fixed a bug where the [`unique-without-index-not-valid` constraint]({% link v24.1/unique.md %}) added to a table would cause the `create_statement` from `SHOW CREATE t` to not be executable and error with `unique constraint cannot be NOT VALID`. #115354 +- Fixed a bug where an empty [full backup]({% link v24.1/take-full-and-incremental-backups.md %}) followed by non-empty incremental backups taken inside an application tenant might not allow a [restore]({% link v24.1/restore.md %}) due to the use of an incorrect SQL codec. #116316 +- Fixed a bug in the [row-level TTL]({% link v24.1/row-level-ttl.md %}) job that would cause it to skip expired rows if the primary key of the table included columns of the collated `STRING` or `DECIMAL` type. #116988 +- Incorrectly labeled [PL/pgSQL]({% link v24.1/plpgsql.md %}) blocks now return an expected syntax error. #117608 +- [`CREATE EXTERNAL CONNECTION IF NOT EXISTS`]({% link v24.1/create-external-connection.md %}) no longer returns an error if the connection already exists. #117312 +- CockroachDB now correctly uses the histograms on columns of collated [`STRING`]({% link v24.1/string.md %}) type. The bug has been present since before v22.1. #117714 +- Improved an interaction during range [lease transfers]({% link v24.1/architecture/replication-layer.md %}#epoch-based-leases-table-data) that could cause `RETRY_ASYNC_WRITE_FAILURE` errors to be returned to clients. #117840 +- Backfilling tables for [`CREATE TABLE AS`]({% link v24.1/create-table-as.md %}) or [`CREATE MATERIALIZED VIEW`]({% link v24.1/create-view.md %}) could get into a retry loop if data was deleted and those jobs took longer than the GC TTL. #117877 +- [Decommissioning replicas]({% link v24.1/node-shutdown.md %}) that are part of a mis-replicated range will no longer get stuck on a rebalance operation that was falsely determined to be unsafe. 
#117900 +- A memory leak within the insights system was found to occur when [`sql.metrics.transaction_details.enabled`]({% link v24.1/cluster-settings.md %}) was disabled, while leaving `sql.metrics.statement_details.enabled` enabled. This patch fixes the memory leak by preventing the collection of further statement and transaction insights when `sql.metrics.transaction_details.enabled` is disabled. #117709 +- Fixed a rare panic that could happen during a `pg_dump` import that contains a function that has a subquery in one of its arguments, like `SELECT addgeometrycolumn(...)`. Now, attempting to import a `pg_dump` with such a function results in an expected error. #118569 +- [`AUTO CREATE STATS`]({% link v24.1/show-jobs.md %}#show-automatic-jobs) jobs could previously lead to growth in an internal system table resulting in slower job-system related queries. #118589 +- Fixed an issue in CockroachDB where, if operating on a Linux system outside of a CPU cgroup, the system would repeatedly log the error `unable to get CPU capacity` at 10-second intervals. #118657 +- Fixed a bug where casts of [floats]({% link v24.1/float.md %}) to [integers]({% link v24.1/int.md %}) simply truncated the decimal portion. These casts now match the PostgreSQL behavior of rounding to the nearest integer, and in cases of a value halfway between two integers, rounding to the nearest even number. This aligns with the "round half to even" rule or "bankers' rounding", offering greater overall precision across a group of such cast operations. #117798 +- Fixed a bug where statements like `ADD COLUMN j INT, ADD UNIQUE WITHOUT INDEX (j)`, which [add new columns]({% link v24.1/alter-table.md %}#add-column) with unique constraints without creating associated indexes, would fail with an internal error. 
#118291 +- Previously, [altering]({% link v24.1/alter-table.md %}) from a [`REGIONAL BY ROW`]({% link v24.1/regional-tables.md %}#regional-by-row-tables) table to a [`REGIONAL BY TABLE`]({% link v24.1/regional-tables.md %}#regional-tables) table could cause leaseholders to never move to the database's primary region. This is now fixed. #118001 +- Users with the [VIEWACTIVITY]({% link v24.1/security-reference/authorization.md %}#supported-privileges) privilege can now request statement bundles using `crdb_internal.request_statement_bundle` or through the DB Console [SQL Activity]({% link v24.1/ui-overview.md %}#sql-activity) page. #118760 +- Fixed an internal error with a message like: `LeafTxn ... incompatible with locking request` that occurs when performing an update under [`READ COMMITTED` isolation]({% link v24.1/read-committed.md %}) which cascades to a table with multiple other foreign keys. #118722 +- Fixed a bug where [`ALTER PRIMARY KEY`]({% link v24.1/alter-table.md %}#alter-primary-key) could fail with an error `non-nullable column with no value! Index scanned ..` when validating recreated [secondary indexes]({% link v24.1/schema-design-indexes.md %}). #118843 +- Fixed a bug where a sequence name allocated by [`SERIAL`]({% link v24.1/serial.md %}) that conflicted with an existing type name would cause an error. #118861 +- Fixed a bug where [`COMMENT ON`]({% link v24.1/comment-on.md %}) statements could fail with an "unexpected value" error if multiple `COMMENT` statements were running concurrently. #119007 +- Previously, in certain cases, using virtual tables such as `crdb_internal.system_jobs` could result in the internal error `attempting to append refresh spans after the tracked timestamp has moved forward`. This is now fixed. The bug was introduced in CockroachDB v23.1. 
#119176 +- Fixed a bug where operations on the `crdb_internal.leases` table could cause a node to become unavailable due to a deadlock in the leasing subsystem. #119305 +- If an individual [replica]({% link v24.1/architecture/replication-layer.md %})'s circuit breaker had tripped but the range was otherwise functional, for example, because the replica was partially partitioned away from the [leaseholder]({% link v24.1/architecture/replication-layer.md %}#leases), it was possible for a gateway to persistently error when contacting this replica instead of retrying against a functional leaseholder elsewhere. The [gateway]({% link v24.1/architecture/life-of-a-distributed-transaction.md %}#gateway) will now retry such errors against other replicas once. #118737 +- Fixed a bug in changefeed [webhook sinks]({% link v24.1/changefeed-sinks.md %}#webhook-sink) where the HTTP request body may not be initialized on retries, resulting in the error `http: ContentLength=... with Body length 0`. #119326 +- Fixed a bug where rangefeed resolved timestamps could get stuck, continually emitting the log message `pushing old intents failed: range barrier failed, range split`, typically following a [range merge]({% link v24.1/architecture/distribution-layer.md %}#range-merges). #119512 +- Fixed a condition where some files were not closed when inspecting backup metadata during BACKUP and RESTORE. Epic: none. #119625 +- Fixed a bug where some backup metadata files opened during [`RESTORE`]({% link v24.1/restore.md %}) were not closed. #119625 +- Fixed a bug that caused internal errors when executing an [`EXPORT`]({% link v24.1/export.md %}) statement where the query involved sorting by columns not explicitly included in the output, due to hidden columns in the input expression. 
#119538 +- Fixed a bug where a warning about the need to refresh data would remain displayed on the Active Executions view of the [Statements]({% link v24.1/ui-statements-page.md %}#active-executions-view) and [Transactions]({% link v24.1/ui-transactions-page.md %}#active-executions-view) pages despite enabling **Auto Refresh**. #118675

Performance improvements

-- [Follower reads]({% link v24.1/follower-reads.md %}) for multi-region tables now default to prioritizing replicas in the same [locality]({% link v24.1/table-localities.md %}), when available, with node latency as a tie breaker. Previously, latency was the primary criteria. This can improve the performance and predictability of follower reads. [#112993][#112993] -- During node startup, stores are now loaded in parallel by default, reducing start times for nodes with many stores. [#115285][#115285] -- Improved the efficiency and performance of [encryption at rest]({% link v24.1/security-reference/encryption.md %}#encryption-at-rest). [#115454][#115454] -- Rangefeeds, the infrastructure used for [changefeeds]({% link v24.1/change-data-capture-overview.md %}), now use a more efficient engine that reduces the number of goroutines and the associated Go scheduler pressure and latency. [#114410][#114410] -- Rangefeeds, the infrastructure used for [changefeeds]({% link v24.1/change-data-capture-overview.md %}), now use a more efficient multiplexing protocol. [#114408][#114408] -- The [cost-based optimizer]({% link v24.1/cost-based-optimizer.md %}) now generates constrained scans on indexes containing boolean, computed expressions. [#114798][#114798] -- A separate RPC connection class is now used for most [Raft]({% link v24.1/architecture/replication-layer.md %}#raft) traffic. This improves isolation and reduces interference with foreground SQL traffic, which reduces chances of head-of-line blocking caused by unrelated traffic under high-load conditions. The new `COCKROACH_RAFT_USE_DEFAULT_CONNECTION_CLASS` environment variable can be set to use the default connection class instead (the previous behavior). [#117385][#117385] -- Rangefeed traffic (typically for [changefeeds]({% link v24.1/change-data-capture-overview.md %})) is now separated into its own RPC connection class. 
This improves isolation and reduces interference with the foreground SQL traffic, which reduces chances of head-of-line blocking caused by unrelated traffic. The new `COCKROACH_RANGEFEED_USE_DEFAULT_CONNECTION_CLASS` environment variable can be set to use the default connection class, the previous default choice for rangefeeds. [#117730][#117730] -- The initial scan traffic for [changefeeds]({% link v24.1/change-data-capture-overview.md %}#stream-row-level-changes-with-changefeeds), which can be significant, now uses a different RPC/TCP connection than the foreground SQL/KV traffic. This reduces interference between workloads, and reduces chances of head-of-line blocking issues. [#117810][#117810] -- [`kafka_sink_config`]({% link v24.1/changefeed-sinks.md %}#kafka-sink-configuration) now supports specifying different client IDs for each changefeed, enabling users to define distinct Kafka quota configurations for each. For example, `CREATE CHANGEFEED FOR ... WITH kafka_sink_config='{"ClientID": "clientID1"}'` [#118643][#118643] -- Added the `changefeed.kafka_throttling_hist_nanos` metric, enhancing visibility into throttling times when CockroachDB operations exceed Kafka's quota limits. [#117693][#117693] -- The [cost-based optimizer]({% link v24.1/cost-based-optimizer.md %}) now generates more efficient query plans for queries with comparisons of [timestamp]({% link v24.1/timestamp.md %}) and [interval]({% link v24.1/interval.md %}) columns, for example, `timestamp_col - '1 day'::INTERVAL > now()`. [#118307][#118307] -- Statements from internal executors (use of SQL queries by the cluster itself) now correctly display when filtering by application name `$ internal` on the Statements page in [SQL Activity]({% link v24.1/ui-overview.md %}#sql-activity). Such statements are hidden when `$ internal` is not specified. 
[#114498][#114498] +- [Follower reads]({% link v24.1/follower-reads.md %}) for multi-region tables now default to prioritizing replicas in the same [locality]({% link v24.1/table-localities.md %}), when available, with node latency as a tie breaker. Previously, latency was the primary criterion. This can improve the performance and predictability of follower reads. #112993 +- During node startup, stores are now loaded in parallel by default, reducing start times for nodes with many stores. #115285 +- Improved the efficiency and performance of [encryption at rest]({% link v24.1/security-reference/encryption.md %}#encryption-at-rest). #115454 +- Rangefeeds, the infrastructure used for [changefeeds]({% link v24.1/change-data-capture-overview.md %}), now use a more efficient engine that reduces the number of goroutines and the associated Go scheduler pressure and latency. #114410 +- Rangefeeds, the infrastructure used for [changefeeds]({% link v24.1/change-data-capture-overview.md %}), now use a more efficient multiplexing protocol. #114408 +- The [cost-based optimizer]({% link v24.1/cost-based-optimizer.md %}) now generates constrained scans on indexes containing boolean, computed expressions. #114798 +- A separate RPC connection class is now used for most [Raft]({% link v24.1/architecture/replication-layer.md %}#raft) traffic. This improves isolation and reduces interference with foreground SQL traffic, which reduces chances of head-of-line blocking caused by unrelated traffic under high-load conditions. The new `COCKROACH_RAFT_USE_DEFAULT_CONNECTION_CLASS` environment variable can be set to use the default connection class instead (the previous behavior). #117385 +- Rangefeed traffic (typically for [changefeeds]({% link v24.1/change-data-capture-overview.md %})) is now separated into its own RPC connection class. 
This improves isolation and reduces interference with the foreground SQL traffic, which reduces chances of head-of-line blocking caused by unrelated traffic. The new `COCKROACH_RANGEFEED_USE_DEFAULT_CONNECTION_CLASS` environment variable can be set to use the default connection class, the previous default choice for rangefeeds. #117730 +- The initial scan traffic for [changefeeds]({% link v24.1/change-data-capture-overview.md %}#stream-row-level-changes-with-changefeeds), which can be significant, now uses a different RPC/TCP connection than the foreground SQL/KV traffic. This reduces interference between workloads, and reduces chances of head-of-line blocking issues. #117810 +- [`kafka_sink_config`]({% link v24.1/changefeed-sinks.md %}#kafka-sink-configuration) now supports specifying different client IDs for each changefeed, enabling users to define distinct Kafka quota configurations for each. For example, `CREATE CHANGEFEED FOR ... WITH kafka_sink_config='{"ClientID": "clientID1"}'` #118643 +- Added the `changefeed.kafka_throttling_hist_nanos` metric, enhancing visibility into throttling times when CockroachDB operations exceed Kafka's quota limits. #117693 +- The [cost-based optimizer]({% link v24.1/cost-based-optimizer.md %}) now generates more efficient query plans for queries with comparisons of [timestamp]({% link v24.1/timestamp.md %}) and [interval]({% link v24.1/interval.md %}) columns, for example, `timestamp_col - '1 day'::INTERVAL > now()`. #118307 +- Statements from internal executors (use of SQL queries by the cluster itself) now correctly display when filtering by application name `$ internal` on the Statements page in [SQL Activity]({% link v24.1/ui-overview.md %}#sql-activity). Such statements are hidden when `$ internal` is not specified. #114498
@@ -198,223 +198,3 @@ We would like to thank the following contributors from the CockroachDB community
-[#108892]: https://github.com/cockroachdb/cockroach/pull/108892 -[#110950]: https://github.com/cockroachdb/cockroach/pull/110950 -[#111179]: https://github.com/cockroachdb/cockroach/pull/111179 -[#111401]: https://github.com/cockroachdb/cockroach/pull/111401 -[#111424]: https://github.com/cockroachdb/cockroach/pull/111424 -[#111895]: https://github.com/cockroachdb/cockroach/pull/111895 -[#111986]: https://github.com/cockroachdb/cockroach/pull/111986 -[#112033]: https://github.com/cockroachdb/cockroach/pull/112033 -[#112118]: https://github.com/cockroachdb/cockroach/pull/112118 -[#112161]: https://github.com/cockroachdb/cockroach/pull/112161 -[#112162]: https://github.com/cockroachdb/cockroach/pull/112162 -[#112203]: https://github.com/cockroachdb/cockroach/pull/112203 -[#112350]: https://github.com/cockroachdb/cockroach/pull/112350 -[#112357]: https://github.com/cockroachdb/cockroach/pull/112357 -[#112971]: https://github.com/cockroachdb/cockroach/pull/112971 -[#112975]: https://github.com/cockroachdb/cockroach/pull/112975 -[#112993]: https://github.com/cockroachdb/cockroach/pull/112993 -[#113094]: https://github.com/cockroachdb/cockroach/pull/113094 -[#113149]: https://github.com/cockroachdb/cockroach/pull/113149 -[#113523]: https://github.com/cockroachdb/cockroach/pull/113523 -[#113599]: https://github.com/cockroachdb/cockroach/pull/113599 -[#113679]: https://github.com/cockroachdb/cockroach/pull/113679 -[#113689]: https://github.com/cockroachdb/cockroach/pull/113689 -[#113722]: https://github.com/cockroachdb/cockroach/pull/113722 -[#113741]: https://github.com/cockroachdb/cockroach/pull/113741 -[#113781]: https://github.com/cockroachdb/cockroach/pull/113781 -[#113860]: https://github.com/cockroachdb/cockroach/pull/113860 -[#113864]: https://github.com/cockroachdb/cockroach/pull/113864 -[#113893]: https://github.com/cockroachdb/cockroach/pull/113893 -[#113993]: https://github.com/cockroachdb/cockroach/pull/113993 -[#114046]: 
https://github.com/cockroachdb/cockroach/pull/114046 -[#114097]: https://github.com/cockroachdb/cockroach/pull/114097 -[#114148]: https://github.com/cockroachdb/cockroach/pull/114148 -[#114186]: https://github.com/cockroachdb/cockroach/pull/114186 -[#114305]: https://github.com/cockroachdb/cockroach/pull/114305 -[#114332]: https://github.com/cockroachdb/cockroach/pull/114332 -[#114354]: https://github.com/cockroachdb/cockroach/pull/114354 -[#114408]: https://github.com/cockroachdb/cockroach/pull/114408 -[#114410]: https://github.com/cockroachdb/cockroach/pull/114410 -[#114454]: https://github.com/cockroachdb/cockroach/pull/114454 -[#114498]: https://github.com/cockroachdb/cockroach/pull/114498 -[#114599]: https://github.com/cockroachdb/cockroach/pull/114599 -[#114622]: https://github.com/cockroachdb/cockroach/pull/114622 -[#114736]: https://github.com/cockroachdb/cockroach/pull/114736 -[#114746]: https://github.com/cockroachdb/cockroach/pull/114746 -[#114770]: https://github.com/cockroachdb/cockroach/pull/114770 -[#114798]: https://github.com/cockroachdb/cockroach/pull/114798 -[#114863]: https://github.com/cockroachdb/cockroach/pull/114863 -[#114876]: https://github.com/cockroachdb/cockroach/pull/114876 -[#114973]: https://github.com/cockroachdb/cockroach/pull/114973 -[#115048]: https://github.com/cockroachdb/cockroach/pull/115048 -[#115166]: https://github.com/cockroachdb/cockroach/pull/115166 -[#115169]: https://github.com/cockroachdb/cockroach/pull/115169 -[#115214]: https://github.com/cockroachdb/cockroach/pull/115214 -[#115223]: https://github.com/cockroachdb/cockroach/pull/115223 -[#115273]: https://github.com/cockroachdb/cockroach/pull/115273 -[#115285]: https://github.com/cockroachdb/cockroach/pull/115285 -[#115338]: https://github.com/cockroachdb/cockroach/pull/115338 -[#115354]: https://github.com/cockroachdb/cockroach/pull/115354 -[#115442]: https://github.com/cockroachdb/cockroach/pull/115442 -[#115454]: 
https://github.com/cockroachdb/cockroach/pull/115454 -[#115473]: https://github.com/cockroachdb/cockroach/pull/115473 -[#115511]: https://github.com/cockroachdb/cockroach/pull/115511 -[#115537]: https://github.com/cockroachdb/cockroach/pull/115537 -[#115713]: https://github.com/cockroachdb/cockroach/pull/115713 -[#115722]: https://github.com/cockroachdb/cockroach/pull/115722 -[#115733]: https://github.com/cockroachdb/cockroach/pull/115733 -[#115760]: https://github.com/cockroachdb/cockroach/pull/115760 -[#115833]: https://github.com/cockroachdb/cockroach/pull/115833 -[#116013]: https://github.com/cockroachdb/cockroach/pull/116013 -[#116213]: https://github.com/cockroachdb/cockroach/pull/116213 -[#116316]: https://github.com/cockroachdb/cockroach/pull/116316 -[#116410]: https://github.com/cockroachdb/cockroach/pull/116410 -[#116474]: https://github.com/cockroachdb/cockroach/pull/116474 -[#116498]: https://github.com/cockroachdb/cockroach/pull/116498 -[#116520]: https://github.com/cockroachdb/cockroach/pull/116520 -[#116664]: https://github.com/cockroachdb/cockroach/pull/116664 -[#116673]: https://github.com/cockroachdb/cockroach/pull/116673 -[#116699]: https://github.com/cockroachdb/cockroach/pull/116699 -[#116709]: https://github.com/cockroachdb/cockroach/pull/116709 -[#116778]: https://github.com/cockroachdb/cockroach/pull/116778 -[#116783]: https://github.com/cockroachdb/cockroach/pull/116783 -[#116830]: https://github.com/cockroachdb/cockroach/pull/116830 -[#116835]: https://github.com/cockroachdb/cockroach/pull/116835 -[#116879]: https://github.com/cockroachdb/cockroach/pull/116879 -[#116988]: https://github.com/cockroachdb/cockroach/pull/116988 -[#117011]: https://github.com/cockroachdb/cockroach/pull/117011 -[#117054]: https://github.com/cockroachdb/cockroach/pull/117054 -[#117095]: https://github.com/cockroachdb/cockroach/pull/117095 -[#117117]: https://github.com/cockroachdb/cockroach/pull/117117 -[#117191]: 
https://github.com/cockroachdb/cockroach/pull/117191 -[#117312]: https://github.com/cockroachdb/cockroach/pull/117312 -[#117357]: https://github.com/cockroachdb/cockroach/pull/117357 -[#117385]: https://github.com/cockroachdb/cockroach/pull/117385 -[#117429]: https://github.com/cockroachdb/cockroach/pull/117429 -[#117520]: https://github.com/cockroachdb/cockroach/pull/117520 -[#117545]: https://github.com/cockroachdb/cockroach/pull/117545 -[#117554]: https://github.com/cockroachdb/cockroach/pull/117554 -[#117560]: https://github.com/cockroachdb/cockroach/pull/117560 -[#117591]: https://github.com/cockroachdb/cockroach/pull/117591 -[#117608]: https://github.com/cockroachdb/cockroach/pull/117608 -[#117636]: https://github.com/cockroachdb/cockroach/pull/117636 -[#117656]: https://github.com/cockroachdb/cockroach/pull/117656 -[#117693]: https://github.com/cockroachdb/cockroach/pull/117693 -[#117709]: https://github.com/cockroachdb/cockroach/pull/117709 -[#117710]: https://github.com/cockroachdb/cockroach/pull/117710 -[#117714]: https://github.com/cockroachdb/cockroach/pull/117714 -[#117730]: https://github.com/cockroachdb/cockroach/pull/117730 -[#117776]: https://github.com/cockroachdb/cockroach/pull/117776 -[#117793]: https://github.com/cockroachdb/cockroach/pull/117793 -[#117798]: https://github.com/cockroachdb/cockroach/pull/117798 -[#117810]: https://github.com/cockroachdb/cockroach/pull/117810 -[#117838]: https://github.com/cockroachdb/cockroach/pull/117838 -[#117840]: https://github.com/cockroachdb/cockroach/pull/117840 -[#117875]: https://github.com/cockroachdb/cockroach/pull/117875 -[#117877]: https://github.com/cockroachdb/cockroach/pull/117877 -[#117900]: https://github.com/cockroachdb/cockroach/pull/117900 -[#117910]: https://github.com/cockroachdb/cockroach/pull/117910 -[#117923]: https://github.com/cockroachdb/cockroach/pull/117923 -[#117928]: https://github.com/cockroachdb/cockroach/pull/117928 -[#117936]: 
https://github.com/cockroachdb/cockroach/pull/117936 -[#117937]: https://github.com/cockroachdb/cockroach/pull/117937 -[#118001]: https://github.com/cockroachdb/cockroach/pull/118001 -[#118002]: https://github.com/cockroachdb/cockroach/pull/118002 -[#118010]: https://github.com/cockroachdb/cockroach/pull/118010 -[#118241]: https://github.com/cockroachdb/cockroach/pull/118241 -[#118255]: https://github.com/cockroachdb/cockroach/pull/118255 -[#118291]: https://github.com/cockroachdb/cockroach/pull/118291 -[#118300]: https://github.com/cockroachdb/cockroach/pull/118300 -[#118307]: https://github.com/cockroachdb/cockroach/pull/118307 -[#118440]: https://github.com/cockroachdb/cockroach/pull/118440 -[#118476]: https://github.com/cockroachdb/cockroach/pull/118476 -[#118478]: https://github.com/cockroachdb/cockroach/pull/118478 -[#118479]: https://github.com/cockroachdb/cockroach/pull/118479 -[#118502]: https://github.com/cockroachdb/cockroach/pull/118502 -[#118569]: https://github.com/cockroachdb/cockroach/pull/118569 -[#118583]: https://github.com/cockroachdb/cockroach/pull/118583 -[#118589]: https://github.com/cockroachdb/cockroach/pull/118589 -[#118610]: https://github.com/cockroachdb/cockroach/pull/118610 -[#118643]: https://github.com/cockroachdb/cockroach/pull/118643 -[#118644]: https://github.com/cockroachdb/cockroach/pull/118644 -[#118657]: https://github.com/cockroachdb/cockroach/pull/118657 -[#118675]: https://github.com/cockroachdb/cockroach/pull/118675 -[#118676]: https://github.com/cockroachdb/cockroach/pull/118676 -[#118680]: https://github.com/cockroachdb/cockroach/pull/118680 -[#118722]: https://github.com/cockroachdb/cockroach/pull/118722 -[#118737]: https://github.com/cockroachdb/cockroach/pull/118737 -[#118760]: https://github.com/cockroachdb/cockroach/pull/118760 -[#118763]: https://github.com/cockroachdb/cockroach/pull/118763 -[#118781]: https://github.com/cockroachdb/cockroach/pull/118781 -[#118782]: 
https://github.com/cockroachdb/cockroach/pull/118782 -[#118787]: https://github.com/cockroachdb/cockroach/pull/118787 -[#118843]: https://github.com/cockroachdb/cockroach/pull/118843 -[#118850]: https://github.com/cockroachdb/cockroach/pull/118850 -[#118861]: https://github.com/cockroachdb/cockroach/pull/118861 -[#118960]: https://github.com/cockroachdb/cockroach/pull/118960 -[#119007]: https://github.com/cockroachdb/cockroach/pull/119007 -[#119042]: https://github.com/cockroachdb/cockroach/pull/119042 -[#119104]: https://github.com/cockroachdb/cockroach/pull/119104 -[#119140]: https://github.com/cockroachdb/cockroach/pull/119140 -[#119149]: https://github.com/cockroachdb/cockroach/pull/119149 -[#119176]: https://github.com/cockroachdb/cockroach/pull/119176 -[#119205]: https://github.com/cockroachdb/cockroach/pull/119205 -[#119221]: https://github.com/cockroachdb/cockroach/pull/119221 -[#119250]: https://github.com/cockroachdb/cockroach/pull/119250 -[#119251]: https://github.com/cockroachdb/cockroach/pull/119251 -[#119305]: https://github.com/cockroachdb/cockroach/pull/119305 -[#119323]: https://github.com/cockroachdb/cockroach/pull/119323 -[#119326]: https://github.com/cockroachdb/cockroach/pull/119326 -[#119366]: https://github.com/cockroachdb/cockroach/pull/119366 -[#119411]: https://github.com/cockroachdb/cockroach/pull/119411 -[#119512]: https://github.com/cockroachdb/cockroach/pull/119512 -[#119528]: https://github.com/cockroachdb/cockroach/pull/119528 -[#119538]: https://github.com/cockroachdb/cockroach/pull/119538 -[#119611]: https://github.com/cockroachdb/cockroach/pull/119611 -[#119625]: https://github.com/cockroachdb/cockroach/pull/119625 -[05049deba]: https://github.com/cockroachdb/cockroach/commit/05049deba -[0625489bc]: https://github.com/cockroachdb/cockroach/commit/0625489bc -[08a9c1ae7]: https://github.com/cockroachdb/cockroach/commit/08a9c1ae7 -[08cccf790]: https://github.com/cockroachdb/cockroach/commit/08cccf790 -[0dd1de457]: 
https://github.com/cockroachdb/cockroach/commit/0dd1de457 -[141c20c89]: https://github.com/cockroachdb/cockroach/commit/141c20c89 -[14220d671]: https://github.com/cockroachdb/cockroach/commit/14220d671 -[1e3eaa26d]: https://github.com/cockroachdb/cockroach/commit/1e3eaa26d -[23228632a]: https://github.com/cockroachdb/cockroach/commit/23228632a -[2545159aa]: https://github.com/cockroachdb/cockroach/commit/2545159aa -[29229633a]: https://github.com/cockroachdb/cockroach/commit/29229633a -[30d4b2ea7]: https://github.com/cockroachdb/cockroach/commit/30d4b2ea7 -[3805f6f0a]: https://github.com/cockroachdb/cockroach/commit/3805f6f0a -[3b2d243f4]: https://github.com/cockroachdb/cockroach/commit/3b2d243f4 -[409975292]: https://github.com/cockroachdb/cockroach/commit/409975292 -[412d6b88b]: https://github.com/cockroachdb/cockroach/commit/412d6b88b -[480882fdb]: https://github.com/cockroachdb/cockroach/commit/480882fdb -[57d260f09]: https://github.com/cockroachdb/cockroach/commit/57d260f09 -[691b544bb]: https://github.com/cockroachdb/cockroach/commit/691b544bb -[69725225f]: https://github.com/cockroachdb/cockroach/commit/69725225f -[6bb93f29a]: https://github.com/cockroachdb/cockroach/commit/6bb93f29a -[6c2183e53]: https://github.com/cockroachdb/cockroach/commit/6c2183e53 -[874b3fc21]: https://github.com/cockroachdb/cockroach/commit/874b3fc21 -[8802beba3]: https://github.com/cockroachdb/cockroach/commit/8802beba3 -[88cfe7d55]: https://github.com/cockroachdb/cockroach/commit/88cfe7d55 -[900408efc]: https://github.com/cockroachdb/cockroach/commit/900408efc -[9fcc551ae]: https://github.com/cockroachdb/cockroach/commit/9fcc551ae -[b01c287bb]: https://github.com/cockroachdb/cockroach/commit/b01c287bb -[b03471454]: https://github.com/cockroachdb/cockroach/commit/b03471454 -[b5799bac6]: https://github.com/cockroachdb/cockroach/commit/b5799bac6 -[b9f2d19a7]: https://github.com/cockroachdb/cockroach/commit/b9f2d19a7 -[ba13697aa]: 
https://github.com/cockroachdb/cockroach/commit/ba13697aa -[c6ae316e5]: https://github.com/cockroachdb/cockroach/commit/c6ae316e5 -[cad1b02a1]: https://github.com/cockroachdb/cockroach/commit/cad1b02a1 -[cf685c9fa]: https://github.com/cockroachdb/cockroach/commit/cf685c9fa -[d457b0cca]: https://github.com/cockroachdb/cockroach/commit/d457b0cca -[dbff3a09a]: https://github.com/cockroachdb/cockroach/commit/dbff3a09a -[ea8aea78c]: https://github.com/cockroachdb/cockroach/commit/ea8aea78c -[ef4605c72]: https://github.com/cockroachdb/cockroach/commit/ef4605c72 -[f2b7e9f14]: https://github.com/cockroachdb/cockroach/commit/f2b7e9f14 -[f3a28a622]: https://github.com/cockroachdb/cockroach/commit/f3a28a622 -[f67decaf2]: https://github.com/cockroachdb/cockroach/commit/f67decaf2 -[f9241be8c]: https://github.com/cockroachdb/cockroach/commit/f9241be8c -[fd6d12c72]: https://github.com/cockroachdb/cockroach/commit/fd6d12c72 diff --git a/src/current/_includes/releases/v24.1/v24.1.0-alpha.2.md b/src/current/_includes/releases/v24.1/v24.1.0-alpha.2.md index 91703fa7ad2..7eefeef4742 100644 --- a/src/current/_includes/releases/v24.1/v24.1.0-alpha.2.md +++ b/src/current/_includes/releases/v24.1/v24.1.0-alpha.2.md @@ -6,31 +6,31 @@ Release Date: March 11, 2024

Security updates

-- [DB Console](https://www.cockroachlabs.com/docs/v23.2/ui-overview) `session` cookie is now marked `HttpOnly` to prevent it from being read by any JavaScript code. [#119261][#119261] -- [DB Console](https://www.cockroachlabs.com/docs/v23.2/ui-overview) cookies are now marked `Secure` for the browser when the cluster is running in secure mode. [#119261][#119261] +- [DB Console](https://www.cockroachlabs.com/docs/v23.2/ui-overview) `session` cookie is now marked `HttpOnly` to prevent it from being read by any JavaScript code. #119261 +- [DB Console](https://www.cockroachlabs.com/docs/v23.2/ui-overview) cookies are now marked `Secure` for the browser when the cluster is running in secure mode. #119261

General changes

-- [Gateways](https://www.cockroachlabs.com/docs/v23.2/architecture/life-of-a-distributed-transaction#gateway) will now detect faulty or stalled [replicas](https://www.cockroachlabs.com/docs/v23.2/architecture/overview#architecture-replica) and use other replicas instead, which can prevent them getting stuck in certain cases (e.g., with [disk stalls](https://www.cockroachlabs.com/docs/v23.2/cluster-setup-troubleshooting#disk-stalls)). This behavior can be disabled via the [cluster setting](https://www.cockroachlabs.com/docs/v23.2/cluster-settings) `kv.dist_sender.circuit_breaker.enabled`. [#118943][#118943] +- [Gateways](https://www.cockroachlabs.com/docs/v23.2/architecture/life-of-a-distributed-transaction#gateway) will now detect faulty or stalled [replicas](https://www.cockroachlabs.com/docs/v23.2/architecture/overview#architecture-replica) and use other replicas instead, which can prevent them getting stuck in certain cases (e.g., with [disk stalls](https://www.cockroachlabs.com/docs/v23.2/cluster-setup-troubleshooting#disk-stalls)). This behavior can be disabled via the [cluster setting](https://www.cockroachlabs.com/docs/v23.2/cluster-settings) `kv.dist_sender.circuit_breaker.enabled`. #118943

{{ site.data.products.enterprise }} edition changes

-- Added a new [`ALTER ROLE ... SUBJECT` option]({% link v24.1/alter-role.md %}#set-the-subject-role-option-for-certificate-based-authentication). This role option can be set to a subject distinguished name in [RFC 2253](https://www.ietf.org/rfc/rfc2253.html) or [RFC 4514](https://www.rfc-editor.org/rfc/rfc4514.html) format. If set, then during client certificate authentication, certs that do not match the configured distinguished name will be rejected. [#119135][#119135] -- [Changefeeds](https://www.cockroachlabs.com/docs/v23.2/create-changefeed) support a new scheme `azure-event-hub://` for Kafka data streaming to Azure event hubs. The `sinkURL` must include mandatory parameters `shared_access_key_name` and `shared_access_key`. By default and as required, the options `tls_enabled=true`, `sasl_handshake=true`, `sasl_enabled=true`, and `sasl_mechanism=PLAIN` are applied, as they are the only supported options. Other parameters such as `topic_name` and `topic_prefix` are also supported. An example URI is: `azure-event-hub://myeventhubs.servicebus.windows.net:9093?shared_access_key_name=abc&shared_access_key=123`. [#115806][#115806] +- Added a new [`ALTER ROLE ... SUBJECT` option]({% link v24.1/alter-role.md %}#set-the-subject-role-option-for-certificate-based-authentication). This role option can be set to a subject distinguished name in [RFC 2253](https://www.ietf.org/rfc/rfc2253.html) or [RFC 4514](https://www.rfc-editor.org/rfc/rfc4514.html) format. If set, then during client certificate authentication, certs that do not match the configured distinguished name will be rejected. #119135 +- [Changefeeds](https://www.cockroachlabs.com/docs/v23.2/create-changefeed) support a new scheme `azure-event-hub://` for Kafka data streaming to Azure event hubs. The `sinkURL` must include mandatory parameters `shared_access_key_name` and `shared_access_key`. 
By default and as required, the options `tls_enabled=true`, `sasl_handshake=true`, `sasl_enabled=true`, and `sasl_mechanism=PLAIN` are applied, as they are the only supported options. Other parameters such as `topic_name` and `topic_prefix` are also supported. An example URI is: `azure-event-hub://myeventhubs.servicebus.windows.net:9093?shared_access_key_name=abc&shared_access_key=123`. #115806

SQL language changes

-- Added an option for node-level [sequence](https://www.cockroachlabs.com/docs/v23.2/serial) caching. All the sessions on the node can share the same cache, which can be concurrently accessed. The `serial_normalization` [session variable](https://www.cockroachlabs.com/docs/v23.2/set-vars#supported-variables) can now be set to the value `sql_sequence_cached_node`. If this value is set, the [cluster setting](https://www.cockroachlabs.com/docs/v23.2/cluster-settings) `sql.defaults.serial_sequences_cache_size` can be used to control the number of values to cache in a node, with a default of 256. The `PER NODE CACHE` sequence option (syntax is `[ [ PER NODE ] CACHE # ]`) is now fully implemented and will allow nodes to cache sequence numbers. A cache size of 1 means there is no cache, and cache sizes of less than 1 are not valid. [#118546][#118546] +- Added an option for node-level [sequence](https://www.cockroachlabs.com/docs/v23.2/serial) caching. All the sessions on the node can share the same cache, which can be concurrently accessed. The `serial_normalization` [session variable](https://www.cockroachlabs.com/docs/v23.2/set-vars#supported-variables) can now be set to the value `sql_sequence_cached_node`. If this value is set, the [cluster setting](https://www.cockroachlabs.com/docs/v23.2/cluster-settings) `sql.defaults.serial_sequences_cache_size` can be used to control the number of values to cache in a node, with a default of 256. The `PER NODE CACHE` sequence option (syntax is `[ [ PER NODE ] CACHE # ]`) is now fully implemented and will allow nodes to cache sequence numbers. A cache size of 1 means there is no cache, and cache sizes of less than 1 are not valid. #118546

Bug fixes

-- Fixed a bug that prevented the use of [PL/pgSQL](https://www.cockroachlabs.com/docs/v23.2/plpgsql) routines with complex variable names that require double quotes. This bug had existed since v23.2. [#119034][#119034] -- Fixed a bug that could cause creation of a syntactically invalid [PL/pgSQL](https://www.cockroachlabs.com/docs/v23.2/plpgsql) routine to return the wrong error. This bug had existed since v23.2. [#119034][#119034] -- Fixed a bug that could result in a syntax error if a [PL/pgSQL](https://www.cockroachlabs.com/docs/v23.2/plpgsql) routine was created with an escaped string constant in the routine body. This bug had existed since v23.2. [#119034][#119034] -- Fixed a bug where running a [changefeed]({% link v23.2/change-data-capture-overview.md %}) that targets a table with a user-defined type column and with the [`envelope` option]({% link v23.2/create-changefeed.md %}#envelope) set to any value other than `wrapped` would cause a node panic due to a nil dereference. [#119639][#119639] -- Fixed a bug where running [`RESTORE`](https://www.cockroachlabs.com/docs/v23.2/restore) on certain backups would open a very large number of connections to the backup storage provider. [#119840][#119840] -- Previously, a user with the `VIEWACTIVITYREDACTED` [privilege](https://www.cockroachlabs.com/docs/v23.2/security-reference/authorization#managing-privileges) could see constants inside of queries that originated from other [users](https://www.cockroachlabs.com/docs/v23.2/security-reference/authorization#roles) in the [`SHOW SESSIONS`](https://www.cockroachlabs.com/docs/v23.2/show-sessions) output. This information is now properly redacted. 
[#119820][#119820] -- Previously, the [`SHOW QUERIES`](https://www.cockroachlabs.com/docs/v23.2/show-queries) and [`SHOW STATEMENTS`](https://www.cockroachlabs.com/docs/v23.2/show-statements) commands incorrectly required the user to have the `VIEWACTIVITY` or `VIEWACTIVITYREDACTED` [privilege](https://www.cockroachlabs.com/docs/v23.2/security-reference/authorization#managing-privileges). This is now fixed, as a user should always be able to view their own queries, even without this privilege. [#119820][#119820] +- Fixed a bug that prevented the use of [PL/pgSQL](https://www.cockroachlabs.com/docs/v23.2/plpgsql) routines with complex variable names that require double quotes. This bug had existed since v23.2. #119034 +- Fixed a bug that could cause creation of a syntactically invalid [PL/pgSQL](https://www.cockroachlabs.com/docs/v23.2/plpgsql) routine to return the wrong error. This bug had existed since v23.2. #119034 +- Fixed a bug that could result in a syntax error if a [PL/pgSQL](https://www.cockroachlabs.com/docs/v23.2/plpgsql) routine was created with an escaped string constant in the routine body. This bug had existed since v23.2. #119034 +- Fixed a bug where running a [changefeed]({% link v23.2/change-data-capture-overview.md %}) that targets a table with a user-defined type column and with the [`envelope` option]({% link v23.2/create-changefeed.md %}#envelope) set to any value other than `wrapped` would cause a node panic due to a nil dereference. #119639 +- Fixed a bug where running [`RESTORE`](https://www.cockroachlabs.com/docs/v23.2/restore) on certain backups would open a very large number of connections to the backup storage provider. 
#119840 +- Previously, a user with the `VIEWACTIVITYREDACTED` [privilege](https://www.cockroachlabs.com/docs/v23.2/security-reference/authorization#managing-privileges) could see constants inside of queries that originated from other [users](https://www.cockroachlabs.com/docs/v23.2/security-reference/authorization#roles) in the [`SHOW SESSIONS`](https://www.cockroachlabs.com/docs/v23.2/show-sessions) output. This information is now properly redacted. #119820 +- Previously, the [`SHOW QUERIES`](https://www.cockroachlabs.com/docs/v23.2/show-queries) and [`SHOW STATEMENTS`](https://www.cockroachlabs.com/docs/v23.2/show-statements) commands incorrectly required the user to have the `VIEWACTIVITY` or `VIEWACTIVITYREDACTED` [privilege](https://www.cockroachlabs.com/docs/v23.2/security-reference/authorization#managing-privileges). This is now fixed, as a user should always be able to view their own queries, even without this privilege. #119820
@@ -40,55 +40,3 @@ This release includes 1939 merged PRs by 109 authors.
-[#111895]: https://github.com/cockroachdb/cockroach/pull/111895 -[#112162]: https://github.com/cockroachdb/cockroach/pull/112162 -[#112203]: https://github.com/cockroachdb/cockroach/pull/112203 -[#112357]: https://github.com/cockroachdb/cockroach/pull/112357 -[#113149]: https://github.com/cockroachdb/cockroach/pull/113149 -[#113599]: https://github.com/cockroachdb/cockroach/pull/113599 -[#113679]: https://github.com/cockroachdb/cockroach/pull/113679 -[#113741]: https://github.com/cockroachdb/cockroach/pull/113741 -[#113860]: https://github.com/cockroachdb/cockroach/pull/113860 -[#114097]: https://github.com/cockroachdb/cockroach/pull/114097 -[#114148]: https://github.com/cockroachdb/cockroach/pull/114148 -[#114305]: https://github.com/cockroachdb/cockroach/pull/114305 -[#114454]: https://github.com/cockroachdb/cockroach/pull/114454 -[#114498]: https://github.com/cockroachdb/cockroach/pull/114498 -[#114736]: https://github.com/cockroachdb/cockroach/pull/114736 -[#114863]: https://github.com/cockroachdb/cockroach/pull/114863 -[#114876]: https://github.com/cockroachdb/cockroach/pull/114876 -[#114973]: https://github.com/cockroachdb/cockroach/pull/114973 -[#115048]: https://github.com/cockroachdb/cockroach/pull/115048 -[#115169]: https://github.com/cockroachdb/cockroach/pull/115169 -[#115473]: https://github.com/cockroachdb/cockroach/pull/115473 -[#115511]: https://github.com/cockroachdb/cockroach/pull/115511 -[#115806]: https://github.com/cockroachdb/cockroach/pull/115806 -[#116213]: https://github.com/cockroachdb/cockroach/pull/116213 -[#116474]: https://github.com/cockroachdb/cockroach/pull/116474 -[#116699]: https://github.com/cockroachdb/cockroach/pull/116699 -[#116778]: https://github.com/cockroachdb/cockroach/pull/116778 -[#116783]: https://github.com/cockroachdb/cockroach/pull/116783 -[#116879]: https://github.com/cockroachdb/cockroach/pull/116879 -[#117054]: https://github.com/cockroachdb/cockroach/pull/117054 -[#117095]: 
https://github.com/cockroachdb/cockroach/pull/117095 -[#117117]: https://github.com/cockroachdb/cockroach/pull/117117 -[#117191]: https://github.com/cockroachdb/cockroach/pull/117191 -[#117554]: https://github.com/cockroachdb/cockroach/pull/117554 -[#117937]: https://github.com/cockroachdb/cockroach/pull/117937 -[#118255]: https://github.com/cockroachdb/cockroach/pull/118255 -[#118476]: https://github.com/cockroachdb/cockroach/pull/118476 -[#118502]: https://github.com/cockroachdb/cockroach/pull/118502 -[#118546]: https://github.com/cockroachdb/cockroach/pull/118546 -[#118583]: https://github.com/cockroachdb/cockroach/pull/118583 -[#118780]: https://github.com/cockroachdb/cockroach/pull/118780 -[#118943]: https://github.com/cockroachdb/cockroach/pull/118943 -[#119034]: https://github.com/cockroachdb/cockroach/pull/119034 -[#119135]: https://github.com/cockroachdb/cockroach/pull/119135 -[#119250]: https://github.com/cockroachdb/cockroach/pull/119250 -[#119261]: https://github.com/cockroachdb/cockroach/pull/119261 -[#119323]: https://github.com/cockroachdb/cockroach/pull/119323 -[#119639]: https://github.com/cockroachdb/cockroach/pull/119639 -[#119763]: https://github.com/cockroachdb/cockroach/pull/119763 -[#119807]: https://github.com/cockroachdb/cockroach/pull/119807 -[#119820]: https://github.com/cockroachdb/cockroach/pull/119820 -[#119840]: https://github.com/cockroachdb/cockroach/pull/119840 diff --git a/src/current/_includes/releases/v24.1/v24.1.0-alpha.3.md b/src/current/_includes/releases/v24.1/v24.1.0-alpha.3.md index a9c256e8cf7..c48fe4b463e 100644 --- a/src/current/_includes/releases/v24.1/v24.1.0-alpha.3.md +++ b/src/current/_includes/releases/v24.1/v24.1.0-alpha.3.md @@ -6,12 +6,12 @@ Release Date: March 18, 2024

{{ site.data.products.enterprise }} edition changes

-- [`cockroach gen encryption-key`](../v24.1/encryption.html) now accepts a `--version=2` parameter. Version 2 keys activate a new encryption implementation with improved performance. This is expected to become the default in CockroachDB v24.2. [#119913][#119913] +- [`cockroach gen encryption-key`](../v24.1/encryption.html) now accepts a `--version=2` parameter. Version 2 keys activate a new encryption implementation with improved performance. This is expected to become the default in CockroachDB v24.2. #119913

SQL language changes

-- Mutation statements such as [`UPDATE`](../v24.1/update.html) and [`DELETE`](../v24.1/delete.html) as well as locking statements such as [`SELECT FOR UPDATE`](../v24.1/select-for-update.html) are not allowed in read-only transactions or [`AS OF SYSTEM TIME`](../v24.1/as-of-system-time.html) transactions. This fixes an oversight where CockroachDB was allowing mutation statements and locking statements in implicit single-statement transactions using `AS OF SYSTEM TIME`. [#120097][#120097] -- Added support for `RETURN` statements with no expression for routines with `OUT` parameters and routines with a `VOID` return type. [#120043][#120043] +- Mutation statements such as [`UPDATE`](../v24.1/update.html) and [`DELETE`](../v24.1/delete.html) as well as locking statements such as [`SELECT FOR UPDATE`](../v24.1/select-for-update.html) are not allowed in read-only transactions or [`AS OF SYSTEM TIME`](../v24.1/as-of-system-time.html) transactions. This fixes an oversight where CockroachDB was allowing mutation statements and locking statements in implicit single-statement transactions using `AS OF SYSTEM TIME`. #120097 +- Added support for `RETURN` statements with no expression for routines with `OUT` parameters and routines with a `VOID` return type. #120043 - [`ALTER COLUMN`](../v24.1/alter-table.html#alter-column) can now change columns to an identity column by using the syntax in one of the following: {% include_cached copy-clipboard.html %} @@ -28,25 +28,25 @@ Release Date: March 18, 2024 ALTER TABLE t ALTER COLUMN c SET GENERATED BY DEFAULT ~~~ - [#115889][#115889] + #115889 -- Added support for the [PL/pgSQL](../v24.1/plpgsql.html) `NULL` statement. [#119037][#119037] -- [`crdb_internal.leases`](../v24.1/crdb-internal.html) is now behind the [`VIEWCLUSTERMETADATA` privilege](../v24.1/security-reference/authorization.html#supported-privileges). [#120014][#120014] -- [PL/pgSQL](../v24.1/plpgsql.html) blocks can now be nested in a block that has an exception handler. 
[#120045][#120045] +- Added support for the [PL/pgSQL](../v24.1/plpgsql.html) `NULL` statement. #119037 +- [`crdb_internal.leases`](../v24.1/crdb-internal.html) is now behind the [`VIEWCLUSTERMETADATA` privilege](../v24.1/security-reference/authorization.html#supported-privileges). #120014 +- [PL/pgSQL](../v24.1/plpgsql.html) blocks can now be nested in a block that has an exception handler. #120045

DB Console changes

-- Resolved an issue where clusters with multiple stores per node may list inaccurate region/node information in the [**Databases**](../v24.1/ui-databases-page.html) page. [#119260][#119260] -- `VIEW` type tables will no longer display in the DB Console [**Databases**](../v24.1/ui-databases-page.html) pages. Previously these would be listed with no information, only displaying errors. [#119890][#119890] -- Fixed an intermittent page crash in the [**Schema Insights**](../v24.1/ui-insights-page.html#schema-insights-tab) tab. [#120137][#120137] -- Fixed a bug where the **Rows written** value was incorrectly showing the **Rows read** value on the [**Insights**](../v24.1/ui-insights-page.html) page. [#120145][#120145] +- Resolved an issue where clusters with multiple stores per node may list inaccurate region/node information in the [**Databases**](../v24.1/ui-databases-page.html) page. #119260 +- `VIEW` type tables will no longer display in the DB Console [**Databases**](../v24.1/ui-databases-page.html) pages. Previously these would be listed with no information, only displaying errors. #119890 +- Fixed an intermittent page crash in the [**Schema Insights**](../v24.1/ui-insights-page.html#schema-insights-tab) tab. #120137 +- Fixed a bug where the **Rows written** value was incorrectly showing the **Rows read** value on the [**Insights**](../v24.1/ui-insights-page.html) page. #120145

Bug fixes

-- Fixed a bug that occurred when using [`ALTER TABLE`](../v24.1/alter-table.html) to drop and re-add a [`CHECK` constraint](../v24.1/check.html) with the same name. [#120008][#120008] -- Fixed a bug in which it was possible to [`SET transaction_read_only = false`](../v24.1/show-vars.html#supported-variables) during an [`AS OF SYSTEM TIME`](../v24.1/as-of-system-time.html) transaction. [#120097][#120097] -- Fixed a bug that caused a slow memory leak that could accumulate when opening many new connections. The bug was present in v22.2.9+ and v23.1+ versions. [#119799][#119799] +- Fixed a bug that occurred when using [`ALTER TABLE`](../v24.1/alter-table.html) to drop and re-add a [`CHECK` constraint](../v24.1/check.html) with the same name. #120008 +- Fixed a bug in which it was possible to [`SET transaction_read_only = false`](../v24.1/show-vars.html#supported-variables) during an [`AS OF SYSTEM TIME`](../v24.1/as-of-system-time.html) transaction. #120097 +- Fixed a bug that caused a slow memory leak that could accumulate when opening many new connections. The bug was present in v22.2.9+ and v23.1+ versions. #119799

Contributors

@@ -57,18 +57,3 @@ We would like to thank the following contributors from the CockroachDB community
-[#115889]: https://github.com/cockroachdb/cockroach/pull/115889 -[#119037]: https://github.com/cockroachdb/cockroach/pull/119037 -[#119260]: https://github.com/cockroachdb/cockroach/pull/119260 -[#119752]: https://github.com/cockroachdb/cockroach/pull/119752 -[#119799]: https://github.com/cockroachdb/cockroach/pull/119799 -[#119890]: https://github.com/cockroachdb/cockroach/pull/119890 -[#119913]: https://github.com/cockroachdb/cockroach/pull/119913 -[#119996]: https://github.com/cockroachdb/cockroach/pull/119996 -[#120008]: https://github.com/cockroachdb/cockroach/pull/120008 -[#120014]: https://github.com/cockroachdb/cockroach/pull/120014 -[#120043]: https://github.com/cockroachdb/cockroach/pull/120043 -[#120045]: https://github.com/cockroachdb/cockroach/pull/120045 -[#120097]: https://github.com/cockroachdb/cockroach/pull/120097 -[#120137]: https://github.com/cockroachdb/cockroach/pull/120137 -[#120145]: https://github.com/cockroachdb/cockroach/pull/120145 diff --git a/src/current/_includes/releases/v24.1/v24.1.0-alpha.4.md b/src/current/_includes/releases/v24.1/v24.1.0-alpha.4.md index 96142fe3c6d..b5f512d3e1e 100644 --- a/src/current/_includes/releases/v24.1/v24.1.0-alpha.4.md +++ b/src/current/_includes/releases/v24.1/v24.1.0-alpha.4.md @@ -6,7 +6,7 @@ Release Date: March 25, 2024

Security updates

-- When [configuring logs](../v24.1/configure-logs.html#output-to-files), `file-permissions` are now applied literally, such that `file-permissions: 644` will result in files with permissions matching `644` (instead of the previous behavior's `640`). Previously, CockroachDB's `umask` (which is always at least `007`) was being applied after the `file-permissions` field was used to create files, meaning the resulting permissions did not match those specified in the log configuration. [#120669][#120669] +- When [configuring logs](../v24.1/configure-logs.html#output-to-files), `file-permissions` are now applied literally, such that `file-permissions: 644` will result in files with permissions matching `644` (instead of the previous behavior's `640`). Previously, CockroachDB's `umask` (which is always at least `007`) was being applied after the `file-permissions` field was used to create files, meaning the resulting permissions did not match those specified in the log configuration. #120669

General changes

@@ -21,53 +21,53 @@ Release Date: March 25, 2024 - `storage.disk.weightedio.time` - `storage.disk.iopsinprogress` - The metrics match the definitions of the `sys.host.disk.*` system metrics. [#119885][#119885] + The metrics match the definitions of the `sys.host.disk.*` system metrics. #119885

{{ site.data.products.enterprise }} edition changes

-- `server.controller.default_target_cluster` can now be set to any virtual cluster name by default, including a virtual cluster yet to be created or have service started. [#120080][#120080] -- The [`READ COMMITTED`](../v24.1/read-committed.html) isolation level now requires the cluster to have a valid enterprise license. [#120154][#120154] -- The new boolean changefeed option [`ignore_disable_changefeed_replication`](../v24.1/create-changefeed.html#ignore-disable-changefeed-replication), when set to `true`, prevents the changefeed from filtering events even if CDC filtering is configured via the `disable_changefeed_replication` [session variable](../v24.1/session-variables.html), `sql.ttl.changefeed_replication.disabled` [cluster setting](../v24.1/cluster-settings.html), or the `ttl_disable_changefeed_replication` [table storage parameter](../v24.1/alter-table.html#table-storage-parameters). [#120255][#120255] +- `server.controller.default_target_cluster` can now be set to any virtual cluster name by default, including a virtual cluster yet to be created or have service started. #120080 +- The [`READ COMMITTED`](../v24.1/read-committed.html) isolation level now requires the cluster to have a valid enterprise license. #120154 +- The new boolean changefeed option [`ignore_disable_changefeed_replication`](../v24.1/create-changefeed.html#ignore-disable-changefeed-replication), when set to `true`, prevents the changefeed from filtering events even if CDC filtering is configured via the `disable_changefeed_replication` [session variable](../v24.1/session-variables.html), `sql.ttl.changefeed_replication.disabled` [cluster setting](../v24.1/cluster-settings.html), or the `ttl_disable_changefeed_replication` [table storage parameter](../v24.1/alter-table.html#table-storage-parameters). #120255

SQL language changes

-- Added support for the [PL/pgSQL](../v24.1/plpgsql.html) `COMMIT` and `ROLLBACK` statements. [#119647][#119647] -- [Identity columns](../v24.1/create-table.html#identity-columns) now support enhanced sequence management through the [`ALTER [COLUMN] column_name SET sequence_option`](../v24.1/alter-table.html#alter-column) and [`ALTER [COLUMN] column_name RESTART [WITH restart]`](../v24.1/alter-table.html#alter-column) commands. This update facilitates the fine-tuning of identity column sequences. [#119432][#119432] -- It is now possible to use the `STRICT` option with [`SELECT ... INTO`](../v24.1/plpgsql.html#assign-a-result-to-a-variable) and [`RETURNING ... INTO`](../v24.1/plpgsql.html#assign-a-result-to-a-variable) in order to enforce that a SQL statement within a [PL/pgSQL](../v24.1/plpgsql.html) routine returns exactly one row. [#120486][#120486] -- Added a [session setting](../v24.1/session-variables.html) `plpgsql_use_strict_into`, which causes PL/pgSQL [`SELECT ... INTO`](../v24.1/plpgsql.html#assign-a-result-to-a-variable) and [`RETURNING ... INTO`](../v24.1/plpgsql.html#assign-a-result-to-a-variable) to require exactly one row from the SQL statement, similar to Oracle behavior. [#120486][#120486] -- Added a new `failure_count INT NOT NULL` column to `crdb_internal.node_statement_statistics`. It represents the number of recorded statement execution failures for the given statement, as a new component of the overall statistics. [#120236][#120236] -- The `FORCE_INVERTED_INDEX` hint causes the [optimizer](../v24.1/cost-based-optimizer.html) to prefer a query plan scan over any inverted index of the hinted table. An error is emitted if no such query plan can be generated. [#120384][#120384] -- The `REPAIRCLUSTERMETADATA` privilege has been aliased to `REPAIRCLUSTER`. Both names can be used interchangeably. [#116844][#116844] +- Added support for the [PL/pgSQL](../v24.1/plpgsql.html) `COMMIT` and `ROLLBACK` statements. 
#119647 +- [Identity columns](../v24.1/create-table.html#identity-columns) now support enhanced sequence management through the [`ALTER [COLUMN] column_name SET sequence_option`](../v24.1/alter-table.html#alter-column) and [`ALTER [COLUMN] column_name RESTART [WITH restart]`](../v24.1/alter-table.html#alter-column) commands. This update facilitates the fine-tuning of identity column sequences. #119432 +- It is now possible to use the `STRICT` option with [`SELECT ... INTO`](../v24.1/plpgsql.html#assign-a-result-to-a-variable) and [`RETURNING ... INTO`](../v24.1/plpgsql.html#assign-a-result-to-a-variable) in order to enforce that a SQL statement within a [PL/pgSQL](../v24.1/plpgsql.html) routine returns exactly one row. #120486 +- Added a [session setting](../v24.1/session-variables.html) `plpgsql_use_strict_into`, which causes PL/pgSQL [`SELECT ... INTO`](../v24.1/plpgsql.html#assign-a-result-to-a-variable) and [`RETURNING ... INTO`](../v24.1/plpgsql.html#assign-a-result-to-a-variable) to require exactly one row from the SQL statement, similar to Oracle behavior. #120486 +- Added a new `failure_count INT NOT NULL` column to `crdb_internal.node_statement_statistics`. It represents the number of recorded statement execution failures for the given statement, as a new component of the overall statistics. #120236 +- The `FORCE_INVERTED_INDEX` hint causes the [optimizer](../v24.1/cost-based-optimizer.html) to prefer a query plan scan over any inverted index of the hinted table. An error is emitted if no such query plan can be generated. #120384 +- The `REPAIRCLUSTERMETADATA` privilege has been aliased to `REPAIRCLUSTER`. Both names can be used interchangeably. #116844

Operational changes

-- The new [`cockroach start`](../v24.1/cockroach-start.html) option [`--wal-failover=among-stores` or `COCKROACH_WAL_FAILOVER=among-stores`]({% link v24.1/cockroach-start.md %}#write-ahead-log-wal-failover) environment variable will configure a multi-store CockroachDB node to fail over a store's write-ahead log (WAL) to another store's data directory. Failing over the write-ahead log may allow some operations against a store to continue completing, even if the underlying storage is temporarily unavailable. This feature is in [preview]({% link v24.1/cockroachdb-feature-availability.md %}#features-in-preview). [#120509][#120509] -- The new `storage.wal_failover.unhealthy_op_threshold` [cluster setting](../v24.1/cluster-settings.html) allows configuring the latency threshold at which a WAL write is considered unhealthy. [#120509][#120509] +- The new [`cockroach start`](../v24.1/cockroach-start.html) option [`--wal-failover=among-stores` or `COCKROACH_WAL_FAILOVER=among-stores`]({% link v24.1/cockroach-start.md %}#write-ahead-log-wal-failover) environment variable will configure a multi-store CockroachDB node to fail over a store's write-ahead log (WAL) to another store's data directory. Failing over the write-ahead log may allow some operations against a store to continue completing, even if the underlying storage is temporarily unavailable. This feature is in [preview]({% link v24.1/cockroachdb-feature-availability.md %}#features-in-preview). #120509 +- The new `storage.wal_failover.unhealthy_op_threshold` [cluster setting](../v24.1/cluster-settings.html) allows configuring the latency threshold at which a WAL write is considered unhealthy. 
#120509 - Two new metrics track the status of the SQL Activity Update job, which pre-aggregates top K information within the SQL statistics subsytem and writes the results to `system.statement_activity` and `system.transaction_activity`: - `sql.stats.activity.updates.successful`: Number of successful updates made by the SQL activity updater job. - - `sql.stats.activity.update.latency`: The latency of updates made by the SQL activity updater job. Includes failed update attempts. [#120522][#120522] -- Added a new counter metric, `sql.stats.flush.done_signals.ignored`, that tracks the number of times the SQL activity update job has ignored the signal that indicates that a flush has completed. This metric may indicate that the SQL activity update job is taking longer than expected to complete. [#120522][#120522] -- Added a new counter metric, `sql.stats.activity.updates.failed`, to measure the number of update attempts made by the SQL activity update job that failed with errors. [#120522][#120522] -- Added a new counter metric, `sql.stats.flush.fingerprint.count`, that tracks the number of unique statement and transaction fingerprints included in the SQL stats flush. [#120522][#120522] -- The `/_status/stores` endpoint now includes `node_id`, `dir`, and `wal_failover_path` fields to show the store's node ID, data directory, and path to the configured WAL failover secondary, if configured. [#120677][#120677] + - `sql.stats.activity.update.latency`: The latency of updates made by the SQL activity updater job. Includes failed update attempts. #120522 +- Added a new counter metric, `sql.stats.flush.done_signals.ignored`, that tracks the number of times the SQL activity update job has ignored the signal that indicates that a flush has completed. This metric may indicate that the SQL activity update job is taking longer than expected to complete. 
#120522 +- Added a new counter metric, `sql.stats.activity.updates.failed`, to measure the number of update attempts made by the SQL activity update job that failed with errors. #120522 +- Added a new counter metric, `sql.stats.flush.fingerprint.count`, that tracks the number of unique statement and transaction fingerprints included in the SQL stats flush. #120522 +- The `/_status/stores` endpoint now includes `node_id`, `dir`, and `wal_failover_path` fields to show the store's node ID, data directory, and path to the configured WAL failover secondary, if configured. #120677

Command-line changes

-- The new `--go-gc-percent` flag of the [`cockroach start`](../v24.1/cockroach-start.html) command controls the garbage collection target percentage of the Go runtime, mirroring the existing `GOGC` environment variable. A garbage collection is triggered when the ratio of freshly allocated data to live data remaining after the previous collection reaches this percentage. If left unspecified and if a Go soft memory limit is configured (i.e., not explicitly disabled via `--max-go-memory` or `GOMEMLIMIT`), the garbage collection target percentage defaults to 300%. Setting the flag to a negative value disables the target percentage garbage collection heuristic, and only the soft memory limit heuristic triggers garbage collection. To monitor the impact of this change in the DB Console, look for an increase in **Memory usage** in the [Hardware dashboard]({% link v24.1/ui-hardware-dashboard.md %}#memory-usage) and an increase in **Go total memory usage** in the [Runtime dashboard]({% link v24.1/ui-runtime-dashboard.md %}#memory-usage). This does not increase the risk of an out-of-memory exception (OOM), because the Go memory limit (controlled by the `--max-go-memory` flag or the `GOMEMLIMIT` environment variable) prevents Go from consuming too much memory. [#119605][#119605] +- The new `--go-gc-percent` flag of the [`cockroach start`](../v24.1/cockroach-start.html) command controls the garbage collection target percentage of the Go runtime, mirroring the existing `GOGC` environment variable. A garbage collection is triggered when the ratio of freshly allocated data to live data remaining after the previous collection reaches this percentage. If left unspecified and if a Go soft memory limit is configured (i.e., not explicitly disabled via `--max-go-memory` or `GOMEMLIMIT`), the garbage collection target percentage defaults to 300%. 
Setting the flag to a negative value disables the target percentage garbage collection heuristic, and only the soft memory limit heuristic triggers garbage collection. To monitor the impact of this change in the DB Console, look for an increase in **Memory usage** in the [Hardware dashboard]({% link v24.1/ui-hardware-dashboard.md %}#memory-usage) and an increase in **Go total memory usage** in the [Runtime dashboard]({% link v24.1/ui-runtime-dashboard.md %}#memory-usage). This does not increase the risk of an out-of-memory exception (OOM), because the Go memory limit (controlled by the `--max-go-memory` flag or the `GOMEMLIMIT` environment variable) prevents Go from consuming too much memory. #119605

DB Console changes

-- The [**Queues** dashboard](../v24.1/ui-queues-dashboard.html) now includes lease queue metrics. [#119386][#119386] -- The DB Console **SQL Activity** [**Statement Fingerprint**](../v24.1/ui-statements-page.html#statement-fingerprint-page) page has replaced the **Failed?** boolean column with a **Failure Count** column that shows the number of failed executions for the given statement fingerprint.
In the **SQL Activity** table, the same statement fingeprint no longer appears in separate rows for failed executions and successful executions. Instead, they are combined into a single statement fingerprint. [#120236][#120236] -- The DB Console now displays an alert message when a license has expired or will expire in fewer than 15 days. [#120490][#120490] +- The [**Queues** dashboard](../v24.1/ui-queues-dashboard.html) now includes lease queue metrics. #119386 +- The DB Console **SQL Activity** [**Statement Fingerprint**](../v24.1/ui-statements-page.html#statement-fingerprint-page) page has replaced the **Failed?** boolean column with a **Failure Count** column that shows the number of failed executions for the given statement fingerprint.
In the **SQL Activity** table, the same statement fingerprint no longer appears in separate rows for failed executions and successful executions. Instead, they are combined into a single statement fingerprint. #120236 +- The DB Console now displays an alert message when a license has expired or will expire in fewer than 15 days. #120490

Bug fixes

-- Fixed a bug with [`DROP SCHEMA ... CASCADE`](../v24.1/drop-schema.html) that could lead to dangling function references in other schemas accessing any functions. [#119932][#119932] -- Fixed a bug where a [`RESTORE`](../v24.1/restore.html) of a backup that itself contained a table created by the `RESTORE` of a table with an in-progress [`IMPORT INTO`](../v24.1/import-into.html) would fail to restore all rows. [#120414][#120414] -- Fixed a bug where [identity columns](../v24.1/create-table.html#identity-columns) without any configured sequence options did not display the default values for identity attributes in `information_schema`. [#119459][#119459] -- Fixed a bug where a [`GRANT ... ON ALL TABLES`](../v24.1/grant.html) statement could fail if sequences existed and they did not support a privilege (e.g., `BACKUP`). [#120685][#120685] -- Fixed a bug where an [`EXPLAIN (DDL)`](../v24.1/explain.html) statement would generate event log entries for schema changes that were not executed. [#120563][#120563] +- Fixed a bug with [`DROP SCHEMA ... CASCADE`](../v24.1/drop-schema.html) that could lead to dangling function references in other schemas accessing any functions. #119932 +- Fixed a bug where a [`RESTORE`](../v24.1/restore.html) of a backup that itself contained a table created by the `RESTORE` of a table with an in-progress [`IMPORT INTO`](../v24.1/import-into.html) would fail to restore all rows. #120414 +- Fixed a bug where [identity columns](../v24.1/create-table.html#identity-columns) without any configured sequence options did not display the default values for identity attributes in `information_schema`. #119459 +- Fixed a bug where a [`GRANT ... ON ALL TABLES`](../v24.1/grant.html) statement could fail if sequences existed and they did not support a privilege (e.g., `BACKUP`). #120685 +- Fixed a bug where an [`EXPLAIN (DDL)`](../v24.1/explain.html) statement would generate event log entries for schema changes that were not executed. #120563
@@ -77,38 +77,3 @@ This release includes 153 merged PRs by 179 authors.
-[#116844]: https://github.com/cockroachdb/cockroach/pull/116844 -[#119386]: https://github.com/cockroachdb/cockroach/pull/119386 -[#119432]: https://github.com/cockroachdb/cockroach/pull/119432 -[#119459]: https://github.com/cockroachdb/cockroach/pull/119459 -[#119605]: https://github.com/cockroachdb/cockroach/pull/119605 -[#119647]: https://github.com/cockroachdb/cockroach/pull/119647 -[#119819]: https://github.com/cockroachdb/cockroach/pull/119819 -[#119847]: https://github.com/cockroachdb/cockroach/pull/119847 -[#119885]: https://github.com/cockroachdb/cockroach/pull/119885 -[#119906]: https://github.com/cockroachdb/cockroach/pull/119906 -[#119932]: https://github.com/cockroachdb/cockroach/pull/119932 -[#120019]: https://github.com/cockroachdb/cockroach/pull/120019 -[#120080]: https://github.com/cockroachdb/cockroach/pull/120080 -[#120135]: https://github.com/cockroachdb/cockroach/pull/120135 -[#120154]: https://github.com/cockroachdb/cockroach/pull/120154 -[#120236]: https://github.com/cockroachdb/cockroach/pull/120236 -[#120237]: https://github.com/cockroachdb/cockroach/pull/120237 -[#120255]: https://github.com/cockroachdb/cockroach/pull/120255 -[#120263]: https://github.com/cockroachdb/cockroach/pull/120263 -[#120384]: https://github.com/cockroachdb/cockroach/pull/120384 -[#120414]: https://github.com/cockroachdb/cockroach/pull/120414 -[#120486]: https://github.com/cockroachdb/cockroach/pull/120486 -[#120490]: https://github.com/cockroachdb/cockroach/pull/120490 -[#120509]: https://github.com/cockroachdb/cockroach/pull/120509 -[#120522]: https://github.com/cockroachdb/cockroach/pull/120522 -[#120563]: https://github.com/cockroachdb/cockroach/pull/120563 -[#120669]: https://github.com/cockroachdb/cockroach/pull/120669 -[#120677]: https://github.com/cockroachdb/cockroach/pull/120677 -[#120685]: https://github.com/cockroachdb/cockroach/pull/120685 -[119853c8d]: https://github.com/cockroachdb/cockroach/commit/119853c8d -[13066e191]: 
https://github.com/cockroachdb/cockroach/commit/13066e191 -[6fd7ff8b1]: https://github.com/cockroachdb/cockroach/commit/6fd7ff8b1 -[83a77498b]: https://github.com/cockroachdb/cockroach/commit/83a77498b -[87c5b14ae]: https://github.com/cockroachdb/cockroach/commit/87c5b14ae -[b4994a99c]: https://github.com/cockroachdb/cockroach/commit/b4994a99c diff --git a/src/current/_includes/releases/v24.1/v24.1.0-alpha.5.md b/src/current/_includes/releases/v24.1/v24.1.0-alpha.5.md index a16f0a3f67e..ce14273170c 100644 --- a/src/current/_includes/releases/v24.1/v24.1.0-alpha.5.md +++ b/src/current/_includes/releases/v24.1/v24.1.0-alpha.5.md @@ -6,20 +6,20 @@ Release Date: April 1, 2024

{{ site.data.products.enterprise }} edition changes

-- [Changefeeds]({% link v24.1/change-data-capture-overview.md %}) now default to evenly distributing their work across all replicas, including followers, regardless of leaseholder placement. On upgrade to v24.1, running changefeed jobs will be restarted automatically as part of the upgrade process and will default to distributing work across replicas. To disable this behavior, set the [cluster setting]({% link v24.1/cluster-settings.md %}) `changefeed.random_replica_selection.enabled ` to `false`. If disabled, changefeed planning reverts to its previous behavior for distributing work. [#120077][#120077] -- When [physical cluster replication]({% link v24.1/physical-cluster-replication-overview.md %}) is enabled, the output of the `SHOW VIRTUAL CLUSTER ... WITH REPLICATION STATUS` command now displays replication lag. [#120782][#120782] +- [Changefeeds]({% link v24.1/change-data-capture-overview.md %}) now default to evenly distributing their work across all replicas, including followers, regardless of leaseholder placement. On upgrade to v24.1, running changefeed jobs will be restarted automatically as part of the upgrade process and will default to distributing work across replicas. To disable this behavior, set the [cluster setting]({% link v24.1/cluster-settings.md %}) `changefeed.random_replica_selection.enabled` to `false`. If disabled, changefeed planning reverts to its previous behavior for distributing work. #120077 +- When [physical cluster replication]({% link v24.1/physical-cluster-replication-overview.md %}) is enabled, the output of the `SHOW VIRTUAL CLUSTER ... WITH REPLICATION STATUS` command now displays replication lag. #120782
- The `data_state` field has been renamed to `status`. - - The fields that are displayed are now ordered as follows: `retained_time`, `replicated_time`, `replication_lag`, `cutover_time`, `status`. [#120782][#120782] -- You can now run [physical cluster replication]({% link v24.1/physical-cluster-replication-overview.md %}) from an [existing CockroachDB cluster]({% link v24.1/set-up-physical-cluster-replication.md %}#set-up-pcr-from-an-existing-cluster), without [cluster virtualization]({% link v24.1/cluster-virtualization-overview.md %}) enabled, to a standby cluster with cluster virtualization enabled. [#122001][#122001] + - The fields that are displayed are now ordered as follows: `retained_time`, `replicated_time`, `replication_lag`, `cutover_time`, `status`. #120782 +- You can now run [physical cluster replication]({% link v24.1/physical-cluster-replication-overview.md %}) from an [existing CockroachDB cluster]({% link v24.1/set-up-physical-cluster-replication.md %}#set-up-pcr-from-an-existing-cluster), without [cluster virtualization]({% link v24.1/cluster-virtualization-overview.md %}) enabled, to a standby cluster with cluster virtualization enabled. #122001

SQL language changes

-- You can now specify a condition for the [PL/pgSQL statements]({% link v24.1/plpgsql.md %}) `EXIT` and `CONTINUE`. [#120686][#120686] -- A [stored procedure]({% link v24.1/stored-procedures.md %}) can now invoke another stored procedure using a [`CALL` statement]({% link v24.1/call.md %}). [#120674][#120674] -- You can now use a [`SET TRANSACTION`]({% link v24.1/set-transaction.md %}) statement within a [PL/pgSQL stored procedure]({% link v24.1/plpgsql.md %}) to configure the transaction isolation level, timestamp, or priority, or to set the transaction to read-only. A `SET TRANSACTION` statement must immediately follow a `COMMIT` or `ROLLBACK`, with no other statements or block boundaries between them. [#120456][#120456] -- The new [session variable]({% link v24.1/session-variables.md %}) `optimizer_use_virtual_computed_column_stats`, when enabled, configures the [cost-based optimizer]({% link v24.1/cost-based-optimizer.md %}) to use [table statistics]({% link v24.1/show-statistics.md %}) on virtual computed columns. [#120668][#120668] +- You can now specify a condition for the [PL/pgSQL statements]({% link v24.1/plpgsql.md %}) `EXIT` and `CONTINUE`. #120686 +- A [stored procedure]({% link v24.1/stored-procedures.md %}) can now invoke another stored procedure using a [`CALL` statement]({% link v24.1/call.md %}). #120674 +- You can now use a [`SET TRANSACTION`]({% link v24.1/set-transaction.md %}) statement within a [PL/pgSQL stored procedure]({% link v24.1/plpgsql.md %}) to configure the transaction isolation level, timestamp, or priority, or to set the transaction to read-only. A `SET TRANSACTION` statement must immediately follow a `COMMIT` or `ROLLBACK`, with no other statements or block boundaries between them. 
#120456 +- The new [session variable]({% link v24.1/session-variables.md %}) `optimizer_use_virtual_computed_column_stats`, when enabled, configures the [cost-based optimizer]({% link v24.1/cost-based-optimizer.md %}) to use [table statistics]({% link v24.1/show-statistics.md %}) on virtual computed columns. #120668 - An [identity column]({% link v24.1/create-table.md %}#identity-columns) can now drop the `IDENTITY` constraint and related sequence using the following SQL statement: {% include_cached copy-clipboard.html %} @@ -27,53 +27,53 @@ Release Date: April 1, 2024 ALTER TABLE {table_name} ALTER COLUMN {column_name} DROP IDENTITY [IF EXISTS]; ~~~ - `IF EXISTS` is optional, and skips the command if the column is not an identity column. [#119263][#119263] + `IF EXISTS` is optional, and skips the command if the column is not an identity column. #119263 -- A shared lock that is acquired explicitly using [`SELECT FOR SHARE`]({% link v24.1/select-for-update.md %}) or implicitly by a [read-committed transaction]({% link v24.1/read-committed.md %}), can now be re-acquired with higher strength by either using a [`SELECT FOR UPDATE`]({% link v24.1/select-for-update.md %}) statement or by writing to the key. [#119671][#119671] -- [Stored procedures]({% link v24.1/stored-procedures.md %}) now support `OUT` and `INOUT` parameter classes. [#120851][#120851] -- The [PL/pgSQL]({% link v24.1/plpgsql.md %}) `EXIT` and `CONTINUE` statements can now use labels to specify which loop or block is the target. [#120733][#120733] +- A shared lock that is acquired explicitly using [`SELECT FOR SHARE`]({% link v24.1/select-for-update.md %}) or implicitly by a [read-committed transaction]({% link v24.1/read-committed.md %}), can now be re-acquired with higher strength by either using a [`SELECT FOR UPDATE`]({% link v24.1/select-for-update.md %}) statement or by writing to the key. 
#119671 +- [Stored procedures]({% link v24.1/stored-procedures.md %}) now support `OUT` and `INOUT` parameter classes. #120851 +- The [PL/pgSQL]({% link v24.1/plpgsql.md %}) `EXIT` and `CONTINUE` statements can now use labels to specify which loop or block is the target. #120733

Operational changes

-- You can now enable asynchronous buffering of `file-group` [log sinks]({% link v24.1/configure-logs.md %}) using the `buffering` [configuration options]({% link v24.1/configure-logs.md %}#log-buffering-for-network-sinks) either by default or to an individual `file-group`. The `buffering` configuration option is incompatible with the `buffered-writes` configuration option. To try the `buffering` option, you must set `buffered-writes: false`. Cockroach Labs recommends setting `max-staleness` to `1s` and `flush-trigger-size` to `256KiB`. [#120428][#120428] -- A minimum [Raft scheduler]({% link v24.1/architecture/replication-layer.md %}#raft) concurrency is now enforced per store so that nodes with many stores do not spread workers too thin. This helps to avoid high scheduler latency across replicas on a store when load is imbalanced. [#120162][#120162] -- The new [metrics]({% link v24.1/metrics.md %}) `kv.split.estimated_stats` and `kv.split.total_bytes_estimates` track the number of splits that produce [MVCC]({% link v24.1/architecture/storage-layer.md %}#mvcc) statistic estimates and the total bytes of estimates produced. [#119894][#119894] -- The new [cluster setting]({% link v24.1/cluster-settings.md %}) `storage.sstable.compression_algorithm` configures the compression algorithm used when compressing sstable blocks. [#120784][#120784] -- The new [cluster setting]({% link v24.1/cluster-settings.md %}) `kv.dist_sender.proxy.enabled`, which is enabled by default, causes proxy requests to be routed through a follower replica when the leaseholder is unavailable. [#117340][#117340] -- The new startup flag `--wal-failover` allows you to explicitly set the path for WAL failover of a single-store node. [#120783][#120783] -- Cluster virtualization is now enabled using either of the new startup flags `--virtualized` or `--virtualized-empty` instead of the `--config-profile` flag. 
[#120813][#120813] +- You can now enable asynchronous buffering of `file-group` [log sinks]({% link v24.1/configure-logs.md %}) using the `buffering` [configuration options]({% link v24.1/configure-logs.md %}#log-buffering-for-network-sinks) either by default or to an individual `file-group`. The `buffering` configuration option is incompatible with the `buffered-writes` configuration option. To try the `buffering` option, you must set `buffered-writes: false`. Cockroach Labs recommends setting `max-staleness` to `1s` and `flush-trigger-size` to `256KiB`. #120428 +- A minimum [Raft scheduler]({% link v24.1/architecture/replication-layer.md %}#raft) concurrency is now enforced per store so that nodes with many stores do not spread workers too thin. This helps to avoid high scheduler latency across replicas on a store when load is imbalanced. #120162 +- The new [metrics]({% link v24.1/metrics.md %}) `kv.split.estimated_stats` and `kv.split.total_bytes_estimates` track the number of splits that produce [MVCC]({% link v24.1/architecture/storage-layer.md %}#mvcc) statistic estimates and the total bytes of estimates produced. #119894 +- The new [cluster setting]({% link v24.1/cluster-settings.md %}) `storage.sstable.compression_algorithm` configures the compression algorithm used when compressing sstable blocks. #120784 +- The new [cluster setting]({% link v24.1/cluster-settings.md %}) `kv.dist_sender.proxy.enabled`, which is enabled by default, causes proxy requests to be routed through a follower replica when the leaseholder is unavailable. #117340 +- The new startup flag `--wal-failover` allows you to explicitly set the path for WAL failover of a single-store node. #120783 +- Cluster virtualization is now enabled using either of the new startup flags `--virtualized` or `--virtualized-empty` instead of the `--config-profile` flag. 
#120813 - The following metrics, which track the SQL statistics subsystem's task to flush in-memory statistics to persisted storage, are now more consistent with other metrics used in the subsystem. - `sql.stats.flushes.successful`: Number of times SQL statistics have been flushed successfully to persistent storage. - `sql.stats.flushes.failed`: Number of attempted SQL statistics flushes that failed with errors. - - `sql.stats.flush.latency`: The latency of attempted SQL statistics flushes to persistent storage, including both successes and failures. [#120709][#120709] + - `sql.stats.flush.latency`: The latency of attempted SQL statistics flushes to persistent storage, including both successes and failures. #120709 - The following new [metrics]({% link v24.1/metrics.md %}) track the number and outcome of proxy requests when `kv.dist_sender.proxy.enabled` is set to `true`: - `distsender.rpc.proxy.sent` - `distsender.rpc.proxy.err` - `distsender.rpc.proxy.forward.sent` - `distsender.rpc.proxy.forward.err` - Cockroach Labs recommends monitoring and alerting on `distsender.rpc.proxy.sent`, because it indicates a possible network partition. [#120239][#120239] + Cockroach Labs recommends monitoring and alerting on `distsender.rpc.proxy.sent`, because it indicates a possible network partition. #120239 -- The `provisioned-rate` field of a node's store specification can no longer be used to add constraints for the disk name or bandwidth. By default, bandwidth is constrained according to the [cluster setting]({% link v24.1/cluster-settings.md %}) `kv.store.admission.provisioned_bandwidth`. To override this setting for a specific node, the storage specification must contain `provisioned-rate=bandwidth={bandwidth-bytes/s}`. 
[#120895][#120895] -- Removal of the [cluster setting]({% link v24.1/cluster-settings.md %}) `kv.rangefeed.scheduler.enabled`, which was announced in [v24.1.0-alpha.1](https://www.cockroachlabs.com/docs/releases/v24.1.html#v24-1-0-alpha-1), has been reverted, and the cluster setting is reinstated. [#121164][#121164] +- The `provisioned-rate` field of a node's store specification can no longer be used to add constraints for the disk name or bandwidth. By default, bandwidth is constrained according to the [cluster setting]({% link v24.1/cluster-settings.md %}) `kv.store.admission.provisioned_bandwidth`. To override this setting for a specific node, the storage specification must contain `provisioned-rate=bandwidth={bandwidth-bytes/s}`. #120895 +- Removal of the [cluster setting]({% link v24.1/cluster-settings.md %}) `kv.rangefeed.scheduler.enabled`, which was announced in [v24.1.0-alpha.1](https://www.cockroachlabs.com/docs/releases/v24.1.html#v24-1-0-alpha-1), has been reverted, and the cluster setting is reinstated. #121164

DB Console changes

-- In generated statement fingerprints in the DB Console [**Statements** page]({% link v24.1/ui-statements-page.md %}), lists with only literals or placeholders or similar subexpressions are shortened to their first item followed by "__more__". [#120507][#120507] +- In generated statement fingerprints in the DB Console [**Statements** page]({% link v24.1/ui-statements-page.md %}), lists with only literals or placeholders or similar subexpressions are shortened to their first item followed by "__more__". #120507

Bug fixes

- Fixed a bug introduced in v23.2 that could cause a [PL/pgSQL]({% link v24.1/plpgsql.md %}) routine to return incorrect results when the routine included: 1. At least one parameter. - 1. An `IF` statement with one leak-proof branch and one branch with side effects. [#120451][#120451] -- Fixed a rare bug where a `BACKUP` command issued shortly after an [`ALTER TABLE {table_name} SET (exclude_data_from_backup = true)`]({% link v24.1/take-full-and-incremental-backups.md %}#exclude-a-tables-data-from-backups) could exclude data from an unrelated table from the backup. [#120188][#120188] -- Fixed a behavior where a memory exhaustion error during a schema change was treated as a permanent failure and reverted. Such schema changes are now retried instead of reverted. [#120806][#120806] -- Fixed a bug where the `attname` for a dropped column was not correctly padded with 8 `.` characters to be compatible with PostgreSQL. [#120861][#120861] + 1. An `IF` statement with one leak-proof branch and one branch with side effects. #120451 +- Fixed a rare bug where a `BACKUP` command issued shortly after an [`ALTER TABLE {table_name} SET (exclude_data_from_backup = true)`]({% link v24.1/take-full-and-incremental-backups.md %}#exclude-a-tables-data-from-backups) could exclude data from an unrelated table from the backup. #120188 +- Fixed a behavior where a memory exhaustion error during a schema change was treated as a permanent failure and reverted. Such schema changes are now retried instead of reverted. #120806 +- Fixed a bug where the `attname` for a dropped column was not correctly padded with 8 `.` characters to be compatible with PostgreSQL. #120861

Performance improvements

-- Splits no longer hold latches for time proportional to the range size while computing MVCC statistics. Instead, MVCC statistics are pre-computed before the critical section of the split. As a side effect, the resulting statistics are no longer 100% accurate because they may correctly distribute writes concurrent with the split. To mitigate against this potential inaccuracy, and to prevent the statistics from drifting after successive splits, the existing stored statistics are re-computed and corrected if needed during the non-critical section of the split. [#119894][#119894] -- The [cost-based optimizer]({% link v24.1/cost-based-optimizer.md %}) now generates more efficient query plans for some queries with `OFFSET` clauses. [#121160][#121160] +- Splits no longer hold latches for time proportional to the range size while computing MVCC statistics. Instead, MVCC statistics are pre-computed before the critical section of the split. As a side effect, the resulting statistics are no longer 100% accurate because they may incorrectly distribute writes concurrent with the split. To mitigate against this potential inaccuracy, and to prevent the statistics from drifting after successive splits, the existing stored statistics are re-computed and corrected if needed during the non-critical section of the split. #119894 +- The [cost-based optimizer]({% link v24.1/cost-based-optimizer.md %}) now generates more efficient query plans for some queries with `OFFSET` clauses. #121160
@@ -87,33 +87,3 @@ We would like to thank the following contributors from the CockroachDB community
-[#117340]: https://github.com/cockroachdb/cockroach/pull/117340 -[#119263]: https://github.com/cockroachdb/cockroach/pull/119263 -[#119671]: https://github.com/cockroachdb/cockroach/pull/119671 -[#119894]: https://github.com/cockroachdb/cockroach/pull/119894 -[#120077]: https://github.com/cockroachdb/cockroach/pull/120077 -[#120162]: https://github.com/cockroachdb/cockroach/pull/120162 -[#120188]: https://github.com/cockroachdb/cockroach/pull/120188 -[#120239]: https://github.com/cockroachdb/cockroach/pull/120239 -[#120428]: https://github.com/cockroachdb/cockroach/pull/120428 -[#120451]: https://github.com/cockroachdb/cockroach/pull/120451 -[#120456]: https://github.com/cockroachdb/cockroach/pull/120456 -[#120507]: https://github.com/cockroachdb/cockroach/pull/120507 -[#120668]: https://github.com/cockroachdb/cockroach/pull/120668 -[#120674]: https://github.com/cockroachdb/cockroach/pull/120674 -[#120686]: https://github.com/cockroachdb/cockroach/pull/120686 -[#120709]: https://github.com/cockroachdb/cockroach/pull/120709 -[#120733]: https://github.com/cockroachdb/cockroach/pull/120733 -[#120782]: https://github.com/cockroachdb/cockroach/pull/120782 -[#120783]: https://github.com/cockroachdb/cockroach/pull/120783 -[#120784]: https://github.com/cockroachdb/cockroach/pull/120784 -[#120806]: https://github.com/cockroachdb/cockroach/pull/120806 -[#120813]: https://github.com/cockroachdb/cockroach/pull/120813 -[#120846]: https://github.com/cockroachdb/cockroach/pull/120846 -[#120851]: https://github.com/cockroachdb/cockroach/pull/120851 -[#120861]: https://github.com/cockroachdb/cockroach/pull/120861 -[#120895]: https://github.com/cockroachdb/cockroach/pull/120895 -[#121023]: https://github.com/cockroachdb/cockroach/pull/121023 -[#121160]: https://github.com/cockroachdb/cockroach/pull/121160 -[#121164]: https://github.com/cockroachdb/cockroach/pull/121164 -[#122001]: https://github.com/cockroachdb/cockroach/pull/122001 diff --git 
a/src/current/_includes/releases/v24.1/v24.1.0-beta.1.md b/src/current/_includes/releases/v24.1/v24.1.0-beta.1.md index 4a0ee755e5f..0f291008298 100644 --- a/src/current/_includes/releases/v24.1/v24.1.0-beta.1.md +++ b/src/current/_includes/releases/v24.1/v24.1.0-beta.1.md @@ -6,31 +6,31 @@ Release Date: April 17, 2024

SQL language changes

-- [`SHOW JOBS`]({% link v24.1/show-jobs.md %}) no longer displays some internal retry counter columns (`last_run`, `next_run`, `num_runs`) and now only shows the `statement`, `trace_id`, and `execution_errors` columns when inspecting a specific job ID or IDs. [#121286][#121286] -- [`SHOW JOBS`]({% link v24.1/show-jobs.md %}) now shortens long job descriptions to 100 characters to make the table easier to read while the full description and statement can be inspected using `SHOW JOB` or `SHOW JOBS` on specific job IDs. [#121286][#121286] -- Extended [`pg_depend`]({% link v24.1/pg-catalog.md %}) to include dependencies between UDFs. [#121313][#121313] -- With [`sql_safe_updates`]({% link v24.1/session-variables.md %}) set to `true`, [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link v24.1/select-for-update.md %}) statements now return an error if they do not contain either a `WHERE` clause or a `LIMIT` clause. Also, `UPDATE` and `DELETE` statements without `WHERE` clauses but with `LIMIT` clauses now bypass `sql_safe_updates`, which better matches MySQL behavior. [#121466][#121466] -- Added support for [PL/pgSQL]({% link v24.1/plpgsql.md %}) `CALL` statements. It is now possible to call a stored procedure from a PL/pgSQL routine. [#121743][#121743] -- `DEFAULT` expressions for input parameters of [user-defined functions]({% link v24.1/user-defined-functions.md %}) and stored procedures are now supported. [#121811][#121811] +- [`SHOW JOBS`]({% link v24.1/show-jobs.md %}) no longer displays some internal retry counter columns (`last_run`, `next_run`, `num_runs`) and now only shows the `statement`, `trace_id`, and `execution_errors` columns when inspecting a specific job ID or IDs. #121286 +- [`SHOW JOBS`]({% link v24.1/show-jobs.md %}) now shortens long job descriptions to 100 characters to make the table easier to read while the full description and statement can be inspected using `SHOW JOB` or `SHOW JOBS` on specific job IDs. 
#121286 +- Extended [`pg_depend`]({% link v24.1/pg-catalog.md %}) to include dependencies between UDFs. #121313 +- With [`sql_safe_updates`]({% link v24.1/session-variables.md %}) set to `true`, [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link v24.1/select-for-update.md %}) statements now return an error if they do not contain either a `WHERE` clause or a `LIMIT` clause. Also, `UPDATE` and `DELETE` statements without `WHERE` clauses but with `LIMIT` clauses now bypass `sql_safe_updates`, which better matches MySQL behavior. #121466 +- Added support for [PL/pgSQL]({% link v24.1/plpgsql.md %}) `CALL` statements. It is now possible to call a stored procedure from a PL/pgSQL routine. #121743 +- `DEFAULT` expressions for input parameters of [user-defined functions]({% link v24.1/user-defined-functions.md %}) and stored procedures are now supported. #121811

Command-line changes

-- The [`--enterprise-encryption`]({% link v24.1/encryption.md %}#starting-a-node-with-encryption) flag now accepts the special value `path=*` to apply the specified keys to all stores. [#121111][#121111] +- The [`--enterprise-encryption`]({% link v24.1/encryption.md %}#starting-a-node-with-encryption) flag now accepts the special value `path=*` to apply the specified keys to all stores. #121111

DB Console changes

-- The **Commit Latency** chart in the [**Changefeeds** dashboard]({% link v24.1/ui-cdc-dashboard.md %}) now aggregates by max instead of by sum for multi-node [changefeeds]({% link v24.1/change-data-capture-overview.md %}). This more accurately reflects the amount of time for events to be acknowledged by the downstream sink. [#120787][#120787] -- Introduced a license expiration message in the [DB Console]({% link v24.1/ui-overview.md %}) in the top-right corner of the primary header. This message indicates the remaining days before license expiration for clusters with an {{ site.data.products.enterprise }} or trial license. [#120830][#120830] -- The [**Jobs** table]({% link v24.1/ui-jobs-page.md %}#jobs-table) page no longer includes two columns related to a deprecated internal implementation detail (last execution time and execution count). [#121286][#121286] -- The timeseries graphs shown on the [**SQL Activity**]({% link v24.1/ui-overview.md %}#sql-activity) statement details page in the DB Console will now render properly, after fixing a bug related to setting the time range of the charts. [#121461][#121461] -- [Index recommendations]({% link v24.1/ui-databases-page.md %}#index-recommendations) in the DB Console will now function properly for indexes on tables or columns whose names contain quotation marks or whitespace. For example: `CREATE INDEX ON "my table" ("my col");`. [#122120][#122120] +- The **Commit Latency** chart in the [**Changefeeds** dashboard]({% link v24.1/ui-cdc-dashboard.md %}) now aggregates by max instead of by sum for multi-node [changefeeds]({% link v24.1/change-data-capture-overview.md %}). This more accurately reflects the amount of time for events to be acknowledged by the downstream sink. #120787 +- Introduced a license expiration message in the [DB Console]({% link v24.1/ui-overview.md %}) in the top-right corner of the primary header. 
This message indicates the remaining days before license expiration for clusters with an {{ site.data.products.enterprise }} or trial license. #120830 +- The [**Jobs** table]({% link v24.1/ui-jobs-page.md %}#jobs-table) page no longer includes two columns related to a deprecated internal implementation detail (last execution time and execution count). #121286 +- The timeseries graphs shown on the [**SQL Activity**]({% link v24.1/ui-overview.md %}#sql-activity) statement details page in the DB Console will now render properly, after fixing a bug related to setting the time range of the charts. #121461 +- [Index recommendations]({% link v24.1/ui-databases-page.md %}#index-recommendations) in the DB Console will now function properly for indexes on tables or columns whose names contain quotation marks or whitespace. For example: `CREATE INDEX ON "my table" ("my col");`. #122120

Bug fixes

-- Sequence options for `NO MINVALUE` and `NO MAXVALUE` now match PostgreSQL behavior. Sequence `MINVALUE` and `MAXVALUE` automatically adjust to new types bounds mirroring behavior of PostgreSQL. [#121310][#121310] -- CockroachDB could previously "leak" reported memory usage as accounted by the internal memory accounting system, the limit for which is configured with the `--max-sql-memory` flag, on long-running sessions that issue many (hundreds of thousands or more) transactions. This, in turn, could result in `"root: memory budget exceeded"` errors for other queries. This bug was present in versions v23.1.17 and v23.2.3 and is now fixed. [#121873][#121873] -- CockroachDB could previously incorrectly evaluate `IN` expressions that had `INT2` or `INT4` type on the left side and values outside of the range of the left side on the right side. The bug has been present since at least v21.1 and is now fixed. [#121954][#121954] -- Fixed a slow memory leak in the deprecated [Pub/Sub changefeeds]({% link v24.1/changefeed-sinks.md %}), which can accumulate when restarting or canceling many deprecated Pub/Sub changefeeds. The bug had been present since the deprecated Pub/Sub changefeed was introduced in a testing release of v22.1. [#121867][#121867] +- Sequence options for `NO MINVALUE` and `NO MAXVALUE` now match PostgreSQL behavior. Sequence `MINVALUE` and `MAXVALUE` automatically adjust to new types bounds mirroring behavior of PostgreSQL. #121310 +- CockroachDB could previously "leak" reported memory usage as accounted by the internal memory accounting system, the limit for which is configured with the `--max-sql-memory` flag, on long-running sessions that issue many (hundreds of thousands or more) transactions. This, in turn, could result in `"root: memory budget exceeded"` errors for other queries. This bug was present in versions v23.1.17 and v23.2.3 and is now fixed. 
#121873 +- CockroachDB could previously incorrectly evaluate `IN` expressions that had `INT2` or `INT4` type on the left side and values outside of the range of the left side on the right side. The bug has been present since at least v21.1 and is now fixed. #121954 +- Fixed a slow memory leak in the deprecated [Pub/Sub changefeeds]({% link v24.1/changefeed-sinks.md %}), which can accumulate when restarting or canceling many deprecated Pub/Sub changefeeds. The bug had been present since the deprecated Pub/Sub changefeed was introduced in a testing release of v22.1. #121867
@@ -40,17 +40,3 @@ This release includes 134 merged PRs by 36 authors.
-[#120787]: https://github.com/cockroachdb/cockroach/pull/120787 -[#120830]: https://github.com/cockroachdb/cockroach/pull/120830 -[#121111]: https://github.com/cockroachdb/cockroach/pull/121111 -[#121286]: https://github.com/cockroachdb/cockroach/pull/121286 -[#121310]: https://github.com/cockroachdb/cockroach/pull/121310 -[#121313]: https://github.com/cockroachdb/cockroach/pull/121313 -[#121461]: https://github.com/cockroachdb/cockroach/pull/121461 -[#121466]: https://github.com/cockroachdb/cockroach/pull/121466 -[#121743]: https://github.com/cockroachdb/cockroach/pull/121743 -[#121811]: https://github.com/cockroachdb/cockroach/pull/121811 -[#121867]: https://github.com/cockroachdb/cockroach/pull/121867 -[#121873]: https://github.com/cockroachdb/cockroach/pull/121873 -[#121954]: https://github.com/cockroachdb/cockroach/pull/121954 -[#122120]: https://github.com/cockroachdb/cockroach/pull/122120 diff --git a/src/current/_includes/releases/v24.1/v24.1.0-beta.2.md b/src/current/_includes/releases/v24.1/v24.1.0-beta.2.md index d4110a09d5a..fb1b3478101 100644 --- a/src/current/_includes/releases/v24.1/v24.1.0-beta.2.md +++ b/src/current/_includes/releases/v24.1/v24.1.0-beta.2.md @@ -6,11 +6,11 @@ Release Date: April 24, 2024

Security updates

-- Added the [cluster setting]({% link v24.1/cluster-settings.md %}) `security.client_cert.subject_required.enabled` that enforces a mandatory requirement for the client certificate's role subject to be set. The subject can be defined through either the subject role option or by specifying the `root-cert-distinguished-name` and `node-cert-distinguished-name` properties. This setting applies to both RPC access and login via authCert. [#122368][#122368] +- Added the [cluster setting]({% link v24.1/cluster-settings.md %}) `security.client_cert.subject_required.enabled` that enforces a mandatory requirement for the client certificate's role subject to be set. The subject can be defined through either the subject role option or by specifying the `root-cert-distinguished-name` and `node-cert-distinguished-name` properties. This setting applies to both RPC access and login via authCert. #122368

Bug fixes

-- Fixed a bug where table statistics were sometimes not collected on tables that have virtual [computed columns]({% link v24.1/computed-columns.md %}) of a user-defined type when the `sql.stats.virtual_computed_columns.enabled` cluster setting is enabled. The setting was introduced in v23.2.4 and is disabled by default. Only clusters running v23.2.4 with the non-default setting are affected. [#122320][#122320] +- Fixed a bug where table statistics were sometimes not collected on tables that have virtual [computed columns]({% link v24.1/computed-columns.md %}) of a user-defined type when the `sql.stats.virtual_computed_columns.enabled` cluster setting is enabled. The setting was introduced in v23.2.4 and is disabled by default. Only clusters running v23.2.4 with the non-default setting are affected. #122320
@@ -20,5 +20,3 @@ This release includes 52 merged PRs by 28 authors.
-[#122320]: https://github.com/cockroachdb/cockroach/pull/122320 -[#122368]: https://github.com/cockroachdb/cockroach/pull/122368 diff --git a/src/current/_includes/releases/v24.1/v24.1.0-beta.3.md b/src/current/_includes/releases/v24.1/v24.1.0-beta.3.md index 7acb78b4f7b..9ab80d1b84a 100644 --- a/src/current/_includes/releases/v24.1/v24.1.0-beta.3.md +++ b/src/current/_includes/releases/v24.1/v24.1.0-beta.3.md @@ -6,33 +6,33 @@ Release Date: April 30, 2024

SQL language changes

-- Updated the [`SHOW GRANTS`]({% link v24.1/show-grants.md %}) responses to display the `object_type` and `object_name`, which has replaced the `relation_name` column. [#122823][#122823] -- Added [external connection]({% link v24.1/create-external-connection.md %}) granted privileges to the [`SHOW GRANTS`]({% link v24.1/show-grants.md %}) command. [#122823][#122823] +- Updated the [`SHOW GRANTS`]({% link v24.1/show-grants.md %}) responses to display the `object_type` and `object_name`, which has replaced the `relation_name` column. #122823 +- Added [external connection]({% link v24.1/create-external-connection.md %}) granted privileges to the [`SHOW GRANTS`]({% link v24.1/show-grants.md %}) command. #122823 - Introduced three new [cluster settings]({% link v24.1/cluster-settings.md %}) for controlling table statistics forecasting: - [`sql.stats.forecasts.min_observations`]({% link v24.1/cluster-settings.md %}) is the minimum number of observed statistics required to produce a forecast. - [`sql.stats.forecasts.min_goodness_of_fit`]({% link v24.1/cluster-settings.md %}) is the minimum R² (goodness of fit) measurement required from all predictive models to use a forecast. - - [`sql.stats.forecasts.max_decrease`]({% link v24.1/cluster-settings.md %}) is the most a prediction can decrease, expressed as the minimum ratio of the prediction to the lowest prior observation. [#122459][#122459] + - [`sql.stats.forecasts.max_decrease`]({% link v24.1/cluster-settings.md %}) is the most a prediction can decrease, expressed as the minimum ratio of the prediction to the lowest prior observation. #122459

Bug fixes

-- Fixed a bug that could lead to descriptors having privileges to roles that no longer exist. Added an automated clean up for [dropped roles]({% link v24.1/drop-role.md %}) inside descriptors. [#122701][#122701] -- Fixed a bug where [client certificate authentication]({% link v24.1/authentication.md %}#client-authentication) combined with [identity maps]({% link v24.1/sso-sql.md %}#identity-map-configuration) (`server.identity_map.configuration`) did not work since v23.1. For the feature to work correctly, the client must specify a valid db user in the [connection string]({% link v24.1/connection-parameters.md %}). [#122738][#122738] -- Fixed a bug where the [row-based execution engine]({% link v24.1/architecture/sql-layer.md %}#query-execution) could drop a [`LIMIT`]({% link v24.1/limit-offset.md %}) clause when there was an [`ORDER BY`]({% link v24.1/order-by.md %}) clause, and the ordering was partially provided by an input operator. For example, this bug could occur with an ordering such as `ORDER BY a, b` when the scanned index was only ordered on column `a`. The impact of this bug was that more rows may have been returned than specified by the `LIMIT` clause. This bug is only present when not using the [vectorized execution engine]({% link v24.1/architecture/sql-layer.md %}#vectorized-query-execution). That is, when running with `SET vectorize = off;`. This bug has existed since CockroachDB v22.1. [#122837][#122837] -- Previously, CockroachDB could run into an internal error when evaluating [PL/pgSQL]({% link v24.1/plpgsql.md %}) routines with nested blocks. The bug is only present in 24.1.0-beta versions. This bug is now fixed. [#122939][#122939] -- Fixed a bug where [`UPDATE`]({% link v24.1/update.md %}) and [`UPSERT`]({% link v24.1/upsert.md %}) queries with a subquery were sometimes inappropriately using implicit [`FOR UPDATE`]({% link v24.1/select-for-update.md %}) locking within the subquery. 
This bug has existed since implicit `FOR UPDATE` locking was introduced in v20.1. [#121391][#121391] -- [Dropping]({% link v24.1/alter-table.md %}#drop-column) and [adding]({% link v24.1/alter-table.md %}#add-column) a column with the same name no longer results in a `"column already exists error"`. [#122631][#122631] -- Fixed a bug that could cause an internal error of the form `invalid datum type given: ..., expected ...` when a `RECORD`-returning [user-defined function]({% link v24.1/user-defined-functions.md %}), used as a data source, was supplied a column definition list with mismatched types. This bug has existed since v23.1. [#122305][#122305] -- Fixed a bug that could result in an internal error when attempting to create a [PL/pgSQL]({% link v24.1/plpgsql.md %}) routine using the (unsupported) `%ROWTYPE` syntax for a variable declaration. Now, an expected syntax error is returned instead. [#122966][#122966] -- Fixed a bug that could result in an assertion error during evaluation of [PL/pgSQL]({% link v24.1/plpgsql.md %}) routines that invoke procedures while using `DEFAULT` arguments. The bug was present in v24.1.0-beta releases and is now fixed. [#122943][#122943] -- Previously, privileges granted for [external connections]({% link v24.1/create-external-connection.md %}) were displaying in `SHOW SYSTEM GRANTS` with no associated object name. Now these privileges are no longer displayed. Instead, the statement `SHOW GRANTS ON EXTERNAL CONNECTION` should be used to view external connection privileges with their associated object name. [#122857][#122857] -- Statistics forecasts of zero rows can cause suboptimal [query plans]({% link v24.1/cost-based-optimizer.md %}). Forecasting will now avoid predicting zero rows for most downward-trending statistics. 
[#122459][#122459] -- Fixed a bug introduced in v23.2 that could cause a [PL/pgSQL]({% link v24.1/plpgsql.md %}) variable assignment to not be executed if the variable was never referenced after the assignment. [#123045][#123045] +- Fixed a bug that could lead to descriptors having privileges to roles that no longer exist. Added an automated clean up for [dropped roles]({% link v24.1/drop-role.md %}) inside descriptors. #122701 +- Fixed a bug where [client certificate authentication]({% link v24.1/authentication.md %}#client-authentication) combined with [identity maps]({% link v24.1/sso-sql.md %}#identity-map-configuration) (`server.identity_map.configuration`) did not work since v23.1. For the feature to work correctly, the client must specify a valid db user in the [connection string]({% link v24.1/connection-parameters.md %}). #122738 +- Fixed a bug where the [row-based execution engine]({% link v24.1/architecture/sql-layer.md %}#query-execution) could drop a [`LIMIT`]({% link v24.1/limit-offset.md %}) clause when there was an [`ORDER BY`]({% link v24.1/order-by.md %}) clause, and the ordering was partially provided by an input operator. For example, this bug could occur with an ordering such as `ORDER BY a, b` when the scanned index was only ordered on column `a`. The impact of this bug was that more rows may have been returned than specified by the `LIMIT` clause. This bug is only present when not using the [vectorized execution engine]({% link v24.1/architecture/sql-layer.md %}#vectorized-query-execution). That is, when running with `SET vectorize = off;`. This bug has existed since CockroachDB v22.1. #122837 +- Previously, CockroachDB could run into an internal error when evaluating [PL/pgSQL]({% link v24.1/plpgsql.md %}) routines with nested blocks. The bug is only present in 24.1.0-beta versions. This bug is now fixed. 
#122939 +- Fixed a bug where [`UPDATE`]({% link v24.1/update.md %}) and [`UPSERT`]({% link v24.1/upsert.md %}) queries with a subquery were sometimes inappropriately using implicit [`FOR UPDATE`]({% link v24.1/select-for-update.md %}) locking within the subquery. This bug has existed since implicit `FOR UPDATE` locking was introduced in v20.1. #121391 +- [Dropping]({% link v24.1/alter-table.md %}#drop-column) and [adding]({% link v24.1/alter-table.md %}#add-column) a column with the same name no longer results in a `"column already exists error"`. #122631 +- Fixed a bug that could cause an internal error of the form `invalid datum type given: ..., expected ...` when a `RECORD`-returning [user-defined function]({% link v24.1/user-defined-functions.md %}), used as a data source, was supplied a column definition list with mismatched types. This bug has existed since v23.1. #122305 +- Fixed a bug that could result in an internal error when attempting to create a [PL/pgSQL]({% link v24.1/plpgsql.md %}) routine using the (unsupported) `%ROWTYPE` syntax for a variable declaration. Now, an expected syntax error is returned instead. #122966 +- Fixed a bug that could result in an assertion error during evaluation of [PL/pgSQL]({% link v24.1/plpgsql.md %}) routines that invoke procedures while using `DEFAULT` arguments. The bug was present in v24.1.0-beta releases and is now fixed. #122943 +- Previously, privileges granted for [external connections]({% link v24.1/create-external-connection.md %}) were displaying in `SHOW SYSTEM GRANTS` with no associated object name. Now these privileges are no longer displayed. Instead, the statement `SHOW GRANTS ON EXTERNAL CONNECTION` should be used to view external connection privileges with their associated object name. #122857 +- Statistics forecasts of zero rows can cause suboptimal [query plans]({% link v24.1/cost-based-optimizer.md %}). Forecasting will now avoid predicting zero rows for most downward-trending statistics. 
#122459 +- Fixed a bug introduced in v23.2 that could cause a [PL/pgSQL]({% link v24.1/plpgsql.md %}) variable assignment to not be executed if the variable was never referenced after the assignment. #123045

Performance improvements

-- More efficient [query plans]({% link v24.1/cost-based-optimizer.md %}) are now generated for queries with text similarity filters, for example, `text_col % 'foobar'`. These plans are generated if the `optimizer_use_trigram_similarity_optimization` [session setting]({% link v24.1/set-vars.md %}) is enabled. It is disabled by default. [#122838][#122838] -- The [optimizer]({% link v24.1/cost-based-optimizer.md %}) now costs `distinct-on` operators more accurately. It may produce more efficient query plans in some cases. [#122850][#122850] -- Improved the speed for optimization of some statements using `GROUP BY` or `DISTINCT` or `ON CONFLICT` by skipping the [optimizer]({% link v24.1/cost-based-optimizer.md %}) rule `SplitGroupByScanIntoUnionScans` when it is not needed. [#123034][#123034] +- More efficient [query plans]({% link v24.1/cost-based-optimizer.md %}) are now generated for queries with text similarity filters, for example, `text_col % 'foobar'`. These plans are generated if the `optimizer_use_trigram_similarity_optimization` [session setting]({% link v24.1/set-vars.md %}) is enabled. It is disabled by default. #122838 +- The [optimizer]({% link v24.1/cost-based-optimizer.md %}) now costs `distinct-on` operators more accurately. It may produce more efficient query plans in some cases. #122850 +- Improved the speed for optimization of some statements using `GROUP BY` or `DISTINCT` or `ON CONFLICT` by skipping the [optimizer]({% link v24.1/cost-based-optimizer.md %}) rule `SplitGroupByScanIntoUnionScans` when it is not needed. #123034
@@ -42,19 +42,3 @@ This release includes 56 merged PRs by 25 authors.
-[#121391]: https://github.com/cockroachdb/cockroach/pull/121391 -[#122305]: https://github.com/cockroachdb/cockroach/pull/122305 -[#122459]: https://github.com/cockroachdb/cockroach/pull/122459 -[#122631]: https://github.com/cockroachdb/cockroach/pull/122631 -[#122701]: https://github.com/cockroachdb/cockroach/pull/122701 -[#122738]: https://github.com/cockroachdb/cockroach/pull/122738 -[#122823]: https://github.com/cockroachdb/cockroach/pull/122823 -[#122837]: https://github.com/cockroachdb/cockroach/pull/122837 -[#122838]: https://github.com/cockroachdb/cockroach/pull/122838 -[#122850]: https://github.com/cockroachdb/cockroach/pull/122850 -[#122857]: https://github.com/cockroachdb/cockroach/pull/122857 -[#122939]: https://github.com/cockroachdb/cockroach/pull/122939 -[#122943]: https://github.com/cockroachdb/cockroach/pull/122943 -[#122966]: https://github.com/cockroachdb/cockroach/pull/122966 -[#123034]: https://github.com/cockroachdb/cockroach/pull/123034 -[#123045]: https://github.com/cockroachdb/cockroach/pull/123045 diff --git a/src/current/_includes/releases/v24.1/v24.1.0-rc.1.md b/src/current/_includes/releases/v24.1/v24.1.0-rc.1.md index 60a8d9c6d64..60beba5eb87 100644 --- a/src/current/_includes/releases/v24.1/v24.1.0-rc.1.md +++ b/src/current/_includes/releases/v24.1/v24.1.0-rc.1.md @@ -6,31 +6,31 @@ Release Date: May 8, 2024

SQL language changes

-- Added a new [session setting]({% link v24.1/session-variables.md %}) `optimizer_use_improved_multi_column_selectivity_estimate`, which if enabled, causes the [optimizer]({% link v24.1/cost-based-optimizer.md %}) to use an improved selectivity estimate for multi-column predicates. This setting will default to `true` on v24.2 and later, and `false` on earlier versions. [#123106][#123106] +- Added a new [session setting]({% link v24.1/session-variables.md %}) `optimizer_use_improved_multi_column_selectivity_estimate`, which if enabled, causes the [optimizer]({% link v24.1/cost-based-optimizer.md %}) to use an improved selectivity estimate for multi-column predicates. This setting will default to `true` on v24.2 and later, and `false` on earlier versions. #123106

Operational changes

-- Added two new [metrics]({% link v24.1/metrics.md %}): `range.snapshots.upreplication.rcvd-bytes` counts the number of [Raft]({% link v24.1/architecture/replication-layer.md %}#raft) recovery snapshot bytes received, and `range.snapshots.upreplication.sent-bytes` counts the number of Raft recovery snapshot bytes sent. Also updated `range.snapshots.recovery.rcvd-bytes` and `range.snapshots.recovery.sent-bytes` to only include Raft snapshots. A new line was added to the [**Snapshot Data Received**]({% link v24.1/ui-replication-dashboard.md %}#snapshot-data-received) graph. [#123055][#123055] +- Added two new [metrics]({% link v24.1/metrics.md %}): `range.snapshots.upreplication.rcvd-bytes` counts the number of [Raft]({% link v24.1/architecture/replication-layer.md %}#raft) recovery snapshot bytes received, and `range.snapshots.upreplication.sent-bytes` counts the number of Raft recovery snapshot bytes sent. Also updated `range.snapshots.recovery.rcvd-bytes` and `range.snapshots.recovery.sent-bytes` to only include Raft snapshots. A new line was added to the [**Snapshot Data Received**]({% link v24.1/ui-replication-dashboard.md %}#snapshot-data-received) graph. #123055

DB Console changes

-- Added a **Replication Lag** graph to the [**Physical Cluster Replication**]({% link v24.1/physical-cluster-replication-monitoring.md %}) dashboard to measure replication lag between primary and standby clusters using [physical cluster replication]({% link v24.1/physical-cluster-replication-overview.md %}). [#123285][#123285] +- Added a **Replication Lag** graph to the [**Physical Cluster Replication**]({% link v24.1/physical-cluster-replication-monitoring.md %}) dashboard to measure replication lag between primary and standby clusters using [physical cluster replication]({% link v24.1/physical-cluster-replication-overview.md %}). #123285

Bug fixes

-- Fixed a bug that caused the [**Tables**]({% link v24.1/ui-databases-page.md %}#tables-view) and [**Table Details**]({% link v24.1/ui-databases-page.md %}#table-details) pages in the DB Console to display an incorrect value for **Table Stats Last Updated**. [#122816][#122816] -- Fixed a bug in the DB Console's [**Custom Chart**]({% link v24.1/ui-custom-chart-debug-page.md %}) tool where store-level metrics were displayed only for the first store ID associated with the node. Now data is displayed for all stores present on a node, and a single time series is shown for each store, rather than an aggregated value for all of the node's stores. This allows finer-grained monitoring of store-level metrics. [#122705][#122705] -- Fixed a bug introduced in v22.2 that could cause the internal error `attempting to append refresh spans after the tracked timestamp has moved forward` in some edge cases. [#123136][#123136] -- Fixed a bug where a `TYPEDESC SCHEMA CHANGE` job could retry forever if the descriptor it targeted was already dropped. [#123273][#123273] -- Fixed a bug where, if the legacy schema changer was enabled, the [`CREATE SEQUENCE`]({% link v24.1/create-sequence.md %}) command would incorrectly require the user to have the `CREATE` [privilege]({% link v24.1/security-reference/authorization.md %}#privileges) on the parent database rather than only on the parent schema.[#123289][#123289] -- Fixed a bug where a [job]({% link v24.1/show-jobs.md %}) would fail if it reported an out-of-bound progress fraction. The error is now logged and no longer causes the job to fail. [#122965][#122965] +- Fixed a bug that caused the [**Tables**]({% link v24.1/ui-databases-page.md %}#tables-view) and [**Table Details**]({% link v24.1/ui-databases-page.md %}#table-details) pages in the DB Console to display an incorrect value for **Table Stats Last Updated**. 
#122816 +- Fixed a bug in the DB Console's [**Custom Chart**]({% link v24.1/ui-custom-chart-debug-page.md %}) tool where store-level metrics were displayed only for the first store ID associated with the node. Now data is displayed for all stores present on a node, and a single time series is shown for each store, rather than an aggregated value for all of the node's stores. This allows finer-grained monitoring of store-level metrics. #122705 +- Fixed a bug introduced in v22.2 that could cause the internal error `attempting to append refresh spans after the tracked timestamp has moved forward` in some edge cases. #123136 +- Fixed a bug where a `TYPEDESC SCHEMA CHANGE` job could retry forever if the descriptor it targeted was already dropped. #123273 +- Fixed a bug where, if the legacy schema changer was enabled, the [`CREATE SEQUENCE`]({% link v24.1/create-sequence.md %}) command would incorrectly require the user to have the `CREATE` [privilege]({% link v24.1/security-reference/authorization.md %}#privileges) on the parent database rather than only on the parent schema. #123289 +- Fixed a bug where a [job]({% link v24.1/show-jobs.md %}) would fail if it reported an out-of-bound progress fraction. The error is now logged and no longer causes the job to fail. #122965

Performance improvements

-- Added a new [session setting]({% link v24.1/session-variables.md %}) `optimizer_use_improved_zigzag_join_costing`. When enabled and when the [cluster setting]({% link v24.1/cluster-settings.md %}) `enable_zigzag_join` is also enabled, the cost of zigzag joins is updated such that a zigzag join will be chosen over a scan only if it produces fewer rows than a scan.[#123106][#123106] -- Improved the selectivity estimation of multi-column filters when the multi-column distinct count is high. This prevents the [optimizer]({% link v24.1/cost-based-optimizer.md %}) from choosing a bad query plan due to over-estimating the selectivity of a multi-column predicate. [#123106][#123106] -- Improved the efficiency of error handling in the [vectorized execution engine]({% link v24.1/vectorized-execution.md %}), to reduce the CPU overhead of statement timeout handling and reduce the potential for more statement timeouts. [#123501][#123501] -- Disabled a poorly-performing [changefeed]({% link v24.1/change-data-capture-overview.md %}) optimization that was intended to reduce duplicates during aggregator restarts. [#123597][#123597] +- Added a new [session setting]({% link v24.1/session-variables.md %}) `optimizer_use_improved_zigzag_join_costing`. When enabled and when the [cluster setting]({% link v24.1/cluster-settings.md %}) `enable_zigzag_join` is also enabled, the cost of zigzag joins is updated such that a zigzag join will be chosen over a scan only if it produces fewer rows than a scan. #123106 +- Improved the selectivity estimation of multi-column filters when the multi-column distinct count is high. This prevents the [optimizer]({% link v24.1/cost-based-optimizer.md %}) from choosing a bad query plan due to over-estimating the selectivity of a multi-column predicate. 
#123106 +- Improved the efficiency of error handling in the [vectorized execution engine]({% link v24.1/vectorized-execution.md %}), to reduce the CPU overhead of statement timeout handling and reduce the potential for more statement timeouts. #123501 +- Disabled a poorly-performing [changefeed]({% link v24.1/change-data-capture-overview.md %}) optimization that was intended to reduce duplicates during aggregator restarts. #123597
@@ -40,16 +40,3 @@ This release includes 57 merged PRs by 24 authors.
-[#122705]: https://github.com/cockroachdb/cockroach/pull/122705 -[#122816]: https://github.com/cockroachdb/cockroach/pull/122816 -[#122965]: https://github.com/cockroachdb/cockroach/pull/122965 -[#123055]: https://github.com/cockroachdb/cockroach/pull/123055 -[#123106]: https://github.com/cockroachdb/cockroach/pull/123106 -[#123136]: https://github.com/cockroachdb/cockroach/pull/123136 -[#123144]: https://github.com/cockroachdb/cockroach/pull/123144 -[#123273]: https://github.com/cockroachdb/cockroach/pull/123273 -[#123285]: https://github.com/cockroachdb/cockroach/pull/123285 -[#123289]: https://github.com/cockroachdb/cockroach/pull/123289 -[#123373]: https://github.com/cockroachdb/cockroach/pull/123373 -[#123501]: https://github.com/cockroachdb/cockroach/pull/123501 -[#123597]: https://github.com/cockroachdb/cockroach/pull/123597 diff --git a/src/current/_includes/releases/v24.1/v24.1.0-rc.2.md b/src/current/_includes/releases/v24.1/v24.1.0-rc.2.md index c7dab7dd73f..8d0057af904 100644 --- a/src/current/_includes/releases/v24.1/v24.1.0-rc.2.md +++ b/src/current/_includes/releases/v24.1/v24.1.0-rc.2.md @@ -11,8 +11,8 @@ Release Date: May 16, 2024 - The cluster settings `changefeed.frontier_checkpoint_frequency` and `low changefeed.frontier_highwater_lag_checkpoint_threshold` were set low, which resulted in the initial scan taking many multiples of the configured frequency to complete. - There were multiple target tables with significant differences in row counts in one changefeed. - The changefeed target tables were large with many ranges. - - The initial scan took a long time to complete (an hour or longer). 
[#123968][#123968] -- Fixed a crash introduced in v24.1.0-beta.2 that could occur when planning [statistics collection]({% link v24.1/cost-based-optimizer.md %}#table-statistics) on a table with a [virtual computed column]({% link v24.1/computed-columns.md %}) using a user-defined type and the `sql.stats.virtual_computed_columns.enabled` [cluster setting]({% link v24.1/cluster-settings.md %}) is set to `true`. `sql.stats.virtual_computed_columns.enabled` was introduced in v24.1.0-alpha.1 as `true` by default and introduced in v23.2.5 as `false` by default. [#124064][#124064] + - The initial scan took a long time to complete (an hour or longer). #123968 +- Fixed a crash introduced in v24.1.0-beta.2 that could occur when planning [statistics collection]({% link v24.1/cost-based-optimizer.md %}#table-statistics) on a table with a [virtual computed column]({% link v24.1/computed-columns.md %}) using a user-defined type and the `sql.stats.virtual_computed_columns.enabled` [cluster setting]({% link v24.1/cluster-settings.md %}) is set to `true`. `sql.stats.virtual_computed_columns.enabled` was introduced in v24.1.0-alpha.1 as `true` by default and introduced in v23.2.5 as `false` by default. #124064
@@ -22,5 +22,3 @@ This release includes 3 merged PRs by 3 authors.
-[#123968]: https://github.com/cockroachdb/cockroach/pull/123968 -[#124064]: https://github.com/cockroachdb/cockroach/pull/124064 diff --git a/src/current/_includes/releases/v24.1/v24.1.0.md b/src/current/_includes/releases/v24.1/v24.1.0.md index 80bb31f523e..af2a2a0b6b7 100644 --- a/src/current/_includes/releases/v24.1/v24.1.0.md +++ b/src/current/_includes/releases/v24.1/v24.1.0.md @@ -475,9 +475,9 @@ COMMITTED more easily, without needing to teach the schema changer about Before [upgrading to CockroachDB v24.1]({% link v24.1/upgrade-cockroach-version.md %}), be sure to review the following backward-incompatible changes, as well as [key cluster setting changes](#v24-1-0-cluster-settings), and adjust your deployment as necessary. -- [`AS OF SYSTEM TIME`]({% link v24.1/as-of-system-time.md %}) queries can no longer use a timestamp followed by a question mark to signify a future-time value. This was an undocumented syntax. [#116830](https://github.com/cockroachdb/cockroach/pull/116830) -- The [`READ COMMITTED`]({% link v24.1/read-committed.md %}) isolation level now requires the cluster to have a valid [enterprise license](https://cockroachlabs.com/docs/v24.1/licensing-faqs#obtain-a-license). Otherwise, transactions which are configured to run as `READ COMMITTED` will be upgraded to [`SERIALIZABLE`]({% link v24.1/demo-serializable.md %}), as described in the next note. [#120154](https://github.com/cockroachdb/cockroach/pull/120154) -- The `sql.txn.read_committed_isolation.enabled` [cluster setting]({% link v24.1/cluster-settings.md %}) is now `true` by default. As a result for enterprise users, [`READ COMMITTED`]({% link v24.1/read-committed.md %}) transactions are **not** automatically upgraded to [`SERIALIZABLE`]({% link v24.1/demo-serializable.md %}), and will run as `READ COMMITTED` by default. 
On v23.2, refer to the [Upgrades of SQL Transaction Isolation Level]({% link v24.1/ui-sql-dashboard.md %}#upgrades-of-sql-transaction-isolation-level) graph in the DB Console to check whether any transaction is being upgraded from a weaker isolation level to `SERIALIZABLE`, and could therefore run differently on v24.1. [#118479](https://github.com/cockroachdb/cockroach/pull/118479) +- [`AS OF SYSTEM TIME`]({% link v24.1/as-of-system-time.md %}) queries can no longer use a timestamp followed by a question mark to signify a future-time value. This was an undocumented syntax. #116830 +- The [`READ COMMITTED`]({% link v24.1/read-committed.md %}) isolation level now requires the cluster to have a valid [enterprise license](https://cockroachlabs.com/docs/v24.1/licensing-faqs#obtain-a-license). Otherwise, transactions which are configured to run as `READ COMMITTED` will be upgraded to [`SERIALIZABLE`]({% link v24.1/demo-serializable.md %}), as described in the next note. #120154 +- The `sql.txn.read_committed_isolation.enabled` [cluster setting]({% link v24.1/cluster-settings.md %}) is now `true` by default. As a result for enterprise users, [`READ COMMITTED`]({% link v24.1/read-committed.md %}) transactions are **not** automatically upgraded to [`SERIALIZABLE`]({% link v24.1/demo-serializable.md %}), and will run as `READ COMMITTED` by default. On v23.2, refer to the [Upgrades of SQL Transaction Isolation Level]({% link v24.1/ui-sql-dashboard.md %}#upgrades-of-sql-transaction-isolation-level) graph in the DB Console to check whether any transaction is being upgraded from a weaker isolation level to `SERIALIZABLE`, and could therefore run differently on v24.1. #118479

Features that require upgrade finalization

@@ -490,14 +490,14 @@ The following changes should be reviewed prior to upgrading. Default cluster set - `sql.txn.read_committed_isolation.enabled` is now `true` by default. When set to `true`, transactions use the `READ COMMITTED` isolation level if specified by `BEGIN`/`SET` commands. - If the cluster setting is `false`, as was the default in v23.2, such `READ COMMITTED` transactions will instead run as `SERIALIZABLE`. - To check whether any transactions are being upgraded to `SERIALIZABLE`, see the [**Upgrades of SQL Transaction Isolation Level**]({% link v24.1/ui-sql-dashboard.md %}#upgrades-of-sql-transaction-isolation-level) graph in the DB Console." -- The `changefeed.balance_range_distribution.enabled` [cluster setting]({% link v24.1/cluster-settings.md %}) is now deprecated. Instead, use the new cluster setting `changefeed.default_range_distribution_strategy`. `changefeed.default_range_distribution_strategy='balanced_simple'` has the same effect as setting `changefeed.balance_range_distribution.enabled=true`. It does not require `initial_scan='only'`, which was required by the old setting. [#115166][#115166] -- Added the [cluster setting]({% link v24.1/cluster-settings.md %}) `security.client_cert.subject_required.enabled` which enforces a mandatory requirement for the client certificate's role subject to be set. The subject can be defined through either the subject role option or by specifying the `root-cert-distinguished-name` and `node-cert-distinguished-name` properties. This setting applies to both RPC access and login via authCert. [#122368][#122368] -- The [cluster setting]({% link v24.1/cluster-settings.md %}) `sql.contention.record_serialization_conflicts.enabled` is now `on` by default. As a result, any [`40001` error]({% link v24.1/transaction-retry-error-reference.md %}) that contains conflicting transaction information will be recorded by the contention registry, improving the ability to troubleshoot. 
For more information, refer to the [Insights page]({% link v24.1/ui-insights-page.md %}) documentation. [#116664][#116664] +- The `changefeed.balance_range_distribution.enabled` [cluster setting]({% link v24.1/cluster-settings.md %}) is now deprecated. Instead, use the new cluster setting `changefeed.default_range_distribution_strategy`. `changefeed.default_range_distribution_strategy='balanced_simple'` has the same effect as setting `changefeed.balance_range_distribution.enabled=true`. It does not require `initial_scan='only'`, which was required by the old setting. #115166 +- Added the [cluster setting]({% link v24.1/cluster-settings.md %}) `security.client_cert.subject_required.enabled` which enforces a mandatory requirement for the client certificate's role subject to be set. The subject can be defined through either the subject role option or by specifying the `root-cert-distinguished-name` and `node-cert-distinguished-name` properties. This setting applies to both RPC access and login via authCert. #122368 +- The [cluster setting]({% link v24.1/cluster-settings.md %}) `sql.contention.record_serialization_conflicts.enabled` is now `on` by default. As a result, any [`40001` error]({% link v24.1/transaction-retry-error-reference.md %}) that contains conflicting transaction information will be recorded by the contention registry, improving the ability to troubleshoot. For more information, refer to the [Insights page]({% link v24.1/ui-insights-page.md %}) documentation. #116664 - The new [cluster setting]({% link v24.1/cluster-settings.md %}) `storage.sstable.compression_algorithm` configures the compression algorithm used when compressing sstable blocks. Supported values are: "snappy" and "zstd" [snappy = `1`, zstd = `2`]. 
Changing the default of snappy to zstd can result in substantial performance improvement, however, the effects this change may be highly dependent on the workload and data, so experimentation is recommended before enabling zstd in production environments. -- The new setting `storage.wal_failover.unhealthy_op_threshold` allows you to set the latency threshold at which a WAL (Write-Ahead Logging) write is considered unhealthy. When exceeded, the node will attempt to write WAL entries to a secondary store's volume. For more information, refer to [#120509][#120509] -- The new `server.max_open_transactions_per_gateway` [cluster setting]({% link v24.1/cluster-settings.md %}), when set to a non-negative value, allows only admin users to execute a query if the number of open transactions on the current gateway node is already at the configured limit. [#118781][#118781] -- The new `server.redact_sensitive_settings.enabled` [cluster setting]({% link v24.1/cluster-settings.md %}) (`false` by default), when set to `true`, redacts the values of the following settings in the output of `SHOW` commands or other introspection interfaces. In the future, newly-added sensitive cluster settings will be redacted as well. Users with the `MODIFYCLUSTERSETTING` [privilege](https://www.cockroachlabs.com/docs/v24.1/security-reference/authorization#managing-privileges) can always view the unredacted settings. [#117729][#117729] -- The new boolean changefeed option [`ignore_disable_changefeed_replication`](../v24.1/create-changefeed.html#ignore-disable-changefeed-replication), when set to `true`, prevents the changefeed from filtering events even if CDC filtering is configured via the `disable_changefeed_replication` [session variable](../v24.1/session-variables.html), `sql.ttl.changefeed_replication.disabled` [cluster setting](../v24.1/cluster-settings.html), or the `ttl_disable_changefeed_replication` [table storage parameter](../v24.1/alter-table.html#table-storage-parameters). 
[#120255][#120255] +- The new setting `storage.wal_failover.unhealthy_op_threshold` allows you to set the latency threshold at which a WAL (Write-Ahead Logging) write is considered unhealthy. When exceeded, the node will attempt to write WAL entries to a secondary store's volume. For more information, refer to #120509 +- The new `server.max_open_transactions_per_gateway` [cluster setting]({% link v24.1/cluster-settings.md %}), when set to a non-negative value, allows only admin users to execute a query if the number of open transactions on the current gateway node is already at the configured limit. #118781 +- The new `server.redact_sensitive_settings.enabled` [cluster setting]({% link v24.1/cluster-settings.md %}) (`false` by default), when set to `true`, redacts the values of the following settings in the output of `SHOW` commands or other introspection interfaces. In the future, newly-added sensitive cluster settings will be redacted as well. Users with the `MODIFYCLUSTERSETTING` [privilege](https://www.cockroachlabs.com/docs/v24.1/security-reference/authorization#managing-privileges) can always view the unredacted settings. #117729 +- The new boolean changefeed option [`ignore_disable_changefeed_replication`](../v24.1/create-changefeed.html#ignore-disable-changefeed-replication), when set to `true`, prevents the changefeed from filtering events even if CDC filtering is configured via the `disable_changefeed_replication` [session variable](../v24.1/session-variables.html), `sql.ttl.changefeed_replication.disabled` [cluster setting](../v24.1/cluster-settings.html), or the `ttl_disable_changefeed_replication` [table storage parameter](../v24.1/alter-table.html#table-storage-parameters). #120255 - The provisioned-rate field, if specified, should no longer accept a disk-name or an optional bandwidth field. To use the disk bandwidth constraint the store-spec must contain @@ -510,8 +510,8 @@ setting `kvadmission.store.provisioned_bandwidth` will be used. 
When set to a no

Deprecations

-- `changefeed.balance_range_distribution.enable` is now deprecated. Instead, use the new [cluster setting]({% link v24.1/cluster-settings.md %}) `changefeed.default_range_distribution_strategy`. `changefeed.default_range_distribution_strategy='balanced_simple'` has the same effect as setting `changefeed.balance_range_distribution.enable=true`. It does not require `initial_scan='only'`, which was required by the old setting. [#115166][#115166] -- The `cockroach connect` command has been removed. This command was [deprecated]({% link releases/v23.2.md %}#v23-2-0-deprecations) in CockroachDB v23.2. [#113893][#113893] +- `changefeed.balance_range_distribution.enable` is now deprecated. Instead, use the new [cluster setting]({% link v24.1/cluster-settings.md %}) `changefeed.default_range_distribution_strategy`. `changefeed.default_range_distribution_strategy='balanced_simple'` has the same effect as setting `changefeed.balance_range_distribution.enable=true`. It does not require `initial_scan='only'`, which was required by the old setting. #115166 +- The `cockroach connect` command has been removed. This command was [deprecated]({% link releases/v23.2.md %}#v23-2-0-deprecations) in CockroachDB v23.2. #113893

Known limitations

@@ -531,12 +531,3 @@ Docs | [SQL Feature Support]({% link v24.1/sql-feature-support.m Docs | [Change Data Capture Overview]({% link v24.1/change-data-capture-overview.md %}) | This page summarizes CockroachDB's data streaming capabilities. Change data capture (CDC) provides efficient, distributed, row-level changefeeds into a configurable sink for downstream processing such as reporting, caching, or full-text indexing. Docs | [Backup Architecture]({% link v24.1/backup-architecture.md %}) | This page describes the backup job workflow with a high-level overview, diagrams, and more details on each phase of the job. -[#115166]: https://github.com/cockroachdb/cockroach/pull/115166 -[#113893]: https://github.com/cockroachdb/cockroach/pull/113893 -[#115166]: https://github.com/cockroachdb/cockroach/pull/115166 -[#122368]: https://github.com/cockroachdb/cockroach/pull/122368 -[#116664]: https://github.com/cockroachdb/cockroach/pull/116664 -[#120509]: https://github.com/cockroachdb/cockroach/pull/120509 -[#118781]: https://github.com/cockroachdb/cockroach/pull/118781 -[#117729]: https://github.com/cockroachdb/cockroach/pull/117729 -[#120255]: https://github.com/cockroachdb/cockroach/pull/120255 diff --git a/src/current/_includes/releases/v24.1/v24.1.1.md b/src/current/_includes/releases/v24.1/v24.1.1.md index aef18f2ab4d..5867f86eccf 100644 --- a/src/current/_includes/releases/v24.1/v24.1.1.md +++ b/src/current/_includes/releases/v24.1/v24.1.1.md @@ -11,53 +11,53 @@ Release Date: June 14, 2024 - The cluster settings `changefeed.frontier_checkpoint_frequency` and `low changefeed.frontier_highwater_lag_checkpoint_threshold` were set low, which resulted in the initial scan taking many multiples of the configured frequency to complete. - There were multiple target tables with significant differences in row counts in one changefeed. - The changefeed target tables were large with many ranges. - - The initial scan took a long time to complete (an hour or longer). 
[#123967][#123967] -- History retention jobs created upon completion of [cluster replication]({% link v24.1/physical-cluster-replication-overview.md %}) no longer erroneously indicate that they failed when they expire. [#124055][#124055] + - The initial scan took a long time to complete (an hour or longer). #123967 +- History retention jobs created upon completion of [cluster replication]({% link v24.1/physical-cluster-replication-overview.md %}) no longer erroneously indicate that they failed when they expire. #124055

SQL language changes

-- The [optimizer]({% link v24.1/cost-based-optimizer.md %}) can now plan constrained scans over partial indexes in more cases, particularly on partial indexes with predicates referencing [virtual computed columns]({% link v24.1/computed-columns.md %}#virtual-computed-columns). [#123468][#123468] -- The storage parameter `ttl_delete_rate_limit`, which determines the rate limit for deleting expired rows, is now set to `100` by default. [#124354][#124354] -- CockroachDB no longer limits precision when converting [spatial data types]({% link v24.1/architecture/glossary.md %}#data-types) to JSON. [#124536][#124536] -- The `optimizer_push_offset_into_index_join` [session setting]({% link v24.1/session-variables.md %}) has been added. When enabled, the [optimizer]({% link v24.1/cost-based-optimizer.md %}) will attempt to push offset expressions into index join expressions to produce more efficient query plans. The setting is enabled by default on v24.1 and later, and disabled on v23.2. [#124490][#124490] -- The default value of the `sql.defaults.results_buffer.size` [cluster setting]({% link v24.1/cluster-settings.md %}) has been changed from 16KiB to 512KiB. This reduces the chance that clients using [`READ COMMITTED`]({% link v24.1/read-committed.md %}) transactions will encounter errors that cannot automatically be retried within CockroachDB. [#124633][#124633] +- The [optimizer]({% link v24.1/cost-based-optimizer.md %}) can now plan constrained scans over partial indexes in more cases, particularly on partial indexes with predicates referencing [virtual computed columns]({% link v24.1/computed-columns.md %}#virtual-computed-columns). #123468 +- The storage parameter `ttl_delete_rate_limit`, which determines the rate limit for deleting expired rows, is now set to `100` by default. #124354 +- CockroachDB no longer limits precision when converting [spatial data types]({% link v24.1/architecture/glossary.md %}#data-types) to JSON. 
#124536 +- The `optimizer_push_offset_into_index_join` [session setting]({% link v24.1/session-variables.md %}) has been added. When enabled, the [optimizer]({% link v24.1/cost-based-optimizer.md %}) will attempt to push offset expressions into index join expressions to produce more efficient query plans. The setting is enabled by default on v24.1 and later, and disabled on v23.2. #124490 +- The default value of the `sql.defaults.results_buffer.size` [cluster setting]({% link v24.1/cluster-settings.md %}) has been changed from 16KiB to 512KiB. This reduces the chance that clients using [`READ COMMITTED`]({% link v24.1/read-committed.md %}) transactions will encounter errors that cannot automatically be retried within CockroachDB. #124633

Operational changes

-- The default values for the [cluster settings]({% link v24.1/cluster-settings.md %}) `sql.metrics.max_mem_stmt_fingerprints` and `sql.metrics.max_mem_txn_fingerprints` have been changed from `100000` to `5000`, thus lowering the default limits for in-memory statement and transaction fingerprints. [#123430][#123430] -- The new `sql.pgwire.pipeline.count` [gauge metric]({% link v24.1/metrics.md %}) shows the number of wire protocol commands that have been received by the server, but have not yet begun processing. This metric will only grow if clients are using the "pipeline mode" of the PostgreSQL wire protocol. [#124256][#124256] -- The `client_authentication_ok` and `client_session_end` messages are now logged to the `SESSIONS` [log channel]({% link v24.1/logging-overview.md %}#logging-channels) unconditionally. Previously, these would be logged only if the `server.auth_log.sql_sessions.enabled` [cluster setting]({% link v24.1/cluster-settings.md %}) was set to `true`. All other `SESSIONS` log messages are logged only if `server.auth_log.sql_sessions.enabled` or `server.auth_log.sql_connections.enabled` are set to `true`. To prevent logging of `client_authentication_ok` or `client_session_end` messages, you can optionally disable the `SESSIONS` log channel. [#124369][#124369] +- The default values for the [cluster settings]({% link v24.1/cluster-settings.md %}) `sql.metrics.max_mem_stmt_fingerprints` and `sql.metrics.max_mem_txn_fingerprints` have been changed from `100000` to `5000`, thus lowering the default limits for in-memory statement and transaction fingerprints. #123430 +- The new `sql.pgwire.pipeline.count` [gauge metric]({% link v24.1/metrics.md %}) shows the number of wire protocol commands that have been received by the server, but have not yet begun processing. This metric will only grow if clients are using the "pipeline mode" of the PostgreSQL wire protocol. 
#124256 +- The `client_authentication_ok` and `client_session_end` messages are now logged to the `SESSIONS` [log channel]({% link v24.1/logging-overview.md %}#logging-channels) unconditionally. Previously, these would be logged only if the `server.auth_log.sql_sessions.enabled` [cluster setting]({% link v24.1/cluster-settings.md %}) was set to `true`. All other `SESSIONS` log messages are logged only if `server.auth_log.sql_sessions.enabled` or `server.auth_log.sql_connections.enabled` are set to `true`. To prevent logging of `client_authentication_ok` or `client_session_end` messages, you can optionally disable the `SESSIONS` log channel. #124369

DB Console changes

-- Fixed a bug where the [replication lag metric]({% link v24.1/physical-cluster-replication-monitoring.md %}) would falsely report high lag for multi-node clusters and on cutover. [#123585][#123585] -- Fixed a bug that caused [**SQL Activity**]({% link v23.2/ui-sql-dashboard.md %}) entries sorted by `% of Runtime` to be sorted incorrectly. [#123903][#123903] -- The "Admission Delay Rate", "Admission Work Rate", and "Requests Waiting For Flow Tokens" time-series charts have been removed from the DB Console. These charts can be difficult to interpret and provide little value for overload investigations. [#124509][#124509] -- The [**Overload** dashboard]({% link v24.1/ui-overload-dashboard.md %}) now includes descriptions for all metrics. [#124509][#124509] -- Metrics on the [**Overload** dashboard]({% link v24.1/ui-overload-dashboard.md %}) have been reordered to improve their categorization. The metrics are now roughly in the following order: 1. Metrics to help determine which resource is constrained (IO, CPU); 2. Metrics to narrow down which [admission control]({% link v24.1/admission-control.md %}) queues are seeing requests waiting; 3. More advanced metrics about system health (goroutine scheduler, L0 sublevels, etc.). [#124509][#124509] -- New metrics `cr.store.storage.l0-sublevels` and `cr.node.go.scheduler_latency-p99.9` on the [**Overload** dashboard]({% link v24.1/ui-overload-dashboard.md %}) provide better visibility into overloaded resources. [#124509][#124509] -- There are now four separate graphs for Admission Queue Delay: 1. Foreground (regular) CPU work; 2. Store (IO) work; 3. Background (elastic) CPU work; 4. Replication Admission Control (store overload on replicas). [#124509][#124509] +- Fixed a bug where the [replication lag metric]({% link v24.1/physical-cluster-replication-monitoring.md %}) would falsely report high lag for multi-node clusters and on cutover. 
#123585 +- Fixed a bug that caused [**SQL Activity**]({% link v23.2/ui-sql-dashboard.md %}) entries sorted by `% of Runtime` to be sorted incorrectly. #123903 +- The "Admission Delay Rate", "Admission Work Rate", and "Requests Waiting For Flow Tokens" time-series charts have been removed from the DB Console. These charts can be difficult to interpret and provide little value for overload investigations. #124509 +- The [**Overload** dashboard]({% link v24.1/ui-overload-dashboard.md %}) now includes descriptions for all metrics. #124509 +- Metrics on the [**Overload** dashboard]({% link v24.1/ui-overload-dashboard.md %}) have been reordered to improve their categorization. The metrics are now roughly in the following order: 1. Metrics to help determine which resource is constrained (IO, CPU); 2. Metrics to narrow down which [admission control]({% link v24.1/admission-control.md %}) queues are seeing requests waiting; 3. More advanced metrics about system health (goroutine scheduler, L0 sublevels, etc.). #124509 +- New metrics `cr.store.storage.l0-sublevels` and `cr.node.go.scheduler_latency-p99.9` on the [**Overload** dashboard]({% link v24.1/ui-overload-dashboard.md %}) provide better visibility into overloaded resources. #124509 +- There are now four separate graphs for Admission Queue Delay: 1. Foreground (regular) CPU work; 2. Store (IO) work; 3. Background (elastic) CPU work; 4. Replication Admission Control (store overload on replicas). #124509

Bug fixes

-- Fixed a bug that would occur when [`ALTER TYPE ... DROP VALUE`]({% link v24.1/alter-type.md %}) is followed by `DROP SCHEMA CASCADE ...` in the same transaction. Previously, the `ALTER TYPE` schema change would get queued up to run at commit time, but by that point, the type may have already been removed, so the commit could fail. [#123577][#123577] -- Tables are now automatically repaired when the errors `invalid inbound foreign key ... origin table ID should be ...` or `invalid outbound foreign key ... reference table ID should be ...` occur. [#123668][#123668] -- Fixed a bug where a failed [`RESTORE`]({% link v24.1/restore.md %}) could not be retried without manual intervention. [#123205][#123205] -- Fixed a bug introduced in alpha versions of v23.1 where calling a routine could result in an unexpected `function ... does not exist` error. The bug is triggered when the routine is called twice using the exact same SQL query, and either: (a) the routine has polymorphic arguments, or: (b) between the two calls, the routine is replaced by a routine with the same name and different parameters. [#123516][#123516] -- Fixed a rare bug where a lease transfer could lead to a `side-transport update saw closed timestamp regression` panic. The bug could occur when a node was overloaded and [failing to heartbeat its node liveness record]({% link v24.1/cluster-setup-troubleshooting.md %}#node-liveness-issues). [#123533][#123533] -- Fixed a crash introduced in v24.1.0-beta.2 that could occur when planning statistics collection on a table with a [virtual computed column]({% link v24.1/computed-columns.md %}#virtual-computed-columns) using a [user-defined type]({% link v24.1/create-type.md %}) when the newly-introduced [cluster setting]({% link v24.1/cluster-settings.md %}) `sql.stats.virtual_computed_columns.enabled` is set to `true`. (The setting was introduced in v24.1.0-alpha.1, and defaults to `true`.) [#124060][#124060] -- Fixed a bug where an [`ALTER TABLE ... 
ALTER PRIMARY KEY`]({% link v24.1/alter-table.md %}#alter-primary-key) statement could hang if the table had any indexes that were referred to by [views]({% link v24.1/views.md %}) or [functions]({% link v24.1/functions-and-operators.md %}) using the `FORCE INDEX` clause. [#124323][#124323] -- Fixed a bug introduced in v24.1.0 where the `max_decimal_digits` argument of the `st_geojson` [builtin function]({% link v24.1/functions-and-operators.md %}#built-in-functions) was ignored and the default was used instead. [#124502][#124502] -- Scattering a range with a replication factor of 1 now no longer erroneously up-replicates the range to two replicas. Leases will also no longer thrash between nodes when perturbed with a replication factor of 1. [#124453][#124453] -- Fixed a bug where, if the `ttl_row_stats_poll_interval` storage parameter was non-zero for a table with [row-level TTL]({% link v24.1/row-level-ttl.md %}) enabled, the queries issued to update row statistics could block the job from completing. Now, if the job completes, these statistics queries are cancelled. This means that the `jobs.row_level_ttl.total_rows` and `jobs.row_level_ttl.total_expired_rows` metrics will report `0` if the job finishes before the row stats queries complete. [#124627][#124627] -- Fixed a bug where a [`DROP ROLE`]({% link v24.1/drop-role.md %}) or [`DROP USER`]({% link v24.1/drop-user.md %}) command could leave references behind inside types, which could prevent subsequent [`SHOW GRANTS`]({% link v24.1/show-grants.md %}) commands from working. [#124619][#124619] -- Fixed a bug where the `results_buffer_size` [session variable]({% link v24.1/session-variables.md %}) could not be configured by using the "options" query parameter in the connection string, but only as a top-level query parameter. Now, `results_buffer_size` can be configured in either part of the connection string. 
This variable still cannot be changed with the [`SET`]({% link v24.1/set-vars.md %}) command after the session begins. [#124775][#124775] -- Fixed a bug introduced in v20.2 where a change to a [user-defined type]({% link v24.1/create-type.md %}) could cause queries against tables using that type to fail with an error message like `histogram.go:694: span must be fully contained in the bucket`. The change to the user-defined type could come directly from an [`ALTER TYPE`]({% link v24.1/alter-type.md %}) statement, or indirectly from an [`ALTER DATABASE ... ADD REGION`]({% link v24.1/alter-database.md %}#add-region) or [`DROP REGION`]({% link v24.1/alter-database.md %}#drop-region) statement (which implicitly change the `crdb_internal_region` type). [#124856][#124856] +- Fixed a bug that would occur when [`ALTER TYPE ... DROP VALUE`]({% link v24.1/alter-type.md %}) is followed by `DROP SCHEMA CASCADE ...` in the same transaction. Previously, the `ALTER TYPE` schema change would get queued up to run at commit time, but by that point, the type may have already been removed, so the commit could fail. #123577 +- Tables are now automatically repaired when the errors `invalid inbound foreign key ... origin table ID should be ...` or `invalid outbound foreign key ... reference table ID should be ...` occur. #123668 +- Fixed a bug where a failed [`RESTORE`]({% link v24.1/restore.md %}) could not be retried without manual intervention. #123205 +- Fixed a bug introduced in alpha versions of v23.1 where calling a routine could result in an unexpected `function ... does not exist` error. The bug is triggered when the routine is called twice using the exact same SQL query, and either: (a) the routine has polymorphic arguments, or: (b) between the two calls, the routine is replaced by a routine with the same name and different parameters. #123516 +- Fixed a rare bug where a lease transfer could lead to a `side-transport update saw closed timestamp regression` panic. 
The bug could occur when a node was overloaded and [failing to heartbeat its node liveness record]({% link v24.1/cluster-setup-troubleshooting.md %}#node-liveness-issues). #123533 +- Fixed a crash introduced in v24.1.0-beta.2 that could occur when planning statistics collection on a table with a [virtual computed column]({% link v24.1/computed-columns.md %}#virtual-computed-columns) using a [user-defined type]({% link v24.1/create-type.md %}) when the newly-introduced [cluster setting]({% link v24.1/cluster-settings.md %}) `sql.stats.virtual_computed_columns.enabled` is set to `true`. (The setting was introduced in v24.1.0-alpha.1, and defaults to `true`.) #124060 +- Fixed a bug where an [`ALTER TABLE ... ALTER PRIMARY KEY`]({% link v24.1/alter-table.md %}#alter-primary-key) statement could hang if the table had any indexes that were referred to by [views]({% link v24.1/views.md %}) or [functions]({% link v24.1/functions-and-operators.md %}) using the `FORCE INDEX` clause. #124323 +- Fixed a bug introduced in v24.1.0 where the `max_decimal_digits` argument of the `st_geojson` [builtin function]({% link v24.1/functions-and-operators.md %}#built-in-functions) was ignored and the default was used instead. #124502 +- Scattering a range with a replication factor of 1 now no longer erroneously up-replicates the range to two replicas. Leases will also no longer thrash between nodes when perturbed with a replication factor of 1. #124453 +- Fixed a bug where, if the `ttl_row_stats_poll_interval` storage parameter was non-zero for a table with [row-level TTL]({% link v24.1/row-level-ttl.md %}) enabled, the queries issued to update row statistics could block the job from completing. Now, if the job completes, these statistics queries are cancelled. This means that the `jobs.row_level_ttl.total_rows` and `jobs.row_level_ttl.total_expired_rows` metrics will report `0` if the job finishes before the row stats queries complete. 
#124627 +- Fixed a bug where a [`DROP ROLE`]({% link v24.1/drop-role.md %}) or [`DROP USER`]({% link v24.1/drop-user.md %}) command could leave references behind inside types, which could prevent subsequent [`SHOW GRANTS`]({% link v24.1/show-grants.md %}) commands from working. #124619 +- Fixed a bug where the `results_buffer_size` [session variable]({% link v24.1/session-variables.md %}) could not be configured by using the "options" query parameter in the connection string, but only as a top-level query parameter. Now, `results_buffer_size` can be configured in either part of the connection string. This variable still cannot be changed with the [`SET`]({% link v24.1/set-vars.md %}) command after the session begins. #124775 +- Fixed a bug introduced in v20.2 where a change to a [user-defined type]({% link v24.1/create-type.md %}) could cause queries against tables using that type to fail with an error message like `histogram.go:694: span must be fully contained in the bucket`. The change to the user-defined type could come directly from an [`ALTER TYPE`]({% link v24.1/alter-type.md %}) statement, or indirectly from an [`ALTER DATABASE ... ADD REGION`]({% link v24.1/alter-database.md %}#add-region) or [`DROP REGION`]({% link v24.1/alter-database.md %}#drop-region) statement (which implicitly change the `crdb_internal_region` type). #124856

Performance improvements

-- Improved the efficiency of error handling in the [vectorized execution engine]({% link v24.1/vectorized-execution.md %}) in order to reduce the CPU overhead of statement timeout handling and reduce the potential for more statement timeouts. [#123499][#123499] -- Due to its poor performance, a [changefeed]({% link v24.1/change-data-capture-overview.md %}) optimization that aimed to reduce duplicates during aggregator restarts due to its bad performance has been disabled. [#123595][#123595] +- Improved the efficiency of error handling in the [vectorized execution engine]({% link v24.1/vectorized-execution.md %}) in order to reduce the CPU overhead of statement timeout handling and reduce the potential for more statement timeouts. #123499 +- Due to its poor performance, a [changefeed]({% link v24.1/change-data-capture-overview.md %}) optimization that aimed to reduce duplicates during aggregator restarts has been disabled. #123595
@@ -67,35 +67,3 @@ This release includes 173 merged PRs by 42 authors.
-[#123205]: https://github.com/cockroachdb/cockroach/pull/123205 -[#123430]: https://github.com/cockroachdb/cockroach/pull/123430 -[#123468]: https://github.com/cockroachdb/cockroach/pull/123468 -[#123499]: https://github.com/cockroachdb/cockroach/pull/123499 -[#123516]: https://github.com/cockroachdb/cockroach/pull/123516 -[#123533]: https://github.com/cockroachdb/cockroach/pull/123533 -[#123577]: https://github.com/cockroachdb/cockroach/pull/123577 -[#123585]: https://github.com/cockroachdb/cockroach/pull/123585 -[#123595]: https://github.com/cockroachdb/cockroach/pull/123595 -[#123668]: https://github.com/cockroachdb/cockroach/pull/123668 -[#123903]: https://github.com/cockroachdb/cockroach/pull/123903 -[#123967]: https://github.com/cockroachdb/cockroach/pull/123967 -[#124055]: https://github.com/cockroachdb/cockroach/pull/124055 -[#124060]: https://github.com/cockroachdb/cockroach/pull/124060 -[#124085]: https://github.com/cockroachdb/cockroach/pull/124085 -[#124256]: https://github.com/cockroachdb/cockroach/pull/124256 -[#124304]: https://github.com/cockroachdb/cockroach/pull/124304 -[#124323]: https://github.com/cockroachdb/cockroach/pull/124323 -[#124354]: https://github.com/cockroachdb/cockroach/pull/124354 -[#124369]: https://github.com/cockroachdb/cockroach/pull/124369 -[#124453]: https://github.com/cockroachdb/cockroach/pull/124453 -[#124490]: https://github.com/cockroachdb/cockroach/pull/124490 -[#124502]: https://github.com/cockroachdb/cockroach/pull/124502 -[#124509]: https://github.com/cockroachdb/cockroach/pull/124509 -[#124536]: https://github.com/cockroachdb/cockroach/pull/124536 -[#124619]: https://github.com/cockroachdb/cockroach/pull/124619 -[#124627]: https://github.com/cockroachdb/cockroach/pull/124627 -[#124633]: https://github.com/cockroachdb/cockroach/pull/124633 -[#124775]: https://github.com/cockroachdb/cockroach/pull/124775 -[#124856]: https://github.com/cockroachdb/cockroach/pull/124856 -[0f67079b6]: 
https://github.com/cockroachdb/cockroach/commit/0f67079b6 -[eab42ef7a]: https://github.com/cockroachdb/cockroach/commit/eab42ef7a diff --git a/src/current/_includes/releases/v24.1/v24.1.10.md b/src/current/_includes/releases/v24.1/v24.1.10.md index f4eb01a3db8..69bb3a82b4d 100644 --- a/src/current/_includes/releases/v24.1/v24.1.10.md +++ b/src/current/_includes/releases/v24.1/v24.1.10.md @@ -6,7 +6,7 @@ Release Date: January 9, 2025

Security updates

-- The cluster setting `server.jwt_authentication.issuers` now takes the issuer's configuration value apart from the URI. [#138187][#138187] +- The cluster setting `server.jwt_authentication.issuers` now takes the issuer's configuration value apart from the URI. #138187 - This can be set to one of the following values: 1. A string that Go can parse as a valid issuer URL, e.g., `'https://accounts.google.com'`. 1. A string that can be parsed as valid JSON array of issuer URLs, e.g., `['example.com/adfs','https://accounts.google.com']`. @@ -19,77 +19,44 @@ Release Date: January 9, 2025 - `changefeed.parallel_io_result_queue_nanos` - `changefeed.sink_batch_hist_nanos` - `changefeed.flush_hist_nanos` - - `changefeed.kafka_throttling_hist_nanos` [#136602][#136602] -- Added support for multiple seed brokers in the new Kafka sink. [#136746][#136746] -- Added a new metric (`distsender.rangefeed.catchup_ranges_waiting_client_side`) that counts how many rangefeeds are waiting on the client-side limiter to start performing catchup scans. [#136836][#136836] -- Added changefeed support for the `mvcc_timestamp` option with the `avro` format. If both options are specified, the Avro schema includes an `mvcc_timestamp` metadata field and emits the row's MVCC timestamp with the row data. [#136017][#136017] [#137594][#137594] -- Added a no-op `AWS_USE_PATH_STYLE` parameter for forward compatibility with v24.3. [#137100][#137100] + - `changefeed.kafka_throttling_hist_nanos` #136602 +- Added support for multiple seed brokers in the new Kafka sink. #136746 +- Added a new metric (`distsender.rangefeed.catchup_ranges_waiting_client_side`) that counts how many rangefeeds are waiting on the client-side limiter to start performing catchup scans. #136836 +- Added changefeed support for the `mvcc_timestamp` option with the `avro` format. If both options are specified, the Avro schema includes an `mvcc_timestamp` metadata field and emits the row's MVCC timestamp with the row data. 
#136017 #137594 +- Added a no-op `AWS_USE_PATH_STYLE` parameter for forward compatibility with v24.3. #137100

SQL language changes

-- Added the `legacy_varchar_typing` session setting, which reverts the changes of [#133037](https://github.com/cockroachdb/cockroach/pull/133037) that caused the change in typing behavior described in [#137837](https://github.com/cockroachdb/cockroach/pull/137837). Specifically, it makes type-checking and overload resolution ignore the newly added "unpreferred" overloads. This setting defaults to `on`. [#137921][#137921] +- Added the `legacy_varchar_typing` session setting, which reverts the changes of #133037 that caused the change in typing behavior described in #137837. Specifically, it makes type-checking and overload resolution ignore the newly added "unpreferred" overloads. This setting defaults to `on`. #137921

Operational changes

-- Removed the `sql.auth.resolve_membership_single_scan.enabled` cluster setting. This was added out of precaution in case it was necessary to revert back to the old behavior for looking up role memberships, but this escape hatch has never been needed in practice since this was added in v23.1. [#136121][#136121] -- Telemetry delivery is now considered successful even in cases where CockroachDB experiences a network timeout. This will prevent throttling in cases outside an operator's control. [#136478][#136478] -- When a schema change job is completed, rolls back, or encounters a failure, the time taken since the job began is now logged in a structured log in the `SQL_SCHEMA` log channel. [#136951][#136951] +- Removed the `sql.auth.resolve_membership_single_scan.enabled` cluster setting. This was added out of precaution in case it was necessary to revert back to the old behavior for looking up role memberships, but this escape hatch has never been needed in practice since this was added in v23.1. #136121 +- Telemetry delivery is now considered successful even in cases where CockroachDB experiences a network timeout. This will prevent throttling in cases outside an operator's control. #136478 +- When a schema change job is completed, rolls back, or encounters a failure, the time taken since the job began is now logged in a structured log in the `SQL_SCHEMA` log channel. #136951

Bug fixes

-- Fixed a bug where `ALTER COLUMN SET NOT NULL` was not enforced consistently when the table was created in the same transaction. [#136364][#136364] -- Fixed a bug where `CREATE RELATION / TYPE` could leave dangling namespace entries if the schema was concurrently being dropped. [#136408][#136408] -- The `idle_in_session_timeout` setting now excludes the time spent waiting for schema changer jobs to complete, preventing unintended session termination during schema change operations. [#136503][#136503] -- Fixed a bug that caused the optimizer to use stale table statistics after altering an `ENUM` type used in the table. [#136831][#136831] -- Table statistics collection in CockroachDB could previously run into `no bytes in account to release` errors in some edge cases (when the SQL memory budget, configured via `--max-sql-memory` flag, was close to being exhausted). The bug had been present since v21.2 and is now fixed. [#136164][#136164] -- CockroachDB now better respects the `statement_timeout` limit on queries involving the top K sort and merge join operations. [#136651][#136651] -- Fixed an issue where license enforcement was not consistently disabled for single-node clusters started with `cockroach start-single-node`. The fix ensures proper behavior on cluster restarts. [#137010][#137010] -- Fixed a bug that caused queries against tables with user-defined types to sometimes fail with errors after restoring those tables. [#137355][#137355] -- Fixed a bug that caused an incorrect filesystem to be logged as part of the store information. [#137112][#137112] -- Fixed a bug that had existed since v24.1 that would cause a set-returning UDF with `OUT` parameters to return a single row. [#137378][#137378] -- Fixed a bug that could cause an internal error if a table with an implicit (`rowid`) primary key was locked from within a subquery, e.g.: `SELECT * FROM (SELECT * FROM foo WHERE x = 2) FOR UPDATE;`. 
The error could occur either under `READ COMMITTED` isolation, or with `optimizer_use_lock_op_for_serializable` enabled. [#131397][#131397] -- Fixed an issue where adding an existing column with the `IF NOT EXISTS` option could exit too early, skipping necessary handling of the abstract syntax tree (AST). This could lead to failure of the `ALTER` statement. [#137677][#137677] -- Fixed an issue where a schema change could incorrectly cause a changefeed to fail with an assertion error like `received boundary timestamp ... of type ... before reaching existing boundary of type ...`. [#137704][#137704] -- Internal scans are now exempt from the `sql.defaults.disallow_full_table_scans.enabled` setting, allowing index creation even when the cluster setting is active. [#137722][#137722] -- Using more than one `DECLARE` statement in the definition of a user-defined function (UDF) now correctly declares additional variables. [#135739][#135739] -- Fixed an issue where corrupted table statistics could cause the `cockroach` process to crash. [#136040][#136040] -- Fixed a bug that could cause the password for the `root` user to be deleted while upgrading to v24.1. This bug only affected clusters that were initially created with v22.2 or earlier. The same bug could also cause the `defaultdb` and `postgres` databases to be recreated during the upgrade to v24.1 if they had been previously deleted. [#136074][#136074] -- `CLOSE CURSOR` statements are now allowed in read-only transactions, similar to PostgreSQL. The bug had been present since at least v23.1. [#137790][#137790] +- Fixed a bug where `ALTER COLUMN SET NOT NULL` was not enforced consistently when the table was created in the same transaction. #136364 +- Fixed a bug where `CREATE RELATION / TYPE` could leave dangling namespace entries if the schema was concurrently being dropped. 
#136408 +- The `idle_in_session_timeout` setting now excludes the time spent waiting for schema changer jobs to complete, preventing unintended session termination during schema change operations. #136503 +- Fixed a bug that caused the optimizer to use stale table statistics after altering an `ENUM` type used in the table. #136831 +- Table statistics collection in CockroachDB could previously run into `no bytes in account to release` errors in some edge cases (when the SQL memory budget, configured via `--max-sql-memory` flag, was close to being exhausted). The bug had been present since v21.2 and is now fixed. #136164 +- CockroachDB now better respects the `statement_timeout` limit on queries involving the top K sort and merge join operations. #136651 +- Fixed an issue where license enforcement was not consistently disabled for single-node clusters started with `cockroach start-single-node`. The fix ensures proper behavior on cluster restarts. #137010 +- Fixed a bug that caused queries against tables with user-defined types to sometimes fail with errors after restoring those tables. #137355 +- Fixed a bug that caused an incorrect filesystem to be logged as part of the store information. #137112 +- Fixed a bug that had existed since v24.1 that would cause a set-returning UDF with `OUT` parameters to return a single row. #137378 +- Fixed a bug that could cause an internal error if a table with an implicit (`rowid`) primary key was locked from within a subquery, e.g.: `SELECT * FROM (SELECT * FROM foo WHERE x = 2) FOR UPDATE;`. The error could occur either under `READ COMMITTED` isolation, or with `optimizer_use_lock_op_for_serializable` enabled. #131397 +- Fixed an issue where adding an existing column with the `IF NOT EXISTS` option could exit too early, skipping necessary handling of the abstract syntax tree (AST). This could lead to failure of the `ALTER` statement. 
#137677 +- Fixed an issue where a schema change could incorrectly cause a changefeed to fail with an assertion error like `received boundary timestamp ... of type ... before reaching existing boundary of type ...`. #137704 +- Internal scans are now exempt from the `sql.defaults.disallow_full_table_scans.enabled` setting, allowing index creation even when the cluster setting is active. #137722 +- Using more than one `DECLARE` statement in the definition of a user-defined function (UDF) now correctly declares additional variables. #135739 +- Fixed an issue where corrupted table statistics could cause the `cockroach` process to crash. #136040 +- Fixed a bug that could cause the password for the `root` user to be deleted while upgrading to v24.1. This bug only affected clusters that were initially created with v22.2 or earlier. The same bug could also cause the `defaultdb` and `postgres` databases to be recreated during the upgrade to v24.1 if they had been previously deleted. #136074 +- `CLOSE CURSOR` statements are now allowed in read-only transactions, similar to PostgreSQL. The bug had been present since at least v23.1. #137790

Performance improvements

-- Improved the internal caching logic for role membership information. This reduces the latency impact of commands such as `DROP ROLE`, `CREATE ROLE`, and `GRANT role TO user`, which cause the role membership cache to be invalidated. [#136121][#136121] +- Improved the internal caching logic for role membership information. This reduces the latency impact of commands such as `DROP ROLE`, `CREATE ROLE`, and `GRANT role TO user`, which cause the role membership cache to be invalidated. #136121 -[#131397]: https://github.com/cockroachdb/cockroach/pull/131397 -[#135739]: https://github.com/cockroachdb/cockroach/pull/135739 -[#136017]: https://github.com/cockroachdb/cockroach/pull/136017 -[#136040]: https://github.com/cockroachdb/cockroach/pull/136040 -[#136074]: https://github.com/cockroachdb/cockroach/pull/136074 -[#136121]: https://github.com/cockroachdb/cockroach/pull/136121 -[#136164]: https://github.com/cockroachdb/cockroach/pull/136164 -[#136364]: https://github.com/cockroachdb/cockroach/pull/136364 -[#136408]: https://github.com/cockroachdb/cockroach/pull/136408 -[#136478]: https://github.com/cockroachdb/cockroach/pull/136478 -[#136503]: https://github.com/cockroachdb/cockroach/pull/136503 -[#136602]: https://github.com/cockroachdb/cockroach/pull/136602 -[#136651]: https://github.com/cockroachdb/cockroach/pull/136651 -[#136682]: https://github.com/cockroachdb/cockroach/pull/136682 -[#136708]: https://github.com/cockroachdb/cockroach/pull/136708 -[#136746]: https://github.com/cockroachdb/cockroach/pull/136746 -[#136831]: https://github.com/cockroachdb/cockroach/pull/136831 -[#136836]: https://github.com/cockroachdb/cockroach/pull/136836 -[#136951]: https://github.com/cockroachdb/cockroach/pull/136951 -[#137010]: https://github.com/cockroachdb/cockroach/pull/137010 -[#137100]: https://github.com/cockroachdb/cockroach/pull/137100 -[#137112]: https://github.com/cockroachdb/cockroach/pull/137112 -[#137114]: https://github.com/cockroachdb/cockroach/pull/137114 
-[#137355]: https://github.com/cockroachdb/cockroach/pull/137355 -[#137378]: https://github.com/cockroachdb/cockroach/pull/137378 -[#137459]: https://github.com/cockroachdb/cockroach/pull/137459 -[#137594]: https://github.com/cockroachdb/cockroach/pull/137594 -[#137677]: https://github.com/cockroachdb/cockroach/pull/137677 -[#137704]: https://github.com/cockroachdb/cockroach/pull/137704 -[#137722]: https://github.com/cockroachdb/cockroach/pull/137722 -[#137790]: https://github.com/cockroachdb/cockroach/pull/137790 -[#137921]: https://github.com/cockroachdb/cockroach/pull/137921 -[#138187]: https://github.com/cockroachdb/cockroach/pull/138187 diff --git a/src/current/_includes/releases/v24.1/v24.1.11.md b/src/current/_includes/releases/v24.1/v24.1.11.md index 593b1999cbd..f3b8d58f191 100644 --- a/src/current/_includes/releases/v24.1/v24.1.11.md +++ b/src/current/_includes/releases/v24.1/v24.1.11.md @@ -6,10 +6,9 @@ Release Date: January 31, 2025

Bug fixes

-- Fixed a bug that could cause `SHOW TABLES` and other introspection operations to encounter a `"batch timestamp ... must be after replica GC threshold"` error. [#140177][#140177] +- Fixed a bug that could cause `SHOW TABLES` and other introspection operations to encounter a `"batch timestamp ... must be after replica GC threshold"` error. #140177 {{site.data.alerts.callout_danger}} This fix is present in v24.1.11 and [v24.1.13](#v24-1-13), but was **not** released in [v24.1.12](#v24-1-12). {{site.data.alerts.end}} -[#140177]: https://github.com/cockroachdb/cockroach/pull/140177 diff --git a/src/current/_includes/releases/v24.1/v24.1.12.md b/src/current/_includes/releases/v24.1/v24.1.12.md index 06413d38bf6..83f23976328 100644 --- a/src/current/_includes/releases/v24.1/v24.1.12.md +++ b/src/current/_includes/releases/v24.1/v24.1.12.md @@ -6,47 +6,33 @@ Release Date: February 6, 2025

General changes

-- The protected timestamp records of running changefeeds are now updated when the set of targets changes, such as when system tables are added to the protected tables list. [#138668][#138668] +- The protected timestamp records of running changefeeds are now updated when the set of targets changes, such as when system tables are added to the protected tables list. #138668

Backward-incompatible changes

-- In [v24.1.11](#v24-1-11-bug-fixes), a bug was fixed that could cause `SHOW TABLES` and other introspection operations to encounter a `"batch timestamp ... must be after replica GC threshold"` error. This fix is **not** present in v24.1.12, but has been released in [v24.1.13](#v24-1-13). [#140177][#140177] +- In [v24.1.11](#v24-1-11-bug-fixes), a bug was fixed that could cause `SHOW TABLES` and other introspection operations to encounter a `"batch timestamp ... must be after replica GC threshold"` error. This fix is **not** present in v24.1.12, but has been released in [v24.1.13](#v24-1-13). #140177

SQL language changes

-- Since v23.2 table statistics histograms have been collected for non-indexed JSON columns. Histograms are no longer collected for these columns if the `sql.stats.non_indexed_json_histograms.enabled` cluster setting is set to `false`. This reduces memory usage during table statistics collection, for both automatic and manual collection via `ANALYZE` and `CREATE STATISTICS`. [#140268][#140268] -- Added support for a new index hint, `AVOID_FULL_SCAN`, which will prevent the optimizer from planning a full scan for the specified table if any other plan is possible. The hint can be used in the same way as other existing index hints. For example, `SELECT * FROM table_name@{AVOID_FULL_SCAN};`. This hint is similar to `NO_FULL_SCAN`, but will not error if a full scan cannot be avoided. Note that normally a full scan of a partial index would not be considered a "full scan" for the purposes of the `NO_FULL_SCAN` and `AVOID_FULL_SCAN` hints, but if the user has explicitly forced the partial index via `FORCE_INDEX=index_name`, CockroachDB does consider it a full scan. [#140272][#140272] -- Added the `optimizer_prefer_bounded_cardinality` session setting, which instructs the optimizer to prefer query plans where every expression has a guaranteed upper-bound on the number of rows it will process. This may help the optimizer produce better query plans in some cases. This setting is disabled by default. [#140272][#140272] -- Added the `optimizer_min_row_count` session setting, which sets a lower bound on row count estimates for relational expressions during query planning. A value of zero, which is the default, indicates no lower bound. Note that if this is set to a value greater than zero, a row count of zero can still be estimated for expressions with a cardinality of zero, e.g., for a contradictory filter. Setting this to a value higher than `0`, such as `1`, may yield better query plans in some cases, such as when statistics are frequently stale and inaccurate. 
[#140272][#140272] +- Since v23.2 table statistics histograms have been collected for non-indexed JSON columns. Histograms are no longer collected for these columns if the `sql.stats.non_indexed_json_histograms.enabled` cluster setting is set to `false`. This reduces memory usage during table statistics collection, for both automatic and manual collection via `ANALYZE` and `CREATE STATISTICS`. #140268 +- Added support for a new index hint, `AVOID_FULL_SCAN`, which will prevent the optimizer from planning a full scan for the specified table if any other plan is possible. The hint can be used in the same way as other existing index hints. For example, `SELECT * FROM table_name@{AVOID_FULL_SCAN};`. This hint is similar to `NO_FULL_SCAN`, but will not error if a full scan cannot be avoided. Note that normally a full scan of a partial index would not be considered a "full scan" for the purposes of the `NO_FULL_SCAN` and `AVOID_FULL_SCAN` hints, but if the user has explicitly forced the partial index via `FORCE_INDEX=index_name`, CockroachDB does consider it a full scan. #140272 +- Added the `optimizer_prefer_bounded_cardinality` session setting, which instructs the optimizer to prefer query plans where every expression has a guaranteed upper-bound on the number of rows it will process. This may help the optimizer produce better query plans in some cases. This setting is disabled by default. #140272 +- Added the `optimizer_min_row_count` session setting, which sets a lower bound on row count estimates for relational expressions during query planning. A value of zero, which is the default, indicates no lower bound. Note that if this is set to a value greater than zero, a row count of zero can still be estimated for expressions with a cardinality of zero, e.g., for a contradictory filter. Setting this to a value higher than `0`, such as `1`, may yield better query plans in some cases, such as when statistics are frequently stale and inaccurate. #140272

Operational changes

-- Added new metrics that expose the TTL for various certificates. [#138659][#138659] -- Introduced the metric `sql.schema_changer.object_count` that keeps track of the count of objects in the cluster. [#138839][#138839] +- Added new metrics that expose the TTL for various certificates. #138659 +- Introduced the metric `sql.schema_changer.object_count` that keeps track of the count of objects in the cluster. #138839

Bug fixes

-- `ALTER BACKUP SCHEDULE` no longer fails on schedules with a collection URI that contains a space. [#138080][#138080] -- Previously, `SHOW CREATE TABLE` was showing incorrect data with regards to inverted indexes. It now shows the correct data that can be repeatedly entered back into CockroachDB to recreate the same table. [#138168][#138168] -- Fixed a bug where querying the `pg_catalog.pg_constraint` table while the schema changer was dropping a constraint could result in a query error. [#138285][#138285] -- Fixed a timing issue between `ALTER VIEW .. RENAME` and `DROP VIEW` that caused repeated failures in the `DROP VIEW` job. [#137887][#137887] -- Queries that perform a cast from the string representation of an array containing `GEOMETRY` or `GEOGRAPHY` types to a SQL array type will now succeed. [#138693][#138693] -- `security.certificate.*` metrics will now be updated if a node loads new certificates while running. [#138659][#138659] -- When the session variable `allow_role_memberships_to_change_during_transaction` is set, it is now possible to create and drop users quickly even when there are contending transactions on the `system.users` and `system.role_options` system tables. [#139027][#139027] -- Fixed a bug where the error `batch timestamp ... must be after replica GC threshold` could occur during a schema change backfill operation, and cause the schema change job to retry infinitely. Now this error is treated as permanent, and will cause the job to enter the `failed` state. [#139248][#139248] -- CockroachDB could previously hit a bounded memory leak when collecting table statistics on a table that had both very wide (10KiB or more) and relatively small (under 400B) `BYTES`-like values within the same row. This has been present since before v19.2. Additionally, in v24.1.0, a bug was introduced that made this leak also apply to `STRING`-like values. 
[#139174][#139174] - -[#137887]: https://github.com/cockroachdb/cockroach/pull/137887 -[#138080]: https://github.com/cockroachdb/cockroach/pull/138080 -[#138168]: https://github.com/cockroachdb/cockroach/pull/138168 -[#138285]: https://github.com/cockroachdb/cockroach/pull/138285 -[#138659]: https://github.com/cockroachdb/cockroach/pull/138659 -[#138668]: https://github.com/cockroachdb/cockroach/pull/138668 -[#138693]: https://github.com/cockroachdb/cockroach/pull/138693 -[#138839]: https://github.com/cockroachdb/cockroach/pull/138839 -[#139027]: https://github.com/cockroachdb/cockroach/pull/139027 -[#139174]: https://github.com/cockroachdb/cockroach/pull/139174 -[#139248]: https://github.com/cockroachdb/cockroach/pull/139248 -[#140268]: https://github.com/cockroachdb/cockroach/pull/140268 -[#140272]: https://github.com/cockroachdb/cockroach/pull/140272 -[#140177]: https://github.com/cockroachdb/cockroach/pull/140177 +- `ALTER BACKUP SCHEDULE` no longer fails on schedules with a collection URI that contains a space. #138080 +- Previously, `SHOW CREATE TABLE` was showing incorrect data with regards to inverted indexes. It now shows the correct data that can be repeatedly entered back into CockroachDB to recreate the same table. #138168 +- Fixed a bug where querying the `pg_catalog.pg_constraint` table while the schema changer was dropping a constraint could result in a query error. #138285 +- Fixed a timing issue between `ALTER VIEW .. RENAME` and `DROP VIEW` that caused repeated failures in the `DROP VIEW` job. #137887 +- Queries that perform a cast from the string representation of an array containing `GEOMETRY` or `GEOGRAPHY` types to a SQL array type will now succeed. #138693 +- `security.certificate.*` metrics will now be updated if a node loads new certificates while running. 
#138659 +- When the session variable `allow_role_memberships_to_change_during_transaction` is set, it is now possible to create and drop users quickly even when there are contending transactions on the `system.users` and `system.role_options` system tables. #139027 +- Fixed a bug where the error `batch timestamp ... must be after replica GC threshold` could occur during a schema change backfill operation, and cause the schema change job to retry infinitely. Now this error is treated as permanent, and will cause the job to enter the `failed` state. #139248 +- CockroachDB could previously hit a bounded memory leak when collecting table statistics on a table that had both very wide (10KiB or more) and relatively small (under 400B) `BYTES`-like values within the same row. This has been present since before v19.2. Additionally, in v24.1.0, a bug was introduced that made this leak also apply to `STRING`-like values. #139174 + diff --git a/src/current/_includes/releases/v24.1/v24.1.13.md b/src/current/_includes/releases/v24.1/v24.1.13.md index 9af1aeea050..8297ce5980a 100644 --- a/src/current/_includes/releases/v24.1/v24.1.13.md +++ b/src/current/_includes/releases/v24.1/v24.1.13.md @@ -6,10 +6,9 @@ Release Date: February 19, 2025

Bug fixes

-- Fixed a bug that could cause `SHOW TABLES` and other introspection operations to encounter a `"batch timestamp ... must be after replica GC threshold"` error. [#141657][#141657] +- Fixed a bug that could cause `SHOW TABLES` and other introspection operations to encounter a `"batch timestamp ... must be after replica GC threshold"` error. #141657 {{site.data.alerts.callout_danger}} This fix is present in v24.1.11 and [v24.1.13](#v24-1-13), but was **not** released in [v24.1.12](#v24-1-12). {{site.data.alerts.end}} -[#141657]: https://github.com/cockroachdb/cockroach/pull/141657 diff --git a/src/current/_includes/releases/v24.1/v24.1.14.md b/src/current/_includes/releases/v24.1/v24.1.14.md index 9ab9e01b90f..2fd662382f7 100644 --- a/src/current/_includes/releases/v24.1/v24.1.14.md +++ b/src/current/_includes/releases/v24.1/v24.1.14.md @@ -6,48 +6,30 @@ Release Date: March 6, 2025

SQL language changes

-- Since v23.2, table statistics histograms have been collected for non-indexed `JSONB` columns. Histograms are no longer collected for these columns if the `sql.stats.non_indexed_json_histograms.enabled` cluster setting is set to `false`. This reduces memory usage during table statistics collection for both automatic and manual collection via `ANALYZE` and `CREATE STATISTICS`. [#140142][#140142] -- Added support for a new index hint, `AVOID_FULL_SCAN`, which will prevent the optimizer from planning a full scan for the specified table if any other plan is possible. The hint can be used in the same way as other existing index hints. For example, `SELECT * FROM table_name@{AVOID_FULL_SCAN};`. This hint is similar to `NO_FULL_SCAN`, but will not error if a full scan cannot be avoided. Note that normally a full scan of a partial index would not be considered a "full scan" for the purposes of the `NO_FULL_SCAN` and `AVOID_FULL_SCAN` hints, but if the user has explicitly forced the partial index via `FORCE_INDEX=index_name`, we do consider it a full scan. [#140259][#140259] -- Added the `optimizer_prefer_bounded_cardinality` session setting, which instructs the optimizer to prefer query plans where every expression has a guaranteed upper-bound on the number of rows it will process. This may help the optimizer produce better query plans in some cases. This setting is disabled by default. [#140259][#140259] -- Added the `optimizer_min_row_count` session setting, which sets a lower bound on row count estimates for relational expressions during query planning. A value of `0`, which is the default, indicates no lower bound. Note that if this is set to a value greater than 0, a row count of zero can still be estimated for expressions with a cardinality of zero, e.g., for a contradictory filter. Setting this to a value higher than `0`, such as `1`, may yield better query plans in some cases, such as when statistics are frequently stale and inaccurate. 
[#140259][#140259] -- Fixed a bug existing only in pre-release versions of v25.1 which could cause unexpected errors during planning for `VALUES` expressions containing function calls with multiple overloads. [#140648][#140648] -- Added the `optimizer_check_input_min_row_count` session setting to control the minimum row count estimate for buffer scans of foreign key and uniqueness checks. It defaults to `0`. [#141378][#141378] +- Since v23.2, table statistics histograms have been collected for non-indexed `JSONB` columns. Histograms are no longer collected for these columns if the `sql.stats.non_indexed_json_histograms.enabled` cluster setting is set to `false`. This reduces memory usage during table statistics collection for both automatic and manual collection via `ANALYZE` and `CREATE STATISTICS`. #140142 +- Added support for a new index hint, `AVOID_FULL_SCAN`, which will prevent the optimizer from planning a full scan for the specified table if any other plan is possible. The hint can be used in the same way as other existing index hints. For example, `SELECT * FROM table_name@{AVOID_FULL_SCAN};`. This hint is similar to `NO_FULL_SCAN`, but will not error if a full scan cannot be avoided. Note that normally a full scan of a partial index would not be considered a "full scan" for the purposes of the `NO_FULL_SCAN` and `AVOID_FULL_SCAN` hints, but if the user has explicitly forced the partial index via `FORCE_INDEX=index_name`, we do consider it a full scan. #140259 +- Added the `optimizer_prefer_bounded_cardinality` session setting, which instructs the optimizer to prefer query plans where every expression has a guaranteed upper-bound on the number of rows it will process. This may help the optimizer produce better query plans in some cases. This setting is disabled by default. #140259 +- Added the `optimizer_min_row_count` session setting, which sets a lower bound on row count estimates for relational expressions during query planning. 
A value of `0`, which is the default, indicates no lower bound. Note that if this is set to a value greater than 0, a row count of zero can still be estimated for expressions with a cardinality of zero, e.g., for a contradictory filter. Setting this to a value higher than `0`, such as `1`, may yield better query plans in some cases, such as when statistics are frequently stale and inaccurate. #140259 +- Fixed a bug existing only in pre-release versions of v25.1 which could cause unexpected errors during planning for `VALUES` expressions containing function calls with multiple overloads. #140648 +- Added the `optimizer_check_input_min_row_count` session setting to control the minimum row count estimate for buffer scans of foreign key and uniqueness checks. It defaults to `0`. #141378

Operational changes

-- The `changefeed.max_behind_nanos` metric now supports scoping with metric labels. [#139239][#139239] +- The `changefeed.max_behind_nanos` metric now supports scoping with metric labels. #139239

Command-line changes

-- Improved the performance of the debug zip query that collects `transaction_contention_events` data, reducing the chances of "memory budget exceeded" or "query execution canceled due to statement timeout" errors. [#139752][#139752] +- Improved the performance of the debug zip query that collects `transaction_contention_events` data, reducing the chances of "memory budget exceeded" or "query execution canceled due to statement timeout" errors. #139752

Bug fixes

-- Fixed a bug that, under rare circumstances, could cause draining a node to fail with the error message "some sessions did not respond to cancellation within 1s". [#139477][#139477] -- Fixed a bug that prevented the `CREATE` statement for a routine from being shown in a statement bundle. This happened when the routine was created on a schema other than `public`. The bug had existed since v23.1. [#136127][#136127] -- Fixed a memory leak that could previously occur when evaluating some memory-intensive queries via the vectorized engine in CockroachDB. The leak had been present since v20.2. [#139097][#139097] -- Fixed a bug that could cause `SHOW TABLES` and other introspection operations to encounter a "batch timestamp must be after replica GC threshold" error. [#140086][#140086] -- Removed duplicate columns in the Parquet output from changefeeds using CDC queries. [#140151][#140151] -- Fixed a rare bug in which a query might fail with the error "could not find computed column expression for column in table" while dropping a virtual computed column from the table. This bug was introduced in v23.2.4. [#139873][#139873] -- Fixed a bug that would cause an internal error when the result of a `RECORD`-returning user-defined function (UDF) was wrapped by another expression (such as `COALESCE`) within a `VALUES` clause. [#140648][#140648] -- The **Data Distribution** report on the **Advanced Debug** page will no longer crash if there are null values for `raw_sql_config` in `crdb_internal.zones`. [#140659][#140659] -- Upgraded the Sarama Kafka client library to pick up a fix for a race condition bug that could occur when Kafka throttling was enabled. 
[#140157][#140157] - -[#136127]: https://github.com/cockroachdb/cockroach/pull/136127 -[#139097]: https://github.com/cockroachdb/cockroach/pull/139097 -[#139239]: https://github.com/cockroachdb/cockroach/pull/139239 -[#139477]: https://github.com/cockroachdb/cockroach/pull/139477 -[#139752]: https://github.com/cockroachdb/cockroach/pull/139752 -[#139873]: https://github.com/cockroachdb/cockroach/pull/139873 -[#140086]: https://github.com/cockroachdb/cockroach/pull/140086 -[#140142]: https://github.com/cockroachdb/cockroach/pull/140142 -[#140151]: https://github.com/cockroachdb/cockroach/pull/140151 -[#140157]: https://github.com/cockroachdb/cockroach/pull/140157 -[#140249]: https://github.com/cockroachdb/cockroach/pull/140249 -[#140259]: https://github.com/cockroachdb/cockroach/pull/140259 -[#140281]: https://github.com/cockroachdb/cockroach/pull/140281 -[#140648]: https://github.com/cockroachdb/cockroach/pull/140648 -[#140659]: https://github.com/cockroachdb/cockroach/pull/140659 -[#140990]: https://github.com/cockroachdb/cockroach/pull/140990 -[#141138]: https://github.com/cockroachdb/cockroach/pull/141138 -[#141378]: https://github.com/cockroachdb/cockroach/pull/141378 +- Fixed a bug that, under rare circumstances, could cause draining a node to fail with the error message "some sessions did not respond to cancellation within 1s". #139477 +- Fixed a bug that prevented the `CREATE` statement for a routine from being shown in a statement bundle. This happened when the routine was created on a schema other than `public`. The bug had existed since v23.1. #136127 +- Fixed a memory leak that could previously occur when evaluating some memory-intensive queries via the vectorized engine in CockroachDB. The leak had been present since v20.2. #139097 +- Fixed a bug that could cause `SHOW TABLES` and other introspection operations to encounter a "batch timestamp must be after replica GC threshold" error. 
#140086 +- Removed duplicate columns in the Parquet output from changefeeds using CDC queries. #140151 +- Fixed a rare bug in which a query might fail with the error "could not find computed column expression for column in table" while dropping a virtual computed column from the table. This bug was introduced in v23.2.4. #139873 +- Fixed a bug that would cause an internal error when the result of a `RECORD`-returning user-defined function (UDF) was wrapped by another expression (such as `COALESCE`) within a `VALUES` clause. #140648 +- The **Data Distribution** report on the **Advanced Debug** page will no longer crash if there are null values for `raw_sql_config` in `crdb_internal.zones`. #140659 +- Upgraded the Sarama Kafka client library to pick up a fix for a race condition bug that could occur when Kafka throttling was enabled. #140157 + diff --git a/src/current/_includes/releases/v24.1/v24.1.15.md b/src/current/_includes/releases/v24.1/v24.1.15.md index 54ac6c48d17..c5171abf826 100644 --- a/src/current/_includes/releases/v24.1/v24.1.15.md +++ b/src/current/_includes/releases/v24.1/v24.1.15.md @@ -6,39 +6,25 @@ Release Date: April 3, 2025

Operational changes

-- Added the `sql.statement_timeout.count` metric to track the number of SQL statements that fail due to exceeding the statement timeout. [#142157][#142157] -- Added the `sql.transaction_timeout.count` metric to track the number of SQL statements that fail due to exceeding the transaction timeout. [#142157][#142157] -- The `node decommission` CLI command now waits until the target node is fully drained before marking it as decommissioned. Previously, the command would start draining but not wait, leaving the node in an unstable state where it could still accept client requests while being unable to communicate with the cluster, causing those requests to hang or fail with unexpected errors. [#142427][#142427] +- Added the `sql.statement_timeout.count` metric to track the number of SQL statements that fail due to exceeding the statement timeout. #142157 +- Added the `sql.transaction_timeout.count` metric to track the number of SQL statements that fail due to exceeding the transaction timeout. #142157 +- The `node decommission` CLI command now waits until the target node is fully drained before marking it as decommissioned. Previously, the command would start draining but not wait, leaving the node in an unstable state where it could still accept client requests while being unable to communicate with the cluster, causing those requests to hang or fail with unexpected errors. #142427

Bug fixes

-- Fixed a bug that could cause `SHOW TABLES` and other introspection operations to encounter a "batch timestamp must be after replica GC threshold" error. [#141723][#141723] -- Fixed a bug that could cause gateway nodes to panic when performing an `UPSERT` on a table with a `BOOL` primary key column and a partial index using the primary key column as the predicate expression. [#141824][#141824] -- Fixed a bug where `CREATE SEQUENCE` without concurrent DDL operations could hit a retry error due to incorrect schema modification. [#142609][#142609] -- Fixed a bug where CockroachDB could incorrectly evaluate casts to some OID types (e.g., `REGCLASS`). This issue had been present since at least v22.1. [#141959][#141959] -- Fixed a bug that could cause `nil pointer dereference` errors when executing statements with user-defined functions (UDFs) or certain built-in functions like `obj_description`. [#141651][#141651] -- Fixed a bug where nodes drained during decommissioning could interrupt active SQL connections unexpectedly, even when drain was expected to wait for them to complete. [#142816][#142816] -- Fixed a bug where the fraction completed and internal checkpoints during an index backfill operation would stop being written if any periodic fraction/checkpoint write failed. Progress is now additionally logged to aid debugging. This bug affected schema changes such as creating an index or adding a non-nullable column. [#141786][#141786] -- Fixed a bug that could prevent `SHOW CREATE TABLE` from working if a database was offline (e.g., due to a `RESTORE`). [#141510][#141510] -- Fixed a bug where tuple labels were sometimes disregarded, causing unexpected behavior, such as when converting a tuple to `JSON` with `to_jsonb`. This bug existed since v22.1.0 and became more likely to cause issues after changes in v24.1.7. 
[#142137][#142137] -- Fixed a bug where the declarative schema changer allowed `CREATE SEQUENCE` operations to proceed even while a `DROP SCHEMA` or `DROP DATABASE` was in progress. Such operations now retry if the parent object has a schema change in progress. [#142761][#142761] -- Fixed a bug in `v24.1.14`, `v24.3.7`, `v24.3.8`, and `v25.1` that could cause a nil-pointer error when a column's default expression contained a volatile expression (like `nextval`) as a UDF argument. [#143637][#143637] +- Fixed a bug that could cause `SHOW TABLES` and other introspection operations to encounter a "batch timestamp must be after replica GC threshold" error. #141723 +- Fixed a bug that could cause gateway nodes to panic when performing an `UPSERT` on a table with a `BOOL` primary key column and a partial index using the primary key column as the predicate expression. #141824 +- Fixed a bug where `CREATE SEQUENCE` without concurrent DDL operations could hit a retry error due to incorrect schema modification. #142609 +- Fixed a bug where CockroachDB could incorrectly evaluate casts to some OID types (e.g., `REGCLASS`). This issue had been present since at least v22.1. #141959 +- Fixed a bug that could cause `nil pointer dereference` errors when executing statements with user-defined functions (UDFs) or certain built-in functions like `obj_description`. #141651 +- Fixed a bug where nodes drained during decommissioning could interrupt active SQL connections unexpectedly, even when drain was expected to wait for them to complete. #142816 +- Fixed a bug where the fraction completed and internal checkpoints during an index backfill operation would stop being written if any periodic fraction/checkpoint write failed. Progress is now additionally logged to aid debugging. This bug affected schema changes such as creating an index or adding a non-nullable column. 
#141786 +- Fixed a bug that could prevent `SHOW CREATE TABLE` from working if a database was offline (e.g., due to a `RESTORE`). #141510 +- Fixed a bug where tuple labels were sometimes disregarded, causing unexpected behavior, such as when converting a tuple to `JSON` with `to_jsonb`. This bug existed since v22.1.0 and became more likely to cause issues after changes in v24.1.7. #142137 +- Fixed a bug where the declarative schema changer allowed `CREATE SEQUENCE` operations to proceed even while a `DROP SCHEMA` or `DROP DATABASE` was in progress. Such operations now retry if the parent object has a schema change in progress. #142761 +- Fixed a bug in `v24.1.14`, `v24.3.7`, `v24.3.8`, and `v25.1` that could cause a nil-pointer error when a column's default expression contained a volatile expression (like `nextval`) as a UDF argument. #143637

Miscellaneous

-- Configuring the `sql.ttl.default_delete_rate_limit` cluster setting now displays a notice clarifying that the TTL rate limit is per leaseholder per table, with a link to the documentation. [#142832][#142832] - -[#142157]: https://github.com/cockroachdb/cockroach/pull/142157 -[#142427]: https://github.com/cockroachdb/cockroach/pull/142427 -[#141510]: https://github.com/cockroachdb/cockroach/pull/141510 -[#142761]: https://github.com/cockroachdb/cockroach/pull/142761 -[#141824]: https://github.com/cockroachdb/cockroach/pull/141824 -[#141959]: https://github.com/cockroachdb/cockroach/pull/141959 -[#141651]: https://github.com/cockroachdb/cockroach/pull/141651 -[#143637]: https://github.com/cockroachdb/cockroach/pull/143637 -[#142609]: https://github.com/cockroachdb/cockroach/pull/142609 -[#141786]: https://github.com/cockroachdb/cockroach/pull/141786 -[#142832]: https://github.com/cockroachdb/cockroach/pull/142832 -[#141723]: https://github.com/cockroachdb/cockroach/pull/141723 -[#142816]: https://github.com/cockroachdb/cockroach/pull/142816 -[#142137]: https://github.com/cockroachdb/cockroach/pull/142137 +- Configuring the `sql.ttl.default_delete_rate_limit` cluster setting now displays a notice clarifying that the TTL rate limit is per leaseholder per table, with a link to the documentation. #142832 + diff --git a/src/current/_includes/releases/v24.1/v24.1.16.md b/src/current/_includes/releases/v24.1/v24.1.16.md index 80d349a6a3e..accd917d9a8 100644 --- a/src/current/_includes/releases/v24.1/v24.1.16.md +++ b/src/current/_includes/releases/v24.1/v24.1.16.md @@ -7,7 +7,6 @@ Release Date: April 9, 2025

Bug fixes

- Fixed a bug that could cause a stack overflow during execution of a prepared statement that invoked a PL/pgSQL routine with a loop. The bug existed in versions v23.2.22, v24.1.15, v24.3.9, v25.1.2, v25.1.3, and pre-release versions of v25.2 prior to v25.2.0-alpha.3. - [#144061][#144061] + #144061 -[#144061]: https://github.com/cockroachdb/cockroach/pull/144061 diff --git a/src/current/_includes/releases/v24.1/v24.1.17.md b/src/current/_includes/releases/v24.1/v24.1.17.md index dc63a7226e8..448e3aee6bf 100644 --- a/src/current/_includes/releases/v24.1/v24.1.17.md +++ b/src/current/_includes/releases/v24.1/v24.1.17.md @@ -6,6 +6,5 @@ Release Date: April 28, 2025

Bug fixes

-- Fixed a rare corruption bug that impacts import and materialized views. [#144660][#144660] +- Fixed a rare corruption bug that impacts import and materialized views. #144660 -[#144660]: https://github.com/cockroachdb/cockroach/pull/144660 diff --git a/src/current/_includes/releases/v24.1/v24.1.18.md b/src/current/_includes/releases/v24.1/v24.1.18.md index 1e694b1dd68..420b3e3ae41 100644 --- a/src/current/_includes/releases/v24.1/v24.1.18.md +++ b/src/current/_includes/releases/v24.1/v24.1.18.md @@ -7,38 +7,27 @@ Release Date: April 30, 2025

SQL language changes

- `EXPLAIN ANALYZE` statements now display the number of transaction retries and time spent retrying, if non-zero, in the plan output. - [#142931][#142931] + #142931 - Added the `WITH IGNORE_FOREIGN_KEYS` option to `SHOW CREATE TABLE` which omits foreign key constraints from the output schema. This option is also allowed in `SHOW CREATE VIEW`, but has no effect. It cannot be combined with the `WITH REDACT` option. - [#142164][#142164] + #142164

Bug fixes

-- Fixed a rare corruption bug that impacts import and materialized views. [#144689][#144689] +- Fixed a rare corruption bug that impacts import and materialized views. #144689 - Fixed a bug that caused changefeeds to fail on startup when scanning a single key. - [#143148][#143148] + #143148 - Fixed a bug in the client certificate expiration metrics `security.certificate.expiration.client` and `security.certificate.ttl.client`. - [#142915][#142915] + #142915 - Fixed a bug in v24.1.14, v24.3.7, v24.3.8, and v25.1 that could cause a nil-pointer error when a column's default expression contained a volatile expression (like `nextval`) as a UDF argument. - [#143638][#143638] + #143638 - MVCC garbage collection is now fully subject to IO admission control. Previously, it was possible for MVCC GC to cause store overload (such as LSM inversion) when a large amount of data would become eligible for garbage collection. Should any issues arise from subjecting MVCC GC to admission control, the `kv.mvcc_gc.queue_kv_admission_control.enabled` cluster setting can be set to `false` to restore the previous behavior. - [#143275][#143275] + #143275 - Fixed a bug that could cause a stack overflow during execution of a prepared statement that invoked a PL/pgSQL routine with a loop. The bug existed in versions v23.2.22, v24.1.15, v24.3.9, v25.1.2, v25.1.3, and pre-release versions of v25.2 prior to v25.2.0-alpha.3. - [#144035][#144035] + #144035 - Fixed a bug where CockroachDB would encounter an internal error when decoding the gists of plans with `CALL` statements. The bug had been present since v23.2. - [#143313][#143313] + #143313 - Fixed a bug where calling a stored procedure could drop the procedure if it had `OUT` parameters that were not used by the calling routine. This bug had existed since PL/pgSQL `CALL` statements were introduced in v24.1. 
- [#143288][#143288] + #143288 - Previously, the fields `maximum memory usage` and `max sql temp disk usage` in the `EXPLAIN ANALYZE` output could be under-reported for distributed plans when memory-intensive operations were fully performed on the remote nodes. This is now fixed. The bug existed in v22.1 and later. - [#143792][#143792] + #143792 -[#143313]: https://github.com/cockroachdb/cockroach/pull/143313 -[#143288]: https://github.com/cockroachdb/cockroach/pull/143288 -[#143792]: https://github.com/cockroachdb/cockroach/pull/143792 -[#142931]: https://github.com/cockroachdb/cockroach/pull/142931 -[#142164]: https://github.com/cockroachdb/cockroach/pull/142164 -[#143638]: https://github.com/cockroachdb/cockroach/pull/143638 -[#143275]: https://github.com/cockroachdb/cockroach/pull/143275 -[#144035]: https://github.com/cockroachdb/cockroach/pull/144035 -[#143148]: https://github.com/cockroachdb/cockroach/pull/143148 -[#142915]: https://github.com/cockroachdb/cockroach/pull/142915 -[#144689]: https://github.com/cockroachdb/cockroach/pull/144689 diff --git a/src/current/_includes/releases/v24.1/v24.1.19.md b/src/current/_includes/releases/v24.1/v24.1.19.md index 834db74b345..fa6a609b582 100644 --- a/src/current/_includes/releases/v24.1/v24.1.19.md +++ b/src/current/_includes/releases/v24.1/v24.1.19.md @@ -7,54 +7,40 @@ Release Date: May 28, 2025

Operational changes

- Changed the default value of the cluster setting `admission.l0_file_count_overload_threshold` to `4000`. - [#145920][#145920] + #145920

DB Console changes

- Schema insights that recommend replacing an index were previously a two-statement command consisting of a `CREATE INDEX` and a `DROP INDEX` statement. When these two DDL statements were run as a single batched command, it was possible for one statement to succeed and one to fail. This is because DDL statements do not have the same atomicity guarantees as other SQL statements in CockroachDB. Index-replacement insights are now a single `CREATE INDEX` statement followed by a comment with additional DDL statements to be run manually: an `ALTER INDEX ... NOT VISIBLE` statement, which makes the old index invisible to the optimizer, followed by a `DROP INDEX` statement that should only be run after making the old index invisible and verifying that workload performance is satisfactory. - [#145987][#145987] + #145987

Bug fixes

- Fixed a bug where CockroachDB could encounter a `cannot specify timestamp older than ...` error during table statistics collection in some cases (e.g., when the cluster is overloaded). The bug was present since v19.1. - [#144519][#144519] + #144519 - Fixed a bug in the DB Console where tables with page size dropdowns failed to update when a new page size option was selected. Tables now update correctly. - [#144768][#144768] + #144768 - Fixed the following bugs in the **Schedules** page of the DB Console: - Fixed a bug where the **Schedules** page displayed only a subset of a cluster's schedules. The **Schedules** page now correctly displays all schedules. - Fixed a bug where manually updating the `show` or `status` parameters in the URL (e.g., `http://127.0.0.1:8080/#/schedules?status=ACTIVE&show=50`) caused the **Schedules** page to fail to load. - [#144804][#144804] + #144804 - Fixed a bug in the **SQL Activity Statements** page where filtering by **Statement Type** returned no results. The filter now works as expected. - [#144853][#144853] + #144853 - Improved the performance of `SHOW CREATE TABLE` on multi-region databases with large numbers of objects. - [#145077][#145077] + #145077 - Fixed a bug that could lead to schema changes hanging after a cluster recovered from availability issues. - [#145542][#145542] + #145542 - Previously, on a table with multiple column families, CockroachDB could encounter a `Non-nullable column "‹×›:‹×›" with no value` error in rare cases during table statistics collection. The bug was present since v19.2 and is now fixed. - [#145577][#145577] + #145577 - Fixed a bug where orphaned leases were not properly cleaned up. - [#146111][#146111] + #146111 - Fixed an internal assertion failure that could occur during operations like `ALTER TYPE` or `ALTER DATABASE ... ADD REGION` when temporary tables were present. 
- [#146199][#146199] + #146199 - Fixed a bug that could cause queries that perform work in parallel to ignore the requested quality-of-service level. Affected operations include lookup joins, DistSQL execution, and foreign-key checks. - [#146310][#146310] + #146310 - Fixed a bug that prevented `TRUNCATE` from succeeding if any indexes on the table had back-reference dependencies, such as from a view or function referencing the index. - [#146323][#146323]\ + #146323\ - Fixed a rare corruption bug that impacts import and materialized views. - [#144660][#144660] - - -[#144804]: https://github.com/cockroachdb/cockroach/pull/144804 -[#145577]: https://github.com/cockroachdb/cockroach/pull/145577 -[#146111]: https://github.com/cockroachdb/cockroach/pull/146111 -[#146199]: https://github.com/cockroachdb/cockroach/pull/146199 -[#146310]: https://github.com/cockroachdb/cockroach/pull/146310 -[#144660]: https://github.com/cockroachdb/cockroach/pull/144660 -[#145920]: https://github.com/cockroachdb/cockroach/pull/145920 -[#145987]: https://github.com/cockroachdb/cockroach/pull/145987 -[#144768]: https://github.com/cockroachdb/cockroach/pull/144768 -[#145077]: https://github.com/cockroachdb/cockroach/pull/145077 -[#146323]: https://github.com/cockroachdb/cockroach/pull/146323 -[#144519]: https://github.com/cockroachdb/cockroach/pull/144519 -[#144853]: https://github.com/cockroachdb/cockroach/pull/144853 -[#145542]: https://github.com/cockroachdb/cockroach/pull/145542 + #144660 + + diff --git a/src/current/_includes/releases/v24.1/v24.1.2.md b/src/current/_includes/releases/v24.1/v24.1.2.md index 5ae9d4aed7e..c1cf907e2eb 100644 --- a/src/current/_includes/releases/v24.1/v24.1.2.md +++ b/src/current/_includes/releases/v24.1/v24.1.2.md @@ -6,47 +6,47 @@ Release Date: July 2, 2024

{{ site.data.products.enterprise }} edition changes

-- Added error messages for unsupported Apache Pulsar [changefeed]({% link v24.1/create-changefeed.md %}) sink parameters, e.g. `topic_prefix is not yet supported`. [#124666][#124666] +- Added error messages for unsupported Apache Pulsar [changefeed]({% link v24.1/create-changefeed.md %}) sink parameters, e.g. `topic_prefix is not yet supported`. #124666 - Fixed a bug that was present since v22.2 where [changefeeds]({% link v24.1/change-data-capture-overview.md %}) with long-running [initial scans]({% link v24.1/create-changefeed.md %}#initial-scan) might incorrectly restore checkpoint job progress and drop events during [changefeed restarts]({% link v24.1/changefeed-messages.md %}#duplicate-messages) due to transient errors or node restarts. The bug was most likely to occur in clusters with the following contributing factors: - The `changefeed.shutdown_checkpoint.enabled` [cluster setting]({% link v24.1/cluster-settings.md %}) was enabled. - The cluster settings `changefeed.frontier_checkpoint_frequency` and `low changefeed.frontier_highwater_lag_checkpoint_threshold` were set low, which resulted in the initial scan taking many multiples of the configured frequency to complete. - There were multiple target tables with significant differences in row counts in one changefeed. - The changefeed target tables were large with many ranges. - - The initial scan took a long time to complete (an hour or longer). [#124996][#124996] + - The initial scan took a long time to complete (an hour or longer). #124996

Operational changes

-- Improved [disk usage metric reporting]({% link v24.1/ui-cluster-overview-page.md %}#capacity-metrics) over volumes that dynamically change their size over the life of the `cockroach` process. [#125050][#125050] -- The default values for the following [cluster settings]({% link v24.1/cluster-settings.md %}) were updated from `100000` to `7500`. These settings control the maximum number of [fingerprints]({% link v24.1/ui-statements-page.md %}#sql-statement-fingerprints) (distinct combinations of statements and [transactions]({% link v24.1/transactions.md %})) CockroachDB can store in memory, with stats being flushed on a `10m` [interval]({% link v24.1/interval.md %}) by default. You can increase these settings if your workload produces more unique fingerprints than this amount within the flush interval, and you notice that SQL stats are missing. [cockroachdb/cockroach#125554][#125554] +- Improved [disk usage metric reporting]({% link v24.1/ui-cluster-overview-page.md %}#capacity-metrics) over volumes that dynamically change their size over the life of the `cockroach` process. #125050 +- The default values for the following [cluster settings]({% link v24.1/cluster-settings.md %}) were updated from `100000` to `7500`. These settings control the maximum number of [fingerprints]({% link v24.1/ui-statements-page.md %}#sql-statement-fingerprints) (distinct combinations of statements and [transactions]({% link v24.1/transactions.md %})) CockroachDB can store in memory, with stats being flushed on a `10m` [interval]({% link v24.1/interval.md %}) by default. You can increase these settings if your workload produces more unique fingerprints than this amount within the flush interval, and you notice that SQL stats are missing. 
#125554 - [`sql.metrics.max_mem_stmt_fingerprints`]({% link v24.1/cluster-settings.md %}#setting-sql-metrics-max-mem-stmt-fingerprints) - [`sql.metrics.max_mem_txn_fingerprints`]({% link v24.1/cluster-settings.md %}#setting-sql-metrics-max-mem-txn-fingerprints)

DB Console changes

-- The favicon now renders properly for [DB Console]({% link v24.1/ui-overview.md %}), along with other image files. [#122706][#122706] +- The favicon now renders properly for [DB Console]({% link v24.1/ui-overview.md %}), along with other image files. #122706

Bug fixes

-- [`SHOW TYPES`]({% link v24.1/show-types.md %}) now includes [user-defined composite types]({% link v24.1/create-type.md %}#create-a-composite-data-type). It had omitted those types ever since composite types were added in v23.1. [#124817][#124817] -- Fixed a crash introduced in v24.1.0-beta.2 that could occur when planning [statistics]({% link v24.1/cost-based-optimizer.md %}#table-statistics) collection on a table with a [virtual computed column]({% link v24.1/computed-columns.md %}) using a [user-defined type]({% link v24.1/create-type.md %}) when the [cluster setting]({% link v24.1/cluster-settings.md %}#setting-sql-stats-virtual-computed-columns-enabled) `sql.stats.virtual_computed_columns.enabled` (introduced in v24.1.0-alpha.1) was set to `true`. [#124996][#124996] -- Fixed handling in the [declarative schema changer]({% link v24.1/online-schema-changes.md %}) when columns are included in the `STORING()` clause of a [`CREATE INDEX`]({% link v24.1/create-index.md %}) statement. CockroachDB now checks if the column is virtual up-front, and properly detects when a column is already handled by an existing [`INDEX`]({% link v24.1/indexes.md %}) when the column name has UTF-8 characters. [#125153][#125153] -- Fixed a bug where a change to a [user-defined type]({% link v24.1/create-type.md %}) could cause queries against tables using that type to fail with an error message like: `"histogram.go:694: span must be fully contained in the bucket"`. The change to the user-defined type could come directly from an [`ALTER TYPE`]({% link v24.1/alter-type.md %}) statement, or indirectly from an [`ALTER DATABASE ... ADD REGION`]({% link v24.1/alter-database.md %}#add-region) or [`DROP REGION`]({% link v24.1/alter-database.md %}#drop-region) statement (which implicitly changes the `crdb_internal_region` type). This bug had existed since UDTs were introduced in v20.2. 
[#124810][#124810] -- Fixed a bug where [telemetry logs]({% link v24.1/logging.md %}#telemetry) had the same [statement fingerprint]({% link v24.1/ui-statements-page.md %}#sql-statement-fingerprints) ID for different SQL statements. [#125000][#125000] -- Fixed an issue where [adding a column]({% link v24.1/alter-table.md %}#add-column) with a default value of an empty [array]({% link v24.1/array.md %}) would not succeed. [#125325][#125325] -- [`ALTER TABLE ... ADD CONSTRAINT UNIQUE`]({% link v24.1/alter-table.md %}) will now fail with a well-formed error message and code `42601` if a statement tries to add a [unique constraint]({% link v24.1/unique.md %}) on an expression. [#125417][#125417] -- Fixed a bug where the [`public` schema]({% link v24.1/schema-design-overview.md %}#schemas) would be created with the wrong owner. Previously the [`admin` role](https://www.cockroachlabs.com/docs/v24.1/security-reference/authorization#roles) would own the `public` schema. Now, the database owner is also the owner of the `public` schema. The owner can be altered after the schema is created. [#125533][#125533] -- Fixed a bug in v24.1, v23.2, and v23.1 where using the `changefeed.aggregator.flush_jitter` [cluster setting]({% link v24.1/cluster-settings.md %}#setting-changefeed-aggregator-flush-jitter) with the [`min_checkpoint_frequency`]({% link v24.1/create-changefeed.md %}#min-checkpoint-frequency) option set to `0` could cause panics. [#125459][#125459] -- The log message `"expiration of liveness record ... is not greater than expiration of the previous lease ... after liveness heartbeat"` is no longer generated. [#125449][#125449] -- Fixed a bug introduced in v23.2.0 in which CockroachDB would hit an internal error when evaluating [`INSERT`s]({% link v24.1/insert.md %}) into [`REGIONAL BY ROW`]({% link v24.1/table-localities.md %}#regional-by-row-tables) tables where the source was a `VALUES` clause with a single row and at least one boolean expression. 
[#126209][#126209] +- [`SHOW TYPES`]({% link v24.1/show-types.md %}) now includes [user-defined composite types]({% link v24.1/create-type.md %}#create-a-composite-data-type). It had omitted those types ever since composite types were added in v23.1. #124817 +- Fixed a crash introduced in v24.1.0-beta.2 that could occur when planning [statistics]({% link v24.1/cost-based-optimizer.md %}#table-statistics) collection on a table with a [virtual computed column]({% link v24.1/computed-columns.md %}) using a [user-defined type]({% link v24.1/create-type.md %}) when the [cluster setting]({% link v24.1/cluster-settings.md %}#setting-sql-stats-virtual-computed-columns-enabled) `sql.stats.virtual_computed_columns.enabled` (introduced in v24.1.0-alpha.1) was set to `true`. #124996 +- Fixed handling in the [declarative schema changer]({% link v24.1/online-schema-changes.md %}) when columns are included in the `STORING()` clause of a [`CREATE INDEX`]({% link v24.1/create-index.md %}) statement. CockroachDB now checks if the column is virtual up-front, and properly detects when a column is already handled by an existing [`INDEX`]({% link v24.1/indexes.md %}) when the column name has UTF-8 characters. #125153 +- Fixed a bug where a change to a [user-defined type]({% link v24.1/create-type.md %}) could cause queries against tables using that type to fail with an error message like: `"histogram.go:694: span must be fully contained in the bucket"`. The change to the user-defined type could come directly from an [`ALTER TYPE`]({% link v24.1/alter-type.md %}) statement, or indirectly from an [`ALTER DATABASE ... ADD REGION`]({% link v24.1/alter-database.md %}#add-region) or [`DROP REGION`]({% link v24.1/alter-database.md %}#drop-region) statement (which implicitly changes the `crdb_internal_region` type). This bug had existed since UDTs were introduced in v20.2. 
#124810 +- Fixed a bug where [telemetry logs]({% link v24.1/logging.md %}#telemetry) had the same [statement fingerprint]({% link v24.1/ui-statements-page.md %}#sql-statement-fingerprints) ID for different SQL statements. #125000 +- Fixed an issue where [adding a column]({% link v24.1/alter-table.md %}#add-column) with a default value of an empty [array]({% link v24.1/array.md %}) would not succeed. #125325 +- [`ALTER TABLE ... ADD CONSTRAINT UNIQUE`]({% link v24.1/alter-table.md %}) will now fail with a well-formed error message and code `42601` if a statement tries to add a [unique constraint]({% link v24.1/unique.md %}) on an expression. #125417 +- Fixed a bug where the [`public` schema]({% link v24.1/schema-design-overview.md %}#schemas) would be created with the wrong owner. Previously the [`admin` role](https://www.cockroachlabs.com/docs/v24.1/security-reference/authorization#roles) would own the `public` schema. Now, the database owner is also the owner of the `public` schema. The owner can be altered after the schema is created. #125533 +- Fixed a bug in v24.1, v23.2, and v23.1 where using the `changefeed.aggregator.flush_jitter` [cluster setting]({% link v24.1/cluster-settings.md %}#setting-changefeed-aggregator-flush-jitter) with the [`min_checkpoint_frequency`]({% link v24.1/create-changefeed.md %}#min-checkpoint-frequency) option set to `0` could cause panics. #125459 +- The log message `"expiration of liveness record ... is not greater than expiration of the previous lease ... after liveness heartbeat"` is no longer generated. #125449 +- Fixed a bug introduced in v23.2.0 in which CockroachDB would hit an internal error when evaluating [`INSERT`s]({% link v24.1/insert.md %}) into [`REGIONAL BY ROW`]({% link v24.1/table-localities.md %}#regional-by-row-tables) tables where the source was a `VALUES` clause with a single row and at least one boolean expression. #126209

Performance improvements

-- Improved the efficiency of error handling in the [vectorized execution engine]({% link v24.1/vectorized-execution.md %}) to reduce the CPU overhead of statement timeout handling and reduce the potential for more statement timeouts. [#124996][#124996] -- Some [privilege](https://www.cockroachlabs.com/docs/v24.1/security-reference/authorization#managing-privileges) checks when scanning the `crdb_internal.system_jobs` internal table now happen once before the scan, instead of once for each row. This improves performance for queries that read from `crdb_internal.system_jobs`. [#125250][#125250] +- Improved the efficiency of error handling in the [vectorized execution engine]({% link v24.1/vectorized-execution.md %}) to reduce the CPU overhead of statement timeout handling and reduce the potential for more statement timeouts. #124996 +- Some [privilege](https://www.cockroachlabs.com/docs/v24.1/security-reference/authorization#managing-privileges) checks when scanning the `crdb_internal.system_jobs` internal table now happen once before the scan, instead of once for each row. This improves performance for queries that read from `crdb_internal.system_jobs`. #125250

Miscellaneous

-- A [changefeed]({% link v24.1/create-changefeed.md %}) optimization to reduce duplicates during aggregator restarts has been disabled due to poor performance. [#124996][#124996] +- A [changefeed]({% link v24.1/create-changefeed.md %}) optimization to reduce duplicates during aggregator restarts has been disabled due to poor performance. #124996
@@ -56,21 +56,3 @@ This release includes 78 merged PRs by 35 authors.
-[#122706]: https://github.com/cockroachdb/cockroach/pull/122706 -[#124666]: https://github.com/cockroachdb/cockroach/pull/124666 -[#124801]: https://github.com/cockroachdb/cockroach/pull/124801 -[#124810]: https://github.com/cockroachdb/cockroach/pull/124810 -[#124817]: https://github.com/cockroachdb/cockroach/pull/124817 -[#124996]: https://github.com/cockroachdb/cockroach/pull/124996 -[#125000]: https://github.com/cockroachdb/cockroach/pull/125000 -[#125050]: https://github.com/cockroachdb/cockroach/pull/125050 -[#125153]: https://github.com/cockroachdb/cockroach/pull/125153 -[#125250]: https://github.com/cockroachdb/cockroach/pull/125250 -[#125325]: https://github.com/cockroachdb/cockroach/pull/125325 -[#125417]: https://github.com/cockroachdb/cockroach/pull/125417 -[#125449]: https://github.com/cockroachdb/cockroach/pull/125449 -[#125459]: https://github.com/cockroachdb/cockroach/pull/125459 -[#125522]: https://github.com/cockroachdb/cockroach/pull/125522 -[#125533]: https://github.com/cockroachdb/cockroach/pull/125533 -[#125554]: https://github.com/cockroachdb/cockroach/pull/125554 -[#126209]: https://github.com/cockroachdb/cockroach/pull/126209 diff --git a/src/current/_includes/releases/v24.1/v24.1.20.md b/src/current/_includes/releases/v24.1/v24.1.20.md index f8e472092fa..1b7602fffc5 100644 --- a/src/current/_includes/releases/v24.1/v24.1.20.md +++ b/src/current/_includes/releases/v24.1/v24.1.20.md @@ -7,28 +7,21 @@ Release Date: June 25, 2025

Bug fixes

- Fixed a bug that could potentially cause a changefeed to complete erroneously when one of its watched tables encounters a schema change. - [#147039][#147039] + #147039 - Fixed a bug that caused the **SQL Activity** > **Statement Fingerprint** page to fail to load details for statements run with application names containing a `#` character. - [#147220][#147220] + #147220 - Fixed a bug that could cause the `cockroach` process to `segfault` when collecting runtime execution traces (typically collected via the **Advanced Debug** page in the Console). - [#147338][#147338] + #147338 - Fixed a bug that could cause stable expressions to be folded in cached query plans. The bug could cause stable expressions like `current_setting` to return the wrong result if used in a prepared statement. The bug was introduced in v23.2.22, v24.1.14, v24.3.9, v25.1.2, and the v25.2 alpha. - [#147457][#147457] + #147457 - Fixed a bug where prepared statements on schema changes could fail with runtime errors. - [#147668][#147668] + #147668 - Fixed a bug where `ALTER TABLE` was modifying identity attributes on columns not backed by a sequence. - [#147771][#147771] + #147771

Performance improvements

- TTL jobs now respond to cluster topology changes by restarting and rebalancing across available nodes. - [#147213][#147213] + #147213 -[#147668]: https://github.com/cockroachdb/cockroach/pull/147668 -[#147771]: https://github.com/cockroachdb/cockroach/pull/147771 -[#147213]: https://github.com/cockroachdb/cockroach/pull/147213 -[#147039]: https://github.com/cockroachdb/cockroach/pull/147039 -[#147220]: https://github.com/cockroachdb/cockroach/pull/147220 -[#147338]: https://github.com/cockroachdb/cockroach/pull/147338 -[#147457]: https://github.com/cockroachdb/cockroach/pull/147457 diff --git a/src/current/_includes/releases/v24.1/v24.1.21.md b/src/current/_includes/releases/v24.1/v24.1.21.md index a162506139a..7b7257e7c6d 100644 --- a/src/current/_includes/releases/v24.1/v24.1.21.md +++ b/src/current/_includes/releases/v24.1/v24.1.21.md @@ -7,10 +7,8 @@ Release Date: July 28, 2025

Bug fixes

- Fixed a data race in the `cloudstorage` sink. - [#147160][#147160] + #147160 - Fixed a bug where `libpq` clients using the async API could hang with large result sets (Python: psycopg; Ruby: ActiveRecord, ruby-pg). - [#148472][#148472] + #148472 -[#147160]: https://github.com/cockroachdb/cockroach/pull/147160 -[#148472]: https://github.com/cockroachdb/cockroach/pull/148472 diff --git a/src/current/_includes/releases/v24.1/v24.1.22.md b/src/current/_includes/releases/v24.1/v24.1.22.md index 6edb2e3a3df..676ee787995 100644 --- a/src/current/_includes/releases/v24.1/v24.1.22.md +++ b/src/current/_includes/releases/v24.1/v24.1.22.md @@ -7,7 +7,6 @@ Release Date: August 1, 2025

Bug fixes

- Fixed a bug that could cause some errors returned by attempts to upload backup data to external storage providers to be undetected, potentially causing incomplete backups. - [#151098][#151098] + #151098 -[#151098]: https://github.com/cockroachdb/cockroach/pull/151098 diff --git a/src/current/_includes/releases/v24.1/v24.1.23.md b/src/current/_includes/releases/v24.1/v24.1.23.md index 38b7d15a35b..2152067d492 100644 --- a/src/current/_includes/releases/v24.1/v24.1.23.md +++ b/src/current/_includes/releases/v24.1/v24.1.23.md @@ -8,26 +8,21 @@ Release Date: August 22, 2025 - Backporting detailed error logging logic gated behind a cluster setting. The cluster setting enables detailed - error logging for messages exceeding Kafka v2 size limit. [#150183][#150183] -- Changefeeds emitting to Kafka sinks that were created in CockroachDB v24.2.1+, or v23.2.10+ and v24.1.4+ with the `changefeed.new_kafka_sink.enabled` cluster setting enabled now include the message key, size, and MVCC timestamp in "message too large" error logs. [#150183][#150183] + error logging for messages exceeding Kafka v2 size limit. #150183 +- Changefeeds emitting to Kafka sinks that were created in CockroachDB v24.2.1+, or v23.2.10+ and v24.1.4+ with the `changefeed.new_kafka_sink.enabled` cluster setting enabled now include the message key, size, and MVCC timestamp in "message too large" error logs. #150183

Bug fixes

-- Fixed an issue where the `mvcc_timestamp` field was incorrectly returning zero values when used with CDC queries. The timestamp is now emitted correctly. [#147110][#147110] -- Fixed a bug that could cause some errors returned by attempts to upload backup data to external storage providers to be undetected, potentially causing incomplete backups. [#151083][#151083] +- Fixed an issue where the `mvcc_timestamp` field was incorrectly returning zero values when used with CDC queries. The timestamp is now emitted correctly. #147110 +- Fixed a bug that could cause some errors returned by attempts to upload backup data to external storage providers to be undetected, potentially causing incomplete backups. #151083

Build changes

-- Upgrade Go to consume security fixes [#150990][#150990] +- Upgrade Go to consume security fixes #150990

Miscellaneous

- Restore will now re-attempt `AdminSplit` KV requests - instead of immediately failing and pausing the job. [#149618][#149618] + instead of immediately failing and pausing the job. #149618 -[#150990]: https://github.com/cockroachdb/cockroach/pull/150990 -[#149618]: https://github.com/cockroachdb/cockroach/pull/149618 -[#150183]: https://github.com/cockroachdb/cockroach/pull/150183 -[#147110]: https://github.com/cockroachdb/cockroach/pull/147110 -[#151083]: https://github.com/cockroachdb/cockroach/pull/151083 diff --git a/src/current/_includes/releases/v24.1/v24.1.24.md b/src/current/_includes/releases/v24.1/v24.1.24.md index 142e9efbbef..f754237fde6 100644 --- a/src/current/_includes/releases/v24.1/v24.1.24.md +++ b/src/current/_includes/releases/v24.1/v24.1.24.md @@ -6,26 +6,20 @@ Release Date: September 22, 2025

SQL language changes

-- When `sql_safe_updates` is enabled, the `ALTER TABLE ... LOCALITY` statement will be blocked when trying to convert an existing table to `REGIONAL BY ROW`, unless a region column has been added to the table. This protects against undesired behavior that caused `UPDATE` or `DELETE` statements to fail against the table while the locality change was in progress. [#152603][#152603] +- When `sql_safe_updates` is enabled, the `ALTER TABLE ... LOCALITY` statement will be blocked when trying to convert an existing table to `REGIONAL BY ROW`, unless a region column has been added to the table. This protects against undesired behavior that caused `UPDATE` or `DELETE` statements to fail against the table while the locality change was in progress. #152603

Operational changes

-- Updated TTL job replanning to be less sensitive by focusing specifically on detecting when nodes become unavailable rather than reacting to all plan differences. The cluster setting `sql.ttl.replan_flow_threshold` may have been set to `0` to work around the TTL replanner being too sensitive; this fix will alleviate that and any instance that had set `replan_flow_threshold` to `0` can be reset back to the default. [#151490][#151490] +- Updated TTL job replanning to be less sensitive by focusing specifically on detecting when nodes become unavailable rather than reacting to all plan differences. The cluster setting `sql.ttl.replan_flow_threshold` may have been set to `0` to work around the TTL replanner being too sensitive; this fix will alleviate that and any instance that had set `replan_flow_threshold` to `0` can be reset back to the default. #151490

Bug fixes

-- Fixed a bug where `debug.zip` files collected from clusters with `disallow_full_table_scans` enabled were missing system table data. [#151222][#151222] -- Fixed a bug where updating column default expressions would incorrectly remove sequence ownerships for the affected column. [#152312][#152312] -- Fixed a bug where views could not reference the `crdb_region` column from their underlying tables in expressions. [#152744][#152744] +- Fixed a bug where `debug.zip` files collected from clusters with `disallow_full_table_scans` enabled were missing system table data. #151222 +- Fixed a bug where updating column default expressions would incorrectly remove sequence ownerships for the affected column. #152312 +- Fixed a bug where views could not reference the `crdb_region` column from their underlying tables in expressions. #152744

Performance improvements

-- Lookup joins can now be used on tables with virtual columns even if the type of the search argument is not identical to the column type referenced in the virtual column. [#152893][#152893] +- Lookup joins can now be used on tables with virtual columns even if the type of the search argument is not identical to the column type referenced in the virtual column. #152893 -[#152603]: https://github.com/cockroachdb/cockroach/pull/152603 -[#151490]: https://github.com/cockroachdb/cockroach/pull/151490 -[#151222]: https://github.com/cockroachdb/cockroach/pull/151222 -[#152312]: https://github.com/cockroachdb/cockroach/pull/152312 -[#152744]: https://github.com/cockroachdb/cockroach/pull/152744 -[#152893]: https://github.com/cockroachdb/cockroach/pull/152893 diff --git a/src/current/_includes/releases/v24.1/v24.1.25.md b/src/current/_includes/releases/v24.1/v24.1.25.md index d6baa0384f8..84d2320c586 100644 --- a/src/current/_includes/releases/v24.1/v24.1.25.md +++ b/src/current/_includes/releases/v24.1/v24.1.25.md @@ -6,11 +6,8 @@ Release Date: October 17, 2025

Bug fixes

-- Fixed a bug where an `INSERT` statement could fail with a type checking error while adding a `BIT(n)` column. [#153606][#153606] -- Fixed a bug that caused panics when executing `COPY` into a table with hidden columns and expression indexes. The panic only occurred when the session setting `expect_and_ignore_not_visible_columns_in_copy` was enabled. This bug was introduced with `expect_and_ignore_not_visible_columns_in_copy` in v22.1.0. [#154282][#154282] -- Fixed a bug where the presence of duplicate temporary tables in a backup caused the restore to fail with an error containing the text `restoring table desc and namespace entries: table already exists`. [#154400][#154400] +- Fixed a bug where an `INSERT` statement could fail with a type checking error while adding a `BIT(n)` column. #153606 +- Fixed a bug that caused panics when executing `COPY` into a table with hidden columns and expression indexes. The panic only occurred when the session setting `expect_and_ignore_not_visible_columns_in_copy` was enabled. This bug was introduced with `expect_and_ignore_not_visible_columns_in_copy` in v22.1.0. #154282 +- Fixed a bug where the presence of duplicate temporary tables in a backup caused the restore to fail with an error containing the text `restoring table desc and namespace entries: table already exists`. #154400 -[#153606]: https://github.com/cockroachdb/cockroach/pull/153606 -[#154282]: https://github.com/cockroachdb/cockroach/pull/154282 -[#154400]: https://github.com/cockroachdb/cockroach/pull/154400 diff --git a/src/current/_includes/releases/v24.1/v24.1.26.md b/src/current/_includes/releases/v24.1/v24.1.26.md index 09ee265a569..c44e0fe358c 100644 --- a/src/current/_includes/releases/v24.1/v24.1.26.md +++ b/src/current/_includes/releases/v24.1/v24.1.26.md @@ -6,9 +6,7 @@ Release Date: February 19, 2026

Bug fixes

-- Fixed a rare race condition between range splits and MVCC garbage collection where a GCRequest could target keys outside its declared span. In rare cases, this could result in data on the post-split right-hand side (RHS) being incorrectly garbage collected, potentially leading to lost writes. The system now detects and rejects such malformed GC requests. [#163777][#163777] -- Fixed a bug where generating a debug zip could trigger an out-of-memory (OOM) condition on a node if malformed log entries were present in logs using json or json-compact formatting. Debug zip generation now safely handles malformed log lines and prevents excessive memory consumption. [#163799][#163799] +- Fixed a rare race condition between range splits and MVCC garbage collection where a GCRequest could target keys outside its declared span. In rare cases, this could result in data on the post-split right-hand side (RHS) being incorrectly garbage collected, potentially leading to lost writes. The system now detects and rejects such malformed GC requests. #163777 +- Fixed a bug where generating a debug zip could trigger an out-of-memory (OOM) condition on a node if malformed log entries were present in logs using json or json-compact formatting. Debug zip generation now safely handles malformed log lines and prevents excessive memory consumption. #163799 -[#163777]: https://github.com/cockroachdb/cockroach/pull/163777 -[#163799]: https://github.com/cockroachdb/cockroach/pull/163799 diff --git a/src/current/_includes/releases/v24.1/v24.1.27.md b/src/current/_includes/releases/v24.1/v24.1.27.md index a7692d26c2a..186614d7cd9 100644 --- a/src/current/_includes/releases/v24.1/v24.1.27.md +++ b/src/current/_includes/releases/v24.1/v24.1.27.md @@ -6,7 +6,6 @@ Release Date: April 8, 2026

Bug fixes

-- Fixed a bug where generating a debug zip could trigger an out-of-memory (OOM) condition on a node if malformed log entries were present in logs using `json` or `json-compact` formatting. This bug was introduced in v24.1. [#163799][#163799] +- Fixed a bug where generating a debug zip could trigger an out-of-memory (OOM) condition on a node if malformed log entries were present in logs using `json` or `json-compact` formatting. This bug was introduced in v24.1. #163799 -[#163799]: https://github.com/cockroachdb/cockroach/pull/163799 diff --git a/src/current/_includes/releases/v24.1/v24.1.28.md b/src/current/_includes/releases/v24.1/v24.1.28.md index d6bb2c7920d..e14fbfeb426 100644 --- a/src/current/_includes/releases/v24.1/v24.1.28.md +++ b/src/current/_includes/releases/v24.1/v24.1.28.md @@ -6,7 +6,6 @@ Release Date: April 22, 2026

Bug fixes

-- Fixed a bug where concurrent updates to a table using multiple column families during a partial index creation could result in data loss, incorrect `NULL` values, or validation failures in the resulting index. [#168782][#168782] +- Fixed a bug where concurrent updates to a table using multiple column families during a partial index creation could result in data loss, incorrect `NULL` values, or validation failures in the resulting index. #168782 -[#168782]: https://github.com/cockroachdb/cockroach/pull/168782 diff --git a/src/current/_includes/releases/v24.1/v24.1.3.md b/src/current/_includes/releases/v24.1/v24.1.3.md index d1a442252cd..adc992e98bc 100644 --- a/src/current/_includes/releases/v24.1/v24.1.3.md +++ b/src/current/_includes/releases/v24.1/v24.1.3.md @@ -6,36 +6,36 @@ Release Date: August 1, 2024

SQL language changes

-- [`EXPLAIN ANALYZE`]({% link v23.2/explain-analyze.md %}) statements are now supported when executed via Cloud Console [SQL shell]({% link cockroachcloud/sql-shell.md %}). [#125563][#125563] -- Added the [`sql.auth.grant_option_inheritance.enabled` cluster setting]({% link v24.1/cluster-settings.md %}#setting-sql-auth-grant-option-inheritance-enabled). The default value is `true`, which maintains consistency with CockroachDB's previous behavior: users granted a privilege with [`WITH GRANT OPTION`]({% link v24.1/grant.md %}) can in turn grant that privilege to others. When `sql.auth.grant_option_inheritance.enabled` is set to `false`, the `GRANT OPTION` is not inherited through role membership, thereby preventing descendant roles from granting the privilege to others. However, the privilege itself continues to be inherited through role membership. [#126300][#126300] +- [`EXPLAIN ANALYZE`]({% link v23.2/explain-analyze.md %}) statements are now supported when executed via Cloud Console [SQL shell]({% link cockroachcloud/sql-shell.md %}). #125563 +- Added the [`sql.auth.grant_option_inheritance.enabled` cluster setting]({% link v24.1/cluster-settings.md %}#setting-sql-auth-grant-option-inheritance-enabled). The default value is `true`, which maintains consistency with CockroachDB's previous behavior: users granted a privilege with [`WITH GRANT OPTION`]({% link v24.1/grant.md %}) can in turn grant that privilege to others. When `sql.auth.grant_option_inheritance.enabled` is set to `false`, the `GRANT OPTION` is not inherited through role membership, thereby preventing descendant roles from granting the privilege to others. However, the privilege itself continues to be inherited through role membership. #126300

Operational changes

-- `crdb_internal.cluster_execution_insights.txt` and `crdb_internal.cluster_txn_execution_insights.txt` have been removed from the [debug zip]({% link v24.1/cockroach-debug-zip.md %}). These files contained cluster-wide insights for statements and transactions. Users can still rely on the [per-node execution]({% link v24.1/cockroach-debug-zip.md %}#files) insights in `crdb_internal.node_execution_insights.txt` and `crdb_internal.node_txn_execution_insights.txt`. [#125807][#125807] -- For the [TELEMETRY logging channel]({% link v24.1/logging.md %}#telemetry), TCL `sampled_query` events will now be sampled at the rate specified by the setting [`sql.telemetry.query_sampling.max_event_frequency`]({% link v24.1/cluster-settings.md %}#setting-sql-telemetry-query-sampling-max-event-frequency), which is already used to limit the rate of sampling DML statements. [#126729][#126729] +- `crdb_internal.cluster_execution_insights.txt` and `crdb_internal.cluster_txn_execution_insights.txt` have been removed from the [debug zip]({% link v24.1/cockroach-debug-zip.md %}). These files contained cluster-wide insights for statements and transactions. Users can still rely on the [per-node execution]({% link v24.1/cockroach-debug-zip.md %}#files) insights in `crdb_internal.node_execution_insights.txt` and `crdb_internal.node_txn_execution_insights.txt`. #125807 +- For the [TELEMETRY logging channel]({% link v24.1/logging.md %}#telemetry), TCL `sampled_query` events will now be sampled at the rate specified by the setting [`sql.telemetry.query_sampling.max_event_frequency`]({% link v24.1/cluster-settings.md %}#setting-sql-telemetry-query-sampling-max-event-frequency), which is already used to limit the rate of sampling DML statements. #126729

Bug fixes

-- Fixed a bug where collection of [debug information]({% link v24.1/cockroach-debug-zip.md %}) for very long-running [jobs]({% link v24.1/show-jobs.md %}) could use excessive space in the `job_info` system table and cause some interactions with the jobs system to become slow. [#126122][#126122] -- Fixed a bug where a change to a [user-defined type (UDT)]({% link v24.1/create-type.md %}) could cause queries against tables using that type to fail with an error: `histogram.go:694: span must be fully contained in the bucket`. The change to the user-defined type could come directly from an [`ALTER TYPE`]({% link v24.1/alter-type.md %}) statement, or indirectly from an [`ALTER DATABASE ADD REGION`]({% link v24.1/alter-database.md %}#add-region) or [`DROP REGION`]({% link v24.1/alter-database.md %}#drop-region) statement (which implicitly change the [`crdb_internal_region`]({% link v24.1/alter-table.md %}#set-the-table-locality-to-regional-by-row) type). This bug has existed since UDTs were introduced in v20.2. [#125800][#125800] -- Fixed a bug in which constant [`LIKE`]({% link v24.1/sql-feature-support.md %}#scalar-expressions-and-boolean-formulas) patterns containing certain sequences of backslashes did not become constrained scans. This bug has been present since v21.1.13 when support for building constrained scans from `LIKE` patterns containing backslashes was added. [#125539][#125539] -- Fixed the statistics estimation code in the [optimizer]({% link v24.1/cost-based-optimizer.md %}) so it does not use the empty histograms produced if [histogram collection]({% link v24.1/cost-based-optimizer.md %}#control-histogram-collection) has been disabled during stats collection due to excessive memory utilization. Now the optimizer will rely on distinct counts instead of the empty histograms and should produce better plans as a result. This bug has existed since CockroachDB v22.1. 
[#126153][#126153] -- Fixed a bug where [`CREATE TABLE`]({% link v24.1/create-table.md %}) statements with [index expressions]({% link v24.1/expression-indexes.md %}) could hit undefined column errors on [transaction retries]({% link v24.1/transactions.md %}#transaction-retries). [#125967][#125967] -- Fixed a bug where CockroachDB would hit an internal error when evaluating [`INSERT`s]({% link v24.1/insert.md %}) into [`REGIONAL BY ROW`]({% link v24.1/table-localities.md %}#regional-by-row-tables) tables where the source is a [`VALUES` clause]({% link v24.1/selection-queries.md %}#values-clause) with a single row and at least one boolean expression. The bug was introduced in v23.2.0. [#125505][#125505] -- Fixed a bug in [`cockroach debug tsdump`]({% link v24.1/cockroach-debug-tsdump.md %}) where the command fails when a custom SQL port is used and the [`--format=raw`]({% link v24.1/cockroach-debug-tsdump.md %}#flags) flag is provided. [#125980][#125980] -- Fixed a bug where a [user-defined function (UDF)]({% link v24.1/user-defined-functions.md %}) that shared a name with a [built-in function]({% link v24.1/functions-and-operators.md %}#built-in-functions) would not be resolved, even if the UDF had higher precedence according to the [`search_path`]({% link v24.1/sql-name-resolution.md %}#search-path) variable. [#126296][#126296] -- Fixed a bug that caused [background jobs]({% link v24.1/show-jobs.md %}) to incorrectly respect a statement timeout. [#126820][#126820] -- Fixed a bug where [`ALTER DATABASE ... DROP REGION`]({% link v24.1/alter-database.md %}#drop-region) could fail if any tables under the given database have [indexes on expressions]({% link v24.1/expression-indexes.md %}). [#126599][#126599] -- Fixed a PostgreSQL incompatibility bug when inputting `public` as user name for [built-in functions]({% link v24.1/functions-and-operators.md %}) such as `has_database_privilege` and `has_schema_privilege`. 
[#126851][#126851] -- Fixed a bug when [restoring]({% link v24.1/restore.md %}) a database with a [composite type]({% link v24.1/create-type.md %}#create-a-composite-data-type). [#126428][#126428] -- Fixed a bug where the [Databases]({% link v24.1/ui-databases-page.md %}) page crashed if the range information was not available. [#127092][#127092] -- Fixed a bug where CockroachDB could incorrectly evaluate an [`IS NOT NULL`]({% link v24.1/null-handling.md %}#nulls-and-simple-comparisons) filter if it was applied to non-`NULL` tuples that had `NULL` elements, such as `(1, NULL)` or `(NULL, NULL)`. This bug has existed since v20.2. [#126938][#126938] -- The `sql_sequence_cached_node` value of the [`serial_normalization` setting]({% link v24.1/serial.md %}#modes-of-operation) was not correctly formatted. This could lead to errors while connecting to CockroachDB if the default value of `serial_normalization` was set to `serial_normalization`. The formatting bug was fixed, which also fixes the errors when connecting. [#127675][#127675] +- Fixed a bug where collection of [debug information]({% link v24.1/cockroach-debug-zip.md %}) for very long-running [jobs]({% link v24.1/show-jobs.md %}) could use excessive space in the `job_info` system table and cause some interactions with the jobs system to become slow. #126122 +- Fixed a bug where a change to a [user-defined type (UDT)]({% link v24.1/create-type.md %}) could cause queries against tables using that type to fail with an error: `histogram.go:694: span must be fully contained in the bucket`. The change to the user-defined type could come directly from an [`ALTER TYPE`]({% link v24.1/alter-type.md %}) statement, or indirectly from an [`ALTER DATABASE ADD REGION`]({% link v24.1/alter-database.md %}#add-region) or [`DROP REGION`]({% link v24.1/alter-database.md %}#drop-region) statement (which implicitly change the [`crdb_internal_region`]({% link v24.1/alter-table.md %}#set-the-table-locality-to-regional-by-row) type). 
This bug has existed since UDTs were introduced in v20.2. #125800 +- Fixed a bug in which constant [`LIKE`]({% link v24.1/sql-feature-support.md %}#scalar-expressions-and-boolean-formulas) patterns containing certain sequences of backslashes did not become constrained scans. This bug has been present since v21.1.13 when support for building constrained scans from `LIKE` patterns containing backslashes was added. #125539 +- Fixed the statistics estimation code in the [optimizer]({% link v24.1/cost-based-optimizer.md %}) so it does not use the empty histograms produced if [histogram collection]({% link v24.1/cost-based-optimizer.md %}#control-histogram-collection) has been disabled during stats collection due to excessive memory utilization. Now the optimizer will rely on distinct counts instead of the empty histograms and should produce better plans as a result. This bug has existed since CockroachDB v22.1. #126153 +- Fixed a bug where [`CREATE TABLE`]({% link v24.1/create-table.md %}) statements with [index expressions]({% link v24.1/expression-indexes.md %}) could hit undefined column errors on [transaction retries]({% link v24.1/transactions.md %}#transaction-retries). #125967 +- Fixed a bug where CockroachDB would hit an internal error when evaluating [`INSERT`s]({% link v24.1/insert.md %}) into [`REGIONAL BY ROW`]({% link v24.1/table-localities.md %}#regional-by-row-tables) tables where the source is a [`VALUES` clause]({% link v24.1/selection-queries.md %}#values-clause) with a single row and at least one boolean expression. The bug was introduced in v23.2.0. #125505 +- Fixed a bug in [`cockroach debug tsdump`]({% link v24.1/cockroach-debug-tsdump.md %}) where the command fails when a custom SQL port is used and the [`--format=raw`]({% link v24.1/cockroach-debug-tsdump.md %}#flags) flag is provided. 
#125980 +- Fixed a bug where a [user-defined function (UDF)]({% link v24.1/user-defined-functions.md %}) that shared a name with a [built-in function]({% link v24.1/functions-and-operators.md %}#built-in-functions) would not be resolved, even if the UDF had higher precedence according to the [`search_path`]({% link v24.1/sql-name-resolution.md %}#search-path) variable. #126296 +- Fixed a bug that caused [background jobs]({% link v24.1/show-jobs.md %}) to incorrectly respect a statement timeout. #126820 +- Fixed a bug where [`ALTER DATABASE ... DROP REGION`]({% link v24.1/alter-database.md %}#drop-region) could fail if any tables under the given database have [indexes on expressions]({% link v24.1/expression-indexes.md %}). #126599 +- Fixed a PostgreSQL incompatibility bug when inputting `public` as user name for [built-in functions]({% link v24.1/functions-and-operators.md %}) such as `has_database_privilege` and `has_schema_privilege`. #126851 +- Fixed a bug when [restoring]({% link v24.1/restore.md %}) a database with a [composite type]({% link v24.1/create-type.md %}#create-a-composite-data-type). #126428 +- Fixed a bug where the [Databases]({% link v24.1/ui-databases-page.md %}) page crashed if the range information was not available. #127092 +- Fixed a bug where CockroachDB could incorrectly evaluate an [`IS NOT NULL`]({% link v24.1/null-handling.md %}#nulls-and-simple-comparisons) filter if it was applied to non-`NULL` tuples that had `NULL` elements, such as `(1, NULL)` or `(NULL, NULL)`. This bug has existed since v20.2. #126938 +- The `sql_sequence_cached_node` value of the [`serial_normalization` setting]({% link v24.1/serial.md %}#modes-of-operation) was not correctly formatted. This could lead to errors while connecting to CockroachDB if the default value of `serial_normalization` was set to `serial_normalization`. The formatting bug was fixed, which also fixes the errors when connecting. #127675

Performance improvements

-- Starting a `cockroach` process will no longer flush buffered [logs]({% link v24.1/logging-overview.md %}) to configured [logging sinks]({% link v24.1/configure-logs.md %}#configure-log-sinks) unless the process is running under `systemd`, where cockroach runs with the `NOTIFY_SOCKET` environment variable. [#126305][#126305] -- [Schema changes]({% link v24.1/online-schema-changes.md %}) that cause a data backfill, such as adding a non-nullable column or changing the primary key, will now split and scatter the temporary indexes used to perform the change. This reduces the chance of causing a [write hotspot]({% link v24.1/performance-best-practices-overview.md %}#hot-spots) that can slow down foreground traffic. [#126684][#126684] +- Starting a `cockroach` process will no longer flush buffered [logs]({% link v24.1/logging-overview.md %}) to configured [logging sinks]({% link v24.1/configure-logs.md %}#configure-log-sinks) unless the process is running under `systemd`, where cockroach runs with the `NOTIFY_SOCKET` environment variable. #126305 +- [Schema changes]({% link v24.1/online-schema-changes.md %}) that cause a data backfill, such as adding a non-nullable column or changing the primary key, will now split and scatter the temporary indexes used to perform the change. This reduces the chance of causing a [write hotspot]({% link v24.1/performance-best-practices-overview.md %}#hot-spots) that can slow down foreground traffic. #126684
@@ -45,27 +45,3 @@ This release includes 122 merged PRs by 39 authors.
-[#125505]: https://github.com/cockroachdb/cockroach/pull/125505 -[#125539]: https://github.com/cockroachdb/cockroach/pull/125539 -[#125563]: https://github.com/cockroachdb/cockroach/pull/125563 -[#125800]: https://github.com/cockroachdb/cockroach/pull/125800 -[#125807]: https://github.com/cockroachdb/cockroach/pull/125807 -[#125967]: https://github.com/cockroachdb/cockroach/pull/125967 -[#125980]: https://github.com/cockroachdb/cockroach/pull/125980 -[#126122]: https://github.com/cockroachdb/cockroach/pull/126122 -[#126153]: https://github.com/cockroachdb/cockroach/pull/126153 -[#126267]: https://github.com/cockroachdb/cockroach/pull/126267 -[#126296]: https://github.com/cockroachdb/cockroach/pull/126296 -[#126300]: https://github.com/cockroachdb/cockroach/pull/126300 -[#126305]: https://github.com/cockroachdb/cockroach/pull/126305 -[#126428]: https://github.com/cockroachdb/cockroach/pull/126428 -[#126599]: https://github.com/cockroachdb/cockroach/pull/126599 -[#126684]: https://github.com/cockroachdb/cockroach/pull/126684 -[#126729]: https://github.com/cockroachdb/cockroach/pull/126729 -[#126820]: https://github.com/cockroachdb/cockroach/pull/126820 -[#126846]: https://github.com/cockroachdb/cockroach/pull/126846 -[#126851]: https://github.com/cockroachdb/cockroach/pull/126851 -[#126938]: https://github.com/cockroachdb/cockroach/pull/126938 -[#127092]: https://github.com/cockroachdb/cockroach/pull/127092 -[#127164]: https://github.com/cockroachdb/cockroach/pull/127164 -[#127675]: https://github.com/cockroachdb/cockroach/pull/127675 diff --git a/src/current/_includes/releases/v24.1/v24.1.4.md b/src/current/_includes/releases/v24.1/v24.1.4.md index 7a3346e3378..36ccf4a2f8b 100644 --- a/src/current/_includes/releases/v24.1/v24.1.4.md +++ b/src/current/_includes/releases/v24.1/v24.1.4.md @@ -6,7 +6,7 @@ Release Date: August 29, 2024

Security updates

-- URLs in the following SQL statements are now sanitized of any secrets before being written to [unredacted logs]({% link v24.1/configure-logs.md %}#redact-logs). [#127506][#127506] +- URLs in the following SQL statements are now sanitized of any secrets before being written to [unredacted logs]({% link v24.1/configure-logs.md %}#redact-logs). #127506 - [`ALTER BACKUP SCHEDULE`]({% link v24.1/alter-backup-schedule.md %}) - [`ALTER BACKUP`]({% link v24.1/alter-backup.md %}) - [`ALTER CHANGEFEED SET sink`]({% link v24.1/alter-changefeed.md %}#set-options-on-a-changefeed) @@ -24,78 +24,42 @@ Release Date: August 29, 2024

{{ site.data.products.enterprise }} edition changes

-- Added a new Kafka [changefeed sink]({% link v24.1/changefeed-sinks.md %}) that uses the [`franz-go` library](https://github.com/twmb/franz-go) and CockroachDB's `batching_sink` implementation. The new Kafka sink can be enabled with the `changefeed.new_kafka_sink_enabled` [cluster setting]({% link v24.1/cluster-settings.md %}), which is disabled by default. [#128018][#128018] -- The new Kafka sink, enabled with [`changefeed.new_kafka_sink_enabled`]({% link v24.1/cluster-settings.md %}), as well as the Google Cloud Pub/Sub sink, now display notices indicating the topics that a changefeed will emit to. [#128333][#128333] +- Added a new Kafka [changefeed sink]({% link v24.1/changefeed-sinks.md %}) that uses the [`franz-go` library](https://github.com/twmb/franz-go) and CockroachDB's `batching_sink` implementation. The new Kafka sink can be enabled with the `changefeed.new_kafka_sink_enabled` [cluster setting]({% link v24.1/cluster-settings.md %}), which is disabled by default. #128018 +- The new Kafka sink, enabled with [`changefeed.new_kafka_sink_enabled`]({% link v24.1/cluster-settings.md %}), as well as the Google Cloud Pub/Sub sink, now display notices indicating the topics that a changefeed will emit to. #128333

SQL language changes

-- Added a new `sql.auth.grant_option_for_owner.enabled` [cluster setting]({% link v24.1/cluster-settings.md %}) to prevent the [`GRANT OPTION`]({% link v24.1/show-grants.md %}#privilege-grants) from being given to the owner of an object by default. The cluster setting defaults to `true`, retaining the existing behavior; when it is set to `false`, the `GRANT OPTION` is not implicitly given to an object's owner. The owner will still have all privileges on an object except the ability to grant privileges to other users. [#126959][#126959] +- Added a new `sql.auth.grant_option_for_owner.enabled` [cluster setting]({% link v24.1/cluster-settings.md %}) to prevent the [`GRANT OPTION`]({% link v24.1/show-grants.md %}#privilege-grants) from being given to the owner of an object by default. The cluster setting defaults to `true`, retaining the existing behavior; when it is set to `false`, the `GRANT OPTION` is not implicitly given to an object's owner. The owner will still have all privileges on an object except the ability to grant privileges to other users. #126959

Command-line changes

-- A `--locality-file` flag is now available on the [`cockroach start`]({% link v24.1/cockroach-start.md %}) and [`cockroach start-single-node`]({% link v24.1/cockroach-start-single-node.md %}) commands. This allows specifying node [locality]({% link v24.1/cockroach-start.md %}#locality) (typically a `region` value) as a file, rather than by using the [`--locality` flag]({% link v24.1/cockroach-start.md %}#locality). [#127475][#127475] +- A `--locality-file` flag is now available on the [`cockroach start`]({% link v24.1/cockroach-start.md %}) and [`cockroach start-single-node`]({% link v24.1/cockroach-start-single-node.md %}) commands. This allows specifying node [locality]({% link v24.1/cockroach-start.md %}#locality) (typically a `region` value) as a file, rather than by using the [`--locality` flag]({% link v24.1/cockroach-start.md %}#locality). #127475

DB Console changes

-- The [**Databases** and **Tables** pages]({% link v24.1/ui-databases-page.md %}) in the DB Console now show a loading state while loading information for databases and tables, including size and range counts. [#127748][#127748] -- On the [**Databases** page]({% link v24.1/ui-databases-page.md %}) in the DB Console, table names will no longer appear with quotes around the schema and table name. [#127766][#127766] +- The [**Databases** and **Tables** pages]({% link v24.1/ui-databases-page.md %}) in the DB Console now show a loading state while loading information for databases and tables, including size and range counts. #127748 +- On the [**Databases** page]({% link v24.1/ui-databases-page.md %}) in the DB Console, table names will no longer appear with quotes around the schema and table name. #127766

Bug fixes

-- Fixed a bug introduced in v23.2.0 in which CockroachDB would hit an internal error when evaluating [`INSERT`s]({% link v24.1/insert.md %}) into [`REGIONAL BY ROW`]({% link v24.1/alter-table.md %}#set-the-table-locality-to-regional-by-row) tables where the source was a [`VALUES`]({% link v24.1/selection-queries.md %}#values-clause) clause with a single row and at least one Boolean expression. [#127277][#127277] -- Fixed a bug in which the [`DISCARD`]({% link v24.1/alter-table.md %}) statement was disallowed when the [session setting]({% link v24.1/session-variables.md %}#default-transaction-read-only) `default_transaction_read_only` was set to `on`. [#127363][#127363] -- In the [DB Console event log]({% link v24.1/ui-overview-dashboard.md %}#events-panel), [`ALTER ROLE`]({% link v24.1/alter-role.md %}) events now display correctly even when no [role options]({% link v24.1/alter-role.md %}#role-options) are included in the `ALTER ROLE` statement. [#126568][#126568] -- Fixed a formatting issue with the `sql_sequence_cached_node` value of the [`serial_normalization` session setting]({% link v24.1/session-variables.md %}). This could lead to an error connecting to CockroachDB if this value was set as the default for `serial_normalization` via the cluster setting [`sql.defaults.serial_normalization`]({% link v24.1/cluster-settings.md %}#setting-sql-defaults-serial-normalization). [#127673][#127673] -- Fixed a bug where [dropping `ENUM` values]({% link v24.1/alter-type.md %}#drop-a-value-in-a-user-defined-type) that were referenced by [index expressions]({% link v24.1/expression-indexes.md %}) could fail with an error. [#127454][#127454] -- Fixed a bug that caused a memory leak when executing SQL statements with [comments]({% link v24.1/comment-on.md %}), for example, `SELECT /* comment */ 1;`. Memory owned by a SQL session would continue to grow as these types of statements were executed. 
The memory would only be released when closing the [SQL session]({% link v24.1/show-sessions.md %}). This bug had been present since v23.1. [#127759][#127759] -- Fixed a bug where [schema changes]({% link v24.1/online-schema-changes.md %}) could hang if the lease rangefeed stopped receiving updates. [#127487][#127487] -- Fixed small memory leaks that would occur during [changefeed]({% link v24.1/change-data-capture-overview.md %}) creation. [#128018][#128018] -- Fixed a memory leak that could occur when specifying a non-existent [virtual cluster]({% link v24.1/cluster-virtualization-overview.md %}) name in the connection string. [#128106][#128106] -- Fixed a bug where [`CREATE INDEX IF NOT EXISTS`]({% link v24.1/create-index.md %}) would not correctly short-circuit if the given index already existed. [#128311][#128311] -- Fixed a bug in syntax validation, in which the `DESCENDING` clause was not allowed for non-terminal columns of an [inverted index]({% link v24.1/inverted-indexes.md %}). Only the last column of an inverted index should be prevented from being `DESCENDING`. This is now properly checked. [#128311][#128311] -- Fixed a bug where an [index]({% link v24.1/indexes.md %}) could store a column in the primary index if that column had a mixed-case name. [#128311][#128311] -- Setting or dropping a default value on a [computed column]({% link v24.1/computed-columns.md %}) is now blocked, even for `NULL` defaults. Previously, setting or dropping a default value on a computed column was a no-op. [#128466][#128466] -- Fixed a bug that could cause spurious user permission errors when multiple databases shared a common schema with a routine referencing a table. The bug had existed since v22.2 when [user-defined functions (UDFs)]({% link v24.1/user-defined-functions.md %}) were introduced. [#126412][#126412] -- Fixed a bug where [`debug zip`]({% link v24.1/cockroach-debug-zip.md %}) would return an error while fetching unstructured/malformed logs. 
[#128605][#128605] -- Fixed a bug where a hash-sharded [constraint]({% link v24.1/constraints.md %}) could not be created if it referred to columns that had a backslash in the name. [#128521][#128521] +- Fixed a bug introduced in v23.2.0 in which CockroachDB would hit an internal error when evaluating [`INSERT`s]({% link v24.1/insert.md %}) into [`REGIONAL BY ROW`]({% link v24.1/alter-table.md %}#set-the-table-locality-to-regional-by-row) tables where the source was a [`VALUES`]({% link v24.1/selection-queries.md %}#values-clause) clause with a single row and at least one Boolean expression. #127277 +- Fixed a bug in which the [`DISCARD`]({% link v24.1/alter-table.md %}) statement was disallowed when the [session setting]({% link v24.1/session-variables.md %}#default-transaction-read-only) `default_transaction_read_only` was set to `on`. #127363 +- In the [DB Console event log]({% link v24.1/ui-overview-dashboard.md %}#events-panel), [`ALTER ROLE`]({% link v24.1/alter-role.md %}) events now display correctly even when no [role options]({% link v24.1/alter-role.md %}#role-options) are included in the `ALTER ROLE` statement. #126568 +- Fixed a formatting issue with the `sql_sequence_cached_node` value of the [`serial_normalization` session setting]({% link v24.1/session-variables.md %}). This could lead to an error connecting to CockroachDB if this value was set as the default for `serial_normalization` via the cluster setting [`sql.defaults.serial_normalization`]({% link v24.1/cluster-settings.md %}#setting-sql-defaults-serial-normalization). #127673 +- Fixed a bug where [dropping `ENUM` values]({% link v24.1/alter-type.md %}#drop-a-value-in-a-user-defined-type) that were referenced by [index expressions]({% link v24.1/expression-indexes.md %}) could fail with an error. #127454 +- Fixed a bug that caused a memory leak when executing SQL statements with [comments]({% link v24.1/comment-on.md %}), for example, `SELECT /* comment */ 1;`. 
Memory owned by a SQL session would continue to grow as these types of statements were executed. The memory would only be released when closing the [SQL session]({% link v24.1/show-sessions.md %}). This bug had been present since v23.1. #127759 +- Fixed a bug where [schema changes]({% link v24.1/online-schema-changes.md %}) could hang if the lease rangefeed stopped receiving updates. #127487 +- Fixed small memory leaks that would occur during [changefeed]({% link v24.1/change-data-capture-overview.md %}) creation. #128018 +- Fixed a memory leak that could occur when specifying a non-existent [virtual cluster]({% link v24.1/cluster-virtualization-overview.md %}) name in the connection string. #128106 +- Fixed a bug where [`CREATE INDEX IF NOT EXISTS`]({% link v24.1/create-index.md %}) would not correctly short-circuit if the given index already existed. #128311 +- Fixed a bug in syntax validation, in which the `DESCENDING` clause was not allowed for non-terminal columns of an [inverted index]({% link v24.1/inverted-indexes.md %}). Only the last column of an inverted index should be prevented from being `DESCENDING`. This is now properly checked. #128311 +- Fixed a bug where an [index]({% link v24.1/indexes.md %}) could store a column in the primary index if that column had a mixed-case name. #128311 +- Setting or dropping a default value on a [computed column]({% link v24.1/computed-columns.md %}) is now blocked, even for `NULL` defaults. Previously, setting or dropping a default value on a computed column was a no-op. #128466 +- Fixed a bug that could cause spurious user permission errors when multiple databases shared a common schema with a routine referencing a table. The bug had existed since v22.2 when [user-defined functions (UDFs)]({% link v24.1/user-defined-functions.md %}) were introduced. #126412 +- Fixed a bug where [`debug zip`]({% link v24.1/cockroach-debug-zip.md %}) would return an error while fetching unstructured/malformed logs. 
#128605 +- Fixed a bug where a hash-sharded [constraint]({% link v24.1/constraints.md %}) could not be created if it referred to columns that had a backslash in the name. #128521 - Fixed a bug in which the output of [`EXPLAIN (OPT, REDACT)`]({% link v24.1/explain.md %}) for various `CREATE` statements was not redacted. This bug had existed since [`EXPLAIN (REDACT)`]({% link v24.1/explain.md %}#parameters) was introduced in v23.1 and affects the following statements: - `EXPLAIN (OPT, REDACT) CREATE TABLE` - `EXPLAIN (OPT, REDACT) CREATE VIEW` - - `EXPLAIN (OPT, REDACT) CREATE FUNCTION` [#128489][#128489] + - `EXPLAIN (OPT, REDACT) CREATE FUNCTION` #128489 -[#126412]: https://github.com/cockroachdb/cockroach/pull/126412 -[#126568]: https://github.com/cockroachdb/cockroach/pull/126568 -[#126959]: https://github.com/cockroachdb/cockroach/pull/126959 -[#127277]: https://github.com/cockroachdb/cockroach/pull/127277 -[#127363]: https://github.com/cockroachdb/cockroach/pull/127363 -[#127390]: https://github.com/cockroachdb/cockroach/pull/127390 -[#127454]: https://github.com/cockroachdb/cockroach/pull/127454 -[#127475]: https://github.com/cockroachdb/cockroach/pull/127475 -[#127487]: https://github.com/cockroachdb/cockroach/pull/127487 -[#127506]: https://github.com/cockroachdb/cockroach/pull/127506 -[#127637]: https://github.com/cockroachdb/cockroach/pull/127637 -[#127673]: https://github.com/cockroachdb/cockroach/pull/127673 -[#127748]: https://github.com/cockroachdb/cockroach/pull/127748 -[#127759]: https://github.com/cockroachdb/cockroach/pull/127759 -[#127766]: https://github.com/cockroachdb/cockroach/pull/127766 -[#128018]: https://github.com/cockroachdb/cockroach/pull/128018 -[#128106]: https://github.com/cockroachdb/cockroach/pull/128106 -[#128185]: https://github.com/cockroachdb/cockroach/pull/128185 -[#128189]: https://github.com/cockroachdb/cockroach/pull/128189 -[#128311]: https://github.com/cockroachdb/cockroach/pull/128311 -[#128324]: 
https://github.com/cockroachdb/cockroach/pull/128324 -[#128333]: https://github.com/cockroachdb/cockroach/pull/128333 -[#128348]: https://github.com/cockroachdb/cockroach/pull/128348 -[#128466]: https://github.com/cockroachdb/cockroach/pull/128466 -[#128489]: https://github.com/cockroachdb/cockroach/pull/128489 -[#128521]: https://github.com/cockroachdb/cockroach/pull/128521 -[#128605]: https://github.com/cockroachdb/cockroach/pull/128605 -[#128625]: https://github.com/cockroachdb/cockroach/pull/128625 -[170656f4a]: https://github.com/cockroachdb/cockroach/commit/170656f4a -[3b9f14556]: https://github.com/cockroachdb/cockroach/commit/3b9f14556 -[633a858c9]: https://github.com/cockroachdb/cockroach/commit/633a858c9 -[656dc596e]: https://github.com/cockroachdb/cockroach/commit/656dc596e -[b5560ebc1]: https://github.com/cockroachdb/cockroach/commit/b5560ebc1 -[d0337dc3d]: https://github.com/cockroachdb/cockroach/commit/d0337dc3d -[eefaac961]: https://github.com/cockroachdb/cockroach/commit/eefaac961 -[f04d25e57]: https://github.com/cockroachdb/cockroach/commit/f04d25e57 diff --git a/src/current/_includes/releases/v24.1/v24.1.5.md b/src/current/_includes/releases/v24.1/v24.1.5.md index 4211f06bf1d..212df0b995e 100644 --- a/src/current/_includes/releases/v24.1/v24.1.5.md +++ b/src/current/_includes/releases/v24.1/v24.1.5.md @@ -5,84 +5,55 @@ Release Date: September 25, 2024 {% include releases/new-release-downloads-docker-image.md release=include.release %}

General changes

-- Upgraded the `grpc` version to v1.56.3. [#130046][#130046] +- Upgraded the `grpc` version to v1.56.3. #130046

{{ site.data.products.enterprise }} edition changes

-- Introduced the new [cluster setting]({% link v24.1/cluster-settings.md %}) `changefeed.protect_timestamp.lag`, which controls when a [changefeed's]({% link v24.1/change-data-capture-overview.md %}) protected timestamp is updated. The [protected timestamp]({% link v24.1/architecture/storage-layer.md %}#protected-timestamps) will only be updated if the set `changefeed.protect_timestamp.lag` value has passed between the last protected timestamp and the [changefeed high watermark]({% link v24.1/how-does-an-enterprise-changefeed-work.md %}). [#129686][#129686] -- [`SHOW CHANGEFEED JOB`]({% link v24.1/show-jobs.md %}#show-changefeed-jobs), `SHOW CHANGEFEED JOBS`, and [`SHOW JOBS`]({% link v24.1/show-jobs.md %}) no longer expose user-sensitive information like `client_key`. [#122636][#122636] +- Introduced the new [cluster setting]({% link v24.1/cluster-settings.md %}) `changefeed.protect_timestamp.lag`, which controls when a [changefeed's]({% link v24.1/change-data-capture-overview.md %}) protected timestamp is updated. The [protected timestamp]({% link v24.1/architecture/storage-layer.md %}#protected-timestamps) will only be updated if the set `changefeed.protect_timestamp.lag` value has passed between the last protected timestamp and the [changefeed high watermark]({% link v24.1/how-does-an-enterprise-changefeed-work.md %}). #129686 +- [`SHOW CHANGEFEED JOB`]({% link v24.1/show-jobs.md %}#show-changefeed-jobs), `SHOW CHANGEFEED JOBS`, and [`SHOW JOBS`]({% link v24.1/show-jobs.md %}) no longer expose user-sensitive information like `client_key`. #122636 - Added two network metrics, `changefeed.network.bytes_in` and `changefeed.network.bytes_out`. These metrics track the number of bytes sent by individual [changefeeds]({% link v24.1/change-data-capture-overview.md %}) to the following sinks: - [Kafka sinks]({% link v24.1/changefeed-sinks.md %}#kafka). 
If [child metrics]({% link v24.1/cluster-settings.md %}#setting-server-child-metrics-enabled) are enabled, the metric will have a `kafka` label. - [Webhook sinks]({% link v24.1/changefeed-sinks.md %}#webhook-sink). If [child metrics]({% link v24.1/cluster-settings.md %}#setting-server-child-metrics-enabled) are enabled, the metric will have a `webhook` label. - [Pub/Sub sinks]({% link v24.1/changefeed-sinks.md %}#google-cloud-pub-sub). If [child metrics]({% link v24.1/cluster-settings.md %}#setting-server-child-metrics-enabled) are enabled, the metric will have a `pubsub` label. - - [SQL sink]({% link v24.1/changefeed-for.md %}). If [child metrics]({% link v24.1/cluster-settings.md %}#setting-server-child-metrics-enabled) are enabled, the metric will have a `sql` label. [#130584][#130584] + - [SQL sink]({% link v24.1/changefeed-for.md %}). If [child metrics]({% link v24.1/cluster-settings.md %}#setting-server-child-metrics-enabled) are enabled, the metric will have a `sql` label. #130584

SQL language changes

-- The session setting [`plan_cache_mode=force_generic_plan`]({% link v24.1/session-variables.md %}) can now be used to force [prepared statements]({% link v24.1/savepoint.md %}#savepoints-and-prepared-statements) to use query plans that are [optimized]({% link v24.1/cost-based-optimizer.md %}) once, and reused in future executions without re-optimization, as long as it does not become stale due to [schema changes]({% link v24.1/online-schema-changes.md %}) or a collection of new [table statistics]({% link v24.1/cost-based-optimizer.md %}#table-statistics). The setting takes effect during `EXECUTE` commands. [`EXPLAIN ANALYZE`]({% link v24.1/explain-analyze.md %}) includes a `plan type` field. If a generic query plan is optimized for the current execution, the `plan type` will be `generic, re-optimized`. If a generic query plan is reused for the current execution without performing optimization, the `plan type` will be `generic, reused`. Otherwise, the `plan type` will be `custom`. [#128085][#128085] -- The session setting [`plan_cache_mode=auto`]({% link v24.1/session-variables.md %}) can now be used to instruct the system to automatically determine whether to use `custom` or `generic` query plans for the execution of a [prepared statement]({% link v24.1/savepoint.md %}#savepoints-and-prepared-statements). Custom query plans are optimized on every execution, while generic plans are [optimized]({% link v24.1/cost-based-optimizer.md %}) once and reused on future executions as-is. Generic query plans are beneficial in cases where query optimization contributes significant overhead to the total cost of executing a query. 
[#128085][#128085] +- The session setting [`plan_cache_mode=force_generic_plan`]({% link v24.1/session-variables.md %}) can now be used to force [prepared statements]({% link v24.1/savepoint.md %}#savepoints-and-prepared-statements) to use query plans that are [optimized]({% link v24.1/cost-based-optimizer.md %}) once, and reused in future executions without re-optimization, as long as it does not become stale due to [schema changes]({% link v24.1/online-schema-changes.md %}) or a collection of new [table statistics]({% link v24.1/cost-based-optimizer.md %}#table-statistics). The setting takes effect during `EXECUTE` commands. [`EXPLAIN ANALYZE`]({% link v24.1/explain-analyze.md %}) includes a `plan type` field. If a generic query plan is optimized for the current execution, the `plan type` will be `generic, re-optimized`. If a generic query plan is reused for the current execution without performing optimization, the `plan type` will be `generic, reused`. Otherwise, the `plan type` will be `custom`. #128085 +- The session setting [`plan_cache_mode=auto`]({% link v24.1/session-variables.md %}) can now be used to instruct the system to automatically determine whether to use `custom` or `generic` query plans for the execution of a [prepared statement]({% link v24.1/savepoint.md %}#savepoints-and-prepared-statements). Custom query plans are optimized on every execution, while generic plans are [optimized]({% link v24.1/cost-based-optimizer.md %}) once and reused on future executions as-is. Generic query plans are beneficial in cases where query optimization contributes significant overhead to the total cost of executing a query. #128085

Operational changes

- There are now structured [logging events]({% link v24.1/logging.md %}) that report connection breakage during node shutdown. Previously, the logs existed, but were unstructured. The logs appear in the [`OPS` logging channel]({% link v24.1/logging.md %}#ops). There are two new events: - The `node_shutdown_connection_timeout` event is logged after the timeout defined by the [`server.shutdown.connections.timeout`]({% link v24.1/cluster-settings.md %}) cluster setting transpires, if there are still open SQL connections. - - The `node_shutdown_transaction_timeout` event is logged after the timeout defined by the [`server.shutdown.transactions.timeout`]({% link v24.1/cluster-settings.md %}) cluster setting transpires, if there are still open transactions on those SQL connections. [#128711][#128711] -- Added a new configuration parameter `server.cidr_mapping_url`, which maps IPv4 CIDR blocks to arbitrary tag names. [#130051][#130051] -- Modified the metrics `sql.bytesin` and `sql.bytesout` to work as aggregate metrics if [child metrics]({% link v24.1/cluster-settings.md %}) are enabled. [#130051][#130051] -- Added three new network-tracking metrics. `rpc.connection.connected` is the number of rRPC TCP level connections established to remote nodes. `rpc.client.bytes.egress` is the number of TCP bytes sent via gRPC on connections CockroachDB initiates. `rpc.client.bytes.ingress` is the number of TCP bytes received via gRPC on connections CockroachDB initiated. [#130050][#130050] -- This commit adds two metrics: `changefeed.network.bytes_in` and `changefeed.network.bytes_out`. These metrics track the number of bytes sent by the individual [changefeeds]({% link v24.1/change-data-capture-overview.md %}) to different [sinks]({% link v24.1/changefeed-sinks.md %}). 
[#130584][#130584] + - The `node_shutdown_transaction_timeout` event is logged after the timeout defined by the [`server.shutdown.transactions.timeout`]({% link v24.1/cluster-settings.md %}) cluster setting transpires, if there are still open transactions on those SQL connections. #128711 +- Added a new configuration parameter `server.cidr_mapping_url`, which maps IPv4 CIDR blocks to arbitrary tag names. #130051 +- Modified the metrics `sql.bytesin` and `sql.bytesout` to work as aggregate metrics if [child metrics]({% link v24.1/cluster-settings.md %}) are enabled. #130051 +- Added three new network-tracking metrics. `rpc.connection.connected` is the number of gRPC TCP level connections established to remote nodes. `rpc.client.bytes.egress` is the number of TCP bytes sent via gRPC on connections CockroachDB initiates. `rpc.client.bytes.ingress` is the number of TCP bytes received via gRPC on connections CockroachDB initiated. #130050 +- This commit adds two metrics: `changefeed.network.bytes_in` and `changefeed.network.bytes_out`. These metrics track the number of bytes sent by the individual [changefeeds]({% link v24.1/change-data-capture-overview.md %}) to different [sinks]({% link v24.1/changefeed-sinks.md %}). #130584

DB Console changes

-- The user experience for [metrics]({% link v24.1/ui-overview-dashboard.md %}) charges in the [DB Console]({% link v24.1/ui-overview.md %}) now has hover behavior that focuses on individual lines and shows values under the mouse pointer. [#128867][#128867] -- Users with the [`VIEWACTIVITY`]({% link v24.1/security-reference/authorization.md %}#supported-privileges) privilege can download statement bundles from the [DB Console]({% link v24.1/ui-overview.md %}). [#129503][#129503] -- Users with the [`VIEWACTIVITY`]({% link v24.1/security-reference/authorization.md %}#supported-privileges) privilege can now request, view, and cancel statement bundles in the [DB Console]({% link v24.1/ui-overview.md %}). [#129805][#129805] -- The [DB Console]({% link v24.1/ui-overview.md %}) will show a notification alerting customers without an {{ site.data.products.enterprise }} license to [upcoming license changes](https://www.cockroachlabs.com/enterprise-license-update/) with a link to more information. [#130510][#130510] +- The user experience for [metrics]({% link v24.1/ui-overview-dashboard.md %}) charts in the [DB Console]({% link v24.1/ui-overview.md %}) now has hover behavior that focuses on individual lines and shows values under the mouse pointer. #128867 +- Users with the [`VIEWACTIVITY`]({% link v24.1/security-reference/authorization.md %}#supported-privileges) privilege can download statement bundles from the [DB Console]({% link v24.1/ui-overview.md %}). #129503 +- Users with the [`VIEWACTIVITY`]({% link v24.1/security-reference/authorization.md %}#supported-privileges) privilege can now request, view, and cancel statement bundles in the [DB Console]({% link v24.1/ui-overview.md %}). #129805 +- The [DB Console]({% link v24.1/ui-overview.md %}) will show a notification alerting customers without an {{ site.data.products.enterprise }} license to [upcoming license changes](https://www.cockroachlabs.com/enterprise-license-update/) with a link to more information. #130510

Bug fixes

-- Previously, [declarative]({% link v24.1/online-schema-changes.md %}) and legacy [schema changes]({% link v24.1/online-schema-changes.md %}) were incorrectly allowed to be run concurrently, which could lead to failing or hung schema change jobs. [#128805][#128805] -- Fixed a bug that caused errors like `ERROR: column 'crdb_internal_idx_expr' does not exist` when accessing a table with an [expression index]({% link v24.1/expression-indexes.md %}) where the expression evaluates to an `ENUM` type, for example, `CREATE INDEX ON t ((col::an_enum))`. [#129093][#129093] -- Fixed a bug where `NaN` or `Inf` could not be used as the default value for a parameter in [`CREATE FUNCTION`]({% link v24.1/create-function.md %}) statements. [#129086][#129086] -- Fixed a bug in which [`SELECT ... FOR UPDATE` and `SELECT ... FOR SHARE`]({% link v24.1/select-for-update.md %}) queries using [`SKIP LOCKED`]({% link v24.1/select-for-update.md %}#wait-policies) and a [`LIMIT` and/or an `OFFSET`]({% link v24.1/limit-offset.md %}) could return incorrect results under [`READ COMMITTED`]({% link v24.1/read-committed.md %}) isolation. This bug was present when support for `SKIP LOCKED` under `READ COMMITTED` isolation was introduced in v24.1.0. [#128101][#128101] -- Fixed a bug in which some [`SELECT ... FOR UPDATE` or `SELECT ... FOR SHARE`]({% link v24.1/select-for-update.md %}) queries using [`SKIP LOCKED`]({% link v24.1/select-for-update.md %}#wait-policies) could still block on locked rows when using [`optimizer_use_lock_op_for_serializable`]({% link v24.1/session-variables.md %}#optimizer-use-lock-op-for-serializable) under [`SERIALIZABLE`]({% link v24.1/demo-serializable.md %}) isolation. This bug was present when `optimizer_use_lock_op_for_serializable` was introduced in v24.1.0. [#128101][#128101] -- Function input parameters for [user-defined functions]({% link v24.1/user-defined-functions.md %}) can no longer be of the `VOID` type, which matches the behavior of PostgreSQL. 
[#129280][#129280] -- Fixed a bug in the public preview of [Write Ahead Log (WAL) Failover]({% link v24.1/cockroach-start.md %}#write-ahead-log-wal-failover) that could prevent a node from starting if it crashed during a failover. [#129367][#129367] -- [Starting nodes]({% link v24.1/cockroach-start.md %}) could fail with: `could not insert session ...: unexpected value`, if an ambiguous result error was encountered when inserting into the `sqlliveness` table. [#129234][#129234] -- Internally issued queries that are not initiated within a [SQL session]({% link v24.1/show-sessions.md %}) no longer respect a [statement timeout]({% link v24.1/session-variables.md %}#statement-timeout). This includes: background [jobs]({% link v24.1/show-jobs.md %}), queries issued by the [DB Console]({% link v24.1/ui-overview.md %}) that perform introspection, and the [Cloud SQL shell]({% link cockroachcloud/sql-shell.md %}). [#129515][#129515] -- Fixed a rare bug in [`SHOW CLUSTER SETTING`]({% link v24.1/show-cluster-setting.md %}) that could cause it to fail with an error like `timed out: value differs between local setting and KV`. [#129756][#129756] -- Fixed a bug where the [`schema_locked` table parameter]({% link v24.1/with-storage-parameter.md %}#table-parameters) did not prevent a table from being referenced by a [foreign key]({% link v24.1/foreign-key.md %}). [#129754][#129754] -- Fixed a bug where the [`require_explicit_primary_keys` session variable]({% link v24.1/session-variables.md %}#require-explicit-primary-keys) would aggressively prevent all [`CREATE TABLE`]({% link v24.1/create-table.md %}) statements from working. [#129907][#129907] -- Fixed a slow-building memory leak when using [Kerberos authentication]({% link v24.1/gssapi_authentication.md %}). [#130318][#130318] -- Fixed a potential memory leak in [changefeeds]({% link v24.1/change-data-capture-overview.md %}) using a [cloud storage sink]({% link v24.1/changefeed-sinks.md %}#cloud-storage-sink). 
The memory leak could occur if both the cluster settings [`changefeed.fast_gzip.enabled`]({% link v24.1/cluster-settings.md %}#setting-changefeed-fast-gzip-enabled) and [`changefeed.cloudstorage.async_flush.enabled`]({% link v24.1/cluster-settings.md %}) are set to `true` and the changefeed received an error while attempting to write to the cloud storage sink. [#130626][#130626] -- Fixed a bug that prevented buffered file sinks from being included when iterating over all file sinks. This led to problems such as the `debug zip` command not being able to fetch logs for a cluster where buffering was enabled. [#130158][#130158] +- Previously, [declarative]({% link v24.1/online-schema-changes.md %}) and legacy [schema changes]({% link v24.1/online-schema-changes.md %}) were incorrectly allowed to be run concurrently, which could lead to failing or hung schema change jobs. #128805 +- Fixed a bug that caused errors like `ERROR: column 'crdb_internal_idx_expr' does not exist` when accessing a table with an [expression index]({% link v24.1/expression-indexes.md %}) where the expression evaluates to an `ENUM` type, for example, `CREATE INDEX ON t ((col::an_enum))`. #129093 +- Fixed a bug where `NaN` or `Inf` could not be used as the default value for a parameter in [`CREATE FUNCTION`]({% link v24.1/create-function.md %}) statements. #129086 +- Fixed a bug in which [`SELECT ... FOR UPDATE` and `SELECT ... FOR SHARE`]({% link v24.1/select-for-update.md %}) queries using [`SKIP LOCKED`]({% link v24.1/select-for-update.md %}#wait-policies) and a [`LIMIT` and/or an `OFFSET`]({% link v24.1/limit-offset.md %}) could return incorrect results under [`READ COMMITTED`]({% link v24.1/read-committed.md %}) isolation. This bug was present when support for `SKIP LOCKED` under `READ COMMITTED` isolation was introduced in v24.1.0. #128101 +- Fixed a bug in which some [`SELECT ... FOR UPDATE` or `SELECT ... 
FOR SHARE`]({% link v24.1/select-for-update.md %}) queries using [`SKIP LOCKED`]({% link v24.1/select-for-update.md %}#wait-policies) could still block on locked rows when using [`optimizer_use_lock_op_for_serializable`]({% link v24.1/session-variables.md %}#optimizer-use-lock-op-for-serializable) under [`SERIALIZABLE`]({% link v24.1/demo-serializable.md %}) isolation. This bug was present when `optimizer_use_lock_op_for_serializable` was introduced in v24.1.0. #128101 +- Function input parameters for [user-defined functions]({% link v24.1/user-defined-functions.md %}) can no longer be of the `VOID` type, which matches the behavior of PostgreSQL. #129280 +- Fixed a bug in the public preview of [Write Ahead Log (WAL) Failover]({% link v24.1/cockroach-start.md %}#write-ahead-log-wal-failover) that could prevent a node from starting if it crashed during a failover. #129367 +- [Starting nodes]({% link v24.1/cockroach-start.md %}) could fail with: `could not insert session ...: unexpected value`, if an ambiguous result error was encountered when inserting into the `sqlliveness` table. #129234 +- Internally issued queries that are not initiated within a [SQL session]({% link v24.1/show-sessions.md %}) no longer respect a [statement timeout]({% link v24.1/session-variables.md %}#statement-timeout). This includes: background [jobs]({% link v24.1/show-jobs.md %}), queries issued by the [DB Console]({% link v24.1/ui-overview.md %}) that perform introspection, and the [Cloud SQL shell]({% link cockroachcloud/sql-shell.md %}). #129515 +- Fixed a rare bug in [`SHOW CLUSTER SETTING`]({% link v24.1/show-cluster-setting.md %}) that could cause it to fail with an error like `timed out: value differs between local setting and KV`. #129756 +- Fixed a bug where the [`schema_locked` table parameter]({% link v24.1/with-storage-parameter.md %}#table-parameters) did not prevent a table from being referenced by a [foreign key]({% link v24.1/foreign-key.md %}). 
#129754 +- Fixed a bug where the [`require_explicit_primary_keys` session variable]({% link v24.1/session-variables.md %}#require-explicit-primary-keys) would aggressively prevent all [`CREATE TABLE`]({% link v24.1/create-table.md %}) statements from working. #129907 +- Fixed a slow-building memory leak when using [Kerberos authentication]({% link v24.1/gssapi_authentication.md %}). #130318 +- Fixed a potential memory leak in [changefeeds]({% link v24.1/change-data-capture-overview.md %}) using a [cloud storage sink]({% link v24.1/changefeed-sinks.md %}#cloud-storage-sink). The memory leak could occur if both the cluster settings [`changefeed.fast_gzip.enabled`]({% link v24.1/cluster-settings.md %}#setting-changefeed-fast-gzip-enabled) and [`changefeed.cloudstorage.async_flush.enabled`]({% link v24.1/cluster-settings.md %}) are set to `true` and the changefeed received an error while attempting to write to the cloud storage sink. #130626 +- Fixed a bug that prevented buffered file sinks from being included when iterating over all file sinks. This led to problems such as the `debug zip` command not being able to fetch logs for a cluster where buffering was enabled. 
#130158 -[#122636]: https://github.com/cockroachdb/cockroach/pull/122636 -[#128085]: https://github.com/cockroachdb/cockroach/pull/128085 -[#128101]: https://github.com/cockroachdb/cockroach/pull/128101 -[#128711]: https://github.com/cockroachdb/cockroach/pull/128711 -[#128805]: https://github.com/cockroachdb/cockroach/pull/128805 -[#128836]: https://github.com/cockroachdb/cockroach/pull/128836 -[#128867]: https://github.com/cockroachdb/cockroach/pull/128867 -[#129086]: https://github.com/cockroachdb/cockroach/pull/129086 -[#129093]: https://github.com/cockroachdb/cockroach/pull/129093 -[#129234]: https://github.com/cockroachdb/cockroach/pull/129234 -[#129280]: https://github.com/cockroachdb/cockroach/pull/129280 -[#129367]: https://github.com/cockroachdb/cockroach/pull/129367 -[#129402]: https://github.com/cockroachdb/cockroach/pull/129402 -[#129503]: https://github.com/cockroachdb/cockroach/pull/129503 -[#129515]: https://github.com/cockroachdb/cockroach/pull/129515 -[#129621]: https://github.com/cockroachdb/cockroach/pull/129621 -[#129686]: https://github.com/cockroachdb/cockroach/pull/129686 -[#129754]: https://github.com/cockroachdb/cockroach/pull/129754 -[#129756]: https://github.com/cockroachdb/cockroach/pull/129756 -[#129805]: https://github.com/cockroachdb/cockroach/pull/129805 -[#129907]: https://github.com/cockroachdb/cockroach/pull/129907 -[#130046]: https://github.com/cockroachdb/cockroach/pull/130046 -[#130050]: https://github.com/cockroachdb/cockroach/pull/130050 -[#130051]: https://github.com/cockroachdb/cockroach/pull/130051 -[#130158]: https://github.com/cockroachdb/cockroach/pull/130158 -[#130318]: https://github.com/cockroachdb/cockroach/pull/130318 -[#130510]: https://github.com/cockroachdb/cockroach/pull/130510 -[#130584]: https://github.com/cockroachdb/cockroach/pull/130584 -[#130626]: https://github.com/cockroachdb/cockroach/pull/130626 diff --git a/src/current/_includes/releases/v24.1/v24.1.6.md 
b/src/current/_includes/releases/v24.1/v24.1.6.md index 5633982897a..b893a070800 100644 --- a/src/current/_includes/releases/v24.1/v24.1.6.md +++ b/src/current/_includes/releases/v24.1/v24.1.6.md @@ -6,15 +6,15 @@ Release Date: October 17, 2024

{{ site.data.products.enterprise }} edition changes

-- Updated the cluster setting [`changefeed.sink_io_workers`]({% link v24.1/cluster-settings.md %}#setting-changefeed-sink-io-workers) with all the [changefeed sinks]({% link v24.1/changefeed-sinks.md %}) that support the setting. [#130373][#130373] +- Updated the cluster setting [`changefeed.sink_io_workers`]({% link v24.1/cluster-settings.md %}#setting-changefeed-sink-io-workers) with all the [changefeed sinks]({% link v24.1/changefeed-sinks.md %}) that support the setting. #130373 - Added two network metrics, `changefeed.network.bytes_in` and `changefeed.network.bytes_out`. These metrics track the number of bytes sent by individual [changefeeds]({% link v24.1/change-data-capture-overview.md %}) to the following sinks: - [Kafka sinks]({% link v24.1/changefeed-sinks.md %}#kafka). If [child metrics are enabled]({% link v24.1/cluster-settings.md %}#setting-server-child-metrics-enabled), the metric will have a `kafka` label. - [Webhook sinks]({% link v24.1/changefeed-sinks.md %}#webhook-sink). If child metrics are enabled, the metric will have a `webhook` label. - [Pub/Sub sinks]({% link v24.1/changefeed-sinks.md %}#google-cloud-pub-sub). If child metrics are enabled, the metric will have a `pubsub` label. - - [SQL sink]({% link v24.1/changefeed-for.md %}). If child metrics are enabled, the metric will have a `sql` label. [#130583][#130583] -- Added a `changefeed.total_ranges` metric that can be used to monitor the number of ranges that are watched by [changefeed]({% link v24.1/change-data-capture-overview.md %}) aggregators. It shares the same polling interval as `changefeed.lagging_ranges`, which is controlled by the existing `lagging_ranges_polling_interval` option. [#130983][#130983] -- Disambiguated [metrics]({% link v24.1/essential-metrics-self-hosted.md %}) and logs for the two buffers used by the KV feed. 
The following metrics now have a suffix indicating which buffer they correspond to: `changefeed.buffer_entries.*`, `changefeed.buffer_entries_mem.*`, `changefeed.buffer_pushback_nanos.*`. The old versions are kept for backward compatibility, though using the new format is recommended. [#131418][#131418] -- Added timers around key parts of the [changefeed]({% link v24.1/change-data-capture-overview.md %}) pipeline to help debug feeds experiencing issues. The `changefeed.stage.{stage}.latency` metrics now emit latency histograms for each stage. The metric respects the changefeed `scope` label for debugging specific feeds. [#131429][#131429] + - [SQL sink]({% link v24.1/changefeed-for.md %}). If child metrics are enabled, the metric will have a `sql` label. #130583 +- Added a `changefeed.total_ranges` metric that can be used to monitor the number of ranges that are watched by [changefeed]({% link v24.1/change-data-capture-overview.md %}) aggregators. It shares the same polling interval as `changefeed.lagging_ranges`, which is controlled by the existing `lagging_ranges_polling_interval` option. #130983 +- Disambiguated [metrics]({% link v24.1/essential-metrics-self-hosted.md %}) and logs for the two buffers used by the KV feed. The following metrics now have a suffix indicating which buffer they correspond to: `changefeed.buffer_entries.*`, `changefeed.buffer_entries_mem.*`, `changefeed.buffer_pushback_nanos.*`. The old versions are kept for backward compatibility, though using the new format is recommended. #131418 +- Added timers around key parts of the [changefeed]({% link v24.1/change-data-capture-overview.md %}) pipeline to help debug feeds experiencing issues. The `changefeed.stage.{stage}.latency` metrics now emit latency histograms for each stage. The metric respects the changefeed `scope` label for debugging specific feeds. #131429

General changes

@@ -22,32 +22,32 @@ Release Date: October 17, 2024

Operational changes

-- Added a `ranges.decommissioning` [metric]({% link v24.1/metrics.md %}) representing the number of ranges that have a replica on a [decommissioning node]({% link v24.1/node-shutdown.md %}?filters=decommission). [#130248][#130248] -- You can now configure the log format for the [`stderr` log sink]({% link v24.1/configure-logs.md %}#output-to-stderr) by setting the `stderr.format` field in the [YAML configuration]({% link v24.1/configure-logs.md %}#yaml-payload). [#131534][#131534] +- Added a `ranges.decommissioning` [metric]({% link v24.1/metrics.md %}) representing the number of ranges that have a replica on a [decommissioning node]({% link v24.1/node-shutdown.md %}?filters=decommission). #130248 +- You can now configure the log format for the [`stderr` log sink]({% link v24.1/configure-logs.md %}#output-to-stderr) by setting the `stderr.format` field in the [YAML configuration]({% link v24.1/configure-logs.md %}#yaml-payload). #131534

DB Console changes

-- The [DB Console]({% link v24.1/ui-overview.md %}) will now show a notification alerting customers without an Enterprise license to [upcoming license changes](https://www.cockroachlabs.com/enterprise-license-update/) with a link to more information. [#130426][#130426] +- The [DB Console]({% link v24.1/ui-overview.md %}) will now show a notification alerting customers without an Enterprise license to [upcoming license changes](https://www.cockroachlabs.com/enterprise-license-update/) with a link to more information. #130426

Bug fixes

-- Fixed a bug in which `SHOW CLUSTER SETTING FOR VIRTUAL CLUSTER` would erroneously return `NULL` for some settings. [#128783][#128783] -- Addressed a bug in the [upgrade]({% link v24.1/upgrade-cockroach-version.md %}) pre-condition for repairing descriptor corruption, which could lead to upgrade finalization being stuck. [#130518][#130518] -- Fixed a potential memory leak in [changefeeds]({% link v24.1/change-data-capture-overview.md %}) using a [cloud storage sink]({% link v24.1/changefeed-sinks.md %}#cloud-storage-sink). The memory leak could occur if both [`changefeed.fast_gzip.enabled`]({% link v24.1/cluster-settings.md %}#setting-changefeed-fast-gzip-enabled) and `changefeed.cloudstorage.async_flush.enabled` were `true`, and the changefeed received an error while attempting to write to the cloud storage sink. [#130601][#130601] -- Fixed a bug in which some [`SELECT FOR UPDATE`]({% link v24.1/select-for-update.md %}) or [`SELECT FOR SHARE`]({% link v24.1/select-for-update.md %}) queries using `NOWAIT` could still block on locked rows when using the `optimizer_use_lock_op_for_serializable` [session setting]({% link v24.1/session-variables.md %}) under [`SERIALIZABLE`]({% link v24.1/demo-serializable.md %}) isolation. This bug was introduced with [`optimizer_use_lock_op_for_serializable`]({% link v24.1/session-variables.md %}#optimizer-use-lock-op-for-serializable) in v23.2.0. [#130429][#130429] -- Fixed a bug that caused the [optimizer]({% link v24.1/cost-based-optimizer.md %}) to plan unnecessary post-query uniqueness checks during [`INSERT`]({% link v24.1/insert.md %}), [`UPSERT`]({% link v24.1/upsert.md %}), and [`UPDATE`]({% link v24.1/update.md %}) statements on tables with [partial]({% link v24.1/partial-indexes.md %}), [unique]({% link v24.1/create-index.md %}#unique-indexes), [hash-sharded indexes]({% link v24.1/hash-sharded-indexes.md %}). 
These unnecessary checks added overhead to execution of these statements, and caused the statements to error when executed under [`READ COMMITTED`]({% link v24.1/read-committed.md %}) isolation. [#130569][#130569] -- Previously, if a connection was attempting a [schema change]({% link v24.1/online-schema-changes.md %}) while the same schema objects were being dropped, it was possible for the connection to be incorrectly dropped. This is now fixed. [#130961][#130961] -- Fixed a bug that could result in the inability to garbage collect an [MVCC]({% link v24.1/architecture/storage-layer.md %}#mvcc) range tombstone within a [global table]({% link v24.1/global-tables.md %}). [#130950][#130950] -- Fixed a bug that could cause errors with the message `internal error: Non-nullable column ...` when executing statements under [`READ COMMITTED`]({% link v24.1/read-committed.md %}) isolation that involved tables with [`NOT NULL`]({% link v24.1/not-null.md %}) virtual columns. [#131064][#131064] -- Fixed a bug that could cause incorrect results for queries containing a correlated subquery with a [`GROUP BY`]({% link v24.1/select-clause.md %}#create-aggregate-groups) or [`DISTINCT ON`]({% link v24.1/select-clause.md %}#eliminate-duplicate-rows) operator referencing an outer column. This issue would occur if the correlated subquery was in the input of a [`SELECT`]({% link v24.1/select-clause.md %}) or [`JOIN`]({% link v24.1/joins.md %}) operator that had a filter equating the outer-column reference to a non-outer column in the grouping operator's input, while the grouping column set did not include the replacement column but functionally determined it. This bug was introduced in v23.1. [#130989][#130989] -- The AWS endpoint and cloud custom HTTP client configuration are now considered when [implicit authentication]({% link v24.1/cloud-storage-authentication.md %}) is used for cloud storage. Previously, these were only considered when using explicit credentials. 
[#131202][#131202] -- Fixed a bug where jobs created in sessions with non-zero session timezone offsets could hang before starting or report incorrect creation times when viewed in [`SHOW JOBS`]({% link v24.1/show-jobs.md %}) and the DB Console. [#131406][#131406] -- Fixed a bug that could prevent a [changefeed]({% link v24.1/change-data-capture-overview.md %}) from being able to resume after being paused for a prolonged period of time. [#130920][#130920] -- Fixed a bug where backup schedules could advance a protected timestamp too early, which caused incremental backups to fail. [#131390][#131390] +- Fixed a bug in which `SHOW CLUSTER SETTING FOR VIRTUAL CLUSTER` would erroneously return `NULL` for some settings. #128783 +- Addressed a bug in the [upgrade]({% link v24.1/upgrade-cockroach-version.md %}) pre-condition for repairing descriptor corruption, which could lead to upgrade finalization being stuck. #130518 +- Fixed a potential memory leak in [changefeeds]({% link v24.1/change-data-capture-overview.md %}) using a [cloud storage sink]({% link v24.1/changefeed-sinks.md %}#cloud-storage-sink). The memory leak could occur if both [`changefeed.fast_gzip.enabled`]({% link v24.1/cluster-settings.md %}#setting-changefeed-fast-gzip-enabled) and `changefeed.cloudstorage.async_flush.enabled` were `true`, and the changefeed received an error while attempting to write to the cloud storage sink. #130601 +- Fixed a bug in which some [`SELECT FOR UPDATE`]({% link v24.1/select-for-update.md %}) or [`SELECT FOR SHARE`]({% link v24.1/select-for-update.md %}) queries using `NOWAIT` could still block on locked rows when using the `optimizer_use_lock_op_for_serializable` [session setting]({% link v24.1/session-variables.md %}) under [`SERIALIZABLE`]({% link v24.1/demo-serializable.md %}) isolation. This bug was introduced with [`optimizer_use_lock_op_for_serializable`]({% link v24.1/session-variables.md %}#optimizer-use-lock-op-for-serializable) in v23.2.0. 
#130429 +- Fixed a bug that caused the [optimizer]({% link v24.1/cost-based-optimizer.md %}) to plan unnecessary post-query uniqueness checks during [`INSERT`]({% link v24.1/insert.md %}), [`UPSERT`]({% link v24.1/upsert.md %}), and [`UPDATE`]({% link v24.1/update.md %}) statements on tables with [partial]({% link v24.1/partial-indexes.md %}), [unique]({% link v24.1/create-index.md %}#unique-indexes), [hash-sharded indexes]({% link v24.1/hash-sharded-indexes.md %}). These unnecessary checks added overhead to execution of these statements, and caused the statements to error when executed under [`READ COMMITTED`]({% link v24.1/read-committed.md %}) isolation. #130569 +- Previously, if a connection was attempting a [schema change]({% link v24.1/online-schema-changes.md %}) while the same schema objects were being dropped, it was possible for the connection to be incorrectly dropped. This is now fixed. #130961 +- Fixed a bug that could result in the inability to garbage collect an [MVCC]({% link v24.1/architecture/storage-layer.md %}#mvcc) range tombstone within a [global table]({% link v24.1/global-tables.md %}). #130950 +- Fixed a bug that could cause errors with the message `internal error: Non-nullable column ...` when executing statements under [`READ COMMITTED`]({% link v24.1/read-committed.md %}) isolation that involved tables with [`NOT NULL`]({% link v24.1/not-null.md %}) virtual columns. #131064 +- Fixed a bug that could cause incorrect results for queries containing a correlated subquery with a [`GROUP BY`]({% link v24.1/select-clause.md %}#create-aggregate-groups) or [`DISTINCT ON`]({% link v24.1/select-clause.md %}#eliminate-duplicate-rows) operator referencing an outer column. 
This issue would occur if the correlated subquery was in the input of a [`SELECT`]({% link v24.1/select-clause.md %}) or [`JOIN`]({% link v24.1/joins.md %}) operator that had a filter equating the outer-column reference to a non-outer column in the grouping operator's input, while the grouping column set did not include the replacement column but functionally determined it. This bug was introduced in v23.1. #130989 +- The AWS endpoint and cloud custom HTTP client configuration are now considered when [implicit authentication]({% link v24.1/cloud-storage-authentication.md %}) is used for cloud storage. Previously, these were only considered when using explicit credentials. #131202 +- Fixed a bug where jobs created in sessions with non-zero session timezone offsets could hang before starting or report incorrect creation times when viewed in [`SHOW JOBS`]({% link v24.1/show-jobs.md %}) and the DB Console. #131406 +- Fixed a bug that could prevent a [changefeed]({% link v24.1/change-data-capture-overview.md %}) from being able to resume after being paused for a prolonged period of time. #130920 +- Fixed a bug where backup schedules could advance a protected timestamp too early, which caused incremental backups to fail. #131390

Performance improvements

-- The [query optimizer]({% link v24.1/cost-based-optimizer.md %}) now plans limited, [partial index]({% link v24.1/partial-indexes.md %}) scans in more cases when the new session setting [`optimizer_push_limit_into_project_filtered_scan`]({% link v24.1/session-variables.md %}) is set to `on`. [#130336][#130336] +- The [query optimizer]({% link v24.1/cost-based-optimizer.md %}) now plans limited, [partial index]({% link v24.1/partial-indexes.md %}) scans in more cases when the new session setting [`optimizer_push_limit_into_project_filtered_scan`]({% link v24.1/session-variables.md %}) is set to `on`. #130336
@@ -57,36 +57,3 @@ This release includes 85 merged PRs by 39 authors.
-[#128783]: https://github.com/cockroachdb/cockroach/pull/128783 -[#130248]: https://github.com/cockroachdb/cockroach/pull/130248 -[#130336]: https://github.com/cockroachdb/cockroach/pull/130336 -[#130373]: https://github.com/cockroachdb/cockroach/pull/130373 -[#130426]: https://github.com/cockroachdb/cockroach/pull/130426 -[#130429]: https://github.com/cockroachdb/cockroach/pull/130429 -[#130518]: https://github.com/cockroachdb/cockroach/pull/130518 -[#130569]: https://github.com/cockroachdb/cockroach/pull/130569 -[#130583]: https://github.com/cockroachdb/cockroach/pull/130583 -[#130601]: https://github.com/cockroachdb/cockroach/pull/130601 -[#130672]: https://github.com/cockroachdb/cockroach/pull/130672 -[#130677]: https://github.com/cockroachdb/cockroach/pull/130677 -[#130920]: https://github.com/cockroachdb/cockroach/pull/130920 -[#130950]: https://github.com/cockroachdb/cockroach/pull/130950 -[#130961]: https://github.com/cockroachdb/cockroach/pull/130961 -[#130983]: https://github.com/cockroachdb/cockroach/pull/130983 -[#130989]: https://github.com/cockroachdb/cockroach/pull/130989 -[#131064]: https://github.com/cockroachdb/cockroach/pull/131064 -[#131202]: https://github.com/cockroachdb/cockroach/pull/131202 -[#131240]: https://github.com/cockroachdb/cockroach/pull/131240 -[#131312]: https://github.com/cockroachdb/cockroach/pull/131312 -[#131390]: https://github.com/cockroachdb/cockroach/pull/131390 -[#131406]: https://github.com/cockroachdb/cockroach/pull/131406 -[#131418]: https://github.com/cockroachdb/cockroach/pull/131418 -[#131429]: https://github.com/cockroachdb/cockroach/pull/131429 -[#131534]: https://github.com/cockroachdb/cockroach/pull/131534 -[#131596]: https://github.com/cockroachdb/cockroach/pull/131596 -[040f8b2cf]: https://github.com/cockroachdb/cockroach/commit/040f8b2cf -[2fa598d59]: https://github.com/cockroachdb/cockroach/commit/2fa598d59 -[4174b9eee]: https://github.com/cockroachdb/cockroach/commit/4174b9eee -[5dfdef095]: 
https://github.com/cockroachdb/cockroach/commit/5dfdef095 -[87c44cc41]: https://github.com/cockroachdb/cockroach/commit/87c44cc41 -[969700d5f]: https://github.com/cockroachdb/cockroach/commit/969700d5f diff --git a/src/current/_includes/releases/v24.1/v24.1.7.md b/src/current/_includes/releases/v24.1/v24.1.7.md index f0d339f9c81..4fb5e922bb8 100644 --- a/src/current/_includes/releases/v24.1/v24.1.7.md +++ b/src/current/_includes/releases/v24.1/v24.1.7.md @@ -6,92 +6,49 @@ Release Date: November 18, 2024

Security updates

-- You can now authenticate to the DB Console by passing a JWT as the bearer token. [#133536][#133536] +- You can now authenticate to the DB Console by passing a JWT as the bearer token. #133536

General changes

-- Changed the license `cockroach` is distributed under to the new CockroachDB Software License. [#131706][#131706] [#131939][#131939] [#131950][#131950] [#131982][#131982] [#132004][#132004] [#132005][#132005] [#132007][#132007] [#132009][#132009] [#132008][#132008] [#132010][#132010] [#132017][#132017] [#132057][#132057] [#132802][#132802] [#132750][#132750] -- Attempting to install a second Enterprise Trial license on the same cluster will now fail. [#131972][#131972] -- The cluster setting `diagnostics.reporting.enabled` is now ignored if the cluster has an Enterprise Trial or Enterprise Free license or if the license cannot be loaded. [#132463][#132463] -- The new metrics `changefeed.sink_errors` and `changefeed.internal_retry_message_count` allow you to observe the rate of errors and internal retries for a sink, respectively. [#132352][#132352] -- Added a timer for inner sink client flushes. [#133196][#133196] +- Changed the license `cockroach` is distributed under to the new CockroachDB Software License. #131706 #131939 #131950 #131982 #132004 #132005 #132007 #132009 #132008 #132010 #132017 #132057 #132802 #132750 +- Attempting to install a second Enterprise Trial license on the same cluster will now fail. #131972 +- The cluster setting `diagnostics.reporting.enabled` is now ignored if the cluster has an Enterprise Trial or Enterprise Free license or if the license cannot be loaded. #132463 +- The new metrics `changefeed.sink_errors` and `changefeed.internal_retry_message_count` allow you to observe the rate of errors and internal retries for a sink, respectively. #132352 +- Added a timer for inner sink client flushes. #133196

DB Console changes

-- Some charts on the **Overview** and **Replication** metrics dashboard pages have more terse legends to facilitate easier browsing. [#129358][#129358] -- The DB Console now shows a warning if the cluster is throttled or will be throttled soon due to an expired Enterprise Free or Enterprise Trial license or missing telemetry data. Clusters with an Enterprise license are not throttled. [#132091][#132091] -- The **Range Count** column on the **Databases** page is no longer shown due to performance issues. This data is still available via the `SHOW RANGES` command.[#133269][#133269] +- Some charts on the **Overview** and **Replication** metrics dashboard pages have more terse legends to facilitate easier browsing. #129358 +- The DB Console now shows a warning if the cluster is throttled or will be throttled soon due to an expired Enterprise Free or Enterprise Trial license or missing telemetry data. Clusters with an Enterprise license are not throttled. #132091 +- The **Range Count** column on the **Databases** page is no longer shown due to performance issues. This data is still available via the `SHOW RANGES` command. #133269

Bug fixes

-- Fixed a bug where timers were not correctly registered with the metric system. [#133196][#133196] -- Fixed a bug where the command-line interface would not correctly escape JSON values that had double quotes inside a string when using the `--format=sql` flag. [#131930][#131930] -- Fixed an error that could occur if a `SET` command used an aggregate function as the value. [#131959][#131959] -- Added automated clean-up and validation for dropped roles inside of default privileges. [#132135][#132135] -- Fixed a bug that could cause `RESTORE` to hang after encountering transient errors from the storage layer. [#132259][#132259] -- Fixed a rare bug that could prevent a backup from being restored and could cause the error `rewriting descriptor ids: missing rewrite for in SequenceOwner...`. This bug could occur only if a `DROP COLUMN` operation dropped a sequence while the backup was running. [#132325][#132325] -- Fixed a bug that caused incorrect evaluation of a `CASE`, `COALESCE`, or `IF` expression with a branch that produced fixed-width string-like types, such as `CHAR`. [#130890][#130890] -- Fixed a bug that could cause the `BPCHAR` type to incorrectly impose a length limit of 1. [#130890][#130890] -- Fixed a bug introduced before v23.1 that could cause incorrect results when a join evaluates columns with equivalent but non-identical types, such as `OID` and `REGCLASS`, for equality. The issue arises when the join performs an index lookup on an index that includes a computed column referencing one of the equivalent columns. [#132509][#132509] -- Fixed a bug introduced before v23.1 that could cause a composite sensitive expression to compare differently if comparing equivalent but non-identical input values, such as `2.0::DECIMAL` and `2.00::DECIMAL`. The issue arises when the join performs an index lookup on a table with a computed index column where the computed column expression is composite sensitive. 
[#132509][#132509] -- Fixed a bug where a span statistics request on a mixed-version cluster could result in a null pointer exception. [#132681][#132681] -- Updated the `franz-go` library to fix a potential deadlock when a changefeed restarts. [#132785][#132785] -- Fixed a bug where a changefeed could fail to update protected timestamp records after a retryable error. [#132775][#132775] -- Fixed a bug where a changefeed that used change data capture queries could fail after a system table was garbage collected. [#131648][#131648] -- Fixed a rare bug introduced in v22.2 where an update of a primary key column could fail to update the primary index if it is also the only column in a separate column family. [#132122][#132122] -- Fixed a bug where the `proretset` column of the `pg_catalog.pg_proc` table was incorrectly set to `false` for builtin functions that return a set. [#132875][#132875] -- Fixed a bug that could cause incorrect evaluation of scalar expressions with `NULL` values. [#132947][#132947] -- Fixed a rare bug in the query optimizer that could cause a node to crash if a query contained a filter in the form `col IN (elem0, elem1, ..., elemN)` when `N` is very large, in the order of millions, and when `col` exists in a hash-sharded index or when a table with an indexed computed column depends on `col`. [#133065][#133065] -- Fixed a bug where an `ALTER DEFAULT PRIVILEGES FOR target_role ...` command could result in an erroneous privilege error when run by a user with the `admin` role. [#133070][#133070] -- Fixed a bug where a `REASSIGN OWNED BY` command would fail to transfer ownership of the public schema, even when the schema was owned by the target role. [#133070][#133070] -- Fixed a panic when resolving the types of an `AS OF SYSTEM TIME` expression. [#132454][#132454] +- Fixed a bug where timers were not correctly registered with the metric system. 
#133196 +- Fixed a bug where the command-line interface would not correctly escape JSON values that had double quotes inside a string when using the `--format=sql` flag. #131930 +- Fixed an error that could occur if a `SET` command used an aggregate function as the value. #131959 +- Added automated clean-up and validation for dropped roles inside of default privileges. #132135 +- Fixed a bug that could cause `RESTORE` to hang after encountering transient errors from the storage layer. #132259 +- Fixed a rare bug that could prevent a backup from being restored and could cause the error `rewriting descriptor ids: missing rewrite for in SequenceOwner...`. This bug could occur only if a `DROP COLUMN` operation dropped a sequence while the backup was running. #132325 +- Fixed a bug that caused incorrect evaluation of a `CASE`, `COALESCE`, or `IF` expression with a branch that produced fixed-width string-like types, such as `CHAR`. #130890 +- Fixed a bug that could cause the `BPCHAR` type to incorrectly impose a length limit of 1. #130890 +- Fixed a bug introduced before v23.1 that could cause incorrect results when a join evaluates columns with equivalent but non-identical types, such as `OID` and `REGCLASS`, for equality. The issue arises when the join performs an index lookup on an index that includes a computed column referencing one of the equivalent columns. #132509 +- Fixed a bug introduced before v23.1 that could cause a composite sensitive expression to compare differently if comparing equivalent but non-identical input values, such as `2.0::DECIMAL` and `2.00::DECIMAL`. The issue arises when the join performs an index lookup on a table with a computed index column where the computed column expression is composite sensitive. #132509 +- Fixed a bug where a span statistics request on a mixed-version cluster could result in a null pointer exception. #132681 +- Updated the `franz-go` library to fix a potential deadlock when a changefeed restarts. 
#132785 +- Fixed a bug where a changefeed could fail to update protected timestamp records after a retryable error. #132775 +- Fixed a bug where a changefeed that used change data capture queries could fail after a system table was garbage collected. #131648 +- Fixed a rare bug introduced in v22.2 where an update of a primary key column could fail to update the primary index if it is also the only column in a separate column family. #132122 +- Fixed a bug where the `proretset` column of the `pg_catalog.pg_proc` table was incorrectly set to `false` for builtin functions that return a set. #132875 +- Fixed a bug that could cause incorrect evaluation of scalar expressions with `NULL` values. #132947 +- Fixed a rare bug in the query optimizer that could cause a node to crash if a query contained a filter in the form `col IN (elem0, elem1, ..., elemN)` when `N` is very large, in the order of millions, and when `col` exists in a hash-sharded index or when a table with an indexed computed column depends on `col`. #133065 +- Fixed a bug where an `ALTER DEFAULT PRIVILEGES FOR target_role ...` command could result in an erroneous privilege error when run by a user with the `admin` role. #133070 +- Fixed a bug where a `REASSIGN OWNED BY` command would fail to transfer ownership of the public schema, even when the schema was owned by the target role. #133070 +- Fixed a panic when resolving the types of an `AS OF SYSTEM TIME` expression. #132454

Performance improvements

-- Reduced the write-amplification impact of rebalances by splitting snapshot sorted string table (SST) files into smaller ones before ingesting them into Pebble. [#129018][#129018] -- Performance has been improved during periodic polling of table history when `schema_locked` is not used. [#132190][#132190] +- Reduced the write-amplification impact of rebalances by splitting snapshot sorted string table (SST) files into smaller ones before ingesting them into Pebble. #129018 +- Performance has been improved during periodic polling of table history when `schema_locked` is not used. #132190 -[#129018]: https://github.com/cockroachdb/cockroach/pull/129018 -[#129358]: https://github.com/cockroachdb/cockroach/pull/129358 -[#130890]: https://github.com/cockroachdb/cockroach/pull/130890 -[#131648]: https://github.com/cockroachdb/cockroach/pull/131648 -[#131706]: https://github.com/cockroachdb/cockroach/pull/131706 -[#131930]: https://github.com/cockroachdb/cockroach/pull/131930 -[#131939]: https://github.com/cockroachdb/cockroach/pull/131939 -[#131950]: https://github.com/cockroachdb/cockroach/pull/131950 -[#131959]: https://github.com/cockroachdb/cockroach/pull/131959 -[#131972]: https://github.com/cockroachdb/cockroach/pull/131972 -[#131982]: https://github.com/cockroachdb/cockroach/pull/131982 -[#132004]: https://github.com/cockroachdb/cockroach/pull/132004 -[#132005]: https://github.com/cockroachdb/cockroach/pull/132005 -[#132007]: https://github.com/cockroachdb/cockroach/pull/132007 -[#132008]: https://github.com/cockroachdb/cockroach/pull/132008 -[#132009]: https://github.com/cockroachdb/cockroach/pull/132009 -[#132010]: https://github.com/cockroachdb/cockroach/pull/132010 -[#132017]: https://github.com/cockroachdb/cockroach/pull/132017 -[#132057]: https://github.com/cockroachdb/cockroach/pull/132057 -[#132091]: https://github.com/cockroachdb/cockroach/pull/132091 -[#132122]: https://github.com/cockroachdb/cockroach/pull/132122 -[#132135]: 
https://github.com/cockroachdb/cockroach/pull/132135 -[#132190]: https://github.com/cockroachdb/cockroach/pull/132190 -[#132259]: https://github.com/cockroachdb/cockroach/pull/132259 -[#132325]: https://github.com/cockroachdb/cockroach/pull/132325 -[#132352]: https://github.com/cockroachdb/cockroach/pull/132352 -[#132454]: https://github.com/cockroachdb/cockroach/pull/132454 -[#132463]: https://github.com/cockroachdb/cockroach/pull/132463 -[#132509]: https://github.com/cockroachdb/cockroach/pull/132509 -[#132681]: https://github.com/cockroachdb/cockroach/pull/132681 -[#132750]: https://github.com/cockroachdb/cockroach/pull/132750 -[#132775]: https://github.com/cockroachdb/cockroach/pull/132775 -[#132785]: https://github.com/cockroachdb/cockroach/pull/132785 -[#132802]: https://github.com/cockroachdb/cockroach/pull/132802 -[#132875]: https://github.com/cockroachdb/cockroach/pull/132875 -[#132947]: https://github.com/cockroachdb/cockroach/pull/132947 -[#132958]: https://github.com/cockroachdb/cockroach/pull/132958 -[#133065]: https://github.com/cockroachdb/cockroach/pull/133065 -[#133070]: https://github.com/cockroachdb/cockroach/pull/133070 -[#133196]: https://github.com/cockroachdb/cockroach/pull/133196 -[#133269]: https://github.com/cockroachdb/cockroach/pull/133269 -[#133474]: https://github.com/cockroachdb/cockroach/pull/133474 -[#133536]: https://github.com/cockroachdb/cockroach/pull/133536 diff --git a/src/current/_includes/releases/v24.1/v24.1.8.md b/src/current/_includes/releases/v24.1/v24.1.8.md index a6b427cf04b..77ff5a225c5 100644 --- a/src/current/_includes/releases/v24.1/v24.1.8.md +++ b/src/current/_includes/releases/v24.1/v24.1.8.md @@ -6,68 +6,39 @@ Release Date: December 12, 2024

Security updates

-- All cluster settings that accept strings are now fully redacted when transmitted as part of our diagnostics telemetry. The transmitted payload includes a record of modified cluster settings and their values when they are not strings. If you previously applied the mitigations in [Technical Advisory 133479]({% link advisories/a133479.md %}), you can safely set the value of cluster setting `server.redact_sensitive_settings.enabled` to false and turn on diagnostic reporting via the `diagnostics.reporting.enabled` cluster setting without leaking sensitive cluster settings values. [#134016][#134016] +- All cluster settings that accept strings are now fully redacted when transmitted as part of our diagnostics telemetry. The transmitted payload includes a record of modified cluster settings and their values when they are not strings. If you previously applied the mitigations in [Technical Advisory 133479]({% link advisories/a133479.md %}), you can safely set the value of cluster setting `server.redact_sensitive_settings.enabled` to false and turn on diagnostic reporting via the `diagnostics.reporting.enabled` cluster setting without leaking sensitive cluster settings values. #134016

General changes

-- `COCKROACH_SKIP_ENABLING_DIAGNOSTIC_REPORTING` is no longer mentioned in the `cockroach demo` command. [#134087][#134087] -- Added `system.users` to the list of system tables that changefeeds protect with protected timestamps (PTS). This table is required for change data capture queries. [#134836][#134836] -- Added changefeed support for the `mvcc_timestamp` option with the `avro` format. If both options are specified, the `avro` schema includes an `mvcc_timestamp` metadata field and emits the row's MVCC timestamp with the row data. [#136482][#136482] +- `COCKROACH_SKIP_ENABLING_DIAGNOSTIC_REPORTING` is no longer mentioned in the `cockroach demo` command. #134087 +- Added `system.users` to the list of system tables that changefeeds protect with protected timestamps (PTS). This table is required for change data capture queries. #134836 +- Added changefeed support for the `mvcc_timestamp` option with the `avro` format. If both options are specified, the `avro` schema includes an `mvcc_timestamp` metadata field and emits the row's MVCC timestamp with the row data. #136482

Operational changes

-- To prevent unnecessary queuing in admission control CPU queues, the `goschedstats.always_use_short_sample_period.enabled` setting should be set to `true` for any production cluster. [#133583][#133583] -- A new cluster setting `ui.database_locality_metadata.enabled`, when set to `true`, disables loading database and table region information in the DB Console Database and Table pages. This information can cause significant CPU load on large clusters with many ranges. The Database and Table pages in v24.3 onwards do not have this problem. If you require this data, use the `SHOW RANGES FROM {DATABASE|TABLE}` SQL statement to compute this information on-demand. [#134094][#134094] -- The row-level TTL job now will periodically log progress by showing the number of table spans that have been processed so far. [#135179][#135179] +- To prevent unnecessary queuing in admission control CPU queues, the `goschedstats.always_use_short_sample_period.enabled` setting should be set to `true` for any production cluster. #133583 +- A new cluster setting `ui.database_locality_metadata.enabled`, when set to `true`, disables loading database and table region information in the DB Console Database and Table pages. This information can cause significant CPU load on large clusters with many ranges. The Database and Table pages in v24.3 onwards do not have this problem. If you require this data, use the `SHOW RANGES FROM {DATABASE|TABLE}` SQL statement to compute this information on-demand. #134094 +- The row-level TTL job now will periodically log progress by showing the number of table spans that have been processed so far. #135179

Bug fixes

-- Fixed a bug where CockroachDB could encounter an internal error `interface conversion: coldata.Column is` in an edge case. The bug was present in v22.2.13+, v23.1.9+, and v23.2+. [#133760][#133760] -- Fixed an unhandled error that could occur when using `REVOKE ... ON SEQUENCE ... FROM user` on an object that is not a sequence. [#133708][#133708] -- Fixed a bug that caused incorrect `NOT NULL` constraint violation errors on `UPSERT` and `INSERT ... ON CONFLICT ... DO UPDATE` statements when those statements updated an existing row and a subset of columns that did not include a `NOT NULL` column of the table. This bug had been present since at least v20.1.0. [#133822][#133822] -- Addressed a panic that could occur inside `CREATE TABLE AS` if sequence builtin expressions had invalid function overloads. [#133868][#133868] -- String constants can now be compared against collated strings. [#134105][#134105] -- Previously, when executing queries with index / lookup joins where ordering needed to be maintained, CockroachDB's behavior could lead to increased query latency, potentially by several orders of magnitude. This bug was introduced in v22.2, and is now fixed. [#134365][#134365] -- Fixed a bug where `DISCARD ALL` statements were counted under the `sql.ddl.count` metric. Now these statements will be counted under the `sql.misc.count` metric. [#134508][#134508] -- Fixed a bug where `DROP CASCADE` would occasionally panic with an `un-dropped backref` message on partitioned tables. [#134522][#134522] -- Reduced the duration of partitions in the gossip network when a node crashes. This eliminates false positives in the `ranges.unavailable` metric. [#134601][#134601] -- As a non-admin user who runs `DROP ROLE IF EXISTS` on a user that does not exist, you no longer get an error message. [#134968][#134968] -- Fixed a bug that caused quotes around the name of a routine to be dropped when the routine was called within another routine. 
This could prevent the correct routine name from being resolved if the nested routine name was case-sensitive. The bug has existed since v24.1, when nested routines were introduced. [#134001][#134001] -- Fixed a bug that could cause incorrect query results when the optimizer planned a lookup join on an index containing a column of type `CHAR(N)`, `VARCHAR(N)`, `BIT(N)`, `VARBIT(N)`, or `DECIMAL(M, N)`, and the query held that column constant to a single value (for example, with an equality filter). [#135111][#135111] -- Fixed an unhandled error that would occur if `DROP SCHEMA` was executed on the `public` schema of the `system` database, or on an internal schema, such as `pg_catalog` or `information_schema`. [#135195][#135195] -- Fixed a bug that caused incorrect evaluation of some binary expressions involving `CHAR(N)` values and untyped string literals with trailing whitespace characters. For example, the expression `'f'::CHAR = 'f '` now correctly evaluates to `true`. [#135690][#135690] -- `CREATE SCHEMA` now returns the correct error if a the schema name is missing. [#135926][#135926] +- Fixed a bug where CockroachDB could encounter an internal error `interface conversion: coldata.Column is` in an edge case. The bug was present in v22.2.13+, v23.1.9+, and v23.2+. #133760 +- Fixed an unhandled error that could occur when using `REVOKE ... ON SEQUENCE ... FROM user` on an object that is not a sequence. #133708 +- Fixed a bug that caused incorrect `NOT NULL` constraint violation errors on `UPSERT` and `INSERT ... ON CONFLICT ... DO UPDATE` statements when those statements updated an existing row and a subset of columns that did not include a `NOT NULL` column of the table. This bug had been present since at least v20.1.0. #133822 +- Addressed a panic that could occur inside `CREATE TABLE AS` if sequence builtin expressions had invalid function overloads. #133868 +- String constants can now be compared against collated strings. 
#134105 +- Previously, when executing queries with index / lookup joins where ordering needed to be maintained, CockroachDB's behavior could lead to increased query latency, potentially by several orders of magnitude. This bug was introduced in v22.2, and is now fixed. #134365 +- Fixed a bug where `DISCARD ALL` statements were counted under the `sql.ddl.count` metric. Now these statements will be counted under the `sql.misc.count` metric. #134508 +- Fixed a bug where `DROP CASCADE` would occasionally panic with an `un-dropped backref` message on partitioned tables. #134522 +- Reduced the duration of partitions in the gossip network when a node crashes. This eliminates false positives in the `ranges.unavailable` metric. #134601 +- As a non-admin user who runs `DROP ROLE IF EXISTS` on a user that does not exist, you no longer get an error message. #134968 +- Fixed a bug that caused quotes around the name of a routine to be dropped when the routine was called within another routine. This could prevent the correct routine name from being resolved if the nested routine name was case-sensitive. The bug has existed since v24.1, when nested routines were introduced. #134001 +- Fixed a bug that could cause incorrect query results when the optimizer planned a lookup join on an index containing a column of type `CHAR(N)`, `VARCHAR(N)`, `BIT(N)`, `VARBIT(N)`, or `DECIMAL(M, N)`, and the query held that column constant to a single value (for example, with an equality filter). #135111 +- Fixed an unhandled error that would occur if `DROP SCHEMA` was executed on the `public` schema of the `system` database, or on an internal schema, such as `pg_catalog` or `information_schema`. #135195 +- Fixed a bug that caused incorrect evaluation of some binary expressions involving `CHAR(N)` values and untyped string literals with trailing whitespace characters. For example, the expression `'f'::CHAR = 'f '` now correctly evaluates to `true`. 
#135690 +- `CREATE SCHEMA` now returns the correct error if the schema name is missing. #135926

Performance improvements

-- Unnecessary block loads of SSTable files are now avoided in some rare cases after a replica rebalance. [#134541][#134541] +- Unnecessary block loads of SSTable files are now avoided in some rare cases after a replica rebalance. #134541 -[#133583]: https://github.com/cockroachdb/cockroach/pull/133583 -[#133708]: https://github.com/cockroachdb/cockroach/pull/133708 -[#133760]: https://github.com/cockroachdb/cockroach/pull/133760 -[#133822]: https://github.com/cockroachdb/cockroach/pull/133822 -[#133868]: https://github.com/cockroachdb/cockroach/pull/133868 -[#134001]: https://github.com/cockroachdb/cockroach/pull/134001 -[#134016]: https://github.com/cockroachdb/cockroach/pull/134016 -[#134087]: https://github.com/cockroachdb/cockroach/pull/134087 -[#134094]: https://github.com/cockroachdb/cockroach/pull/134094 -[#134100]: https://github.com/cockroachdb/cockroach/pull/134100 -[#134105]: https://github.com/cockroachdb/cockroach/pull/134105 -[#134365]: https://github.com/cockroachdb/cockroach/pull/134365 -[#134446]: https://github.com/cockroachdb/cockroach/pull/134446 -[#134508]: https://github.com/cockroachdb/cockroach/pull/134508 -[#134522]: https://github.com/cockroachdb/cockroach/pull/134522 -[#134541]: https://github.com/cockroachdb/cockroach/pull/134541 -[#134601]: https://github.com/cockroachdb/cockroach/pull/134601 -[#134648]: https://github.com/cockroachdb/cockroach/pull/134648 -[#134731]: https://github.com/cockroachdb/cockroach/pull/134731 -[#134836]: https://github.com/cockroachdb/cockroach/pull/134836 -[#134968]: https://github.com/cockroachdb/cockroach/pull/134968 -[#135111]: https://github.com/cockroachdb/cockroach/pull/135111 -[#135179]: https://github.com/cockroachdb/cockroach/pull/135179 -[#135195]: https://github.com/cockroachdb/cockroach/pull/135195 -[#135614]: https://github.com/cockroachdb/cockroach/pull/135614 -[#135690]: https://github.com/cockroachdb/cockroach/pull/135690 -[#135926]: https://github.com/cockroachdb/cockroach/pull/135926 
-[#136008]: https://github.com/cockroachdb/cockroach/pull/136008 -[#136482]: https://github.com/cockroachdb/cockroach/pull/136482 diff --git a/src/current/_includes/releases/v24.1/v24.1.9.md b/src/current/_includes/releases/v24.1/v24.1.9.md index c77a46b6639..a2e57019a6b 100644 --- a/src/current/_includes/releases/v24.1/v24.1.9.md +++ b/src/current/_includes/releases/v24.1/v24.1.9.md @@ -6,7 +6,6 @@ Release Date: December 26, 2024

SQL language changes

-- Added the `legacy_varchar_typing` session setting. When set to `on`, type-checking comparisons involving `VARCHAR` columns behave as they did in all previous versions. When set to `off`, type-checking of these comparisons is more strict and queries that previously succeeded may now error with the message `unsupported comparison operator`. These errors can be fixed by adding explicit type casts. The `legacy_varchar_typing` session setting is on by default. [#137946][#137946] +- Added the `legacy_varchar_typing` session setting. When set to `on`, type-checking comparisons involving `VARCHAR` columns behave as they did in all previous versions. When set to `off`, type-checking of these comparisons is more strict and queries that previously succeeded may now error with the message `unsupported comparison operator`. These errors can be fixed by adding explicit type casts. The `legacy_varchar_typing` session setting is on by default. #137946 -[#137946]: https://github.com/cockroachdb/cockroach/pull/137946 diff --git a/src/current/_includes/releases/v24.2/v24.2.0-alpha.1.md b/src/current/_includes/releases/v24.2/v24.2.0-alpha.1.md index 5edaf8c7121..69ddbd04778 100644 --- a/src/current/_includes/releases/v24.2/v24.2.0-alpha.1.md +++ b/src/current/_includes/releases/v24.2/v24.2.0-alpha.1.md @@ -16,95 +16,95 @@ Release Date: July 1, 2024 - `storage.category-crdb-log.bytes-written` - `storage.category-sql-row-spill.bytes-written` - `storage.category-sql-col-spill.bytes-written` - - `storage.category-unspecified.bytes-written` [#119024][#119024] -- The `storage.value_blocks.enabled` [cluster setting]({% link v24.2/cluster-settings.md %}) has been removed; value blocks are always enabled. [#122164][#122164] + - `storage.category-unspecified.bytes-written` #119024 +- The `storage.value_blocks.enabled` [cluster setting]({% link v24.2/cluster-settings.md %}) has been removed; value blocks are always enabled. 
#122164 - The following [metrics]({% link v24.2/metrics.md %}) were added for improved observability into [disk bandwidth]({% link v24.2/common-issues-to-monitor.md %}#storage-and-disk-i-o): - `storage.disk.read-max.bytespersecond` - - `storage.disk.write-max.bytespersecond` [#121553][#121553] -- Added two new [cluster settings]({% link v24.2/cluster-settings.md %}), `storage.sstable.compression_algorithm_backup_storage` and `storage.sstable.compression_algorithm_backup_transport`, which in addition to the existing cluster setting `storage.sstable.compression_algorithm`, can be used to alter the compression algorithm used for various types of [SSTs]({% link v24.2/architecture/storage-layer.md %}#ssts). [#124245][#124245] + - `storage.disk.write-max.bytespersecond` #121553 +- Added two new [cluster settings]({% link v24.2/cluster-settings.md %}), `storage.sstable.compression_algorithm_backup_storage` and `storage.sstable.compression_algorithm_backup_transport`, which in addition to the existing cluster setting `storage.sstable.compression_algorithm`, can be used to alter the compression algorithm used for various types of [SSTs]({% link v24.2/architecture/storage-layer.md %}#ssts). #124245

{{ site.data.products.enterprise }} edition changes

-- `SHOW CHANGEFEED JOB`, [`SHOW CHANGEFEED JOBS`]({% link v24.2/show-jobs.md %}#show-changefeed-jobs), and [`SHOW JOBS`]({% link v24.2/show-jobs.md %}) no longer expose user sensitive information like `client_key`. [#122529][#122529] -- Added the new option [`encode_json_value_null_as_object`]({% link v24.2/create-changefeed.md %}#encode-json-value-null-as-object) to JSON-formatted [changefeeds]({% link v24.2/change-data-capture-overview.md %}) that outputs `'null'::jsonb` as `{"__crdb_json_null__": true}` instead of `null`, to disambiguate between SQL-null and JSON-null. With this option enabled, if the literal value `{"__crdb_json_null__": true}` is present in a JSON value, it will have the same representation as JSON-null with this option enabled. If such a value is encountered in a changefeed, a (rate-limited) warning will be printed to the [`DEV` channel]({% link v24.2/logging.md %}). [#122848][#122848] -- Added an error message for [changefeed]({% link v24.2/change-data-capture-overview.md %}) options and parameters that are not supported by the [Apache Pulsar sink]({% link v24.2/changefeed-sinks.md %}#apache-pulsar). [#124581][#124581] -- [Scheduled changefeeds]({% link v24.2/create-schedule-for-changefeed.md %}) now pause after being [restored]({% link v24.2/restore.md %}) onto a different cluster, and after completion of [physical cluster replication]({% link v24.2/physical-cluster-replication-overview.md %}) to avoid inadvertent concurrent execution of the same schedule on multiple clusters. [#124631][#124631] -- The `DEBUG_PAUSE_ON` option has been removed and replaced with the `restore.restore_after_failure` [pause]({% link v24.2/pause-job.md %}) point to match other pause points used throughout CockroachDB. You can set this pause point by running: `SET CLUSTER SETTING jobs.debug.pausepoints = 'restore.after_restore_failure'`. 
[#125158][#125158] +- `SHOW CHANGEFEED JOB`, [`SHOW CHANGEFEED JOBS`]({% link v24.2/show-jobs.md %}#show-changefeed-jobs), and [`SHOW JOBS`]({% link v24.2/show-jobs.md %}) no longer expose user sensitive information like `client_key`. #122529 +- Added the new option [`encode_json_value_null_as_object`]({% link v24.2/create-changefeed.md %}#encode-json-value-null-as-object) to JSON-formatted [changefeeds]({% link v24.2/change-data-capture-overview.md %}) that outputs `'null'::jsonb` as `{"__crdb_json_null__": true}` instead of `null`, to disambiguate between SQL-null and JSON-null. With this option enabled, if the literal value `{"__crdb_json_null__": true}` is present in a JSON value, it will have the same representation as JSON-null with this option enabled. If such a value is encountered in a changefeed, a (rate-limited) warning will be printed to the [`DEV` channel]({% link v24.2/logging.md %}). #122848 +- Added an error message for [changefeed]({% link v24.2/change-data-capture-overview.md %}) options and parameters that are not supported by the [Apache Pulsar sink]({% link v24.2/changefeed-sinks.md %}#apache-pulsar). #124581 +- [Scheduled changefeeds]({% link v24.2/create-schedule-for-changefeed.md %}) now pause after being [restored]({% link v24.2/restore.md %}) onto a different cluster, and after completion of [physical cluster replication]({% link v24.2/physical-cluster-replication-overview.md %}) to avoid inadvertent concurrent execution of the same schedule on multiple clusters. #124631 +- The `DEBUG_PAUSE_ON` option has been removed and replaced with the `restore.restore_after_failure` [pause]({% link v24.2/pause-job.md %}) point to match other pause points used throughout CockroachDB. You can set this pause point by running: `SET CLUSTER SETTING jobs.debug.pausepoints = 'restore.after_restore_failure'`. #125158

SQL language changes

-- Default schema privilege changes will now be reflected and can be monitored in the [`pg_default_acl`]({% link v24.2/pg-catalog.md %}) table. [#121506][#121506] +- Default schema privilege changes will now be reflected and can be monitored in the [`pg_default_acl`]({% link v24.2/pg-catalog.md %}) table. #121506 - The schema of the [`pg_catalog.pg_proc`]({% link v24.2/pg-catalog.md %}) virtual table now matches exactly that of PostgreSQL versions 14–16. The following changes are applied: - `proisagg` and `proiswindow` columns are removed (done in v11). - `protransform` column is removed (done in v12). - `prosqlbody` column is added (done in v14). - - `prosupport` and `prokind` columns are moved into their correct spot (these were incorrectly present at the end of the columns list). [#121966][#121966] -- Added [`SHOW EXTERNAL CONNECTIONS`]({% link v24.2/create-external-connection.md %}) and `SHOW EXTERNAL CONNECTION `. These queries display redacted connection URIs and other useful information, such as the connection type. Access to these queries is restricted to the owner of the connection or users with `USAGE` privilege. `admin` or `root` users will have unrestricted access to all connections. [#123545][#123545] -- Using the [`CREATE STATISTICS`]({% link v24.2/create-statistics.md %}) query without the [`AS OF SYSTEM TIME`]({% link v24.2/as-of-system-time.md %}) option could contend with concurrent transactions and cost performance. Running `CREATE STATISTICS` without specifying `AS OF SYSTEM TIME` now uses a default of `-1us`. [#124488][#124488] -- The `nodes` field of the [`EXPLAIN ANALYZE`]({% link v24.2/explain-analyze.md %}) output has been renamed to `sql nodes` to clarify that this field describes SQL processing and it does not include any information about KV nodes that might have participated in the query execution. 
[#124681][#124681] -- [`EXPLAIN ANALYZE`]({% link v24.2/explain-analyze.md %}) output now has a new field `KV nodes` that includes all KV nodes that were used to serve read requests by a particular SQL operator. [#124681][#124681] -- Fixed the `Regions` field in the [`EXPLAIN ANALYZE`]({% link v24.2/explain-analyze.md %}) output to include regions of KV nodes. Previously, only regions of SQL nodes involved in query processing were included. [#124888][#124888] -- Allow [foreign keys]({% link v24.2/foreign-key.md %}) to be created over stored [computed columns]({% link v24.2/computed-columns.md %}). However, most `ON UPDATE` and `ON DELETE` options for foreign key constraints are not allowed with computed columns. Only `ON UPDATE (NO ACTION|RESTRICT)` and `ON DELETE (NO ACTION|RESTRICT|CASCADE)` are supported. [#124851][#124851] -- [`EXPLAIN ANALYZE`]({% link v24.2/explain-analyze.md %}) output now has a new field `used follower read` to SQL operators whenever their reads were served by the follower replicas. Previously, this information was only available in the trace. [#125152][#125152] -- The new attribute `historical: AS OF SYSTEM TIME ...` is now included in [`EXPLAIN ANALYZE`]({% link v24.2/explain-analyze.md %}) output whenever the query performs [historical reads]({% link v24.2/as-of-system-time.md %}). [#125152][#125152] -- [`EXPLAIN ANALYZE`]({% link v24.2/explain-analyze.md %}) statements are now supported when executed via UI SQL shell. [#125455][#125455] -- Histograms are no longer constructed using a default sample size of `10k`. Samples are now dynamically sized based on table size unless the sample count has been set in the table or [cluster settings]({% link v24.2/cluster-settings.md %}). [#125345][#125345] -- The [optimizer]({% link v24.2/cost-based-optimizer.md %}) will now generate plans utilizing partial indexes with `IS NOT NULL` predicates in more cases. 
[#125487][#125487] -- The [`SHOW TYPES`]({% link v24.2/show-types.md %}) statement now includes user-defined composite types. It omitted those types ever since composite types were added in v23.1. [#124730][#124730] -- Added the [`COMMENT ON TYPE`]({% link v24.2/comment-on.md %}) statement for implicit transactions. [#124942][#124942] + - `prosupport` and `prokind` columns are moved into their correct spot (these were incorrectly present at the end of the columns list). #121966 +- Added [`SHOW EXTERNAL CONNECTIONS`]({% link v24.2/create-external-connection.md %}) and `SHOW EXTERNAL CONNECTION `. These queries display redacted connection URIs and other useful information, such as the connection type. Access to these queries is restricted to the owner of the connection or users with `USAGE` privilege. `admin` or `root` users will have unrestricted access to all connections. #123545 +- Using the [`CREATE STATISTICS`]({% link v24.2/create-statistics.md %}) query without the [`AS OF SYSTEM TIME`]({% link v24.2/as-of-system-time.md %}) option could contend with concurrent transactions and cost performance. Running `CREATE STATISTICS` without specifying `AS OF SYSTEM TIME` now uses a default of `-1us`. #124488 +- The `nodes` field of the [`EXPLAIN ANALYZE`]({% link v24.2/explain-analyze.md %}) output has been renamed to `sql nodes` to clarify that this field describes SQL processing and it does not include any information about KV nodes that might have participated in the query execution. #124681 +- [`EXPLAIN ANALYZE`]({% link v24.2/explain-analyze.md %}) output now has a new field `KV nodes` that includes all KV nodes that were used to serve read requests by a particular SQL operator. #124681 +- Fixed the `Regions` field in the [`EXPLAIN ANALYZE`]({% link v24.2/explain-analyze.md %}) output to include regions of KV nodes. Previously, only regions of SQL nodes involved in query processing were included. 
#124888 +- Allow [foreign keys]({% link v24.2/foreign-key.md %}) to be created over stored [computed columns]({% link v24.2/computed-columns.md %}). However, most `ON UPDATE` and `ON DELETE` options for foreign key constraints are not allowed with computed columns. Only `ON UPDATE (NO ACTION|RESTRICT)` and `ON DELETE (NO ACTION|RESTRICT|CASCADE)` are supported. #124851 +- [`EXPLAIN ANALYZE`]({% link v24.2/explain-analyze.md %}) output now has a new field `used follower read` to SQL operators whenever their reads were served by the follower replicas. Previously, this information was only available in the trace. #125152 +- The new attribute `historical: AS OF SYSTEM TIME ...` is now included in [`EXPLAIN ANALYZE`]({% link v24.2/explain-analyze.md %}) output whenever the query performs [historical reads]({% link v24.2/as-of-system-time.md %}). #125152 +- [`EXPLAIN ANALYZE`]({% link v24.2/explain-analyze.md %}) statements are now supported when executed via UI SQL shell. #125455 +- Histograms are no longer constructed using a default sample size of `10k`. Samples are now dynamically sized based on table size unless the sample count has been set in the table or [cluster settings]({% link v24.2/cluster-settings.md %}). #125345 +- The [optimizer]({% link v24.2/cost-based-optimizer.md %}) will now generate plans utilizing partial indexes with `IS NOT NULL` predicates in more cases. #125487 +- The [`SHOW TYPES`]({% link v24.2/show-types.md %}) statement now includes user-defined composite types. It omitted those types ever since composite types were added in v23.1. #124730 +- Added the [`COMMENT ON TYPE`]({% link v24.2/comment-on.md %}) statement for implicit transactions. #124942

Operational changes

-- Added a distinction between [row updates]({% link v24.2/changefeed-messages.md %}) (`row`) and [resolved timestamp]({% link v24.2/changefeed-messages.md %}#resolved-messages) (`resolved`) messages in some changefeed metrics. [#122645][#122645] -- Modified the default Grafana dashboards to include a breakdown by [message]({% link v24.2/changefeed-messages.md %}) type for the `changefeed_emitted_rows` [metric]({% link v24.2/monitor-and-debug-changefeeds.md %}). [#122802][#122802] -- Updated the generated doc descriptions for system visible [cluster settings]({% link v24.2/cluster-settings.md %}) to reflect these are read-only for CockroachDB {{ site.data.products.serverless }} and read-write for the other deployments. [#124028][#124028] -- Added the [cluster setting]({% link v24.2/cluster-settings.md %}) `debug.zip.redact_addresses.enabled` that allows the user to enable or disable redaction of fields like `hostname` and IP addresses. [#123544][#123544] -- Improved [disk usage metric]({% link v24.2/ui-storage-dashboard.md %}#capacity-metrics) reporting over volumes that dynamically change their size over the life of the `cockroach` process. [#124999][#124999] -- `crdb_internal.cluster_execution_insights.txt` and `crdb_internal.cluster_txn_execution_insights.txt` have been removed from the [debug zip]({% link v24.2/cockroach-debug-zip.md %}). These files contained cluster-wide insights for statements and transactions. Users can still rely on the[ per-node execution]({% link v24.2/cockroach-debug-zip.md %}#files) insights in `crdb_internal.node_execution_insights.txt` and `crdb_internal.node_txn_execution_insights.txt`. [#125654][#125654] -- Removed the stuck rangefeed cancel feature and its related [cluster setting]({% link v24.2/cluster-settings.md %}) `kv.rangefeed.range_stuck_threshold`, because it was only available in [non-mux rangefeeds]({% link v24.2/advanced-changefeed-configuration.md %}#mux-rangefeeds). 
Previously, the stuck rangefeed cancel feature was introduced to restart single rangefeeds automatically if they had not received KV updates for some time. [#125663][#125663] -- Fixed a bug where collection of debug information for very long-running [jobs]({% link v24.2/show-jobs.md %}) could use excessive space in the `job_info` system table and/or cause some interactions with the jobs system to become slow. [#126084][#126084] -- All system tables are now visible in application [virtual clusters]({% link v24.2/cluster-virtualization-overview.md %}) that were created before v24.1. [#125979][#125979] +- Added a distinction between [row updates]({% link v24.2/changefeed-messages.md %}) (`row`) and [resolved timestamp]({% link v24.2/changefeed-messages.md %}#resolved-messages) (`resolved`) messages in some changefeed metrics. #122645 +- Modified the default Grafana dashboards to include a breakdown by [message]({% link v24.2/changefeed-messages.md %}) type for the `changefeed_emitted_rows` [metric]({% link v24.2/monitor-and-debug-changefeeds.md %}). #122802 +- Updated the generated doc descriptions for system visible [cluster settings]({% link v24.2/cluster-settings.md %}) to reflect these are read-only for CockroachDB {{ site.data.products.serverless }} and read-write for the other deployments. #124028 +- Added the [cluster setting]({% link v24.2/cluster-settings.md %}) `debug.zip.redact_addresses.enabled` that allows the user to enable or disable redaction of fields like `hostname` and IP addresses. #123544 +- Improved [disk usage metric]({% link v24.2/ui-storage-dashboard.md %}#capacity-metrics) reporting over volumes that dynamically change their size over the life of the `cockroach` process. #124999 +- `crdb_internal.cluster_execution_insights.txt` and `crdb_internal.cluster_txn_execution_insights.txt` have been removed from the [debug zip]({% link v24.2/cockroach-debug-zip.md %}). These files contained cluster-wide insights for statements and transactions. 
Users can still rely on the [per-node execution]({% link v24.2/cockroach-debug-zip.md %}#files) insights in `crdb_internal.node_execution_insights.txt` and `crdb_internal.node_txn_execution_insights.txt`. #125654 +- Removed the stuck rangefeed cancel feature and its related [cluster setting]({% link v24.2/cluster-settings.md %}) `kv.rangefeed.range_stuck_threshold`, because it was only available in [non-mux rangefeeds]({% link v24.2/advanced-changefeed-configuration.md %}#mux-rangefeeds). Previously, the stuck rangefeed cancel feature was introduced to restart single rangefeeds automatically if they had not received KV updates for some time. #125663 +- Fixed a bug where collection of debug information for very long-running [jobs]({% link v24.2/show-jobs.md %}) could use excessive space in the `job_info` system table and/or cause some interactions with the jobs system to become slow. #126084 +- All system tables are now visible in application [virtual clusters]({% link v24.2/cluster-virtualization-overview.md %}) that were created before v24.1. #125979

DB Console changes

-- The **Goroutine Scheduling Latency** graph has been added to the [**Runtime** dasboard]({% link v24.2/ui-runtime-dashboard.md %}). [#121369][#121369] -- Updated the time format to use a `.` (dot) as separation between seconds and milliseconds, which affects many pages in the [DB Console]({% link v24.2/ui-overview.md %}). [#121456][#121456] -- The [**Storage** dashboard]({% link v24.2/ui-storage-dashboard.md %}) now contains a graph categorizing disk writes that contribute to `sys.host.disk.write.bytes` according to the source of the write (WAL, compactions, SSTable ingestion, memtable flushes, raft snapshots, encryption registry, logs, SQL columnar spill, or SQL row spill). [#119024][#119024] -- The favicon now renders properly for the [DB Console]({% link v24.2/ui-overview.md %}) along with other image files. [#121380][#121380] -- The color of **Unavailable Ranges** in the [**Summary** panel]({% link v24.2/ui-replication-dashboard.md %}#summary-panel) of the [**Replication** dashboard]({% link v24.2/ui-replication-dashboard.md %}) is now red when nonzero. [#123120][#123120] -- Removed the `$` sign on the [Databases]({% link v24.2/ui-databases-page.md %}) and [Jobs]({% link v24.2/ui-jobs-page.md %}) pages in the [DB Console]({% link v24.2/ui-overview.md %}). [#125370][#125370] -- Added two graphs to the [**Storage** dashboard]({% link v24.2/ui-storage-dashboard.md %}) that display count and size of L0 SSTables in [Pebble]({% link v24.2/architecture/storage-layer.md %}#pebble). This provides increased visibility into L0 compaction issues. [#125653][#125653] -- Removed the p95 metrics from the tooltip on the [**Job Latency**]({% link v24.2/ui-ttl-dashboard.md %}#job-latency) graph of the [**TTL** dashboard]({% link v24.2/ui-ttl-dashboard.md %}), because there are no p95 values computed for any of the metrics. 
[#122345][#122345] -- Updated the [**Storage** dashboard]({% link v24.2/ui-storage-dashboard.md %}) graphs to show most metrics on a [per-store]({% link v24.2/architecture/storage-layer.md %}) basis when viewing a single node's metrics. This provides increased visibility into issues caused by specific stores on each node. [#125699][#125699] +- The **Goroutine Scheduling Latency** graph has been added to the [**Runtime** dashboard]({% link v24.2/ui-runtime-dashboard.md %}). #121369 +- Updated the time format to use a `.` (dot) as separation between seconds and milliseconds, which affects many pages in the [DB Console]({% link v24.2/ui-overview.md %}). #121456 +- The [**Storage** dashboard]({% link v24.2/ui-storage-dashboard.md %}) now contains a graph categorizing disk writes that contribute to `sys.host.disk.write.bytes` according to the source of the write (WAL, compactions, SSTable ingestion, memtable flushes, raft snapshots, encryption registry, logs, SQL columnar spill, or SQL row spill). #119024 +- The favicon now renders properly for the [DB Console]({% link v24.2/ui-overview.md %}) along with other image files. #121380 +- The color of **Unavailable Ranges** in the [**Summary** panel]({% link v24.2/ui-replication-dashboard.md %}#summary-panel) of the [**Replication** dashboard]({% link v24.2/ui-replication-dashboard.md %}) is now red when nonzero. #123120 +- Removed the `$` sign on the [Databases]({% link v24.2/ui-databases-page.md %}) and [Jobs]({% link v24.2/ui-jobs-page.md %}) pages in the [DB Console]({% link v24.2/ui-overview.md %}). #125370 +- Added two graphs to the [**Storage** dashboard]({% link v24.2/ui-storage-dashboard.md %}) that display count and size of L0 SSTables in [Pebble]({% link v24.2/architecture/storage-layer.md %}#pebble). This provides increased visibility into L0 compaction issues.
#125653 +- Removed the p95 metrics from the tooltip on the [**Job Latency**]({% link v24.2/ui-ttl-dashboard.md %}#job-latency) graph of the [**TTL** dashboard]({% link v24.2/ui-ttl-dashboard.md %}), because there are no p95 values computed for any of the metrics. #122345 +- Updated the [**Storage** dashboard]({% link v24.2/ui-storage-dashboard.md %}) graphs to show most metrics on a [per-store]({% link v24.2/architecture/storage-layer.md %}) basis when viewing a single node's metrics. This provides increased visibility into issues caused by specific stores on each node. #125699

Bug fixes

-- Fixed a crash introduced in v23.2.5 and v24.1.0-beta.2 that could occur when planning [statistics collection]({% link v24.2/cost-based-optimizer.md %}#table-statistics) on a table with a [virtual computed column]({% link v24.2/computed-columns.md %}) using a user-defined type when the newly introduced [cluster setting]({% link v24.2/cluster-settings.md %}) `sql.stats.virtual_computed_columns.enabled` is set to `true`. This setting was introduced in v23.2.4 and v24.1.0-alpha.1. [#123926][#123926] -- Fixed handling in the [declarative schema changer]({% link v24.2/online-schema-changes.md %}) when columns are included in the [`STORING()`]({% link v24.2/create-index.md %}#store-columns) clause of [`CREATE INDEX`]({% link v24.2/create-index.md %}). It now checks if the column is [virtual]({% link v24.2/computed-columns.md %}) beforehand, and properly detects when a column is already handled by an existing index when the column name has UTF-8 characters. [#124981][#124981] -- Fixed an issue where [adding a column]({% link v24.2/alter-table.md %}#add-column) with a default value of an empty array would not succeed. [#125284][#125284] -- [`ALTER TABLE ... ADD CONSTRAINT UNIQUE`]({% link v24.2/alter-table.md %}) will now fail with a well-formed error message and code `42601` if a statement tries to add a [`UNIQUE` constraint]({% link v24.2/unique.md %}) on an expression. [#125282][#125282] -- Resolved a log message that read: `expiration of liveness record ... is not greater than expiration of the previous lease ... after liveness heartbeat`. This message is no longer possible. [#124885][#124885] -- Fixed a bug in v24.1, v23.2, and v23.1 where using the `changefeed.aggregator.flush_jitter` [cluster setting]({% link v24.2/cluster-settings.md %}) with [`min_checkpoint_frequency`]({% link v24.2/create-changefeed.md %}#min-checkpoint-frequency) set to `0` could cause panics. 
[#125317][#125317] -- Fixed a bug where the [`public` schema]({% link v24.2/sql-name-resolution.md %}#naming-hierarchy) would be created with the wrong owner. Previously, the [`admin` role]({% link v24.2/security-reference/authorization.md %}#admin-role) would own the `public` schema. Now the database owner is also the owner of the `public` schema. The ownership can be altered after the schema is created. [#124894][#124894] -- Previously, CockroachDB would hit an internal error when evaluating inserts into [`REGIONAL BY ROW`]({% link v24.2/table-localities.md %}#regional-by-row-tables) tables where the source is a [`VALUES`]({% link v24.2/insert.md %}#insert-default-values) clause with a single row and at least one boolean expression. The bug was introduced in v23.2.0 and is now fixed. [#125492][#125492] -- Fixed a bug in [logging]({% link v24.2/logging.md %}) where an error code was misreported for canceled queries. This bug affected the [`SQL_PERF`]({% link v24.2/logging.md %}#sql_perf) (slow query logs) and [`SQL_EXEC`]({% link v24.2/logging.md %}#sql_exec) (sql exec logs) logging channels. [#124371][#124371] -- Fixed a bug in which constant `LIKE` patterns containing certain sequences of backslashes did not become constrained scans. This bug has been present since v21.1.13 when support for building constrained scans from `LIKE` patterns containing backslashes was added. [#125507][#125507] -- Fixed a bug introduced in v22.1 where [`cockroach-sql`]({% link v24.2/cockroach-sql-binary.md %}) does not recognize the [`--format`]({% link v24.2/cockroach-sql-binary.md %}#flags) flag. [#124735][#124735] -- Fixed a bug where [`CREATE TABLE`]({% link v24.2/create-table.md %}) with [index expressions]({% link v24.2/expression-indexes.md %}) could hit undefined column errors on [transaction retries]({% link v24.2/transactions.md %}#transaction-retries). 
[#125910][#125910] -- Fixed a bug where some DDL and administrative statements used within a [common table expression]({% link v24.2/common-table-expressions.md %}) would fail with an `unrecognized relational expression type` internal error. [#125959][#125959] -- Fixed a bug in [`cockroach debug tsdump`]({% link v24.2/cockroach-debug-tsdump.md %}) where the command fails when a custom SQL port is used and the [`--format=raw`]({% link v24.2/cockroach-debug-tsdump.md %}#flags) flag is provided. [#125626][#125626] -- Attempts to alter the data type of a column used in a [computed column]({% link v24.2/computed-columns.md %}) expression are now blocked. [#125870][#125870] -- Fixed the statistics estimation code in the [optimizer]({% link v24.2/cost-based-optimizer.md %}) so it does not use the empty histograms produced if [histogram collection]({% link v24.2/cost-based-optimizer.md %}#control-histogram-collection) has been disabled during stats collection due to excessive memory utilization. Now the optimizer will rely on distinct counts instead of the empty histograms and should produce better plans as a result. This bug has existed since CockroachDB v22.1. [#125968][#125968] +- Fixed a crash introduced in v23.2.5 and v24.1.0-beta.2 that could occur when planning [statistics collection]({% link v24.2/cost-based-optimizer.md %}#table-statistics) on a table with a [virtual computed column]({% link v24.2/computed-columns.md %}) using a user-defined type when the newly introduced [cluster setting]({% link v24.2/cluster-settings.md %}) `sql.stats.virtual_computed_columns.enabled` is set to `true`. This setting was introduced in v23.2.4 and v24.1.0-alpha.1. #123926 +- Fixed handling in the [declarative schema changer]({% link v24.2/online-schema-changes.md %}) when columns are included in the [`STORING()`]({% link v24.2/create-index.md %}#store-columns) clause of [`CREATE INDEX`]({% link v24.2/create-index.md %}). 
It now checks if the column is [virtual]({% link v24.2/computed-columns.md %}) beforehand, and properly detects when a column is already handled by an existing index when the column name has UTF-8 characters. #124981 +- Fixed an issue where [adding a column]({% link v24.2/alter-table.md %}#add-column) with a default value of an empty array would not succeed. #125284 +- [`ALTER TABLE ... ADD CONSTRAINT UNIQUE`]({% link v24.2/alter-table.md %}) will now fail with a well-formed error message and code `42601` if a statement tries to add a [`UNIQUE` constraint]({% link v24.2/unique.md %}) on an expression. #125282 +- Resolved a log message that read: `expiration of liveness record ... is not greater than expiration of the previous lease ... after liveness heartbeat`. This message is no longer possible. #124885 +- Fixed a bug in v24.1, v23.2, and v23.1 where using the `changefeed.aggregator.flush_jitter` [cluster setting]({% link v24.2/cluster-settings.md %}) with [`min_checkpoint_frequency`]({% link v24.2/create-changefeed.md %}#min-checkpoint-frequency) set to `0` could cause panics. #125317 +- Fixed a bug where the [`public` schema]({% link v24.2/sql-name-resolution.md %}#naming-hierarchy) would be created with the wrong owner. Previously, the [`admin` role]({% link v24.2/security-reference/authorization.md %}#admin-role) would own the `public` schema. Now the database owner is also the owner of the `public` schema. The ownership can be altered after the schema is created. #124894 +- Previously, CockroachDB would hit an internal error when evaluating inserts into [`REGIONAL BY ROW`]({% link v24.2/table-localities.md %}#regional-by-row-tables) tables where the source is a [`VALUES`]({% link v24.2/insert.md %}#insert-default-values) clause with a single row and at least one boolean expression. The bug was introduced in v23.2.0 and is now fixed. #125492 +- Fixed a bug in [logging]({% link v24.2/logging.md %}) where an error code was misreported for canceled queries. 
This bug affected the [`SQL_PERF`]({% link v24.2/logging.md %}#sql_perf) (slow query logs) and [`SQL_EXEC`]({% link v24.2/logging.md %}#sql_exec) (sql exec logs) logging channels. #124371 +- Fixed a bug in which constant `LIKE` patterns containing certain sequences of backslashes did not become constrained scans. This bug has been present since v21.1.13 when support for building constrained scans from `LIKE` patterns containing backslashes was added. #125507 +- Fixed a bug introduced in v22.1 where [`cockroach-sql`]({% link v24.2/cockroach-sql-binary.md %}) does not recognize the [`--format`]({% link v24.2/cockroach-sql-binary.md %}#flags) flag. #124735 +- Fixed a bug where [`CREATE TABLE`]({% link v24.2/create-table.md %}) with [index expressions]({% link v24.2/expression-indexes.md %}) could hit undefined column errors on [transaction retries]({% link v24.2/transactions.md %}#transaction-retries). #125910 +- Fixed a bug where some DDL and administrative statements used within a [common table expression]({% link v24.2/common-table-expressions.md %}) would fail with an `unrecognized relational expression type` internal error. #125959 +- Fixed a bug in [`cockroach debug tsdump`]({% link v24.2/cockroach-debug-tsdump.md %}) where the command fails when a custom SQL port is used and the [`--format=raw`]({% link v24.2/cockroach-debug-tsdump.md %}#flags) flag is provided. #125626 +- Attempts to alter the data type of a column used in a [computed column]({% link v24.2/computed-columns.md %}) expression are now blocked. #125870 +- Fixed the statistics estimation code in the [optimizer]({% link v24.2/cost-based-optimizer.md %}) so it does not use the empty histograms produced if [histogram collection]({% link v24.2/cost-based-optimizer.md %}#control-histogram-collection) has been disabled during stats collection due to excessive memory utilization. Now the optimizer will rely on distinct counts instead of the empty histograms and should produce better plans as a result. 
This bug has existed since CockroachDB v22.1. #125968

Performance improvements

-- More efficient [query plans]({% link v24.2/cost-based-optimizer.md %}) are now generated for queries with [text similarity filters]({% link v24.2/trigram-indexes.md %}#how-do-trigram-indexes-work), e.g., `text_col % 'foobar'`. These plans are generated if the `optimizer_use_trigram_similarity_optimization` [session setting]({% link v24.2/session-variables.md %}) is enabled, which it is by default. This setting is disabled by default in previous versions. [#121973][#121973] -- [Schema changes]({% link v24.2/online-schema-changes.md %}) that cause a data backfill, such as adding a non-nullable column or changing the primary key, will now split and scatter the temporary indexes used to perform the change. This reduces the chance of causing a write hotspot that can slow down foreground traffic. [#120736][#120736] -- Multiple or large numbers of [grants]({% link v24.2/show-grants.md %}) on tables and types within one [transaction]({% link v24.2/transactions.md %}) now run faster. [#123414][#123414] -- [Lock operations]({% link v24.2/architecture/transaction-layer.md %}#writes-and-reads-phase-1) are now removed from [query plans]({% link v24.2/cost-based-optimizer.md %}) when the optimizer can prove that no rows would be locked. [#124830][#124830] -- Some privilege checks when scanning the [`crdb_internal.system_jobs`]({% link v24.2/crdb-internal.md %}) internal table now happen once before the scan, instead of once for each row. This will improve performance for queries that read from `crdb_internal.system_jobs`. [#117438][#117438] -- Improved the initial range descriptor scan on startup. [Node startup]({% link v24.2/cockroach-start.md %}) should now be faster on larger stores. [#123959][#123959] -- Previously, concurrent transactions that constructed large write batches could cause [hotspots]({% link v24.2/ui-hot-ranges-page.md %}). 
This was because the [transaction record]({% link v24.2/architecture/transaction-layer.md %}#transaction-records) for all [ranges]({% link v24.2/architecture/reads-and-writes-overview.md %}#range) would coalesce on a single range, which would then cause this range's [leaseholder]({% link v24.2/architecture/reads-and-writes-overview.md %}#leaseholder) to perform all intent resolution work. This is fixed by distributing transaction records randomly across the ranges the write batch touches. In turn, hotspots are prevented. [#125744][#125744] +- More efficient [query plans]({% link v24.2/cost-based-optimizer.md %}) are now generated for queries with [text similarity filters]({% link v24.2/trigram-indexes.md %}#how-do-trigram-indexes-work), e.g., `text_col % 'foobar'`. These plans are generated if the `optimizer_use_trigram_similarity_optimization` [session setting]({% link v24.2/session-variables.md %}) is enabled, which it is by default. This setting is disabled by default in previous versions. #121973 +- [Schema changes]({% link v24.2/online-schema-changes.md %}) that cause a data backfill, such as adding a non-nullable column or changing the primary key, will now split and scatter the temporary indexes used to perform the change. This reduces the chance of causing a write hotspot that can slow down foreground traffic. #120736 +- Multiple or large numbers of [grants]({% link v24.2/show-grants.md %}) on tables and types within one [transaction]({% link v24.2/transactions.md %}) now run faster. #123414 +- [Lock operations]({% link v24.2/architecture/transaction-layer.md %}#writes-and-reads-phase-1) are now removed from [query plans]({% link v24.2/cost-based-optimizer.md %}) when the optimizer can prove that no rows would be locked. #124830 +- Some privilege checks when scanning the [`crdb_internal.system_jobs`]({% link v24.2/crdb-internal.md %}) internal table now happen once before the scan, instead of once for each row. 
This will improve performance for queries that read from `crdb_internal.system_jobs`. #117438 +- Improved the initial range descriptor scan on startup. [Node startup]({% link v24.2/cockroach-start.md %}) should now be faster on larger stores. #123959 +- Previously, concurrent transactions that constructed large write batches could cause [hotspots]({% link v24.2/ui-hot-ranges-page.md %}). This was because the [transaction record]({% link v24.2/architecture/transaction-layer.md %}#transaction-records) for all [ranges]({% link v24.2/architecture/reads-and-writes-overview.md %}#range) would coalesce on a single range, which would then cause this range's [leaseholder]({% link v24.2/architecture/reads-and-writes-overview.md %}#leaseholder) to perform all intent resolution work. This is fixed by distributing transaction records randomly across the ranges the write batch touches. In turn, hotspots are prevented. #125744

Contributors

@@ -112,103 +112,3 @@ This release includes 1234 merged PRs by 97 authors. -[#117438]: https://github.com/cockroachdb/cockroach/pull/117438 -[#119024]: https://github.com/cockroachdb/cockroach/pull/119024 -[#120736]: https://github.com/cockroachdb/cockroach/pull/120736 -[#121293]: https://github.com/cockroachdb/cockroach/pull/121293 -[#121318]: https://github.com/cockroachdb/cockroach/pull/121318 -[#121369]: https://github.com/cockroachdb/cockroach/pull/121369 -[#121380]: https://github.com/cockroachdb/cockroach/pull/121380 -[#121456]: https://github.com/cockroachdb/cockroach/pull/121456 -[#121506]: https://github.com/cockroachdb/cockroach/pull/121506 -[#121553]: https://github.com/cockroachdb/cockroach/pull/121553 -[#121609]: https://github.com/cockroachdb/cockroach/pull/121609 -[#121709]: https://github.com/cockroachdb/cockroach/pull/121709 -[#121966]: https://github.com/cockroachdb/cockroach/pull/121966 -[#121973]: https://github.com/cockroachdb/cockroach/pull/121973 -[#122024]: https://github.com/cockroachdb/cockroach/pull/122024 -[#122045]: https://github.com/cockroachdb/cockroach/pull/122045 -[#122164]: https://github.com/cockroachdb/cockroach/pull/122164 -[#122210]: https://github.com/cockroachdb/cockroach/pull/122210 -[#122345]: https://github.com/cockroachdb/cockroach/pull/122345 -[#122529]: https://github.com/cockroachdb/cockroach/pull/122529 -[#122645]: https://github.com/cockroachdb/cockroach/pull/122645 -[#122802]: https://github.com/cockroachdb/cockroach/pull/122802 -[#122848]: https://github.com/cockroachdb/cockroach/pull/122848 -[#123089]: https://github.com/cockroachdb/cockroach/pull/123089 -[#123120]: https://github.com/cockroachdb/cockroach/pull/123120 -[#123145]: https://github.com/cockroachdb/cockroach/pull/123145 -[#123155]: https://github.com/cockroachdb/cockroach/pull/123155 -[#123350]: https://github.com/cockroachdb/cockroach/pull/123350 -[#123381]: https://github.com/cockroachdb/cockroach/pull/123381 -[#123414]: 
https://github.com/cockroachdb/cockroach/pull/123414 -[#123544]: https://github.com/cockroachdb/cockroach/pull/123544 -[#123545]: https://github.com/cockroachdb/cockroach/pull/123545 -[#123601]: https://github.com/cockroachdb/cockroach/pull/123601 -[#123619]: https://github.com/cockroachdb/cockroach/pull/123619 -[#123650]: https://github.com/cockroachdb/cockroach/pull/123650 -[#123791]: https://github.com/cockroachdb/cockroach/pull/123791 -[#123799]: https://github.com/cockroachdb/cockroach/pull/123799 -[#123827]: https://github.com/cockroachdb/cockroach/pull/123827 -[#123926]: https://github.com/cockroachdb/cockroach/pull/123926 -[#123959]: https://github.com/cockroachdb/cockroach/pull/123959 -[#123961]: https://github.com/cockroachdb/cockroach/pull/123961 -[#124010]: https://github.com/cockroachdb/cockroach/pull/124010 -[#124028]: https://github.com/cockroachdb/cockroach/pull/124028 -[#124053]: https://github.com/cockroachdb/cockroach/pull/124053 -[#124099]: https://github.com/cockroachdb/cockroach/pull/124099 -[#124241]: https://github.com/cockroachdb/cockroach/pull/124241 -[#124245]: https://github.com/cockroachdb/cockroach/pull/124245 -[#124289]: https://github.com/cockroachdb/cockroach/pull/124289 -[#124322]: https://github.com/cockroachdb/cockroach/pull/124322 -[#124371]: https://github.com/cockroachdb/cockroach/pull/124371 -[#124401]: https://github.com/cockroachdb/cockroach/pull/124401 -[#124409]: https://github.com/cockroachdb/cockroach/pull/124409 -[#124488]: https://github.com/cockroachdb/cockroach/pull/124488 -[#124581]: https://github.com/cockroachdb/cockroach/pull/124581 -[#124598]: https://github.com/cockroachdb/cockroach/pull/124598 -[#124604]: https://github.com/cockroachdb/cockroach/pull/124604 -[#124613]: https://github.com/cockroachdb/cockroach/pull/124613 -[#124628]: https://github.com/cockroachdb/cockroach/pull/124628 -[#124630]: https://github.com/cockroachdb/cockroach/pull/124630 -[#124631]: 
https://github.com/cockroachdb/cockroach/pull/124631 -[#124664]: https://github.com/cockroachdb/cockroach/pull/124664 -[#124681]: https://github.com/cockroachdb/cockroach/pull/124681 -[#124721]: https://github.com/cockroachdb/cockroach/pull/124721 -[#124730]: https://github.com/cockroachdb/cockroach/pull/124730 -[#124735]: https://github.com/cockroachdb/cockroach/pull/124735 -[#124788]: https://github.com/cockroachdb/cockroach/pull/124788 -[#124830]: https://github.com/cockroachdb/cockroach/pull/124830 -[#124851]: https://github.com/cockroachdb/cockroach/pull/124851 -[#124885]: https://github.com/cockroachdb/cockroach/pull/124885 -[#124888]: https://github.com/cockroachdb/cockroach/pull/124888 -[#124894]: https://github.com/cockroachdb/cockroach/pull/124894 -[#124896]: https://github.com/cockroachdb/cockroach/pull/124896 -[#124942]: https://github.com/cockroachdb/cockroach/pull/124942 -[#124971]: https://github.com/cockroachdb/cockroach/pull/124971 -[#124981]: https://github.com/cockroachdb/cockroach/pull/124981 -[#124999]: https://github.com/cockroachdb/cockroach/pull/124999 -[#125152]: https://github.com/cockroachdb/cockroach/pull/125152 -[#125158]: https://github.com/cockroachdb/cockroach/pull/125158 -[#125282]: https://github.com/cockroachdb/cockroach/pull/125282 -[#125284]: https://github.com/cockroachdb/cockroach/pull/125284 -[#125317]: https://github.com/cockroachdb/cockroach/pull/125317 -[#125345]: https://github.com/cockroachdb/cockroach/pull/125345 -[#125370]: https://github.com/cockroachdb/cockroach/pull/125370 -[#125455]: https://github.com/cockroachdb/cockroach/pull/125455 -[#125468]: https://github.com/cockroachdb/cockroach/pull/125468 -[#125487]: https://github.com/cockroachdb/cockroach/pull/125487 -[#125492]: https://github.com/cockroachdb/cockroach/pull/125492 -[#125507]: https://github.com/cockroachdb/cockroach/pull/125507 -[#125626]: https://github.com/cockroachdb/cockroach/pull/125626 -[#125653]: 
https://github.com/cockroachdb/cockroach/pull/125653 -[#125654]: https://github.com/cockroachdb/cockroach/pull/125654 -[#125663]: https://github.com/cockroachdb/cockroach/pull/125663 -[#125699]: https://github.com/cockroachdb/cockroach/pull/125699 -[#125870]: https://github.com/cockroachdb/cockroach/pull/125870 -[#125959]: https://github.com/cockroachdb/cockroach/pull/125959 -[#125968]: https://github.com/cockroachdb/cockroach/pull/125968 -[#126084]: https://github.com/cockroachdb/cockroach/pull/126084 -[#125744]: https://github.com/cockroachdb/cockroach/pull/125744 -[#125910]: https://github.com/cockroachdb/cockroach/pull/125910 -[#125979]: https://github.com/cockroachdb/cockroach/pull/125979 diff --git a/src/current/_includes/releases/v24.2/v24.2.0-alpha.2.md b/src/current/_includes/releases/v24.2/v24.2.0-alpha.2.md index 6f2d6827f32..943a6cb03a4 100644 --- a/src/current/_includes/releases/v24.2/v24.2.0-alpha.2.md +++ b/src/current/_includes/releases/v24.2/v24.2.0-alpha.2.md @@ -9,31 +9,31 @@ Release Date: July 10, 2024 - The compression option `none` was added to allow for the disabling of SSTable compression. This option can be used with any of the three existing cluster settings that control SSTable compression: - [`storage.sstable.compression_algorithm`]({% link v24.2/cluster-settings.md %}#setting-storage-sstable-compression-algorithm) - [`storage.sstable.compression_algorithm_backup_storage`]({% link v24.2/cluster-settings.md %}#setting-storage-sstable-compression-algorithm-backup-storage) - - [`storage.sstable.compression_algorithm_backup_transport`]({% link v24.2/cluster-settings.md %}#setting-storage-sstable-compression-algorithm-backup-transport) [#126508][#126508] + - [`storage.sstable.compression_algorithm_backup_transport`]({% link v24.2/cluster-settings.md %}#setting-storage-sstable-compression-algorithm-backup-transport) #126508

SQL language changes

-- Added the [`sql.auth.grant_option_inheritance.enabled` cluster setting]({% link v24.2/cluster-settings.md %}#setting-sql-auth-grant-option-inheritance-enabled). The default value is `true`, which maintains consistency with CockroachDB's previous behavior: users granted a privilege with [`WITH GRANT OPTION`]({% link v24.2/grant.md %}) can in turn grant that privilege to others. When `sql.auth.grant_option_inheritance.enabled` is set to `false`, the `GRANT OPTION` is not inherited through role membership, thereby preventing descendant roles from granting the privilege to others. However, the privilege itself continues to be inherited through role membership. [#125590][#125590] -- The [`pg_catalog.pg_attribute`]({% link v24.2/pg-catalog.md %}) table now has a column named `attishidden`, which indicates if the table column or attribute is [`NOT VISIBLE`]({% link v24.2/create-table.md %}#not-visible-property). [#126397][#126397] +- Added the [`sql.auth.grant_option_inheritance.enabled` cluster setting]({% link v24.2/cluster-settings.md %}#setting-sql-auth-grant-option-inheritance-enabled). The default value is `true`, which maintains consistency with CockroachDB's previous behavior: users granted a privilege with [`WITH GRANT OPTION`]({% link v24.2/grant.md %}) can in turn grant that privilege to others. When `sql.auth.grant_option_inheritance.enabled` is set to `false`, the `GRANT OPTION` is not inherited through role membership, thereby preventing descendant roles from granting the privilege to others. However, the privilege itself continues to be inherited through role membership. #125590 +- The [`pg_catalog.pg_attribute`]({% link v24.2/pg-catalog.md %}) table now has a column named `attishidden`, which indicates if the table column or attribute is [`NOT VISIBLE`]({% link v24.2/create-table.md %}#not-visible-property). #126397

Bug fixes

-- Fixed a bug that could cause internal errors when a [routine]({% link v24.2/user-defined-functions.md %}) had polymorphic parameters or a polymorphic return type or both. The bug has existed since v22.2 when [user-defined functions (UDFs)]({% link v24.2/user-defined-functions.md %}) were introduced. [#123459][#123459] -- In [`SHOW CREATE`]({% link v24.2/show-create.md %}) output, the name of an [enum type]({% link v24.2/enum.md %}) is now formatted as a two-part name (`schema.enum_type`) instead of a three-part name (`database.schema.enum_type`). This change makes it easier to apply the output with enum types to other databases. [#125996][#125996] -- When [altering the data type of columns]({% link v24.2/alter-table.md %}#alter-column) with the [hidden attribute (`NOT VISIBLE`)]({% link v24.2/create-table.md %}#not-visible-property), the alteration now preserves the hidden attribute in the column. Additionally, type alterations for columns with [`ON UPDATE`]({% link v24.2/create-table.md %}#on-update-expressions) expressions or [`DEFAULT`]({% link v24.2/default-value.md %}) expressions are now allowed. [#126033][#126033] -- Fixed a bug where a [user-defined function (UDF)]({% link v24.2/user-defined-functions.md %}) that shared a name with a [built-in function]({% link v24.2/functions-and-operators.md %}#built-in-functions) would not be resolved, even if the UDF had higher precedence according to the [`search_path`]({% link v24.2/sql-name-resolution.md %}#search-path) variable. [#126281][#126281] -- Expressions of type [`BYTES[]`]({% link v24.2/bytes.md %}) are now correctly formatted in [`pg_catalog`]({% link v24.2/pg-catalog.md %}) tables. [#126297][#126297] -- Fixed a bug that could cause spurious user permission errors when multiple databases shared a common schema with a routine referencing a table. The bug has existed since v22.2 when [user-defined functions (UDFs)]({% link v24.2/user-defined-functions.md %}) were introduced. 
[#123459][#123459] [#126349][#126349] -- [Hidden columns]({% link v24.2/create-table.md %}#not-visible-property) are now included in the `indkey` column of [`pg_catalog.pg_index`]({% link v24.2/pg-catalog.md %}). [#126397][#126397] -- Fixed a bug when inputting `public` role as user name for [built-in compatibility functions]({% link v24.2/functions-and-operators.md %}#compatibility-functions), such as `has_database_privilege` and `has_schema_privilege`. [#126211][#126211] -- Fixed a bug when [restoring]({% link v24.2/restore.md %}) a database with a [composite type]({% link v24.2/create-type.md %}#create-a-composite-data-type). [#126351][#126351] -- Fixed a bug when [creating partial statistics]({% link v24.2/create-statistics.md %}) with the [`USING EXTREMES` option]({% link v24.2/sql-grammar.md %}#create_stats_option) (which is disabled by default) where it would occasionally use incorrect extreme values and collect no stats. This bug occurred when outer buckets were added to the previous histogram to account for extra distinct count. [#126403][#126403] -- In the [DB Console event log]({% link v24.2/ui-overview-dashboard.md %}#events-panel), [`ALTER ROLE`]({% link v24.2/alter-role.md %}) events now display correctly even when no [role options]({% link v24.2/alter-role.md %}#role-options) are included in the `ALTER ROLE` statement. [#126354][#126354] -- Fixed a bug where [`ALTER DATABASE ... DROP REGION`]({% link v24.2/alter-database.md %}#drop-region) could fail if any tables under the given database have [indexes on expressions]({% link v24.2/expression-indexes.md %}). [#126555][#126555] +- Fixed a bug that could cause internal errors when a [routine]({% link v24.2/user-defined-functions.md %}) had polymorphic parameters or a polymorphic return type or both. The bug has existed since v22.2 when [user-defined functions (UDFs)]({% link v24.2/user-defined-functions.md %}) were introduced. 
#123459 +- In [`SHOW CREATE`]({% link v24.2/show-create.md %}) output, the name of an [enum type]({% link v24.2/enum.md %}) is now formatted as a two-part name (`schema.enum_type`) instead of a three-part name (`database.schema.enum_type`). This change makes it easier to apply the output with enum types to other databases. #125996 +- When [altering the data type of columns]({% link v24.2/alter-table.md %}#alter-column) with the [hidden attribute (`NOT VISIBLE`)]({% link v24.2/create-table.md %}#not-visible-property), the alteration now preserves the hidden attribute in the column. Additionally, type alterations for columns with [`ON UPDATE`]({% link v24.2/create-table.md %}#on-update-expressions) expressions or [`DEFAULT`]({% link v24.2/default-value.md %}) expressions are now allowed. #126033 +- Fixed a bug where a [user-defined function (UDF)]({% link v24.2/user-defined-functions.md %}) that shared a name with a [built-in function]({% link v24.2/functions-and-operators.md %}#built-in-functions) would not be resolved, even if the UDF had higher precedence according to the [`search_path`]({% link v24.2/sql-name-resolution.md %}#search-path) variable. #126281 +- Expressions of type [`BYTES[]`]({% link v24.2/bytes.md %}) are now correctly formatted in [`pg_catalog`]({% link v24.2/pg-catalog.md %}) tables. #126297 +- Fixed a bug that could cause spurious user permission errors when multiple databases shared a common schema with a routine referencing a table. The bug has existed since v22.2 when [user-defined functions (UDFs)]({% link v24.2/user-defined-functions.md %}) were introduced. #123459 #126349 +- [Hidden columns]({% link v24.2/create-table.md %}#not-visible-property) are now included in the `indkey` column of [`pg_catalog.pg_index`]({% link v24.2/pg-catalog.md %}). 
#126397 +- Fixed a bug when inputting `public` role as user name for [built-in compatibility functions]({% link v24.2/functions-and-operators.md %}#compatibility-functions), such as `has_database_privilege` and `has_schema_privilege`. #126211 +- Fixed a bug when [restoring]({% link v24.2/restore.md %}) a database with a [composite type]({% link v24.2/create-type.md %}#create-a-composite-data-type). #126351 +- Fixed a bug when [creating partial statistics]({% link v24.2/create-statistics.md %}) with the [`USING EXTREMES` option]({% link v24.2/sql-grammar.md %}#create_stats_option) (which is disabled by default) where it would occasionally use incorrect extreme values and collect no stats. This bug occurred when outer buckets were added to the previous histogram to account for extra distinct count. #126403 +- In the [DB Console event log]({% link v24.2/ui-overview-dashboard.md %}#events-panel), [`ALTER ROLE`]({% link v24.2/alter-role.md %}) events now display correctly even when no [role options]({% link v24.2/alter-role.md %}#role-options) are included in the `ALTER ROLE` statement. #126354 +- Fixed a bug where [`ALTER DATABASE ... DROP REGION`]({% link v24.2/alter-database.md %}#drop-region) could fail if any tables under the given database have [indexes on expressions]({% link v24.2/expression-indexes.md %}). #126555

Performance improvements

-- Starting a `cockroach` process will no longer flush [buffered logs]({% link v24.2/configure-logs.md %}#log-buffering-for-network-sinks) to [configured logging sinks]({% link v24.2/configure-logs.md %}#configure-log-sinks) unless the process is running under `systemd`, where `cockroach` runs with the `NOTIFY_SOCKET` environment variable. [#125971][#125971] +- Starting a `cockroach` process will no longer flush [buffered logs]({% link v24.2/configure-logs.md %}#log-buffering-for-network-sinks) to [configured logging sinks]({% link v24.2/configure-logs.md %}#configure-log-sinks) unless the process is running under `systemd`, where `cockroach` runs with the `NOTIFY_SOCKET` environment variable. #125971

Contributors

@@ -41,30 +41,3 @@ This release includes 130 merged PRs by 42 authors. -[#123459]: https://github.com/cockroachdb/cockroach/pull/123459 -[#125590]: https://github.com/cockroachdb/cockroach/pull/125590 -[#125648]: https://github.com/cockroachdb/cockroach/pull/125648 -[#125905]: https://github.com/cockroachdb/cockroach/pull/125905 -[#125971]: https://github.com/cockroachdb/cockroach/pull/125971 -[#125996]: https://github.com/cockroachdb/cockroach/pull/125996 -[#126033]: https://github.com/cockroachdb/cockroach/pull/126033 -[#126211]: https://github.com/cockroachdb/cockroach/pull/126211 -[#126270]: https://github.com/cockroachdb/cockroach/pull/126270 -[#126281]: https://github.com/cockroachdb/cockroach/pull/126281 -[#126297]: https://github.com/cockroachdb/cockroach/pull/126297 -[#126349]: https://github.com/cockroachdb/cockroach/pull/126349 -[#126351]: https://github.com/cockroachdb/cockroach/pull/126351 -[#126354]: https://github.com/cockroachdb/cockroach/pull/126354 -[#126368]: https://github.com/cockroachdb/cockroach/pull/126368 -[#126393]: https://github.com/cockroachdb/cockroach/pull/126393 -[#126397]: https://github.com/cockroachdb/cockroach/pull/126397 -[#126403]: https://github.com/cockroachdb/cockroach/pull/126403 -[#126508]: https://github.com/cockroachdb/cockroach/pull/126508 -[#126555]: https://github.com/cockroachdb/cockroach/pull/126555 -[1756b05fa]: https://github.com/cockroachdb/cockroach/commit/1756b05fa -[24f84bcb2]: https://github.com/cockroachdb/cockroach/commit/24f84bcb2 -[5884ff568]: https://github.com/cockroachdb/cockroach/commit/5884ff568 -[8f5537acd]: https://github.com/cockroachdb/cockroach/commit/8f5537acd -[9593896c3]: https://github.com/cockroachdb/cockroach/commit/9593896c3 -[d2804f0e8]: https://github.com/cockroachdb/cockroach/commit/d2804f0e8 -[efd597f49]: https://github.com/cockroachdb/cockroach/commit/efd597f49 diff --git a/src/current/_includes/releases/v24.2/v24.2.0-beta.1.md b/src/current/_includes/releases/v24.2/v24.2.0-beta.1.md 
index ec4f0f7ec9d..9e73e744fa6 100644 --- a/src/current/_includes/releases/v24.2/v24.2.0-beta.1.md +++ b/src/current/_includes/releases/v24.2/v24.2.0-beta.1.md @@ -6,31 +6,31 @@ Release Date: July 18, 2024

Security updates

-- Added support for a custom certificate authority (CA) to verify certificates from the JWT issuer domain, which hosts the JSON Web Key Set (JWKS) configuration that is fetched to validate JWT, along with the new [cluster setting]({% link v24.2/cluster-settings.md %}) `server.jwt_authentication.issuer_custom_ca` to set the custom root CA. [#126062][#126062] +- Added support for a custom certificate authority (CA) to verify certificates from the JWT issuer domain, which hosts the JSON Web Key Set (JWKS) configuration that is fetched to validate JWT, along with the new [cluster setting]({% link v24.2/cluster-settings.md %}) `server.jwt_authentication.issuer_custom_ca` to set the custom root CA. #126062

General changes

-- [Job]({% link v24.2/show-jobs.md %}) status changes now log events to the [OPS channel]({% link v24.2/logging.md %}#logging-channels), to indicate the previous and new status of the job. [#125319][#125319] +- [Job]({% link v24.2/show-jobs.md %}) status changes now log events to the [OPS channel]({% link v24.2/logging.md %}#logging-channels), to indicate the previous and new status of the job. #125319

{{ site.data.products.enterprise }} edition changes

-- The new [cluster setting]({% link v24.2/cluster-settings.md %}) `server.oidc_authentication.client.timeout` allows configuration of the HTTP client timeout for external calls made during [OIDC authentication]({% link v24.2/sso-db-console.md %}). The default timeout is 30 seconds. [#125767][#125767] -- The [Kafka sink for changefeeds]({% link v24.2/changefeed-sinks.md %}) now supports authentication using AWS IAM roles, providing a more secure method for connecting to AWS Managed Streaming for Apache Kafka (MSK) clusters. [#125745][#125745] +- The new [cluster setting]({% link v24.2/cluster-settings.md %}) `server.oidc_authentication.client.timeout` allows configuration of the HTTP client timeout for external calls made during [OIDC authentication]({% link v24.2/sso-db-console.md %}). The default timeout is 30 seconds. #125767 +- The [Kafka sink for changefeeds]({% link v24.2/changefeed-sinks.md %}) now supports authentication using AWS IAM roles, providing a more secure method for connecting to AWS Managed Streaming for Apache Kafka (MSK) clusters. #125745

SQL language changes

-- Added [pgvector](https://github.com/pgvector/pgvector) encoding, decoding, and operators, without index acceleration. [#124292][#124292] -- Added support for generic query plans to the [optimizer]({% link v24.2/cost-based-optimizer.md %}) to reduce the computational burden of query optimization by caching and reusing plans in later executions of the same statement. "Custom" query plans are optimized on every execution, while "generic" plans are optimized once and reused on future executions. Generic query plans are beneficial in cases where query optimization contributes significant overhead to the total cost of executing a query. [#126528][#126528] [#127012][#127012] +- Added [pgvector](https://github.com/pgvector/pgvector) encoding, decoding, and operators, without index acceleration. #124292 +- Added support for generic query plans to the [optimizer]({% link v24.2/cost-based-optimizer.md %}) to reduce the computational burden of query optimization by caching and reusing plans in later executions of the same statement. "Custom" query plans are optimized on every execution, while "generic" plans are optimized once and reused on future executions. Generic query plans are beneficial in cases where query optimization contributes significant overhead to the total cost of executing a query. #126528 #127012 - When the [session setting]({% link v24.2/session-variables.md %}) `plan_cache_mode` is set to `auto`, the system automatically determines whether to use custom or generic query plans for the execution of a prepared statement. - When the [session setting]({% link v24.2/session-variables.md %}) `plan_cache_mode` is set to `force_generic_plan`, prepared statements will reuse optimized query plans without re-optimization, as long as the plans do not become stale due to schema changes or new table statistics. 
- The setting is used during `EXECUTE` commands and the `EXPLAIN ANALYZE` output includes a `plan type` field that displays: `generic, re-optimized` if the plan is optimized for the current execution, `generic, reused` if the plan is reused without re-optimization, or `custom` for other plans. -- The output of [`SHOW GRANTS`]({% link v24.2/show-grants.md %}) for a role now includes privileges inherited from the `public` role, which is a [default role]({% link v24.2/security-reference/authorization.md %}#default-roles) defined on every cluster. [#127086][#127086] +- The output of [`SHOW GRANTS`]({% link v24.2/show-grants.md %}) for a role now includes privileges inherited from the `public` role, which is a [default role]({% link v24.2/security-reference/authorization.md %}#default-roles) defined on every cluster. #127086

Operational changes

-- For the [TELEMETRY channel]({% link v24.2/logging.md %}#telemetry), TCL `sampled_query` events will now be sampled at the rate specified by the setting `sql.telemetry.query_sampling.max_event_frequency`, which is already used to limit the rate of sampling DML statements. [#126484][#126484] -- The `encode-uri` command now supports the `--certs-dir` option as an alternative to passing individual certificate paths. [#126793][#126793] -- Changed the metric type of runtime metrics that are semantically counters from `GAUGE` to `COUNTER`. [#127018][#127018] +- For the [TELEMETRY channel]({% link v24.2/logging.md %}#telemetry), TCL `sampled_query` events will now be sampled at the rate specified by the setting `sql.telemetry.query_sampling.max_event_frequency`, which is already used to limit the rate of sampling DML statements. #126484 +- The `encode-uri` command now supports the `--certs-dir` option as an alternative to passing individual certificate paths. #126793 +- Changed the metric type of runtime metrics that are semantically counters from `GAUGE` to `COUNTER`. #127018 - `storage.disk.io.time` - `storage.disk.read.bytes` - `storage.disk.read.count` @@ -67,22 +67,22 @@ Release Date: July 18, 2024

Command-line changes

-- The new `--shutdown` flag of the [`cockroach node drain`]({% link v24.2/cockroach-node.md %}#subcommands) command shuts down the node automatically after draining successfully completes. [#126950][#126950] +- The new `--shutdown` flag of the [`cockroach node drain`]({% link v24.2/cockroach-node.md %}#subcommands) command shuts down the node automatically after draining successfully completes. #126950

Bug fixes

-- Fixed a bug on the node list of the [Cluster overview]({% link v24.2/ui-cluster-overview-page.md %}) page where the icons present on certain tables to expand and collapse expandable rows did not work. [#126639][#126639] -- Fixed a bug that prevented fast path inserts into regional by row tables with uniqueness constraints under [`READ COMMITTED`]({% link v24.2/read-committed.md %}) isolation. [#126504][#126504] -- Fixed a bug where the `sql.stats.discarded.current` [metric]({% link v24.2/metrics.md %}) omitted discarded statements from its count. Both discarded statements and transactions are included in the metric. [#126585][#126585] -- Fixed a bug where the [Database page]({% link v24.2/ui-databases-page.md %}) could crash if range information is not available. [#126419][#126419] -- Fixed a bug that caused [background jobs]({% link v24.2/show-jobs.md %}) to incorrectly respect a statement timeout. [#126336][#126336] -- Fixed a bug when [creating partial statistics]({% link v24.2/create-statistics.md %}) with the [USING EXTREMES option]({% link v24.2/sql-grammar.md %}#create_stats_option) (disabled by default) where the merged statistic could contain inaccurate `DISTINCT` counts. [#126830][#126830] -- Fixed bug where a [replication zone configuration]({% link v24.2/configure-replication-zones.md %}) for a partition key could disappear during truncation. [#126531][#126531] +- Fixed a bug on the node list of the [Cluster overview]({% link v24.2/ui-cluster-overview-page.md %}) page where the icons present on certain tables to expand and collapse expandable rows did not work. #126639 +- Fixed a bug that prevented fast path inserts into regional by row tables with uniqueness constraints under [`READ COMMITTED`]({% link v24.2/read-committed.md %}) isolation. #126504 +- Fixed a bug where the `sql.stats.discarded.current` [metric]({% link v24.2/metrics.md %}) omitted discarded statements from its count. 
Both discarded statements and transactions are included in the metric. #126585 +- Fixed a bug where the [Database page]({% link v24.2/ui-databases-page.md %}) could crash if range information is not available. #126419 +- Fixed a bug that caused [background jobs]({% link v24.2/show-jobs.md %}) to incorrectly respect a statement timeout. #126336 +- Fixed a bug when [creating partial statistics]({% link v24.2/create-statistics.md %}) with the [USING EXTREMES option]({% link v24.2/sql-grammar.md %}#create_stats_option) (disabled by default) where the merged statistic could contain inaccurate `DISTINCT` counts. #126830 +- Fixed a bug where a [replication zone configuration]({% link v24.2/configure-replication-zones.md %}) for a partition key could disappear during truncation. #126531

Performance improvements

-- The efficiency of merging partial statistics into existing [statistics]({% link v24.2/create-statistics.md %}) has been improved. [#126830][#126830] -- The [optimizer]({% link v24.2/cost-based-optimizer.md %}) now generates more efficient plans for queries with clauses like `ORDER BY col ASC NULLS LAST` and `ORDER BY col DESC NULLS FIRST` when `col` is guaranteed to not be `NULL`. [#126685][#126685] +- The efficiency of merging partial statistics into existing [statistics]({% link v24.2/create-statistics.md %}) has been improved. #126830 +- The [optimizer]({% link v24.2/cost-based-optimizer.md %}) now generates more efficient plans for queries with clauses like `ORDER BY col ASC NULLS LAST` and `ORDER BY col DESC NULLS FIRST` when `col` is guaranteed to not be `NULL`. #126685
@@ -92,28 +92,3 @@ This release includes 96 merged PRs by 49 authors.
-[#103156]: https://github.com/cockroachdb/cockroach/pull/103156 -[#124292]: https://github.com/cockroachdb/cockroach/pull/124292 -[#125319]: https://github.com/cockroachdb/cockroach/pull/125319 -[#125745]: https://github.com/cockroachdb/cockroach/pull/125745 -[#125767]: https://github.com/cockroachdb/cockroach/pull/125767 -[#126062]: https://github.com/cockroachdb/cockroach/pull/126062 -[#126336]: https://github.com/cockroachdb/cockroach/pull/126336 -[#126419]: https://github.com/cockroachdb/cockroach/pull/126419 -[#126484]: https://github.com/cockroachdb/cockroach/pull/126484 -[#126504]: https://github.com/cockroachdb/cockroach/pull/126504 -[#126528]: https://github.com/cockroachdb/cockroach/pull/126528 -[#126531]: https://github.com/cockroachdb/cockroach/pull/126531 -[#126575]: https://github.com/cockroachdb/cockroach/pull/126575 -[#126585]: https://github.com/cockroachdb/cockroach/pull/126585 -[#126639]: https://github.com/cockroachdb/cockroach/pull/126639 -[#126685]: https://github.com/cockroachdb/cockroach/pull/126685 -[#126708]: https://github.com/cockroachdb/cockroach/pull/126708 -[#126747]: https://github.com/cockroachdb/cockroach/pull/126747 -[#126793]: https://github.com/cockroachdb/cockroach/pull/126793 -[#126830]: https://github.com/cockroachdb/cockroach/pull/126830 -[#126950]: https://github.com/cockroachdb/cockroach/pull/126950 -[#127012]: https://github.com/cockroachdb/cockroach/pull/127012 -[#127018]: https://github.com/cockroachdb/cockroach/pull/127018 -[#127086]: https://github.com/cockroachdb/cockroach/pull/127086 -[#127124]: https://github.com/cockroachdb/cockroach/pull/127124 diff --git a/src/current/_includes/releases/v24.2/v24.2.0-beta.2.md b/src/current/_includes/releases/v24.2/v24.2.0-beta.2.md index a4269de5375..cf6e430f2b8 100644 --- a/src/current/_includes/releases/v24.2/v24.2.0-beta.2.md +++ b/src/current/_includes/releases/v24.2/v24.2.0-beta.2.md @@ -6,7 +6,7 @@ Release Date: July 24, 2024

Security updates

-- URLs in the following SQL statements are now sanitized of any secrets before being written to [unredacted logs]({% link v24.2/configure-logs.md %}#redact-logs). [#127360][#127360] +- URLs in the following SQL statements are now sanitized of any secrets before being written to [unredacted logs]({% link v24.2/configure-logs.md %}#redact-logs). #127360 - [`ALTER BACKUP SCHEDULE`]({% link v24.2/alter-backup.md %}) - [`ALTER BACKUP`]({% link v24.2/alter-backup.md %}) - [`ALTER CHANGEFEED SET sink`]({% link v24.2/alter-changefeed.md %}) @@ -24,14 +24,14 @@ Release Date: July 24, 2024

SQL language changes

-- The new [cluster setting]({% link v24.2/cluster-settings.md %}) [`sql.auth.grant_option_for_owner.enabled`]({% link v24.2/cluster-settings.md %}#setting-sql-auth-grant-option-for-owner-enabled) controls whether the owner of an object has permission to grant permission on the object to other [users]({% link v24.2/security-reference/authorization.md %}#roles). When set to `true` (the default), the [`GRANT OPTION`]({% link v24.2/show-grants.md %}#privilege-grants) is is implicitly granted to the object owner, who can grant permissions on the object to other users, preserving the existing behavior of CockroachDB. When set to `false`, the `GRANT OPTION` is not implicitly given to the owner of an object. The owner's permissions do not change, but they can no longer grant permissions to others unless the `GRANT OPTION` is granted to them explicitly. [#126960][#126960] -- Fixed a bug in which the `DISCARD` statement was disallowed when the [session setting]({% link v24.2/session-variables.md %}#default-transaction-read-only) `default_transaction_read_only = on`. [cockroachdb/cockroach#127364][#127364] +- The new [cluster setting]({% link v24.2/cluster-settings.md %}) [`sql.auth.grant_option_for_owner.enabled`]({% link v24.2/cluster-settings.md %}#setting-sql-auth-grant-option-for-owner-enabled) controls whether the owner of an object has permission to grant permission on the object to other [users]({% link v24.2/security-reference/authorization.md %}#roles). When set to `true` (the default), the [`GRANT OPTION`]({% link v24.2/show-grants.md %}#privilege-grants) is implicitly granted to the object owner, who can grant permissions on the object to other users, preserving the existing behavior of CockroachDB. When set to `false`, the `GRANT OPTION` is not implicitly given to the owner of an object. The owner's permissions do not change, but they can no longer grant permissions to others unless the `GRANT OPTION` is granted to them explicitly. 
#126960 +- Fixed a bug in which the `DISCARD` statement was disallowed when the [session setting]({% link v24.2/session-variables.md %}#default-transaction-read-only) `default_transaction_read_only = on`. #127364

Bug fixes

-- Fixed a bug that could cause [`CREATE INVERTED INDEX`]({% link v24.2/create-index.md %}#create-gin-indexes) and [`ALTER TABLE ... SET LOCALITY REGIONAL BY ROW`]({% link v24.2/alter-table.md %}#set-the-table-locality-to-regional-by-row) statements to fail if the corresponding table contained columns with non-standard characters in their names, such as tabs or newlines. This bug was introduced along with [inverted indexes]({% link v24.2/inverted-indexes.md %}) in v2.0. [#126996][#126996] -- Fixed a bug introduced in v23.2 that could cause a [gateway node]({% link v24.2/architecture/life-of-a-distributed-transaction.md %}#gateway) to crash while executing an [`INSERT`]({% link v24.2/insert.md %}) statement in a [`REGIONAL BY ROW`]({% link v24.2/alter-table.md %}#set-the-table-locality-to-regional-by-row) table. [#127278][#127278] -- Fixed a bug where a [schema change]({% link v24.2/online-schema-changes.md %}) could hang if the [lease]({% link v24.2/architecture/replication-layer.md %}#leases) [rangefeed]({% link v24.2/create-and-configure-changefeeds.md %}#enable-rangefeeds) stopped receiving updates. [#127488][#127488] +- Fixed a bug that could cause [`CREATE INVERTED INDEX`]({% link v24.2/create-index.md %}#create-gin-indexes) and [`ALTER TABLE ... SET LOCALITY REGIONAL BY ROW`]({% link v24.2/alter-table.md %}#set-the-table-locality-to-regional-by-row) statements to fail if the corresponding table contained columns with non-standard characters in their names, such as tabs or newlines. This bug was introduced along with [inverted indexes]({% link v24.2/inverted-indexes.md %}) in v2.0. #126996 +- Fixed a bug introduced in v23.2 that could cause a [gateway node]({% link v24.2/architecture/life-of-a-distributed-transaction.md %}#gateway) to crash while executing an [`INSERT`]({% link v24.2/insert.md %}) statement in a [`REGIONAL BY ROW`]({% link v24.2/alter-table.md %}#set-the-table-locality-to-regional-by-row) table. 
#127278 +- Fixed a bug where a [schema change]({% link v24.2/online-schema-changes.md %}) could hang if the [lease]({% link v24.2/architecture/replication-layer.md %}#leases) [rangefeed]({% link v24.2/create-and-configure-changefeeds.md %}#enable-rangefeeds) stopped receiving updates. #127488
@@ -41,10 +41,3 @@ This release includes 45 merged PRs by 18 authors.
-[#126960]: https://github.com/cockroachdb/cockroach/pull/126960 -[#126996]: https://github.com/cockroachdb/cockroach/pull/126996 -[#127278]: https://github.com/cockroachdb/cockroach/pull/127278 -[#127360]: https://github.com/cockroachdb/cockroach/pull/127360 -[#127364]: https://github.com/cockroachdb/cockroach/pull/127364 -[#127392]: https://github.com/cockroachdb/cockroach/pull/127392 -[#127488]: https://github.com/cockroachdb/cockroach/pull/127488 diff --git a/src/current/_includes/releases/v24.2/v24.2.0-beta.3.md b/src/current/_includes/releases/v24.2/v24.2.0-beta.3.md index 3ed6e1ea9a6..4fdc09b57af 100644 --- a/src/current/_includes/releases/v24.2/v24.2.0-beta.3.md +++ b/src/current/_includes/releases/v24.2/v24.2.0-beta.3.md @@ -6,11 +6,11 @@ Release Date: August 1, 2024

Command-line changes

-- A `--locality-file` flag is now available on the [`cockroach start`]({% link v24.2/cockroach-start.md %}) and [`cockroach start-single-node`]({% link v24.2/cockroach-start-single-node.md %}) commands. This provides the option of specifing node [locality]({% link v24.2/cockroach-start.md %}#locality) (typically a `region` value) as a file, as an alternative to specifying this using the [`--locality` flag]({% link v24.2/cockroach-start.md %}#locality). [#127476][#127476] +- A `--locality-file` flag is now available on the [`cockroach start`]({% link v24.2/cockroach-start.md %}) and [`cockroach start-single-node`]({% link v24.2/cockroach-start-single-node.md %}) commands. This provides the option of specifying node [locality]({% link v24.2/cockroach-start.md %}#locality) (typically a `region` value) as a file, as an alternative to specifying this using the [`--locality` flag]({% link v24.2/cockroach-start.md %}#locality). #127476

Bug fixes

-- Fixed a formatting issue with the `sql_sequence_cached_node` value of the `serial_normalization` [setting]({% link v24.2/session-variables.md %}). This could lead to an error connecting to CockroachDB if this value was set as the default for `serial_normalization` via cluster setting [`sql.defaults.serial_normalization`]({% link v24.2/cluster-settings.md %}#setting-sql-defaults-serial-normalization). [#127674][#127674] -- Dropping [ENUM]({% link v24.2/enum.md %})-type values which were referenced by [index expressions]({% link v24.2/expression-indexes.md %}) could fail with an error. [#127455][#127455] +- Fixed a formatting issue with the `sql_sequence_cached_node` value of the `serial_normalization` [setting]({% link v24.2/session-variables.md %}). This could lead to an error connecting to CockroachDB if this value was set as the default for `serial_normalization` via cluster setting [`sql.defaults.serial_normalization`]({% link v24.2/cluster-settings.md %}#setting-sql-defaults-serial-normalization). #127674 +- Dropping [ENUM]({% link v24.2/enum.md %})-type values which were referenced by [index expressions]({% link v24.2/expression-indexes.md %}) could fail with an error. #127455 This release includes 13 merged PRs by 7 authors. diff --git a/src/current/_includes/releases/v24.2/v24.2.0-rc.1.md b/src/current/_includes/releases/v24.2/v24.2.0-rc.1.md index 02b1bfc756a..2350cc6bcca 100644 --- a/src/current/_includes/releases/v24.2/v24.2.0-rc.1.md +++ b/src/current/_includes/releases/v24.2/v24.2.0-rc.1.md @@ -6,20 +6,20 @@ Release Date: August 7, 2024

{{ site.data.products.enterprise }} edition changes

-- Added a new Kafka [changefeed sink]({% link v24.2/changefeed-sinks.md %}) that uses the [`franz-go` library](https://github.com/twmb/franz-go) and CockroachDB's `batching_sink` implementation. The new Kafka sink can be enabled with the [`changefeed.new_kafka_sink_enabled`]({% link v24.2/cluster-settings.md %}) cluster setting, which is disabled by default. [#127899][#127899] -- The v2 Kafka [changefeed sink]({% link v24.2/changefeed-sinks.md %}) now supports [Amazon Managed Streaming for Apache Kafka (MSK)](https://aws.amazon.com/msk/) IAM SASL authentication. [#127899][#127899] +- Added a new Kafka [changefeed sink]({% link v24.2/changefeed-sinks.md %}) that uses the [`franz-go` library](https://github.com/twmb/franz-go) and CockroachDB's `batching_sink` implementation. The new Kafka sink can be enabled with the [`changefeed.new_kafka_sink_enabled`]({% link v24.2/cluster-settings.md %}) cluster setting, which is disabled by default. #127899 +- The v2 Kafka [changefeed sink]({% link v24.2/changefeed-sinks.md %}) now supports [Amazon Managed Streaming for Apache Kafka (MSK)](https://aws.amazon.com/msk/) IAM SASL authentication. #127899

DB Console changes

-- The [Databases]({% link v24.2/ui-databases-page.md %}) and [Tables]({% link v24.2/ui-databases-page.md %}#tables-view) pages in the [DB Console]({% link v24.2/ui-overview.md %}) will show a loading state while loading information for databases and tables including size and range counts. [#127696][#127696] -- On the [Database details]({% link v24.2/ui-databases-page.md %}) page, the table name will no longer appear with quotes around the schema and table name. [#127770][#127770] +- The [Databases]({% link v24.2/ui-databases-page.md %}) and [Tables]({% link v24.2/ui-databases-page.md %}#tables-view) pages in the [DB Console]({% link v24.2/ui-overview.md %}) will show a loading state while loading information for databases and tables including size and range counts. #127696 +- On the [Database details]({% link v24.2/ui-databases-page.md %}) page, the table name will no longer appear with quotes around the schema and table name. #127770

Bug fixes

-- Fixed a bug that caused a memory leak when executing SQL statements with comments, for example, `SELECT /* comment */ 1;`. Memory owned by a SQL session would continue to grow as these types of statements were executed. The memory would only be released when closing the [SQL session]({% link v24.2/show-sessions.md %}). This bug has been present since v23.1. [#127760][#127760] -- Fixed a bug in [debug zip]({% link v24.2/cockroach-debug-zip.md %}) generation where an error was produced while fetching unstructured/malformed [logs]({% link v24.2/log-formats.md %}). [#127883][#127883] -- Fixed small memory leaks that occur during [changefeed creation]({% link v24.2/create-changefeed.md %}). [#127899][#127899] -- Fixed a [known limitation]({% link v24.2/physical-cluster-replication-overview.md %}#known-limitations) in which [fast cutback]({% link v24.2/cutover-replication.md %}#cut-back-to-the-original-primary-cluster) could fail. Users can now protect data for the [default protection window]({% link v24.2/physical-cluster-replication-technical-overview.md %}) of 4 hours on both the primary and the standby clusters. [#127892][#127892] +- Fixed a bug that caused a memory leak when executing SQL statements with comments, for example, `SELECT /* comment */ 1;`. Memory owned by a SQL session would continue to grow as these types of statements were executed. The memory would only be released when closing the [SQL session]({% link v24.2/show-sessions.md %}). This bug has been present since v23.1. #127760 +- Fixed a bug in [debug zip]({% link v24.2/cockroach-debug-zip.md %}) generation where an error was produced while fetching unstructured/malformed [logs]({% link v24.2/log-formats.md %}). #127883 +- Fixed small memory leaks that occur during [changefeed creation]({% link v24.2/create-changefeed.md %}). 
#127899 +- Fixed a [known limitation]({% link v24.2/physical-cluster-replication-overview.md %}#known-limitations) in which [fast cutback]({% link v24.2/cutover-replication.md %}#cut-back-to-the-original-primary-cluster) could fail. Users can now protect data for the [default protection window]({% link v24.2/physical-cluster-replication-technical-overview.md %}) of 4 hours on both the primary and the standby clusters. #127892
@@ -29,9 +29,3 @@ This release includes 29 merged PRs by 21 authors.
-[#127696]: https://github.com/cockroachdb/cockroach/pull/127696 -[#127760]: https://github.com/cockroachdb/cockroach/pull/127760 -[#127770]: https://github.com/cockroachdb/cockroach/pull/127770 -[#127883]: https://github.com/cockroachdb/cockroach/pull/127883 -[#127892]: https://github.com/cockroachdb/cockroach/pull/127892 -[#127899]: https://github.com/cockroachdb/cockroach/pull/127899 diff --git a/src/current/_includes/releases/v24.2/v24.2.0.md b/src/current/_includes/releases/v24.2/v24.2.0.md index b538ecba234..60047e2ca66 100644 --- a/src/current/_includes/releases/v24.2/v24.2.0.md +++ b/src/current/_includes/releases/v24.2/v24.2.0.md @@ -79,29 +79,29 @@ The following changes should be reviewed prior to upgrading. Default cluster set
Settings added
-- `debug.zip.redact_addresses`: Added the [cluster setting]({% link v24.2/cluster-settings.md %}) `debug.zip.redact_addresses.enabled` that allows the user to enable or disable redaction of fields like `hostname` and IP addresses. [#123544][#123544] -- `kv.transaction.randomized_anchor_key`: Previously, concurrent transactions that constructed large write batches could cause [hotspots]({% link v24.2/ui-hot-ranges-page.md %}). This was because the [transaction record]({% link v24.2/architecture/transaction-layer.md %}#transaction-records) for all [ranges]({% link v24.2/architecture/reads-and-writes-overview.md %}#range) would coalesce on a single range, which would then cause this range's [leaseholder]({% link v24.2/architecture/reads-and-writes-overview.md %}#leaseholder) to perform all intent resolution work. This is fixed by distributing transaction records randomly across the ranges the write batch touches. In turn, hotspots are prevented. [#125744][#125744] -- `server.oidc_authentication.client.timeout`: The new [cluster setting]({% link v24.2/cluster-settings.md %}#setting-server-oidc-authentication-client-timeout) `server.oidc_authentication.client.timeout` allows configuration of the HTTP client timeout for external requests made during [OIDC authentication]({% link v24.2/sso-db-console.md %}). The default timeout is 30 seconds. [#125767][#125767] -- `sql.auth.grant_option_for_owner.enabled`: The new [cluster setting]({% link v24.2/cluster-settings.md %}) [`sql.auth.grant_option_for_owner.enabled`]({% link v24.2/cluster-settings.md %}#setting-sql-auth-grant-option-for-owner-enabled) controls whether the owner of an object has permission to grant permission on the object to other [users]({% link v24.2/security-reference/authorization.md %}#roles). 
When set to `true` (the default), the [`GRANT OPTION`]({% link v24.2/show-grants.md %}#privilege-grants) is implicitly granted to the object owner, who can grant permissions on the object to other users, preserving the existing behavior of CockroachDB. When set to `false`, the `GRANT OPTION` is not implicitly given to the owner of an object. The owner's permissions do not change, but they can no longer grant permissions to others unless the `GRANT OPTION` is granted to them explicitly. [#126960][#126960] -- `sql.auth.grant_option_inheritance.enabled`: Added the [`sql.auth.grant_option_inheritance.enabled` cluster setting]({% link v24.2/cluster-settings.md %}#setting-sql-auth-grant-option-inheritance-enabled). The default value is `true`, which maintains consistency with CockroachDB's previous behavior: users granted a privilege with [`WITH GRANT OPTION`]({% link v24.2/grant.md %}) can in turn grant that privilege to others. When `sql.auth.grant_option_inheritance.enabled` is set to `false`, the `GRANT OPTION` is not inherited through role membership, thereby preventing descendant roles from granting the privilege to others. However, the privilege itself continues to be inherited through role membership. [#125590][#125590] -- `storage.sstable.compression_algorithm_backup_storage`, `storage.sstable.compression_algorithm_backup_transport`: Added two new [cluster settings]({% link v24.2/cluster-settings.md %}), `storage.sstable.compression_algorithm_backup_storage` and `storage.sstable.compression_algorithm_backup_transport`, which in addition to the existing cluster setting `storage.sstable.compression_algorithm`, can be used to alter the compression algorithm used for various types of [SSTs]({% link v24.2/architecture/storage-layer.md %}#ssts). 
[#124245][#124245] +- `debug.zip.redact_addresses`: Added the [cluster setting]({% link v24.2/cluster-settings.md %}) `debug.zip.redact_addresses.enabled` that allows the user to enable or disable redaction of fields like `hostname` and IP addresses. #123544 +- `kv.transaction.randomized_anchor_key`: Previously, concurrent transactions that constructed large write batches could cause [hotspots]({% link v24.2/ui-hot-ranges-page.md %}). This was because the [transaction record]({% link v24.2/architecture/transaction-layer.md %}#transaction-records) for all [ranges]({% link v24.2/architecture/reads-and-writes-overview.md %}#range) would coalesce on a single range, which would then cause this range's [leaseholder]({% link v24.2/architecture/reads-and-writes-overview.md %}#leaseholder) to perform all intent resolution work. This is fixed by distributing transaction records randomly across the ranges the write batch touches. In turn, hotspots are prevented. #125744 +- `server.oidc_authentication.client.timeout`: The new [cluster setting]({% link v24.2/cluster-settings.md %}#setting-server-oidc-authentication-client-timeout) `server.oidc_authentication.client.timeout` allows configuration of the HTTP client timeout for external requests made during [OIDC authentication]({% link v24.2/sso-db-console.md %}). The default timeout is 30 seconds. #125767 +- `sql.auth.grant_option_for_owner.enabled`: The new [cluster setting]({% link v24.2/cluster-settings.md %}) [`sql.auth.grant_option_for_owner.enabled`]({% link v24.2/cluster-settings.md %}#setting-sql-auth-grant-option-for-owner-enabled) controls whether the owner of an object has permission to grant permission on the object to other [users]({% link v24.2/security-reference/authorization.md %}#roles). 
When set to `true` (the default), the [`GRANT OPTION`]({% link v24.2/show-grants.md %}#privilege-grants) is implicitly granted to the object owner, who can grant permissions on the object to other users, preserving the existing behavior of CockroachDB. When set to `false`, the `GRANT OPTION` is not implicitly given to the owner of an object. The owner's permissions do not change, but they can no longer grant permissions to others unless the `GRANT OPTION` is granted to them explicitly. #126960 +- `sql.auth.grant_option_inheritance.enabled`: Added the [`sql.auth.grant_option_inheritance.enabled` cluster setting]({% link v24.2/cluster-settings.md %}#setting-sql-auth-grant-option-inheritance-enabled). The default value is `true`, which maintains consistency with CockroachDB's previous behavior: users granted a privilege with [`WITH GRANT OPTION`]({% link v24.2/grant.md %}) can in turn grant that privilege to others. When `sql.auth.grant_option_inheritance.enabled` is set to `false`, the `GRANT OPTION` is not inherited through role membership, thereby preventing descendant roles from granting the privilege to others. However, the privilege itself continues to be inherited through role membership. #125590 +- `storage.sstable.compression_algorithm_backup_storage`, `storage.sstable.compression_algorithm_backup_transport`: Added two new [cluster settings]({% link v24.2/cluster-settings.md %}), `storage.sstable.compression_algorithm_backup_storage` and `storage.sstable.compression_algorithm_backup_transport`, which in addition to the existing cluster setting `storage.sstable.compression_algorithm`, can be used to alter the compression algorithm used for various types of [SSTs]({% link v24.2/architecture/storage-layer.md %}#ssts). #124245
Settings removed
-- `kv.rangefeed.range_stuck_threshold`: Removed the stuck rangefeed cancel feature and its related [cluster setting]({% link v24.2/cluster-settings.md %}) `kv.rangefeed.range_stuck_threshold`, because it was only available in [non-mux rangefeeds]({% link v24.2/advanced-changefeed-configuration.md %}#mux-rangefeeds). Previously, the stuck rangefeed cancel feature was introduced to restart single rangefeeds automatically if they had not received KV updates for some time. [#125663][#125663] -- `storage.value_blocks.enabled`: The `storage.value_blocks.enabled` [cluster setting]({% link v24.2/cluster-settings.md %}) has been removed; value blocks are always enabled. [#122164][#122164] +- `kv.rangefeed.range_stuck_threshold`: Removed the stuck rangefeed cancel feature and its related [cluster setting]({% link v24.2/cluster-settings.md %}) `kv.rangefeed.range_stuck_threshold`, because it was only available in [non-mux rangefeeds]({% link v24.2/advanced-changefeed-configuration.md %}#mux-rangefeeds). Previously, the stuck rangefeed cancel feature was introduced to restart single rangefeeds automatically if they had not received KV updates for some time. #125663 +- `storage.value_blocks.enabled`: The `storage.value_blocks.enabled` [cluster setting]({% link v24.2/cluster-settings.md %}) has been removed; value blocks are always enabled. #122164
Settings with changed defaults
- `kv.dist_sender.circuit_breakers.mode` has had its default changed to `liveness range only = 1`. -- `sql.defaults.results_buffer.size` has had its default changed to `512 KiB`: The default value of the `sql.defaults.results_buffer.size` [cluster setting]({% link v24.2/cluster-settings.md %}) has been changed from 16KiB to 512KiB. This reduces the chance that clients using [`READ COMMITTED`]({% link v24.2/read-committed.md %}) transactions will encounter errors that cannot automatically be retried within CockroachDB. [#124633][#124633] -- `sql.metrics.max_mem_stmt_fingerprints` and `sql.metrics.max_mem_txn_fingerprints` have had their defaults changed to `7500`: The default values for the [cluster settings]({% link v24.2/cluster-settings.md %}) `sql.metrics.max_mem_stmt_fingerprints` and `sql.metrics.max_mem_txn_fingerprints` have been changed from `100000` to `7500`, thus lowering the default limits for in-memory statement and transaction fingerprints. [#123430][#123430] -- `sql.stats.histogram_samples.count` has had its default changed to `0`: Histograms are no longer constructed using a default sample size of `10k`. Samples are now sized dynamically based on table size unless the sample count has been set in the table or [cluster settings]({% link v24.2/cluster-settings.md %}). [#125345][#125345] -- `sql.ttl.default_delete_rate_limit` has had its default changed to `100`: The storage parameter `ttl_delete_rate_limit`, which determines the rate limit for deleting expired rows, is now set to `100` by default. [#124354][#124354] +- `sql.defaults.results_buffer.size` has had its default changed to `512 KiB`: The default value of the `sql.defaults.results_buffer.size` [cluster setting]({% link v24.2/cluster-settings.md %}) has been changed from 16KiB to 512KiB. This reduces the chance that clients using [`READ COMMITTED`]({% link v24.2/read-committed.md %}) transactions will encounter errors that cannot automatically be retried within CockroachDB. 
#124633 +- `sql.metrics.max_mem_stmt_fingerprints` and `sql.metrics.max_mem_txn_fingerprints` have had their defaults changed to `7500`: The default values for the [cluster settings]({% link v24.2/cluster-settings.md %}) `sql.metrics.max_mem_stmt_fingerprints` and `sql.metrics.max_mem_txn_fingerprints` have been changed from `100000` to `7500`, thus lowering the default limits for in-memory statement and transaction fingerprints. #123430 +- `sql.stats.histogram_samples.count` has had its default changed to `0`: Histograms are no longer constructed using a default sample size of `10k`. Samples are now sized dynamically based on table size unless the sample count has been set in the table or [cluster settings]({% link v24.2/cluster-settings.md %}). #125345 +- `sql.ttl.default_delete_rate_limit` has had its default changed to `100`: The storage parameter `ttl_delete_rate_limit`, which determines the rate limit for deleting expired rows, is now set to `100` by default. #124354
Settings with new options
-- `storage.sstable.compression_algorithm` has added the option `none = 3`: The compression option `none` was added to allow for the disabling of SSTable compression. This option is disabled by default, but can can be used with any of the three existing cluster settings that control SSTable compression: `storage.sstable.compression_algorithm`, `storage.sstable.compression_algorithm_backup_storage`, and `storage.sstable.compression_algorithm_backup_transport`. [#126508][#126508] +- `storage.sstable.compression_algorithm` has added the option `none = 3`: The compression option `none` was added to allow for the disabling of SSTable compression. This option is disabled by default, but can be used with any of the three existing cluster settings that control SSTable compression: `storage.sstable.compression_algorithm`, `storage.sstable.compression_algorithm_backup_storage`, and `storage.sstable.compression_algorithm_backup_transport`. #126508
Settings with new aliases
@@ -143,15 +143,3 @@ Docs | [SQL Feature Support]({% link v24.2/sql-feature-support.m Docs | [Change Data Capture Overview]({% link v24.2/change-data-capture-overview.md %}) | This page summarizes CockroachDB's data streaming capabilities. Change data capture (CDC) provides efficient, distributed, row-level changefeeds into a configurable sink for downstream processing such as reporting, caching, or full-text indexing. Docs | [Backup Architecture]({% link v24.2/backup-architecture.md %}) | This page describes the backup job workflow with a high-level overview, diagrams, and more details on each phase of the job. -[#115166]: https://github.com/cockroachdb/cockroach/pull/115166 -[#113893]: https://github.com/cockroachdb/cockroach/pull/113893 -[#115166]: https://github.com/cockroachdb/cockroach/pull/115166 -[#122368]: https://github.com/cockroachdb/cockroach/pull/122368 -[#116664]: https://github.com/cockroachdb/cockroach/pull/116664 -[#120509]: https://github.com/cockroachdb/cockroach/pull/120509 -[#118781]: https://github.com/cockroachdb/cockroach/pull/118781 -[#117729]: https://github.com/cockroachdb/cockroach/pull/117729 -[#120255]: https://github.com/cockroachdb/cockroach/pull/120255 -[#123430]: https://github.com/cockroachdb/cockroach/pull/123430 -[#124633]: https://github.com/cockroachdb/cockroach/pull/124633 -[#124354]: https://github.com/cockroachdb/cockroach/pull/124354 diff --git a/src/current/_includes/releases/v24.2/v24.2.1.md b/src/current/_includes/releases/v24.2/v24.2.1.md index 50128f9847f..101644b64ca 100644 --- a/src/current/_includes/releases/v24.2/v24.2.1.md +++ b/src/current/_includes/releases/v24.2/v24.2.1.md @@ -11,56 +11,37 @@ Release Date: September 5, 2024 - String that can be parsed as a valid JSON array of issuer URLs list: `['example.com/adfs','https://accounts.google.com']`. - String that can be parsed as a valid JSON and deserialized into a map of issuer URLs to corresponding JWKS URIs. 
In this case, CockroachDB will override the JWKS URI present in the issuer's well-known endpoint: `'{ "issuer_jwks_map": { "https://accounts.google.com": "https://www.googleapis.com/oauth2/v3/certs", "example.com/adfs": "https://example.com/adfs/discovery/keys" } }'`. - When `issuer_jwks_map` is set, CockroachDB directly uses the JWKS URI to get the key set. In all other cases where [`server.jwt_authentication.jwks_auto_fetch.enabled`]({% link v24.2/sso-sql.md %}#cluster-settings) is set, CockroachDB attempts to automatically obtain the JWKS URI first from the issuer's well-known endpoint. [#128178][#128178] + When `issuer_jwks_map` is set, CockroachDB directly uses the JWKS URI to get the key set. In all other cases where [`server.jwt_authentication.jwks_auto_fetch.enabled`]({% link v24.2/sso-sql.md %}#cluster-settings) is set, CockroachDB attempts to automatically obtain the JWKS URI first from the issuer's well-known endpoint. #128178

{{ site.data.products.enterprise }} edition changes

-- The new Kafka [changefeed sink]({% link v24.2/changefeed-sinks.md %}) is now enabled by default. To disable it, set the cluster setting [`changefeed.new_kafka_sink_enabled`]({% link v24.2/cluster-settings.md %}) to `false`. [#128700][#128700] -- The new Kafka sink and the Google Cloud Pub/Sub sink now display the topics that a changefeed will emit to. [#128332][#128332] +- The new Kafka [changefeed sink]({% link v24.2/changefeed-sinks.md %}) is now enabled by default. To disable it, set the cluster setting [`changefeed.new_kafka_sink_enabled`]({% link v24.2/cluster-settings.md %}) to `false`. #128700 +- The new Kafka sink and the Google Cloud Pub/Sub sink now display the topics that a changefeed will emit to. #128332

Operational changes

-- The cluster setting [`storage.ingestion.value_blocks.enabled`]({% link v24.2/cluster-settings.md %}#setting-storage-ingestion-value-blocks-enabled) can be set to `false` if a pathological huge [range]({% link v24.2/architecture/glossary.md %}#range) happens to occur in a cluster, and incoming [snapshots]({% link v24.2/architecture/replication-layer.md %}#snapshots) of that range are causing [OOMs]({% link v24.2/cluster-setup-troubleshooting.md %}#out-of-memory-oom-crash). [#128098][#128098] +- The cluster setting [`storage.ingestion.value_blocks.enabled`]({% link v24.2/cluster-settings.md %}#setting-storage-ingestion-value-blocks-enabled) can be set to `false` if a pathological huge [range]({% link v24.2/architecture/glossary.md %}#range) happens to occur in a cluster, and incoming [snapshots]({% link v24.2/architecture/replication-layer.md %}#snapshots) of that range are causing [OOMs]({% link v24.2/cluster-setup-troubleshooting.md %}#out-of-memory-oom-crash). #128098 - Two new structured logging events report connection breakage during node shutdown. Previously, these logs existed but were unstructured. These logs appear in the [`OPS` logging channel]({% link v24.2/logging.md %}#ops). - The [`node_shutdown_connection_timeout`]({% link v24.2/eventlog.md %}#node_shutdown_connection_timeout) event is logged after the timeout defined by [`server.shutdown.connections.timeout`]({% link v24.2/cluster-settings.md %}#setting-server-shutdown-connection-wait) transpires, if there are still [open SQL connections]({% link v24.2/show-sessions.md %}). - - The [`node_shutdown_transaction_timeout`]({% link v24.2/eventlog.md %}#node_shutdown_transaction_timeout) event is logged after the timeout defined by [`server.shutdown.transactions.timeout`]({% link v24.2/cluster-settings.md %}#setting-server-shutdown-query-wait) transpires, if there are still open [transactions]({% link v24.2/transactions.md %}) on those SQL connections. 
[#128712][#128712] + - The [`node_shutdown_transaction_timeout`]({% link v24.2/eventlog.md %}#node_shutdown_transaction_timeout) event is logged after the timeout defined by [`server.shutdown.transactions.timeout`]({% link v24.2/cluster-settings.md %}#setting-server-shutdown-query-wait) transpires, if there are still open [transactions]({% link v24.2/transactions.md %}) on those SQL connections. #128712

DB Console changes

-- Corrected the series names in the legend for the [`Admission Queueing Delay p99 – Background (Elastic) CPU` graph]({% link v24.2/ui-overload-dashboard.md %}#admission-queueing-delay-p99-background-elastic-cpu) on the [Overload dashboard]({% link v24.2/ui-overload-dashboard.md %}) by removing the `KV write ' prefix. [#128891][#128891] -- Hovering on graphs on [Metrics dashboards]({% link v24.2/ui-overview.md %}#metrics) now highlights the line under the mouse pointer and displays the corresponding value near the mouse pointer. [#128412][#128412] +- Corrected the series names in the legend for the [`Admission Queueing Delay p99 – Background (Elastic) CPU` graph]({% link v24.2/ui-overload-dashboard.md %}#admission-queueing-delay-p99-background-elastic-cpu) on the [Overload dashboard]({% link v24.2/ui-overload-dashboard.md %}) by removing the `KV write ' prefix. #128891 +- Hovering on graphs on [Metrics dashboards]({% link v24.2/ui-overview.md %}#metrics) now highlights the line under the mouse pointer and displays the corresponding value near the mouse pointer. #128412

Bug fixes

-- Fixed a memory leak that could occur when a connection string specifies a [virtual cluster]({% link v24.2/cluster-virtualization-overview.md %}) that does not exist. [#128108][#128108] -- Fixed a bug where [`CREATE INDEX IF NOT EXISTS`]({% link v24.2/create-index.md %}) would not correctly short-circuit if the given index already existed. [#128240][#128240] -- Fixed a bug where syntax validation incorrectly prevented use of the `DESCENDING` clause for non-terminal columns of an [inverted index]({% link v24.2/inverted-indexes.md %}). Now only the last column of an inverted index is prevented from using `DESCENDING`. [#128240][#128240] -- Fixed a bug where an [index]({% link v24.2/indexes.md %}) could store a column in the primary index if that column had a mixed-case name. [#128240][#128240] -- Setting or dropping a default value on a [computed column]({% link v24.2/computed-columns.md %}) is now disallowed -- even for null defaults. Previously, setting or dropping a default value on a computed column was a no-op; now it is an error. [#128465][#128465] -- Fixed a bug where a hash-sharded constraint could not be created if it referred to a column that had a backslash in its name. [#128522][#128522] +- Fixed a memory leak that could occur when a connection string specifies a [virtual cluster]({% link v24.2/cluster-virtualization-overview.md %}) that does not exist. #128108 +- Fixed a bug where [`CREATE INDEX IF NOT EXISTS`]({% link v24.2/create-index.md %}) would not correctly short-circuit if the given index already existed. #128240 +- Fixed a bug where syntax validation incorrectly prevented use of the `DESCENDING` clause for non-terminal columns of an [inverted index]({% link v24.2/inverted-indexes.md %}). Now only the last column of an inverted index is prevented from using `DESCENDING`. #128240 +- Fixed a bug where an [index]({% link v24.2/indexes.md %}) could store a column in the primary index if that column had a mixed-case name. 
#128240 +- Setting or dropping a default value on a [computed column]({% link v24.2/computed-columns.md %}) is now disallowed -- even for null defaults. Previously, setting or dropping a default value on a computed column was a no-op; now it is an error. #128465 +- Fixed a bug where a hash-sharded constraint could not be created if it referred to a column that had a backslash in its name. #128522 - Fixed a bug introduced in v23.1 where the output of [`EXPLAIN (OPT, REDACT)`]({% link v24.2/explain.md %}) for various `CREATE` statements was not redacted. This bug affects the following statements: - `EXPLAIN (OPT, REDACT) CREATE TABLE` - `EXPLAIN (OPT, REDACT) CREATE VIEW` - - `EXPLAIN (OPT, REDACT) CREATE FUNCTION` [#128490][#128490] -- Fixed a bug where legacy and [declarative schema changes]({% link v24.2/online-schema-changes.md %}) could be executed concurrently, which could lead to failing or hung schema change jobs. [#128825][#128825] -- Fixed a bug that caused errors like `ERROR: column 'crdb_internal_idx_expr' does not exist` when accessing a table with an [expression index]({% link v24.2/expression-indexes.md %}) where the expression evaluates to an [ENUM type]({% link v24.2/enum.md %}), such as `CREATE INDEX ON t ((col::an_enum))`. [#129094][#129094] + - `EXPLAIN (OPT, REDACT) CREATE FUNCTION` #128490 +- Fixed a bug where legacy and [declarative schema changes]({% link v24.2/online-schema-changes.md %}) could be executed concurrently, which could lead to failing or hung schema change jobs. #128825 +- Fixed a bug that caused errors like `ERROR: column 'crdb_internal_idx_expr' does not exist` when accessing a table with an [expression index]({% link v24.2/expression-indexes.md %}) where the expression evaluates to an [ENUM type]({% link v24.2/enum.md %}), such as `CREATE INDEX ON t ((col::an_enum))`. 
#129094 -[#128098]: https://github.com/cockroachdb/cockroach/pull/128098 -[#128108]: https://github.com/cockroachdb/cockroach/pull/128108 -[#128178]: https://github.com/cockroachdb/cockroach/pull/128178 -[#128188]: https://github.com/cockroachdb/cockroach/pull/128188 -[#128202]: https://github.com/cockroachdb/cockroach/pull/128202 -[#128240]: https://github.com/cockroachdb/cockroach/pull/128240 -[#128332]: https://github.com/cockroachdb/cockroach/pull/128332 -[#128349]: https://github.com/cockroachdb/cockroach/pull/128349 -[#128412]: https://github.com/cockroachdb/cockroach/pull/128412 -[#128465]: https://github.com/cockroachdb/cockroach/pull/128465 -[#128490]: https://github.com/cockroachdb/cockroach/pull/128490 -[#128522]: https://github.com/cockroachdb/cockroach/pull/128522 -[#128622]: https://github.com/cockroachdb/cockroach/pull/128622 -[#128700]: https://github.com/cockroachdb/cockroach/pull/128700 -[#128712]: https://github.com/cockroachdb/cockroach/pull/128712 -[#128825]: https://github.com/cockroachdb/cockroach/pull/128825 -[#128837]: https://github.com/cockroachdb/cockroach/pull/128837 -[#128891]: https://github.com/cockroachdb/cockroach/pull/128891 -[#129094]: https://github.com/cockroachdb/cockroach/pull/129094 diff --git a/src/current/_includes/releases/v24.2/v24.2.10.md b/src/current/_includes/releases/v24.2/v24.2.10.md index 808bc82fd58..c334e3a4ec7 100644 --- a/src/current/_includes/releases/v24.2/v24.2.10.md +++ b/src/current/_includes/releases/v24.2/v24.2.10.md @@ -6,52 +6,32 @@ Release Date: February 6, 2025

General changes

-- The protected timestamp (PTS) records of running changefeeds are now updated when the set of targets changes, such as when system tables are added to the protected tables list. [#138653][#138653] +- The protected timestamp (PTS) records of running changefeeds are now updated when the set of targets changes, such as when system tables are added to the protected tables list. #138653

SQL language changes

-- The `legacy_varchar_typing` session setting has been added, which reverts the changes of [#133037][#133037] that cause the change in typing behavior described in [#137837][#137837]. Specifically, it makes type-checking and overload resolution ignore the newly added "unpreferred" overloads. This setting defaults to `on`. [#138002][#138002] -- Since v23.2 table statistics histograms had been collected for non-indexed JSON columns. Histograms are no longer collected for these columns if the `sql.stats.non_indexed_json_histograms.enabled` cluster setting is set to `false`. This reduces memory usage during table statistics collection, for both automatic and manual collection via `ANALYZE` and `CREATE STATISTICS`. [#140267][#140267] -- Added support for a new index hint, `AVOID_FULL_SCAN`, which will prevent the optimizer from planning a full scan for the specified table if any other plan is possible. The hint can be used in the same way as other existing index hints. For example, `SELECT * FROM table_name@{AVOID_FULL_SCAN};`. This hint is similar to `NO_FULL_SCAN`, but will not error if a full scan cannot be avoided. Note that normally a full scan of a partial index would not be considered a "full scan" for the purposes of the `NO_FULL_SCAN` and `AVOID_FULL_SCAN` hints, but if the user has explicitly forced the partial index via `FORCE_INDEX=index_name`, CockroachDB does consider it a full scan. [#140271][#140271] -- Added the `optimizer_prefer_bounded_cardinality` session setting, which instructs the optimizer to prefer query plans where every expression has a guaranteed upper-bound on the number of rows it will process. This may help the optimizer produce better query plans in some cases. This setting is disabled by default. [#140271][#140271] -- Added the `optimizer_min_row_count` session setting, which sets a lower bound on row count estimates for relational expressions during query planning. A value of `0`, which is the default, indicates no lower bound. 
Note that if this is set to a value greater than `0`, a row count of zero can still be estimated for expressions with a cardinality of zero, e.g., for a contradictory filter. Setting this to a value higher than `0`, such as `1`, may yield better query plans in some cases, such as when statistics are frequently stale and inaccurate. [#140271][#140271] +- The `legacy_varchar_typing` session setting has been added, which reverts the changes of #133037 that cause the change in typing behavior described in #137837. Specifically, it makes type-checking and overload resolution ignore the newly added "unpreferred" overloads. This setting defaults to `on`. #138002 +- Since v23.2 table statistics histograms had been collected for non-indexed JSON columns. Histograms are no longer collected for these columns if the `sql.stats.non_indexed_json_histograms.enabled` cluster setting is set to `false`. This reduces memory usage during table statistics collection, for both automatic and manual collection via `ANALYZE` and `CREATE STATISTICS`. #140267 +- Added support for a new index hint, `AVOID_FULL_SCAN`, which will prevent the optimizer from planning a full scan for the specified table if any other plan is possible. The hint can be used in the same way as other existing index hints. For example, `SELECT * FROM table_name@{AVOID_FULL_SCAN};`. This hint is similar to `NO_FULL_SCAN`, but will not error if a full scan cannot be avoided. Note that normally a full scan of a partial index would not be considered a "full scan" for the purposes of the `NO_FULL_SCAN` and `AVOID_FULL_SCAN` hints, but if the user has explicitly forced the partial index via `FORCE_INDEX=index_name`, CockroachDB does consider it a full scan. #140271 +- Added the `optimizer_prefer_bounded_cardinality` session setting, which instructs the optimizer to prefer query plans where every expression has a guaranteed upper-bound on the number of rows it will process. 
This may help the optimizer produce better query plans in some cases. This setting is disabled by default. #140271 +- Added the `optimizer_min_row_count` session setting, which sets a lower bound on row count estimates for relational expressions during query planning. A value of `0`, which is the default, indicates no lower bound. Note that if this is set to a value greater than `0`, a row count of zero can still be estimated for expressions with a cardinality of zero, e.g., for a contradictory filter. Setting this to a value higher than `0`, such as `1`, may yield better query plans in some cases, such as when statistics are frequently stale and inaccurate. #140271

Operational changes

-- Schema object identifiers (e.g., database names, schema names, table names, function names, and type names) are no longer redacted when logging statements in the `EXEC` or `SQL_SCHEMA` log channels. If redaction of these names is required, then the new cluster setting `sql.log.redact_names.enabled` can be set to `true`. The default value of the setting is `false`. [#138656][#138656] -- Added a metric, `sql.schema_changer.object_count`, that keeps track of the count of schema objects in the cluster. [#138838][#138838] -- The `changefeed.max_behind_nanos` metric now supports scoping with metrics labels. [#139233][#139233] +- Schema object identifiers (e.g., database names, schema names, table names, function names, and type names) are no longer redacted when logging statements in the `EXEC` or `SQL_SCHEMA` log channels. If redaction of these names is required, then the new cluster setting `sql.log.redact_names.enabled` can be set to `true`. The default value of the setting is `false`. #138656 +- Added a metric, `sql.schema_changer.object_count`, that keeps track of the count of schema objects in the cluster. #138838 +- The `changefeed.max_behind_nanos` metric now supports scoping with metrics labels. #139233

Bug fixes

-- `CLOSE CURSOR` statements are now allowed in read-only transactions, similar to PostgreSQL. This bug had been present since at least v23.1. [#137791][#137791] -- `ALTER BACKUP SCHEDULE` no longer fails on schedules whose collection URI contains a space. [#138081][#138081] -- Previously, `SHOW CREATE TABLE` was showing incorrect data with regard to inverted indexes. It now shows the correct data in a format that can be repeatedly entered back into CockroachDB to recreate the same table. [#138084][#138084] -- Fixed a timing issue between `ALTER VIEW .. RENAME` and `DROP VIEW` that caused repeated failures in the `DROP VIEW` job. [#137888][#137888] -- Fixed a bug where querying the `pg_catalog.pg_constraint` table while the schema changer was dropping a constraint could result in a query error. [#138284][#138284] -- Queries that perform a cast from the string representation of an array containing `GEOMETRY` or `GEOGRAPHY` types to a SQL `ARRAY` type will now succeed. [#138694][#138694] -- Fixed a bug where secondary tenants could fatal when issuing HTTP requests during tenant startup. [#138754][#138754] -- Fixed a bug where CockroachDB could encounter an internal error `comparison of two different versions of enum` in some cases when a user-defined type was modified within a transaction and subsequent statements read the column of that user-defined type. The bug was introduced in v24.2. [#138053][#138053] -- When the session variable `allow_role_memberships_to_change_during_transaction` is set, it is now possible to create and drop users quickly even when there are contending transactions on the `system.users` and `system.role_options` system tables. [#139031][#139031] -- Fixed a bug where the error `batch timestamp ... must be after replica GC threshold` could occur during a schema change backfill operation, and cause the schema change job to retry infinitely. Now this error is treated as permanent, and will cause the job to enter the `failed` state. 
[#139249][#139249] - -[#137791]: https://github.com/cockroachdb/cockroach/pull/137791 -[#137888]: https://github.com/cockroachdb/cockroach/pull/137888 -[#138002]: https://github.com/cockroachdb/cockroach/pull/138002 -[#138053]: https://github.com/cockroachdb/cockroach/pull/138053 -[#138081]: https://github.com/cockroachdb/cockroach/pull/138081 -[#138084]: https://github.com/cockroachdb/cockroach/pull/138084 -[#138284]: https://github.com/cockroachdb/cockroach/pull/138284 -[#138653]: https://github.com/cockroachdb/cockroach/pull/138653 -[#138656]: https://github.com/cockroachdb/cockroach/pull/138656 -[#138694]: https://github.com/cockroachdb/cockroach/pull/138694 -[#138754]: https://github.com/cockroachdb/cockroach/pull/138754 -[#138838]: https://github.com/cockroachdb/cockroach/pull/138838 -[#138983]: https://github.com/cockroachdb/cockroach/pull/138983 -[#139031]: https://github.com/cockroachdb/cockroach/pull/139031 -[#139233]: https://github.com/cockroachdb/cockroach/pull/139233 -[#139249]: https://github.com/cockroachdb/cockroach/pull/139249 -[#140267]: https://github.com/cockroachdb/cockroach/pull/140267 -[#140271]: https://github.com/cockroachdb/cockroach/pull/140271 -[#133037]: https://github.com/cockroachdb/cockroach/pull/133037 -[#137837]: https://github.com/cockroachdb/cockroach/pull/137837 +- `CLOSE CURSOR` statements are now allowed in read-only transactions, similar to PostgreSQL. This bug had been present since at least v23.1. #137791 +- `ALTER BACKUP SCHEDULE` no longer fails on schedules whose collection URI contains a space. #138081 +- Previously, `SHOW CREATE TABLE` was showing incorrect data with regard to inverted indexes. It now shows the correct data in a format that can be repeatedly entered back into CockroachDB to recreate the same table. #138084 +- Fixed a timing issue between `ALTER VIEW .. RENAME` and `DROP VIEW` that caused repeated failures in the `DROP VIEW` job. 
#137888 +- Fixed a bug where querying the `pg_catalog.pg_constraint` table while the schema changer was dropping a constraint could result in a query error. #138284 +- Queries that perform a cast from the string representation of an array containing `GEOMETRY` or `GEOGRAPHY` types to a SQL `ARRAY` type will now succeed. #138694 +- Fixed a bug where secondary tenants could fatal when issuing HTTP requests during tenant startup. #138754 +- Fixed a bug where CockroachDB could encounter an internal error `comparison of two different versions of enum` in some cases when a user-defined type was modified within a transaction and subsequent statements read the column of that user-defined type. The bug was introduced in v24.2. #138053 +- When the session variable `allow_role_memberships_to_change_during_transaction` is set, it is now possible to create and drop users quickly even when there are contending transactions on the `system.users` and `system.role_options` system tables. #139031 +- Fixed a bug where the error `batch timestamp ... must be after replica GC threshold` could occur during a schema change backfill operation, and cause the schema change job to retry infinitely. Now this error is treated as permanent, and will cause the job to enter the `failed` state. #139249 + diff --git a/src/current/_includes/releases/v24.2/v24.2.2.md b/src/current/_includes/releases/v24.2/v24.2.2.md index 27bec07c3a6..b33a2526dab 100644 --- a/src/current/_includes/releases/v24.2/v24.2.2.md +++ b/src/current/_includes/releases/v24.2/v24.2.2.md @@ -6,7 +6,7 @@ Release Date: September 23, 2024

Bug fixes

-- Fixed a bug that could result in the inability to garbage collect an [MVCC]({% link v24.2/architecture/storage-layer.md %}#mvcc) range tombstone within a [global table]({% link v24.2/table-localities.md %}#global-tables). [#130946][#130946] +- Fixed a bug that could result in the inability to garbage collect an [MVCC]({% link v24.2/architecture/storage-layer.md %}#mvcc) range tombstone within a [global table]({% link v24.2/table-localities.md %}#global-tables). #130946
@@ -15,5 +15,3 @@ Release Date: September 23, 2024 This release includes 3 merged PRs by 2 authors.
- -[#130946]: https://github.com/cockroachdb/cockroach/pull/130946 \ No newline at end of file diff --git a/src/current/_includes/releases/v24.2/v24.2.3.md b/src/current/_includes/releases/v24.2/v24.2.3.md index 391b4957018..c95399b147a 100644 --- a/src/current/_includes/releases/v24.2/v24.2.3.md +++ b/src/current/_includes/releases/v24.2/v24.2.3.md @@ -6,46 +6,46 @@ Release Date: September 25, 2024

{{ site.data.products.enterprise }} edition changes

-- Added a `changefeed.protect_timestamp.lag` metric, which controls how much the changefeed [protected timestamp (PTS)]({% link v24.2/protect-changefeed-data.md %}) should lag behind the [high-water mark]({% link v24.2/how-does-an-enterprise-changefeed-work.md %}). A changefeed now only updates its PTS if `changefeed.protect_timestamp.lag` has passed between the last PTS and the changefeed high-water mark. [#129580][#129580] +- Added a `changefeed.protect_timestamp.lag` metric, which controls how much the changefeed [protected timestamp (PTS)]({% link v24.2/protect-changefeed-data.md %}) should lag behind the [high-water mark]({% link v24.2/how-does-an-enterprise-changefeed-work.md %}). A changefeed now only updates its PTS if `changefeed.protect_timestamp.lag` has passed between the last PTS and the changefeed high-water mark. #129580 - Added two network metrics, `changefeed.network.bytes_in` and `changefeed.network.bytes_out`. These metrics track the number of bytes sent by individual changefeeds to the following sinks: - [Kafka sinks]({% link v24.2/changefeed-sinks.md %}#kafka). If [child metrics]({% link v24.2/child-metrics.md %}) are enabled, the metric will have a `kafka` label. - [Webhook sinks]({% link v24.2/changefeed-sinks.md %}#webhook-sink). If [child metrics]({% link v24.2/child-metrics.md %}) are enabled, the metric will have a `webhook` label. - [Pub/Sub sinks]({% link v24.2/changefeed-sinks.md %}#google-cloud-pub-sub). If [child metrics]({% link v24.2/child-metrics.md %}) are enabled, the metric will have a `pubsub` label. - - [SQL sink]({% link v24.2/changefeed-for.md %}). If [child metrics]({% link v24.2/child-metrics.md %}) are enabled, the metric will have a `sql` label. [#130582][#130582] + - [SQL sink]({% link v24.2/changefeed-for.md %}). If [child metrics]({% link v24.2/child-metrics.md %}) are enabled, the metric will have a `sql` label. #130582

Operational changes

-- Added a new configuration parameter, `server.cidr_mapping_url`, which maps IPv4 CIDR blocks to arbitrary tag names. [#129071][#129071] -- Modified the metrics `sql.bytesin` and `sql.bytesout` to become aggregation metrics if [child metrics]({% link v24.2/child-metrics.md %}) are enabled. [#129071][#129071] +- Added a new configuration parameter, `server.cidr_mapping_url`, which maps IPv4 CIDR blocks to arbitrary tag names. #129071 +- Modified the metrics `sql.bytesin` and `sql.bytesout` to become aggregation metrics if [child metrics]({% link v24.2/child-metrics.md %}) are enabled. #129071 - Added three new network tracking metrics: - `rpc.connection.connected` is the number of rRPC TCP-level connections established to remote nodes. - `rpc.client.bytes.egress` is the number of TCP bytes sent via gRPC on connections initiated by CockroachDB. - - `rpc.client.bytes.ingress` is the number of TCP bytes received via gRPC on connections initiated by CockroachDB. [#128957][#128957] + - `rpc.client.bytes.ingress` is the number of TCP bytes received via gRPC on connections initiated by CockroachDB. #128957

DB Console changes

-- Users with the `VIEWACTIVITY` [system privilege]({% link v24.2/security-reference/authorization.md %}#supported-privileges) can now download [statement bundles]({% link v24.2/ui-statements-page.md %}#diagnostics) from the DB Console. [#129504][#129504] -- Users with the `VIEWACTIVITY` [system privilege]({% link v24.2/security-reference/authorization.md %}#supported-privileges) can now request, view, and cancel [statement bundles]({% link v24.2/ui-statements-page.md %}#diagnostics) from the DB Console. [#129809][#129809] -- The DB Console now displays a notification to alert customers without an Enterprise license to upcoming license changes. [#130511][#130511] +- Users with the `VIEWACTIVITY` [system privilege]({% link v24.2/security-reference/authorization.md %}#supported-privileges) can now download [statement bundles]({% link v24.2/ui-statements-page.md %}#diagnostics) from the DB Console. #129504 +- Users with the `VIEWACTIVITY` [system privilege]({% link v24.2/security-reference/authorization.md %}#supported-privileges) can now request, view, and cancel [statement bundles]({% link v24.2/ui-statements-page.md %}#diagnostics) from the DB Console. #129809 +- The DB Console now displays a notification to alert customers without an Enterprise license to upcoming license changes. #130511

Bug fixes

-- Fixed a bug where `NaN` or `Inf` could not be used as the default value for a parameter in [`CREATE FUNCTION`]({% link v24.2/create-function.md %}) statements. [#129087][#129087] -- Fix a bug in which [`SELECT ... FOR UPDATE`]({% link v24.2/select-for-update.md %}) or [`SELECT ... FOR SHARE`]({% link v24.2/select-for-update.md %}) queries using `SKIP LOCKED` and a `LIMIT` and/or an `OFFSET` could return incorrect results under [`READ COMMITTED`]({% link v24.2/read-committed.md %}) isolation. This bug was present when support for `SKIP LOCKED` under `READ COMMITTED` isolation was introduced in v24.1.0. [#128102][#128102] -- Fixed a bug in which some [`SELECT ... FOR UPDATE`]({% link v24.2/select-for-update.md %}) or [`SELECT ... FOR SHARE`]({% link v24.2/select-for-update.md %}) queries using `SKIP LOCKED` could still block on locked rows when using [`optimizer_use_lock_op_for_serializable`]({% link v24.2/session-variables.md %}#optimizer-use-lock-op-for-serializable) under [`SERIALIZABLE`]({% link v24.2/demo-serializable.md %}) isolation. This bug was present when `optimizer_use_lock_op_for_serializable` was introduced in v23.2.0. [#128102][#128102] -- Fixed a bug in which [`SHOW CLUSTER SETTING FOR VIRTUAL CLUSTER`]({% link v24.2/show-cluster-setting.md %}) would erroneously return `NULL` for some settings. [#128784][#128784] -- [Function]({% link v24.2/user-defined-functions.md %}) input parameters can no longer have the `VOID` type. [#129275][#129275] -- Fixed a bug in [WAL failover]({% link v24.2/cockroach-start.md %}#enable-wal-failover) that could prevent a node from starting if it crashed during a failover. [#129364][#129364] -- Fixed a bug where starting up nodes could fail with `could not insert session ...: unexpected value` if an ambiguous result error was hit while inserting into the `sqlliveness` table. [#129235][#129235] -- Internally issued queries that are not initiated within a SQL session no longer respect a statement timeout. 
This includes: [background jobs]({% link v24.2/show-jobs.md %}), queries issued by the DB Console that perform introspection, and the {{ site.data.products.cloud }} [SQL shell]({% link cockroachcloud/sql-shell.md %}). [#129513][#129513] -- Fixed a rare bug in [`SHOW CLUSTER SETTING`]({% link v24.2/show-cluster-setting.md %}) that could cause it to fail with an error like `timed out: value differs between local setting and KV`. [#129745][#129745] -- Fixed a bug where the [`schema_locked` table parameter]({% link v24.2/with-storage-parameter.md %}#table-parameters) did not prevent a table from being referenced by a [foreign key]({% link v24.2/foreign-key.md %}). [#129755][#129755] -- Fixed a bug that could cause [`RESTORE`]({% link v24.2/restore.md %}) to hang after encountering transient errors from the [storage layer]({% link v24.2/architecture/storage-layer.md %}). [#129895][#129895] -- Fixed a bug where the [`require_explicit_primary_keys`]({% link v24.2/session-variables.md %}#require-explicit-primary-keys) session variable would overly aggressively prevent all [`CREATE TABLE`]({% link v24.2/create-table.md %}) statements from working. [#129908][#129908] -- Fixed a slow-building memory leak that could occur when using [Kerberos authentication]({% link v24.2/gssapi_authentication.md %}). [#130319][#130319] -- Fixed a bug that could result in the inability to garbage collect an [MVCC]({% link v24.2/architecture/storage-layer.md %}#mvcc) range tombstone within a [global table]({% link v24.2/table-localities.md %}#global-tables). [#130951][#130951] -- Fixed a potential memory leak in changefeeds using a [cloud storage sink]({% link v24.2/changefeed-sinks.md %}#cloud-storage-sink). The memory leak could occur if both [`changefeed.fast_gzip.enabled`]({% link v24.2/cluster-virtualization-setting-scopes.md %}) and `changefeed.cloudstorage.async_flush.enabled` are true and the changefeed received an error while attempting to write to the cloud storage sink. 
[#130625][#130625] -- Fixed a bug that prevented buffered file sinks from being included when iterating over all file sinks. This led to problems such as the `debug zip` command not being able to fetch logs for a cluster where buffering was enabled. [#130158][#130158] +- Fixed a bug where `NaN` or `Inf` could not be used as the default value for a parameter in [`CREATE FUNCTION`]({% link v24.2/create-function.md %}) statements. #129087 +- Fix a bug in which [`SELECT ... FOR UPDATE`]({% link v24.2/select-for-update.md %}) or [`SELECT ... FOR SHARE`]({% link v24.2/select-for-update.md %}) queries using `SKIP LOCKED` and a `LIMIT` and/or an `OFFSET` could return incorrect results under [`READ COMMITTED`]({% link v24.2/read-committed.md %}) isolation. This bug was present when support for `SKIP LOCKED` under `READ COMMITTED` isolation was introduced in v24.1.0. #128102 +- Fixed a bug in which some [`SELECT ... FOR UPDATE`]({% link v24.2/select-for-update.md %}) or [`SELECT ... FOR SHARE`]({% link v24.2/select-for-update.md %}) queries using `SKIP LOCKED` could still block on locked rows when using [`optimizer_use_lock_op_for_serializable`]({% link v24.2/session-variables.md %}#optimizer-use-lock-op-for-serializable) under [`SERIALIZABLE`]({% link v24.2/demo-serializable.md %}) isolation. This bug was present when `optimizer_use_lock_op_for_serializable` was introduced in v23.2.0. #128102 +- Fixed a bug in which [`SHOW CLUSTER SETTING FOR VIRTUAL CLUSTER`]({% link v24.2/show-cluster-setting.md %}) would erroneously return `NULL` for some settings. #128784 +- [Function]({% link v24.2/user-defined-functions.md %}) input parameters can no longer have the `VOID` type. #129275 +- Fixed a bug in [WAL failover]({% link v24.2/cockroach-start.md %}#enable-wal-failover) that could prevent a node from starting if it crashed during a failover. 
#129364 +- Fixed a bug where starting up nodes could fail with `could not insert session ...: unexpected value` if an ambiguous result error was hit while inserting into the `sqlliveness` table. #129235 +- Internally issued queries that are not initiated within a SQL session no longer respect a statement timeout. This includes: [background jobs]({% link v24.2/show-jobs.md %}), queries issued by the DB Console that perform introspection, and the {{ site.data.products.cloud }} [SQL shell]({% link cockroachcloud/sql-shell.md %}). #129513 +- Fixed a rare bug in [`SHOW CLUSTER SETTING`]({% link v24.2/show-cluster-setting.md %}) that could cause it to fail with an error like `timed out: value differs between local setting and KV`. #129745 +- Fixed a bug where the [`schema_locked` table parameter]({% link v24.2/with-storage-parameter.md %}#table-parameters) did not prevent a table from being referenced by a [foreign key]({% link v24.2/foreign-key.md %}). #129755 +- Fixed a bug that could cause [`RESTORE`]({% link v24.2/restore.md %}) to hang after encountering transient errors from the [storage layer]({% link v24.2/architecture/storage-layer.md %}). #129895 +- Fixed a bug where the [`require_explicit_primary_keys`]({% link v24.2/session-variables.md %}#require-explicit-primary-keys) session variable would overly aggressively prevent all [`CREATE TABLE`]({% link v24.2/create-table.md %}) statements from working. #129908 +- Fixed a slow-building memory leak that could occur when using [Kerberos authentication]({% link v24.2/gssapi_authentication.md %}). #130319 +- Fixed a bug that could result in the inability to garbage collect an [MVCC]({% link v24.2/architecture/storage-layer.md %}#mvcc) range tombstone within a [global table]({% link v24.2/table-localities.md %}#global-tables). #130951 +- Fixed a potential memory leak in changefeeds using a [cloud storage sink]({% link v24.2/changefeed-sinks.md %}#cloud-storage-sink). 
The memory leak could occur if both [`changefeed.fast_gzip.enabled`]({% link v24.2/cluster-virtualization-setting-scopes.md %}) and `changefeed.cloudstorage.async_flush.enabled` are true and the changefeed received an error while attempting to write to the cloud storage sink. #130625 +- Fixed a bug that prevented buffered file sinks from being included when iterating over all file sinks. This led to problems such as the `debug zip` command not being able to fetch logs for a cluster where buffering was enabled. #130158
@@ -55,33 +55,3 @@ This release includes 94 merged PRs by 38 authors.
-[#128102]: https://github.com/cockroachdb/cockroach/pull/128102 -[#128784]: https://github.com/cockroachdb/cockroach/pull/128784 -[#128957]: https://github.com/cockroachdb/cockroach/pull/128957 -[#129071]: https://github.com/cockroachdb/cockroach/pull/129071 -[#129087]: https://github.com/cockroachdb/cockroach/pull/129087 -[#129235]: https://github.com/cockroachdb/cockroach/pull/129235 -[#129275]: https://github.com/cockroachdb/cockroach/pull/129275 -[#129364]: https://github.com/cockroachdb/cockroach/pull/129364 -[#129385]: https://github.com/cockroachdb/cockroach/pull/129385 -[#129391]: https://github.com/cockroachdb/cockroach/pull/129391 -[#129403]: https://github.com/cockroachdb/cockroach/pull/129403 -[#129456]: https://github.com/cockroachdb/cockroach/pull/129456 -[#129504]: https://github.com/cockroachdb/cockroach/pull/129504 -[#129513]: https://github.com/cockroachdb/cockroach/pull/129513 -[#129580]: https://github.com/cockroachdb/cockroach/pull/129580 -[#129745]: https://github.com/cockroachdb/cockroach/pull/129745 -[#129755]: https://github.com/cockroachdb/cockroach/pull/129755 -[#129809]: https://github.com/cockroachdb/cockroach/pull/129809 -[#129895]: https://github.com/cockroachdb/cockroach/pull/129895 -[#129908]: https://github.com/cockroachdb/cockroach/pull/129908 -[#130158]: https://github.com/cockroachdb/cockroach/pull/130158 -[#130319]: https://github.com/cockroachdb/cockroach/pull/130319 -[#130511]: https://github.com/cockroachdb/cockroach/pull/130511 -[#130582]: https://github.com/cockroachdb/cockroach/pull/130582 -[#130625]: https://github.com/cockroachdb/cockroach/pull/130625 -[#130951]: https://github.com/cockroachdb/cockroach/pull/130951 -[10b47e3aa]: https://github.com/cockroachdb/cockroach/commit/10b47e3aa -[939ef54a1]: https://github.com/cockroachdb/cockroach/commit/939ef54a1 -[97ff913c1]: https://github.com/cockroachdb/cockroach/commit/97ff913c1 -[d7307d220]: https://github.com/cockroachdb/cockroach/commit/d7307d220 diff --git 
a/src/current/_includes/releases/v24.2/v24.2.4.md b/src/current/_includes/releases/v24.2/v24.2.4.md index 7a2758a3daa..0e145ac74b1 100644 --- a/src/current/_includes/releases/v24.2/v24.2.4.md +++ b/src/current/_includes/releases/v24.2/v24.2.4.md @@ -6,90 +6,56 @@ Release Date: October 17, 2024

{{ site.data.products.enterprise }} edition changes

-- Updated the cluster setting [`changefeed.sink_io_workers`]({% link v24.2/cluster-settings.md %}#setting-changefeed-sink-io-workers) with all the [changefeed sinks]({% link v24.1/changefeed-sinks.md %}) that support the setting. [#130374][#130374] -- Added two network metrics, `changefeed.network.bytes_in` and `changefeed.network.bytes_out`. [#130578][#130578] +- Updated the cluster setting [`changefeed.sink_io_workers`]({% link v24.2/cluster-settings.md %}#setting-changefeed-sink-io-workers) with all the [changefeed sinks]({% link v24.2/changefeed-sinks.md %}) that support the setting. #130374 +- Added two network metrics, `changefeed.network.bytes_in` and `changefeed.network.bytes_out`. #130578 These metrics track the number of bytes sent by individual [changefeeds]({% link v24.2/change-data-capture-overview.md %}) to the following sinks: - [Kafka sinks]({% link v24.2/changefeed-sinks.md %}#kafka). If [child metrics are enabled]({% link v24.2/cluster-settings.md %}#setting-server-child-metrics-enabled), the metric will have a `kafka` label. - [Webhook sinks]({% link v24.2/changefeed-sinks.md %}#webhook-sink). If child metrics are enabled, the metric will have a `webhook` label. - [Pub/Sub sinks]({% link v24.2/changefeed-sinks.md %}#google-cloud-pub-sub). If child metrics are enabled, the metric will have a `pubsub` label. - [SQL sink]({% link v24.2/changefeed-for.md %}). If child metrics are enabled, the metric will have a `sql` label. -- Added a `changefeed.total_ranges` metric that can be used to monitor the number of ranges that are watched by [changefeed]({% link v24.2/change-data-capture-overview.md %}) aggregators. It shares the same polling interval as `changefeed.lagging_ranges`, which is controlled by the existing `lagging_ranges_polling_interval` option. [#130982][#130982] -- Disambiguated [metrics]({% link v24.2/essential-metrics-self-hosted.md %}) and [logs]({% link v24.2/logging-overview.md %}) for the two buffers used by the KV feed. 
The following metrics now have a suffix indicating which buffer they correspond to: `changefeed.buffer_entries.*`, `changefeed.buffer_entries_mem.*`, or `changefeed.buffer_pushback_nanos.*`. The previous metric names are retained for backward compatibility. [#131419][#131419] -- Added timers and corresponding [metrics]({% link v24.2/metrics.md %} for key parts of the [changefeed]({% link v24.2/change-data-capture-overview.md %}) pipeline to help debug issues with feeds. The `changefeed.stage.{stage}.latency` metrics now emit latency histograms for each stage. The metrics respect the changefeed `scope` label to debug a specific feed. [#131372][#131372] +- Added a `changefeed.total_ranges` metric that can be used to monitor the number of ranges that are watched by [changefeed]({% link v24.2/change-data-capture-overview.md %}) aggregators. It shares the same polling interval as `changefeed.lagging_ranges`, which is controlled by the existing `lagging_ranges_polling_interval` option. #130982 +- Disambiguated [metrics]({% link v24.2/essential-metrics-self-hosted.md %}) and [logs]({% link v24.2/logging-overview.md %}) for the two buffers used by the KV feed. The following metrics now have a suffix indicating which buffer they correspond to: `changefeed.buffer_entries.*`, `changefeed.buffer_entries_mem.*`, or `changefeed.buffer_pushback_nanos.*`. The previous metric names are retained for backward compatibility. #131419 +- Added timers and corresponding [metrics]({% link v24.2/metrics.md %}) for key parts of the [changefeed]({% link v24.2/change-data-capture-overview.md %}) pipeline to help debug issues with feeds. The `changefeed.stage.{stage}.latency` metrics now emit latency histograms for each stage. The metrics respect the changefeed `scope` label to debug a specific feed. #131372

SQL language changes

-- The [session variable]({% link v24.2/set-vars.md %}) `enforce_home_region_follower_reads_enabled` is now deprecated, in favor of `enforce_home_region`. The deprecated variable will be removed in a future release. [#129587][#129587] +- The [session variable]({% link v24.2/set-vars.md %}) `enforce_home_region_follower_reads_enabled` is now deprecated, in favor of `enforce_home_region`. The deprecated variable will be removed in a future release. #129587

Operational changes

-- Added the new [metric]({% link v24.2/metrics.md %}) `ranges.decommissioning` to show the number of ranges that have a replica on a [decommissioning node]({% link v24.2/node-shutdown.md %}?filters=decommission). [#130247][#130247] -- You can now configure the log format for the [`stderr` log sink]({% link v24.2/configure-logs.md %}#output-to-stderr) by setting the `stderr.format` field in the [YAML configuration]({% link v24.2/configure-logs.md %}#yaml-payload). [#131539][#131539] +- Added the new [metric]({% link v24.2/metrics.md %}) `ranges.decommissioning` to show the number of ranges that have a replica on a [decommissioning node]({% link v24.2/node-shutdown.md %}?filters=decommission). #130247 +- You can now configure the log format for the [`stderr` log sink]({% link v24.2/configure-logs.md %}#output-to-stderr) by setting the `stderr.format` field in the [YAML configuration]({% link v24.2/configure-logs.md %}#yaml-payload). #131539

DB Console changes

-- Streamlined [metric chart]({% link v24.2/ui-overview.md %}#metrics) legends by removing the name of the chart from labels, where it was an identical prefix for all labels on the chart. [#129359][#129359] -- The [DB Console]({% link v24.2/ui-overview.md %}) now shows a notification if the cluster has no Enterprise license set. Refer to [upcoming license changes](https://www.cockroachlabs.com/enterprise-license-update/) for more information. [#130417][#130417] +- Streamlined [metric chart]({% link v24.2/ui-overview.md %}#metrics) legends by removing the name of the chart from labels, where it was an identical prefix for all labels on the chart. #129359 +- The [DB Console]({% link v24.2/ui-overview.md %}) now shows a notification if the cluster has no Enterprise license set. Refer to [upcoming license changes](https://www.cockroachlabs.com/enterprise-license-update/) for more information. #130417

Bug fixes

-- Fixed a bug that could prevent [upgrade finalization]({% link v24.2/upgrade-cockroach-version.md %}) when attempting to resolve a large number of corrupt descriptors. [#130517][#130517] -- Fixed a potential memory leak in [changefeeds]({% link v24.2/change-data-capture-overview.md %}) using a [cloud storage sink]({% link v24.2/changefeed-sinks.md %}#cloud-storage-sink). The memory leak could occur if both [`changefeed.fast_gzip.enabled`]({% link v24.2/cluster-settings.md %}#setting-changefeed-fast-gzip-enabled) and `changefeed.cloudstorage.async_flush.enabled` were `true`, and the changefeed received an error while attempting to write to the cloud storage sink. [#130602][#130602] -- Fixed a bug where zone configuration changes issued by the [declarative schema changer]({% link v24.2/online-schema-changes.md %}) were not blocked if a table had `schema_locked` set. [#130705][#130705] -- Fixed a bug in which some [`SELECT FOR UPDATE`]({% link v24.2/select-for-update.md %}) or [`SELECT FOR SHARE`]({% link v24.2/select-for-update.md %}) queries using `NOWAIT` could still block on locked rows when using the `optimizer_use_lock_op_for_serializable` [session setting]({% link v24.2/session-variables.md %}) under [`SERIALIZABLE`]({% link v24.2/demo-serializable.md %}) isolation. This bug was introduced with [`optimizer_use_lock_op_for_serializable`]({% link v24.2/session-variables.md %}#optimizer-use-lock-op-for-serializable) in v23.2.0. [#130430][#130430] -- Fixed a bug that caused the [optimizer]({% link v24.2/cost-based-optimizer.md %}) to plan unnecessary post-query uniqueness checks during [`INSERT`]({% link v24.2/insert.md %}), [`UPSERT`]({% link v24.2/upsert.md %}), and [`UPDATE`]({% link v24.2/update.md %}) statements on tables with [partial]({% link v24.2/partial-indexes.md %}), [unique]({% link v24.2/create-index.md %}#unique-indexes) [hash-sharded indexes]({% link v24.2/hash-sharded-indexes.md %}). 
These unnecessary checks added overhead to execution of these statements, and caused the statements to error when executed under [`READ COMMITTED`]({% link v24.2/read-committed.md %}) isolation. [#130570][#130570] -- Fixed a bug that could result in the inability to garbage collect an [MVCC]({% link v24.2/architecture/storage-layer.md %}#mvcc) range tombstone within a [global table]({% link v24.2/global-tables.md %}). [#130940][#130940] -- Fixed a bug where a connection could be dropped if the client was attempting a [schema change]({% link v24.2/online-schema-changes.md %}) while the same schema objects were being dropped. [#130962][#130962] -- Fixed a bug introduced in v23.2 where the [`IS NOT NULL`]({% link v24.2/null-handling.md %}#nulls-and-simple-comparisons) clause would incorrectly allow tuples containing `NULL` elements, e.g. `(1, NULL)` or `(NULL, NULL)`. [#130947][#130947] -- Fixed a bug that could cause errors with the message `internal error: Non-nullable column ...` when executing statements under [`READ COMMITTED`]({% link v24.2/read-committed.md %}) isolation that involved tables with [`NOT NULL`]({% link v24.2/not-null.md %}) [virtual columns]({% link v24.2/computed-columns.md %}). [#131019][#131019] -- Fixed a bug where [AWS S3 and HTTP client configurations]({% link v24.2/use-cloud-storage.md %}) were not considered when implicit authentication was used. [#131173][#131173] +- Fixed a bug that could prevent [upgrade finalization]({% link v24.2/upgrade-cockroach-version.md %}) when attempting to resolve a large number of corrupt descriptors. #130517 +- Fixed a potential memory leak in [changefeeds]({% link v24.2/change-data-capture-overview.md %}) using a [cloud storage sink]({% link v24.2/changefeed-sinks.md %}#cloud-storage-sink). 
The memory leak could occur if both [`changefeed.fast_gzip.enabled`]({% link v24.2/cluster-settings.md %}#setting-changefeed-fast-gzip-enabled) and `changefeed.cloudstorage.async_flush.enabled` were `true`, and the changefeed received an error while attempting to write to the cloud storage sink. #130602 +- Fixed a bug where zone configuration changes issued by the [declarative schema changer]({% link v24.2/online-schema-changes.md %}) were not blocked if a table had `schema_locked` set. #130705 +- Fixed a bug in which some [`SELECT FOR UPDATE`]({% link v24.2/select-for-update.md %}) or [`SELECT FOR SHARE`]({% link v24.2/select-for-update.md %}) queries using `NOWAIT` could still block on locked rows when using the `optimizer_use_lock_op_for_serializable` [session setting]({% link v24.2/session-variables.md %}) under [`SERIALIZABLE`]({% link v24.2/demo-serializable.md %}) isolation. This bug was introduced with [`optimizer_use_lock_op_for_serializable`]({% link v24.2/session-variables.md %}#optimizer-use-lock-op-for-serializable) in v23.2.0. #130430 +- Fixed a bug that caused the [optimizer]({% link v24.2/cost-based-optimizer.md %}) to plan unnecessary post-query uniqueness checks during [`INSERT`]({% link v24.2/insert.md %}), [`UPSERT`]({% link v24.2/upsert.md %}), and [`UPDATE`]({% link v24.2/update.md %}) statements on tables with [partial]({% link v24.2/partial-indexes.md %}), [unique]({% link v24.2/create-index.md %}#unique-indexes) [hash-sharded indexes]({% link v24.2/hash-sharded-indexes.md %}). These unnecessary checks added overhead to execution of these statements, and caused the statements to error when executed under [`READ COMMITTED`]({% link v24.2/read-committed.md %}) isolation. #130570 +- Fixed a bug that could result in the inability to garbage collect an [MVCC]({% link v24.2/architecture/storage-layer.md %}#mvcc) range tombstone within a [global table]({% link v24.2/global-tables.md %}). 
#130940 +- Fixed a bug where a connection could be dropped if the client was attempting a [schema change]({% link v24.2/online-schema-changes.md %}) while the same schema objects were being dropped. #130962 +- Fixed a bug introduced in v23.2 where the [`IS NOT NULL`]({% link v24.2/null-handling.md %}#nulls-and-simple-comparisons) clause would incorrectly allow tuples containing `NULL` elements, e.g. `(1, NULL)` or `(NULL, NULL)`. #130947 +- Fixed a bug that could cause errors with the message `internal error: Non-nullable column ...` when executing statements under [`READ COMMITTED`]({% link v24.2/read-committed.md %}) isolation that involved tables with [`NOT NULL`]({% link v24.2/not-null.md %}) [virtual columns]({% link v24.2/computed-columns.md %}). #131019 +- Fixed a bug where [AWS S3 and HTTP client configurations]({% link v24.2/use-cloud-storage.md %}) were not considered when implicit authentication was used. #131173 - Fixed a bug introduced in v23.1 that can cause incorrect query results under the following conditions: 1. The query contains a correlated subquery. 1. The correlated subquery has a `GroupBy` or `DistinctOn` operator with an outer-column reference in its input. 1. The correlated subquery is in the input of a `SELECT` or `JOIN` clause that has a filter that sets the outer-column reference equal to an inner column that is in the input of the grouping operator. 1. The set of grouping columns does not include the replacement column explicitly. - [#130990][#130990] -- Fixed a bug where jobs created in a session with a timezone offset configured could fail to start or could report an incorrect creation time in the output of [`SHOW JOBS`]({% link v24.2/show-jobs.md %}) and in the [DB Console]({% link v24.2/ui-overview.md %}). [#131407][#131407] -- Fixed a bug that could prevent a [changefeed]({% link v24.2/change-data-capture-overview.md %}) from resuming after a prolonged [paused state]({% link v24.2/create-and-configure-changefeeds.md %}#pause). 
[#130921][#130921] -- Fixed a bug where backup schedules could advance a protected timestamp too early, which caused incremental backups to fail. [#131391][#131391] + #130990 +- Fixed a bug where jobs created in a session with a timezone offset configured could fail to start or could report an incorrect creation time in the output of [`SHOW JOBS`]({% link v24.2/show-jobs.md %}) and in the [DB Console]({% link v24.2/ui-overview.md %}). #131407 +- Fixed a bug that could prevent a [changefeed]({% link v24.2/change-data-capture-overview.md %}) from resuming after a prolonged [paused state]({% link v24.2/create-and-configure-changefeeds.md %}#pause). #130921 +- Fixed a bug where backup schedules could advance a protected timestamp too early, which caused incremental backups to fail. #131391

Performance improvements

-- The [query optimizer]({% link v24.2/cost-based-optimizer.md %}) now plans limited [partial index]({% link v24.2/partial-indexes.md %}) scans in more cases when the new [session variable]({% link v24.2/session-variables.md %}) `optimizer_push_limit_into_project_filtered_scan` is set to `on`. [#130335][#130335] -- Reduced the write-amplification impact of rebalances by splitting snapshot [SST files]({% link v24.2/architecture/storage-layer.md %}#pebble) before ingesting them into [Pebble]({% link v24.2/architecture/storage-layer.md %}#ssts). [#128997][#128997] - -[#128997]: https://github.com/cockroachdb/cockroach/pull/128997 -[#129359]: https://github.com/cockroachdb/cockroach/pull/129359 -[#129587]: https://github.com/cockroachdb/cockroach/pull/129587 -[#130247]: https://github.com/cockroachdb/cockroach/pull/130247 -[#130335]: https://github.com/cockroachdb/cockroach/pull/130335 -[#130374]: https://github.com/cockroachdb/cockroach/pull/130374 -[#130417]: https://github.com/cockroachdb/cockroach/pull/130417 -[#130430]: https://github.com/cockroachdb/cockroach/pull/130430 -[#130517]: https://github.com/cockroachdb/cockroach/pull/130517 -[#130570]: https://github.com/cockroachdb/cockroach/pull/130570 -[#130578]: https://github.com/cockroachdb/cockroach/pull/130578 -[#130602]: https://github.com/cockroachdb/cockroach/pull/130602 -[#130673]: https://github.com/cockroachdb/cockroach/pull/130673 -[#130676]: https://github.com/cockroachdb/cockroach/pull/130676 -[#130705]: https://github.com/cockroachdb/cockroach/pull/130705 -[#130921]: https://github.com/cockroachdb/cockroach/pull/130921 -[#130935]: https://github.com/cockroachdb/cockroach/pull/130935 -[#130940]: https://github.com/cockroachdb/cockroach/pull/130940 -[#130947]: https://github.com/cockroachdb/cockroach/pull/130947 -[#130962]: https://github.com/cockroachdb/cockroach/pull/130962 -[#130982]: https://github.com/cockroachdb/cockroach/pull/130982 -[#130990]: https://github.com/cockroachdb/cockroach/pull/130990 
-[#131019]: https://github.com/cockroachdb/cockroach/pull/131019 -[#131173]: https://github.com/cockroachdb/cockroach/pull/131173 -[#131199]: https://github.com/cockroachdb/cockroach/pull/131199 -[#131210]: https://github.com/cockroachdb/cockroach/pull/131210 -[#131239]: https://github.com/cockroachdb/cockroach/pull/131239 -[#131311]: https://github.com/cockroachdb/cockroach/pull/131311 -[#131372]: https://github.com/cockroachdb/cockroach/pull/131372 -[#131391]: https://github.com/cockroachdb/cockroach/pull/131391 -[#131407]: https://github.com/cockroachdb/cockroach/pull/131407 -[#131419]: https://github.com/cockroachdb/cockroach/pull/131419 -[#131539]: https://github.com/cockroachdb/cockroach/pull/131539 -[#131619]: https://github.com/cockroachdb/cockroach/pull/131619 +- The [query optimizer]({% link v24.2/cost-based-optimizer.md %}) now plans limited [partial index]({% link v24.2/partial-indexes.md %}) scans in more cases when the new [session variable]({% link v24.2/session-variables.md %}) `optimizer_push_limit_into_project_filtered_scan` is set to `on`. #130335 +- Reduced the write-amplification impact of rebalances by splitting snapshot [SST files]({% link v24.2/architecture/storage-layer.md %}#pebble) before ingesting them into [Pebble]({% link v24.2/architecture/storage-layer.md %}#ssts). #128997 + diff --git a/src/current/_includes/releases/v24.2/v24.2.5.md b/src/current/_includes/releases/v24.2/v24.2.5.md index 844401e23df..07f54b95ff1 100644 --- a/src/current/_includes/releases/v24.2/v24.2.5.md +++ b/src/current/_includes/releases/v24.2/v24.2.5.md @@ -6,44 +6,44 @@ Release Date: November 18, 2024

Security updates

-- Host-based authentication (HBA) configuration entries for LDAP will be evaluated for proper LDAP parameter values, and a valid and complete list of authentication method options is now required to amend HBA settings. [#132748][#132748] -- You can now authenticate to the DB Console by passing a JWT as the bearer token. [#133534][#133534] +- Host-based authentication (HBA) configuration entries for LDAP will be evaluated for proper LDAP parameter values, and a valid and complete list of authentication method options is now required to amend HBA settings. #132748 +- You can now authenticate to the DB Console by passing a JWT as the bearer token. #133534

General changes

-- Change the license `cockroach` is distributed under to the new CockroachDB Software License. [#131707][#131707] [#131956][#131956] [#131961][#131961] [#131983][#131983] [#132011][#132011] [#132013][#132013] [#132012][#132012] [#132015][#132015] [#132014][#132014] [#132016][#132016] [#132054][#132054] [#131970][#131970] [#132056][#132056] [#132801][#132801] [#132704][#132704] -- The cluster setting `diagnostics.reporting.enabled` is now ignored if the cluster has an Enterprise Trial or Enterprise Free license, or if the license cannot be loaded. [#132464][#132464] -- The new metrics `changefeed.sink_errors` and `changefeed.internal_retry_message_count` allow you to observe the rate of errors and internal retries for a sink, respectively. [#132353][#132353] -- Added a timer for inner sink client flushes. [#133197][#133197] +- Change the license `cockroach` is distributed under to the new CockroachDB Software License. #131707 #131956 #131961 #131983 #132011 #132013 #132012 #132015 #132014 #132016 #132054 #131970 #132056 #132801 #132704 +- The cluster setting `diagnostics.reporting.enabled` is now ignored if the cluster has an Enterprise Trial or Enterprise Free license, or if the license cannot be loaded. #132464 +- The new metrics `changefeed.sink_errors` and `changefeed.internal_retry_message_count` allow you to observe the rate of errors and internal retries for a sink, respectively. #132353 +- Added a timer for inner sink client flushes. #133197

DB Console changes

-- The DB Console now shows a warning if the cluster is throttled or will be throttled soon due to an expired Enterprise Free or Enterprise Trial license or due to missing telemetry data. Clusters with an Enterprise license are not throttled. [#132093][#132093] -- The Range Count column on the Databases page is no longer shown due to performance issues. This data is still available via the `SHOW RANGES` command. [#133268][#133268] +- The DB Console now shows a warning if the cluster is throttled or will be throttled soon due to an expired Enterprise Free or Enterprise Trial license or due to missing telemetry data. Clusters with an Enterprise license are not throttled. #132093 +- The Range Count column on the Databases page is no longer shown due to performance issues. This data is still available via the `SHOW RANGES` command. #133268

Bug fixes

-- Fixed a bug where timers were not correctly registered with the metric system. [#133197][#133197] -- Fixed a bug where the command-line interface would not correctly escape JSON values that had double quotes inside a string when using the `--format=sql` flag. [#131931][#131931] -- Fixed an error that could occur if a `SET` command used an aggregate function as the value. [#131960][#131960] -- Fixed a bug where ordering by `VECTOR` columns could result in an internal error in some cases. Now an `unimplemented` error is returned instead. [#132107][#132107] -- Added automated clean-up/validation for dropped roles inside default privileges. [#132136][#132136] -- Fixed a bug that that caused incorrect evaluation of a `CASE`, `COALESCE`, or `IF` expression with a branch that produced fixed-width string-like types, such as `CHAR`. [#130889][#130889] -- Fixed a bug that could cause the `BPCHAR` type to incorrectly impose a length limit of 1. [#130889][#130889] -- Fixed a rare bug that could prevent a backup from being restored and could cause the error `rewriting descriptor ids: missing rewrite for in SequenceOwner...`. This bug could occur only if a `DROP COLUMN` operation dropped a sequence while the backup was running. [#132326][#132326] -- Fixed a bug introduced in v23.1 that could cause incorrect results when a join evaluates columns with equivalent but non-identical types, such as `OID` and `REGCLASS`, for equality. The issue arises when the join performs an index lookup on an index that includes a computed column referencing one of the equivalent columns. [#132508][#132508] -- Fixed a bug introduced before v23.1 that could cause a composite sensitive expression to compare differently if comparing equivalent but non-identical input values, such as `2.0::DECIMAL` and `2.00::DECIMAL`. The issue arises when the join performs an index lookup on a table with a computed index column where the computed column expression is composite sensitive. 
[#132508][#132508] -- Fixed a bug where a span statistics request on a mixed-version cluster could result in a null pointer exception. [#132680][#132680] -- Updated the `franz-go` library to fix a potential deadlock when a changefeed restarts. [#132787][#132787] -- Fixed a bug where a changefeed could fail to update protected timestamp records after a retryable error. [#132773][#132773] -- Fixed a bug where a changefeed that used change data capture queries could fail after a system table was garbage collected. [#131649][#131649] -- Fixed a rare bug introduced in v22.2 where an update of a primary key column could fail to update the primary index if it is also the only column in a separate column family. [#132120][#132120] -- Fixed a bug where the `proretset` column of the `pg_catalog.pg_proc` table was incorrectly set to `false` for builtin functions that return a set. [#132876][#132876] -- Fixed a bug that could cause incorrect evaluation of scalar expressions with `NULL` values. [#132946][#132946] -- Fixed a rare bug in the query optimizer that could cause a node to crash if a query contained a filter in the form `col IN (elem0, elem1, ..., elemN)` when `N` is very large, in the order of millions, and when `col` exists in a hash-sharded index or when a table with an indexed computed column depends on `col`. [#132868][#132868] -- Fixed a bug where an `ALTER DEFAULT PRIVILEGES FOR target_role ...` command could result in an erroneous privilege error when run by a user with the `admin` role. [#133071][#133071] -- Fixed a bug where a `REASSIGN OWNED BY` command would fail to transfer ownership of the public schema, even when the schema was owned by the target role. [#133071][#133071] -- Fixed a panic when resolving the types of an `AS OF SYSTEM TIME` expression. [#132455][#132455] +- Fixed a bug where timers were not correctly registered with the metric system. 
#133197 +- Fixed a bug where the command-line interface would not correctly escape JSON values that had double quotes inside a string when using the `--format=sql` flag. #131931 +- Fixed an error that could occur if a `SET` command used an aggregate function as the value. #131960 +- Fixed a bug where ordering by `VECTOR` columns could result in an internal error in some cases. Now an `unimplemented` error is returned instead. #132107 +- Added automated clean-up/validation for dropped roles inside default privileges. #132136 +- Fixed a bug that caused incorrect evaluation of a `CASE`, `COALESCE`, or `IF` expression with a branch that produced fixed-width string-like types, such as `CHAR`. #130889 +- Fixed a bug that could cause the `BPCHAR` type to incorrectly impose a length limit of 1. #130889 +- Fixed a rare bug that could prevent a backup from being restored and could cause the error `rewriting descriptor ids: missing rewrite for in SequenceOwner...`. This bug could occur only if a `DROP COLUMN` operation dropped a sequence while the backup was running. #132326 +- Fixed a bug introduced in v23.1 that could cause incorrect results when a join evaluates columns with equivalent but non-identical types, such as `OID` and `REGCLASS`, for equality. The issue arises when the join performs an index lookup on an index that includes a computed column referencing one of the equivalent columns. #132508 +- Fixed a bug introduced before v23.1 that could cause a composite sensitive expression to compare differently if comparing equivalent but non-identical input values, such as `2.0::DECIMAL` and `2.00::DECIMAL`. The issue arises when the join performs an index lookup on a table with a computed index column where the computed column expression is composite sensitive. #132508 +- Fixed a bug where a span statistics request on a mixed-version cluster could result in a null pointer exception. 
#132680 +- Updated the `franz-go` library to fix a potential deadlock when a changefeed restarts. #132787 +- Fixed a bug where a changefeed could fail to update protected timestamp records after a retryable error. #132773 +- Fixed a bug where a changefeed that used change data capture queries could fail after a system table was garbage collected. #131649 +- Fixed a rare bug introduced in v22.2 where an update of a primary key column could fail to update the primary index if it is also the only column in a separate column family. #132120 +- Fixed a bug where the `proretset` column of the `pg_catalog.pg_proc` table was incorrectly set to `false` for builtin functions that return a set. #132876 +- Fixed a bug that could cause incorrect evaluation of scalar expressions with `NULL` values. #132946 +- Fixed a rare bug in the query optimizer that could cause a node to crash if a query contained a filter in the form `col IN (elem0, elem1, ..., elemN)` when `N` is very large, in the order of millions, and when `col` exists in a hash-sharded index or when a table with an indexed computed column depends on `col`. #132868 +- Fixed a bug where an `ALTER DEFAULT PRIVILEGES FOR target_role ...` command could result in an erroneous privilege error when run by a user with the `admin` role. #133071 +- Fixed a bug where a `REASSIGN OWNED BY` command would fail to transfer ownership of the public schema, even when the schema was owned by the target role. #133071 +- Fixed a panic when resolving the types of an `AS OF SYSTEM TIME` expression. #132455 - Fixed a bug that could cause new connections to fail with the following error after upgrading: `ERROR: invalid value for parameter "vectorize": "unknown(1)" SQLSTATE: 22023 HINT: Available values: off,on,experimental_always`. To encounter this bug, the cluster must have: 1. Run on version v21.1 at some point in the past. 1. Run `SET CLUSTER SETTING sql.defaults.vectorize = 'on';` while running v21.1. 
@@ -64,55 +64,10 @@ Release Date: November 18, 2024 RESET CLUSTER SETTING sql.defaults.vectorize; ~~~ - `1` is now allowed as a value for this setting, and is equivalent to `on`. [#133371][#133371] [#133368][#133368] -- Fixed a rare bug that could cause unnecessarily high disk usage in the presence of high rebalance activity. [#133565][#133565] + `1` is now allowed as a value for this setting, and is equivalent to `on`. #133371 #133368 +- Fixed a rare bug that could cause unnecessarily high disk usage in the presence of high rebalance activity. #133565

Performance improvements

-- Performance has been improved during periodic polling of table history when `schema_locked` is not used. [#132191][#132191] - -[#130889]: https://github.com/cockroachdb/cockroach/pull/130889 -[#131649]: https://github.com/cockroachdb/cockroach/pull/131649 -[#131707]: https://github.com/cockroachdb/cockroach/pull/131707 -[#131931]: https://github.com/cockroachdb/cockroach/pull/131931 -[#131956]: https://github.com/cockroachdb/cockroach/pull/131956 -[#131960]: https://github.com/cockroachdb/cockroach/pull/131960 -[#131961]: https://github.com/cockroachdb/cockroach/pull/131961 -[#131970]: https://github.com/cockroachdb/cockroach/pull/131970 -[#131983]: https://github.com/cockroachdb/cockroach/pull/131983 -[#132011]: https://github.com/cockroachdb/cockroach/pull/132011 -[#132012]: https://github.com/cockroachdb/cockroach/pull/132012 -[#132013]: https://github.com/cockroachdb/cockroach/pull/132013 -[#132014]: https://github.com/cockroachdb/cockroach/pull/132014 -[#132015]: https://github.com/cockroachdb/cockroach/pull/132015 -[#132016]: https://github.com/cockroachdb/cockroach/pull/132016 -[#132054]: https://github.com/cockroachdb/cockroach/pull/132054 -[#132056]: https://github.com/cockroachdb/cockroach/pull/132056 -[#132093]: https://github.com/cockroachdb/cockroach/pull/132093 -[#132107]: https://github.com/cockroachdb/cockroach/pull/132107 -[#132120]: https://github.com/cockroachdb/cockroach/pull/132120 -[#132136]: https://github.com/cockroachdb/cockroach/pull/132136 -[#132191]: https://github.com/cockroachdb/cockroach/pull/132191 -[#132326]: https://github.com/cockroachdb/cockroach/pull/132326 -[#132353]: https://github.com/cockroachdb/cockroach/pull/132353 -[#132455]: https://github.com/cockroachdb/cockroach/pull/132455 -[#132464]: https://github.com/cockroachdb/cockroach/pull/132464 -[#132508]: https://github.com/cockroachdb/cockroach/pull/132508 -[#132680]: https://github.com/cockroachdb/cockroach/pull/132680 -[#132704]: 
https://github.com/cockroachdb/cockroach/pull/132704 -[#132739]: https://github.com/cockroachdb/cockroach/pull/132739 -[#132748]: https://github.com/cockroachdb/cockroach/pull/132748 -[#132773]: https://github.com/cockroachdb/cockroach/pull/132773 -[#132787]: https://github.com/cockroachdb/cockroach/pull/132787 -[#132801]: https://github.com/cockroachdb/cockroach/pull/132801 -[#132868]: https://github.com/cockroachdb/cockroach/pull/132868 -[#132876]: https://github.com/cockroachdb/cockroach/pull/132876 -[#132946]: https://github.com/cockroachdb/cockroach/pull/132946 -[#132959]: https://github.com/cockroachdb/cockroach/pull/132959 -[#133071]: https://github.com/cockroachdb/cockroach/pull/133071 -[#133197]: https://github.com/cockroachdb/cockroach/pull/133197 -[#133268]: https://github.com/cockroachdb/cockroach/pull/133268 -[#133368]: https://github.com/cockroachdb/cockroach/pull/133368 -[#133470]: https://github.com/cockroachdb/cockroach/pull/133470 -[#133534]: https://github.com/cockroachdb/cockroach/pull/133534 -[#133565]: https://github.com/cockroachdb/cockroach/pull/133565 +- Performance has been improved during periodic polling of table history when `schema_locked` is not used. #132191 + diff --git a/src/current/_includes/releases/v24.2/v24.2.6.md b/src/current/_includes/releases/v24.2/v24.2.6.md index 3cedad5d3a0..6c796d71e54 100644 --- a/src/current/_includes/releases/v24.2/v24.2.6.md +++ b/src/current/_includes/releases/v24.2/v24.2.6.md @@ -6,73 +6,40 @@ Release Date: December 12, 2024

Security updates

-- All cluster settings that accept strings are now fully redacted when transmitted as part of diagnostics telemetry. This payload includes a record of modified cluster settings and their values when they are not strings. Customers who previously applied the mitigations in [Technical Advisory 133479]({% link advisories/a133479.md %}) can safely set the value of cluster setting `server.redact_sensitive_settings.enabled` to false and turn on diagnostic reporting via the `diagnostics.reporting.enabled` cluster setting without leaking sensitive cluster settings values. [#134017][#134017] +- All cluster settings that accept strings are now fully redacted when transmitted as part of diagnostics telemetry. This payload includes a record of modified cluster settings and their values when they are not strings. Customers who previously applied the mitigations in [Technical Advisory 133479]({% link advisories/a133479.md %}) can safely set the value of cluster setting `server.redact_sensitive_settings.enabled` to false and turn on diagnostic reporting via the `diagnostics.reporting.enabled` cluster setting without leaking sensitive cluster settings values. #134017

General changes

-- `COCKROACH_SKIP_ENABLING_DIAGNOSTIC_REPORTING` is no longer mentioned in the `cockroach demo` command. [#134088][#134088] -- Added `system.users` to the list of system tables that changefeeds protect with protected timestamps. This table is required for change data capture queries. [#134837][#134837] +- `COCKROACH_SKIP_ENABLING_DIAGNOSTIC_REPORTING` is no longer mentioned in the `cockroach demo` command. #134088 +- Added `system.users` to the list of system tables that changefeeds protect with protected timestamps. This table is required for change data capture queries. #134837

Operational changes

-- The `goschedstats.always_use_short_sample_period.enabled` setting should be set to true for any production cluster, to prevent unnecessary queuing in admission control CPU queues. [#133584][#133584] -- Added a new cluster setting `ui.database_locality_metadata.enabled` that allows operators to disable loading extended database and table region information in the DB Console Database and Table pages. This information can cause significant CPU load on large clusters with many ranges. Versions of this page from v24.3 and later do not have this problem. If customers require this data, they can use the `SHOW RANGES FROM {DATABASE| TABLE}` query via SQL to compute on-demand. [#134095][#134095] -- Row-level TTL jobs now periodically log progress by showing the number of table spans that have been processed so far. [#135170][#135170] +- The `goschedstats.always_use_short_sample_period.enabled` setting should be set to true for any production cluster, to prevent unnecessary queuing in admission control CPU queues. #133584 +- Added a new cluster setting `ui.database_locality_metadata.enabled` that allows operators to disable loading extended database and table region information in the DB Console Database and Table pages. This information can cause significant CPU load on large clusters with many ranges. Versions of this page from v24.3 and later do not have this problem. If customers require this data, they can use the `SHOW RANGES FROM {DATABASE| TABLE}` query via SQL to compute on-demand. #134095 +- Row-level TTL jobs now periodically log progress by showing the number of table spans that have been processed so far. #135170

Bug fixes

-- Fixed a bug that caused non-reusable query plans, e.g., plans for DDL and `SHOW ...` statements, to be cached and reused in future executions, possibly causing stale results to be returned. This bug only occurred when `plan_cache_mode` was set to `auto` or `force_generic_plan`, both of which are not currently the default settings. [#133074][#133074] -- Previously, CockroachDB could encounter an internal error of the form `interface conversion: coldata.Column is` in an edge case and this is now fixed. The bug is present in v22.2.13+, v23.1.9+, v23.2+. [#133761][#133761] -- Fixed a bug that caused incorrect `NOT NULL` constraint violation errors on `UPSERT` and `INSERT .. ON CONFLICT .. DO UPDATE` statements when those statements updated an existing row and a subset of columns that did not include a `NOT NULL` column of the table. This bug has been present since at least v20.1.0. [#133821][#133821] -- Fixed an unhandled error that could occur when using `REVOKE ... ON SEQUENCE FROM ... user` on an object that is not a sequence. [#133709][#133709] -- Addressed a panic inside `CREATE TABLE AS` if sequence builtin expressions had invalid function overloads. [#133869][#133869] -- `STRING`constants can now be compared against collated strings. [#134084][#134084] -- When executing queries with index / lookup joins when the ordering needs to be maintained, previously CockroachDB could experience increased query latency, possibly by several orders of magnitude. This bug was introduced in v22.2 and is now fixed. [#134366][#134366] -- Fixed a minor bug where `DISCARD ALL` statements were counted under the `sql.ddl.count` metric. Now these will be counted under the `sql.misc.count` metric. [#134509][#134509] -- Addressed a bug with `DROP CASCADE` that would occasionally panic with an undropped `backref` message on partitioned tables. 
[#134472][#134472] -- Reduced the duration of partitions in the gossip network when a node crashes in order to eliminate false positives in the `ranges.unavailable` metric. [#134600][#134600] -- Non-`admin` users that run `DROP ROLE IF EXISTS` on a user that does not exist will no longer receive an error message. [#134969][#134969] -- Fixed a bug that caused quotes around the name of a routine to be dropped when it was called within another routine. This could prevent the correct routine from being resolved if the nested routine name was case sensitive. The bug has existed since v24.1, when nested routines were introduced. [#134000][#134000] -- Fixed a bug that could cause incorrect query results when the optimizer planned a lookup join on an index containing a column of type `CHAR(N)`, `VARCHAR(N)`, `BIT(N)`, `VARBIT(N)`, or `DECIMAL(M, N)`, and the query held that column constant to a single value (e.g., with an equality filter). [#135076][#135076] -- Fixed an unhandled error that would occur if `DROP SCHEMA` was executed on the `public` schema of the `system` database, or on an internal schema like `pg_catalog` or `information_schema`. [#135180][#135180] -- Fixed a bug that caused incorrect evaluation of some binary expressions involving `CHAR(N)` values and untyped string literals with trailing whitespace characters. For example, the expression `'f'::CHAR = 'f '` now correctly evaluates to `true`. [#135689][#135689] -- Fixed a bug where `ALTER DATABASE` operations that modify the zone config would hang if an invalid zone config already exists. [#135215][#135215] -- `CREATE SCHEMA` now returns the correct error if a the schema name is missing. [#135927][#135927] -- Using more than one `DECLARE` statment in the definition of a user-defined function now correctly declares additional variables. [#135738][#135738] -- Fixed a bug where CockroachDB would encounter an internal error when evaluating `FETCH ABSOLUTE 0` statements. The bug has been present since v22.1. 
[#134992][#134992] -- Fixed an issue where corrupted table statistics could cause the `cockroach` process to crash. [#136041][#136041] -- Fixed a bug that causes the optimizer to use stale table statistics after altering an `ENUM` type used in the table. [#136812][#136812] +- Fixed a bug that caused non-reusable query plans, e.g., plans for DDL and `SHOW ...` statements, to be cached and reused in future executions, possibly causing stale results to be returned. This bug only occurred when `plan_cache_mode` was set to `auto` or `force_generic_plan`, both of which are not currently the default settings. #133074 +- Previously, CockroachDB could encounter an internal error of the form `interface conversion: coldata.Column is` in an edge case and this is now fixed. The bug is present in v22.2.13+, v23.1.9+, v23.2+. #133761 +- Fixed a bug that caused incorrect `NOT NULL` constraint violation errors on `UPSERT` and `INSERT .. ON CONFLICT .. DO UPDATE` statements when those statements updated an existing row and a subset of columns that did not include a `NOT NULL` column of the table. This bug has been present since at least v20.1.0. #133821 +- Fixed an unhandled error that could occur when using `REVOKE ... ON SEQUENCE FROM ... user` on an object that is not a sequence. #133709 +- Addressed a panic inside `CREATE TABLE AS` if sequence builtin expressions had invalid function overloads. #133869 +- `STRING` constants can now be compared against collated strings. #134084 +- When executing queries with index / lookup joins when the ordering needs to be maintained, previously CockroachDB could experience increased query latency, possibly by several orders of magnitude. This bug was introduced in v22.2 and is now fixed. #134366 +- Fixed a minor bug where `DISCARD ALL` statements were counted under the `sql.ddl.count` metric. Now these will be counted under the `sql.misc.count` metric. 
#134509 +- Addressed a bug with `DROP CASCADE` that would occasionally panic with an undropped `backref` message on partitioned tables. #134472 +- Reduced the duration of partitions in the gossip network when a node crashes in order to eliminate false positives in the `ranges.unavailable` metric. #134600 +- Non-`admin` users that run `DROP ROLE IF EXISTS` on a user that does not exist will no longer receive an error message. #134969 +- Fixed a bug that caused quotes around the name of a routine to be dropped when it was called within another routine. This could prevent the correct routine from being resolved if the nested routine name was case sensitive. The bug has existed since v24.1, when nested routines were introduced. #134000 +- Fixed a bug that could cause incorrect query results when the optimizer planned a lookup join on an index containing a column of type `CHAR(N)`, `VARCHAR(N)`, `BIT(N)`, `VARBIT(N)`, or `DECIMAL(M, N)`, and the query held that column constant to a single value (e.g., with an equality filter). #135076 +- Fixed an unhandled error that would occur if `DROP SCHEMA` was executed on the `public` schema of the `system` database, or on an internal schema like `pg_catalog` or `information_schema`. #135180 +- Fixed a bug that caused incorrect evaluation of some binary expressions involving `CHAR(N)` values and untyped string literals with trailing whitespace characters. For example, the expression `'f'::CHAR = 'f '` now correctly evaluates to `true`. #135689 +- Fixed a bug where `ALTER DATABASE` operations that modify the zone config would hang if an invalid zone config already exists. #135215 +- `CREATE SCHEMA` now returns the correct error if the schema name is missing. #135927 +- Using more than one `DECLARE` statement in the definition of a user-defined function now correctly declares additional variables. #135738 +- Fixed a bug where CockroachDB would encounter an internal error when evaluating `FETCH ABSOLUTE 0` statements. 
The bug has been present since v22.1. #134992 +- Fixed an issue where corrupted table statistics could cause the `cockroach` process to crash. #136041 +- Fixed a bug that causes the optimizer to use stale table statistics after altering an `ENUM` type used in the table. #136812 -[#133074]: https://github.com/cockroachdb/cockroach/pull/133074 -[#133584]: https://github.com/cockroachdb/cockroach/pull/133584 -[#133709]: https://github.com/cockroachdb/cockroach/pull/133709 -[#133761]: https://github.com/cockroachdb/cockroach/pull/133761 -[#133821]: https://github.com/cockroachdb/cockroach/pull/133821 -[#133869]: https://github.com/cockroachdb/cockroach/pull/133869 -[#134000]: https://github.com/cockroachdb/cockroach/pull/134000 -[#134017]: https://github.com/cockroachdb/cockroach/pull/134017 -[#134084]: https://github.com/cockroachdb/cockroach/pull/134084 -[#134088]: https://github.com/cockroachdb/cockroach/pull/134088 -[#134095]: https://github.com/cockroachdb/cockroach/pull/134095 -[#134099]: https://github.com/cockroachdb/cockroach/pull/134099 -[#134366]: https://github.com/cockroachdb/cockroach/pull/134366 -[#134447]: https://github.com/cockroachdb/cockroach/pull/134447 -[#134472]: https://github.com/cockroachdb/cockroach/pull/134472 -[#134509]: https://github.com/cockroachdb/cockroach/pull/134509 -[#134600]: https://github.com/cockroachdb/cockroach/pull/134600 -[#134646]: https://github.com/cockroachdb/cockroach/pull/134646 -[#134730]: https://github.com/cockroachdb/cockroach/pull/134730 -[#134837]: https://github.com/cockroachdb/cockroach/pull/134837 -[#134969]: https://github.com/cockroachdb/cockroach/pull/134969 -[#134992]: https://github.com/cockroachdb/cockroach/pull/134992 -[#135076]: https://github.com/cockroachdb/cockroach/pull/135076 -[#135170]: https://github.com/cockroachdb/cockroach/pull/135170 -[#135180]: https://github.com/cockroachdb/cockroach/pull/135180 -[#135215]: https://github.com/cockroachdb/cockroach/pull/135215 -[#135611]: 
https://github.com/cockroachdb/cockroach/pull/135611 -[#135689]: https://github.com/cockroachdb/cockroach/pull/135689 -[#135738]: https://github.com/cockroachdb/cockroach/pull/135738 -[#135927]: https://github.com/cockroachdb/cockroach/pull/135927 -[#136010]: https://github.com/cockroachdb/cockroach/pull/136010 -[#136041]: https://github.com/cockroachdb/cockroach/pull/136041 -[#136812]: https://github.com/cockroachdb/cockroach/pull/136812 diff --git a/src/current/_includes/releases/v24.2/v24.2.7.md b/src/current/_includes/releases/v24.2/v24.2.7.md index 7a06916a8da..bb1b5e55810 100644 --- a/src/current/_includes/releases/v24.2/v24.2.7.md +++ b/src/current/_includes/releases/v24.2/v24.2.7.md @@ -6,7 +6,5 @@ Release Date: December 26, 2024

SQL language changes

-- Added the `legacy_varchar_typing` session setting. When set to `on`, type-checking comparisons involving `VARCHAR` columns behave as they did in all previous versions. When set to `off`, type-checking of these comparisons is more strict and queries that previously succeeded may now error with the message `unsupported comparison operator`. These errors can be fixed by adding explicit type casts. The `legacy_varchar_typing` session setting is on by default. [#137944][#137944] +- Added the `legacy_varchar_typing` session setting. When set to `on`, type-checking comparisons involving `VARCHAR` columns behave as they did in all previous versions. When set to `off`, type-checking of these comparisons is more strict and queries that previously succeeded may now error with the message `unsupported comparison operator`. These errors can be fixed by adding explicit type casts. The `legacy_varchar_typing` session setting is on by default. #137944 - -[#137944]: https://github.com/cockroachdb/cockroach/pull/137944 \ No newline at end of file diff --git a/src/current/_includes/releases/v24.2/v24.2.8.md b/src/current/_includes/releases/v24.2/v24.2.8.md index a03ecb132ec..03323ae5e16 100644 --- a/src/current/_includes/releases/v24.2/v24.2.8.md +++ b/src/current/_includes/releases/v24.2/v24.2.8.md @@ -11,74 +11,45 @@ Release Date: January 9, 2025 - `changefeed.parallel_io_result_queue_nanos` - `changefeed.sink_batch_hist_nanos` - `changefeed.flush_hist_nanos` - - `changefeed.kafka_throttling_hist_nanos` [#136603][#136603] -- Added support for multiple seed brokers in the new Kafka sink. [#136748][#136748] -- Added a new metric (`distsender.rangefeed.catchup_ranges_waiting_client_side`) that counts how many rangefeeds are waiting on the client-side limiter to start performing catchup scans. [#136835][#136835] -- Added changefeed support for the `mvcc_timestamp` option with the `avro` format. 
If both options are specified, the Avro schema includes an `mvcc_timestamp` metadata field and emits the row's MVCC timestamp with the row data. [#136016][#136016] -- Added a no-op `AWS_USE_PATH_STYLE` parameter for forward compatibility with v24.3. [#137025][#137025] + - `changefeed.kafka_throttling_hist_nanos` #136603 +- Added support for multiple seed brokers in the new Kafka sink. #136748 +- Added a new metric (`distsender.rangefeed.catchup_ranges_waiting_client_side`) that counts how many rangefeeds are waiting on the client-side limiter to start performing catchup scans. #136835 +- Added changefeed support for the `mvcc_timestamp` option with the `avro` format. If both options are specified, the Avro schema includes an `mvcc_timestamp` metadata field and emits the row's MVCC timestamp with the row data. #136016 +- Added a no-op `AWS_USE_PATH_STYLE` parameter for forward compatibility with v24.3. #137025

SQL language changes

-- Added the `legacy_varchar_typing` session setting, which reverts the changes of [#133037](https://github.com/cockroachdb/cockroach/pull/133037) that causes the change in typing behavior described in [#137837](https://github.com/cockroachdb/cockroach/pull/137837). Specifically, it makes type-checking and overload resolution ignore the newly added "unpreferred" overloads. This setting defaults to `on`. [#137920][#137920] +- Added the `legacy_varchar_typing` session setting, which reverts the changes of #133037 that causes the change in typing behavior described in #137837. Specifically, it makes type-checking and overload resolution ignore the newly added "unpreferred" overloads. This setting defaults to `on`. #137920

Operational changes

-- Removed the `sql.auth.resolve_membership_single_scan.enabled` cluster setting. This was added out of precaution in case it was necessary to revert back to the old behavior for looking up role memberships, but this escape hatch has never been needed in practice since this was added in v23.1. [#136161][#136161] -- Telemetry delivery is now considered successful even in cases where we experience a network timeout. This will prevent throttling in cases outside an operator's control. [#136479][#136479] -- When a schema change job is completed, rolls back, or encounters a failure, the time taken since the job began is now logged in a structured log in the `SQL_SCHEMA` log channel. [#136928][#136928] -- Added a new configurable parameter `kv.transaction.max_intents_and_locks` that will prevent transactions from creating too many intents. [#137700][#137700] -- Added the metric `txn.count_limit_rejected`, which tracks the KV transactions that have been aborted because they exceeded the max number of writes and locking reads allowed. [#137700][#137700] -- Added the metric `txn.count_limit_on_response`, which tracks the number of KV transactions that have exceeded the count limit on a response. [#137700][#137700] +- Removed the `sql.auth.resolve_membership_single_scan.enabled` cluster setting. This was added out of precaution in case it was necessary to revert back to the old behavior for looking up role memberships, but this escape hatch has never been needed in practice since this was added in v23.1. #136161 +- Telemetry delivery is now considered successful even in cases where we experience a network timeout. This will prevent throttling in cases outside an operator's control. #136479 +- When a schema change job is completed, rolls back, or encounters a failure, the time taken since the job began is now logged in a structured log in the `SQL_SCHEMA` log channel. 
#136928 +- Added a new configurable parameter `kv.transaction.max_intents_and_locks` that will prevent transactions from creating too many intents. #137700 +- Added the metric `txn.count_limit_rejected`, which tracks the KV transactions that have been aborted because they exceeded the max number of writes and locking reads allowed. #137700 +- Added the metric `txn.count_limit_on_response`, which tracks the number of KV transactions that have exceeded the count limit on a response. #137700

Bug fixes

-- `ALTER COLUMN SET NOT NULL` was not enforced consistently when the table was created in the same transaction. [#136365][#136365] -- Fixed a bug where `CREATE RELATION / TYPE` could leave dangling namespace entries if the schema was concurrently being dropped. [#136379][#136379] -- The `idle_in_session_timeout` setting now excludes the time spent waiting for schema changer jobs to complete, preventing unintended session termination during schema change operations. [#136502][#136502] -- Fixed a bug that causes the optimizer to use stale table statistics after altering an `ENUM` type used in the table. [#136759][#136759] -- Table statistics collection in CockroachDB could previously run into `no bytes in account to release` errors in some edge cases (when the SQL memory budget, configured via `--max-sql-memory` flag, was close to being exhausted). The bug has been present since v21.2 and is now fixed. [#136165][#136165] -- CockroachDB now better respects the `statement_timeout` limit on queries involving the top K sort and merge join operations. [#136652][#136652] -- Fixed an issue where license enforcement was not consistently disabled for single-node clusters started with `cockroach start-single-node`. The fix ensures proper behavior on cluster restarts. [#137011][#137011] -- Fixed a bug that caused queries against tables with user-defined types to sometimes fail with errors after restoring those tables. [#137354][#137354] -- Fixed a bug that caused an incorrect filesystem to be logged as part of the store information. [#137113][#137113] -- Fixed a bug that has existed since v24.1 that would cause a set-returning UDF with `OUT` parameters to return a single row. [#137377][#137377] -- Fixed a bug that could cause an internal error if a table with an implicit (`rowid`) primary key was locked from within a subquery, like: `SELECT * FROM (SELECT * FROM foo WHERE x = 2) FOR UPDATE;`. 
The error could occur either under read-committed isolation, or with `optimizer_use_lock_op_for_serializable` enabled. [#137129][#137129] -- Fixed an issue where adding an existing column with the `IF NOT EXISTS` option could exit too early, skipping necessary handling of the abstract syntax tree (AST). This could lead to failure of the `ALTER` statement. [#137676][#137676] -- Fixed a bug related to displaying the names of composite types in the `SHOW CREATE TABLES` command. The names are now shown as two-part names, which disambiguates the output and makes it more portable to other databases. [#137768][#137768] -- Fixed an issue where a schema change could incorrectly cause a changefeed to fail with an assertion error like `received boundary timestamp ... of type ... before reaching existing boundary of type ...`. [#137705][#137705] -- Internal scans are now exempt from the `sql.defaults.disallow_full_table_scans.enabled` setting, allowing index creation even when the cluster setting is active. [#137724][#137724] +- `ALTER COLUMN SET NOT NULL` was not enforced consistently when the table was created in the same transaction. #136365 +- Fixed a bug where `CREATE RELATION / TYPE` could leave dangling namespace entries if the schema was concurrently being dropped. #136379 +- The `idle_in_session_timeout` setting now excludes the time spent waiting for schema changer jobs to complete, preventing unintended session termination during schema change operations. #136502 +- Fixed a bug that causes the optimizer to use stale table statistics after altering an `ENUM` type used in the table. #136759 +- Table statistics collection in CockroachDB could previously run into `no bytes in account to release` errors in some edge cases (when the SQL memory budget, configured via `--max-sql-memory` flag, was close to being exhausted). The bug has been present since v21.2 and is now fixed. 
#136165 +- CockroachDB now better respects the `statement_timeout` limit on queries involving the top K sort and merge join operations. #136652 +- Fixed an issue where license enforcement was not consistently disabled for single-node clusters started with `cockroach start-single-node`. The fix ensures proper behavior on cluster restarts. #137011 +- Fixed a bug that caused queries against tables with user-defined types to sometimes fail with errors after restoring those tables. #137354 +- Fixed a bug that caused an incorrect filesystem to be logged as part of the store information. #137113 +- Fixed a bug that has existed since v24.1 that would cause a set-returning UDF with `OUT` parameters to return a single row. #137377 +- Fixed a bug that could cause an internal error if a table with an implicit (`rowid`) primary key was locked from within a subquery, like: `SELECT * FROM (SELECT * FROM foo WHERE x = 2) FOR UPDATE;`. The error could occur either under read-committed isolation, or with `optimizer_use_lock_op_for_serializable` enabled. #137129 +- Fixed an issue where adding an existing column with the `IF NOT EXISTS` option could exit too early, skipping necessary handling of the abstract syntax tree (AST). This could lead to failure of the `ALTER` statement. #137676 +- Fixed a bug related to displaying the names of composite types in the `SHOW CREATE TABLES` command. The names are now shown as two-part names, which disambiguates the output and makes it more portable to other databases. #137768 +- Fixed an issue where a schema change could incorrectly cause a changefeed to fail with an assertion error like `received boundary timestamp ... of type ... before reaching existing boundary of type ...`. #137705 +- Internal scans are now exempt from the `sql.defaults.disallow_full_table_scans.enabled` setting, allowing index creation even when the cluster setting is active. #137724

Performance improvements

-- Improved the internal caching logic for role membership information. This reduces the latency impact of commands such as `DROP ROLE`, `CREATE ROLE`, and `GRANT role TO user`, which cause the role membership cache to be invalidated. [#136161][#136161] +- Improved the internal caching logic for role membership information. This reduces the latency impact of commands such as `DROP ROLE`, `CREATE ROLE`, and `GRANT role TO user`, which cause the role membership cache to be invalidated. #136161 -[#136016]: https://github.com/cockroachdb/cockroach/pull/136016 -[#136161]: https://github.com/cockroachdb/cockroach/pull/136161 -[#136165]: https://github.com/cockroachdb/cockroach/pull/136165 -[#136365]: https://github.com/cockroachdb/cockroach/pull/136365 -[#136379]: https://github.com/cockroachdb/cockroach/pull/136379 -[#136471]: https://github.com/cockroachdb/cockroach/pull/136471 -[#136479]: https://github.com/cockroachdb/cockroach/pull/136479 -[#136502]: https://github.com/cockroachdb/cockroach/pull/136502 -[#136603]: https://github.com/cockroachdb/cockroach/pull/136603 -[#136652]: https://github.com/cockroachdb/cockroach/pull/136652 -[#136681]: https://github.com/cockroachdb/cockroach/pull/136681 -[#136748]: https://github.com/cockroachdb/cockroach/pull/136748 -[#136759]: https://github.com/cockroachdb/cockroach/pull/136759 -[#136835]: https://github.com/cockroachdb/cockroach/pull/136835 -[#136928]: https://github.com/cockroachdb/cockroach/pull/136928 -[#137011]: https://github.com/cockroachdb/cockroach/pull/137011 -[#137025]: https://github.com/cockroachdb/cockroach/pull/137025 -[#137113]: https://github.com/cockroachdb/cockroach/pull/137113 -[#137116]: https://github.com/cockroachdb/cockroach/pull/137116 -[#137129]: https://github.com/cockroachdb/cockroach/pull/137129 -[#137354]: https://github.com/cockroachdb/cockroach/pull/137354 -[#137377]: https://github.com/cockroachdb/cockroach/pull/137377 -[#137461]: https://github.com/cockroachdb/cockroach/pull/137461 
-[#137676]: https://github.com/cockroachdb/cockroach/pull/137676 -[#137700]: https://github.com/cockroachdb/cockroach/pull/137700 -[#137705]: https://github.com/cockroachdb/cockroach/pull/137705 -[#137724]: https://github.com/cockroachdb/cockroach/pull/137724 -[#137768]: https://github.com/cockroachdb/cockroach/pull/137768 -[#137920]: https://github.com/cockroachdb/cockroach/pull/137920 diff --git a/src/current/_includes/releases/v24.2/v24.2.9.md b/src/current/_includes/releases/v24.2/v24.2.9.md index c44148efec6..d535050e35e 100644 --- a/src/current/_includes/releases/v24.2/v24.2.9.md +++ b/src/current/_includes/releases/v24.2/v24.2.9.md @@ -6,6 +6,5 @@ Release Date: January 31, 2025

Bug fixes

-- Fixed a bug that could cause `SHOW TABLES` and other introspection operations to encounter a `"batch timestamp ... must be after replica GC threshold"` error. [#140176][#140176] +- Fixed a bug that could cause `SHOW TABLES` and other introspection operations to encounter a `"batch timestamp ... must be after replica GC threshold"` error. #140176 -[#140176]: https://github.com/cockroachdb/cockroach/pull/140176 diff --git a/src/current/_includes/releases/v24.3/backward-incompatible.md b/src/current/_includes/releases/v24.3/backward-incompatible.md index d81c2f57213..876267ce61f 100644 --- a/src/current/_includes/releases/v24.3/backward-incompatible.md +++ b/src/current/_includes/releases/v24.3/backward-incompatible.md @@ -2,9 +2,8 @@ Before [upgrading to CockroachDB v24.3]({% link v24.3/upgrade-cockroach-version. If you plan to upgrade to v24.3 directly from v24.1 and skip v24.2, be sure to also review the [v24.2 release notes]({% link releases/v24.2.md %}) for backward-incompatible changes from v24.1. -- Upgrading to v24.3 is blocked if no [license]({% link v24.3/licensing-faqs.md %}) is installed, or if a trial/free license is installed with telemetry disabled. [#130576][#130576] +- Upgrading to v24.3 is blocked if no [license]({% link v24.3/licensing-faqs.md %}) is installed, or if a trial/free license is installed with telemetry disabled. #130576 -[#130576]: https://github.com/cockroachdb/cockroach/pull/130576 {% comment %}Remove this anchor when it is added to the v24.3.0 GA release notes{% endcomment %} diff --git a/src/current/_includes/releases/v24.3/deprecations.md b/src/current/_includes/releases/v24.3/deprecations.md index abf7fd16114..d6ca47666aa 100644 --- a/src/current/_includes/releases/v24.3/deprecations.md +++ b/src/current/_includes/releases/v24.3/deprecations.md @@ -1,5 +1,4 @@ The following deprecations are announced in v24.3. 
If you plan to upgrade to v24.3 directly from v24.1 and skip v24.2, be sure to also review the [v24.2 release notes]({% link releases/v24.2.md %}) for deprecations. -- The session variable [`enforce_home_region_follower_reads_enabled`]({% link v24.3/session-variables.md %}#enforce-home-region-follower-reads-enabled) is now deprecated, and will be removed in a future release. The related session variable [`enforce_home_region`]({% link v24.3/session-variables.md %}#enforce-home-region) is **not** deprecated. [#129024][#129024] +- The session variable [`enforce_home_region_follower_reads_enabled`]({% link v24.3/session-variables.md %}#enforce-home-region-follower-reads-enabled) is now deprecated, and will be removed in a future release. The related session variable [`enforce_home_region`]({% link v24.3/session-variables.md %}#enforce-home-region) is **not** deprecated. #129024 -[#129024]: https://github.com/cockroachdb/cockroach/pull/129024 diff --git a/src/current/_includes/releases/v24.3/v24.3.0-alpha.1.md b/src/current/_includes/releases/v24.3/v24.3.0-alpha.1.md index 2d1df7a681d..4a12a7a58a1 100644 --- a/src/current/_includes/releases/v24.3/v24.3.0-alpha.1.md +++ b/src/current/_includes/releases/v24.3/v24.3.0-alpha.1.md @@ -6,33 +6,33 @@ Release Date: October 9, 2024 {% include releases/new-release-downloads-docker-image.md release=include.release %}

Security updates

-- URLs in the [`CREATE CHANGEFEED`]({% link v24.3/create-changefeed.md %}) and [`CREATE SCHEDULE FOR CHANGEFEED`]({% link v24.3/create-schedule-for-changefeed.md %}) SQL statements are now sanitized of any secrets before being written to unredacted [logs]({% link v24.3/logging.md %}). [#126970][#126970] -- The LDAP [cluster settings]({% link v24.3/cluster-settings.md %}) `server.ldap_authentication.client.tls_certificate` and `server.ldap_authentication.client.tls_key` did not have callbacks installed to reload the settings value for LDAP authManager. This change fixes this by adding the necessary callbacks. [#131151][#131151] -- [Cluster settings]({% link v24.3/cluster-settings.md %}) for [host-based authentication]({% link v24.3/security-reference/authentication.md %}#authentication-configuration) configuration ([`server.host_based_authentication.configuration`]({% link v24.3/cluster-settings.md %}#setting-server-host-based-authentication-configuration)) and identity map configuration ([`server.identity_map.configuration`]({% link v24.3/cluster-settings.md %})) need to be redacted as they can be configured to contain LDAP bind usernames, passwords, and mapping of external identities to SQL users that are sensitive. These cluster settings can be configured for redaction via the `server.redact_sensitive_settings.enabled` cluster setting. [#131150][#131150] -- Added support for configuring authorization using LDAP. During login, the list of groups that a user belongs to are fetched from the LDAP server. These groups are mapped to [SQL roles]({% link v24.3/create-role.md %}) by extracting the common name (CN) from the group. After authenticating the user, the login flow grants these roles to the user, and revokes any other roles that are not returned by the LDAP server. The groups given by the LDAP server are treated as the sole source of truth for role memberships, so any roles that were manually granted to the user will not remain in place. 
[#131043][#131043] +- URLs in the [`CREATE CHANGEFEED`]({% link v24.3/create-changefeed.md %}) and [`CREATE SCHEDULE FOR CHANGEFEED`]({% link v24.3/create-schedule-for-changefeed.md %}) SQL statements are now sanitized of any secrets before being written to unredacted [logs]({% link v24.3/logging.md %}). #126970 +- The LDAP [cluster settings]({% link v24.3/cluster-settings.md %}) `server.ldap_authentication.client.tls_certificate` and `server.ldap_authentication.client.tls_key` did not have callbacks installed to reload the settings value for LDAP authManager. This change fixes this by adding the necessary callbacks. #131151 +- [Cluster settings]({% link v24.3/cluster-settings.md %}) for [host-based authentication]({% link v24.3/security-reference/authentication.md %}#authentication-configuration) configuration ([`server.host_based_authentication.configuration`]({% link v24.3/cluster-settings.md %}#setting-server-host-based-authentication-configuration)) and identity map configuration ([`server.identity_map.configuration`]({% link v24.3/cluster-settings.md %})) need to be redacted as they can be configured to contain LDAP bind usernames, passwords, and mapping of external identities to SQL users that are sensitive. These cluster settings can be configured for redaction via the `server.redact_sensitive_settings.enabled` cluster setting. #131150 +- Added support for configuring authorization using LDAP. During login, the list of groups that a user belongs to are fetched from the LDAP server. These groups are mapped to [SQL roles]({% link v24.3/create-role.md %}) by extracting the common name (CN) from the group. After authenticating the user, the login flow grants these roles to the user, and revokes any other roles that are not returned by the LDAP server. The groups given by the LDAP server are treated as the sole source of truth for role memberships, so any roles that were manually granted to the user will not remain in place. 
#131043 - Previously, the [host-based authentication]({% link v24.3/security-reference/authentication.md %}#authentication-configuration) (HBA) configuration cluster setting [`server.host_based_authentication.configuration`]({% link v24.3/cluster-settings.md %}#setting-server-host-based-authentication-configuration) was unable to handle double quotes in authentication method option values. For example, for the following entry: ~~~ host all all all ldap ldapserver=ldap.example.com ldapport=636 ldapbasedn="ou=users,dc=example,dc=com" ldapbinddn="cn=readonly,dc=example,dc=com" ldapbindpasswd=readonly_password ldapsearchattribute=uid ldapsearchfilter="(memberof=cn=cockroachdb_users,ou=groups,dc=example,dc=com)" ~~~ - The HBA parser would fail after incorrectly determining `ldapbinddn="cn=readonly,dc=example,dc=com"` as 2 separate options (`ldapbinddn=and cn=readonly,dc=example,dc=com`). Now, the 2 tokens are set as key and value respectively for the same HBA configuration option. [#131480][#131480] + The HBA parser would fail after incorrectly determining `ldapbinddn="cn=readonly,dc=example,dc=com"` as 2 separate options (`ldapbinddn=` and `cn=readonly,dc=example,dc=com`). Now, the 2 tokens are set as key and value respectively for the same HBA configuration option. #131480

General changes

-- CockroachDB will now avoid [logging]({% link v24.3/logging.md %}) unnecessary stack traces while executing [scheduled jobs]({% link v24.3/show-jobs.md %}). [#129846][#129846] -- Upgrading to 24.3 is blocked if no [license]({% link v24.3/licensing-faqs.md %}) is installed, or if a trial/free license is installed with telemetry disabled. [#130576][#130576] -- Attempting to install a second Enterprise trial license on the same cluster will now fail. [#131422][#131422] -- Changed the license `cockroach` is distributed under to the new CockroachDB Software License (CSL). [#131690][#131690] [#131686][#131686] [#131688][#131688] [#131687][#131687] [#131717][#131717] [#131689][#131689] [#131693][#131693] [#131691][#131691] [#131777][#131777] [#131778][#131778] [#131661][#131661] +- CockroachDB will now avoid [logging]({% link v24.3/logging.md %}) unnecessary stack traces while executing [scheduled jobs]({% link v24.3/show-jobs.md %}). #129846 +- Upgrading to v24.3 is blocked if no [license]({% link v24.3/licensing-faqs.md %}) is installed, or if a trial/free license is installed with telemetry disabled. #130576 +- Attempting to install a second Enterprise trial license on the same cluster will now fail. #131422 +- Changed the license `cockroach` is distributed under to the new CockroachDB Software License (CSL). #131690 #131686 #131688 #131687 #131717 #131689 #131693 #131691 #131777 #131778 #131661

{{ site.data.products.enterprise }} edition changes

-- Added a `CompressionLevel` field to the changefeed [`kafka_sink_config`]({% link v24.3/changefeed-sinks.md %}#kafka-sink-configuration) option. [Changefeeds]({% link v24.3/change-data-capture-overview.md %}) will use this compression level when emitting events to a [Kafka sink]({% link v24.3/changefeed-sinks.md %}#kafka). The possible values depend on a compression codec. The `CompressionLevel` field optimizes for faster or stronger level of [compression]({% link v24.3/changefeed-sinks.md %}#kafka-sink-configuration). [#125456][#125456] -- The updated version of the [CockroachDB changefeed Kafka sink implementation]({% link v24.3/changefeed-sinks.md %}#kafka) now supports specifying compression levels. [#127827][#127827] -- Introduced the cluster setting [`server.jwt_authentication.client.timeout`]({% link v24.3/cluster-settings.md %}) to capture the HTTP client timeout for external calls made during [JWT authentication]({% link v24.3/sso-sql.md %}). [#127145][#127145] -- The JWT authentication [cluster settings]({% link v24.3/cluster-settings.md %}) have been made `public`. [#128170][#128170] -- Updated certain error messages to refer to the `stable` docs tree rather than an explicit version. [#128842][#128842] -- Disambiguated [metrics]({% link v24.3/essential-metrics-self-hosted.md %}) and logs for the two buffers used by the KV feed. The affected metrics now have a suffix indicating which buffer they correspond to: `changefeed.buffer_entries.*`, `changefeed.buffer_entries_mem.*`, `changefeed.buffer_pushback_nanos.*`. The previous versions are still supported for backward compatibility, though using the new format is recommended. [#128813][#128813] +- Added a `CompressionLevel` field to the changefeed [`kafka_sink_config`]({% link v24.3/changefeed-sinks.md %}#kafka-sink-configuration) option. 
[Changefeeds]({% link v24.3/change-data-capture-overview.md %}) will use this compression level when emitting events to a [Kafka sink]({% link v24.3/changefeed-sinks.md %}#kafka). The possible values depend on a compression codec. The `CompressionLevel` field optimizes for faster or stronger level of [compression]({% link v24.3/changefeed-sinks.md %}#kafka-sink-configuration). #125456 +- The updated version of the [CockroachDB changefeed Kafka sink implementation]({% link v24.3/changefeed-sinks.md %}#kafka) now supports specifying compression levels. #127827 +- Introduced the cluster setting [`server.jwt_authentication.client.timeout`]({% link v24.3/cluster-settings.md %}) to capture the HTTP client timeout for external calls made during [JWT authentication]({% link v24.3/sso-sql.md %}). #127145 +- The JWT authentication [cluster settings]({% link v24.3/cluster-settings.md %}) have been made `public`. #128170 +- Updated certain error messages to refer to the `stable` docs tree rather than an explicit version. #128842 +- Disambiguated [metrics]({% link v24.3/essential-metrics-self-hosted.md %}) and logs for the two buffers used by the KV feed. The affected metrics now have a suffix indicating which buffer they correspond to: `changefeed.buffer_entries.*`, `changefeed.buffer_entries_mem.*`, `changefeed.buffer_pushback_nanos.*`. The previous versions are still supported for backward compatibility, though using the new format is recommended. #128813 - Added support for authorization to a CockroachDB cluster via LDAP, retrieving AD groups membership information for LDAP user. The new [HBA configuration]({% link v24.3/security-reference/authentication.md %}#authentication-configuration) cluster setting option `ldapgrouplistfilter` performs filtered search query on LDAP for matching groups. 
An example HBA configuration entry to support LDAP authZ configuration: {% include_cached copy-clipboard.html %} @@ -53,47 +53,47 @@ Release Date: October 9, 2024 Post configuration, the CockroachDB cluster should be able to authorize users via LDAP server if: 1. Users LDAP authentication attempt is successful, and it has the user's DN for the LDAP server. - 1. `ldapgrouplistfilter` is properly configured, and it successfully syncs groups of the user. [#128498][#128498] -- Added changefeed support for the [`mvcc_timestamp`]({% link v24.3/create-changefeed.md %}#mvcc-timestamp) option when the changefeed is emitting in [`avro`]({% link v24.3/changefeed-messages.md %}#avro) format. If both options are specified, the Avro schema includes an `mvcc_timestamp` metadata field and emits the row's [MVCC timestamp]({% link v24.3/architecture/storage-layer.md %}#mvcc) with the row data. [#129840][#129840] -- Updated the cluster setting [`changefeed.sink_io_workers`]({% link v24.3/cluster-settings.md %}#setting-changefeed-sink-io-workers) with all the [sinks]({% link v24.3/changefeed-sinks.md %}) that support the setting. [#129946][#129946] -- Added a LDAP authentication method to complement password-based login for the [DB Console]({% link v24.3/ui-overview.md %}) if HBA configuration has an entry for LDAP for the user attempting login, along with other matching criteria (like the requests originating IP address) for authentication to the DB Console. [#130418][#130418] -- Added timers around key parts of the [changefeed]({% link v24.3/change-data-capture-overview.md %}) pipeline to help debug feeds experiencing issues. The `changefeed.stage..latency` metrics now emit latency histograms for each stage. The metric respects the [changefeed `scope` label]({% link v24.3/monitor-and-debug-changefeeds.md %}#using-changefeed-metrics-labels) for debugging specific feeds. 
[#128794][#128794] -- For [enterprise changefeeds]({% link v24.3/how-does-a-changefeed-work.md %}), [events]({% link v24.3/eventlog.md %}) `changefeed_failed` and `create_changefeed` now include a `JobId` field. [#131396][#131396] -- The new [metric]({% link v24.3/metrics.md %}) `seconds_until_license_expiry` allows you to monitor the status of a cluster's Enterprise license. [#129052][#129052]. -- Added the `changefeed.total_ranges` metric, which [monitors]({% link v24.3/monitor-and-debug-changefeeds.md %}) the number of [ranges]({% link v24.3/architecture/overview.md %}#architecture-range) that are watched by [changefeed aggregators]({% link v24.3/how-does-a-changefeed-work.md %}). It shares the same polling interval as [`changefeed.lagging_ranges`]({% link v24.3/advanced-changefeed-configuration.md %}#lagging-ranges), which is controlled by the existing `lagging_ranges_polling_interval` option. [#130897][#130897] + 1. `ldapgrouplistfilter` is properly configured, and it successfully syncs groups of the user. #128498 +- Added changefeed support for the [`mvcc_timestamp`]({% link v24.3/create-changefeed.md %}#mvcc-timestamp) option when the changefeed is emitting in [`avro`]({% link v24.3/changefeed-messages.md %}#avro) format. If both options are specified, the Avro schema includes an `mvcc_timestamp` metadata field and emits the row's [MVCC timestamp]({% link v24.3/architecture/storage-layer.md %}#mvcc) with the row data. #129840 +- Updated the cluster setting [`changefeed.sink_io_workers`]({% link v24.3/cluster-settings.md %}#setting-changefeed-sink-io-workers) with all the [sinks]({% link v24.3/changefeed-sinks.md %}) that support the setting. 
#129946 +- Added an LDAP authentication method to complement password-based login for the [DB Console]({% link v24.3/ui-overview.md %}) if HBA configuration has an entry for LDAP for the user attempting login, along with other matching criteria (like the request's originating IP address) for authentication to the DB Console. #130418 +- Added timers around key parts of the [changefeed]({% link v24.3/change-data-capture-overview.md %}) pipeline to help debug feeds experiencing issues. The `changefeed.stage..latency` metrics now emit latency histograms for each stage. The metric respects the [changefeed `scope` label]({% link v24.3/monitor-and-debug-changefeeds.md %}#using-changefeed-metrics-labels) for debugging specific feeds. #128794 +- For [enterprise changefeeds]({% link v24.3/how-does-a-changefeed-work.md %}), [events]({% link v24.3/eventlog.md %}) `changefeed_failed` and `create_changefeed` now include a `JobId` field. #131396 +- The new [metric]({% link v24.3/metrics.md %}) `seconds_until_license_expiry` allows you to monitor the status of a cluster's Enterprise license. #129052. +- Added the `changefeed.total_ranges` metric, which [monitors]({% link v24.3/monitor-and-debug-changefeeds.md %}) the number of [ranges]({% link v24.3/architecture/overview.md %}#architecture-range) that are watched by [changefeed aggregators]({% link v24.3/how-does-a-changefeed-work.md %}). It shares the same polling interval as [`changefeed.lagging_ranges`]({% link v24.3/advanced-changefeed-configuration.md %}#lagging-ranges), which is controlled by the existing `lagging_ranges_polling_interval` option. #130897

SQL language changes

-- Added a session setting, [`optimizer_use_merged_partial_statistics`]({% link v24.3/session-variables.md %}) which defaults to `false`. When set to `true`, it enables usage of existing partial [statistics]({% link v24.3/cost-based-optimizer.md %}#table-statistics) merged with full statistics when [optimizing]({% link v24.3/cost-based-optimizer.md %}) a query. [#126948][#126948] -- The [`enable_create_stats_using_extremes`]({% link v24.3/session-variables.md %}) session setting is now `true` by default. Partial statistics at extremes can be collected using the [`CREATE STATISTICS ON FROM USING EXTREMES`]({% link v24.3/create-statistics.md %}) syntax. [#127850][#127850] -- Added [`SHOW SCHEMAS WITH COMMENT`]({% link v24.3/show-schemas.md %}) and `SHOW SCHEMAS FROM database_name WITH COMMENT` functionality similar to [`SHOW TABLES`]({% link v24.3/show-tables.md %}) and [`SHOW DATABASES`]({% link v24.3/show-databases.md %}). [#127816][#127816] -- The [`deadlock_timeout` session variable]({% link v24.3/session-variables.md %}) is now supported. The configuration can be used to specify the time to wait on a lock before pushing the lock holder for deadlock detection. It can be set at session granularity. [#128506][#128506] -- Partial statistics at extremes can now be collected on all valid columns of a table using the [`CREATE STATISTICS ` `FROM USING EXTREMES`]({% link v24.3/create-statistics.md %}) syntax, without an `ON ` clause. Valid columns are all single column prefixes of a forward [index]({% link v24.3/indexes.md %}) excluding partial, sharded, and implicitly partitioned indexes. [#127836][#127836] +- Added a session setting, [`optimizer_use_merged_partial_statistics`]({% link v24.3/session-variables.md %}) which defaults to `false`. When set to `true`, it enables usage of existing partial [statistics]({% link v24.3/cost-based-optimizer.md %}#table-statistics) merged with full statistics when [optimizing]({% link v24.3/cost-based-optimizer.md %}) a query. 
#126948 +- The [`enable_create_stats_using_extremes`]({% link v24.3/session-variables.md %}) session setting is now `true` by default. Partial statistics at extremes can be collected using the [`CREATE STATISTICS ON FROM USING EXTREMES`]({% link v24.3/create-statistics.md %}) syntax. #127850 +- Added [`SHOW SCHEMAS WITH COMMENT`]({% link v24.3/show-schemas.md %}) and `SHOW SCHEMAS FROM database_name WITH COMMENT` functionality similar to [`SHOW TABLES`]({% link v24.3/show-tables.md %}) and [`SHOW DATABASES`]({% link v24.3/show-databases.md %}). #127816 +- The [`deadlock_timeout` session variable]({% link v24.3/session-variables.md %}) is now supported. The configuration can be used to specify the time to wait on a lock before pushing the lock holder for deadlock detection. It can be set at session granularity. #128506 +- Partial statistics at extremes can now be collected on all valid columns of a table using the [`CREATE STATISTICS ` `FROM USING EXTREMES`]({% link v24.3/create-statistics.md %}) syntax, without an `ON ` clause. Valid columns are all single column prefixes of a forward [index]({% link v24.3/indexes.md %}) excluding partial, sharded, and implicitly partitioned indexes. #127836 - Partial [statistics]({% link v24.3/cost-based-optimizer.md %}#table-statistics) can now be automatically collected at the extremes of indexes when a certain fraction and minimum number of rows are stale (by default 5% and 100 respectively). These can be configured with new [table storage parameters]({% link v24.3/alter-table.md %}#set-and-reset-storage-parameters) and [cluster settings]({% link v24.3/cluster-settings.md %}), and the feature is disabled by default. The new cluster settings and table parameters are: - `sql.stats.automatic_partial_collection.enabled`/`sql_stats_automatic_partial_collection_enabled`, defaults to `false`. - `sql.stats.automatic_partial_collection.min_stale_rows`/`sql_stats_automatic_partial_collection_min_stale_rows`, defaults to `100`. 
- - `sql.stats.automatic_partial_collection.fraction_stale_rows `/`sql_stats_automatic_partial_collection_fraction_stale_rows`, Defaults to `0.05`. [#93067][#93067] -- The session variable [`enforce_home_region_follower_reads_enabled`]({% link v24.3/session-variables.md %}#enforce-home-region-follower-reads-enabled) is now deprecated, and will be removed in a future release. The related session variable [`enforce_home_region`]({% link v24.3/session-variables.md %}#enforce-home-region) is **not** deprecated. [#129024][#129024] -- Added a new [cluster setting]({% link v24.3/cluster-settings.md %}) to control whether most common values are collected as part of [histogram collection]({% link v24.3/cost-based-optimizer.md %}#control-histogram-collection) for use by the [optimizer]({% link v24.3/cost-based-optimizer.md %}). The setting is called `sql.stats.histogram_buckets.include_most_common_values.enabled`. When enabled, the histogram collection logic will ensure that the most common sampled values are represented as histogram bucket upper bounds. Since histograms in CockroachDB track the number of elements equal to the upper bound in addition to the number of elements less, this allows the optimizer to identify the most common values in the histogram and better estimate the rows processed by a query plan. To set the number of most common values to include in a histogram, a second setting `sql.stats.histogram_buckets.max_fraction_most_common_values` was added. Currently, the default is `0.1`, or `10%` of the number of buckets. With a 200 bucket histogram, by default, at most 20 buckets may be adjusted to include a most common value as the upper bound. [#129378][#129378] -- Added a new column to [`crdb_internal.table_spans`]({% link v24.3/crdb-internal.md %}#tables) to indicate whether a table is [dropped]({% link v24.3/drop-table.md %}). 
Rows for dropped tables will be removed once they are [garbage collected]({% link v24.3/architecture/storage-layer.md %}#garbage-collection). [#128788][#128788] + - `sql.stats.automatic_partial_collection.fraction_stale_rows `/`sql_stats_automatic_partial_collection_fraction_stale_rows`, Defaults to `0.05`. #93067 +- The session variable [`enforce_home_region_follower_reads_enabled`]({% link v24.3/session-variables.md %}#enforce-home-region-follower-reads-enabled) is now deprecated, and will be removed in a future release. The related session variable [`enforce_home_region`]({% link v24.3/session-variables.md %}#enforce-home-region) is **not** deprecated. #129024 +- Added a new [cluster setting]({% link v24.3/cluster-settings.md %}) to control whether most common values are collected as part of [histogram collection]({% link v24.3/cost-based-optimizer.md %}#control-histogram-collection) for use by the [optimizer]({% link v24.3/cost-based-optimizer.md %}). The setting is called `sql.stats.histogram_buckets.include_most_common_values.enabled`. When enabled, the histogram collection logic will ensure that the most common sampled values are represented as histogram bucket upper bounds. Since histograms in CockroachDB track the number of elements equal to the upper bound in addition to the number of elements less, this allows the optimizer to identify the most common values in the histogram and better estimate the rows processed by a query plan. To set the number of most common values to include in a histogram, a second setting `sql.stats.histogram_buckets.max_fraction_most_common_values` was added. Currently, the default is `0.1`, or `10%` of the number of buckets. With a 200 bucket histogram, by default, at most 20 buckets may be adjusted to include a most common value as the upper bound. #129378 +- Added a new column to [`crdb_internal.table_spans`]({% link v24.3/crdb-internal.md %}#tables) to indicate whether a table is [dropped]({% link v24.3/drop-table.md %}). 
Rows for dropped tables will be removed once they are [garbage collected]({% link v24.3/architecture/storage-layer.md %}#garbage-collection). #128788 - Added the [cluster setting]({% link v24.3/cluster-settings.md %}) `sql.txn.repeatable_read_isolation.enabled`, which defaults to` false`. When set to `true`, the following statements will configure transactions to run under `REPEATABLE READ` isolation, rather than being automatically interpreted as [`SERIALIZABLE`]({% link v24.3/demo-serializable.md %}): - `BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ` - `SET TRANSACTION ISOLATION LEVEL REPEATABLE READ` - `SET default_transaction_isolation = 'repeatable read'` - `SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL REPEATABLE READ` - This setting was added since [`REPEATABLE READ` transactions]({% link v24.3/transactions.md %}#comparison-to-ansi-sql-isolation-levels) is a [preview]({% link v24.3/cockroachdb-feature-availability.md %}) feature, so usage of it is opt-in for v24.3. In a future CockroachDB major version, this setting will change to default to `true`. [#130089][#130089] -- Previously, [`SHOW CHANGEFEED JOBS`]({% link v24.3/show-jobs.md %}#show-changefeed-jobs) showed the changefeed jobs for the last 14 days by default. Now, it uses the same age filter for [`SHOW JOBS`]({% link v24.3/show-jobs.md %}), which shows jobs from the last 12 hours by default. [#127584][#127584] -- Set the default for session variable [`large_full_scan_rows`]({% link v24.3/session-variables.md %}#large-full-scan-rows) to `0`. This means that by default, [`disallow_full_table_scans`]({% link v24.3/session-variables.md %}#disallow-full-table-scans) will disallow **all** [full table scans]({% link v24.3/show-full-table-scans.md %}), even full scans on very small tables. If `large_full_scan_rows` is set > 0, `disallow_full_table_scans` will allow full scans estimated to read fewer than `large_full_scan_rows`. 
[#131040][#131040] -- It is now possible to create [PL/pgSQL]({% link v24.3/plpgsql.md %}) trigger functions, which can be executed by a trigger in response to table mutation events. Note that this patch does not add support for triggers, only trigger functions. [#126734][#126734] -- Cluster settings [`enterprise.license`]({% link v24.3/cluster-settings.md %}#setting-enterprise-license) and [`diagnostics.reporting.enabled`]({% link v24.3/cluster-settings.md %}#setting-diagnostics-reporting-enabled) now have additional validation. [#131097][#131097] -- The [`SHOW SESSIONS`]({% link v24.3/show-sessions.md %}) command was changed to include an `authentication_method` column in the result. This column will show the method used to authenticate the session, for example, `password`, `cert`, `LDAP`, etc. [#131625][#131625] + This setting was added since [`REPEATABLE READ` transactions]({% link v24.3/transactions.md %}#comparison-to-ansi-sql-isolation-levels) is a [preview]({% link v24.3/cockroachdb-feature-availability.md %}) feature, so usage of it is opt-in for v24.3. In a future CockroachDB major version, this setting will change to default to `true`. #130089 +- Previously, [`SHOW CHANGEFEED JOBS`]({% link v24.3/show-jobs.md %}#show-changefeed-jobs) showed the changefeed jobs for the last 14 days by default. Now, it uses the same age filter for [`SHOW JOBS`]({% link v24.3/show-jobs.md %}), which shows jobs from the last 12 hours by default. #127584 +- Set the default for session variable [`large_full_scan_rows`]({% link v24.3/session-variables.md %}#large-full-scan-rows) to `0`. This means that by default, [`disallow_full_table_scans`]({% link v24.3/session-variables.md %}#disallow-full-table-scans) will disallow **all** [full table scans]({% link v24.3/show-full-table-scans.md %}), even full scans on very small tables. If `large_full_scan_rows` is set > 0, `disallow_full_table_scans` will allow full scans estimated to read fewer than `large_full_scan_rows`. 
#131040 +- It is now possible to create [PL/pgSQL]({% link v24.3/plpgsql.md %}) trigger functions, which can be executed by a trigger in response to table mutation events. Note that this patch does not add support for triggers, only trigger functions. #126734 +- Cluster settings [`enterprise.license`]({% link v24.3/cluster-settings.md %}#setting-enterprise-license) and [`diagnostics.reporting.enabled`]({% link v24.3/cluster-settings.md %}#setting-diagnostics-reporting-enabled) now have additional validation. #131097 +- The [`SHOW SESSIONS`]({% link v24.3/show-sessions.md %}) command was changed to include an `authentication_method` column in the result. This column will show the method used to authenticate the session, for example, `password`, `cert`, `LDAP`, etc. #131625

Operational changes

-- [Events]({% link v24.3/eventlog.md %}) `DiskSlownessDetected` and `DiskSlownessCleared` are now logged when disk slowness is detected and cleared on a store. [#127025][#127025] -- Several [cluster settings]({% link v24.3/cluster-settings.md %}) allow you to configure rate-limiting traffic to cloud storage over various protocols. These settings begin with `cloudstorage`. [#127207][#127207] -- The new [cluster setting]({% link v24.3/cluster-settings.md %}) `kv.range.range_size_hard_cap` allows you to limit how large a [range]({% link v24.3/architecture/overview.md %}#architecture-range) can grow before [backpressure]({% link v24.3/common-errors.md %}#split-failed-while-applying-backpressure-are-rows-updated-in-a-tight-loop) is applied. This can help to mitigate against a situation where a range cannot be split, such as when a range is comprised of a single key due to an issue with the schema or workload pattern or a bug in client application code. The default is 8 GiB, which is 16 times the default max range size. If you have changed the max range size, you may need to adjust this cluster setting or reduce the range size. [#129450][#129450] +- [Events]({% link v24.3/eventlog.md %}) `DiskSlownessDetected` and `DiskSlownessCleared` are now logged when disk slowness is detected and cleared on a store. #127025 +- Several [cluster settings]({% link v24.3/cluster-settings.md %}) allow you to configure rate-limiting traffic to cloud storage over various protocols. These settings begin with `cloudstorage`. #127207 +- The new [cluster setting]({% link v24.3/cluster-settings.md %}) `kv.range.range_size_hard_cap` allows you to limit how large a [range]({% link v24.3/architecture/overview.md %}#architecture-range) can grow before [backpressure]({% link v24.3/common-errors.md %}#split-failed-while-applying-backpressure-are-rows-updated-in-a-tight-loop) is applied. 
This can help to mitigate against a situation where a range cannot be split, such as when a range is comprised of a single key due to an issue with the schema or workload pattern or a bug in client application code. The default is 8 GiB, which is 16 times the default max range size. If you have changed the max range size, you may need to adjust this cluster setting or reduce the range size. #129450 - The following `kvflowcontrol` [metrics]({% link v24.3/metrics.md %}) have been renamed. After a cluster is finalized on v24.3, old and new metrics will be populated. The previous metrics under `kvasdmission.flow_controller` will be removed. Old metric names | New metric names @@ -120,16 +120,16 @@ Release Date: October 9, 2024 `kvadmission.flow_controller.elastic_wait_duration` | `kvflowcontrol.eval_wait.elastic.duration` - [#130167][#130167] + #130167 -- The new `ranges.decommissioning` [metric]({% link v24.3/metrics.md %}) shows the number of ranges with a replica on a [decommissioning]({% link v24.3/node-shutdown.md %}) node. [#130117][#130117] +- The new `ranges.decommissioning` [metric]({% link v24.3/metrics.md %}) shows the number of ranges with a replica on a [decommissioning]({% link v24.3/node-shutdown.md %}) node. #130117 - New [cluster settings]({% link v24.3/cluster-settings.md %}) have been added which control the refresh behavior for the cached data in the **Databases** page of the [DB Console]({% link v24.3/ui-overview.md %}): - `obs.tablemetadatacache.data_valid_duration`: the duration for which the data in `system.table_metadata` is considered valid before a cache reset will occur. Default: 20 minutes. - `obs.tablemetadatacache.automatic_updates.enabled`: whether to automatically update the cache according the validity interval. Default: `false`. - [#130198][#130198] -- New gauge [metrics]({% link v24.3/metrics.md %}) `security.certificate.expiration.{cert-type}` and `security.certificate.ttl.{cert-type}` show the expiration and TTL for a certificate. 
[#130110][#130110] -- To set the [logging format]({% link v24.3/log-formats.md %}) for `stderr`, you can now set the `format` field to any valid format, rather than only `crdb-v2-tty`. [#131529][#131529] + #130198 +- New gauge [metrics]({% link v24.3/metrics.md %}) `security.certificate.expiration.{cert-type}` and `security.certificate.ttl.{cert-type}` show the expiration and TTL for a certificate. #130110 +- To set the [logging format]({% link v24.3/log-formats.md %}) for `stderr`, you can now set the `format` field to any valid format, rather than only `crdb-v2-tty`. #131529 - The following new [metrics]({% link v24.3/metrics.md %}) show connection latency for each SQL authentication method: Authentication method | Metric @@ -141,209 +141,90 @@ Release Date: October 9, 2024 Password | `auth_password_conn_latency` SCRAM SHA-256 | `auth_scram_conn_latency` - [#131578][#131578] + #131578 -- Verbose logging of slow [Pebble]({% link v24.3/architecture/storage-layer.md %}#pebble) reads can no longer be enabled via the shorthand flag `--vmodule=pebble_logger_and_tracer=2`, where `pebble_logger_and_tracer` contains the CockroachDB implementation of the logger needed by Pebble. Instead, you must list the Pebble files that contain the log statements. For example `--vmodule=reader=2,table=2`. [#127066][#127066] -- The lowest [admission control]({% link v24.3/admission-control.md %}) priority for the storage layer has been renamed from `ttl-low-pri` to `bulk-low-pri`. [#129564][#129564] -- New clusters will now have a [zone configuration]({% link v24.3/show-zone-configurations.md %}) defined for the `timeseries` range, which specifies `gc.ttlseconds` and inherits all other attributes from the zone config of the `default` range. 
This zone config will also be added to a cluster that is [upgraded]({% link v24.3/upgrade-cockroach-version.md %}) to v24.3 if it does not already have a zone config defined.[#128032][#128032] +- Verbose logging of slow [Pebble]({% link v24.3/architecture/storage-layer.md %}#pebble) reads can no longer be enabled via the shorthand flag `--vmodule=pebble_logger_and_tracer=2`, where `pebble_logger_and_tracer` contains the CockroachDB implementation of the logger needed by Pebble. Instead, you must list the Pebble files that contain the log statements. For example `--vmodule=reader=2,table=2`. #127066 +- The lowest [admission control]({% link v24.3/admission-control.md %}) priority for the storage layer has been renamed from `ttl-low-pri` to `bulk-low-pri`. #129564 +- New clusters will now have a [zone configuration]({% link v24.3/show-zone-configurations.md %}) defined for the `timeseries` range, which specifies `gc.ttlseconds` and inherits all other attributes from the zone config of the `default` range. This zone config will also be added to a cluster that is [upgraded]({% link v24.3/upgrade-cockroach-version.md %}) to v24.3 if it does not already have a zone config defined. #128032

Command-line changes

-- [`cockroach debug tsdump`]({% link v24.3/cockroach-debug-tsdump.md %}) now includes all the available resolutions in the time range supplied by the user. [#127186][#127186] -- Added the flag `--tenant-name-scope` to the [`cert create-client`]({% link v24.3/cockroach-cert.md %}#create-the-certificate-and-key-pair-for-a-client) command. This allows users to generate tenant-scoped [client certificates]({% link v24.3/authentication.md %}#client-authentication) using tenant names in addition to tenant IDs. [#129216][#129216] +- [`cockroach debug tsdump`]({% link v24.3/cockroach-debug-tsdump.md %}) now includes all the available resolutions in the time range supplied by the user. #127186 +- Added the flag `--tenant-name-scope` to the [`cert create-client`]({% link v24.3/cockroach-cert.md %}#create-the-certificate-and-key-pair-for-a-client) command. This allows users to generate tenant-scoped [client certificates]({% link v24.3/authentication.md %}#client-authentication) using tenant names in addition to tenant IDs. #129216

DB Console changes

-- If a [range]({% link v24.3/architecture/overview.md %}#architecture-range) is larger than twice the max range size, it will now display in the [**Problem Ranges** page]({% link v24.3/ui-debug-pages.md %}) in the DB Console. [#129001][#129001] -- Updated some metric charts on the [Overview]({% link v24.3/ui-overview-dashboard.md %}) and [Replication]({% link v24.3/ui-replication-dashboard.md %}) dashboards to omit verbose details in the legends for easier browsing. [#129149][#129149] -- Updated the icon for notification alerts to use the new CockroachDB logo. [#130333][#130333] -- The `txn.restarts.writetoooldmulti` metric was rolled into the `txn.restarts.writetooold` metric in the v24.1.0-alpha.1 release. `txn.restarts.writetoooldmulti` has now been removed altogether. [#131642][#131642] -- The grants table in the [DB Details]({% link v24.3/ui-databases-page.md %}#databases) page will now show the database level grants. For example, when clicking a database in the databases list. Previously, it showed grants per table in the database. [#131250][#131250] -- Added new database pages that are available from the side navigation **Databases** link. [#131594][#131594] -- The [DB Console]({% link v24.3/ui-overview.md %}) will reflect any throttling behavior from the cluster due to an expired license or missing telemetry data. Enterprise licenses are not affected. [#131326][#131326] -- Users can hover over the node/region cell in multi-region deployments to view a list of nodes the database or table is on. [#130704][#130704] -- The [**Databases** pages]({% link v24.3/ui-databases-page.md %}) in the DB console have been updated to read cached metadata about database and table storage statistics. The cache update time is now displayed in the top right-hand corner of the database and tables list pages. Users may trigger a cache refresh with the **refresh** icon next to the last updated time. 
The cache will also update automatically when users visit a **Databases** page and the cache is older than or equal to 20 minutes. [#131463][#131463] +- If a [range]({% link v24.3/architecture/overview.md %}#architecture-range) is larger than twice the max range size, it will now display in the [**Problem Ranges** page]({% link v24.3/ui-debug-pages.md %}) in the DB Console. #129001 +- Updated some metric charts on the [Overview]({% link v24.3/ui-overview-dashboard.md %}) and [Replication]({% link v24.3/ui-replication-dashboard.md %}) dashboards to omit verbose details in the legends for easier browsing. #129149 +- Updated the icon for notification alerts to use the new CockroachDB logo. #130333 +- The `txn.restarts.writetoooldmulti` metric was rolled into the `txn.restarts.writetooold` metric in the v24.1.0-alpha.1 release. `txn.restarts.writetoooldmulti` has now been removed altogether. #131642 +- The grants table in the [DB Details]({% link v24.3/ui-databases-page.md %}#databases) page will now show the database level grants. For example, when clicking a database in the databases list. Previously, it showed grants per table in the database. #131250 +- Added new database pages that are available from the side navigation **Databases** link. #131594 +- The [DB Console]({% link v24.3/ui-overview.md %}) will reflect any throttling behavior from the cluster due to an expired license or missing telemetry data. Enterprise licenses are not affected. #131326 +- Users can hover over the node/region cell in multi-region deployments to view a list of nodes the database or table is on. #130704 +- The [**Databases** pages]({% link v24.3/ui-databases-page.md %}) in the DB console have been updated to read cached metadata about database and table storage statistics. The cache update time is now displayed in the top right-hand corner of the database and tables list pages. Users may trigger a cache refresh with the **refresh** icon next to the last updated time. 
The cache will also update automatically when users visit a **Databases** page and the cache is older than or equal to 20 minutes. #131463

Bug fixes

-- Fixed a bug where CockroachDB could incorrectly evaluate an `IS NOT NULL` [filter]({% link v24.3/select-clause.md %}#filter-rows) if it was applied to non-`NULL` tuples that had `NULL` elements (like `(1, NULL)` or `(NULL, NULL)`). The bug was present since v20.2. [#126901][#126901] -- Fixed a bug related to displaying the names of composite types in the [`SHOW CREATE TABLES`]({% link v24.3/show-create.md %}) command. The names are now shown as two-part names, which disambiguates the output and makes it more portable to other databases. [#127158][#127158] -- The `CONCAT()` [built-in function]({% link v24.3/functions-and-operators.md %}) now accepts arguments of any data type. [#127098][#127098] -- Fixed a bug that prevented merged [statistics]({% link v24.3/cost-based-optimizer.md %}#table-statistics) from being created after injecting statistics or recreating [statement bundles]({% link v24.3/explain-analyze.md %}). This would occur when the injected statistics or statement bundle contained related full and partial statistics. [#127252][#127252] +- Fixed a bug where CockroachDB could incorrectly evaluate an `IS NOT NULL` [filter]({% link v24.3/select-clause.md %}#filter-rows) if it was applied to non-`NULL` tuples that had `NULL` elements (like `(1, NULL)` or `(NULL, NULL)`). The bug was present since v20.2. #126901 +- Fixed a bug related to displaying the names of composite types in the [`SHOW CREATE TABLES`]({% link v24.3/show-create.md %}) command. The names are now shown as two-part names, which disambiguates the output and makes it more portable to other databases. #127158 +- The `CONCAT()` [built-in function]({% link v24.3/functions-and-operators.md %}) now accepts arguments of any data type. #127098 +- Fixed a bug that prevented merged [statistics]({% link v24.3/cost-based-optimizer.md %}#table-statistics) from being created after injecting statistics or recreating [statement bundles]({% link v24.3/explain-analyze.md %}). 
This would occur when the injected statistics or statement bundle contained related full and partial statistics. #127252 - Fixed a bug where CockroachDB could encounter spurious `(error encountered after some results were delivered)` `ERROR: context canceled` errors in rare cases when evaluating some queries. The bug was present since v22.2. The conditions that triggered the bug were queries that: - Had to be executed locally. - Had a [`LIMIT`]({% link v24.3/limit-offset.md %}). - Have at least two [`UNION`]({% link v24.3/selection-queries.md %}#union-combine-two-queries) clauses. - - Have some lookup or index [joins]({% link v24.3/joins.md %}) in the `UNION` branches. [#127076][#127076] -- Updated the restore [job]({% link v24.3/show-jobs.md %}) description from `RESTORE ... FROM` to `RESTORE FROM {backup} IN {collectionURI}` to reflect the new [`RESTORE`]({% link v24.3/restore.md %}) syntax. [#127970][#127970] -- Fixed a bug that could cause a `CASE` statement with multiple [subqueries]({% link v24.3/subqueries.md %}) to produces the side effects of one of the subqueries even if that subquery shouldn't have been evaluated. [#120327][#120327] -- Changed the [schema changer]({% link v24.3/online-schema-changes.md %})’s merge process so that it can detect [contention errors]({% link v24.3/transaction-retry-error-reference.md %}) and automatically retry with a smaller batch size. This makes the merge process more likely to succeed without needing to manually tune settings. [#128201][#128201] -- [`SHOW CREATE ALL TYPES`]({% link v24.3/show-create.md %}) now shows corresponding type comments in its output. [#128084][#128084] -- Enforce the [`statement_timeout` session setting]({% link v24.3/session-variables.md %}) when waiting for [jobs]({% link v24.3/show-jobs.md %}) after a [schema change]({% link v24.3/online-schema-changes.md %}) in an [implicit transaction]({% link v24.3/transactions.md %}#individual-statements). 
[#128474][#128474] -- Fixed a bug where certain dropdowns in the [DB Console]({% link v24.3/ui-overview.md %}) appeared to be empty (with no options to select from) for users of the Safari browser. [#128996][#128996] -- Fixed a bug that would cause the [`hlc_to_timestamp` function]({% link v24.3/functions-and-operators.md %}) to return an incorrect [timestamp]({% link v24.3/timestamp.md %}) for some input [decimals]({% link v24.3/decimal.md %}). [#129153][#129153] -- Fixed a memory leak where [statement insight]({% link v24.3/ui-statements-page.md %}) objects could leak if the session was closed without the [transaction]({% link v24.3/transactions.md %}) finishing. [#128400][#128400] -- Fixed a bug in the public preview [WAL failover]({% link v24.3/cockroach-start.md %}#write-ahead-log-wal-failover) feature that could prevent a node from starting if it crashed during a failover. [#129331][#129331] -- Fixed a bug where `'infinity'::TIMESTAMP` returned a different result than PostgreSQL. [#127141][#127141] -- Fixed a spurious error log from the [replication queue]({% link v24.3/ui-queues-dashboard.md %}#replication-queue) involving the text `" needs lease, not adding"`. [#129351][#129351] -- Using more than one [`DECLARE`]({% link v24.3/plpgsql.md %}#structure) statement in the definition of a [user-defined function]({% link v24.3/user-defined-functions.md %}) now correctly declares additional variables. [#129951][#129951] -- Fixed a bug in which some [`SELECT FOR UPDATE`]({% link v24.3/select-for-update.md %}) or [`SELECT FOR SHARE`]({% link v24.3/select-for-update.md %}) queries using `NOWAIT` could still block on locked rows when using the `optimizer_use_lock_op_for_serializable` [session setting]({% link v24.3/session-variables.md %}) under [serializable]({% link v24.3/demo-serializable.md %}) isolation. This bug was introduced with `optimizer_use_lock_op_for_serializable` in v23.2.0. 
[#130103][#130103] -- Fixed a bug in the [upgrade]({% link v24.3/upgrade-cockroach-version.md %}) pre-condition for repairing descriptor corruption that could lead to finalization being stuck. [#130064][#130064] -- Fixed a bug that caused the optimizer to plan unnecessary post-query uniqueness checks during [`INSERT`]({% link v24.3/insert.md %}), [`UPSERT`]({% link v24.3/upsert.md %}), and [`UPDATE`]({% link v24.3/update.md %}) statements on tables with partial, unique, [hash-sharded indexes]({% link v24.3/hash-sharded-indexes.md %}). These unnecessary checks added overhead to execution of these statements, and caused the statements to error when executed under [`READ COMMITTED` isolation]({% link v24.3/read-committed.md %}). [#130366][#130366] -- Fixed a bug that caused incorrect evaluation of `CASE`, [`COALESCE`]({% link v24.3/functions-and-operators.md %}#conditional-and-function-like-operators), and [`IF`]({% link v24.3/functions-and-operators.md %}#conditional-and-function-like-operators) expressions with branches producing fixed-width string-like types, such as [`CHAR`]({% link v24.3/string.md %}). In addition, the `BPCHAR` type no longer incorrectly imposes a length limit of `1`. [#129007][#129007] -- Fixed a bug where [zone configuration]({% link v24.3/configure-replication-zones.md %}) changes issued by the [declarative schema changer]({% link v24.3/online-schema-changes.md %}) were not blocked if a table had the `schema_locked` [storage parameter]({% link v24.3/with-storage-parameter.md %}) set. [#130670][#130670] -- Fixed a bug that could prevent a [`CHANGEFEED`]({% link v24.3/changefeed-examples.md %}) from being able to resume after being paused for a prolonged period of time. 
[#130622][#130622] -- Fixed a bug where if a client connection was attempting a [schema change]({% link v24.3/online-schema-changes.md %}) while the same [schema objects]({% link v24.3/schema-design-overview.md %}) were being dropped, it was possible for the connection to be incorrectly dropped. [#130928][#130928] + - Have some lookup or index [joins]({% link v24.3/joins.md %}) in the `UNION` branches. #127076 +- Updated the restore [job]({% link v24.3/show-jobs.md %}) description from `RESTORE ... FROM` to `RESTORE FROM {backup} IN {collectionURI}` to reflect the new [`RESTORE`]({% link v24.3/restore.md %}) syntax. #127970 +- Fixed a bug that could cause a `CASE` statement with multiple [subqueries]({% link v24.3/subqueries.md %}) to produce the side effects of one of the subqueries even if that subquery shouldn't have been evaluated. #120327 +- Changed the [schema changer]({% link v24.3/online-schema-changes.md %})’s merge process so that it can detect [contention errors]({% link v24.3/transaction-retry-error-reference.md %}) and automatically retry with a smaller batch size. This makes the merge process more likely to succeed without needing to manually tune settings. #128201 +- [`SHOW CREATE ALL TYPES`]({% link v24.3/show-create.md %}) now shows corresponding type comments in its output. #128084 +- Enforce the [`statement_timeout` session setting]({% link v24.3/session-variables.md %}) when waiting for [jobs]({% link v24.3/show-jobs.md %}) after a [schema change]({% link v24.3/online-schema-changes.md %}) in an [implicit transaction]({% link v24.3/transactions.md %}#individual-statements). #128474 +- Fixed a bug where certain dropdowns in the [DB Console]({% link v24.3/ui-overview.md %}) appeared to be empty (with no options to select from) for users of the Safari browser. 
#128996 +- Fixed a bug that would cause the [`hlc_to_timestamp` function]({% link v24.3/functions-and-operators.md %}) to return an incorrect [timestamp]({% link v24.3/timestamp.md %}) for some input [decimals]({% link v24.3/decimal.md %}). #129153 +- Fixed a memory leak where [statement insight]({% link v24.3/ui-statements-page.md %}) objects could leak if the session was closed without the [transaction]({% link v24.3/transactions.md %}) finishing. #128400 +- Fixed a bug in the public preview [WAL failover]({% link v24.3/cockroach-start.md %}#write-ahead-log-wal-failover) feature that could prevent a node from starting if it crashed during a failover. #129331 +- Fixed a bug where `'infinity'::TIMESTAMP` returned a different result than PostgreSQL. #127141 +- Fixed a spurious error log from the [replication queue]({% link v24.3/ui-queues-dashboard.md %}#replication-queue) involving the text `" needs lease, not adding"`. #129351 +- Using more than one [`DECLARE`]({% link v24.3/plpgsql.md %}#structure) statement in the definition of a [user-defined function]({% link v24.3/user-defined-functions.md %}) now correctly declares additional variables. #129951 +- Fixed a bug in which some [`SELECT FOR UPDATE`]({% link v24.3/select-for-update.md %}) or [`SELECT FOR SHARE`]({% link v24.3/select-for-update.md %}) queries using `NOWAIT` could still block on locked rows when using the `optimizer_use_lock_op_for_serializable` [session setting]({% link v24.3/session-variables.md %}) under [serializable]({% link v24.3/demo-serializable.md %}) isolation. This bug was introduced with `optimizer_use_lock_op_for_serializable` in v23.2.0. #130103 +- Fixed a bug in the [upgrade]({% link v24.3/upgrade-cockroach-version.md %}) pre-condition for repairing descriptor corruption that could lead to finalization being stuck. 
#130064 +- Fixed a bug that caused the optimizer to plan unnecessary post-query uniqueness checks during [`INSERT`]({% link v24.3/insert.md %}), [`UPSERT`]({% link v24.3/upsert.md %}), and [`UPDATE`]({% link v24.3/update.md %}) statements on tables with partial, unique, [hash-sharded indexes]({% link v24.3/hash-sharded-indexes.md %}). These unnecessary checks added overhead to execution of these statements, and caused the statements to error when executed under [`READ COMMITTED` isolation]({% link v24.3/read-committed.md %}). #130366 +- Fixed a bug that caused incorrect evaluation of `CASE`, [`COALESCE`]({% link v24.3/functions-and-operators.md %}#conditional-and-function-like-operators), and [`IF`]({% link v24.3/functions-and-operators.md %}#conditional-and-function-like-operators) expressions with branches producing fixed-width string-like types, such as [`CHAR`]({% link v24.3/string.md %}). In addition, the `BPCHAR` type no longer incorrectly imposes a length limit of `1`. #129007 +- Fixed a bug where [zone configuration]({% link v24.3/configure-replication-zones.md %}) changes issued by the [declarative schema changer]({% link v24.3/online-schema-changes.md %}) were not blocked if a table had the `schema_locked` [storage parameter]({% link v24.3/with-storage-parameter.md %}) set. #130670 +- Fixed a bug that could prevent a [`CHANGEFEED`]({% link v24.3/changefeed-examples.md %}) from being able to resume after being paused for a prolonged period of time. #130622 +- Fixed a bug where if a client connection was attempting a [schema change]({% link v24.3/online-schema-changes.md %}) while the same [schema objects]({% link v24.3/schema-design-overview.md %}) were being dropped, it was possible for the connection to be incorrectly dropped. #130928 - Fixed a bug introduced in v23.1 that could cause incorrect results when: 1. The query contained a [correlated subquery]({% link v24.3/subqueries.md %}#correlated-subqueries). 2. 
The correlated subquery had a [`GROUP BY`]({% link v24.3/select-clause.md %}#group-by-an-alias) or [`DISTINCT`]({% link v24.3/performance-best-practices-overview.md %}#avoid-select-distinct-for-large-tables) operator with an outer-column reference in its input. 3. The correlated subquery was in the input of a [`SELECT`]({% link v24.3/select-clause.md %}) or [`JOIN`]({% link v24.3/joins.md %}) operator. 4. The `SELECT` or `JOIN` had a filter that set the outer-column reference from (2) equal to a non-outer column in the input of the grouping operator. - 5. The grouping column set did not include the replacement column, and functionally determined the replacement column. [#130925][#130925] -- Fixed a bug which could cause errors with the message `"internal error: Non-nullable column ..."` when executing statements under [`READ COMMITTED`]({% link v24.3/read-committed.md %}) isolation that involved tables with `NOT NULL` [virtual columns]({% link v24.3/computed-columns.md %}#create-a-virtual-computed-column-using-jsonb-data). [#130725][#130725] -- Fixed a bug that could cause a very rare internal error `"lists in SetPrivate are not all the same length"` when executing queries. [#130981][#130981] -- Fixed a bug that could cause incorrect evaluation of [scalar expressions]({% link v24.3/scalar-expressions.md %}) involving `NULL` values in rare cases. [#128123][#128123] -- [`SHOW CREATE ALL SCHEMAS`]({% link v24.3/show-create.md %}) now shows corresponding schema comments in its output. [#130164][#130164] -- Fixed a bug, introduced in v23.2.0, where creating a new [incremental schedule]({% link v24.3/take-full-and-incremental-backups.md %}#incremental-backups) (using [`ALTER BACKUP SCHEDULE`]({% link v24.3/alter-backup-schedule.md %})) on a [full backup schedule]({% link v24.3/take-full-and-incremental-backups.md %}#full-backups) created on an older version would fail. 
[#131231][#131231] -- Fixed a bug that could cause an internal error if a table with an implicit (`rowid`) [primary key]({% link v24.3/primary-key.md %}) was locked from within a [subquery]({% link v24.3/subqueries.md %}) like ` SELECT * FROM (SELECT * FROM foo WHERE x = 2) FOR UPDATE;`. The error could occur either under [`READ COMMITTED`]({% link v24.3/read-committed.md %}) isolation, or with the `optimizer_use_lock_op_for_serializable` [session setting]({% link v24.3/session-variables.md %}) enabled. [#129768][#129768] -- Fixed a bug where [jobs]({% link v24.3/show-jobs.md %}) created in a session with non-zero session [timezone offsets]({% link v24.3/set-vars.md %}#set-time-zone) could hang before starting, or report incorrect creation times when viewed in [`SHOW JOBS`]({% link v24.3/show-jobs.md %}) and the [DB Console]({% link v24.3/ui-overview.md %}). [#123632][#123632] -- Fixed a bug which could result in [changefeeds using CDC queries]({% link v24.3/create-schedule-for-changefeed.md %}#create-a-scheduled-changefeed-with-cdc-queries) failing due to a system table being [garbage collected]({% link v24.3/architecture/storage-layer.md %}#garbage-collection). [#131027][#131027] -- [`ALTER COLUMN TYPE`]({% link v24.3/alter-table.md %}#convert-to-a-different-data-type) now errors out when there is a [partial index]({% link v24.3/partial-indexes.md %}) that is dependent on the column being altered. [#131590][#131590] -- Fixed a bug that prevented buffered file sinks from being included when iterating over all file sinks. This led to problems such as the `debug zip` command not being able to fetch logs for a cluster where buffering was enabled. [#130158][#130158] -- Fixed a bug where backup schedules could advance a protected timestamp too early, which caused incremental backups to fail. [#131358][#131358] + 5. The grouping column set did not include the replacement column, and functionally determined the replacement column. 
#130925 +- Fixed a bug which could cause errors with the message `"internal error: Non-nullable column ..."` when executing statements under [`READ COMMITTED`]({% link v24.3/read-committed.md %}) isolation that involved tables with `NOT NULL` [virtual columns]({% link v24.3/computed-columns.md %}#create-a-virtual-computed-column-using-jsonb-data). #130725 +- Fixed a bug that could cause a very rare internal error `"lists in SetPrivate are not all the same length"` when executing queries. #130981 +- Fixed a bug that could cause incorrect evaluation of [scalar expressions]({% link v24.3/scalar-expressions.md %}) involving `NULL` values in rare cases. #128123 +- [`SHOW CREATE ALL SCHEMAS`]({% link v24.3/show-create.md %}) now shows corresponding schema comments in its output. #130164 +- Fixed a bug, introduced in v23.2.0, where creating a new [incremental schedule]({% link v24.3/take-full-and-incremental-backups.md %}#incremental-backups) (using [`ALTER BACKUP SCHEDULE`]({% link v24.3/alter-backup-schedule.md %})) on a [full backup schedule]({% link v24.3/take-full-and-incremental-backups.md %}#full-backups) created on an older version would fail. #131231 +- Fixed a bug that could cause an internal error if a table with an implicit (`rowid`) [primary key]({% link v24.3/primary-key.md %}) was locked from within a [subquery]({% link v24.3/subqueries.md %}) like ` SELECT * FROM (SELECT * FROM foo WHERE x = 2) FOR UPDATE;`. The error could occur either under [`READ COMMITTED`]({% link v24.3/read-committed.md %}) isolation, or with the `optimizer_use_lock_op_for_serializable` [session setting]({% link v24.3/session-variables.md %}) enabled. 
#129768 +- Fixed a bug where [jobs]({% link v24.3/show-jobs.md %}) created in a session with non-zero session [timezone offsets]({% link v24.3/set-vars.md %}#set-time-zone) could hang before starting, or report incorrect creation times when viewed in [`SHOW JOBS`]({% link v24.3/show-jobs.md %}) and the [DB Console]({% link v24.3/ui-overview.md %}). #123632 +- Fixed a bug which could result in [changefeeds using CDC queries]({% link v24.3/create-schedule-for-changefeed.md %}#create-a-scheduled-changefeed-with-cdc-queries) failing due to a system table being [garbage collected]({% link v24.3/architecture/storage-layer.md %}#garbage-collection). #131027 +- [`ALTER COLUMN TYPE`]({% link v24.3/alter-table.md %}#convert-to-a-different-data-type) now errors out when there is a [partial index]({% link v24.3/partial-indexes.md %}) that is dependent on the column being altered. #131590 +- Fixed a bug that prevented buffered file sinks from being included when iterating over all file sinks. This led to problems such as the `debug zip` command not being able to fetch logs for a cluster where buffering was enabled. #130158 +- Fixed a bug where backup schedules could advance a protected timestamp too early, which caused incremental backups to fail. #131358

Performance improvements

-- [Raft log]({% link v24.3/architecture/replication-layer.md %}#raft-logs) sync callback handling is now parallelized, which can improve write-heavy workload performance on large, single-store nodes. [#126523][#126523] -- [Planning time]({% link v24.3/cost-based-optimizer.md %}) for complex queries has been reduced. [#128049][#128049] -- Reduced the [write-amplification]({% link v24.3/architecture/storage-layer.md %}#write-amplification) impact of [rebalances]({% link v24.3/architecture/replication-layer.md %}) by splitting snapshot SSTable files into smaller ones before ingesting them into [Pebble]({% link v24.3/architecture/storage-layer.md %}#pebble). [#127997][#127997] -- Improved the performance of [job-system]({% link v24.3/show-jobs.md %}) related queries. [#123848][#123848] -- The [query optimizer]({% link v24.3/cost-based-optimizer.md %}) now plans limited, [partial-index scans]({% link v24.3/partial-indexes.md %}) in more cases. [#129901][#129901] -- The initialization of the execution engine for a query is now more efficient when the [query plan]({% link v24.3/cost-based-optimizer.md %}) contains [aggregate functions]({% link v24.3/functions-and-operators.md %}#aggregate-functions). [#130834][#130834] -- Enabled multi-level [compactions]({% link v24.3/architecture/storage-layer.md %}#compaction) that moderately reduce [write amplification]({% link v24.3/architecture/storage-layer.md %}#write-amplification) within the [storage engine]({% link v24.3/architecture/storage-layer.md %}). [#131378][#131378] -- Increased the per-vCPU concurrency limits for KV operations. Specifically, increased the `kv.dist_sender.concurrency_limit` [cluster setting]({% link v24.3/cluster-settings.md %}) to 384/vCPU (up from 64/vCPU) and `kv.streamer.concurrency_limit` to 96/vCPU (up from 8/vCPU). [#131226][#131226] -- The [optimizer]({% link v24.3/cost-based-optimizer.md %}) now plans more efficient [lookup joins]({% link v24.3/joins.md %}#lookup-joins) in some cases. 
[#131383][#131383] +- [Raft log]({% link v24.3/architecture/replication-layer.md %}#raft-logs) sync callback handling is now parallelized, which can improve write-heavy workload performance on large, single-store nodes. #126523 +- [Planning time]({% link v24.3/cost-based-optimizer.md %}) for complex queries has been reduced. #128049 +- Reduced the [write-amplification]({% link v24.3/architecture/storage-layer.md %}#write-amplification) impact of [rebalances]({% link v24.3/architecture/replication-layer.md %}) by splitting snapshot SSTable files into smaller ones before ingesting them into [Pebble]({% link v24.3/architecture/storage-layer.md %}#pebble). #127997 +- Improved the performance of [job-system]({% link v24.3/show-jobs.md %}) related queries. #123848 +- The [query optimizer]({% link v24.3/cost-based-optimizer.md %}) now plans limited, [partial-index scans]({% link v24.3/partial-indexes.md %}) in more cases. #129901 +- The initialization of the execution engine for a query is now more efficient when the [query plan]({% link v24.3/cost-based-optimizer.md %}) contains [aggregate functions]({% link v24.3/functions-and-operators.md %}#aggregate-functions). #130834 +- Enabled multi-level [compactions]({% link v24.3/architecture/storage-layer.md %}#compaction) that moderately reduce [write amplification]({% link v24.3/architecture/storage-layer.md %}#write-amplification) within the [storage engine]({% link v24.3/architecture/storage-layer.md %}). #131378 +- Increased the per-vCPU concurrency limits for KV operations. Specifically, increased the `kv.dist_sender.concurrency_limit` [cluster setting]({% link v24.3/cluster-settings.md %}) to 384/vCPU (up from 64/vCPU) and `kv.streamer.concurrency_limit` to 96/vCPU (up from 8/vCPU). #131226 +- The [optimizer]({% link v24.3/cost-based-optimizer.md %}) now plans more efficient [lookup joins]({% link v24.3/joins.md %}#lookup-joins) in some cases. #131383

Build changes

-- Changed the AWS SDK version used for interactions with external storage from v1 to v2. [#129938][#129938] - -[#120327]: https://github.com/cockroachdb/cockroach/pull/120327 -[#123632]: https://github.com/cockroachdb/cockroach/pull/123632 -[#123848]: https://github.com/cockroachdb/cockroach/pull/123848 -[#125456]: https://github.com/cockroachdb/cockroach/pull/125456 -[#126523]: https://github.com/cockroachdb/cockroach/pull/126523 -[#126734]: https://github.com/cockroachdb/cockroach/pull/126734 -[#126901]: https://github.com/cockroachdb/cockroach/pull/126901 -[#126948]: https://github.com/cockroachdb/cockroach/pull/126948 -[#126970]: https://github.com/cockroachdb/cockroach/pull/126970 -[#127025]: https://github.com/cockroachdb/cockroach/pull/127025 -[#127066]: https://github.com/cockroachdb/cockroach/pull/127066 -[#127076]: https://github.com/cockroachdb/cockroach/pull/127076 -[#127098]: https://github.com/cockroachdb/cockroach/pull/127098 -[#127141]: https://github.com/cockroachdb/cockroach/pull/127141 -[#127145]: https://github.com/cockroachdb/cockroach/pull/127145 -[#127158]: https://github.com/cockroachdb/cockroach/pull/127158 -[#127186]: https://github.com/cockroachdb/cockroach/pull/127186 -[#127207]: https://github.com/cockroachdb/cockroach/pull/127207 -[#127252]: https://github.com/cockroachdb/cockroach/pull/127252 -[#127584]: https://github.com/cockroachdb/cockroach/pull/127584 -[#127701]: https://github.com/cockroachdb/cockroach/pull/127701 -[#127816]: https://github.com/cockroachdb/cockroach/pull/127816 -[#127827]: https://github.com/cockroachdb/cockroach/pull/127827 -[#127836]: https://github.com/cockroachdb/cockroach/pull/127836 -[#127850]: https://github.com/cockroachdb/cockroach/pull/127850 -[#127970]: https://github.com/cockroachdb/cockroach/pull/127970 -[#127997]: https://github.com/cockroachdb/cockroach/pull/127997 -[#128032]: https://github.com/cockroachdb/cockroach/pull/128032 -[#128049]: https://github.com/cockroachdb/cockroach/pull/128049 
-[#128084]: https://github.com/cockroachdb/cockroach/pull/128084 -[#128123]: https://github.com/cockroachdb/cockroach/pull/128123 -[#128170]: https://github.com/cockroachdb/cockroach/pull/128170 -[#128201]: https://github.com/cockroachdb/cockroach/pull/128201 -[#128400]: https://github.com/cockroachdb/cockroach/pull/128400 -[#128474]: https://github.com/cockroachdb/cockroach/pull/128474 -[#128498]: https://github.com/cockroachdb/cockroach/pull/128498 -[#128506]: https://github.com/cockroachdb/cockroach/pull/128506 -[#128788]: https://github.com/cockroachdb/cockroach/pull/128788 -[#128794]: https://github.com/cockroachdb/cockroach/pull/128794 -[#128813]: https://github.com/cockroachdb/cockroach/pull/128813 -[#128842]: https://github.com/cockroachdb/cockroach/pull/128842 -[#128996]: https://github.com/cockroachdb/cockroach/pull/128996 -[#129001]: https://github.com/cockroachdb/cockroach/pull/129001 -[#129007]: https://github.com/cockroachdb/cockroach/pull/129007 -[#129024]: https://github.com/cockroachdb/cockroach/pull/129024 -[#129052]: https://github.com/cockroachdb/cockroach/pull/129052 -[#129149]: https://github.com/cockroachdb/cockroach/pull/129149 -[#129153]: https://github.com/cockroachdb/cockroach/pull/129153 -[#129216]: https://github.com/cockroachdb/cockroach/pull/129216 -[#129331]: https://github.com/cockroachdb/cockroach/pull/129331 -[#129351]: https://github.com/cockroachdb/cockroach/pull/129351 -[#129378]: https://github.com/cockroachdb/cockroach/pull/129378 -[#129450]: https://github.com/cockroachdb/cockroach/pull/129450 -[#129564]: https://github.com/cockroachdb/cockroach/pull/129564 -[#129768]: https://github.com/cockroachdb/cockroach/pull/129768 -[#129840]: https://github.com/cockroachdb/cockroach/pull/129840 -[#129846]: https://github.com/cockroachdb/cockroach/pull/129846 -[#129901]: https://github.com/cockroachdb/cockroach/pull/129901 -[#129938]: https://github.com/cockroachdb/cockroach/pull/129938 -[#129946]: 
https://github.com/cockroachdb/cockroach/pull/129946 -[#129951]: https://github.com/cockroachdb/cockroach/pull/129951 -[#130064]: https://github.com/cockroachdb/cockroach/pull/130064 -[#130089]: https://github.com/cockroachdb/cockroach/pull/130089 -[#130103]: https://github.com/cockroachdb/cockroach/pull/130103 -[#130110]: https://github.com/cockroachdb/cockroach/pull/130110 -[#130117]: https://github.com/cockroachdb/cockroach/pull/130117 -[#130158]: https://github.com/cockroachdb/cockroach/pull/130158 -[#130164]: https://github.com/cockroachdb/cockroach/pull/130164 -[#130167]: https://github.com/cockroachdb/cockroach/pull/130167 -[#130198]: https://github.com/cockroachdb/cockroach/pull/130198 -[#130333]: https://github.com/cockroachdb/cockroach/pull/130333 -[#130366]: https://github.com/cockroachdb/cockroach/pull/130366 -[#130418]: https://github.com/cockroachdb/cockroach/pull/130418 -[#130576]: https://github.com/cockroachdb/cockroach/pull/130576 -[#130622]: https://github.com/cockroachdb/cockroach/pull/130622 -[#130670]: https://github.com/cockroachdb/cockroach/pull/130670 -[#130704]: https://github.com/cockroachdb/cockroach/pull/130704 -[#130725]: https://github.com/cockroachdb/cockroach/pull/130725 -[#130834]: https://github.com/cockroachdb/cockroach/pull/130834 -[#130897]: https://github.com/cockroachdb/cockroach/pull/130897 -[#130925]: https://github.com/cockroachdb/cockroach/pull/130925 -[#130928]: https://github.com/cockroachdb/cockroach/pull/130928 -[#130981]: https://github.com/cockroachdb/cockroach/pull/130981 -[#131027]: https://github.com/cockroachdb/cockroach/pull/131027 -[#131040]: https://github.com/cockroachdb/cockroach/pull/131040 -[#131043]: https://github.com/cockroachdb/cockroach/pull/131043 -[#131097]: https://github.com/cockroachdb/cockroach/pull/131097 -[#131150]: https://github.com/cockroachdb/cockroach/pull/131150 -[#131151]: https://github.com/cockroachdb/cockroach/pull/131151 -[#131226]: 
https://github.com/cockroachdb/cockroach/pull/131226 -[#131231]: https://github.com/cockroachdb/cockroach/pull/131231 -[#131250]: https://github.com/cockroachdb/cockroach/pull/131250 -[#131309]: https://github.com/cockroachdb/cockroach/pull/131309 -[#131326]: https://github.com/cockroachdb/cockroach/pull/131326 -[#131358]: https://github.com/cockroachdb/cockroach/pull/131358 -[#131378]: https://github.com/cockroachdb/cockroach/pull/131378 -[#131383]: https://github.com/cockroachdb/cockroach/pull/131383 -[#131396]: https://github.com/cockroachdb/cockroach/pull/131396 -[#131422]: https://github.com/cockroachdb/cockroach/pull/131422 -[#131463]: https://github.com/cockroachdb/cockroach/pull/131463 -[#131480]: https://github.com/cockroachdb/cockroach/pull/131480 -[#131529]: https://github.com/cockroachdb/cockroach/pull/131529 -[#131578]: https://github.com/cockroachdb/cockroach/pull/131578 -[#131590]: https://github.com/cockroachdb/cockroach/pull/131590 -[#131594]: https://github.com/cockroachdb/cockroach/pull/131594 -[#131625]: https://github.com/cockroachdb/cockroach/pull/131625 -[#131642]: https://github.com/cockroachdb/cockroach/pull/131642 -[#131661]: https://github.com/cockroachdb/cockroach/pull/131661 -[#131686]: https://github.com/cockroachdb/cockroach/pull/131686 -[#131687]: https://github.com/cockroachdb/cockroach/pull/131687 -[#131688]: https://github.com/cockroachdb/cockroach/pull/131688 -[#131689]: https://github.com/cockroachdb/cockroach/pull/131689 -[#131690]: https://github.com/cockroachdb/cockroach/pull/131690 -[#131691]: https://github.com/cockroachdb/cockroach/pull/131691 -[#131693]: https://github.com/cockroachdb/cockroach/pull/131693 -[#131717]: https://github.com/cockroachdb/cockroach/pull/131717 -[#131777]: https://github.com/cockroachdb/cockroach/pull/131777 -[#131778]: https://github.com/cockroachdb/cockroach/pull/131778 -[#93067]: https://github.com/cockroachdb/cockroach/pull/93067 +- Changed the AWS SDK version used for interactions with 
external storage from v1 to v2. #129938 + diff --git a/src/current/_includes/releases/v24.3/v24.3.0-alpha.2.md b/src/current/_includes/releases/v24.3/v24.3.0-alpha.2.md index 317b4731bfd..4a5b08fbb04 100644 --- a/src/current/_includes/releases/v24.3/v24.3.0-alpha.2.md +++ b/src/current/_includes/releases/v24.3/v24.3.0-alpha.2.md @@ -6,22 +6,22 @@ Release Date: October 14, 2024

Security updates

-- The parameters for an [HBA config entry]({% link v24.3/security-reference/authentication.md %}#hba-configuration-syntax) for LDAP are now validated when the entry is created or amended, in addition to the validation that happens during an authentication attempt. [#132086][#132086] +- The parameters for an [HBA config entry]({% link v24.3/security-reference/authentication.md %}#hba-configuration-syntax) for LDAP are now validated when the entry is created or amended, in addition to the validation that happens during an authentication attempt. #132086 -- Added automatic cleanup and validation for [default privileges]({% link v24.3/security-reference/authorization.md%}#default-privileges) that reference dropped roles after a major-version upgrade to v24.3. [#131782][#131782] +- Added automatic cleanup and validation for [default privileges]({% link v24.3/security-reference/authorization.md %}#default-privileges) that reference dropped roles after a major-version upgrade to v24.3. #131782

General changes

-- Changed the license `cockroach` is distributed under to the new CockroachDB Software License (CSL). [#131799][#131799] [#131794][#131794] [#131793][#131793] +- Changed the license `cockroach` is distributed under to the new CockroachDB Software License (CSL). #131799 #131794 #131793

{{ site.data.products.enterprise }} edition changes

-- You can now [authenticate to the DB console API]({% link v24.3/ui-overview.md %}#authentication) by supplying a Java Web Token (JWT) as a Bearer token in the Authorization header. [#130779][#130779] +- You can now [authenticate to the DB console API]({% link v24.3/ui-overview.md %}#authentication) by supplying a Java Web Token (JWT) as a Bearer token in the Authorization header. #130779

SQL language changes

-- To view comments on a type, you can use the new [`SHOW TYPES WITH COMMENT`]({% link v24.3/show-types.md %}#) command. Comments can be added using [`COMMENT ON`]({% link v24.3/comment-on.md %}). [#131183][#131183] -- You can create or alter a [user-defined function (UDF)]({% link v24.3/user-defined-functions.md %}) or [stored procedure (SP)]({% link v24.3/stored-procedures.md %}) with `[EXTERNAL] SECURITY DEFINER` instead of the default `[EXTERNAL] SECURITY INVOKER`. With `SECURITY DEFINER`, the privileges of the owner are checked when the UDF or SP is executed, rather than the privileges of the executor. The `EXTERNAL` keyword is optional and exists for SQL language conformity. [#129720][#129720] +- To view comments on a type, you can use the new [`SHOW TYPES WITH COMMENT`]({% link v24.3/show-types.md %}) command. Comments can be added using [`COMMENT ON`]({% link v24.3/comment-on.md %}). #131183 +- You can create or alter a [user-defined function (UDF)]({% link v24.3/user-defined-functions.md %}) or [stored procedure (SP)]({% link v24.3/stored-procedures.md %}) with `[EXTERNAL] SECURITY DEFINER` instead of the default `[EXTERNAL] SECURITY INVOKER`. With `SECURITY DEFINER`, the privileges of the owner are checked when the UDF or SP is executed, rather than the privileges of the executor. The `EXTERNAL` keyword is optional and exists for SQL language conformity. #129720

Operational changes

@@ -36,7 +36,7 @@ Release Date: October 14, 2024 - `kvflowcontrol.send_queue.scheduled.deducted_bytes` - `kvflowcontrol.send_queue.scheduled.force_flush` - [#131857][#131857] + #131857 - The following [metrics]({% link v24.3/metrics.md %}) have been renamed: @@ -47,48 +47,26 @@ Release Date: October 14, 2024 `kvflowcontrol.tokens.send.regular.disconnected` | `kvflowcontrol.tokens.send.regular.returned.disconnect` `kvflowcontrol.tokens.send.elastic.disconnected` | `kvflowcontrol.tokens.send.elastic.returned.disconnect` - [#131857][#131857] + #131857

Cluster virtualization changes

-- The `_status/ranges/` endpoint on DB Console [Advanced debug pages]({% link v24.3/ui-debug-pages.md %}) is now enabled for non-system virtual clusters, where it returns the ranges only for the tenant you are logged into. For the system virtual cluster, the `_status/ranges/` endpoint continues to return ranges for the specified node across all virtual clusters. [#131100][#131100] +- The `_status/ranges/` endpoint on DB Console [Advanced debug pages]({% link v24.3/ui-debug-pages.md %}) is now enabled for non-system virtual clusters, where it returns the ranges only for the tenant you are logged into. For the system virtual cluster, the `_status/ranges/` endpoint continues to return ranges for the specified node across all virtual clusters. #131100

DB Console changes

-- Improved performance in the **Databases**, **Tables View**, and **Table Details** sections of the [**Databases page**]({% link v24.3/ui-databases-page.md %}) [#131769][#131769] +- Improved performance in the **Databases**, **Tables View**, and **Table Details** sections of the [**Databases page**]({% link v24.3/ui-databases-page.md %}). #131769

Bug fixes

-- Fixed a bug where JSON values returned by `cockroach` commands using the `--format=sql` flag were not correctly escaped if they contained double quotes within a string. [#131881][#131881] -- Fixed an error that could happen if an [aggregate function]({% link v24.3/functions-and-operators.md %}#aggregate-functions) was used as the value in a `SET` command. [#131891][#131891] -- Fixed a rare bug introduced in v22.2 in which an update of a [primary key]({% link v24.3/primary-key.md %}) column could fail to update the primary index if it is also the only column in a separate column family. [#131869][#131869] -- Fixed a rare bug where dropping a column of `FLOAT4`, `FLOAT8`, `DECIMAL`, `JSON`, `ARRAY`, or collate `STRING` type stored in a single [column family]({% link v24.3/column-families.md %}) could prevent subsequent reading of the table if the column family was not the first column family. [#131967][#131967] -- Fixed an `unimplemented` internal error that could occur when ordering by a [`VECTOR`]({% link v24.3/vector.md %}) column. [#131703][#131703] +- Fixed a bug where JSON values returned by `cockroach` commands using the `--format=sql` flag were not correctly escaped if they contained double quotes within a string. #131881 +- Fixed an error that could happen if an [aggregate function]({% link v24.3/functions-and-operators.md %}#aggregate-functions) was used as the value in a `SET` command. #131891 +- Fixed a rare bug introduced in v22.2 in which an update of a [primary key]({% link v24.3/primary-key.md %}) column could fail to update the primary index if it is also the only column in a separate column family. #131869 +- Fixed a rare bug where dropping a column of `FLOAT4`, `FLOAT8`, `DECIMAL`, `JSON`, `ARRAY`, or collate `STRING` type stored in a single [column family]({% link v24.3/column-families.md %}) could prevent subsequent reading of the table if the column family was not the first column family. 
#131967 +- Fixed an `unimplemented` internal error that could occur when ordering by a [`VECTOR`]({% link v24.3/vector.md %}) column. #131703

Performance improvements

-- Efficiency has been improved when writing string-like values over the PostgreSQL wire protocol. [#131964][#131964] -- Error handling during periodic table history polling has been improved when the `schema_locked` [table parameter]({% link v24.3/with-storage-parameter.md %}#table-parameters) is not used. [#131951][#131951] - -[#129720]: https://github.com/cockroachdb/cockroach/pull/129720 -[#130779]: https://github.com/cockroachdb/cockroach/pull/130779 -[#131183]: https://github.com/cockroachdb/cockroach/pull/131183 -[#131703]: https://github.com/cockroachdb/cockroach/pull/131703 -[#131714]: https://github.com/cockroachdb/cockroach/pull/131714 -[#131769]: https://github.com/cockroachdb/cockroach/pull/131769 -[#131782]: https://github.com/cockroachdb/cockroach/pull/131782 -[#131793]: https://github.com/cockroachdb/cockroach/pull/131793 -[#131794]: https://github.com/cockroachdb/cockroach/pull/131794 -[#131799]: https://github.com/cockroachdb/cockroach/pull/131799 -[#131805]: https://github.com/cockroachdb/cockroach/pull/131805 -[#131827]: https://github.com/cockroachdb/cockroach/pull/131827 -[#131857]: https://github.com/cockroachdb/cockroach/pull/131857 -[#131869]: https://github.com/cockroachdb/cockroach/pull/131869 -[#131881]: https://github.com/cockroachdb/cockroach/pull/131881 -[#131891]: https://github.com/cockroachdb/cockroach/pull/131891 -[#131951]: https://github.com/cockroachdb/cockroach/pull/131951 -[#131964]: https://github.com/cockroachdb/cockroach/pull/131964 -[#131967]: https://github.com/cockroachdb/cockroach/pull/131967 -[#132086]: https://github.com/cockroachdb/cockroach/pull/132086 -[#132100]: https://github.com/cockroachdb/cockroach/pull/132100 -[#131100]: https://github.com/cockroachdb/cockroach/pull/131100 +- Efficiency has been improved when writing string-like values over the PostgreSQL wire protocol. 
#131964 +- Error handling during periodic table history polling has been improved when the `schema_locked` [table parameter]({% link v24.3/with-storage-parameter.md %}#table-parameters) is not used. #131951 + diff --git a/src/current/_includes/releases/v24.3/v24.3.0-beta.1.md b/src/current/_includes/releases/v24.3/v24.3.0-beta.1.md index b663ff4d5cc..47ae0fc9e88 100644 --- a/src/current/_includes/releases/v24.3/v24.3.0-beta.1.md +++ b/src/current/_includes/releases/v24.3/v24.3.0-beta.1.md @@ -6,83 +6,52 @@ Release Date: October 24, 2024

General changes

-- The cluster setting [`diagnostics.reporting.enabled`]({% link v24.3/cluster-settings.md %}#setting-diagnostics-reporting-enabled) is now ignored if the cluster has a [{{ site.data.products.enterprise }} Trial or {{ site.data.products.enterprise }} Free license]({% link v24.3/licensing-faqs.md %}#types-of-licenses), or if the reporting job is unable to load any license at all. [#132257][#132257] +- The cluster setting [`diagnostics.reporting.enabled`]({% link v24.3/cluster-settings.md %}#setting-diagnostics-reporting-enabled) is now ignored if the cluster has a [{{ site.data.products.enterprise }} Trial or {{ site.data.products.enterprise }} Free license]({% link v24.3/licensing-faqs.md %}#types-of-licenses), or if the reporting job is unable to load any license at all. #132257

{{ site.data.products.enterprise }} edition changes

-- This change ensures authorization with LDAP only works when the `ldapgrouplistfilter` option is present in the [HBA configuration]({% link v24.3/security-reference/authentication.md %}#hba-configuration-syntax), otherwise authentication will proceed with the provided LDAP auth method options in the HBA configuration. This change is to ensure external authorization with LDAP is opt-in rather than enabled by default. [#132235][#132235] -- Added a [changefeed sink]({% link v24.3/changefeed-sinks.md %}) error metric `changefeed.sink_errors`, and expanded reporting of the internal retries metric `changefeed.internal_retry_message_count` to all sinks that perform internal retries. [#132092][#132092] +- This change ensures authorization with LDAP only works when the `ldapgrouplistfilter` option is present in the [HBA configuration]({% link v24.3/security-reference/authentication.md %}#hba-configuration-syntax), otherwise authentication will proceed with the provided LDAP auth method options in the HBA configuration. This change is to ensure external authorization with LDAP is opt-in rather than enabled by default. #132235 +- Added a [changefeed sink]({% link v24.3/changefeed-sinks.md %}) error metric `changefeed.sink_errors`, and expanded reporting of the internal retries metric `changefeed.internal_retry_message_count` to all sinks that perform internal retries. #132092

SQL language changes

-- Implemented `DROP TRIGGER` statements. The `CASCADE` option for dropping a [trigger]({% link v24.3/sql-feature-support.md %}) is not supported. [#128540][#128540] -- Added support for `CREATE TRIGGER`. The `OR REPLACE` syntax is not supported. Also, [triggers]({% link v24.3/sql-feature-support.md %}) cannot be executed, so creation is a no-op. [#128540][#128540] -- [`REGIONAL BY ROW`]({% link v24.3/table-localities.md %}#regional-by-row-tables) and [`PARTITION ALL BY`]({% link v24.3/partitioning.md %}) tables can now be inserted into under [non-`SERIALIZABLE` isolation levels]({% link v24.3/read-committed.md %}) as long as there is no `ON CONFLICT` clause in the statement. Also, [`REGIONAL BY ROW`]({% link v24.3/table-localities.md %}#regional-by-row-tables) and [`PARTITION ALL BY`]({% link v24.3/partitioning.md %}) tables can now be updated under non-`SERIALIZABLE` isolation levels. [#129837][#129837] -- Attempting to add [foreign keys]({% link v24.3/foreign-key.md %}) referencing a table with [row-level TTL]({% link v24.3/row-level-ttl.md %}) enabled will generate a notice informing the user about potential impact on the row-level TTL deletion job. Similarly, a notice is generated while attempting to enable row-level TTL on a table that has inbound foreign key references. [#127935][#127935] -- It is now possible to assign to an element of a composite typed variable in [PL/pgSQL]({% link v24.3/plpgsql.md %}). For example, given a variable `foo` with two integer elements `x` and `y`, the following assignment statement is allowed: `foo.x := 100;`. [#132628][#132628] -- [Backup]({% link v24.3/backup.md %}) and [restore]({% link v24.3/restore.md %}) now work for tables with triggers. When the [`skip_missing_udfs` option]({% link v24.3/restore.md %}#skip-missing-udfs) is applied, triggers with missing trigger functions are removed from the table. [#128555][#128555] -- `UPSERT and INSERT ... 
ON CONFLICT` statements are now supported on [`REGIONAL BY ROW`]({% link v24.3/table-localities.md %}#regional-by-row-tables) tables under [`READ COMMITTED` isolation]({% link v24.3/read-committed.md %}). [#132768][#132768] -- Added support for row-level `BEFORE` triggers. A row-level trigger executes the trigger function for each row that is being mutated. `BEFORE` triggers fire before the mutation operation. [#132511][#132511] -- Added support for [PL/pgSQL]({% link v24.3/plpgsql.md %}) integer `FOR` loops, which iterate over a range of integer values. [#130211][#130211] +- Implemented `DROP TRIGGER` statements. The `CASCADE` option for dropping a [trigger]({% link v24.3/sql-feature-support.md %}) is not supported. #128540 +- Added support for `CREATE TRIGGER`. The `OR REPLACE` syntax is not supported. Also, [triggers]({% link v24.3/sql-feature-support.md %}) cannot be executed, so creation is a no-op. #128540 +- [`REGIONAL BY ROW`]({% link v24.3/table-localities.md %}#regional-by-row-tables) and [`PARTITION ALL BY`]({% link v24.3/partitioning.md %}) tables can now be inserted into under [non-`SERIALIZABLE` isolation levels]({% link v24.3/read-committed.md %}) as long as there is no `ON CONFLICT` clause in the statement. Also, [`REGIONAL BY ROW`]({% link v24.3/table-localities.md %}#regional-by-row-tables) and [`PARTITION ALL BY`]({% link v24.3/partitioning.md %}) tables can now be updated under non-`SERIALIZABLE` isolation levels. #129837 +- Attempting to add [foreign keys]({% link v24.3/foreign-key.md %}) referencing a table with [row-level TTL]({% link v24.3/row-level-ttl.md %}) enabled will generate a notice informing the user about potential impact on the row-level TTL deletion job. Similarly, a notice is generated while attempting to enable row-level TTL on a table that has inbound foreign key references. #127935 +- It is now possible to assign to an element of a composite typed variable in [PL/pgSQL]({% link v24.3/plpgsql.md %}). 
For example, given a variable `foo` with two integer elements `x` and `y`, the following assignment statement is allowed: `foo.x := 100;`. #132628 +- [Backup]({% link v24.3/backup.md %}) and [restore]({% link v24.3/restore.md %}) now work for tables with triggers. When the [`skip_missing_udfs` option]({% link v24.3/restore.md %}#skip-missing-udfs) is applied, triggers with missing trigger functions are removed from the table. #128555 +- `UPSERT and INSERT ... ON CONFLICT` statements are now supported on [`REGIONAL BY ROW`]({% link v24.3/table-localities.md %}#regional-by-row-tables) tables under [`READ COMMITTED` isolation]({% link v24.3/read-committed.md %}). #132768 +- Added support for row-level `BEFORE` triggers. A row-level trigger executes the trigger function for each row that is being mutated. `BEFORE` triggers fire before the mutation operation. #132511 +- Added support for [PL/pgSQL]({% link v24.3/plpgsql.md %}) integer `FOR` loops, which iterate over a range of integer values. #130211

Operational changes

-- [Admission Control]({% link v24.3/admission-control.md %}) now has an integration for pacing snapshot ingest traffic based on disk bandwidth. `kvadmission.store.snapshot_ingest_bandwidth_control.enabled` is used to turn on this integration. It requires provisioned bandwidth to be set for the store (or cluster through the [cluster setting]({% link v24.3/cluster-settings.md %})) for it to take effect. [#131243][#131243] -- Added validation to check whether [audit logging]({% link v24.3/configure-logs.md %}) and [buffering configurations]({% link v24.3/configure-logs.md %}#log-buffering-for-network-sinks) are both present in the [file log sink]({% link v24.3/configure-logs.md %}#configure-log-sinks). Audit logging and buffering configuration should not both exist in the file log sink. [#132742][#132742] -- Updated the [file log sink]({% link v24.3/configure-logs.md %}#configure-log-sinks) validation message. This would give clear indication to the user about the expected valid configuration. [#132899][#132899] +- [Admission Control]({% link v24.3/admission-control.md %}) now has an integration for pacing snapshot ingest traffic based on disk bandwidth. `kvadmission.store.snapshot_ingest_bandwidth_control.enabled` is used to turn on this integration. It requires provisioned bandwidth to be set for the store (or cluster through the [cluster setting]({% link v24.3/cluster-settings.md %})) for it to take effect. #131243 +- Added validation to check whether [audit logging]({% link v24.3/configure-logs.md %}) and [buffering configurations]({% link v24.3/configure-logs.md %}#log-buffering-for-network-sinks) are both present in the [file log sink]({% link v24.3/configure-logs.md %}#configure-log-sinks). Audit logging and buffering configuration should not both exist in the file log sink. #132742 +- Updated the [file log sink]({% link v24.3/configure-logs.md %}#configure-log-sinks) validation message. 
This would give clear indication to the user about the expected valid configuration. #132899

DB Console changes

-- The value of the automatic [statistics]({% link v24.3/cost-based-optimizer.md %}#table-statistics) collection cluster setting [`sql.stats.automatic_collection.enabled`]({% link v24.3/cluster-settings.md %}#setting-sql-stats-automatic-collection-enabled) is now in the top right corner of the [**Databases**]({% link v24.3/ui-databases-page.md %}) overview page. [#132269][#132269] -- In the new [**Databases**]({% link v24.3/ui-databases-page.md %}) and [**Tables**]({% link v24.3/ui-databases-page.md %}#tables-list-tab) pages, when cached data is being refreshed, the refresh button will be disabled and its tooltip text will display, `Data is currently refreshing`. [#132462][#132462] +- The value of the automatic [statistics]({% link v24.3/cost-based-optimizer.md %}#table-statistics) collection cluster setting [`sql.stats.automatic_collection.enabled`]({% link v24.3/cluster-settings.md %}#setting-sql-stats-automatic-collection-enabled) is now in the top right corner of the [**Databases**]({% link v24.3/ui-databases-page.md %}) overview page. #132269 +- In the new [**Databases**]({% link v24.3/ui-databases-page.md %}) and [**Tables**]({% link v24.3/ui-databases-page.md %}#tables-list-tab) pages, when cached data is being refreshed, the refresh button will be disabled and its tooltip text will display, `Data is currently refreshing`. #132462

Bug fixes

-- Addressed a rare bug that could prevent [backups]({% link v24.3/backup.md %}) taken during a [`DROP COLUMN`]({% link v24.3/alter-table.md %}#drop-column) operation with a [sequence]({% link v24.3/create-sequence.md %}) owner from [restoring]({% link v24.3/restore.md %}) with the error: `rewriting descriptor ids: missing rewrite for in SequenceOwner...`. [#132202][#132202] -- Fixed a bug existing since before v23.1 that could lead to incorrect results in rare cases. The bug requires a [join]({% link v24.3/joins.md %}) between two tables with an equality between columns with equivalent, but not identical [types]({% link v24.3/data-types.md %}) (e.g., `OID` and `REGCLASS`). In addition, the join must lookup into an [index]({% link v24.3/indexes.md %}) that includes a [computed column]({% link v24.3/computed-columns.md %}) that references one of the equivalent columns. [#126345][#126345] -- Fixed a bug existing since before v23.1 that could lead to incorrect results in rare cases. The bug requires a lookup [join]({% link v24.3/joins.md %}) into a table with a computed [index]({% link v24.3/indexes.md %}) column, where the [computed column]({% link v24.3/computed-columns.md %}) expression is composite sensitive. A composite sensitive expression can compare differently if supplied non-identical but equivalent input values (e.g., `2.0::DECIMAL` versus `2.00::DECIMAL`). [#126345][#126345] -- Fixed a bug that caused quotes around the name of a routine to be dropped when it was called within another [routine]({% link v24.3/user-defined-functions.md %}). This could prevent the correct routine from being resolved if the nested routine name was case-sensitive. The bug has existed since v24.1 when nested routines were introduced. [#131643][#131643] -- Fixed a bug where the [SQL shell]({% link v24.3/cockroach-sql.md %}) would print out the previous error message when executing the `quit` command. 
[#130736][#130736] -- Fixed a bug where a [span statistics]({% link v24.3/show-ranges.md %}#span-statistics) request on a mixed-version cluster resulted in a null pointer exception. [#132349][#132349] -- Fixed an issue where [changefeeds]({% link v24.3/change-data-capture-overview.md %}) would fail to update [protected timestamp records]({% link v24.3/protect-changefeed-data.md %}) in the face of [retryable errors]({% link v24.3/monitor-and-debug-changefeeds.md %}#changefeed-retry-errors). [#132712][#132712] -- The [`franz-go`](https://github.com/twmb/franz-go) library has been updated to fix a potential deadlock on [changefeed]({% link v24.3/change-data-capture-overview.md %}) restarts. [#132761][#132761] -- Fixed a bug that in rare cases could cause incorrect evaluation of [scalar expressions]({% link v24.3/scalar-expressions.md %}) involving `NULL` values. [#132261][#132261] -- Fixed a bug in the query [optimizer]({% link v24.3/cost-based-optimizer.md %}) that in rare cases could cause CockroachDB nodes to crash. The bug could occur when a query contains a filter in the form `col IN (elem0, elem1, ..., elemN)` only when `N` is very large, (e.g., 1.6+ million), and when `col` exists in a [hash-sharded index]({% link v24.3/hash-sharded-indexes.md %}), or exists a table with an indexed, [computed column]({% link v24.3/computed-columns.md %}) dependent on `col`. [#132701][#132701] -- The `proretset` column of the [`pg_catalog.pg_proc`]({% link v24.3/pg-catalog.md %}) table is now properly set to `true` for set-returning built-in functions. [#132853][#132853] -- Fixed an error that could be caused by using an [`AS OF SYSTEM TIME`]({% link v24.3/as-of-system-time.md %}) expression that references a user-defined (or unknown) type name. These kinds of expressions are invalid, but previously the error was not handled properly. Now, a correct error message is returned. 
[#132348][#132348] +- Addressed a rare bug that could prevent [backups]({% link v24.3/backup.md %}) taken during a [`DROP COLUMN`]({% link v24.3/alter-table.md %}#drop-column) operation with a [sequence]({% link v24.3/create-sequence.md %}) owner from [restoring]({% link v24.3/restore.md %}) with the error: `rewriting descriptor ids: missing rewrite for in SequenceOwner...`. #132202 +- Fixed a bug existing since before v23.1 that could lead to incorrect results in rare cases. The bug requires a [join]({% link v24.3/joins.md %}) between two tables with an equality between columns with equivalent, but not identical [types]({% link v24.3/data-types.md %}) (e.g., `OID` and `REGCLASS`). In addition, the join must lookup into an [index]({% link v24.3/indexes.md %}) that includes a [computed column]({% link v24.3/computed-columns.md %}) that references one of the equivalent columns. #126345 +- Fixed a bug existing since before v23.1 that could lead to incorrect results in rare cases. The bug requires a lookup [join]({% link v24.3/joins.md %}) into a table with a computed [index]({% link v24.3/indexes.md %}) column, where the [computed column]({% link v24.3/computed-columns.md %}) expression is composite sensitive. A composite sensitive expression can compare differently if supplied non-identical but equivalent input values (e.g., `2.0::DECIMAL` versus `2.00::DECIMAL`). #126345 +- Fixed a bug that caused quotes around the name of a routine to be dropped when it was called within another [routine]({% link v24.3/user-defined-functions.md %}). This could prevent the correct routine from being resolved if the nested routine name was case-sensitive. The bug has existed since v24.1 when nested routines were introduced. #131643 +- Fixed a bug where the [SQL shell]({% link v24.3/cockroach-sql.md %}) would print out the previous error message when executing the `quit` command. 
#130736 +- Fixed a bug where a [span statistics]({% link v24.3/show-ranges.md %}#span-statistics) request on a mixed-version cluster resulted in a null pointer exception. #132349 +- Fixed an issue where [changefeeds]({% link v24.3/change-data-capture-overview.md %}) would fail to update [protected timestamp records]({% link v24.3/protect-changefeed-data.md %}) in the face of [retryable errors]({% link v24.3/monitor-and-debug-changefeeds.md %}#changefeed-retry-errors). #132712 +- The [`franz-go`](https://github.com/twmb/franz-go) library has been updated to fix a potential deadlock on [changefeed]({% link v24.3/change-data-capture-overview.md %}) restarts. #132761 +- Fixed a bug that in rare cases could cause incorrect evaluation of [scalar expressions]({% link v24.3/scalar-expressions.md %}) involving `NULL` values. #132261 +- Fixed a bug in the query [optimizer]({% link v24.3/cost-based-optimizer.md %}) that in rare cases could cause CockroachDB nodes to crash. The bug could occur when a query contains a filter in the form `col IN (elem0, elem1, ..., elemN)` only when `N` is very large, (e.g., 1.6+ million), and when `col` exists in a [hash-sharded index]({% link v24.3/hash-sharded-indexes.md %}), or exists a table with an indexed, [computed column]({% link v24.3/computed-columns.md %}) dependent on `col`. #132701 +- The `proretset` column of the [`pg_catalog.pg_proc`]({% link v24.3/pg-catalog.md %}) table is now properly set to `true` for set-returning built-in functions. #132853 +- Fixed an error that could be caused by using an [`AS OF SYSTEM TIME`]({% link v24.3/as-of-system-time.md %}) expression that references a user-defined (or unknown) type name. These kinds of expressions are invalid, but previously the error was not handled properly. Now, a correct error message is returned. #132348

Build changes

-- Upgraded to Go v1.23.2. [#132111][#132111] - -[#126345]: https://github.com/cockroachdb/cockroach/pull/126345 -[#127935]: https://github.com/cockroachdb/cockroach/pull/127935 -[#128540]: https://github.com/cockroachdb/cockroach/pull/128540 -[#128555]: https://github.com/cockroachdb/cockroach/pull/128555 -[#129837]: https://github.com/cockroachdb/cockroach/pull/129837 -[#130211]: https://github.com/cockroachdb/cockroach/pull/130211 -[#130736]: https://github.com/cockroachdb/cockroach/pull/130736 -[#131243]: https://github.com/cockroachdb/cockroach/pull/131243 -[#131291]: https://github.com/cockroachdb/cockroach/pull/131291 -[#131643]: https://github.com/cockroachdb/cockroach/pull/131643 -[#132023]: https://github.com/cockroachdb/cockroach/pull/132023 -[#132092]: https://github.com/cockroachdb/cockroach/pull/132092 -[#132111]: https://github.com/cockroachdb/cockroach/pull/132111 -[#132202]: https://github.com/cockroachdb/cockroach/pull/132202 -[#132235]: https://github.com/cockroachdb/cockroach/pull/132235 -[#132257]: https://github.com/cockroachdb/cockroach/pull/132257 -[#132261]: https://github.com/cockroachdb/cockroach/pull/132261 -[#132269]: https://github.com/cockroachdb/cockroach/pull/132269 -[#132348]: https://github.com/cockroachdb/cockroach/pull/132348 -[#132349]: https://github.com/cockroachdb/cockroach/pull/132349 -[#132462]: https://github.com/cockroachdb/cockroach/pull/132462 -[#132478]: https://github.com/cockroachdb/cockroach/pull/132478 -[#132511]: https://github.com/cockroachdb/cockroach/pull/132511 -[#132628]: https://github.com/cockroachdb/cockroach/pull/132628 -[#132701]: https://github.com/cockroachdb/cockroach/pull/132701 -[#132712]: https://github.com/cockroachdb/cockroach/pull/132712 -[#132742]: https://github.com/cockroachdb/cockroach/pull/132742 -[#132761]: https://github.com/cockroachdb/cockroach/pull/132761 -[#132768]: https://github.com/cockroachdb/cockroach/pull/132768 -[#132853]: https://github.com/cockroachdb/cockroach/pull/132853 
-[#132899]: https://github.com/cockroachdb/cockroach/pull/132899 +- Upgraded to Go v1.23.2. #132111 + diff --git a/src/current/_includes/releases/v24.3/v24.3.0-beta.2.md b/src/current/_includes/releases/v24.3/v24.3.0-beta.2.md index 2e68bc6fce9..cf4b7680e59 100644 --- a/src/current/_includes/releases/v24.3/v24.3.0-beta.2.md +++ b/src/current/_includes/releases/v24.3/v24.3.0-beta.2.md @@ -6,28 +6,22 @@ Release Date: October 28, 2024

SQL language changes

-- If a table is the destination of a logical data replication stream, then only schema change statements that are deemed safe are allowed on the table. Safe statements are those that do not result in a rebuild of the primary [index]({% link v24.3/indexes.md %}) and do not create an index on a virtual [computed column]({% link v24.3/computed-columns.md %}). [#133266][#133266] +- If a table is the destination of a logical data replication stream, then only schema change statements that are deemed safe are allowed on the table. Safe statements are those that do not result in a rebuild of the primary [index]({% link v24.3/indexes.md %}) and do not create an index on a virtual [computed column]({% link v24.3/computed-columns.md %}). #133266

Operational changes

-- The two new metrics `sql.crud_query.count` and `sql.crud_query.started.count` measure the number of [`INSERT`]({% link v24.3/insert.md %})/[`UPDATE`]({% link v24.3/update.md %})/[`DELETE`]({% link v24.3/delete.md %})/[`SELECT`]({% link v24.3/selection-queries.md %}) queries executed and started respectively. [#133198][#133198] -- When creating a logical data replication stream, any [user-defined types]({% link v24.3/create-type.md %}) in the source and destination are now checked for equivalency. This allows for creating a stream that handles user-defined types without needing to use the `WITH SKIP SCHEMA CHECK` option as long as the stream uses `mode = immediate`. [#133274][#133274] -- Logical data replication streams that reference tables with [user-defined types]({% link v24.3/create-type.md %}) can now be created with the `mode = immediate` option. [#133295][#133295] +- The two new metrics `sql.crud_query.count` and `sql.crud_query.started.count` measure the number of [`INSERT`]({% link v24.3/insert.md %})/[`UPDATE`]({% link v24.3/update.md %})/[`DELETE`]({% link v24.3/delete.md %})/[`SELECT`]({% link v24.3/selection-queries.md %}) queries executed and started respectively. #133198 +- When creating a logical data replication stream, any [user-defined types]({% link v24.3/create-type.md %}) in the source and destination are now checked for equivalency. This allows for creating a stream that handles user-defined types without needing to use the `WITH SKIP SCHEMA CHECK` option as long as the stream uses `mode = immediate`. #133274 +- Logical data replication streams that reference tables with [user-defined types]({% link v24.3/create-type.md %}) can now be created with the `mode = immediate` option. #133295

DB Console changes

-- The **SQL Statements** graph on the [**Overview**]({% link v24.3/ui-overview-dashboard.md %}) and [**SQL**]({% link v24.3/ui-sql-dashboard.md %}) dashboard pages in DB Console has been renamed [**SQL Queries Per Second**]({% link v24.3/ui-overview-dashboard.md %}#sql-queries-per-second) and now shows **Total Queries** as a general Queries Per Second (QPS) metric. [#133198][#133198] -- Due to the inaccuracy of the **Range Count** column on the [**Databases** page]({% link v24.3/ui-databases-page.md %}) and the cost incurred to fetch the correct range count for every database in a cluster, this data will no longer be visible. This data is still available via a [`SHOW RANGES`]({% link v24.3/show-ranges.md %}) query. [#133267][#133267] +- The **SQL Statements** graph on the [**Overview**]({% link v24.3/ui-overview-dashboard.md %}) and [**SQL**]({% link v24.3/ui-sql-dashboard.md %}) dashboard pages in DB Console has been renamed [**SQL Queries Per Second**]({% link v24.3/ui-overview-dashboard.md %}#sql-queries-per-second) and now shows **Total Queries** as a general Queries Per Second (QPS) metric. #133198 +- Due to the inaccuracy of the **Range Count** column on the [**Databases** page]({% link v24.3/ui-databases-page.md %}) and the cost incurred to fetch the correct range count for every database in a cluster, this data will no longer be visible. This data is still available via a [`SHOW RANGES`]({% link v24.3/show-ranges.md %}) query. #133267

Bug fixes

-- Users with the [`admin` role]({% link v24.3/security-reference/authorization.md %}#admin-role) can now run [`ALTER DEFAULT PRIVILEGES FOR target_role ...`]({% link v24.3/alter-default-privileges.md %}) on any `target_role`. Previously, this could result in a privilege error, which is incorrect as `admin`s are allowed to perform any operation. [#133072][#133072] -- [`REASSIGN OWNED BY current_owner_role ...`]({% link v24.3/reassign-owned.md %}) will now transfer ownership of the `public` schema. Previously, it would always skip over the `public` schema even if it was owned by the `current_owner_role`. [#133072][#133072] +- Users with the [`admin` role]({% link v24.3/security-reference/authorization.md %}#admin-role) can now run [`ALTER DEFAULT PRIVILEGES FOR target_role ...`]({% link v24.3/alter-default-privileges.md %}) on any `target_role`. Previously, this could result in a privilege error, which is incorrect as `admin`s are allowed to perform any operation. #133072 +- [`REASSIGN OWNED BY current_owner_role ...`]({% link v24.3/reassign-owned.md %}) will now transfer ownership of the `public` schema. Previously, it would always skip over the `public` schema even if it was owned by the `current_owner_role`. #133072 -[#133072]: https://github.com/cockroachdb/cockroach/pull/133072 -[#133198]: https://github.com/cockroachdb/cockroach/pull/133198 -[#133266]: https://github.com/cockroachdb/cockroach/pull/133266 -[#133267]: https://github.com/cockroachdb/cockroach/pull/133267 -[#133274]: https://github.com/cockroachdb/cockroach/pull/133274 -[#133295]: https://github.com/cockroachdb/cockroach/pull/133295 diff --git a/src/current/_includes/releases/v24.3/v24.3.0-beta.3.md b/src/current/_includes/releases/v24.3/v24.3.0-beta.3.md index 15622478f23..79f734ac490 100644 --- a/src/current/_includes/releases/v24.3/v24.3.0-beta.3.md +++ b/src/current/_includes/releases/v24.3/v24.3.0-beta.3.md @@ -6,26 +6,26 @@ Release Date: November 5, 2024

Security updates

-- Client authentication errors using LDAP now log more details to help with troubleshooting authentication and authorization issues. [#133812][#133812] +- Client authentication errors using LDAP now log more details to help with troubleshooting authentication and authorization issues. #133812

SQL changes

-- [Physical Cluster Replication]({% link v24.3/physical-cluster-replication-overview.md %}) reader catalogs now bypass AOST timestamps using the `bypass_pcr_reader_catalog_aost` session variable, which can be used to modify cluster settings within the reader. [#133876][#133876] +- [Physical Cluster Replication]({% link v24.3/physical-cluster-replication-overview.md %}) reader catalogs now bypass AOST timestamps using the `bypass_pcr_reader_catalog_aost` session variable, which can be used to modify cluster settings within the reader. #133876

Operational changes

-- Added a timer for inner [changefeed sink]({% link v24.3/changefeed-sinks.md %}) client flushes. [#133288][#133288] -- Rows replicated by Logical Data Replication in `immediate` mode are now considered in the decision to recompute SQL table statistics. [#133591][#133591] -- The new cluster setting `kvadmission.flow_controller.token_reset_epoch` can be used to refill replication [admission control]({% link v24.3/admission-control.md %}) v2 tokens. This is an advanced setting. Use it only after consultation with your account team. [#133294][#133294] -- The new cluster setting `goschedstats.always_use_short_sample_period.enabled`, when set to `true`, helps to prevent unnecessary queueing due to CPU [admission control]({% link v24.3/admission-control.md %}s. [#133585][#133585] +- Added a timer for inner [changefeed sink]({% link v24.3/changefeed-sinks.md %}) client flushes. #133288 +- Rows replicated by Logical Data Replication in `immediate` mode are now considered in the decision to recompute SQL table statistics. #133591 +- The new cluster setting `kvadmission.flow_controller.token_reset_epoch` can be used to refill replication [admission control]({% link v24.3/admission-control.md %}) v2 tokens. This is an advanced setting. Use it only after consultation with your account team. #133294 +- The new cluster setting `goschedstats.always_use_short_sample_period.enabled`, when set to `true`, helps to prevent unnecessary queueing due to CPU [admission control]({% link v24.3/admission-control.md %}). #133585

DB Console changes

-- In [Database]({% link v24.3/ui-databases-page.md %}) pages, the **Refresh** tooltip now includes details about the progress of cache updates and when the job started. [#133351][#133351] +- In [Database]({% link v24.3/ui-databases-page.md %}) pages, the **Refresh** tooltip now includes details about the progress of cache updates and when the job started. #133351

Bug fixes

-- Fixed a bug where [changefeed sink]({% link v24.3/changefeed-sinks.md %})) timers were not correctly registered with the metric system. [#133288][#133288] +- Fixed a bug where [changefeed sink]({% link v24.3/changefeed-sinks.md %}) timers were not correctly registered with the metric system. #133288 - Fixed a bug that could cause new connections to fail with the following error after upgrading: `ERROR: invalid value for parameter "vectorize": "unknown(1)" SQLSTATE: 22023 HINT: Available values: off,on,experimental_always`. To encounter this bug, the cluster must have: 1. Run on version v21.1 at some point in the past 1. Run `SET CLUSTER SETTING sql.defaults.vectorize = 'on';` while running v21.1. @@ -46,26 +46,11 @@ Release Date: November 5, 2024 RESET CLUSTER SETTING sql.defaults.vectorize; ~~~ - `1` is now allowed as a value for this setting, and is equivalent to `on`. [#133371][#133371] -- Fixed a bug in v22.2.13+, v23.1.9+, and v23.2 that could cause the internal error `interface conversion: coldata.Column is` in an edge case. [#133762][#133762] -- Fixed a bug introduced in v20.1.0 that could cause erroneous `NOT NULL` constraint violation errors to be logged during `UPSERT` and `INSERT` statements with the `ON CONFLICT ...DO UPDATE` clause that update an existing row and a subset of columns that did not include a `NOT NULL` column of the table. [#133820][#133820] -- Fixed a that could cache and reuse a non-reusable query plan, such as a plan for a DDL or `SHOW` statement, when `plan_cache_mode` was set to `auto` or `force_generic_plan`, which are not the default options. [#133073][#133073] -- Fixed an unhandled error that could occur while running the command `REVOKE ... ON SEQUENCE FROM ... {user}` on an object that is not a sequence. [#133710][#133710] -- Fixed a panic that could occur while running a `CREATE TABLE AS` statement that included a [sequence]({% link v24.3/create-sequence.md %}) with an invalid function overload. 
[#133870][#133870] - - -[#133073]: https://github.com/cockroachdb/cockroach/pull/133073 -[#133288]: https://github.com/cockroachdb/cockroach/pull/133288 -[#133294]: https://github.com/cockroachdb/cockroach/pull/133294 -[#133351]: https://github.com/cockroachdb/cockroach/pull/133351 -[#133371]: https://github.com/cockroachdb/cockroach/pull/133371 -[#133414]: https://github.com/cockroachdb/cockroach/pull/133414 -[#133472]: https://github.com/cockroachdb/cockroach/pull/133472 -[#133585]: https://github.com/cockroachdb/cockroach/pull/133585 -[#133591]: https://github.com/cockroachdb/cockroach/pull/133591 -[#133710]: https://github.com/cockroachdb/cockroach/pull/133710 -[#133762]: https://github.com/cockroachdb/cockroach/pull/133762 -[#133812]: https://github.com/cockroachdb/cockroach/pull/133812 -[#133820]: https://github.com/cockroachdb/cockroach/pull/133820 -[#133870]: https://github.com/cockroachdb/cockroach/pull/133870 -[#133876]: https://github.com/cockroachdb/cockroach/pull/133876 + `1` is now allowed as a value for this setting, and is equivalent to `on`. #133371 +- Fixed a bug in v22.2.13+, v23.1.9+, and v23.2 that could cause the internal error `interface conversion: coldata.Column is` in an edge case. #133762 +- Fixed a bug introduced in v20.1.0 that could cause erroneous `NOT NULL` constraint violation errors to be logged during `UPSERT` and `INSERT` statements with the `ON CONFLICT ...DO UPDATE` clause that update an existing row and a subset of columns that did not include a `NOT NULL` column of the table. #133820 +- Fixed a bug that could cache and reuse a non-reusable query plan, such as a plan for a DDL or `SHOW` statement, when `plan_cache_mode` was set to `auto` or `force_generic_plan`, which are not the default options. #133073 +- Fixed an unhandled error that could occur while running the command `REVOKE ... ON SEQUENCE FROM ... {user}` on an object that is not a sequence. 
#133710 +- Fixed a panic that could occur while running a `CREATE TABLE AS` statement that included a [sequence]({% link v24.3/create-sequence.md %}) with an invalid function overload. #133870 + + diff --git a/src/current/_includes/releases/v24.3/v24.3.0-rc.1.md b/src/current/_includes/releases/v24.3/v24.3.0-rc.1.md index 2e4a709a00b..69b0b0c6474 100644 --- a/src/current/_includes/releases/v24.3/v24.3.0-rc.1.md +++ b/src/current/_includes/releases/v24.3/v24.3.0-rc.1.md @@ -6,39 +6,39 @@ Release Date: November 18, 2024

Security updates

-- All cluster settings that accept strings are now fully redacted when transmitted as part of CockroachDB's diagnostics telemetry. This payload includes a record of modified cluster settings and their values when they are not strings. Customers who previously applied the mitigations in Technical Advisory 133479 can safely set the value of cluster setting `server.redact_sensitive_settings.enabled` to false and turn on diagnostic reporting via the `diagnostics.reporting.enabled` cluster setting without leaking sensitive cluster settings values. [#134018][#134018] +- All cluster settings that accept strings are now fully redacted when transmitted as part of CockroachDB's diagnostics telemetry. This payload includes a record of modified cluster settings and their values when they are not strings. Customers who previously applied the mitigations in Technical Advisory 133479 can safely set the value of cluster setting `server.redact_sensitive_settings.enabled` to false and turn on diagnostic reporting via the `diagnostics.reporting.enabled` cluster setting without leaking sensitive cluster settings values. #134018

SQL language changes

-- Row-level `AFTER` triggers can now be executed in response to mutations on a table. Row-level `AFTER` triggers fire after checks and cascades have completed for the query. [#133320][#133320] -- Cascades can now execute row-level `BEFORE` triggers. By default, attempting to modify or eliminate the cascading `UPDATE` or `DELETE` operation results in a `Triggered Data Change Violation` error. To bypass this error, you can set the `unsafe_allow_triggers_modifying_cascades` query option to `true`. This could result in constraint violations. [#134444][#134444] -- String constants can now be compared with collated strings. [#134086][#134086] +- Row-level `AFTER` triggers can now be executed in response to mutations on a table. Row-level `AFTER` triggers fire after checks and cascades have completed for the query. #133320 +- Cascades can now execute row-level `BEFORE` triggers. By default, attempting to modify or eliminate the cascading `UPDATE` or `DELETE` operation results in a `Triggered Data Change Violation` error. To bypass this error, you can set the `unsafe_allow_triggers_modifying_cascades` query option to `true`. This could result in constraint violations. #134444 +- String constants can now be compared with collated strings. #134086

Operational changes

-- The `kvadmission.low_pri_read_elastic_control.enabled` cluster setting has been removed, because all bulk requests are now subject to elastic admission control admission by default. [#134486][#134486] +- The `kvadmission.low_pri_read_elastic_control.enabled` cluster setting has been removed, because all bulk requests are now subject to elastic admission control admission by default. #134486 - The following metrics have been added for Logic Data Replication (LDR): - `logical_replication.catchup_ranges`: the number of source side ranges conducting catchup scans. - `logical_replication.scanning_ranges`: the number source side ranges conducting initial scans. - - In the DB Console, these metrics may not be accurate if multiple LDR jobs are running. The metrics are accurate when exported from the Prometheus endpoint. [#134674][#134674] -- The backup and restore syntax update of `cockroach workload` which was introduced in [#134610][#134610] #has been reverted. [#134645][#134645] + - In the DB Console, these metrics may not be accurate if multiple LDR jobs are running. The metrics are accurate when exported from the Prometheus endpoint. #134674 +- The backup and restore syntax update of `cockroach workload` which was introduced in #134610 has been reverted. #134645

DB Console changes

-- After finalizing an upgrade to v24.3, an updated version of the **Databases** page will be available. [#134244][#134244] -- Users with the `CONNECT` privilege can now access the **Databases** page. [#134542][#134542] +- After finalizing an upgrade to v24.3, an updated version of the **Databases** page will be available. #134244 +- Users with the `CONNECT` privilege can now access the **Databases** page. #134542

Bug fixes

-- Fixed a bug where an LDAP connection would be closed by the server and would not be retried by CockroachDB. [#134277] -- Fixed a bug that prevented LDAP authorization from successfully assigning CockroachDB roles to users when the source group name contained periods or hyphens. [#134944][#134944] -- Fixed a bug introduced in v22.2 that could cause significantly increased query latency while executing queries with index or lookup joins when the ordering needs to be maintained [#134367][#134367] -- Fixed a bug where `UPSERT` statements on regional by row tables under non-serializable isolations would not display show uniqueness constraints in `EXPLAIN` output. Even when not displayed, the constraints were enforced. [#134267][#134267] -- Fixed a bug where uniqueness constraints constraints enforced with tombstone writes were not shown in the output of `EXPLAIN (OPT)`. [#134482][#134482] -- Fixed a bug where `DISCARD ALL` statements were erroneously counted under the `sql.ddl.count` metric instead of the `sql.misc.count` metric. [#134510][#134510] -- Fixed a bug that could cause a backup or restore operation on AWS to fail with a KMS error due to a missing `default` shared config. [#134536][#134536] -- Fixed a bug that could prevent a user from running schema change operations on a restored table that was previously apart of a Logic Data Replication (LDR) stream. [#134675][#134675] +- Fixed a bug where an LDAP connection would be closed by the server and would not be retried by CockroachDB. #134277 +- Fixed a bug that prevented LDAP authorization from successfully assigning CockroachDB roles to users when the source group name contained periods or hyphens. 
#134944 +- Fixed a bug introduced in v22.2 that could cause significantly increased query latency while executing queries with index or lookup joins when the ordering needs to be maintained. #134367 +- Fixed a bug where `UPSERT` statements on regional by row tables under non-serializable isolations would not display uniqueness constraints in `EXPLAIN` output. Even when not displayed, the constraints were enforced. #134267 +- Fixed a bug where uniqueness constraints enforced with tombstone writes were not shown in the output of `EXPLAIN (OPT)`. #134482 +- Fixed a bug where `DISCARD ALL` statements were erroneously counted under the `sql.ddl.count` metric instead of the `sql.misc.count` metric. #134510 +- Fixed a bug that could cause a backup or restore operation on AWS to fail with a KMS error due to a missing `default` shared config. #134536 +- Fixed a bug that could prevent a user from running schema change operations on a restored table that was previously a part of a Logical Data Replication (LDR) stream. #134675

Performance improvements

@@ -49,33 +49,9 @@ Release Date: November 18, 2024 SELECT * FROM t WHERE j->'a' = '10' AND j->'b' = '20' ~~~ - [#134002][#134002] + #134002

Build changes

-- Upgraded to Go 1.22.8 [#134427][#134427] - -[#133320]: https://github.com/cockroachdb/cockroach/pull/133320 -[#133997]: https://github.com/cockroachdb/cockroach/pull/133997 -[#134002]: https://github.com/cockroachdb/cockroach/pull/134002 -[#134018]: https://github.com/cockroachdb/cockroach/pull/134018 -[#134086]: https://github.com/cockroachdb/cockroach/pull/134086 -[#134089]: https://github.com/cockroachdb/cockroach/pull/134089 -[#134097]: https://github.com/cockroachdb/cockroach/pull/134097 -[#134244]: https://github.com/cockroachdb/cockroach/pull/134244 -[#134267]: https://github.com/cockroachdb/cockroach/pull/134267 -[#134277]: https://github.com/cockroachdb/cockroach/pull/134277 -[#134367]: https://github.com/cockroachdb/cockroach/pull/134367 -[#134427]: https://github.com/cockroachdb/cockroach/pull/134427 -[#134444]: https://github.com/cockroachdb/cockroach/pull/134444 -[#134448]: https://github.com/cockroachdb/cockroach/pull/134448 -[#134482]: https://github.com/cockroachdb/cockroach/pull/134482 -[#134486]: https://github.com/cockroachdb/cockroach/pull/134486 -[#134510]: https://github.com/cockroachdb/cockroach/pull/134510 -[#134536]: https://github.com/cockroachdb/cockroach/pull/134536 -[#134542]: https://github.com/cockroachdb/cockroach/pull/134542 -[#134645]: https://github.com/cockroachdb/cockroach/pull/134645 -[#134674]: https://github.com/cockroachdb/cockroach/pull/134674 -[#134675]: https://github.com/cockroachdb/cockroach/pull/134675 -[#134610]: https://github.com/cockroachdb/cockroach/pull/134610 -[#134944]: https://github.com/cockroachdb/cockroach/pull/134944 +- Upgraded to Go 1.22.8 #134427 + diff --git a/src/current/_includes/releases/v24.3/v24.3.0.md b/src/current/_includes/releases/v24.3/v24.3.0.md index 5d8cdf074ef..e4df872f2dd 100644 --- a/src/current/_includes/releases/v24.3/v24.3.0.md +++ b/src/current/_includes/releases/v24.3/v24.3.0.md @@ -86,32 +86,32 @@ Changes to [cluster settings]({% link v24.3/cluster-settings.md %}) should be re
Settings added
-- `goschedstats.always_use_short_sample_period.enabled`: when set to `true`, helps to prevent unnecessary queueing due to CPU [admission control]({% link v24.3/admission-control.md %}) by forcing `1ms` sampling of runnable queue lengths. The default value is `false`. [#133585][#133585] +- `goschedstats.always_use_short_sample_period.enabled`: when set to `true`, helps to prevent unnecessary queueing due to CPU [admission control]({% link v24.3/admission-control.md %}) by forcing `1ms` sampling of runnable queue lengths. The default value is `false`. #133585 -- `kv.range.range_size_hard_cap`: allows you to limit how large a [range]({% link v24.3/architecture/overview.md %}#architecture-range) can grow before [backpressure]({% link v24.3/common-errors.md %}#split-failed-while-applying-backpressure-are-rows-updated-in-a-tight-loop) is applied. This can help to mitigate against a situation where a range cannot be split, such as when a range is comprised of a single key due to an issue with the schema or workload pattern, or a bug in client application code. The default is `8 GiB`, 16 times the default maximum range size. If you have changed the maximum range size, you may need to adjust this cluster setting or reduce the range size. [#129450][#129450] +- `kv.range.range_size_hard_cap`: allows you to limit how large a [range]({% link v24.3/architecture/overview.md %}#architecture-range) can grow before [backpressure]({% link v24.3/common-errors.md %}#split-failed-while-applying-backpressure-are-rows-updated-in-a-tight-loop) is applied. This can help to mitigate against a situation where a range cannot be split, such as when a range is comprised of a single key due to an issue with the schema or workload pattern, or a bug in client application code. The default is `8 GiB`, 16 times the default maximum range size. If you have changed the maximum range size, you may need to adjust this cluster setting or reduce the range size. 
#129450 -- `kvadmission.flow_controller.token_reset_epoch`: can be used to refill replication [admission control]({% link v24.3/admission-control.md %}) v2 tokens. This setting is marked as `reserved`, as it is not supported for tuning, by default. Use it only after consultation with your account team. [#133294][#133294] +- `kvadmission.flow_controller.token_reset_epoch`: can be used to refill replication [admission control]({% link v24.3/admission-control.md %}) v2 tokens. This setting is marked as `reserved`, as it is not supported for tuning, by default. Use it only after consultation with your account team. #133294 -- `kvadmission.store.snapshot_ingest_bandwidth_control.enabled`: enables a new [Admission Control]({% link v24.3/admission-control.md %}) integration for pacing snapshot ingest traffic based on disk bandwidth. It requires provisioned bandwidth to be set for the store, or the cluster through the setting `kvadmission.store.provisioned_bandwidth`, for it to take effect. [#131243][#131243] +- `kvadmission.store.snapshot_ingest_bandwidth_control.enabled`: enables a new [Admission Control]({% link v24.3/admission-control.md %}) integration for pacing snapshot ingest traffic based on disk bandwidth. It requires provisioned bandwidth to be set for the store, or the cluster through the setting `kvadmission.store.provisioned_bandwidth`, for it to take effect. #131243 - Settings have been added which control the refresh behavior for the cached data in the Databases page of the [DB Console]({% link v24.3/ui-overview.md %}): - `obs.tablemetadatacache.data_valid_duration`: the duration for which the data in `system.table_metadata` is considered valid before a cache reset will occur. Default: 20 minutes. - `obs.tablemetadatacache.automatic_updates.enabled`: whether to automatically update the cache according the validity interval. Default: `false`. 
- [#130198][#130198] + #130198 -- `server.jwt_authentication.client.timeout`: the HTTP client timeout for external calls made during [JWT authentication]({% link v24.3/sso-sql.md %}). [#127145][#127145] +- `server.jwt_authentication.client.timeout`: the HTTP client timeout for external calls made during [JWT authentication]({% link v24.3/sso-sql.md %}). #127145 - Partial [statistics]({% link v24.3/cost-based-optimizer.md %}#table-statistics) can now be automatically collected at the extremes of indexes when a certain fraction and minimum number of rows are stale (by default 5% and 100%, respectively). These can be configured with new [table storage parameters]({% link v24.3/alter-table.md %}#set-and-reset-storage-parameters) and [cluster settings]({% link v24.3/cluster-settings.md %}): - `sql.stats.automatic_partial_collection.enabled` (table parameter `sql_stats_automatic_partial_collection_enabled`) - both default to `false`. - `sql.stats.automatic_partial_collection.min_stale_rows` (table parameter `sql_stats_automatic_partial_collection_min_stale_rows`) - both default to `100`. - `sql.stats.automatic_partial_collection.fraction_stale_rows` (table parameter `sql_stats_automatic_partial_collection_fraction_stale_rows`) - both default to `0.05`. - [#93067][#93067] + #93067 -- `sql.stats.histogram_buckets.include_most_common_values.enabled`: controls whether common values are included in [histogram collection]({% link v24.3/cost-based-optimizer.md %}#control-histogram-collection) for use by the [optimizer]({% link v24.3/cost-based-optimizer.md %}). When enabled, histogram buckets will represent the most common sampled values as upper bounds. [#129378][#129378] +- `sql.stats.histogram_buckets.include_most_common_values.enabled`: controls whether common values are included in [histogram collection]({% link v24.3/cost-based-optimizer.md %}#control-histogram-collection) for use by the [optimizer]({% link v24.3/cost-based-optimizer.md %}). 
When enabled, histogram buckets will represent the most common sampled values as upper bounds. #129378 -- `sql.stats.histogram_buckets.max_fraction_most_common_values`: controls the fraction of buckets that can be adjusted to include common values. Defaults to `0.1`. [#129378][#129378] +- `sql.stats.histogram_buckets.max_fraction_most_common_values`: controls the fraction of buckets that can be adjusted to include common values. Defaults to `0.1`. #129378 - `sql.txn.repeatable_read_isolation.enabled`: defaults to `false`. When set to `true`, the following statements configure transactions to run under `REPEATABLE READ` isolation, rather than being automatically interpreted as [`SERIALIZABLE`]({% link v24.3/demo-serializable.md %}): - `BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ` @@ -131,7 +131,7 @@ Changes to [cluster settings]({% link v24.3/cluster-settings.md %}) should be re - When running `SHOW CLUSTER SETTING`, the displayed setting values will depend on the node's number of vCPUs. - Contact Support if the number of `distsender.batches.async.throttled` requests is persistently greater than zero. - [#131226][#131226] + #131226 - The default for `server.oidc_authentication.client.timeout`, which sets the client timeout for external calls made during OIDC authentication, has changed from `30s` to `15s`. @@ -144,9 +144,9 @@ The following settings are now marked `public` after previously being `reserved` 1. a provider or protocol (`azure`, `gs`, `s3`, `http`, `nodelocal`, `userfile`, or `nullsink`) 1. `read` or `write` 1. `node_burst_limit` or `node_rate_limit` - - For example, `cloudstorage.s3.write.node_burst_limit`. [#127207][#127207] + - For example, `cloudstorage.s3.write.node_burst_limit`. #127207 -- JWT authentication have been made `public`. [#128170][#128170] +- JWT authentication have been made `public`. 
#128170 - `server.jwt_authentication.audience` - `server.jwt_authentication.claim` - `server.jwt_authentication.enabled` @@ -161,13 +161,13 @@ The following settings are now marked `public` after previously being `reserved`
Additional cluster setting changes
-- The setting `server.host_based_authentication.configuration` now supports LDAP configuration, and its value is now redacted for non-admin users when the `server.redact_sensitive_settings.enabled` is set to `true`. [#131150][#131150] +- The setting `server.host_based_authentication.configuration` now supports LDAP configuration, and its value is now redacted for non-admin users when the `server.redact_sensitive_settings.enabled` is set to `true`. #131150 -- The settings [`enterprise.license`]({% link v24.3/cluster-settings.md %}#setting-enterprise-license) and [`diagnostics.reporting.enabled`]({% link v24.3/cluster-settings.md %}#setting-diagnostics-reporting-enabled) now have additional validation. To disable diagnostics reporting, the cluster must also have a license that is not an [{{ site.data.products.enterprise }} Trial or {{ site.data.products.enterprise }} Free license]({% link v24.3/licensing-faqs.md %}#types-of-licenses). Additionally, to set one of these licenses, the cluster must already be submitting diagnostics information. [#131097][#131097] [#132257][#132257] +- The settings [`enterprise.license`]({% link v24.3/cluster-settings.md %}#setting-enterprise-license) and [`diagnostics.reporting.enabled`]({% link v24.3/cluster-settings.md %}#setting-diagnostics-reporting-enabled) now have additional validation. To disable diagnostics reporting, the cluster must also have a license that is not an [{{ site.data.products.enterprise }} Trial or {{ site.data.products.enterprise }} Free license]({% link v24.3/licensing-faqs.md %}#types-of-licenses). Additionally, to set one of these licenses, the cluster must already be submitting diagnostics information. #131097 #132257 -- `sql.defaults.vectorize` now supports the value `1` (in addition to `0` and `2`) to indicate `on`, to address a bug that could cause new connections to fail after an upgrade with a message referencing an `invalid value for parameter "vectorize": "unknown(1)"`. 
[#133371][#133371] +- `sql.defaults.vectorize` now supports the value `1` (in addition to `0` and `2`) to indicate `on`, to address a bug that could cause new connections to fail after an upgrade with a message referencing an `invalid value for parameter "vectorize": "unknown(1)"`. #133371 -- The description of the setting [`changefeed.sink_io_workers`]({% link v24.3/cluster-settings.md %}#setting-changefeed-sink-io-workers) has been updated to reflect all of the [sinks]({% link v24.3/changefeed-sinks.md %}) that support the setting: the batching versions of webhook, pubsub, and kafka sinks that are enabled by `changefeed.new__sink_enabled`. [#129946][#129946] +- The description of the setting [`changefeed.sink_io_workers`]({% link v24.3/cluster-settings.md %}#setting-changefeed-sink-io-workers) has been updated to reflect all of the [sinks]({% link v24.3/changefeed-sinks.md %}) that support the setting: the batching versions of webhook, pubsub, and kafka sinks that are enabled by `changefeed.new__sink_enabled`. #129946 @@ -193,4 +193,3 @@ Docs | [SQL Feature Support]({% link v24.3/sql-feature-support.m Docs | [Change Data Capture Overview]({% link v24.3/change-data-capture-overview.md %}) | This page summarizes CockroachDB's data streaming capabilities. Change data capture (CDC) provides efficient, distributed, row-level changefeeds into a configurable sink for downstream processing such as reporting, caching, or full-text indexing. Docs | [Backup Architecture]({% link v24.3/backup-architecture.md %}) | This page describes the backup job workflow with a high-level overview, diagrams, and more details on each phase of the job. 
-[#115166]: https://github.com/cockroachdb/cockroach/pull/115166 diff --git a/src/current/_includes/releases/v24.3/v24.3.1.md b/src/current/_includes/releases/v24.3/v24.3.1.md index 4db85eb32a8..9a4ad7ddde0 100644 --- a/src/current/_includes/releases/v24.3/v24.3.1.md +++ b/src/current/_includes/releases/v24.3/v24.3.1.md @@ -6,73 +6,40 @@ Release Date: December 12, 2024

SQL language changes

-- When triggers fire one another cyclically, the new `recursion_depth_limit` setting now limits the depth of the recursion. By default, the limit is `1000` nested trigger executions. [#135046][#135046] +- When triggers fire one another cyclically, the new `recursion_depth_limit` setting now limits the depth of the recursion. By default, the limit is `1000` nested trigger executions. #135046

Operational changes

-- The metrics scrape HTTP endpoint at `/ _status/vars` will now truncate HELP text at the first sentence, reducing the metadata for metrics with large descriptions. Customers can still access these descriptions via our docs. [#135021][#135021] -- The row-level TTL job now periodically updates the progress meter in the jobs introspection interfaces, including `SHOW JOBS` and the Jobs page in the DB console. [#135171][#135171] -- Telemetry delivery is now considered successful even in cases where we experience a network timeout. This will prevent throttling in cases outside an operator's control. [#136481][#136481] +- The metrics scrape HTTP endpoint at `/_status/vars` will now truncate HELP text at the first sentence, reducing the metadata for metrics with large descriptions. Customers can still access these descriptions via our docs. #135021 +- The row-level TTL job now periodically updates the progress meter in the jobs introspection interfaces, including `SHOW JOBS` and the Jobs page in the DB console. #135171 +- Telemetry delivery is now considered successful even in cases where we experience a network timeout. This will prevent throttling in cases outside an operator's control. #136481

DB Console changes

-- When activating statement diagnostics in the DB Console, users now have the option to produce a redacted bundle as output. This bundle will omit sensitive data. [#134993][#134993] +- When activating statement diagnostics in the DB Console, users now have the option to produce a redacted bundle as output. This bundle will omit sensitive data. #134993

Other changes

-- Protected timestamp records for changefeeds now include the `system.users` table. This ensures that user information remains available when running CDC queries against historical data. [#134238][#134238] +- Protected timestamp records for changefeeds now include the `system.users` table. This ensures that user information remains available when running CDC queries against historical data. #134238

Bug fixes

-- Fixed a bug that could cause `DELETE` triggers not to fire on cascading delete, and which could cause `INSERT` triggers to match incorrectly in the same scenario. [#134896][#134896] -- Reduced the duration of partitions in the gossip network when a node crashes in order to eliminate false positives in the `ranges.unavailable` metric. [#134480][#134480] -- When a non-admin user runs `DROP ROLE IF EXISTS` on a user that does not exist, an error is no longer returned. [#134970][#134970] -- Fixed a bug that could cause incorrect query results when the optimizer planned a lookup join on an index containing a column of type `CHAR(N)`, `VARCHAR(N)`, `BIT(N)`, `VARBIT(N)`, or `DECIMAL(M, N)`, and the query held that column constant to a single value (e.g. with an equality filter). [#135037][#135037] -- Fixed an unhandled error that would occur if `DROP SCHEMA` was executed on the `public` schema of the `system` database, or on an internal schema like `pg_catalog` or `information_schema`. [#135181][#135181] -- A bug has been fixed that caused incorrect evaluation of some binary expressions involving `CHAR(N)` values and untyped string literals with trailing whitespace characters. For example, the expression `'f'::CHAR = 'f '` now correctly evaluates to `true`. [#134710][#134710] -- Prevent `ALTER DATABASE` operations that modify the zone config from hanging if an invalid zone config already exists. [#135216][#135216] -- `CREATE SCHEMA` now returns the correct error if a schema name is missing. [#135928][#135928] -- `percentile_cont` and `percentile_disc` aggregate functions now support `float4` data type inputs. Previously, these functions would return an error when used with `float4` values. [#135764][#135764] -- `security.certificate.*` metrics now update correctly when certificates are reloaded during node runtime. Previously, these metrics would not reflect changes to certificates after node startup. 
[#136227][#136227] -- SQL roles created from LDAP groups that contain periods (.) or hyphens (-) in their Common Names (CN) no longer result in authorization failures. [#134942][#134942] -- LDAP authorization now supports partial group mapping, allowing users to authenticate even when some LDAP groups do not have corresponding CockroachDB roles. Previously, authentication would fail if any LDAP group lacked a matching database role. [#135587][#135587] -- Regional by row tables with uniqueness constraints where the region is not part of those uniqueness constraints and which also contain non-unique indices will now have those constraints properly enforced when modified at `READ COMMITTED` isolation. This bug was introduced in v24.3.0. [#137367][#137367] +- Fixed a bug that could cause `DELETE` triggers not to fire on cascading delete, and which could cause `INSERT` triggers to match incorrectly in the same scenario. #134896 +- Reduced the duration of partitions in the gossip network when a node crashes in order to eliminate false positives in the `ranges.unavailable` metric. #134480 +- When a non-admin user runs `DROP ROLE IF EXISTS` on a user that does not exist, an error is no longer returned. #134970 +- Fixed a bug that could cause incorrect query results when the optimizer planned a lookup join on an index containing a column of type `CHAR(N)`, `VARCHAR(N)`, `BIT(N)`, `VARBIT(N)`, or `DECIMAL(M, N)`, and the query held that column constant to a single value (e.g. with an equality filter). #135037 +- Fixed an unhandled error that would occur if `DROP SCHEMA` was executed on the `public` schema of the `system` database, or on an internal schema like `pg_catalog` or `information_schema`. #135181 +- A bug has been fixed that caused incorrect evaluation of some binary expressions involving `CHAR(N)` values and untyped string literals with trailing whitespace characters. For example, the expression `'f'::CHAR = 'f '` now correctly evaluates to `true`. 
#134710 +- Prevent `ALTER DATABASE` operations that modify the zone config from hanging if an invalid zone config already exists. #135216 +- `CREATE SCHEMA` now returns the correct error if a schema name is missing. #135928 +- `percentile_cont` and `percentile_disc` aggregate functions now support `float4` data type inputs. Previously, these functions would return an error when used with `float4` values. #135764 +- `security.certificate.*` metrics now update correctly when certificates are reloaded during node runtime. Previously, these metrics would not reflect changes to certificates after node startup. #136227 +- SQL roles created from LDAP groups that contain periods (.) or hyphens (-) in their Common Names (CN) no longer result in authorization failures. #134942 +- LDAP authorization now supports partial group mapping, allowing users to authenticate even when some LDAP groups do not have corresponding CockroachDB roles. Previously, authentication would fail if any LDAP group lacked a matching database role. #135587 +- Regional by row tables with uniqueness constraints where the region is not part of those uniqueness constraints and which also contain non-unique indices will now have those constraints properly enforced when modified at `READ COMMITTED` isolation. This bug was introduced in v24.3.0. #137367

Performance improvements

-- The `_status/nodes_ui` API no longer returns unnecessary metrics in its response. This decreases the payload size of the API and improves the load time of various DB Console pages and components. [#135209][#135209] -- PL/pgSQL loops now execute up to 3-4x faster through improved optimization, particularly when they contain subqueries. This enhancement improves performance for routines with many iterations or nested operations. [#135648][#135648] +- The `_status/nodes_ui` API no longer returns unnecessary metrics in its response. This decreases the payload size of the API and improves the load time of various DB Console pages and components. #135209 +- PL/pgSQL loops now execute up to 3-4x faster through improved optimization, particularly when they contain subqueries. This enhancement improves performance for routines with many iterations or nested operations. #135648 -[#133230]: https://github.com/cockroachdb/cockroach/pull/133230 -[#134238]: https://github.com/cockroachdb/cockroach/pull/134238 -[#134480]: https://github.com/cockroachdb/cockroach/pull/134480 -[#134710]: https://github.com/cockroachdb/cockroach/pull/134710 -[#134729]: https://github.com/cockroachdb/cockroach/pull/134729 -[#134896]: https://github.com/cockroachdb/cockroach/pull/134896 -[#134942]: https://github.com/cockroachdb/cockroach/pull/134942 -[#134970]: https://github.com/cockroachdb/cockroach/pull/134970 -[#134993]: https://github.com/cockroachdb/cockroach/pull/134993 -[#135021]: https://github.com/cockroachdb/cockroach/pull/135021 -[#135037]: https://github.com/cockroachdb/cockroach/pull/135037 -[#135046]: https://github.com/cockroachdb/cockroach/pull/135046 -[#135094]: https://github.com/cockroachdb/cockroach/pull/135094 -[#135120]: https://github.com/cockroachdb/cockroach/pull/135120 -[#135171]: https://github.com/cockroachdb/cockroach/pull/135171 -[#135181]: https://github.com/cockroachdb/cockroach/pull/135181 -[#135209]: https://github.com/cockroachdb/cockroach/pull/135209 
-[#135216]: https://github.com/cockroachdb/cockroach/pull/135216 -[#135587]: https://github.com/cockroachdb/cockroach/pull/135587 -[#135648]: https://github.com/cockroachdb/cockroach/pull/135648 -[#135764]: https://github.com/cockroachdb/cockroach/pull/135764 -[#135928]: https://github.com/cockroachdb/cockroach/pull/135928 -[#136011]: https://github.com/cockroachdb/cockroach/pull/136011 -[#136227]: https://github.com/cockroachdb/cockroach/pull/136227 -[#136481]: https://github.com/cockroachdb/cockroach/pull/136481 -[#137367]: https://github.com/cockroachdb/cockroach/pull/137367 -[0d7f6eed3]: https://github.com/cockroachdb/cockroach/commit/0d7f6eed3 -[1f2b1b084]: https://github.com/cockroachdb/cockroach/commit/1f2b1b084 -[3cbd07fbd]: https://github.com/cockroachdb/cockroach/commit/3cbd07fbd -[3f5305a4c]: https://github.com/cockroachdb/cockroach/commit/3f5305a4c -[965dded2a]: https://github.com/cockroachdb/cockroach/commit/965dded2a -[989a49c3f]: https://github.com/cockroachdb/cockroach/commit/989a49c3f -[9951e3e61]: https://github.com/cockroachdb/cockroach/commit/9951e3e61 diff --git a/src/current/_includes/releases/v24.3/v24.3.10.md b/src/current/_includes/releases/v24.3/v24.3.10.md index 02bf01d5a16..5f26fc79fd9 100644 --- a/src/current/_includes/releases/v24.3/v24.3.10.md +++ b/src/current/_includes/releases/v24.3/v24.3.10.md @@ -7,7 +7,6 @@ Release Date: April 9, 2025

Bug fixes

- Fixed a bug that could cause a stack overflow during execution of a prepared statement that invoked a PL/pgSQL routine with a loop. The bug existed in versions v23.2.22, v24.1.15, v24.3.9, v25.1.2, v25.1.3, and pre-release versions of v25.2 prior to v25.2.0-alpha.3. - [#144060][#144060] + #144060 -[#144060]: https://github.com/cockroachdb/cockroach/pull/144060 diff --git a/src/current/_includes/releases/v24.3/v24.3.11.md b/src/current/_includes/releases/v24.3/v24.3.11.md index 83905423c01..90bf3048c26 100644 --- a/src/current/_includes/releases/v24.3/v24.3.11.md +++ b/src/current/_includes/releases/v24.3/v24.3.11.md @@ -6,6 +6,5 @@ Release Date: April 28, 2025

Bug fixes

-- Fixed a rare corruption bug that impacts import and materialized views. [#144661][#144661] +- Fixed a rare corruption bug that impacts `IMPORT` and materialized views. #144661 -[#144661]: https://github.com/cockroachdb/cockroach/pull/144661 diff --git a/src/current/_includes/releases/v24.3/v24.3.12.md b/src/current/_includes/releases/v24.3/v24.3.12.md index 66b169eeff6..a57e818a29f 100644 --- a/src/current/_includes/releases/v24.3/v24.3.12.md +++ b/src/current/_includes/releases/v24.3/v24.3.12.md @@ -10,55 +10,33 @@ Release Date: April 30, 2025

SQL language changes

-- Added the `WITH IGNORE_FOREIGN_KEYS` option to `SHOW CREATE TABLE` which omits foreign key constraints from the output schema. This option is also allowed in `SHOW CREATE VIEW`, but has no effect. It cannot be combined with the `WITH REDACT` option. [#142162][#142162] -- `EXPLAIN ANALYZE` statements now display the number of transaction retries and time spent retrying, if non-zero, in the plan output. [#142929][#142929] -- A new `execution time` statistic is now reported on `EXPLAIN ANALYZE` output for most operators. Previously, this statistic was only available on the DistSQL diagrams in `EXPLAIN ANALYZE (DISTSQL)` output. [#143897][#143897] +- Added the `WITH IGNORE_FOREIGN_KEYS` option to `SHOW CREATE TABLE` which omits foreign key constraints from the output schema. This option is also allowed in `SHOW CREATE VIEW`, but has no effect. It cannot be combined with the `WITH REDACT` option. #142162 +- `EXPLAIN ANALYZE` statements now display the number of transaction retries and time spent retrying, if non-zero, in the plan output. #142929 +- A new `execution time` statistic is now reported on `EXPLAIN ANALYZE` output for most operators. Previously, this statistic was only available on the DistSQL diagrams in `EXPLAIN ANALYZE (DISTSQL)` output. #143897

Operational changes

-- Added the cluster setting `server.child_metrics.include_aggregate.enabled` (default: `true`) that controls the behavior of Prometheus child metrics reporting (`/_status/vars`). When set to `true`, child metrics include an aggregate time series, maintaining the existing behavior. When set to `false`, it stops reporting the aggregate time series, preventing double counting when querying metrics. [#142745][#142745] -- The `sys.cpu.host.combined.percent-normalized` metric has been updated to include additional counters for more accurate host CPU measurement and to reduce underreporting. It now accounts for time spent processing hardware (`irq`) and software (`softirq`) interrupts, as well as `nice` time, which represents low-priority user-mode activity. [#142905][#142905] -- The `server.client_cert_expiration_cache.capacity` cluster setting has been removed. The `security.certificate.expiration.client` and `security.certificate.ttl.client` metrics now report the lowest value observed for a user in the last 24 hours. [#143591][#143591] -- SQL queries run on the source cluster by logical data replication (LDR) and physical cluster replication (PCR) will account to internal metrics like `sql.statements.active.internal` instead of the metrics like `sql.statements.active` that are used to monitor application workload. [#145115][#145115] +- Added the cluster setting `server.child_metrics.include_aggregate.enabled` (default: `true`) that controls the behavior of Prometheus child metrics reporting (`/_status/vars`). When set to `true`, child metrics include an aggregate time series, maintaining the existing behavior. When set to `false`, it stops reporting the aggregate time series, preventing double counting when querying metrics. #142745 +- The `sys.cpu.host.combined.percent-normalized` metric has been updated to include additional counters for more accurate host CPU measurement and to reduce underreporting. 
It now accounts for time spent processing hardware (`irq`) and software (`softirq`) interrupts, as well as `nice` time, which represents low-priority user-mode activity. #142905 +- The `server.client_cert_expiration_cache.capacity` cluster setting has been removed. The `security.certificate.expiration.client` and `security.certificate.ttl.client` metrics now report the lowest value observed for a user in the last 24 hours. #143591 +- SQL queries run on the source cluster by logical data replication (LDR) and physical cluster replication (PCR) will account to internal metrics like `sql.statements.active.internal` instead of the metrics like `sql.statements.active` that are used to monitor application workload. #145115

Bug fixes

-- Fixed a bug in client certificate expiration metrics, `security.certificate.expiration.client` and `security.certificate.ttl.client`. [#142843][#142843] -- Fast failback could succeed even if the standby cluster protected timestamp had been removed, causing the reverse physical cluster replication (PCR) stream to enter a crashing loop. This patch ensures the failback command fast fails. [#143078][#143078] -- Fixed a bug that caused changefeeds to fail on startup when scanning a single key. [#143149][#143149] -- MVCC garbage collection is now fully subject to IO admission control. Previously, it was possible for MVCC GC to cause store overload (such as LSM inversion) when a large amount of data would become eligible for garbage collection. Should any issues arise from subjecting MVCC GC to admission control, the `kv.mvcc_gc.queue_kv_admission_control.enabled` cluster setting can be set to `false` to restore the previous behavior. [#143276][#143276] -- Fixed a bug where calling a stored procedure could drop the procedure if it had `OUT` parameters that were not used by the calling routine. This bug had existed since PL/pgSQL `CALL` statements were introduced in v24.1. [#143289][#143289] -- Fixed a bug where CockroachDB would encounter an internal error when decoding the gists of plans with `CALL` statements. The bug had been present since v23.2. [#143314][#143314] -- The reader virtual cluster now starts if the user begins a physical cluster replication (PCR) stream from a cursor via `ALTER VIRTUAL CLUSTER virtual_cluster START REPLICATION OF virtual_cluster ON pgurl_physical_cluster WITH READ VIRTUAL CLUSTER`. [#143369][#143369] -- Fixed a crash due to "use of enum metadata before hydration" when using logical data replication (LDR) with user-defined types (UDTs). [#143376][#143376] -- Fixed a potential deadlock that could occur during client certificate updates while metrics were being collected. 
This issue affected the reliability of certificate expiration reporting. [#143591][#143591] -- Fixed a bug in v24.1.14, v24.3.7, v24.3.8, and v25.1 that could cause a nil-pointer error when a column's default expression contained a volatile expression (like `nextval`) as a UDF argument. [#143636][#143636] -- Previously, the fields `maximum memory usage` and `max sql temp disk usage` in the `EXPLAIN ANALYZE` output could be under-reported for distributed plans when memory-intensive operations were fully performed on the remote nodes. This is now fixed. The bug existed in v22.1 and later. [#143793][#143793] -- The `ALTER VIRTUAL CLUSTER SET REPLICATION READ VIRTUAL CLUSTER` syntax is now supported for adding a reader virtual cluster for an existing PCR standby. [#143905][#143905] -- Previously, whenever CockroachDB collected a statement bundle when plan-gist-based matching was used, `plan.txt` would be incomplete. This is now fixed. The bug had been present since the introduction of plan-gist-based matching in v23.1, and was partially addressed in v24.2. [#143935][#143935] -- Fixed a bug where CockroachDB could encounter a `cannot specify timestamp older than ...` error during table statistics collection in some cases (e.g., when the cluster is overloaded). The bug was present since v19.1. [#144017][#144017] -- Fixed a bug that could cause a stack overflow during execution of a prepared statement that invoked a PL/pgSQL routine with a loop. The bug existed in versions v23.2.22, v24.1.15, v24.3.9, v25.1.2, v25.1.3, and pre-release versions of v25.2 prior to v25.2.0-alpha.3. [#144030][#144030] -- Fixed a rare corruption bug that impacts `IMPORT` and materialized views. [#144688][#144688] +- Fixed a bug in client certificate expiration metrics, `security.certificate.expiration.client` and `security.certificate.ttl.client`. 
#142843 +- Fast failback could succeed even if the standby cluster protected timestamp had been removed, causing the reverse physical cluster replication (PCR) stream to enter a crashing loop. This patch ensures the failback command fast fails. #143078 +- Fixed a bug that caused changefeeds to fail on startup when scanning a single key. #143149 +- MVCC garbage collection is now fully subject to IO admission control. Previously, it was possible for MVCC GC to cause store overload (such as LSM inversion) when a large amount of data would become eligible for garbage collection. Should any issues arise from subjecting MVCC GC to admission control, the `kv.mvcc_gc.queue_kv_admission_control.enabled` cluster setting can be set to `false` to restore the previous behavior. #143276 +- Fixed a bug where calling a stored procedure could drop the procedure if it had `OUT` parameters that were not used by the calling routine. This bug had existed since PL/pgSQL `CALL` statements were introduced in v24.1. #143289 +- Fixed a bug where CockroachDB would encounter an internal error when decoding the gists of plans with `CALL` statements. The bug had been present since v23.2. #143314 +- The reader virtual cluster now starts if the user begins a physical cluster replication (PCR) stream from a cursor via `ALTER VIRTUAL CLUSTER virtual_cluster START REPLICATION OF virtual_cluster ON pgurl_physical_cluster WITH READ VIRTUAL CLUSTER`. #143369 +- Fixed a crash due to "use of enum metadata before hydration" when using logical data replication (LDR) with user-defined types (UDTs). #143376 +- Fixed a potential deadlock that could occur during client certificate updates while metrics were being collected. This issue affected the reliability of certificate expiration reporting. #143591 +- Fixed a bug in v24.1.14, v24.3.7, v24.3.8, and v25.1 that could cause a nil-pointer error when a column's default expression contained a volatile expression (like `nextval`) as a UDF argument. 
#143636 +- Previously, the fields `maximum memory usage` and `max sql temp disk usage` in the `EXPLAIN ANALYZE` output could be under-reported for distributed plans when memory-intensive operations were fully performed on the remote nodes. This is now fixed. The bug existed in v22.1 and later. #143793 +- The `ALTER VIRTUAL CLUSTER SET REPLICATION READ VIRTUAL CLUSTER` syntax is now supported for adding a reader virtual cluster for an existing PCR standby. #143905 +- Previously, whenever CockroachDB collected a statement bundle when plan-gist-based matching was used, `plan.txt` would be incomplete. This is now fixed. The bug had been present since the introduction of plan-gist-based matching in v23.1, and was partially addressed in v24.2. #143935 +- Fixed a bug where CockroachDB could encounter a `cannot specify timestamp older than ...` error during table statistics collection in some cases (e.g., when the cluster is overloaded). The bug was present since v19.1. #144017 +- Fixed a bug that could cause a stack overflow during execution of a prepared statement that invoked a PL/pgSQL routine with a loop. The bug existed in versions v23.2.22, v24.1.15, v24.3.9, v25.1.2, v25.1.3, and pre-release versions of v25.2 prior to v25.2.0-alpha.3. #144030 +- Fixed a rare corruption bug that impacts `IMPORT` and materialized views. 
#144688 -[#144030]: https://github.com/cockroachdb/cockroach/pull/144030 -[#143314]: https://github.com/cockroachdb/cockroach/pull/143314 -[#143905]: https://github.com/cockroachdb/cockroach/pull/143905 -[#142905]: https://github.com/cockroachdb/cockroach/pull/142905 -[#143591]: https://github.com/cockroachdb/cockroach/pull/143591 -[#143276]: https://github.com/cockroachdb/cockroach/pull/143276 -[#143289]: https://github.com/cockroachdb/cockroach/pull/143289 -[#143369]: https://github.com/cockroachdb/cockroach/pull/143369 -[#143793]: https://github.com/cockroachdb/cockroach/pull/143793 -[#142162]: https://github.com/cockroachdb/cockroach/pull/142162 -[#142745]: https://github.com/cockroachdb/cockroach/pull/142745 -[#143935]: https://github.com/cockroachdb/cockroach/pull/143935 -[#144688]: https://github.com/cockroachdb/cockroach/pull/144688 -[#144017]: https://github.com/cockroachdb/cockroach/pull/144017 -[#145115]: https://github.com/cockroachdb/cockroach/pull/145115 -[#142929]: https://github.com/cockroachdb/cockroach/pull/142929 -[#143149]: https://github.com/cockroachdb/cockroach/pull/143149 -[#143078]: https://github.com/cockroachdb/cockroach/pull/143078 -[#143376]: https://github.com/cockroachdb/cockroach/pull/143376 -[#143636]: https://github.com/cockroachdb/cockroach/pull/143636 -[#143897]: https://github.com/cockroachdb/cockroach/pull/143897 -[#142843]: https://github.com/cockroachdb/cockroach/pull/142843 diff --git a/src/current/_includes/releases/v24.3/v24.3.13.md b/src/current/_includes/releases/v24.3/v24.3.13.md index 4002919b8b8..101be532cf5 100644 --- a/src/current/_includes/releases/v24.3/v24.3.13.md +++ b/src/current/_includes/releases/v24.3/v24.3.13.md @@ -7,8 +7,7 @@ Release Date: May 15, 2025

Operational changes

- The default value of the `admission.l0_file_count_overload_threshold` cluster setting is now `4000`. This change improves stability under high write load and during Write-Ahead Log (WAL) failover by addressing token exhaustion. - [#146597][#146597] + #146597 -[#146597]: https://github.com/cockroachdb/cockroach/pull/146597 diff --git a/src/current/_includes/releases/v24.3/v24.3.14.md b/src/current/_includes/releases/v24.3/v24.3.14.md index a2a79c23faa..5d89c504b54 100644 --- a/src/current/_includes/releases/v24.3/v24.3.14.md +++ b/src/current/_includes/releases/v24.3/v24.3.14.md @@ -7,75 +7,54 @@ Release Date: May 28, 2025

Operational changes

- Changed the default value of the cluster setting `admission.l0_file_count_overload_threshold` to `4000`. - [#145919][#145919] + #145919 - SQL queries run on the source cluster by logical data replication (LDR) and physical cluster replication (PCR) will account to internal metrics like `sql.statements.active.internal` instead of the metrics like `sql.statements.active` that are used to monitor application workload. - [#145114][#145114] + #145114

DB Console changes

- Schema insights that recommend replacing an index were previously a two-statement command consisting of a `CREATE INDEX` and a `DROP INDEX` statement. When these two DDL statements were run as a single batched command, it was possible for one statement to succeed and one to fail. This is because DDL statements do not have the same atomicity guarantees as other SQL statements in CockroachDB. Index-replacement insights are now a single `CREATE INDEX` statement followed by a comment with additional DDL statements to be run manually: an `ALTER INDEX ... NOT VISIBLE` statement, which makes the old index invisible to the optimizer, followed by a `DROP INDEX` statement that should only be run after making the old index invisible and verifying that workload performance is satisfactory. - [#145988][#145988] + #145988

Bug fixes

- Fixed a bug where using values `changefeed.aggregator.flush_jitter` and `min_checkpoint_frequency` such that `changefeed.aggregator.flush_jitter * min_checkpoint_frequency < 1` would cause a panic. Jitter will now be disabled in this case. - [#144425][#144425] + #144425 - Fixed a bug in the DB Console where the **Drop unused index** tag appeared multiple times for an index on the **Indexes** tab of the table details page. - [#144652][#144652] + #144652 - Fixed the following bugs in the **Schedules** page of the DB Console: - Fixed a bug where the **Schedules** page displayed only a subset of a cluster's schedules. The **Schedules** page now correctly displays all schedules. - Fixed a bug where manually updating the `show` or `status` parameters in the URL (e.g., `http://127.0.0.1:8080/#/schedules?status=ACTIVE&show=50`) caused the **Schedules** page to fail to load. - [#144805][#144805] + #144805 - Fixed a bug in the **SQL Activity Statements** page where filtering by **Statement Type** returned no results. The filter now works as expected. - [#144854][#144854] + #144854 - Improved the performance of `SHOW CREATE TABLE` on multi-region databases with large numbers of objects. - [#145073][#145073] + #145073 - Fixed a bug where running `DROP INDEX` on a hash-sharded index did not properly detect dependencies from functions and procedures on the shard column. This caused the `DROP INDEX` statement to fail with an internal validation error. Now the statement returns a correct error message, and using `DROP INDEX ... CASCADE` works as expected by dropping the dependent functions and procedures. - [#145392][#145392] + #145392 - Fixed a bug where a node that was drained as part of decommissioning may have interrupted SQL connections that were still active during drain (and for which drain would have been expected to wait). - [#145447][#145447] + #145447 - Fixed a bug that could lead to schema changes hanging after a cluster recovered from availability issues. 
- [#145543][#145543] + #145543 - Previously, on a table with multiple column families, CockroachDB could encounter a `Non-nullable column "‹×›:‹×›" with no value` error in rare cases during table statistics collection. The bug was present since v19.2 and is now fixed. - [#145574][#145574] + #145574 - Fixed a bug that could cause a row-level TTL job to fail with the error "comparison of two different versions of enum" if an `ENUM` type referenced by the table experienced a schema change. - [#145915][#145915] + #145915 - Fixed a bug where the physical cluster replication (PCR) reader catalog job could hit validation errors when schema objects had dependencies between them (for example, when a sequence's default expression was being removed). - [#145997][#145997] + #145997 - Fixed a bug where orphaned leases were not properly cleaned up. - [#146096][#146096] + #146096 - Fixed an internal assertion failure that could occur during operations like `ALTER TYPE` or `ALTER DATABASE ... ADD REGION` when temporary tables were present. - [#146198][#146198] + #146198 - Fixed a bug that could cause queries that perform work in parallel to ignore the requested quality-of-service level. Affected operations include lookup joins, DistSQL execution, and foreign-key checks. - [#146222][#146222] + #146222 - Fixed a bug that prevented `TRUNCATE` from succeeding if any indexes on the table had back-reference dependencies, such as from a view or function referencing the index. - [#146324][#146324] + #146324 - Fixed a bug where an invalid comment in the `system.comment` table for a schema object could make it inaccessible. - [#146416][#146416] + #146416 - Fixed a bug in the rangefeed restarts metric that was introduced in v23.2. - [#133978][#133978] + #133978 - Fixed a rare corruption bug that impacts import and materialized views. 
- [#144661][#144661] + #144661 -[#144854]: https://github.com/cockroachdb/cockroach/pull/144854 -[#145915]: https://github.com/cockroachdb/cockroach/pull/145915 -[#146096]: https://github.com/cockroachdb/cockroach/pull/146096 -[#145574]: https://github.com/cockroachdb/cockroach/pull/145574 -[#145997]: https://github.com/cockroachdb/cockroach/pull/145997 -[#146198]: https://github.com/cockroachdb/cockroach/pull/146198 -[#146222]: https://github.com/cockroachdb/cockroach/pull/146222 -[#145919]: https://github.com/cockroachdb/cockroach/pull/145919 -[#144652]: https://github.com/cockroachdb/cockroach/pull/144652 -[#145073]: https://github.com/cockroachdb/cockroach/pull/145073 -[#145543]: https://github.com/cockroachdb/cockroach/pull/145543 -[#144661]: https://github.com/cockroachdb/cockroach/pull/144661 -[#144425]: https://github.com/cockroachdb/cockroach/pull/144425 -[#144805]: https://github.com/cockroachdb/cockroach/pull/144805 -[#146416]: https://github.com/cockroachdb/cockroach/pull/146416 -[#133978]: https://github.com/cockroachdb/cockroach/pull/133978 -[#145114]: https://github.com/cockroachdb/cockroach/pull/145114 -[#145988]: https://github.com/cockroachdb/cockroach/pull/145988 -[#145392]: https://github.com/cockroachdb/cockroach/pull/145392 -[#145447]: https://github.com/cockroachdb/cockroach/pull/145447 -[#146324]: https://github.com/cockroachdb/cockroach/pull/146324 diff --git a/src/current/_includes/releases/v24.3/v24.3.15.md b/src/current/_includes/releases/v24.3/v24.3.15.md index 78d6f7b4d91..fc90445dda2 100644 --- a/src/current/_includes/releases/v24.3/v24.3.15.md +++ b/src/current/_includes/releases/v24.3/v24.3.15.md @@ -7,46 +7,34 @@ Release Date: June 25, 2025

SQL language changes

- Added the metrics `sql.txn.auto_retry.count` and `sql.statements.auto_retry.count`, which count the number of automatic retries of SQL transactions and statements, respectively, within the database. These metrics differ from the related `txn.restarts.*` metrics, which count retryable errors emitted by the KV layer that must be retried. The new `sql.txn.auto_retry.count` and `sql.statements.auto_retry.count` metrics count auto-retry actions taken by the SQL layer in response to some of those retryable errors. - [#148229][#148229] + #148229 - Added a session variable `initial_retry_backoff_for_read_committed` that controls the initial backoff duration when retrying an individual statement in an explicit `READ COMMITTED` transaction. A duration of `0` disables exponential backoff. If a statement in an explicit `READ COMMITTED` transaction is failing with the `40001` error `ERROR: restart transaction: read committed retry limit exceeded; set by max_retries_for_read_committed=...`, then you should set `initial_retry_backoff_for_read_committed` to a duration proportional to the typical execution time of the statement (in addition to also increasing `max_retries_for_read_committed`). - [#148229][#148229] + #148229

Bug fixes

- Fixed a bug that could cause an `AFTER` trigger to fail with `client already committed or rolled back the transaction` if the query also contained foreign-key cascades. The bug had existed since `AFTER` triggers were introduced in v24.3. - [#146974][#146974] + #146974 - Fixed a bug that could potentially cause a changefeed to complete erroneously when one of its watched tables encounters a schema change. - [#147031][#147031] + #147031 - Fixed a bug that caused the SQL Activity > Statement Fingerprint page to fail to load details for statements run with application names containing a `#` character. - [#147221][#147221] + #147221 - Fixed a bug that could cause the `cockroach` process to `segfault` when collecting runtime execution traces (typically collected via the **Advanced Debug** page in the Console). - [#147337][#147337] + #147337 - Fixed a bug that caused the optimizer to ignore index hints when optimizing some forms of prepared statements. This could result in one of two unexpected behaviors: a query errors with the message `index cannot be used for this query` when the index can actually be used; or a query uses an index that does not adhere to the hint. The hints relevant to this bug are regular index hints, e.g., `SELECT * FROM tab@index`, `FORCE_INVERTED_INDEX`, and `FORCE_ZIGZAG`. - [#147415][#147415] + #147415 - Fixed the database filter in the DB Console's Hot Ranges page, which was broken in v24.3.3, and updated the locality filter to remove duplicate entries. - [#147444][#147444] + #147444 - Fixed a bug that could cause stable expressions to be folded in cached query plans. The bug could cause stable expressions like `current_setting` to return the wrong result if used in a prepared statement. The bug was introduced in v23.2.22, v24.1.14, v24.3.9, v25.1.2, and the v25.2 alpha. - [#147458][#147458] + #147458 - Fixed a bug where prepared statements on schema changes could fail with runtime errors. 
- [#147669][#147669] + #147669 - Fixed a bug where `ALTER TABLE` was modifying identity attributes on columns not backed by a sequence. - [#147768][#147768] -- Fixed an issue with logical data replication (LDR) where the presence of a unique index could have caused spurious dead-letter queue (DLQ) entries if the unique index had a smaller index ID than the primary key index. [#147354][#147354] + #147768 +- Fixed an issue with logical data replication (LDR) where the presence of a unique index could have caused spurious dead-letter queue (DLQ) entries if the unique index had a smaller index ID than the primary key index. #147354

Performance improvements

- TTL jobs now respond to cluster topology changes by restarting and rebalancing across available nodes. - [#147211][#147211] - -[#147031]: https://github.com/cockroachdb/cockroach/pull/147031 -[#147221]: https://github.com/cockroachdb/cockroach/pull/147221 -[#147337]: https://github.com/cockroachdb/cockroach/pull/147337 -[#147415]: https://github.com/cockroachdb/cockroach/pull/147415 -[#147444]: https://github.com/cockroachdb/cockroach/pull/147444 -[#147669]: https://github.com/cockroachdb/cockroach/pull/147669 -[#147211]: https://github.com/cockroachdb/cockroach/pull/147211 -[#148229]: https://github.com/cockroachdb/cockroach/pull/148229 -[#146974]: https://github.com/cockroachdb/cockroach/pull/146974 -[#147458]: https://github.com/cockroachdb/cockroach/pull/147458 -[#147768]: https://github.com/cockroachdb/cockroach/pull/147768 -[#147354]: https://github.com/cockroachdb/cockroach/pull/147354 + #147211 + diff --git a/src/current/_includes/releases/v24.3/v24.3.16.md b/src/current/_includes/releases/v24.3/v24.3.16.md index abc1bb4c8f6..49b626c6a9e 100644 --- a/src/current/_includes/releases/v24.3/v24.3.16.md +++ b/src/current/_includes/releases/v24.3/v24.3.16.md @@ -7,23 +7,23 @@ Release Date: July 28, 2025

SQL language changes

- Added a session variable `initial_retry_backoff_for_read_committed` that controls the initial backoff duration when retrying an individual statement in an explicit `READ COMMITTED` transaction. A duration of `0` disables exponential backoff. If a statement in an explicit `READ COMMITTED` transaction is failing with the `40001` error `ERROR: restart transaction: read committed retry limit exceeded; set by max_retries_for_read_committed=...`, then you should set `initial_retry_backoff_for_read_committed` to a duration proportional to the typical execution time of the statement (in addition to also increasing `max_retries_for_read_committed`). - [#148228][#148228] + #148228 - Added the metrics `sql.txn.auto_retry.count` and `sql.statements.auto_retry.count`, which count the number of automatic retries of SQL transactions and statements, respectively, within the database. These metrics differ from the related `txn.restarts.*` metrics, which count retryable errors emitted by the KV layer that must be retried. The new `sql.txn.auto_retry.count` and `sql.statements.auto_retry.count` metrics count auto-retry actions taken by the SQL layer in response to some of those retryable errors. - [#148228][#148228] + #148228

Operational changes

- Introduced a cluster setting, `sql.stats.error_on_concurrent_create_stats.enabled`, which modifies how CockroachDB reacts to concurrent auto stats jobs. The default, `true`, maintains the previous behavior. Setting `sql.stats.error_on_concurrent_create_stats.enabled` to `false` will cause the concurrent auto stats job to be skipped with just a log entry and no increased error counters. - [#149850][#149850] + #149850

Bug fixes

- Fixed a data race in the `cloudstorage` sink. - [#147161][#147161] + #147161 - Fixed an error in `crdb_internal.table_spans` that could occur when a table's schema had been dropped. - [#148048][#148048] + #148048 - Fixed a bug where `libpq` clients using the async API could hang with large result sets (Python: psycopg; Ruby: ActiveRecord, ruby-pg). - [#148470][#148470] + #148470 - The `RESET ALL` statement no longer affects the following session variables: - `is_superuser` - `role` @@ -33,15 +33,8 @@ Release Date: July 28, 2025 - `transaction_status` - `transaction_read_only` - This better matches PostgreSQL behavior for `RESET ALL`. In addition, the `DISCARD ALL` statement no longer errors when `default_transaction_use_follower_reads` is enabled. [#149431][#149431] + This better matches PostgreSQL behavior for `RESET ALL`. In addition, the `DISCARD ALL` statement no longer errors when `default_transaction_use_follower_reads` is enabled. #149431 - Fixed a bug that would allow a race condition in foreign key cascades under `READ COMMITTED` and `REPEATABLE READ` isolation levels. - [#150342][#150342] + #150342 -[#150342]: https://github.com/cockroachdb/cockroach/pull/150342 -[#148228]: https://github.com/cockroachdb/cockroach/pull/148228 -[#149850]: https://github.com/cockroachdb/cockroach/pull/149850 -[#147161]: https://github.com/cockroachdb/cockroach/pull/147161 -[#148048]: https://github.com/cockroachdb/cockroach/pull/148048 -[#148470]: https://github.com/cockroachdb/cockroach/pull/148470 -[#149431]: https://github.com/cockroachdb/cockroach/pull/149431 diff --git a/src/current/_includes/releases/v24.3/v24.3.17.md b/src/current/_includes/releases/v24.3/v24.3.17.md index b8ce1d8830f..92facebd4a9 100644 --- a/src/current/_includes/releases/v24.3/v24.3.17.md +++ b/src/current/_includes/releases/v24.3/v24.3.17.md @@ -7,7 +7,6 @@ Release Date: August 1, 2025

Bug fixes

- Fixed a bug that could cause some errors returned by attempts to upload backup data to external storage providers to be undetected, potentially causing incomplete backups. - [#151094][#151094] + #151094 -[#151094]: https://github.com/cockroachdb/cockroach/pull/151094 diff --git a/src/current/_includes/releases/v24.3/v24.3.18.md b/src/current/_includes/releases/v24.3/v24.3.18.md index 770a3e356ec..c8101d9cbb7 100644 --- a/src/current/_includes/releases/v24.3/v24.3.18.md +++ b/src/current/_includes/releases/v24.3/v24.3.18.md @@ -6,7 +6,6 @@ Release Date: August 8, 2025

Bug fixes

-- Fixed a memory accounting issue in the client certificate cache that caused multiple allocations to be reported for the same certificate. The cache now accurately tracks memory usage and includes a safeguard to prevent it from negatively affecting SQL operations. [#151564][#151564] +- Fixed a memory accounting issue in the client certificate cache that caused multiple allocations to be reported for the same certificate. The cache now accurately tracks memory usage and includes a safeguard to prevent it from negatively affecting SQL operations. #151564 -[#151564]: https://github.com/cockroachdb/cockroach/pull/151564 diff --git a/src/current/_includes/releases/v24.3/v24.3.19.md b/src/current/_includes/releases/v24.3/v24.3.19.md index f03ab16406c..4643b5f3551 100644 --- a/src/current/_includes/releases/v24.3/v24.3.19.md +++ b/src/current/_includes/releases/v24.3/v24.3.19.md @@ -8,39 +8,29 @@ Release Date: August 22, 2025 - Backporting detailed error logging logic gated behind a cluster setting. The cluster setting enables detailed - error logging for messages exceeding Kafka v2 size limit. [#150182][#150182] -- Changefeeds emitting to Kafka sinks that were created in CockroachDB v24.2.1+, or v23.2.10+ and v24.1.4+ with the `changefeed.new_kafka_sink.enabled` cluster setting enabled now include the message key, size, and MVCC timestamp in "message too large" error logs. [#150182][#150182] + error logging for messages exceeding Kafka v2 size limit. #150182 +- Changefeeds emitting to Kafka sinks that were created in CockroachDB v24.2.1+, or v23.2.10+ and v24.1.4+ with the `changefeed.new_kafka_sink.enabled` cluster setting enabled now include the message key, size, and MVCC timestamp in "message too large" error logs. #150182

Operational changes

-- Introduced a cluster setting, `sql.stats.error_on_concurrent_create_stats.enabled`, which modifies how CockroachDB reacts to concurrent auto stats jobs. The default, `true`, maintains the previous behavior. Setting `sql.stats.error_on_concurrent_create_stats.enabled` to `false` will cause the concurrent auto stats job to be skipped with just a log entry and no increased error counters. [#149841][#149841] -- Updated TTL job replanning to be less sensitive by focusing specifically on detecting when nodes become unavailable rather than reacting to all plan differences. The cluster setting `sql.ttl.replan_flow_threshold` may have been set to `0` to work around the TTL replanner being too sensitive; this fix will alleviate that and any instance that had set `replan_flow_threshold` to `0` can be reset back to the default. [#151489][#151489] +- Introduced a cluster setting, `sql.stats.error_on_concurrent_create_stats.enabled`, which modifies how CockroachDB reacts to concurrent auto stats jobs. The default, `true`, maintains the previous behavior. Setting `sql.stats.error_on_concurrent_create_stats.enabled` to `false` will cause the concurrent auto stats job to be skipped with just a log entry and no increased error counters. #149841 +- Updated TTL job replanning to be less sensitive by focusing specifically on detecting when nodes become unavailable rather than reacting to all plan differences. The cluster setting `sql.ttl.replan_flow_threshold` may have been set to `0` to work around the TTL replanner being too sensitive; this fix will alleviate that and any instance that had set `replan_flow_threshold` to `0` can be reset back to the default. #151489

Bug fixes

-- Fixed an issue where the `mvcc_timestamp` field was incorrectly returning zero values when used with CDC queries. The timestamp is now emitted correctly. [#147112][#147112] -- Fixed a bug that would allow a race condition in foreign key cascades under `READ COMMITTED` and `REPEATABLE READ` isolation levels. [#150336][#150336] -- Fixed a bug that could cause some errors returned by attempts to upload backup data to external storage providers to be undetected, potentially causing incomplete backups. [#151081][#151081] -- Fixed a memory accounting issue in the client certificate cache that caused multiple allocations to be reported for the same certificate. The cache now accurately tracks memory usage and includes a safeguard to prevent it from negatively affecting SQL operations. [#151141][#151141] -- Fixed a bug where `debug.zip` files collected from clusters with `disallow_full_table_scans` enabled were missing system table data. [#151223][#151223] +- Fixed an issue where the `mvcc_timestamp` field was incorrectly returning zero values when used with CDC queries. The timestamp is now emitted correctly. #147112 +- Fixed a bug that would allow a race condition in foreign key cascades under `READ COMMITTED` and `REPEATABLE READ` isolation levels. #150336 +- Fixed a bug that could cause some errors returned by attempts to upload backup data to external storage providers to be undetected, potentially causing incomplete backups. #151081 +- Fixed a memory accounting issue in the client certificate cache that caused multiple allocations to be reported for the same certificate. The cache now accurately tracks memory usage and includes a safeguard to prevent it from negatively affecting SQL operations. #151141 +- Fixed a bug where `debug.zip` files collected from clusters with `disallow_full_table_scans` enabled were missing system table data. #151223

Build changes

-- Upgrade Go to consume security fixes [#150989][#150989] +- Upgrade Go to consume security fixes #150989

Miscellaneous

- Restore will now re-attempt `AdminSplit` KV requests - instead of immediately failing and pausing the job. [#149619][#149619] - - -[#151489]: https://github.com/cockroachdb/cockroach/pull/151489 -[#147112]: https://github.com/cockroachdb/cockroach/pull/147112 -[#151081]: https://github.com/cockroachdb/cockroach/pull/151081 -[#151223]: https://github.com/cockroachdb/cockroach/pull/151223 -[#150989]: https://github.com/cockroachdb/cockroach/pull/150989 -[#150182]: https://github.com/cockroachdb/cockroach/pull/150182 -[#149841]: https://github.com/cockroachdb/cockroach/pull/149841 -[#149619]: https://github.com/cockroachdb/cockroach/pull/149619 -[#150336]: https://github.com/cockroachdb/cockroach/pull/150336 -[#151141]: https://github.com/cockroachdb/cockroach/pull/151141 + instead of immediately failing and pausing the job. #149619 + + diff --git a/src/current/_includes/releases/v24.3/v24.3.2.md b/src/current/_includes/releases/v24.3/v24.3.2.md index 01f29d8796a..4f484fb1182 100644 --- a/src/current/_includes/releases/v24.3/v24.3.2.md +++ b/src/current/_includes/releases/v24.3/v24.3.2.md @@ -6,7 +6,6 @@ Release Date: December 26, 2024

SQL language changes

-- Added the `legacy_varchar_typing` session setting. When set to `on`, type-checking comparisons involving `VARCHAR` columns behave as they did in all previous versions. When set to `off`, type-checking of these comparisons is more strict and queries that previously succeeded may now error with the message `unsupported comparison operator`. These errors can be fixed by adding explicit type casts. The `legacy_varchar_typing` session setting is on by default. [#137943][#137943] +- Added the `legacy_varchar_typing` session setting. When set to `on`, type-checking comparisons involving `VARCHAR` columns behave as they did in all previous versions. When set to `off`, type-checking of these comparisons is more strict and queries that previously succeeded may now error with the message `unsupported comparison operator`. These errors can be fixed by adding explicit type casts. The `legacy_varchar_typing` session setting is on by default. #137943 -[#137943]: https://github.com/cockroachdb/cockroach/pull/137943 diff --git a/src/current/_includes/releases/v24.3/v24.3.20.md b/src/current/_includes/releases/v24.3/v24.3.20.md index 798f12bf656..3bb6438c325 100644 --- a/src/current/_includes/releases/v24.3/v24.3.20.md +++ b/src/current/_includes/releases/v24.3/v24.3.20.md @@ -6,36 +6,27 @@ Release Date: September 22, 2025

SQL language changes

-- When `sql_safe_updates` is enabled, the `ALTER TABLE ... LOCALITY` statement will be blocked when trying to convert an existing table to `REGIONAL BY ROW`, unless a region column has been added to the table. This protects against undesired behavior that caused `UPDATE` or `DELETE` statements to fail against the table while the locality change was in progress. [#152602][#152602] +- When `sql_safe_updates` is enabled, the `ALTER TABLE ... LOCALITY` statement will be blocked when trying to convert an existing table to `REGIONAL BY ROW`, unless a region column has been added to the table. This protects against undesired behavior that caused `UPDATE` or `DELETE` statements to fail against the table while the locality change was in progress. #152602

Bug fixes

-- Fixed a bug where invalid default expressions could cause backfilling schema changes to retry forever. [#147015][#147015] -- Fixed a bug that could cause excessive memory allocations when compacting timeseries keys. [#151813][#151813] -- Fixed a bug where updating column default expressions would incorrectly remove sequence ownerships for the affected column. [#152313][#152313] +- Fixed a bug where invalid default expressions could cause backfilling schema changes to retry forever. #147015 +- Fixed a bug that could cause excessive memory allocations when compacting timeseries keys. #151813 +- Fixed a bug where updating column default expressions would incorrectly remove sequence ownerships for the affected column. #152313 - Fixed a bug that allowed foreign-key violations to result from some combinations of concurrent `READ COMMITTED` and `SERIALIZABLE` transactions. If both `SERIALIZABLE` and weaker-isolation transactions will concurrently modify rows involved in foreign-key relationships, the `SERIALIZABLE` transactions must have the following session variables set in order to prevent any possible foreign-key violations: - `SET enable_implicit_fk_locking_for_serializable = on;` - `SET enable_shared_locking_for_serializable = on;` - - `SET enable_durable_locking_for_serializable = on;` [#152376][#152376] -- Added an automatic repair for dangling or invalid entries in the `system.comments` table. [#152468][#152468] -- Fixed a bug where views could not reference the `crdb_region` column from their underlying tables in expressions. [#152742][#152742] + - `SET enable_durable_locking_for_serializable = on;` #152376 +- Added an automatic repair for dangling or invalid entries in the `system.comments` table. #152468 +- Fixed a bug where views could not reference the `crdb_region` column from their underlying tables in expressions. #152742

Performance improvements

-- Lookup joins can now be used on tables with virtual columns even if the type of the search argument is not identical to the column type referenced in the virtual column. [#152630][#152630] +- Lookup joins can now be used on tables with virtual columns even if the type of the search argument is not identical to the column type referenced in the virtual column. #152630

Miscellaneous

- Tunes S3 client retry behavior to be more reliable in the - presence of correlated errors. [#151875][#151875] - - -[#152742]: https://github.com/cockroachdb/cockroach/pull/152742 -[#152630]: https://github.com/cockroachdb/cockroach/pull/152630 -[#152602]: https://github.com/cockroachdb/cockroach/pull/152602 -[#152313]: https://github.com/cockroachdb/cockroach/pull/152313 -[#152376]: https://github.com/cockroachdb/cockroach/pull/152376 -[#152468]: https://github.com/cockroachdb/cockroach/pull/152468 -[#151875]: https://github.com/cockroachdb/cockroach/pull/151875 -[#147015]: https://github.com/cockroachdb/cockroach/pull/147015 -[#151813]: https://github.com/cockroachdb/cockroach/pull/151813 + presence of correlated errors. #151875 + + diff --git a/src/current/_includes/releases/v24.3/v24.3.21.md b/src/current/_includes/releases/v24.3/v24.3.21.md index 42308a470b9..38e24191afd 100644 --- a/src/current/_includes/releases/v24.3/v24.3.21.md +++ b/src/current/_includes/releases/v24.3/v24.3.21.md @@ -6,10 +6,7 @@ Release Date: October 17, 2025

Bug fixes

-- Fixed a bug where an `INSERT` statement could fail with a type checking error while adding a `BIT(n)` column. [#153604][#153604] -- Fixed a bug that caused panics when executing `COPY` into a table with hidden columns and expression indexes. The panic only occurred when the session setting `expect_and_ignore_not_visible_columns_in_copy` was enabled. This bug was introduced with `expect_and_ignore_not_visible_columns_in_copy` in v22.1.0. [#154284][#154284] -- Fixed a bug where the presence of duplicate temporary tables in a backup caused the restore to fail with an error containing the text `restoring table desc and namespace entries: table already exists`. [#154399][#154399] +- Fixed a bug where an `INSERT` statement could fail with a type checking error while adding a `BIT(n)` column. #153604 +- Fixed a bug that caused panics when executing `COPY` into a table with hidden columns and expression indexes. The panic only occurred when the session setting `expect_and_ignore_not_visible_columns_in_copy` was enabled. This bug was introduced with `expect_and_ignore_not_visible_columns_in_copy` in v22.1.0. #154284 +- Fixed a bug where the presence of duplicate temporary tables in a backup caused the restore to fail with an error containing the text `restoring table desc and namespace entries: table already exists`. #154399 -[#153604]: https://github.com/cockroachdb/cockroach/pull/153604 -[#154284]: https://github.com/cockroachdb/cockroach/pull/154284 -[#154399]: https://github.com/cockroachdb/cockroach/pull/154399 diff --git a/src/current/_includes/releases/v24.3/v24.3.22.md b/src/current/_includes/releases/v24.3/v24.3.22.md index b656d2858ea..08bac29fbf3 100644 --- a/src/current/_includes/releases/v24.3/v24.3.22.md +++ b/src/current/_includes/releases/v24.3/v24.3.22.md @@ -6,6 +6,5 @@ Release Date: October 30, 2025

Bug fixes

-- Fixed a bug in the `cockroach node drain` command where draining a node using virtual clusters (such as clusters running Physical Cluster Replication (PCR)) could return before the drain was complete, possibly resulting in shutting down the node while it still had active SQL clients and range leases. [#156311][#156311] +- Fixed a bug in the `cockroach node drain` command where draining a node using virtual clusters (such as clusters running Physical Cluster Replication (PCR)) could return before the drain was complete, possibly resulting in shutting down the node while it still had active SQL clients and range leases. #156311 -[#156311]: https://github.com/cockroachdb/cockroach/pull/156311 diff --git a/src/current/_includes/releases/v24.3/v24.3.23.md b/src/current/_includes/releases/v24.3/v24.3.23.md index a1cba65e22c..63573492798 100644 --- a/src/current/_includes/releases/v24.3/v24.3.23.md +++ b/src/current/_includes/releases/v24.3/v24.3.23.md @@ -6,16 +6,13 @@ Release Date: November 14, 2025

SQL language changes

-- Added the `sql.statements.bytes_read.count` metric that counts the number of bytes scanned by SQL statements. [#156598][#156598] -- Added the `sql.statements.index_rows_written.count` metric that counts the number of primary and secondary index rows modified by SQL statements. [#156598][#156598] -- Added the `sql.statements.index_bytes_written.count` metric that counts the number of primary and secondary index bytes modified by SQL statements. [#156598][#156598] -- Added the `sql.statements.rows_read.count` metric that counts the number of index rows read by SQL statements. [#156598][#156598] +- Added the `sql.statements.bytes_read.count` metric that counts the number of bytes scanned by SQL statements. #156598 +- Added the `sql.statements.index_rows_written.count` metric that counts the number of primary and secondary index rows modified by SQL statements. #156598 +- Added the `sql.statements.index_bytes_written.count` metric that counts the number of primary and secondary index bytes modified by SQL statements. #156598 +- Added the `sql.statements.rows_read.count` metric that counts the number of index rows read by SQL statements. #156598

Bug fixes

-- Fixed a bug where the job responsible for compacting stats for the SQL activity state could enter an unschedulable state. [#155971][#155971] -- Fixed a bug where changefeeds using CDC queries could sometimes unexpectedly fail after a schema change with a descriptor retrieval error. [#156549][#156549] +- Fixed a bug where the job responsible for compacting stats for the SQL activity state could enter an unschedulable state. #155971 +- Fixed a bug where changefeeds using CDC queries could sometimes unexpectedly fail after a schema change with a descriptor retrieval error. #156549 -[#155971]: https://github.com/cockroachdb/cockroach/pull/155971 -[#156549]: https://github.com/cockroachdb/cockroach/pull/156549 -[#156598]: https://github.com/cockroachdb/cockroach/pull/156598 diff --git a/src/current/_includes/releases/v24.3/v24.3.24.md b/src/current/_includes/releases/v24.3/v24.3.24.md index 16f09cd74d2..99a517c5a81 100644 --- a/src/current/_includes/releases/v24.3/v24.3.24.md +++ b/src/current/_includes/releases/v24.3/v24.3.24.md @@ -6,7 +6,6 @@ Release Date: December 12, 2025

Bug fixes

-- A mechanism that prevents unsafe replication changes from causing loss of quorum now functions correctly. An internal function has been fixed to properly return errors, enhancing the reliability of replication safeguards. [#156520][#156520] +- A mechanism that prevents unsafe replication changes from causing loss of quorum now functions correctly. An internal function has been fixed to properly return errors, enhancing the reliability of replication safeguards. #156520 -[#156520]: https://github.com/cockroachdb/cockroach/pull/156520 diff --git a/src/current/_includes/releases/v24.3/v24.3.25.md b/src/current/_includes/releases/v24.3/v24.3.25.md index 9d8c3ececae..b08a251c6e2 100644 --- a/src/current/_includes/releases/v24.3/v24.3.25.md +++ b/src/current/_includes/releases/v24.3/v24.3.25.md @@ -11,10 +11,7 @@ Release Date: January 9, 2026 - The query had an equality filter on a placeholder and a `UNIQUE` column - The column contained _NULL_ values - The placeholder was assigned to _NULL_ during execution - - Starting in v25.4, the requirements to trigger the bug were loosened: the column no longer needed to be `UNIQUE`, and the bug could be reproduced if the column was included in any index. [#159078][#159078] -- Fixed a race condition that could occur during context cancellation of an incoming snapshot. [#159538][#159538] -- Fixed a bug causing a query predicate to be ignored when the predicate was on a column following one or more `ENUM` columns in an index, the predicate constrained the column to multiple values, and a lookup join to the index was chosen for the query plan. This bug was introduced in 24.3.0 and has been present in all versions since. [#159774][#159774] + - Starting in v25.4, the requirements to trigger the bug were loosened: the column no longer needed to be `UNIQUE`, and the bug could be reproduced if the column was included in any index. #159078 +- Fixed a race condition that could occur during context cancellation of an incoming snapshot. 
#159538 +- Fixed a bug causing a query predicate to be ignored when the predicate was on a column following one or more `ENUM` columns in an index, the predicate constrained the column to multiple values, and a lookup join to the index was chosen for the query plan. This bug was introduced in 24.3.0 and has been present in all versions since. #159774 -[#159078]: https://github.com/cockroachdb/cockroach/pull/159078 -[#159538]: https://github.com/cockroachdb/cockroach/pull/159538 -[#159774]: https://github.com/cockroachdb/cockroach/pull/159774 diff --git a/src/current/_includes/releases/v24.3/v24.3.26.md b/src/current/_includes/releases/v24.3/v24.3.26.md index 9eaae09f193..bc4aded1326 100644 --- a/src/current/_includes/releases/v24.3/v24.3.26.md +++ b/src/current/_includes/releases/v24.3/v24.3.26.md @@ -6,7 +6,6 @@ Release Date: February 11, 2026

Bug fixes

-- Fixed a bug where `IMPORT` with Avro data using `OCF` format could silently lose data if the underlying storage (e.g., S3) returned an error during read. Such errors are now properly reported. Other formats (specified via `data_as_binary_records` and `data_as_json_records` options) are unaffected. The bug has been present since approximately v20.1. [#161321][#161321] +- Fixed a bug where `IMPORT` with Avro data using `OCF` format could silently lose data if the underlying storage (e.g., S3) returned an error during read. Such errors are now properly reported. Other formats (specified via `data_as_binary_records` and `data_as_json_records` options) are unaffected. The bug has been present since approximately v20.1. #161321 -[#161321]: https://github.com/cockroachdb/cockroach/pull/161321 diff --git a/src/current/_includes/releases/v24.3/v24.3.27.md b/src/current/_includes/releases/v24.3/v24.3.27.md index ba984601a8c..b28e97a4c50 100644 --- a/src/current/_includes/releases/v24.3/v24.3.27.md +++ b/src/current/_includes/releases/v24.3/v24.3.27.md @@ -6,9 +6,7 @@ Release Date: February 19, 2026

Bug fixes

-- Fixed a rare race condition between range splits and MVCC garbage collection where a GCRequest could target keys outside its declared span. In rare cases, this could result in data on the post-split right-hand side (RHS) being incorrectly garbage collected, potentially leading to lost writes. The system now detects and rejects such malformed GC requests. [#163774][#163774] -- Fixed a bug where generating a debug zip could trigger an out-of-memory (OOM) condition on a node if malformed log entries were present in logs using json or json-compact formatting. Debug zip generation now safely handles malformed log lines and prevents excessive memory consumption. [#163800][#163800] +- Fixed a rare race condition between range splits and MVCC garbage collection where a GCRequest could target keys outside its declared span. In rare cases, this could result in data on the post-split right-hand side (RHS) being incorrectly garbage collected, potentially leading to lost writes. The system now detects and rejects such malformed GC requests. #163774 +- Fixed a bug where generating a debug zip could trigger an out-of-memory (OOM) condition on a node if malformed log entries were present in logs using json or json-compact formatting. Debug zip generation now safely handles malformed log lines and prevents excessive memory consumption. #163800 -[#163774]: https://github.com/cockroachdb/cockroach/pull/163774 -[#163800]: https://github.com/cockroachdb/cockroach/pull/163800 diff --git a/src/current/_includes/releases/v24.3/v24.3.28.md b/src/current/_includes/releases/v24.3/v24.3.28.md index 35f957fc2e5..61719998b64 100644 --- a/src/current/_includes/releases/v24.3/v24.3.28.md +++ b/src/current/_includes/releases/v24.3/v24.3.28.md @@ -6,7 +6,6 @@ Release Date: March 5, 2026

Bug fixes

-- Fixed a bug where an internal context structure could grow unboundedly over time. In rare cases, on nodes running continuously for several months or more, this could cause the `cockroach` process to appear stalled when a CPU profile was requested.​​​​​​​​​​​​​​​​ [#164778][#164778] +- Fixed a bug where an internal context structure could grow unboundedly over time. In rare cases, on nodes running continuously for several months or more, this could cause the `cockroach` process to appear stalled when a CPU profile was requested.​​​​​​​​​​​​​​​​ #164778 -[#164778]: https://github.com/cockroachdb/cockroach/pull/164778 diff --git a/src/current/_includes/releases/v24.3/v24.3.29.md b/src/current/_includes/releases/v24.3/v24.3.29.md index fa7d3a9e81d..b7b2e6b9605 100644 --- a/src/current/_includes/releases/v24.3/v24.3.29.md +++ b/src/current/_includes/releases/v24.3/v24.3.29.md @@ -6,18 +6,13 @@ Release Date: March 9, 2026

Bug fixes

-- Fixed a bug where AVRO file imports of data with JSON or binary records could hang indefinitely when encountering stream errors from cloud storage (such as `HTTP/2` `CANCEL` errors). Import jobs will now properly fail with an error instead of hanging. [#161438][#161438] -- Fixed a bug that, in rare cases, could cause a node to crash when using a changefeed with the `end_time` option. [#161678][#161678] -- Fixed a bug that could cause changefeeds using Kafka v1 sinks to hang when the changefeed was cancelled. [#162117][#162117] -- Fixed a bug where generating a debug zip could trigger an out-of-memory (OOM) condition on a node if malformed log entries were present in logs using `json` or `json-compact` formatting. This bug was introduced in v24.1. [#163360][#163360] +- Fixed a bug where AVRO file imports of data with JSON or binary records could hang indefinitely when encountering stream errors from cloud storage (such as `HTTP/2` `CANCEL` errors). Import jobs will now properly fail with an error instead of hanging. #161438 +- Fixed a bug that, in rare cases, could cause a node to crash when using a changefeed with the `end_time` option. #161678 +- Fixed a bug that could cause changefeeds using Kafka v1 sinks to hang when the changefeed was cancelled. #162117 +- Fixed a bug where generating a debug zip could trigger an out-of-memory (OOM) condition on a node if malformed log entries were present in logs using `json` or `json-compact` formatting. This bug was introduced in v24.1. #163360

Miscellaneous

-- Fixed a bug where import rollback could incorrectly revert data in a table that was already online. This could only occur if an import job was cancelled or failed after the import had already succeeded and the table was made available for use. [#159900][#159900] +- Fixed a bug where import rollback could incorrectly revert data in a table that was already online. This could only occur if an import job was cancelled or failed after the import had already succeeded and the table was made available for use. #159900 -[#161438]: https://github.com/cockroachdb/cockroach/pull/161438 -[#161678]: https://github.com/cockroachdb/cockroach/pull/161678 -[#162117]: https://github.com/cockroachdb/cockroach/pull/162117 -[#163360]: https://github.com/cockroachdb/cockroach/pull/163360 -[#159900]: https://github.com/cockroachdb/cockroach/pull/159900 diff --git a/src/current/_includes/releases/v24.3/v24.3.3.md b/src/current/_includes/releases/v24.3/v24.3.3.md index d7883c73883..d81be032500 100644 --- a/src/current/_includes/releases/v24.3/v24.3.3.md +++ b/src/current/_includes/releases/v24.3/v24.3.3.md @@ -11,105 +11,56 @@ Release Date: January 9, 2025 - `changefeed.parallel_io_result_queue_nanos` - `changefeed.sink_batch_hist_nanos` - `changefeed.flush_hist_nanos` - - `changefeed.kafka_throttling_hist_nanos` [#136604][#136604] -- Added support for multiple seed brokers in the new Kafka sink. [#136727][#136727] -- Added a new metric `distsender.rangefeed.catchup_ranges_waiting_client_side` that counts how many rangefeeds are waiting on the client-side limiter to start performing catchup scans. [#136838][#136838] -- Added support for a new `AWS_USE_PATH_STYLE` parameter in S3 URI parsing. [#136934][#136934] + - `changefeed.kafka_throttling_hist_nanos` #136604 +- Added support for multiple seed brokers in the new Kafka sink. 
#136727 +- Added a new metric `distsender.rangefeed.catchup_ranges_waiting_client_side` that counts how many rangefeeds are waiting on the client-side limiter to start performing catchup scans. #136838 +- Added support for a new `AWS_USE_PATH_STYLE` parameter in S3 URI parsing. #136934

SQL language changes

-- Added support for `SHOW TRIGGERS`, which displays the names of all triggers on a table, and whether each trigger is enabled. The user must have any privilege on the table, or be its owner. [#135862][#135862] -- Added support for `SHOW CREATE TRIGGER`, which displays the CREATE statement for a trigger. The user must have any privilege on the table, or be its owner. [#135862][#135862] -- The names of `BEFORE` triggers fired by a mutation are now included in the `EXPLAIN` output, with the trigger-function invocations also visible in the output of verbose `EXPLAIN`. [#135864][#135864] -- `AFTER` triggers are now included in the output of `EXPLAIN` and `EXPLAIN ANALYZE`. [#135864][#135864] -- Added the `legacy_varchar_typing` session setting, which reverts the changes of [#133037](https://github.com/cockroachdb/cockroach/pull/133037) that causes the change in typing behavior described in [#137837](https://github.com/cockroachdb/cockroach/pull/137837). Specifically, it makes type-checking and overload resolution ignore the newly added "unpreferred" overloads. This setting defaults to `on`. [#137919][#137919] +- Added support for `SHOW TRIGGERS`, which displays the names of all triggers on a table, and whether each trigger is enabled. The user must have any privilege on the table, or be its owner. #135862 +- Added support for `SHOW CREATE TRIGGER`, which displays the CREATE statement for a trigger. The user must have any privilege on the table, or be its owner. #135862 +- The names of `BEFORE` triggers fired by a mutation are now included in the `EXPLAIN` output, with the trigger-function invocations also visible in the output of verbose `EXPLAIN`. #135864 +- `AFTER` triggers are now included in the output of `EXPLAIN` and `EXPLAIN ANALYZE`. #135864 +- Added the `legacy_varchar_typing` session setting, which reverts the changes of #133037 that causes the change in typing behavior described in #137837. 
Specifically, it makes type-checking and overload resolution ignore the newly added "unpreferred" overloads. This setting defaults to `on`. #137919

Operational changes

-- Removed the `sql.auth.resolve_membership_single_scan.enabled` cluster setting. This was added out of precaution in case it was necessary to revert back to the old behavior for looking up role memberships, but this escape hatch has never been needed in practice since this was added in v23.1. [#136162][#136162] -- Telemetry delivery is now considered successful even in cases of a network timeout. This will prevent throttling in cases outside an operator's control. [#136480][#136480] -- When a schema change job is completed, rolls back, or encounters a failure, the time taken since the job began is now logged in a structured log in the `SQL_SCHEMA` log channel. [#136929][#136929] -- Added a new configurable parameter `kv.transaction.max_intents_and_locks` that will prevent transactions from creating too many intents. [#137687][#137687] -- Added the metric `txn.count_limit_rejected`, which tracks the KV transactions that have been aborted because they exceeded the max number of writes and locking reads allowed. [#137687][#137687] -- Added the metric `txn.count_limit_on_response`, which tracks the number of KV transactions that have exceeded the count limit on a response. [#137687][#137687] +- Removed the `sql.auth.resolve_membership_single_scan.enabled` cluster setting. This was added out of precaution in case it was necessary to revert back to the old behavior for looking up role memberships, but this escape hatch has never been needed in practice since this was added in v23.1. #136162 +- Telemetry delivery is now considered successful even in cases of a network timeout. This will prevent throttling in cases outside an operator's control. #136480 +- When a schema change job is completed, rolls back, or encounters a failure, the time taken since the job began is now logged in a structured log in the `SQL_SCHEMA` log channel. 
#136929 +- Added a new configurable parameter `kv.transaction.max_intents_and_locks` that will prevent transactions from creating too many intents. #137687 +- Added the metric `txn.count_limit_rejected`, which tracks the KV transactions that have been aborted because they exceeded the max number of writes and locking reads allowed. #137687 +- Added the metric `txn.count_limit_on_response`, which tracks the number of KV transactions that have exceeded the count limit on a response. #137687

DB Console changes

-- The link on the Plan Details page to the legacy Table page has been removed. [#136504][#136504] +- The link on the Plan Details page to the legacy Table page has been removed. #136504

Bug fixes

-- Previously, CockroachDB would encounter an internal error when evaluating `FETCH ABSOLUTE 0` statements, and this is now fixed. The bug has been present since v22.1. [#134995][#134995] -- Fixed an issue where corrupted table statistics could cause the `cockroach` process to crash. [#136042][#136042] -- A table that is participating in a logical replication stream can no longer be dropped. Previously this was allowed, which would cause all the replicated rows to end up in the dead-letter queue. [#136255][#136255] -- `ALTER COLUMN SET NOT NULL` was not enforced consistently when the table was created in the same transaction. [#136323][#136323] -- Fixed a bug where `CREATE RELATION / TYPE` could leave dangling namespace entries if the schema was concurrently being dropped. [#136378][#136378] -- `security.certificate.*` metrics will now be updated if a node loads new certificates while running. [#136226][#136226] -- The `idle_in_session_timeout` setting now excludes the time spent waiting for schema changer jobs to complete, preventing unintended session termination during schema change operations. [#136490][#136490] -- Fixed a bug that causes the optimizer to use stale table statistics after altering an `ENUM` type used in the table. [#136630][#136630] -- Changes the table, index contents of the hot ranges page in DB console. [#134988][#134988] -- Table statistics collection in CockroachDB could previously run into `no bytes in account to release` errors in some edge cases (when the SQL memory budget, configured via `--max-sql-memory` flag, was close to being exhausted). The bug has been present since v21.2 and is now fixed. [#136166][#136166] -- CockroachDB now better respects `statement_timeout` limit on queries involving the top K sort and merge join operations. [#136653][#136653] -- Fixed an issue where license enforcement was not consistently disabled for single-node clusters started with `cockroach start-single-node`. 
The fix ensures proper behavior on cluster restarts. [#137012][#137012] -- Fixed a bug that caused queries against tables with user-defined types to sometimes fail with errors after restoring those tables. [#137353][#137353] -- Fixed a bug that causes an incorrect filesystem to be logged as part of the store information. [#137115][#137115] -- Fixed a bug affecting uniqueness enforcement in regional by row tables when using read-committed isolation. The bug, introduced in v24.3.0, could cause internal errors or incorrect uniqueness enforcement in tables that had both non-unique and unique indexes when the region column was not part of the uniqueness constraints. [#137366][#137366] -- Fixed a bug that has existed since v24.1 that would cause a set-returning UDF with `OUT` parameters to return a single row. [#137376][#137376] -- Fixed an issue where adding an existing column with the `IF NOT EXISTS` option could exit too early, skipping necessary handling of the abstract syntax tree (AST). This could lead to failure of the `ALTER` statement. [#137675][#137675] -- An issue where a schema change could incorrectly cause a changefeed to fail with an assertion error like `received boundary timestamp ... of type ... before reaching existing boundary of type ...` has now been fixed. [#137706][#137706] -- Internal scans are now exempt from the `sql.defaults.disallow_full_table_scans.enabled` setting, allowing index creation even when the cluster setting is active. [#137725][#137725] -- The `pg_catalog.pg_type` table no longer contains `NULL` values for the columns `typinput`, `typoutput`, `typreceive`, and `typsend`. `NULL` values were erroneously added for these columns for the `trigger` type in v24.3.0. This could cause unexpected errors with some client libraries. [#137941][#137941] +- Previously, CockroachDB would encounter an internal error when evaluating `FETCH ABSOLUTE 0` statements, and this is now fixed. The bug has been present since v22.1. 
#134995 +- Fixed an issue where corrupted table statistics could cause the `cockroach` process to crash. #136042 +- A table that is participating in a logical replication stream can no longer be dropped. Previously this was allowed, which would cause all the replicated rows to end up in the dead-letter queue. #136255 +- `ALTER COLUMN SET NOT NULL` was not enforced consistently when the table was created in the same transaction. #136323 +- Fixed a bug where `CREATE RELATION / TYPE` could leave dangling namespace entries if the schema was concurrently being dropped. #136378 +- `security.certificate.*` metrics will now be updated if a node loads new certificates while running. #136226 +- The `idle_in_session_timeout` setting now excludes the time spent waiting for schema changer jobs to complete, preventing unintended session termination during schema change operations. #136490 +- Fixed a bug that causes the optimizer to use stale table statistics after altering an `ENUM` type used in the table. #136630 +- Changes the table, index contents of the hot ranges page in DB console. #134988 +- Table statistics collection in CockroachDB could previously run into `no bytes in account to release` errors in some edge cases (when the SQL memory budget, configured via `--max-sql-memory` flag, was close to being exhausted). The bug has been present since v21.2 and is now fixed. #136166 +- CockroachDB now better respects `statement_timeout` limit on queries involving the top K sort and merge join operations. #136653 +- Fixed an issue where license enforcement was not consistently disabled for single-node clusters started with `cockroach start-single-node`. The fix ensures proper behavior on cluster restarts. #137012 +- Fixed a bug that caused queries against tables with user-defined types to sometimes fail with errors after restoring those tables. #137353 +- Fixed a bug that causes an incorrect filesystem to be logged as part of the store information. 
#137115 +- Fixed a bug affecting uniqueness enforcement in regional by row tables when using read-committed isolation. The bug, introduced in v24.3.0, could cause internal errors or incorrect uniqueness enforcement in tables that had both non-unique and unique indexes when the region column was not part of the uniqueness constraints. #137366 +- Fixed a bug that has existed since v24.1 that would cause a set-returning UDF with `OUT` parameters to return a single row. #137376 +- Fixed an issue where adding an existing column with the `IF NOT EXISTS` option could exit too early, skipping necessary handling of the abstract syntax tree (AST). This could lead to failure of the `ALTER` statement. #137675 +- An issue where a schema change could incorrectly cause a changefeed to fail with an assertion error like `received boundary timestamp ... of type ... before reaching existing boundary of type ...` has now been fixed. #137706 +- Internal scans are now exempt from the `sql.defaults.disallow_full_table_scans.enabled` setting, allowing index creation even when the cluster setting is active. #137725 +- The `pg_catalog.pg_type` table no longer contains `NULL` values for the columns `typinput`, `typoutput`, `typreceive`, and `typsend`. `NULL` values were erroneously added for these columns for the `trigger` type in v24.3.0. This could cause unexpected errors with some client libraries. #137941

Performance improvements

-- Improved the internal caching logic for role membership information. This reduces the latency impact of commands such as `DROP ROLE`, `CREATE ROLE`, and `GRANT role TO user`, which cause the role membership cache to be invalidated. [#136162][#136162] +- Improved the internal caching logic for role membership information. This reduces the latency impact of commands such as `DROP ROLE`, `CREATE ROLE`, and `GRANT role TO user`, which cause the role membership cache to be invalidated. #136162 -[#134988]: https://github.com/cockroachdb/cockroach/pull/134988 -[#134995]: https://github.com/cockroachdb/cockroach/pull/134995 -[#135862]: https://github.com/cockroachdb/cockroach/pull/135862 -[#135864]: https://github.com/cockroachdb/cockroach/pull/135864 -[#136042]: https://github.com/cockroachdb/cockroach/pull/136042 -[#136162]: https://github.com/cockroachdb/cockroach/pull/136162 -[#136166]: https://github.com/cockroachdb/cockroach/pull/136166 -[#136226]: https://github.com/cockroachdb/cockroach/pull/136226 -[#136255]: https://github.com/cockroachdb/cockroach/pull/136255 -[#136323]: https://github.com/cockroachdb/cockroach/pull/136323 -[#136326]: https://github.com/cockroachdb/cockroach/pull/136326 -[#136378]: https://github.com/cockroachdb/cockroach/pull/136378 -[#136480]: https://github.com/cockroachdb/cockroach/pull/136480 -[#136490]: https://github.com/cockroachdb/cockroach/pull/136490 -[#136504]: https://github.com/cockroachdb/cockroach/pull/136504 -[#136604]: https://github.com/cockroachdb/cockroach/pull/136604 -[#136630]: https://github.com/cockroachdb/cockroach/pull/136630 -[#136653]: https://github.com/cockroachdb/cockroach/pull/136653 -[#136680]: https://github.com/cockroachdb/cockroach/pull/136680 -[#136727]: https://github.com/cockroachdb/cockroach/pull/136727 -[#136838]: https://github.com/cockroachdb/cockroach/pull/136838 -[#136929]: https://github.com/cockroachdb/cockroach/pull/136929 -[#136934]: https://github.com/cockroachdb/cockroach/pull/136934 
-[#137012]: https://github.com/cockroachdb/cockroach/pull/137012 -[#137051]: https://github.com/cockroachdb/cockroach/pull/137051 -[#137115]: https://github.com/cockroachdb/cockroach/pull/137115 -[#137117]: https://github.com/cockroachdb/cockroach/pull/137117 -[#137353]: https://github.com/cockroachdb/cockroach/pull/137353 -[#137366]: https://github.com/cockroachdb/cockroach/pull/137366 -[#137376]: https://github.com/cockroachdb/cockroach/pull/137376 -[#137462]: https://github.com/cockroachdb/cockroach/pull/137462 -[#137528]: https://github.com/cockroachdb/cockroach/pull/137528 -[#137675]: https://github.com/cockroachdb/cockroach/pull/137675 -[#137687]: https://github.com/cockroachdb/cockroach/pull/137687 -[#137706]: https://github.com/cockroachdb/cockroach/pull/137706 -[#137725]: https://github.com/cockroachdb/cockroach/pull/137725 -[#137919]: https://github.com/cockroachdb/cockroach/pull/137919 -[#137941]: https://github.com/cockroachdb/cockroach/pull/137941 -[10eab82e8]: https://github.com/cockroachdb/cockroach/commit/10eab82e8 -[1433008fc]: https://github.com/cockroachdb/cockroach/commit/1433008fc -[3d88fefff]: https://github.com/cockroachdb/cockroach/commit/3d88fefff -[6867acb55]: https://github.com/cockroachdb/cockroach/commit/6867acb55 -[738f32732]: https://github.com/cockroachdb/cockroach/commit/738f32732 -[849cbad97]: https://github.com/cockroachdb/cockroach/commit/849cbad97 -[8ac7ca4f3]: https://github.com/cockroachdb/cockroach/commit/8ac7ca4f3 -[93078f72c]: https://github.com/cockroachdb/cockroach/commit/93078f72c -[b25f499aa]: https://github.com/cockroachdb/cockroach/commit/b25f499aa -[cd6e53cb7]: https://github.com/cockroachdb/cockroach/commit/cd6e53cb7 -[db086f257]: https://github.com/cockroachdb/cockroach/commit/db086f257 diff --git a/src/current/_includes/releases/v24.3/v24.3.30.md b/src/current/_includes/releases/v24.3/v24.3.30.md index 287ab725c7d..b9d66d93a54 100644 --- a/src/current/_includes/releases/v24.3/v24.3.30.md +++ 
b/src/current/_includes/releases/v24.3/v24.3.30.md @@ -6,9 +6,7 @@ Release Date: April 3, 2026

Bug fixes

-- Fixed a bug where CockroachDB did not always promptly respond to the statement timeout when performing a hash join with an `ON` filter that is mostly `false`. [#164888][#164888] -- Fixed a bug that could cause row sampling for table statistics to crash a node due to a data race when processing a collated string column with values larger than 400 bytes. This bug has existed since before v23.1. [#165571][#165571] +- Fixed a bug where CockroachDB did not always promptly respond to the statement timeout when performing a hash join with an `ON` filter that is mostly `false`. #164888 +- Fixed a bug that could cause row sampling for table statistics to crash a node due to a data race when processing a collated string column with values larger than 400 bytes. This bug has existed since before v23.1. #165571 -[#164888]: https://github.com/cockroachdb/cockroach/pull/164888 -[#165571]: https://github.com/cockroachdb/cockroach/pull/165571 diff --git a/src/current/_includes/releases/v24.3/v24.3.31.md b/src/current/_includes/releases/v24.3/v24.3.31.md index 5f101f17de5..b110a081dee 100644 --- a/src/current/_includes/releases/v24.3/v24.3.31.md +++ b/src/current/_includes/releases/v24.3/v24.3.31.md @@ -6,7 +6,6 @@ Release Date: April 20, 2026

Bug fixes

-- Fixed a bug where concurrent updates to a table using multiple column families during a partial index creation could result in data loss, incorrect `NULL` values, or validation failures in the resulting index. [#168474][#168474] +- Fixed a bug where concurrent updates to a table using multiple column families during a partial index creation could result in data loss, incorrect `NULL` values, or validation failures in the resulting index. #168474 -[#168474]: https://github.com/cockroachdb/cockroach/pull/168474 diff --git a/src/current/_includes/releases/v24.3/v24.3.4.md b/src/current/_includes/releases/v24.3/v24.3.4.md index 0365b6bac6c..b5793de0304 100644 --- a/src/current/_includes/releases/v24.3/v24.3.4.md +++ b/src/current/_includes/releases/v24.3/v24.3.4.md @@ -6,10 +6,9 @@ Release Date: January 31, 2025

Bug fixes

-- Fixed a bug that could cause `SHOW TABLES` and other introspection operations to encounter a `"batch timestamp ... must be after replica GC threshold"` error. [#140175][#140175] +- Fixed a bug that could cause `SHOW TABLES` and other introspection operations to encounter a `"batch timestamp ... must be after replica GC threshold"` error. #140175 {{site.data.alerts.callout_danger}} This fix is present in v24.3.4 and [v24.3.6](#v24-3-6), but was **not** released in [v24.3.5](#v24-3-5). {{site.data.alerts.end}} -[#140175]: https://github.com/cockroachdb/cockroach/pull/140175 diff --git a/src/current/_includes/releases/v24.3/v24.3.5.md b/src/current/_includes/releases/v24.3/v24.3.5.md index 9727c2b3010..4830ec98fd1 100644 --- a/src/current/_includes/releases/v24.3/v24.3.5.md +++ b/src/current/_includes/releases/v24.3/v24.3.5.md @@ -6,73 +6,45 @@ Release Date: February 6, 2025

General changes

-- The protected timestamp records of running changefeeds are now updated when the set of targets changes, such as when system tables are added to the protected tables list. [#138654][#138654] +- The protected timestamp records of running changefeeds are now updated when the set of targets changes, such as when system tables are added to the protected tables list. #138654

Backward-incompatible changes

-- In [v24.3.4](#v24-3-4-bug-fixes), a bug was fixed that could cause `SHOW TABLES` and other introspection operations to encounter a `"batch timestamp ... must be after replica GC threshold"` error. This fix is **not** present in v24.3.5, but has been released in [v24.3.6](#v24-3-6). [#140175][#140175] +- In [v24.3.4](#v24-3-4-bug-fixes), a bug was fixed that could cause `SHOW TABLES` and other introspection operations to encounter a `"batch timestamp ... must be after replica GC threshold"` error. This fix is **not** present in v24.3.5, but has been released in [v24.3.6](#v24-3-6). #140175

SQL language changes

-- Since v23.2 table statistics histograms have been collected for non-indexed JSON columns. Histograms are no longer collected for these columns if the `sql.stats.non_indexed_json_histograms.enabled` cluster setting is set to `false`. This reduces memory usage during table statistics collection, for both automatic and manual collection via `ANALYZE` and `CREATE STATISTICS`. [#140265][#140265] -- Added support for a new index hint, `AVOID_FULL_SCAN`, which will prevent the optimizer from planning a full scan for the specified table if any other plan is possible. The hint can be used in the same way as other existing index hints. For example, `SELECT * FROM table_name@{AVOID_FULL_SCAN};`. This hint is similar to `NO_FULL_SCAN`, but will not error if a full scan cannot be avoided. Note that normally a full scan of a partial index would not be considered a "full scan" for the purposes of the `NO_FULL_SCAN` and `AVOID_FULL_SCAN` hints, but if the user has explicitly forced the partial index via `FORCE_INDEX=index_name`, CockroachDB does consider it a full scan. [#140270][#140270] -- Added the `optimizer_prefer_bounded_cardinality` session setting, which instructs the optimizer to prefer query plans where every expression has a guaranteed upper-bound on the number of rows it will process. This may help the optimizer produce better query plans in some cases. This setting is disabled by default. [#140270][#140270] -- Added the `optimizer_min_row_count` session setting, which sets a lower bound on row count estimates for relational expressions during query planning. A value of zero, which is the default, indicates no lower bound. Note that if this is set to a value greater than zero, a row count of zero can still be estimated for expressions with a cardinality of zero, e.g., for a contradictory filter. Setting this to a value higher than `0`, such as `1`, may yield better query plans in some cases, such as when statistics are frequently stale and inaccurate. 
[#140270][#140270] +- Since v23.2 table statistics histograms have been collected for non-indexed JSON columns. Histograms are no longer collected for these columns if the `sql.stats.non_indexed_json_histograms.enabled` cluster setting is set to `false`. This reduces memory usage during table statistics collection, for both automatic and manual collection via `ANALYZE` and `CREATE STATISTICS`. #140265 +- Added support for a new index hint, `AVOID_FULL_SCAN`, which will prevent the optimizer from planning a full scan for the specified table if any other plan is possible. The hint can be used in the same way as other existing index hints. For example, `SELECT * FROM table_name@{AVOID_FULL_SCAN};`. This hint is similar to `NO_FULL_SCAN`, but will not error if a full scan cannot be avoided. Note that normally a full scan of a partial index would not be considered a "full scan" for the purposes of the `NO_FULL_SCAN` and `AVOID_FULL_SCAN` hints, but if the user has explicitly forced the partial index via `FORCE_INDEX=index_name`, CockroachDB does consider it a full scan. #140270 +- Added the `optimizer_prefer_bounded_cardinality` session setting, which instructs the optimizer to prefer query plans where every expression has a guaranteed upper-bound on the number of rows it will process. This may help the optimizer produce better query plans in some cases. This setting is disabled by default. #140270 +- Added the `optimizer_min_row_count` session setting, which sets a lower bound on row count estimates for relational expressions during query planning. A value of zero, which is the default, indicates no lower bound. Note that if this is set to a value greater than zero, a row count of zero can still be estimated for expressions with a cardinality of zero, e.g., for a contradictory filter. Setting this to a value higher than `0`, such as `1`, may yield better query plans in some cases, such as when statistics are frequently stale and inaccurate. #140270

Operational changes

-- Schema object identifiers (e.g., database names, schema names, table names, and function names) are no longer redacted when logging statements in the `EXEC` or `SQL_SCHEMA` channels. If redaction of these names is required, then the new cluster setting `sql.log.redact_names.enabled` can be set to `true`. The default value of the setting is `false`. [#138563][#138563] -- Schema object identifiers (e.g., table names, schema names, function names, and type names) are no longer redacted in the `SQL_SCHEMA` log channel. [#138563][#138563] -- Added the metric `sql.schema_changer.object_count`, which counts the number of objects in the cluster. [#138837][#138837] -- The `changefeed.max_behind_nanos` metric now supports scoping with metrics labels. [#139234][#139234] +- Schema object identifiers (e.g., database names, schema names, table names, and function names) are no longer redacted when logging statements in the `EXEC` or `SQL_SCHEMA` channels. If redaction of these names is required, then the new cluster setting `sql.log.redact_names.enabled` can be set to `true`. The default value of the setting is `false`. #138563 +- Schema object identifiers (e.g., table names, schema names, function names, and type names) are no longer redacted in the `SQL_SCHEMA` log channel. #138563 +- Added the metric `sql.schema_changer.object_count`, which counts the number of objects in the cluster. #138837 +- The `changefeed.max_behind_nanos` metric now supports scoping with metrics labels. #139234

DB Console changes

-- Added the `/debug/pprof/fgprof` endpoint to capture off-CPU stack traces. Use of this endpoint will have a noticable impact to performance while the endpoint is being triggered. [#138843][#138843] +- Added the `/debug/pprof/fgprof` endpoint to capture off-CPU stack traces. Use of this endpoint will have a noticeable impact to performance while the endpoint is being triggered. #138843

Bug fixes

-- `CLOSE CURSOR` statements are now allowed in read-only transactions, similar to PostgreSQL. The bug had been present since at least v23.1. [#137793][#137793] -- `ALTER BACKUP SCHEDULE` no longer fails on schedules whose collection URI contains a space. [#138082][#138082] -- Previously, `SHOW CREATE TABLE` was showing incorrect data for inverted indexes. It now shows the correct data that can be input to CockroachDB to recreate the same table. [#138083][#138083] -- Fixed a timing issue between `ALTER VIEW .. RENAME` and `DROP VIEW` that caused repeated failures in the `DROP VIEW` job. [#137889][#137889] -- Fixed a bug where querying the `pg_catalog.pg_constraint` table while the schema changer was dropping a constraint could result in a query error. [#137875][#137875] -- On the **Databases** page, users should no longer see console errors when visiting the **Databases** page directly after node/SQL pod startup. [#138377][#138377] -- In the **Databases** > **Tables** page, the `CREATE` statement will now show up as expected for tables with custom schema names. [#138378][#138378] -- Queries that perform a cast from the string representation of an array containing `GEOMETRY` or `GEOGRAPHY` types to a SQL `ARRAY` type will now succeed. [#138695][#138695] -- Previously, cluster backups taken in a multi-region cluster that had configured the system database with a region configuration could not be restored into a non-multi-region cluster. This is now fixed. [#138787][#138787] -- Fixed a bug that disregarded tuple labels in some cases. This could cause unexpected behavior, such as when converting a tuple to JSON with `to_jsonb`. See #136167 for more details. The incorrect removal of tuple labels bug was introduced in v22.1.0, and changes in v24.3.0 made unexpected behavior due to the bug more likely. 
[#138840][#138840] -- Previously, CockroachDB could encounter an internal error `comparison of two different versions of enum` in some cases when a user-defined type was modified within a transaction and the following statements read the column of that user-defined type. The bug was introduced in v24.2 and is now fixed. [#138052][#138052] -- Secondary tenants will no longer fatal when issuing HTTP requests during tenant startup. [#138755][#138755] -- Fixed a bug where columns created with `GENERATED ... AS IDENTITY` with the `SERIAL` type could incorrectly fail internal validations. [#139101][#139101] -- When the session variable `allow_role_memberships_to_change_during_transaction` is set, it is now possible to create and drop users quickly even when there are contending transactions on the `system.users` and `system.role_options` system tables. [#139032][#139032] -- Fixed a bug where the error `batch timestamp ... must be after replica GC threshold` could occur during a schema change backfill operation, and cause the schema change job to retry infinitely. Now this error is treated as permanent, and will cause the job to enter the `failed` state. [#139250][#139250] -- Fixed a bug that prevented the `CREATE` statement for a routine from being shown in a statement bundle. This happened when the routine was created on a schema other than `public`. The bug had existed since v23.1. 
[#136124][#136124] - -[#136124]: https://github.com/cockroachdb/cockroach/pull/136124 -[#137793]: https://github.com/cockroachdb/cockroach/pull/137793 -[#137875]: https://github.com/cockroachdb/cockroach/pull/137875 -[#137889]: https://github.com/cockroachdb/cockroach/pull/137889 -[#137923]: https://github.com/cockroachdb/cockroach/pull/137923 -[#138052]: https://github.com/cockroachdb/cockroach/pull/138052 -[#138082]: https://github.com/cockroachdb/cockroach/pull/138082 -[#138083]: https://github.com/cockroachdb/cockroach/pull/138083 -[#138097]: https://github.com/cockroachdb/cockroach/pull/138097 -[#138303]: https://github.com/cockroachdb/cockroach/pull/138303 -[#138377]: https://github.com/cockroachdb/cockroach/pull/138377 -[#138378]: https://github.com/cockroachdb/cockroach/pull/138378 -[#138563]: https://github.com/cockroachdb/cockroach/pull/138563 -[#138654]: https://github.com/cockroachdb/cockroach/pull/138654 -[#138695]: https://github.com/cockroachdb/cockroach/pull/138695 -[#138755]: https://github.com/cockroachdb/cockroach/pull/138755 -[#138787]: https://github.com/cockroachdb/cockroach/pull/138787 -[#138837]: https://github.com/cockroachdb/cockroach/pull/138837 -[#138840]: https://github.com/cockroachdb/cockroach/pull/138840 -[#138843]: https://github.com/cockroachdb/cockroach/pull/138843 -[#139032]: https://github.com/cockroachdb/cockroach/pull/139032 -[#139101]: https://github.com/cockroachdb/cockroach/pull/139101 -[#139234]: https://github.com/cockroachdb/cockroach/pull/139234 -[#139250]: https://github.com/cockroachdb/cockroach/pull/139250 -[#140265]: https://github.com/cockroachdb/cockroach/pull/140265 -[#140270]: https://github.com/cockroachdb/cockroach/pull/140270 -[#140175]: https://github.com/cockroachdb/cockroach/pull/140175 \ No newline at end of file +- `CLOSE CURSOR` statements are now allowed in read-only transactions, similar to PostgreSQL. The bug had been present since at least v23.1. 
#137793 +- `ALTER BACKUP SCHEDULE` no longer fails on schedules whose collection URI contains a space. #138082 +- Previously, `SHOW CREATE TABLE` was showing incorrect data for inverted indexes. It now shows the correct data that can be input to CockroachDB to recreate the same table. #138083 +- Fixed a timing issue between `ALTER VIEW .. RENAME` and `DROP VIEW` that caused repeated failures in the `DROP VIEW` job. #137889 +- Fixed a bug where querying the `pg_catalog.pg_constraint` table while the schema changer was dropping a constraint could result in a query error. #137875 +- On the **Databases** page, users should no longer see console errors when visiting the **Databases** page directly after node/SQL pod startup. #138377 +- In the **Databases** > **Tables** page, the `CREATE` statement will now show up as expected for tables with custom schema names. #138378 +- Queries that perform a cast from the string representation of an array containing `GEOMETRY` or `GEOGRAPHY` types to a SQL `ARRAY` type will now succeed. #138695 +- Previously, cluster backups taken in a multi-region cluster that had configured the system database with a region configuration could not be restored into a non-multi-region cluster. This is now fixed. #138787 +- Fixed a bug that disregarded tuple labels in some cases. This could cause unexpected behavior, such as when converting a tuple to JSON with `to_jsonb`. See #136167 for more details. The incorrect removal of tuple labels bug was introduced in v22.1.0, and changes in v24.3.0 made unexpected behavior due to the bug more likely. #138840 +- Previously, CockroachDB could encounter an internal error `comparison of two different versions of enum` in some cases when a user-defined type was modified within a transaction and the following statements read the column of that user-defined type. The bug was introduced in v24.2 and is now fixed. #138052 +- Secondary tenants will no longer fatal when issuing HTTP requests during tenant startup. 
#138755 +- Fixed a bug where columns created with `GENERATED ... AS IDENTITY` with the `SERIAL` type could incorrectly fail internal validations. #139101 +- When the session variable `allow_role_memberships_to_change_during_transaction` is set, it is now possible to create and drop users quickly even when there are contending transactions on the `system.users` and `system.role_options` system tables. #139032 +- Fixed a bug where the error `batch timestamp ... must be after replica GC threshold` could occur during a schema change backfill operation, and cause the schema change job to retry infinitely. Now this error is treated as permanent, and will cause the job to enter the `failed` state. #139250 +- Fixed a bug that prevented the `CREATE` statement for a routine from being shown in a statement bundle. This happened when the routine was created on a schema other than `public`. The bug had existed since v23.1. #136124 diff --git a/src/current/_includes/releases/v24.3/v24.3.6.md b/src/current/_includes/releases/v24.3/v24.3.6.md index c9cb630e9c0..00dc06dba24 100644 --- a/src/current/_includes/releases/v24.3/v24.3.6.md +++ b/src/current/_includes/releases/v24.3/v24.3.6.md @@ -6,10 +6,9 @@ Release Date: February 19, 2025

Bug fixes

-- Fixed a bug that could cause `SHOW TABLES` and other introspection operations to encounter a `"batch timestamp ... must be after replica GC threshold"` error. [#141655][#141655] +- Fixed a bug that could cause `SHOW TABLES` and other introspection operations to encounter a `"batch timestamp ... must be after replica GC threshold"` error. #141655 {{site.data.alerts.callout_danger}} This fix is present in [v24.3.4](#v24-3-4) and v24.3.6, but was **not** released in [v24.3.5](#v24-3-5). {{site.data.alerts.end}} -[#141655]: https://github.com/cockroachdb/cockroach/pull/141655 diff --git a/src/current/_includes/releases/v24.3/v24.3.7.md b/src/current/_includes/releases/v24.3/v24.3.7.md index 1ef7b098508..639417451e8 100644 --- a/src/current/_includes/releases/v24.3/v24.3.7.md +++ b/src/current/_includes/releases/v24.3/v24.3.7.md @@ -7,40 +7,40 @@ Release Date: March 6, 2025

SQL language changes

- Added support for a new index hint, `AVOID_FULL_SCAN`, which will prevent the optimizer from planning a full scan for the specified table if any other plan is possible. The hint can be used in the same way as other existing index hints. For example, `SELECT * FROM table_name@{AVOID_FULL_SCAN};`. This hint is similar to `NO_FULL_SCAN`, but will not error if a full scan cannot be avoided. Note that a full scan of a partial index would not normally be considered a "full scan" for the purposes of the `AVOID_FULL_SCAN` and `NO_FULL_SCAN` hints, but if the user has explicitly forced the partial index via `FORCE_INDEX=index_name`, it is considered a full scan. - [#140255][#140255] [#140998][#140998] + #140255 #140998 - Added the `optimizer_prefer_bounded_cardinality` session setting, which instructs the optimizer to prefer query plans where every expression has a guaranteed upper-bound on the number of rows it will process. This may help the optimizer produce better query plans in some cases. This setting is disabled by default. - [#140255][#140255] [#140998][#140998] + #140255 #140998 - Added the `optimizer_check_input_min_row_count` session setting to control the minimum row count estimate for buffer scans of foreign key and uniqueness checks. It defaults to `0`. - [#141375][#141375] + #141375 - Since v23.2, table statistics histograms have been collected for non-indexed JSON columns. Histograms are no longer collected for these columns if the `sql.stats.non_indexed_json_histograms.enabled` cluster setting is set to `false`. This reduces memory usage during table statistics collection, for both automatic and manual collection via `ANALYZE` and `CREATE STATISTICS`. - [#139897][#139897] [#140998][#140998] + #139897 #140998 - Fixed a bug that could cause unexpected errors with SQL `VALUES` clauses that contain function calls with multiple overloads. This bug existed only in pre-release versions of v25.1. 
- [#140646][#140646] + #140646 - Added the `optimizer_min_row_count` session setting. This setting sets a lower bound on row count estimates for relational expressions during query planning. A value of `0` (default) indicates no lower bound. If set to a value greater than `0`, a row count of zero can still be estimated for expressions with a cardinality of zero, e.g., for a contradictory filter. Setting this to a value greater than `0`, such as `1`, may yield better query plans in some cases, such as when statistics are frequently stale and inaccurate. - [#140255][#140255] [#140998][#140998] + #140255 #140998

Operational changes

- Reduced noise in dynamically provisioned logging sinks by logging flush errors at most once per minute. - [#139643][#139643] + #139643 - The `cockroach node decommission` CLI command now waits until the target node is drained before marking it as fully decommissioned. Previously, it would start the drain but not wait, leaving the target node briefly in a state where it would be unable to communicate with the cluster but would still accept client requests (which would then hang or hit unexpected errors). - [#139556][#139556] + #139556

Command-line changes

- Improved the performance of the `cockroach debug zip` command when retrieving data from `crdb_internal.transaction_contention_events`, reducing the likelihood of `memory budget exceeded` or `query execution canceled due to statement timeout` errors. - [#139754][#139754] + #139754

Bug fixes

- Previously, in changefeeds using CDC queries and the Parquet format, the output would include duplicate columns when it contained a user-defined primary key. Now, the columns are de-duplicated columns in the output when writing to Parquet. - [#140153][#140153] + #140153 - Fixed a bug where dropping a table with a trigger using the legacy schema changer could leave an orphaned reference in the descriptor. This occurred when two tables were dependent on each other via a trigger, and the table containing the trigger was dropped. - [#141179][#141179] + #141179 - Fixed a bug that could cause `SHOW TABLES` and other introspection operations to encounter a `"batch timestamp must be after replica GC threshold"` error. - [#140284][#140284] + #140284 - The Data Distribution and Zone Configs report on the DB Console Advanced Debug page will no longer crash if there are `NULL` values for the `raw_sql_config` column in the `crdb_internal.zones` table. - [#140661][#140661] + #140661 - Fixed possible index corruption caused by triggers that could occur when the following conditions were satisfied: 1. A query calls a user-defined function (UDF) or stored procedure, and also performs a mutation on a table. @@ -49,36 +49,18 @@ Release Date: March 6, 2025 3. The trigger modifies the same row as the outer statement. 4. Either the outer or inner mutation is something other than an `INSERT` without an `ON CONFLICT` clause. - [#138361][#138361] + #138361 - Fixed a bug where activating statement diagnostics sometimes appeared unresponsive, with no state or status update. The status now always indicates whether diagnostics are active or a statement bundle is available for download. - [#139585][#139585] + #139585 - Fixed a memory leak that could previously occur when evaluating some memory-intensive queries via the vectorized engine in CockroachDB. The leak had been present since v20.2. 
- [#139095][#139095] + #139095 - Fixed a bug that could cause `SHOW TABLES` and other introspection operations to encounter a `"batch timestamp must be after replica GC threshold"` error. - [#140084][#140084] + #140084 - Fixed a bug in the `kafka_sink_config` option for changefeeds where users were previously unable to set negative GZIP compression levels. Users can now configure the `CompressionLevel` field in the range of `[-2, 9]` where `-2` enables Huffman encoding and `-1` sets the default compression. - [#141037][#141037] + #141037 - Fixed a bug that would cause an internal error when the result of a `RECORD`-returning user-defined function (UDF) was wrapped by another expression (such as `COALESCE`) within a `VALUES` clause. - [#140646][#140646] + #140646 - Fixed a rare bug in which a query could fail with the error `could not find computed column expression for column in table` while dropping a virtual computed column from a table. This bug was introduced in v23.2.4. - [#139833][#139833] + #139833 -[#139897]: https://github.com/cockroachdb/cockroach/pull/139897 -[#139643]: https://github.com/cockroachdb/cockroach/pull/139643 -[#140153]: https://github.com/cockroachdb/cockroach/pull/140153 -[#138361]: https://github.com/cockroachdb/cockroach/pull/138361 -[#141179]: https://github.com/cockroachdb/cockroach/pull/141179 -[#140284]: https://github.com/cockroachdb/cockroach/pull/140284 -[#140998]: https://github.com/cockroachdb/cockroach/pull/140998 -[#140255]: https://github.com/cockroachdb/cockroach/pull/140255 -[#140646]: https://github.com/cockroachdb/cockroach/pull/140646 -[#140661]: https://github.com/cockroachdb/cockroach/pull/140661 -[#139585]: https://github.com/cockroachdb/cockroach/pull/139585 -[#139095]: https://github.com/cockroachdb/cockroach/pull/139095 -[#140084]: https://github.com/cockroachdb/cockroach/pull/140084 -[#141037]: https://github.com/cockroachdb/cockroach/pull/141037 -[#141375]: https://github.com/cockroachdb/cockroach/pull/141375 
-[#139556]: https://github.com/cockroachdb/cockroach/pull/139556 -[#139754]: https://github.com/cockroachdb/cockroach/pull/139754 -[#139833]: https://github.com/cockroachdb/cockroach/pull/139833 diff --git a/src/current/_includes/releases/v24.3/v24.3.8.md b/src/current/_includes/releases/v24.3/v24.3.8.md index e0b70ca918f..5735f3c2eb8 100644 --- a/src/current/_includes/releases/v24.3/v24.3.8.md +++ b/src/current/_includes/releases/v24.3/v24.3.8.md @@ -7,7 +7,6 @@ Release Date: March 12, 2025

Bug fixes

- Improved S3 credential caching for STS credentials to avoid exceeding the Amazon metadata service rate limit and encountering errors related to AssumeRole API calls when accessing large numbers of files in larger clusters. - [#142679][#142679] + #142679 -[#142679]: https://github.com/cockroachdb/cockroach/pull/142679 diff --git a/src/current/_includes/releases/v24.3/v24.3.9.md b/src/current/_includes/releases/v24.3/v24.3.9.md index e370d2af14a..1174ec70557 100644 --- a/src/current/_includes/releases/v24.3/v24.3.9.md +++ b/src/current/_includes/releases/v24.3/v24.3.9.md @@ -7,75 +7,54 @@ Release Date: April 2, 2025

SQL language changes

- Added the `WITH IGNORE_FOREIGN_KEYS` option to `SHOW CREATE TABLE`, which omits foreign key constraints from the output schema. This option is also allowed in `SHOW CREATE VIEW`, but has no effect. It cannot be combined with the `WITH REDACT` option. - [#143368][#143368] + #143368

Operational changes

- Added the `sql.transaction_timeout.count` metric to track the number of SQL statements that fail due to exceeding the transaction timeout. - [#142156][#142156] + #142156 - Added the `sql.statement_timeout.count` to track the number of SQL statements that fail due to exceeding the statement timeout. - [#142156][#142156] + #142156 - The `server.client_cert_expiration_cache.capacity` cluster setting has been removed. The `security.certificate.expiration.client` and `security.certificate.ttl.client` metrics now report the lowest value observed for a user in the last 24 hours. - [#143593][#143593] + #143593

Bug fixes

- Fixed a bug that prevented starting multi-table Logical Data Replication (LDR) streams on tables that used user-defined types. - [#141793][#141793] + #141793 - The TTL deletion job now includes a retry mechanism that progressively reduces the batch size when encountering contention. This improves the chances of successful deletion without requiring manual adjustments to TTL knobs. Also added the `jobs.row_level_ttl.num_delete_batch_retries` metric to track the number of times the TTL job had to reduce the batch size and try again. - [#142323][#142323] + #142323 - Fixed a bug when running with the `autocommit_before_ddl` session setting that could cause a runtime error when binding a previously prepared DDL statement. - [#141987][#141987] + #141987 - Fixed a bug where CockroachDB could incorrectly evaluate casts to some OID types (like `REGCLASS`) in some cases. The bug had been present since at least v22.1. - [#141958][#141958] + #141958 - Fixed a bug that could cause gateway nodes to panic when performing an `UPSERT` on a table with a `BOOL` primary key column and a partial index with the primary key column as the predicate expression. - [#141823][#141823] + #141823 - Fixed a bug that would prevent the `CREATE TRIGGER` and `DROP TRIGGER` statements from working if the `autocommit_before_ddl` setting was enabled, and if the statement was either sent as a prepared statement or as part of a batch of multiple statements. - [#142303][#142303] -- Fixed a bug that could cause `SHOW TABLES` and other introspection operations to encounter a `batch timestamp must be after replica GC threshold` error. [#141720][#141720] -- Fixed a bug where Physical Cluster Replication (PCR) reader catalogs could have orphaned rows in `system.namespace` after an object is renamed. [#142873][#142873] + #142303 +- Fixed a bug that could cause `SHOW TABLES` and other introspection operations to encounter a `batch timestamp must be after replica GC threshold` error. 
#141720 +- Fixed a bug where Physical Cluster Replication (PCR) reader catalogs could have orphaned rows in `system.namespace` after an object is renamed. #142873 - Fixed a bug that could cause `nil pointer dereference` errors when executing statements with user-defined functions (UDFs). The error could also occur when executing statements with some built-in functions, like `obj_description`. - [#141652][#141652] + #141652 - Fixed a bug in `v24.1.14`, `v24.3.7`, `v24.3.8`, and `v25.1` that could cause a nil-pointer error when a column's default expression contained a volatile expression (like `nextval`) as a UDF argument. - [#143635][#143635] + #143635 - Fixed a bug that could prevent `SHOW CREATE TABLE` from working if a database was offline (e.g., due to a `RESTORE` on that database). - [#141509][#141509] + #141509 - Fixed an issue where dropping a database with triggers could fail due to an undropped backreference to a trigger function. - [#142726][#142726] + #142726 - Fixed a bug where the declarative schema changer allowed `CREATE SEQUENCE` operations to proceed even while a `DROP SCHEMA` or `DROP DATABASE` was in progress. Such operations now retry if the parent object has a schema change in progress, preventing new child objects from being created under deleted parent objects. - [#142763][#142763] + #142763 - Fixed a potential deadlock that could occur during client certificate updates while metrics were being collected. This issue affected the reliability of certificate expiration reporting. - [#143593][#143593] + #143593 - Fixed a bug where the fraction completed and internal checkpoints during an index backfill operation would stop getting written if any of the periodic fraction/checkpoint write operations failed. Progress is now logged in addition to being written to the job record. This bug affected schema change operations such as creating an index or adding a non-nullable column to a table. 
- [#141787][#141787] + #141787 - Fixed a crash due to `use of enum metadata before hydration` when using logical data replication (LDR) with user-defined types. - [#143389][#143389] + #143389

Miscellaneous

- When configuring the `sql.ttl.default_delete_rate_limit` cluster setting, a notice is now displayed informing the user that the TTL rate limit is per leaseholder per table with a [link to the docs](https://www.cockroachlabs.com/docs/dev/row-level-ttl). - [#142833][#142833] -- Improved S3 credential caching for AWS Security Token Service (STS) credentials. [#142437][#142437] + #142833 +- Improved S3 credential caching for AWS Security Token Service (STS) credentials. #142437 -[#142833]: https://github.com/cockroachdb/cockroach/pull/142833 -[#142437]: https://github.com/cockroachdb/cockroach/pull/142437 -[#141823]: https://github.com/cockroachdb/cockroach/pull/141823 -[#141720]: https://github.com/cockroachdb/cockroach/pull/141720 -[#143635]: https://github.com/cockroachdb/cockroach/pull/143635 -[#141793]: https://github.com/cockroachdb/cockroach/pull/141793 -[#141987]: https://github.com/cockroachdb/cockroach/pull/141987 -[#142303]: https://github.com/cockroachdb/cockroach/pull/142303 -[#142873]: https://github.com/cockroachdb/cockroach/pull/142873 -[#141652]: https://github.com/cockroachdb/cockroach/pull/141652 -[#142590]: https://github.com/cockroachdb/cockroach/pull/142590 -[#143368]: https://github.com/cockroachdb/cockroach/pull/143368 -[#142156]: https://github.com/cockroachdb/cockroach/pull/142156 -[#141787]: https://github.com/cockroachdb/cockroach/pull/141787 -[#143389]: https://github.com/cockroachdb/cockroach/pull/143389 -[#143593]: https://github.com/cockroachdb/cockroach/pull/143593 -[#142323]: https://github.com/cockroachdb/cockroach/pull/142323 -[#142763]: https://github.com/cockroachdb/cockroach/pull/142763 -[#141958]: https://github.com/cockroachdb/cockroach/pull/141958 -[#141509]: https://github.com/cockroachdb/cockroach/pull/141509 -[#142726]: https://github.com/cockroachdb/cockroach/pull/142726 diff --git a/src/current/_includes/releases/v25.1/backward-incompatible.md b/src/current/_includes/releases/v25.1/backward-incompatible.md index 
656860fdd3c..45dc3c7396e 100644 --- a/src/current/_includes/releases/v25.1/backward-incompatible.md +++ b/src/current/_includes/releases/v25.1/backward-incompatible.md @@ -1,14 +1,14 @@ Before [upgrading to CockroachDB v25.1]({% link v25.1/upgrade-cockroach-version.md %}), be sure to review the following backward-incompatible changes, as well as [key cluster setting changes](#v25-1-0-cluster-settings), and adjust your deployment as necessary. -- The old `BACKUP TO`, `RESTORE FROM `, and `SHOW BACKUP IN ` syntaxes are now fully deprecated and no longer usable. [#133610][#133610] +- The old `BACKUP TO`, `RESTORE FROM `, and `SHOW BACKUP IN ` syntaxes are now fully deprecated and no longer usable. #133610 -- Altering a paused backup schedule's recurrence or location no longer resumes the schedule. [#134829][#134829] +- Altering a paused backup schedule's recurrence or location no longer resumes the schedule. #134829 -- `BACKUP`/`RESTORE` statements no longer return index entries and bytes backed up/restored. [#134516][#134516] +- `BACKUP`/`RESTORE` statements no longer return index entries and bytes backed up/restored. #134516 -- Introduced the `legacy_varchar_typing` session setting. If `on`, type checking and overload resolution for VARCHAR types ignore overloads that cause errors, allowing comparisons between VARCHAR and non-STRING-like placeholder values to execute successfully. If `off`, type checking of these comparisons is more strict and must be handled with explicit type casts. As of v25.1.0 this setting defaults to `off`. [#137844][#137844] +- Introduced the `legacy_varchar_typing` session setting. If `on`, type checking and overload resolution for VARCHAR types ignore overloads that cause errors, allowing comparisons between VARCHAR and non-STRING-like placeholder values to execute successfully. If `off`, type checking of these comparisons is more strict and must be handled with explicit type casts. As of v25.1.0 this setting defaults to `off`. 
#137844 -- Several metrics are redundant and have been removed. The following list maps each removed metric to an existing, identical metric. [#138786][#138786] +- Several metrics are redundant and have been removed. The following list maps each removed metric to an existing, identical metric. #138786 - Removed `sql.schema_changer.running`, which is redundant with `jobs.schema_change.currently_running`. - Removed `sql.schema_changer.successes`, which is redundant with `jobs.schema_change.resume_completed`. - Removed `sql.schema_changer.retry_errors`, which is redundant with `jobs.schema_change.resume_retry_error`. @@ -22,12 +22,5 @@ Before [upgrading to CockroachDB v25.1]({% link v25.1/upgrade-cockroach-version. ``` ALTER ROLE ALL SET autocommit_before_ddl = false; ``` - [#140156][#140156] + #140156 - -[#133610]: https://github.com/cockroachdb/cockroach/pull/133610 -[#134829]: https://github.com/cockroachdb/cockroach/pull/134829 -[#134516]: https://github.com/cockroachdb/cockroach/pull/134516 -[#137844]: https://github.com/cockroachdb/cockroach/pull/137844 -[#138786]: https://github.com/cockroachdb/cockroach/pull/138786 -[#140156]: https://github.com/cockroachdb/cockroach/pull/140156 \ No newline at end of file diff --git a/src/current/_includes/releases/v25.1/cluster-setting-changes.md b/src/current/_includes/releases/v25.1/cluster-setting-changes.md index 758d278d707..03ddc09766c 100644 --- a/src/current/_includes/releases/v25.1/cluster-setting-changes.md +++ b/src/current/_includes/releases/v25.1/cluster-setting-changes.md @@ -8,45 +8,45 @@ Changes to [cluster settings]({% link v25.1/cluster-settings.md %}) should be re
Settings added
-- `kv.transaction.max_intents_and_locks`: accepts an integer value for the maximum number of inserts or durable locks allowed for a single transactions. When set to the default of `0`, this limiting is disabled. [#135945][#135945] +- `kv.transaction.max_intents_and_locks`: accepts an integer value for the maximum number of inserts or durable locks allowed for a single transaction. When set to the default of `0`, this limiting is disabled. #135945 -- Schema object identifiers (e.g. database names, schema names, table names, and function names) are no longer redacted when logging statements in the `EXEC` or `SQL_SCHEMA` channels. If redaction of these names is required, then the new cluster setting `sql.log.redact_names.enabled` can be set to `true`. The default value of the setting is `false`. [#136897][#136897] +- Schema object identifiers (e.g. database names, schema names, table names, and function names) are no longer redacted when logging statements in the `EXEC` or `SQL_SCHEMA` channels. If redaction of these names is required, then the new cluster setting `sql.log.redact_names.enabled` can be set to `true`. The default value of the setting is `false`. #136897 -- Since v23.2, table statistics histograms have been collected for non-indexed JSON columns. Histograms are no longer collected for these columns. This reduces memory usage during table statistics collection, for both automatic and manual collection via `ANALYZE` and `CREATE STATISTICS`. The previous behavior can be re-enabled by setting the cluster setting `sql.stats.non_indexed_json_histograms.enabled` to `true`. [#139898][#139898] +- Since v23.2, table statistics histograms have been collected for non-indexed JSON columns. Histograms are no longer collected for these columns. This reduces memory usage during table statistics collection, for both automatic and manual collection via `ANALYZE` and `CREATE STATISTICS`. 
The previous behavior can be re-enabled by setting the cluster setting `sql.stats.non_indexed_json_histograms.enabled` to `true`. #139898 -- `ui.database_locality_metadata.enabled` allows operators to disable the loading of extended region information in the DB Console Database and Table pages. In versions prior to v24.3, this information can cause significant CPU load on large clusters with many ranges. When disabled, if customers require this data, they can use the query `SHOW RANGES FROM {DATABASE| TABLE}` to compute it on demand. [#133075][#133075] +- `ui.database_locality_metadata.enabled` allows operators to disable the loading of extended region information in the DB Console Database and Table pages. In versions prior to v24.3, this information can cause significant CPU load on large clusters with many ranges. When disabled, if customers require this data, they can use the query `SHOW RANGES FROM {DATABASE| TABLE}` to compute it on demand. #133075
Settings with changed defaults
-- The `kvadmission.flow_control.mode` default value has been changed from `apply_to_elastic` to `apply_to_all`. Regular writes are now subject to admission control by default, meaning that non-quorum required replicas may not be informed of new writes from the leader if they are unable to keep up. This brings a large performance improvement in scenarios with a large backlog of replication work toward one or more nodes, such as node restarts. The behavior can be reverted to the v24.3 and earlier default by changing the setting value to `apply_to_elastic`. [#133860][#133860] +- The `kvadmission.flow_control.mode` default value has been changed from `apply_to_elastic` to `apply_to_all`. Regular writes are now subject to admission control by default, meaning that non-quorum required replicas may not be informed of new writes from the leader if they are unable to keep up. This brings a large performance improvement in scenarios with a large backlog of replication work toward one or more nodes, such as node restarts. The behavior can be reverted to the v24.3 and earlier default by changing the setting value to `apply_to_elastic`. #133860 -- `kvadmission.store.snapshot_ingest_bandwidth_control.enabled` is now `true` by default. This will enable disk-bandwidth-based admission control for range snapshot ingests. It requires the provisioned bandwidth to be set using `kvadmission.store.provisioned_bandwidth`. [#137618][#137618] +- `kvadmission.store.snapshot_ingest_bandwidth_control.enabled` is now `true` by default. This will enable disk-bandwidth-based admission control for range snapshot ingests. It requires the provisioned bandwidth to be set using `kvadmission.store.provisioned_bandwidth`. #137618 -- `sql.stats.automatic_partial_collection.enabled` is now `true` by default. This enables automatic collection of partial table stats. Partial table stats (i.e. those created with `CREATE STATISTICS ... 
USING EXTREMES`) scan the lower and upper ends of indexes to collect statistics outside the range covered by the previous full statistics collection. [#133988][#133988] +- `sql.stats.automatic_partial_collection.enabled` is now `true` by default. This enables automatic collection of partial table stats. Partial table stats (i.e. those created with `CREATE STATISTICS ... USING EXTREMES`) scan the lower and upper ends of indexes to collect statistics outside the range covered by the previous full statistics collection. #133988 -- The default value for `trace.span_registry.enabled` has been changed from `true` to `false`. [#135682][#135682] +- The default value for `trace.span_registry.enabled` has been changed from `true` to `false`. #135682
Settings with changed visibility
The following settings are now marked `public` after previously being `reserved`. Reserved settings are not documented and their tuning by customers is not supported. -- `kv.bulk_io_write.min_capacity_remaining_fraction` is now public. It specifies the remaining store capacity fraction below which bulk ingestion requests are rejected. It defaults to `0.05`, and can be set between `0.04` and `0.3`. [#135779][#135779] +- `kv.bulk_io_write.min_capacity_remaining_fraction` is now public. It specifies the remaining store capacity fraction below which bulk ingestion requests are rejected. It defaults to `0.05`, and can be set between `0.04` and `0.3`. #135779
Renamed settings
-- Renamed `changefeed.min_highwater_advance` to `changefeed.resolved_timestamp.min_update_interval` to more accurately reflect its function. The previous name remains usable for backward compatibility. Its value is the minimum amount of time that must have elapsed since the last update of a changefeed's resolved timestamp before it is eligible to be updated again. With the default of `0s`, no minimum interval is enforced, though updates are still limited by the average time needed to checkpoint progress. [#138673][#138673] +- Renamed `changefeed.min_highwater_advance` to `changefeed.resolved_timestamp.min_update_interval` to more accurately reflect its function. The previous name remains usable for backward compatibility. Its value is the minimum amount of time that must have elapsed since the last update of a changefeed's resolved timestamp before it is eligible to be updated again. With the default of `0s`, no minimum interval is enforced, though updates are still limited by the average time needed to checkpoint progress. #138673 -- Renamed `changefeed.frontier_highwater_lag_checkpoint_threshold` to `changefeed.span_checkpoint.lag_threshold`. The previous name is still available for backward compatibility. [#139064][#139064] +- Renamed `changefeed.frontier_highwater_lag_checkpoint_threshold` to `changefeed.span_checkpoint.lag_threshold`. The previous name is still available for backward compatibility. #139064
Additional setting changes
-- Internal scans are now exempt from the `sql.defaults.disallow_full_table_scans.enabled` cluster setting. This allows index creation even when the setting is enabled. [#137681][#137681] +- Internal scans are now exempt from the `sql.defaults.disallow_full_table_scans.enabled` cluster setting. This allows index creation even when the setting is enabled. #137681 -- When `server.redact_sensitive_settings.enabled` is `true`, the same redaction logic for [Sensitive cluster settings]({% link v25.1/cluster-settings.md %}#sensitive-settings) that is used for `SHOW CLUSTER SETTINGS` now applies to the DB Console Cluster Settings page. [#139277][#139277] +- When `server.redact_sensitive_settings.enabled` is `true`, the same redaction logic for [Sensitive cluster settings]({% link v25.1/cluster-settings.md %}#sensitive-settings) that is used for `SHOW CLUSTER SETTINGS` now applies to the DB Console Cluster Settings page. #139277 -- Removed cluster setting `kv.rangefeed.scheduler.enabled`. The rangefeed scheduler is now unconditionally enabled. [#132825][#132825] +- Removed cluster setting `kv.rangefeed.scheduler.enabled`. The rangefeed scheduler is now unconditionally enabled. #132825 -- Removed cluster setting `sql.auth.resolve_membership_single_scan.enabled`. This was added in case it was necessary to revert back to the previous behavior for looking up role memberships, but this cluster setting has not been needed in practice since this was added in v23.1. [#135852][#135852] +- Removed cluster setting `sql.auth.resolve_membership_single_scan.enabled`. This was added in case it was necessary to revert back to the previous behavior for looking up role memberships, but this cluster setting has not been needed in practice since this was added in v23.1. #135852
Settings requiring operational changes
diff --git a/src/current/_includes/releases/v25.1/deprecations.md b/src/current/_includes/releases/v25.1/deprecations.md index 7a8f724c88d..2dabbcc6d4f 100644 --- a/src/current/_includes/releases/v25.1/deprecations.md +++ b/src/current/_includes/releases/v25.1/deprecations.md @@ -1,5 +1,4 @@ The following deprecations are announced in v25.1. -- The old `BACKUP TO`, `RESTORE FROM `, and `SHOW BACKUP IN ` syntaxes are now fully deprecated and no longer usable. [#133610][#133610] +- The old `BACKUP TO`, `RESTORE FROM `, and `SHOW BACKUP IN ` syntaxes are now fully deprecated and no longer usable. #133610 -[#133610]: https://github.com/cockroachdb/cockroach/pull/133610 diff --git a/src/current/_includes/releases/v25.1/upgrade-finalization.md b/src/current/_includes/releases/v25.1/upgrade-finalization.md index b215c310f8e..a72e439389f 100644 --- a/src/current/_includes/releases/v25.1/upgrade-finalization.md +++ b/src/current/_includes/releases/v25.1/upgrade-finalization.md @@ -1,9 +1,9 @@ During a major-version upgrade, certain features and performance improvements may not be available until the upgrade is finalized. In v25.1, these are: - A cluster must have an [Enterprise license]({% link v25.1/licensing-faqs.md %}#set-a-license) or a [trial license]({% link v25.1/licensing-faqs.md %}#obtain-a-license) set before an upgrade to v25.1 can be finalized. -- Support for XA transactions, which allow CockroachDB to participate in distributed transactions with other resources (e.g. databases or message queues) using a two-phase commit protocol. [#129448](https://github.com/cockroachdb/cockroach/pull/129448) +- Support for XA transactions, which allow CockroachDB to participate in distributed transactions with other resources (e.g. databases or message queues) using a two-phase commit protocol. #129448 - [`ALTER TABLE ... 
ALTER COLUMN TYPE`]({% link v25.1/alter-table.md %}#alter-column-data-types) is in [General Availability (GA)]({% link v25.1/cockroachdb-feature-availability.md %}#feature-availability-phases). - Jobs system changes: - - `SHOW JOBS` is now based on a new mechanism for storing information about the progress and status of running jobs. [#139230](https://github.com/cockroachdb/cockroach/pull/139230) - - `ALTER JOB ... OWNER TO` can now be used to transfer ownership of a job between users/roles. [#138139](https://github.com/cockroachdb/cockroach/pull/138139) - - Users can now always see and control (pause/resume/cancel) jobs that they own. [#138178](https://github.com/cockroachdb/cockroach/pull/138178) + - `SHOW JOBS` is now based on a new mechanism for storing information about the progress and status of running jobs. #139230 + - `ALTER JOB ... OWNER TO` can now be used to transfer ownership of a job between users/roles. #138139 + - Users can now always see and control (pause/resume/cancel) jobs that they own. #138178 diff --git a/src/current/_includes/releases/v25.1/v25.1.0-alpha.1.md b/src/current/_includes/releases/v25.1/v25.1.0-alpha.1.md index 664d7d6b9da..51234527c53 100644 --- a/src/current/_includes/releases/v25.1/v25.1.0-alpha.1.md +++ b/src/current/_includes/releases/v25.1/v25.1.0-alpha.1.md @@ -6,11 +6,11 @@ Release Date: December 19, 2024

Backward-incompatible changes

-- The old `BACKUP TO`, `RESTORE FROM `, and `SHOW BACKUP IN ` syntaxes are now fully deprecated and no longer usable. [#133610][#133610] +- The old `BACKUP TO`, `RESTORE FROM `, and `SHOW BACKUP IN ` syntaxes are now fully deprecated and no longer usable. #133610

Security updates

-- Added support for partial roles from LDAP synced group to be mapped to CockroachDB roles and ensure appropriate erroring for undesired behavior. [#135552][#135552] +- Added support for partial roles from LDAP synced group to be mapped to CockroachDB roles and ensure appropriate erroring for undesired behavior. #135552

General changes

@@ -20,175 +20,103 @@ Release Date: December 19, 2024 - `changefeed.sink_batch_hist_nanos` - `changefeed.flush_hist_nanos` - `changefeed.kafka_throttling_hist_nanos` -[#136265][#136265] -- Added support for multiple seed brokers in the new Kafka sink. [#136632][#136632] -- Added the new metric `distsender.rangefeed.catchup_ranges_waiting_client_side` that counts how many rangefeeds are waiting on the client-side limiter to start performing catchup scans. [#136741][#136741] -- Added `system.users` to the list of system tables that changefeeds protect with protected timestamps. This table is required for change data capture queries. [#133568][#133568] -- Added the `AWS_USE_PATH_STYLE` parameter to S3 URI parsing. [#136715][#136715] +#136265 +- Added support for multiple seed brokers in the new Kafka sink. #136632 +- Added the new metric `distsender.rangefeed.catchup_ranges_waiting_client_side` that counts how many rangefeeds are waiting on the client-side limiter to start performing catchup scans. #136741 +- Added `system.users` to the list of system tables that changefeeds protect with protected timestamps. This table is required for change data capture queries. #133568 +- Added the `AWS_USE_PATH_STYLE` parameter to S3 URI parsing. #136715

SQL language changes

-- Two new builtin functions, `crdb_internal.{lease_holder, range_stats}_with_errors`, include errors encountered while fetching leaseholder and range stats. These new builtins are used by the `crdb_internal.ranges` table, which includes a new column, `errors`, that combines the errors from the builtins. [#131232][#131232] -- The cluster setting `sql.stats.automatic_partial_collection.enabled` is now enabled by default, which enables automatic collection of partial table stats. Partial table stats (i.e., those created with `CREATE STATISTICS ... USING EXTREMES`) scan the lower and upper ends of indexes to collect statistics outside the range covered by the previous full statistics collection. [#133988][#133988] -- When triggers fire one another cyclically, the new `recursion_depth_limit` session variable now limits the depth of the recursion. By default, the limit is `1000` nested trigger executions. [#134498][#134498] -- The names of `BEFORE` triggers fired by a mutation now show up in the `EXPLAIN` output. The trigger-function invocations are visible in the output of verbose `EXPLAIN`. [#135556][#135556] -- `AFTER` triggers will now show up in the output of `EXPLAIN`, as well as `EXPLAIN ANALYZE`. [#135556][#135556] -- Added support for `SHOW TRIGGERS`, which displays the names of all triggers on a table, as well as whether each trigger is enabled. The user must have any privilege on the table, or be its owner. [#135778][#135778] -- Added support for `SHOW CREATE TRIGGER`, which displays the `CREATE` statement for a trigger. The user must have any privilege on the table, or be its owner. [#135778][#135778] -- Added an informational notice to the result of `CREATE TABLE ... AS` statements that describes that indexes and constraints are not copied to the new table. [#135845][#135845] -- Altering a column’s type no longer requires enabling the `enable_experimental_alter_column_type_general` session variable. This change makes the feature generally available. 
[#135936][#135936] -- Added support for `COLLATE` expressions on arrays of strings to match PostgreSQL more closely. [#133751][#133751] -- Added the column `readable_high_water_timestamp` to the output of `SHOW CHANGEFEED JOBS`. This human-readable form will be easier to consume. `high_water_timestamp` still exists and is in epoch nanoseconds. [#135623][#135623] -- The `sql_safe_updates` session variable must be disabled to perform `ALTER COLUMN TYPE` operations that require a column rewrite. [#136110][#136110] -- Added the `CREATE LOGICALLY REPLICATED` syntax that will direct logical data replication jobs to create the destination table(s) using a copy of the source table(s). [#136841][#136841] -- It is now possible to execute queries with correlated joins with sub-queries or common table expressions in both the `INNER` and `OUTER` context. Errors with the following message: `unimplemented: apply joins with subqueries in the "inner" and "outer" contexts are not supported` will no longer occur. [#136506][#136506] -- It is now possible to include a common table expression within the body of a user-defined function or stored procedure. [#136506][#136506] -- Updated the column name `targets` to `tables` in the `SHOW LOGICAL REPLICATION JOBS` responses. [#134339][#134339] +- Two new builtin functions, `crdb_internal.{lease_holder, range_stats}_with_errors`, include errors encountered while fetching leaseholder and range stats. These new builtins are used by the `crdb_internal.ranges` table, which includes a new column, `errors`, that combines the errors from the builtins. #131232 +- The cluster setting `sql.stats.automatic_partial_collection.enabled` is now enabled by default, which enables automatic collection of partial table stats. Partial table stats (i.e., those created with `CREATE STATISTICS ... USING EXTREMES`) scan the lower and upper ends of indexes to collect statistics outside the range covered by the previous full statistics collection. 
#133988 +- When triggers fire one another cyclically, the new `recursion_depth_limit` session variable now limits the depth of the recursion. By default, the limit is `1000` nested trigger executions. #134498 +- The names of `BEFORE` triggers fired by a mutation now show up in the `EXPLAIN` output. The trigger-function invocations are visible in the output of verbose `EXPLAIN`. #135556 +- `AFTER` triggers will now show up in the output of `EXPLAIN`, as well as `EXPLAIN ANALYZE`. #135556 +- Added support for `SHOW TRIGGERS`, which displays the names of all triggers on a table, as well as whether each trigger is enabled. The user must have any privilege on the table, or be its owner. #135778 +- Added support for `SHOW CREATE TRIGGER`, which displays the `CREATE` statement for a trigger. The user must have any privilege on the table, or be its owner. #135778 +- Added an informational notice to the result of `CREATE TABLE ... AS` statements that describes that indexes and constraints are not copied to the new table. #135845 +- Altering a column’s type no longer requires enabling the `enable_experimental_alter_column_type_general` session variable. This change makes the feature generally available. #135936 +- Added support for `COLLATE` expressions on arrays of strings to match PostgreSQL more closely. #133751 +- Added the column `readable_high_water_timestamp` to the output of `SHOW CHANGEFEED JOBS`. This human-readable form will be easier to consume. `high_water_timestamp` still exists and is in epoch nanoseconds. #135623 +- The `sql_safe_updates` session variable must be disabled to perform `ALTER COLUMN TYPE` operations that require a column rewrite. #136110 +- Added the `CREATE LOGICALLY REPLICATED` syntax that will direct logical data replication jobs to create the destination table(s) using a copy of the source table(s). 
#136841 +- It is now possible to execute queries with correlated joins with sub-queries or common table expressions in both the `INNER` and `OUTER` context. Errors with the following message: `unimplemented: apply joins with subqueries in the "inner" and "outer" contexts are not supported` will no longer occur. #136506 +- It is now possible to include a common table expression within the body of a user-defined function or stored procedure. #136506 +- Updated the column name `targets` to `tables` in the `SHOW LOGICAL REPLICATION JOBS` responses. #134339

Operational changes

-- Retired the cluster setting `kv.rangefeed.scheduler.enabled`. The rangefeed scheduler is now unconditionally enabled. [#132825][#132825] -- Added the cluster setting `ui.database_locality_metadata.enabled` that allows operators to disable loading extended database and table region information in the DB Console Database and Table pages. This information can cause significant CPU load on large clusters with many ranges. Versions of this page from v24.3 and later do not have this problem. If customers require this data, they can use the `SHOW RANGES FROM {DATABASE| TABLE}` query via SQL to compute on-demand. [#133075][#133075] -- The metrics scrape HTTP endpoint at `/ _status/vars` will now truncate `HELP` text at the first sentence, reducing the metadata for metrics with large descriptions. Descriptions are still accessible in the documentation. [#134724][#134724] -- The row-level TTL job will now periodically update the progress meter in the jobs introspection interfaces, including `SHOW JOBS` and the Jobs page in the DB console. [#135142][#135142] -- The `kv.bulk_io_write.min_capacity_remaining_fraction` cluster setting can be be set between `0.04` and `0.3`. [#135779][#135779] -- Added two new metrics, `sql.distsql.select.distributed_exec.count` and `sql.distsql.select.distributed_exec.count.internal`. These metrics count the number of `SELECT` statements that actually execute with full or partial distribution. These metrics differ from `sql.distsql.select.count` and `sql.distsql.select.count.internal` in that the latter count the number of `SELECT` statements that are **planned** with full or partial distribution, but might not necessarily execute with full or partial distribution, depending on the location of data. [#135236][#135236] -- Added the new metric `sql.distsql.distributed_exec.count` that counts the number of invocations of the execution engine with full or partial distribution. 
(This is in contrast to `sql.distsql.queries.total`, which counts the total number of invocations of the execution engine.) [#135236][#135236] +- Retired the cluster setting `kv.rangefeed.scheduler.enabled`. The rangefeed scheduler is now unconditionally enabled. #132825 +- Added the cluster setting `ui.database_locality_metadata.enabled` that allows operators to disable loading extended database and table region information in the DB Console Database and Table pages. This information can cause significant CPU load on large clusters with many ranges. Versions of this page from v24.3 and later do not have this problem. If customers require this data, they can use the `SHOW RANGES FROM {DATABASE|TABLE}` query via SQL to compute on-demand. #133075 +- The metrics scrape HTTP endpoint at `/_status/vars` will now truncate `HELP` text at the first sentence, reducing the metadata for metrics with large descriptions. Descriptions are still accessible in the documentation. #134724 +- The row-level TTL job will now periodically update the progress meter in the jobs introspection interfaces, including `SHOW JOBS` and the Jobs page in the DB console. #135142 +- The `kv.bulk_io_write.min_capacity_remaining_fraction` cluster setting can be set between `0.04` and `0.3`. #135779 +- Added two new metrics, `sql.distsql.select.distributed_exec.count` and `sql.distsql.select.distributed_exec.count.internal`. These metrics count the number of `SELECT` statements that actually execute with full or partial distribution. These metrics differ from `sql.distsql.select.count` and `sql.distsql.select.count.internal` in that the latter count the number of `SELECT` statements that are **planned** with full or partial distribution, but might not necessarily execute with full or partial distribution, depending on the location of data. 
#135236 +- Added the new metric `sql.distsql.distributed_exec.count` that counts the number of invocations of the execution engine with full or partial distribution. (This is in contrast to `sql.distsql.queries.total`, which counts the total number of invocations of the execution engine.) #135236 - Added some clarification that the following metrics count invocations of the execution engine and not SQL queries (which could each result in multiple invocations of the execution engine): - `sql.distsql.queries.active` - `sql.distsql.queries.total` - - `sql.distsql.distributed_exec.count` [#135236][#135236] -- The default value for the cluster setting `trace.span_registry.enabled` has been changed from `true` to `false`. [#135682][#135682] -- Removed the `sql.auth.resolve_membership_single_scan.enabled` cluster setting. This was added in case it was necessary to revert back to the previous behavior for looking up role memberships, but this cluster setting has not been needed in practice since this was added in v23.1. [#135852][#135852] -- Telemetry delivery is now considered successful even in cases where CockroachDB experiences a network timeout. This will prevent throttling in cases outside an operator's control. [#136219][#136219] -- When a schema change job is completed, rolls back, or encounters a failure, the time taken since the job began is now logged in a structured log in the `SQL_SCHEMA` log channel. [#136899][#136899] + - `sql.distsql.distributed_exec.count` #135236 +- The default value for the cluster setting `trace.span_registry.enabled` has been changed from `true` to `false`. #135682 +- Removed the `sql.auth.resolve_membership_single_scan.enabled` cluster setting. This was added in case it was necessary to revert back to the previous behavior for looking up role memberships, but this cluster setting has not been needed in practice since this was added in v23.1. 
#135852 +- Telemetry delivery is now considered successful even in cases where CockroachDB experiences a network timeout. This will prevent throttling in cases outside an operator's control. #136219 +- When a schema change job is completed, rolls back, or encounters a failure, the time taken since the job began is now logged in a structured log in the `SQL_SCHEMA` log channel. #136899

DB Console changes

-- As of v25.1 the legacy Database page, which was previously available through the Advanced Debug page, is no longer available. [#134005][#134005] -- When activating statement diagnostics in the DB Console, users now have the option to produce a redacted bundle as output. This bundle will omit sensitive data. [#134723][#134723] -- Fixed a list of UI bugs on the DB Console Overview and Node Overview pages. [#135293][#135293] -- Removed the link for the legacy table page on the Plan Details page. [#136311][#136311] -- Changed the table and index contents of the Hot Ranges page in DB console. [#134106][#134106] +- As of v25.1 the legacy Database page, which was previously available through the Advanced Debug page, is no longer available. #134005 +- When activating statement diagnostics in the DB Console, users now have the option to produce a redacted bundle as output. This bundle will omit sensitive data. #134723 +- Fixed a list of UI bugs on the DB Console Overview and Node Overview pages. #135293 +- Removed the link for the legacy table page on the Plan Details page. #136311 +- Changed the table and index contents of the Hot Ranges page in DB console. #134106

Bug fixes

-- Fixed a bug that prevented the `CREATE` statement for a routine from being shown in a statement bundle. This happened when the routine was created on a schema other than `public`. The bug has existed since v23.1. [#132147][#132147] -- Reduced the duration of partitions in the gossip network when a node crashes in order to eliminate false positives in the `ranges.unavailable` metric. [#133300][#133300] -- Fixed a bug that caused incorrect evaluation of some binary expressions involving `CHAR(N)` values and untyped string literals with trailing whitespace characters. For example, the expression `'f'::CHAR = 'f '` now correctly evaluates to `true`. [#133037][#133037] -- Fixed a race condition in Sarama when Kafka throttling is enabled. [#133563][#133563] -- Fixed a metrics bug in rangefeed restarts introduced in v23.2. [#133947][#133947] -- Fixed a bug that could result in incorrect metrics related to retryable rangefeed errors. [#133991][#133991] -- Fixed a bug that could cause `DELETE` triggers not to fire on cascading `DELETE`, and which could cause `INSERT` triggers to match incorrectly in the same scenario. [#134759][#134759] -- Non-`admin` users that run `DROP ROLE IF EXISTS` on a user that does not exist will no longer receive an error message. [#134850][#134850] -- Fixed a bug where CockroachDB would encounter an internal error when evaluating `FETCH ABSOLUTE 0` statements. The bug had been present since v22.1. [#134919][#134919] -- Fixed a bug that could cause incorrect query results when the optimizer planned a lookup join on an index containing a column of type `CHAR(N)`, `VARCHAR(N)`, `BIT(N)`, `VARBIT(N)`, or `DECIMAL(M, N)`, and the query held that column constant to a single value (e.g., with an equality filter). [#134891][#134891] -- Fixed a bug that prevented restoring cluster backups taken in a multi-region cluster that had configured the `system` database with a region configuration into a non-multi-region cluster. 
[#134604][#134604] -- Fixed an unhandled error that would occur if `DROP SCHEMA` was executed on the `public` schema of the `system` database, or on an internal schema like `pg_catalog` or `information_schema`. [#135149][#135149] -- `ALTER DATABASE` operations no longer hang when the operation modifies the zone config if an invalid zone config already exists. [#135168][#135168] -- CockroachDB now correctly evaluates `percentile_cont` and `percentile_disc` aggregates over `FLOAT4` values. [#135130][#135130] -- The schema changer's backfill process now includes a retry mechanism that reduces the batch size when memory issues occur. This improves the likelihood of operation success without requiring manual adjustment of the `bulko.index_backfill.batch_size` cluster setting. [#135563][#135563] -- `CREATE SCHEMA` now returns the correct error if a the schema name is missing. [#135910][#135910] -- Fixed an issue where corrupted table statistics could cause the `cockroach` process to crash. [#135944][#135944] -- Table statistics collection in CockroachDB could previously run into `no bytes in account to release` errors in some edge cases (when the SQL memory budget, configured via `--max-sql-memory` flag, was close to being exhausted). The bug has been present since v21.2 and is now fixed. [#135970][#135970] -- `security.certificate.*` metrics will now be updated if a node loads new certificates while running. [#135596][#135596] -- A table that is participating in logical data replication can no longer be dropped. Previously, this was allowed, which would cause all the replicated rows to end up in the dead-letter queue. [#136172][#136172] -- `ALTER COLUMN SET NOT NULL` was not enforced consistently when the table was created in the same transaction. [#136298][#136298] -- `CREATE` relation / type could leave dangling namespace entries if the schema was concurrently being dropped. 
[#136325][#136325] -- The `idle_in_session_timeout` session variable now excludes the time spent waiting for schema changer jobs to complete, preventing unintended session termination during schema change operations. [#136463][#136463] -- Fixed a bug that causes the optimizer to use stale table statistics after altering an `ENUM` type used in the table. [#136538][#136538] -- CockroachDB now better respects the `statement_timeout` limit on queries involving the top K sort and merge join operations. [#136631][#136631] -- Fixed a bug that would cause the `make_timestamp` and `make_timestamptz` builtin functions to incorrectly extract the `seconds` argument if the value was less than `1`. [#136804][#136804] +- Fixed a bug that prevented the `CREATE` statement for a routine from being shown in a statement bundle. This happened when the routine was created on a schema other than `public`. The bug has existed since v23.1. #132147 +- Reduced the duration of partitions in the gossip network when a node crashes in order to eliminate false positives in the `ranges.unavailable` metric. #133300 +- Fixed a bug that caused incorrect evaluation of some binary expressions involving `CHAR(N)` values and untyped string literals with trailing whitespace characters. For example, the expression `'f'::CHAR = 'f '` now correctly evaluates to `true`. #133037 +- Fixed a race condition in Sarama when Kafka throttling is enabled. #133563 +- Fixed a metrics bug in rangefeed restarts introduced in v23.2. #133947 +- Fixed a bug that could result in incorrect metrics related to retryable rangefeed errors. #133991 +- Fixed a bug that could cause `DELETE` triggers not to fire on cascading `DELETE`, and which could cause `INSERT` triggers to match incorrectly in the same scenario. #134759 +- Non-`admin` users that run `DROP ROLE IF EXISTS` on a user that does not exist will no longer receive an error message. 
#134850 +- Fixed a bug where CockroachDB would encounter an internal error when evaluating `FETCH ABSOLUTE 0` statements. The bug had been present since v22.1. #134919 +- Fixed a bug that could cause incorrect query results when the optimizer planned a lookup join on an index containing a column of type `CHAR(N)`, `VARCHAR(N)`, `BIT(N)`, `VARBIT(N)`, or `DECIMAL(M, N)`, and the query held that column constant to a single value (e.g., with an equality filter). #134891 +- Fixed a bug that prevented restoring cluster backups taken in a multi-region cluster that had configured the `system` database with a region configuration into a non-multi-region cluster. #134604 +- Fixed an unhandled error that would occur if `DROP SCHEMA` was executed on the `public` schema of the `system` database, or on an internal schema like `pg_catalog` or `information_schema`. #135149 +- `ALTER DATABASE` operations no longer hang when the operation modifies the zone config if an invalid zone config already exists. #135168 +- CockroachDB now correctly evaluates `percentile_cont` and `percentile_disc` aggregates over `FLOAT4` values. #135130 +- The schema changer's backfill process now includes a retry mechanism that reduces the batch size when memory issues occur. This improves the likelihood of operation success without requiring manual adjustment of the `bulko.index_backfill.batch_size` cluster setting. #135563 +- `CREATE SCHEMA` now returns the correct error if the schema name is missing. #135910 +- Fixed an issue where corrupted table statistics could cause the `cockroach` process to crash. #135944 +- Table statistics collection in CockroachDB could previously run into `no bytes in account to release` errors in some edge cases (when the SQL memory budget, configured via `--max-sql-memory` flag, was close to being exhausted). The bug has been present since v21.2 and is now fixed. 
#135970 +- `security.certificate.*` metrics will now be updated if a node loads new certificates while running. #135596 +- A table that is participating in logical data replication can no longer be dropped. Previously, this was allowed, which would cause all the replicated rows to end up in the dead-letter queue. #136172 +- `ALTER COLUMN SET NOT NULL` was not enforced consistently when the table was created in the same transaction. #136298 +- `CREATE` relation / type could leave dangling namespace entries if the schema was concurrently being dropped. #136325 +- The `idle_in_session_timeout` session variable now excludes the time spent waiting for schema changer jobs to complete, preventing unintended session termination during schema change operations. #136463 +- Fixed a bug that causes the optimizer to use stale table statistics after altering an `ENUM` type used in the table. #136538 +- CockroachDB now better respects the `statement_timeout` limit on queries involving the top K sort and merge join operations. #136631 +- Fixed a bug that would cause the `make_timestamp` and `make_timestamptz` builtin functions to incorrectly extract the `seconds` argument if the value was less than `1`. #136804 - Fixed possible index corruption caused by triggers that could occur when the following conditions were satisfied: 1. A query calls a user-defined function or stored procedure, and also performs a mutation on a table. 2. The user-defined function or storage procedure contains a statement that either fires an `AFTER` trigger, or fires a `CASCADE` that itself fires a trigger. 3. The trigger modifies the same row as the outer statement. - 4. Either the outer or inner mutation is something other than an `INSERT` without an `ON CONFLICT` clause. [#136076][#136076] -- Fixed an issue where license enforcement was not consistently disabled for single-node clusters started with `cockroach start-single-node`, ensuring proper behavior on cluster restarts. 
[#136926][#136926] -- Fixed a bug that caused an incorrect filesystem to be logged as part of the store information. [#137024][#137024] + 4. Either the outer or inner mutation is something other than an `INSERT` without an `ON CONFLICT` clause. #136076 +- Fixed an issue where license enforcement was not consistently disabled for single-node clusters started with `cockroach start-single-node`, ensuring proper behavior on cluster restarts. #136926 +- Fixed a bug that caused an incorrect filesystem to be logged as part of the store information. #137024

Performance improvements

-- The `/_status/nodes_ui` API no longer returns unnecessary metrics in its response. This decreases the payload size of the API and improves the load time of various DB Console pages and components. [#135186][#135186] -- Performance for some PL/pgSQL loops is now significantly improved, by as much as 3–4 times. This is due to applying tail-call optimization in more cases to the recursive sub-routines that implement loops. [#135145][#135145] -- Improved the internal caching logic for role membership information. This reduces the latency impact of commands such as `DROP ROLE`, `CREATE ROLE`, and `GRANT role TO user`, which cause the role membership cache to be invalidated. [#135852][#135852] -- The session variable `plan_cache_mode` now defaults to `auto`, enabling generic query plans for some queries. [#135668][#135668] +- The `/_status/nodes_ui` API no longer returns unnecessary metrics in its response. This decreases the payload size of the API and improves the load time of various DB Console pages and components. #135186 +- Performance for some PL/pgSQL loops is now significantly improved, by as much as 3–4 times. This is due to applying tail-call optimization in more cases to the recursive sub-routines that implement loops. #135145 +- Improved the internal caching logic for role membership information. This reduces the latency impact of commands such as `DROP ROLE`, `CREATE ROLE`, and `GRANT role TO user`, which cause the role membership cache to be invalidated. #135852 +- The session variable `plan_cache_mode` now defaults to `auto`, enabling generic query plans for some queries. #135668 - This change is reverted in v25.1.0-rc.1, so this note can be disregarded when running the latest testing release and v25.1 production releases, unless otherwise noted. -- GRPC streams are now pooled across unary intra-cluster RPCs, allowing for reuse of gRPC resources to reduce the cost of remote key-value layer access. 
This pooling can be disabled using the `rpc.batch_stream_pool.enabled` cluster setting. [#136648][#136648] +- gRPC streams are now pooled across unary intra-cluster RPCs, allowing for reuse of gRPC resources to reduce the cost of remote key-value layer access. This pooling can be disabled using the `rpc.batch_stream_pool.enabled` cluster setting. #136648 - This information was updated in the v25.1.0-beta.2 release notes.

Multi-tenancy

-- The `nodes` endpoint should work for `shared` secondary tenants. Since nodes are common to all the tenants, this API endpoint behaves similarly to the system tenant's endpoint. [#131644][#131644] - -[#131232]: https://github.com/cockroachdb/cockroach/pull/131232 -[#131644]: https://github.com/cockroachdb/cockroach/pull/131644 -[#132147]: https://github.com/cockroachdb/cockroach/pull/132147 -[#132825]: https://github.com/cockroachdb/cockroach/pull/132825 -[#133037]: https://github.com/cockroachdb/cockroach/pull/133037 -[#133075]: https://github.com/cockroachdb/cockroach/pull/133075 -[#133080]: https://github.com/cockroachdb/cockroach/pull/133080 -[#133215]: https://github.com/cockroachdb/cockroach/pull/133215 -[#133300]: https://github.com/cockroachdb/cockroach/pull/133300 -[#133563]: https://github.com/cockroachdb/cockroach/pull/133563 -[#133568]: https://github.com/cockroachdb/cockroach/pull/133568 -[#133610]: https://github.com/cockroachdb/cockroach/pull/133610 -[#133751]: https://github.com/cockroachdb/cockroach/pull/133751 -[#133947]: https://github.com/cockroachdb/cockroach/pull/133947 -[#133988]: https://github.com/cockroachdb/cockroach/pull/133988 -[#133991]: https://github.com/cockroachdb/cockroach/pull/133991 -[#134005]: https://github.com/cockroachdb/cockroach/pull/134005 -[#134106]: https://github.com/cockroachdb/cockroach/pull/134106 -[#134339]: https://github.com/cockroachdb/cockroach/pull/134339 -[#134498]: https://github.com/cockroachdb/cockroach/pull/134498 -[#134604]: https://github.com/cockroachdb/cockroach/pull/134604 -[#134723]: https://github.com/cockroachdb/cockroach/pull/134723 -[#134724]: https://github.com/cockroachdb/cockroach/pull/134724 -[#134759]: https://github.com/cockroachdb/cockroach/pull/134759 -[#134850]: https://github.com/cockroachdb/cockroach/pull/134850 -[#134891]: https://github.com/cockroachdb/cockroach/pull/134891 -[#134919]: https://github.com/cockroachdb/cockroach/pull/134919 -[#135130]: 
https://github.com/cockroachdb/cockroach/pull/135130 -[#135142]: https://github.com/cockroachdb/cockroach/pull/135142 -[#135145]: https://github.com/cockroachdb/cockroach/pull/135145 -[#135149]: https://github.com/cockroachdb/cockroach/pull/135149 -[#135168]: https://github.com/cockroachdb/cockroach/pull/135168 -[#135186]: https://github.com/cockroachdb/cockroach/pull/135186 -[#135236]: https://github.com/cockroachdb/cockroach/pull/135236 -[#135293]: https://github.com/cockroachdb/cockroach/pull/135293 -[#135552]: https://github.com/cockroachdb/cockroach/pull/135552 -[#135556]: https://github.com/cockroachdb/cockroach/pull/135556 -[#135563]: https://github.com/cockroachdb/cockroach/pull/135563 -[#135596]: https://github.com/cockroachdb/cockroach/pull/135596 -[#135623]: https://github.com/cockroachdb/cockroach/pull/135623 -[#135668]: https://github.com/cockroachdb/cockroach/pull/135668 -[#135682]: https://github.com/cockroachdb/cockroach/pull/135682 -[#135778]: https://github.com/cockroachdb/cockroach/pull/135778 -[#135779]: https://github.com/cockroachdb/cockroach/pull/135779 -[#135845]: https://github.com/cockroachdb/cockroach/pull/135845 -[#135852]: https://github.com/cockroachdb/cockroach/pull/135852 -[#135910]: https://github.com/cockroachdb/cockroach/pull/135910 -[#135936]: https://github.com/cockroachdb/cockroach/pull/135936 -[#135944]: https://github.com/cockroachdb/cockroach/pull/135944 -[#135970]: https://github.com/cockroachdb/cockroach/pull/135970 -[#136076]: https://github.com/cockroachdb/cockroach/pull/136076 -[#136110]: https://github.com/cockroachdb/cockroach/pull/136110 -[#136172]: https://github.com/cockroachdb/cockroach/pull/136172 -[#136219]: https://github.com/cockroachdb/cockroach/pull/136219 -[#136265]: https://github.com/cockroachdb/cockroach/pull/136265 -[#136298]: https://github.com/cockroachdb/cockroach/pull/136298 -[#136311]: https://github.com/cockroachdb/cockroach/pull/136311 -[#136325]: 
https://github.com/cockroachdb/cockroach/pull/136325 -[#136463]: https://github.com/cockroachdb/cockroach/pull/136463 -[#136506]: https://github.com/cockroachdb/cockroach/pull/136506 -[#136538]: https://github.com/cockroachdb/cockroach/pull/136538 -[#136631]: https://github.com/cockroachdb/cockroach/pull/136631 -[#136632]: https://github.com/cockroachdb/cockroach/pull/136632 -[#136648]: https://github.com/cockroachdb/cockroach/pull/136648 -[#136715]: https://github.com/cockroachdb/cockroach/pull/136715 -[#136741]: https://github.com/cockroachdb/cockroach/pull/136741 -[#136804]: https://github.com/cockroachdb/cockroach/pull/136804 -[#136841]: https://github.com/cockroachdb/cockroach/pull/136841 -[#136899]: https://github.com/cockroachdb/cockroach/pull/136899 -[#136926]: https://github.com/cockroachdb/cockroach/pull/136926 -[#137024]: https://github.com/cockroachdb/cockroach/pull/137024 \ No newline at end of file +- The `nodes` endpoint should work for `shared` secondary tenants. Since nodes are common to all the tenants, this API endpoint behaves similarly to the system tenant's endpoint. #131644 diff --git a/src/current/_includes/releases/v25.1/v25.1.0-alpha.2.md b/src/current/_includes/releases/v25.1/v25.1.0-alpha.2.md index 73e7b4545f2..b1809181785 100644 --- a/src/current/_includes/releases/v25.1/v25.1.0-alpha.2.md +++ b/src/current/_includes/releases/v25.1/v25.1.0-alpha.2.md @@ -6,12 +6,12 @@ Release Date: January 9, 2025

Backward-incompatible changes

-- Altering a paused backup schedule's recurrence or location no longer resumes the schedule. [#134829][#134829] -- `BACKUP`/`RESTORE` statements no longer return index entries and bytes backed up/restored. [#134516][#134516] +- Altering a paused backup schedule's recurrence or location no longer resumes the schedule. #134829 +- `BACKUP`/`RESTORE` statements no longer return index entries and bytes backed up/restored. #134516

General changes

-- The PTS (protected timestamp) records of running changefeeds are now updated when the set of targets changes, such as when system tables are added to the protected tables list. [#137548][#137548] +- The PTS (protected timestamp) records of running changefeeds are now updated when the set of targets changes, such as when system tables are added to the protected tables list. #137548

SQL language changes

@@ -20,133 +20,57 @@ Release Date: January 9, 2025 - The top K sort over a large set of rows (`1000` by default, controlled via the `distribute_sort_row_count_threshold` session variable) will now force the plan distribution. - Full table scans estimated to read a certain number of rows (fewer than `10000` by default, controlled via the new `distribute_scan_row_count_threshold` session variable) no longer force the plan distribution. - The new `always_distribute_full_scans` session variable now defaults to `true` to match the previous behavior of always distributing full scans. - - Large constrained table scans estimated to read a certain number of rows (at least `10000` by default, controlled via the `distribute_scan_row_count_threshold` session variable) will now force the plan distribution. [#137072][#137072] - - Hash and merge joins for which both inputs produce a small number of rows (less than `1000` combined by default, configurable via the `distribute_join_row_count_threshold` session variable) no longer force the plan distribution. [#137562][#137562] -- `DELETE` statements now acquire locks using the `FOR UPDATE` locking mode during their initial row scan in some cases, which improves performance for contended workloads. This behavior is configurable using the `enable_implicit_select_for_update` session variable. [#137069][#137069] -- Added support for `RETURNS TABLE` syntax when creating a user-defined function (UDF). [#137251][#137251] -- Added support for XA transactions, which allow CockroachDB to participate in distributed transactions with other resources (e.g., databases, message queues, etc.) using a two-phase commit protocol. [#129448][#129448] -- Added the `legacy_varchar_typing` session setting, which reverts the changes of [#133037](https://github.com/cockroachdb/cockroach/pull/133037) that causes the change in typing behavior described in [#137837](https://github.com/cockroachdb/cockroach/pull/137837). 
Specifically, it makes type-checking and overload resolution ignore the newly added "unpreferred" overloads. This setting defaults to `off`. [#137844][#137844] -- Added support for a new index hint, `AVOID_FULL_SCAN`, which will prevent the optimizer from planning a full scan for the specified table if any other plan is possible. The hint can be used in the same way as other existing index hints. For example, `SELECT * FROM table_name@{AVOID_FULL_SCAN};`. This hint is similar to `NO_FULL_SCAN`, but will not error if a full scan cannot be avoided. Note that a full scan of a partial index would not normally be considered a "full scan" for the purposes of the `AVOID_FULL_SCAN` and `NO_FULL_SCAN` hints, but if the user has explicitly forced the partial index via `FORCE_INDEX=index_name`, it is considered a full scan. [#137984][#137984] -- Added a new session setting `avoid_full_table_scans_in_mutations`, which when set to `true` (default), causes the optimizer to avoid planning full table scans for mutation queries if any other plan is possible. [#137984][#137984] -- `ALTER JOB ... OWNER TO` can now be used to transfer ownership of a job between users/roles. [#138139][#138139] + - Large constrained table scans estimated to read a certain number of rows (at least `10000` by default, controlled via the `distribute_scan_row_count_threshold` session variable) will now force the plan distribution. #137072 + - Hash and merge joins for which both inputs produce a small number of rows (less than `1000` combined by default, configurable via the `distribute_join_row_count_threshold` session variable) no longer force the plan distribution. #137562 +- `DELETE` statements now acquire locks using the `FOR UPDATE` locking mode during their initial row scan in some cases, which improves performance for contended workloads. This behavior is configurable using the `enable_implicit_select_for_update` session variable. 
#137069 +- Added support for `RETURNS TABLE` syntax when creating a user-defined function (UDF). #137251 +- Added support for XA transactions, which allow CockroachDB to participate in distributed transactions with other resources (e.g., databases, message queues, etc.) using a two-phase commit protocol. #129448 +- Added the `legacy_varchar_typing` session setting, which reverts the changes of #133037 that causes the change in typing behavior described in #137837. Specifically, it makes type-checking and overload resolution ignore the newly added "unpreferred" overloads. This setting defaults to `off`. #137844 +- Added support for a new index hint, `AVOID_FULL_SCAN`, which will prevent the optimizer from planning a full scan for the specified table if any other plan is possible. The hint can be used in the same way as other existing index hints. For example, `SELECT * FROM table_name@{AVOID_FULL_SCAN};`. This hint is similar to `NO_FULL_SCAN`, but will not error if a full scan cannot be avoided. Note that a full scan of a partial index would not normally be considered a "full scan" for the purposes of the `AVOID_FULL_SCAN` and `NO_FULL_SCAN` hints, but if the user has explicitly forced the partial index via `FORCE_INDEX=index_name`, it is considered a full scan. #137984 +- Added a new session setting `avoid_full_table_scans_in_mutations`, which when set to `true` (default), causes the optimizer to avoid planning full table scans for mutation queries if any other plan is possible. #137984 +- `ALTER JOB ... OWNER TO` can now be used to transfer ownership of a job between users/roles. #138139

Operational changes

-- Added a new `sql.exec.latency.detail` histogram metric. This metric is labeled with its statement fingerprint. Enable this feature using the `sql.stats.detailed_latency_metrics.enabled` application setting. For workloads with over a couple thousand fingerprints, we advise caution in enabling `sql.stats.detailed_latency_metrics.enabled`. For most workloads, this ranges from dozens to hundreds. Use the new `sql.query.unique.count` count metric to estimate the cardinality of the set of all statement fingerprints. [#135924][#135924] -- Added a new configurable cluster setting `kv.transaction.max_intents_and_locks` that prevents transactions from creating too many intents. [#135945][#135945] -- Added the metric `txn.count_limit_rejected`, which tracks the KV transactions that have been aborted because they exceeded the max number of writes and locking reads allowed. [#135945][#135945] -- Added the metric `txn.count_limit_on_response`, which tracks the number of KV transactions that have exceeded the count limit on a response. [#135945][#135945] -- Cluster setting `kvadmission.store.snapshot_ingest_bandwidth_control.enabled` is now `true` by default. This will enable disk-bandwidth-based admission control for range snapshot ingests. It requires the provisioned bandwidth to be set using `kvadmission.store.provisioned_bandwidth`. [#137618][#137618] -- The `changefeed.max_behind_nanos` metric now supports scoping with metric labels. [#137534][#137534] +- Added a new `sql.exec.latency.detail` histogram metric. This metric is labeled with its statement fingerprint. Enable this feature using the `sql.stats.detailed_latency_metrics.enabled` application setting. For workloads with over a couple thousand fingerprints, we advise caution in enabling `sql.stats.detailed_latency_metrics.enabled`. For most workloads, this ranges from dozens to hundreds. Use the new `sql.query.unique.count` count metric to estimate the cardinality of the set of all statement fingerprints. 
#135924 +- Added a new configurable cluster setting `kv.transaction.max_intents_and_locks` that prevents transactions from creating too many intents. #135945 +- Added the metric `txn.count_limit_rejected`, which tracks the KV transactions that have been aborted because they exceeded the max number of writes and locking reads allowed. #135945 +- Added the metric `txn.count_limit_on_response`, which tracks the number of KV transactions that have exceeded the count limit on a response. #135945 +- Cluster setting `kvadmission.store.snapshot_ingest_bandwidth_control.enabled` is now `true` by default. This will enable disk-bandwidth-based admission control for range snapshot ingests. It requires the provisioned bandwidth to be set using `kvadmission.store.provisioned_bandwidth`. #137618 +- The `changefeed.max_behind_nanos` metric now supports scoping with metric labels. #137534

Command-line changes

-- Previously, the `--include-files` and `--exclude-files` file filters in `cockroach debug zip` only applied to heap profiles, CPU profiles, goroutines, and logs. The filters now apply to most of the cluster-wide and per-node data captured in the debug zip. This improves `debug zip` performance. Example command: `cockroach debug zip debug.zip --redact --insecure --include-files="*" --exclude-files="*.log"`. [#136098][#136098] +- Previously, the `--include-files` and `--exclude-files` file filters in `cockroach debug zip` only applied to heap profiles, CPU profiles, goroutines, and logs. The filters now apply to most of the cluster-wide and per-node data captured in the debug zip. This improves `debug zip` performance. Example command: `cockroach debug zip debug.zip --redact --insecure --include-files="*" --exclude-files="*.log"`. #136098

DB Console changes

-- Copy-pasting links to preset timescale views on the DB Console **Metrics** page now reflects those presets accurately (e.g., a URL looking at "last 6 hours" will always show the last 6 hours and update automatically). Clicking the **Now** button on the **Metrics** page will automatically select the live updating preset most closely matching the current inverval. If you are viewing an arbitrary 4-hour interval, the "last 6 hours" preset will be selected. [#136170][#136170] +- Copy-pasting links to preset timescale views on the DB Console **Metrics** page now reflects those presets accurately (e.g., a URL looking at "last 6 hours" will always show the last 6 hours and update automatically). Clicking the **Now** button on the **Metrics** page will automatically select the live updating preset most closely matching the current interval. If you are viewing an arbitrary 4-hour interval, the "last 6 hours" preset will be selected. #136170

Bug fixes

-- Fixed a bug that caused queries against tables with user-defined types to sometimes fail with errors after restoring those tables. [#137242][#137242] -- `REGIONAL BY ROW` tables with uniqueness constraints where the region is not part of those uniqueness constraints, and which also contain non-unique indexes, will now have that uniqueness properly enforced when modified under `READ COMMITTED` isolation. This bug was introduced in v24.3.0. [#137361][#137361] -- Fixed a bug existing since v24.1 that would cause a set-returning UDF with `OUT` parameters to return a single row. [#137251][#137251] -- Previously, if a `STORED` computed column was added and it was a fixed-size type such as `VARCHAR(2)`, the computed values would not be checked to make sure they were not too large for the type. Now this validation is performed, which prevents an invalid computed column definition from being added to a table. [#137299][#137299] -- Previously, if a `VIRTUAL` computed column was added and it was a fixed-size type such as `VARCHAR(2)`, the computed values would not be checked to make sure they were not too large for the type. Now this validation is performed, which prevents an invalid computed column definition from being added to a table. [#137299][#137299] -- Removed duplicate columns in the Parquet output from changefeeds using CDC queries. [#136718][#136718] -- Addressed a potential memory leak when parsing client session parameters for new connections. [#137627][#137627] -- Fixed an issue where adding an existing column with the `IF NOT EXISTS` option could exit too early, skipping necessary handling of the abstract syntax tree (AST). This could cause the `ALTER TABLE ... ADD COLUMN` statement to fail. [#137633][#137633] -- Fixed an issue where a schema change could incorrectly cause a changefeed to fail with an assertion error like `received boundary timestamp ... of type ... before reaching existing boundary of type ...`. 
[#137567][#137567] -- Internal scans are now exempt from the `sql.defaults.disallow_full_table_scans.enabled` cluster setting. This allows index creation even when the setting is enabled. [#137681][#137681] -- Fixed a bug that would cause an internal error when the result of a `RECORD`-returning `UDF` was wrapped by another expression (such as `COALESCE`) within a `VALUES` clause. [#129706][#129706] -- `CLOSE CURSOR` statements are now allowed in read-only transactions, similar to PostgreSQL. The bug has been present since at least v23.1. [#137744][#137744] -- Fixed a bug where querying the `pg_catalog.pg_constraint` table while the schema changer was dropping a constraint could result in a query error. [#137860][#137860] -- Fixed a timing issue between `ALTER VIEW ... RENAME` and `DROP VIEW` that caused repeated failures in the `DROP VIEW` job. [#137868][#137868] -- The `pg_catalog.pg_type` table no longer contains `NULL` values for the columns `typinput`, `typoutput`, `typreceive`, and `typsend`. `NULL` values were erroneously added for these columns for the `trigger` type in v24.3.0. This could cause unexpected errors with some client libraries. [#137928][#137928] -- `ALTER BACKUP SCHEDULE` no longer fails on schedules whose collection URI contains a space. [#137948][#137948] -- Previously in some cases, CockroachDB could encounter an internal error `comparison of two different versions of enum` when a user-defined type was modified within a transaction and the following statements read the column of that user-defined type. The bug was introduced in v24.2 and is now fixed. [#137960][#137960] -- Previously `SHOW CREATE TABLE` was showing incorrect data with regard to inverted indexes. It now shows the correct data that can be repeatedly entered back into CockroachDB to recreate the same table. [#138043][#138043] -- Resolved an issue in the Kafka sink configuration within CockroachDB, where users were previously unable to set negative GZIP compression levels. 
Now, users can configure the `CompressionLevel` for the Kafka sink in the range of `[-2, 9]`. [#137646][#137646] -- Users should no longer see console errors when visiting the **Databases** page directly after node/SQL pod startup. [#137993][#137993] +- Fixed a bug that caused queries against tables with user-defined types to sometimes fail with errors after restoring those tables. #137242 +- `REGIONAL BY ROW` tables with uniqueness constraints where the region is not part of those uniqueness constraints, and which also contain non-unique indexes, will now have that uniqueness properly enforced when modified under `READ COMMITTED` isolation. This bug was introduced in v24.3.0. #137361 +- Fixed a bug existing since v24.1 that would cause a set-returning UDF with `OUT` parameters to return a single row. #137251 +- Previously, if a `STORED` computed column was added and it was a fixed-size type such as `VARCHAR(2)`, the computed values would not be checked to make sure they were not too large for the type. Now this validation is performed, which prevents an invalid computed column definition from being added to a table. #137299 +- Previously, if a `VIRTUAL` computed column was added and it was a fixed-size type such as `VARCHAR(2)`, the computed values would not be checked to make sure they were not too large for the type. Now this validation is performed, which prevents an invalid computed column definition from being added to a table. #137299 +- Removed duplicate columns in the Parquet output from changefeeds using CDC queries. #136718 +- Addressed a potential memory leak when parsing client session parameters for new connections. #137627 +- Fixed an issue where adding an existing column with the `IF NOT EXISTS` option could exit too early, skipping necessary handling of the abstract syntax tree (AST). This could cause the `ALTER TABLE ... ADD COLUMN` statement to fail. 
#137633 +- Fixed an issue where a schema change could incorrectly cause a changefeed to fail with an assertion error like `received boundary timestamp ... of type ... before reaching existing boundary of type ...`. #137567 +- Internal scans are now exempt from the `sql.defaults.disallow_full_table_scans.enabled` cluster setting. This allows index creation even when the setting is enabled. #137681 +- Fixed a bug that would cause an internal error when the result of a `RECORD`-returning `UDF` was wrapped by another expression (such as `COALESCE`) within a `VALUES` clause. #129706 +- `CLOSE CURSOR` statements are now allowed in read-only transactions, similar to PostgreSQL. The bug has been present since at least v23.1. #137744 +- Fixed a bug where querying the `pg_catalog.pg_constraint` table while the schema changer was dropping a constraint could result in a query error. #137860 +- Fixed a timing issue between `ALTER VIEW ... RENAME` and `DROP VIEW` that caused repeated failures in the `DROP VIEW` job. #137868 +- The `pg_catalog.pg_type` table no longer contains `NULL` values for the columns `typinput`, `typoutput`, `typreceive`, and `typsend`. `NULL` values were erroneously added for these columns for the `trigger` type in v24.3.0. This could cause unexpected errors with some client libraries. #137928 +- `ALTER BACKUP SCHEDULE` no longer fails on schedules whose collection URI contains a space. #137948 +- Previously in some cases, CockroachDB could encounter an internal error `comparison of two different versions of enum` when a user-defined type was modified within a transaction and the following statements read the column of that user-defined type. The bug was introduced in v24.2 and is now fixed. #137960 +- Previously `SHOW CREATE TABLE` was showing incorrect data with regard to inverted indexes. It now shows the correct data that can be repeatedly entered back into CockroachDB to recreate the same table. 
#138043 +- Resolved an issue in the Kafka sink configuration within CockroachDB, where users were previously unable to set negative GZIP compression levels. Now, users can configure the `CompressionLevel` for the Kafka sink in the range of `[-2, 9]`. #137646 +- Users should no longer see console errors when visiting the **Databases** page directly after node/SQL pod startup. #137993

Performance improvements

-- The default value of cluster setting `kvadmission.flow_control.mode` has been changed from `apply_to_elastic` to `apply_to_all`. Regular writes are now subject to admission control by default, meaning that non-quorum required replicas may not be told about new writes from the leader if they are unable to keep up. This brings a large performance improvement during instances where there is a large backlog of replication work towards a subset of node(s), such as node restarts. The setting can be reverted to the v24.3 and earlier default by setting `kvadmission.flow_control.mode` to `apply_to_elastic`. [#133860][#133860] - -[#129448]: https://github.com/cockroachdb/cockroach/pull/129448 -[#129706]: https://github.com/cockroachdb/cockroach/pull/129706 -[#133860]: https://github.com/cockroachdb/cockroach/pull/133860 -[#134516]: https://github.com/cockroachdb/cockroach/pull/134516 -[#134829]: https://github.com/cockroachdb/cockroach/pull/134829 -[#135924]: https://github.com/cockroachdb/cockroach/pull/135924 -[#135945]: https://github.com/cockroachdb/cockroach/pull/135945 -[#136098]: https://github.com/cockroachdb/cockroach/pull/136098 -[#136170]: https://github.com/cockroachdb/cockroach/pull/136170 -[#136718]: https://github.com/cockroachdb/cockroach/pull/136718 -[#137069]: https://github.com/cockroachdb/cockroach/pull/137069 -[#137072]: https://github.com/cockroachdb/cockroach/pull/137072 -[#137080]: https://github.com/cockroachdb/cockroach/pull/137080 -[#137216]: https://github.com/cockroachdb/cockroach/pull/137216 -[#137242]: https://github.com/cockroachdb/cockroach/pull/137242 -[#137251]: https://github.com/cockroachdb/cockroach/pull/137251 -[#137277]: https://github.com/cockroachdb/cockroach/pull/137277 -[#137278]: https://github.com/cockroachdb/cockroach/pull/137278 -[#137299]: https://github.com/cockroachdb/cockroach/pull/137299 -[#137311]: https://github.com/cockroachdb/cockroach/pull/137311 -[#137312]: https://github.com/cockroachdb/cockroach/pull/137312 
-[#137361]: https://github.com/cockroachdb/cockroach/pull/137361 -[#137432]: https://github.com/cockroachdb/cockroach/pull/137432 -[#137534]: https://github.com/cockroachdb/cockroach/pull/137534 -[#137548]: https://github.com/cockroachdb/cockroach/pull/137548 -[#137562]: https://github.com/cockroachdb/cockroach/pull/137562 -[#137567]: https://github.com/cockroachdb/cockroach/pull/137567 -[#137618]: https://github.com/cockroachdb/cockroach/pull/137618 -[#137627]: https://github.com/cockroachdb/cockroach/pull/137627 -[#137633]: https://github.com/cockroachdb/cockroach/pull/137633 -[#137641]: https://github.com/cockroachdb/cockroach/pull/137641 -[#137646]: https://github.com/cockroachdb/cockroach/pull/137646 -[#137681]: https://github.com/cockroachdb/cockroach/pull/137681 -[#137744]: https://github.com/cockroachdb/cockroach/pull/137744 -[#137748]: https://github.com/cockroachdb/cockroach/pull/137748 -[#137751]: https://github.com/cockroachdb/cockroach/pull/137751 -[#137752]: https://github.com/cockroachdb/cockroach/pull/137752 -[#137754]: https://github.com/cockroachdb/cockroach/pull/137754 -[#137755]: https://github.com/cockroachdb/cockroach/pull/137755 -[#137831]: https://github.com/cockroachdb/cockroach/pull/137831 -[#137844]: https://github.com/cockroachdb/cockroach/pull/137844 -[#137850]: https://github.com/cockroachdb/cockroach/pull/137850 -[#137860]: https://github.com/cockroachdb/cockroach/pull/137860 -[#137868]: https://github.com/cockroachdb/cockroach/pull/137868 -[#137927]: https://github.com/cockroachdb/cockroach/pull/137927 -[#137928]: https://github.com/cockroachdb/cockroach/pull/137928 -[#137930]: https://github.com/cockroachdb/cockroach/pull/137930 -[#137948]: https://github.com/cockroachdb/cockroach/pull/137948 -[#137960]: https://github.com/cockroachdb/cockroach/pull/137960 -[#137984]: https://github.com/cockroachdb/cockroach/pull/137984 -[#137993]: https://github.com/cockroachdb/cockroach/pull/137993 -[#138043]: 
https://github.com/cockroachdb/cockroach/pull/138043 -[#138139]: https://github.com/cockroachdb/cockroach/pull/138139 -[05f8645d4]: https://github.com/cockroachdb/cockroach/commit/05f8645d4 -[0c54b58e4]: https://github.com/cockroachdb/cockroach/commit/0c54b58e4 -[0ed446640]: https://github.com/cockroachdb/cockroach/commit/0ed446640 -[17f161901]: https://github.com/cockroachdb/cockroach/commit/17f161901 -[1859d7674]: https://github.com/cockroachdb/cockroach/commit/1859d7674 -[47771b169]: https://github.com/cockroachdb/cockroach/commit/47771b169 -[51b3f09b0]: https://github.com/cockroachdb/cockroach/commit/51b3f09b0 -[7c758d431]: https://github.com/cockroachdb/cockroach/commit/7c758d431 -[876036c5f]: https://github.com/cockroachdb/cockroach/commit/876036c5f -[8cb26df65]: https://github.com/cockroachdb/cockroach/commit/8cb26df65 -[95025dbd5]: https://github.com/cockroachdb/cockroach/commit/95025dbd5 -[9c5816acf]: https://github.com/cockroachdb/cockroach/commit/9c5816acf -[a1de65ee3]: https://github.com/cockroachdb/cockroach/commit/a1de65ee3 -[a8d331eef]: https://github.com/cockroachdb/cockroach/commit/a8d331eef -[ae04da909]: https://github.com/cockroachdb/cockroach/commit/ae04da909 -[ae3ea37f5]: https://github.com/cockroachdb/cockroach/commit/ae3ea37f5 -[aefb58262]: https://github.com/cockroachdb/cockroach/commit/aefb58262 -[c34f407ee]: https://github.com/cockroachdb/cockroach/commit/c34f407ee -[c67e40dbf]: https://github.com/cockroachdb/cockroach/commit/c67e40dbf -[d81743d9b]: https://github.com/cockroachdb/cockroach/commit/d81743d9b -[db826f826]: https://github.com/cockroachdb/cockroach/commit/db826f826 -[e8d19c442]: https://github.com/cockroachdb/cockroach/commit/e8d19c442 -[ef1ff4e15]: https://github.com/cockroachdb/cockroach/commit/ef1ff4e15 +- The default value of cluster setting `kvadmission.flow_control.mode` has been changed from `apply_to_elastic` to `apply_to_all`. 
Regular writes are now subject to admission control by default, meaning that non-quorum required replicas may not be told about new writes from the leader if they are unable to keep up. This brings a large performance improvement during instances where there is a large backlog of replication work towards a subset of node(s), such as node restarts. The setting can be reverted to the v24.3 and earlier default by setting `kvadmission.flow_control.mode` to `apply_to_elastic`. #133860 + diff --git a/src/current/_includes/releases/v25.1/v25.1.0-alpha.3.md b/src/current/_includes/releases/v25.1/v25.1.0-alpha.3.md index d31f0793583..7e126be8813 100644 --- a/src/current/_includes/releases/v25.1/v25.1.0-alpha.3.md +++ b/src/current/_includes/releases/v25.1/v25.1.0-alpha.3.md @@ -6,7 +6,7 @@ Release Date: January 15, 2025

Backward-incompatible changes

-- Several metrics are redundant and have been removed. The following list maps each removed metric to an existing, identical metric. [#138786][#138786] +- Several metrics are redundant and have been removed. The following list maps each removed metric to an existing, identical metric. #138786 - Removed `sql.schema_changer.running`, which is redundant with `jobs.schema_change.currently_running`. - Removed `sql.schema_changer.successes`, which is redundant with `jobs.schema_change.resume_completed`. - Removed `sql.schema_changer.retry_errors`, which is redundant with `jobs.schema_change.resume_retry_error`. @@ -14,59 +14,36 @@ Release Date: January 15, 2025

General changes

-- When changefeeds are created with a resolved option lower than the `min_checkpoint_frequency` option, a notice is now printed, since this may lead to unexpected behavior. [#138181][#138181] -- CockroachDB binaries are now built with profile-guided optimization (PGO) enabled. [#138700][#138700] +- When changefeeds are created with a resolved option lower than the `min_checkpoint_frequency` option, a notice is now printed, since this may lead to unexpected behavior. #138181 +- CockroachDB binaries are now built with profile-guided optimization (PGO) enabled. #138700

SQL language changes

-- Users can now always see and control (pause/resume/cancel) jobs that they own. [#138178][#138178] -- CockroachDB now provides different options for `CREATE LOGICALLY REPLICATED TABLE`: `UNIDIRECTIONAL` and `BIDIRECTIONAL ON`. These options are used for `CREATE LOGICALLY REPLICATED TABLE`, but not `CREATE LOGICAL REPLICATION STREAM`. [#138244][#138244] -- `CHANGEFEED`s using named external connections now automatically update their configuration when the connection configuration changes. [#138237][#138237] -- Added support for `DO` statements embedded within PL/pgSQL routines and other `DO` statements. `DO` statements execute a block of code inline as an anonymous function. Currently, only a PL/pgSQL body is allowed. [#138709][#138709] -- Added support for `DO` statements in SQL, which allow a PL/pgSQL code block to be executed inline. [#138709][#138709] +- Users can now always see and control (pause/resume/cancel) jobs that they own. #138178 +- CockroachDB now provides different options for `CREATE LOGICALLY REPLICATED TABLE`: `UNIDIRECTIONAL` and `BIDIRECTIONAL ON`. These options are used for `CREATE LOGICALLY REPLICATED TABLE`, but not `CREATE LOGICAL REPLICATION STREAM`. #138244 +- `CHANGEFEED`s using named external connections now automatically update their configuration when the connection configuration changes. #138237 +- Added support for `DO` statements embedded within PL/pgSQL routines and other `DO` statements. `DO` statements execute a block of code inline as an anonymous function. Currently, only a PL/pgSQL body is allowed. #138709 +- Added support for `DO` statements in SQL, which allow a PL/pgSQL code block to be executed inline. #138709

Operational changes

-- If a row-level TTL job is scheduled to run and the previous scheduled job for that table is still running, the scheduled run will now be skipped rather than waiting for the previous job to complete. [#138336][#138336] -- Schema object identifiers (e.g., database names, schema names, table names, and function names) are no longer redacted when logging statements in the `EXEC` or `SQL_SCHEMA` channels. If redaction of these names is required, then the new cluster setting `sql.log.redact_names.enabled` can be set to `true`. The default value of the setting is `false`. [#136897][#136897] -- Object identifiers such as table names, schema names, function names, and type names are no longer redacted in the `SQL_SCHEMA` log channel. [#136897][#136897] -- Changed the default value of the cluster setting `admission.l0_file_count_overload_threshold` to `4000`. [#138699][#138699] -- Introduced a metric, `sql.schema_changer.object_count`, that counts the number of schema objects in the cluster. [#138786][#138786] -- Renamed the `changefeed.min_highwater_advance` cluster setting to `changefeed.resolved_timestamp.min_update_interval` to more accurately reflect its function. Its description in the automatically generated documentation has also been updated. The previous name remains usable for backward compatibility. [#138673][#138673] +- If a row-level TTL job is scheduled to run and the previous scheduled job for that table is still running, the scheduled run will now be skipped rather than waiting for the previous job to complete. #138336 +- Schema object identifiers (e.g., database names, schema names, table names, and function names) are no longer redacted when logging statements in the `EXEC` or `SQL_SCHEMA` channels. If redaction of these names is required, then the new cluster setting `sql.log.redact_names.enabled` can be set to `true`. The default value of the setting is `false`. 
#136897 +- Object identifiers such as table names, schema names, function names, and type names are no longer redacted in the `SQL_SCHEMA` log channel. #136897 +- Changed the default value of the cluster setting `admission.l0_file_count_overload_threshold` to `4000`. #138699 +- Introduced a metric, `sql.schema_changer.object_count`, that counts the number of schema objects in the cluster. #138786 +- Renamed the `changefeed.min_highwater_advance` cluster setting to `changefeed.resolved_timestamp.min_update_interval` to more accurately reflect its function. Its description in the automatically generated documentation has also been updated. The previous name remains usable for backward compatibility. #138673

DB Console changes

-- Added a `/debug/pprof/fgprof` endpoint to capture off-CPU stack traces. Use of this endpoint will have a noticeable impact on performance while the endpoint is being triggered. [#138779][#138779] +- Added a `/debug/pprof/fgprof` endpoint to capture off-CPU stack traces. Use of this endpoint will have a noticeable impact on performance while the endpoint is being triggered. #138779

Bug fixes

-- In the v2 **Databases > Table** page, the `CREATE` statement will now show up as expected for tables with custom schema names. [#138294][#138294] -- Fixed issues with the virtual index scan on `crdb_internal.create_type_statements`, ensuring consistent results when querying user-defined types (UDTs) across databases. [#138295][#138295] -- Queries that perform a cast from the string representation of an array containing geometry or geography types to a SQL array type will now succeed. [#138557][#138557] -- Fixed a bug that disregarded tuple labels in some cases. This could cause unexpected behavior, such as when converting a tuple to JSON with `to_jsonb`. See [#136167][#136167] for more details. This incorrect removal of tuple labels bug was introduced in v22.1.0, and changes in v24.3.0 made unexpected behavior due to the bug more likely. [#138791][#138791] - -- Fixed a bug where locks were taken on the system tables `system.users` and `system.role_options` even when `allow_role_memberships_to_change_during_transaction` was set. Now, users are able to create and drop users quickly when `allow_role_memberships_to_change_during_transaction` is set, even if there are contending transactions on `system.users` and `system.role_options`. 
[#137940][#137940] - -[#136167]: https://github.com/cockroachdb/cockroach/pull/136167 -[#136897]: https://github.com/cockroachdb/cockroach/pull/136897 -[#137750]: https://github.com/cockroachdb/cockroach/pull/137750 -[#137940]: https://github.com/cockroachdb/cockroach/pull/137940 -[#138178]: https://github.com/cockroachdb/cockroach/pull/138178 -[#138181]: https://github.com/cockroachdb/cockroach/pull/138181 -[#138223]: https://github.com/cockroachdb/cockroach/pull/138223 -[#138237]: https://github.com/cockroachdb/cockroach/pull/138237 -[#138244]: https://github.com/cockroachdb/cockroach/pull/138244 -[#138294]: https://github.com/cockroachdb/cockroach/pull/138294 -[#138295]: https://github.com/cockroachdb/cockroach/pull/138295 -[#138332]: https://github.com/cockroachdb/cockroach/pull/138332 -[#138334]: https://github.com/cockroachdb/cockroach/pull/138334 -[#138336]: https://github.com/cockroachdb/cockroach/pull/138336 -[#138343]: https://github.com/cockroachdb/cockroach/pull/138343 -[#138557]: https://github.com/cockroachdb/cockroach/pull/138557 -[#138673]: https://github.com/cockroachdb/cockroach/pull/138673 -[#138699]: https://github.com/cockroachdb/cockroach/pull/138699 -[#138700]: https://github.com/cockroachdb/cockroach/pull/138700 -[#138709]: https://github.com/cockroachdb/cockroach/pull/138709 -[#138779]: https://github.com/cockroachdb/cockroach/pull/138779 -[#138786]: https://github.com/cockroachdb/cockroach/pull/138786 -[#138791]: https://github.com/cockroachdb/cockroach/pull/138791 +- In the v2 **Databases > Table** page, the `CREATE` statement will now show up as expected for tables with custom schema names. #138294 +- Fixed issues with the virtual index scan on `crdb_internal.create_type_statements`, ensuring consistent results when querying user-defined types (UDTs) across databases. #138295 +- Queries that perform a cast from the string representation of an array containing geometry or geography types to a SQL array type will now succeed. 
#138557 +- Fixed a bug that disregarded tuple labels in some cases. This could cause unexpected behavior, such as when converting a tuple to JSON with `to_jsonb`. See #136167 for more details. This incorrect removal of tuple labels bug was introduced in v22.1.0, and changes in v24.3.0 made unexpected behavior due to the bug more likely. #138791 + +- Fixed a bug where locks were taken on the system tables `system.users` and `system.role_options` even when `allow_role_memberships_to_change_during_transaction` was set. Now, users are able to create and drop users quickly when `allow_role_memberships_to_change_during_transaction` is set, even if there are contending transactions on `system.users` and `system.role_options`. #137940 + diff --git a/src/current/_includes/releases/v25.1/v25.1.0-beta.1.md b/src/current/_includes/releases/v25.1/v25.1.0-beta.1.md index 23b1cdeb832..1b46700104b 100644 --- a/src/current/_includes/releases/v25.1/v25.1.0-beta.1.md +++ b/src/current/_includes/releases/v25.1/v25.1.0-beta.1.md @@ -6,37 +6,23 @@ Release Date: January 20, 2025

General changes

-- CockroachDB binaries are no longer built with profile-guided optimization (PGO) enabled. [#139092][#139092] +- CockroachDB binaries are no longer built with profile-guided optimization (PGO) enabled. #139092

SQL language changes

- When you run `CREATE LOGICALLY REPLICATED TABLE`, you must specify one of the following options: - `UNIDIRECTIONAL`: Sets up a unidirectional stream with fast initial scan. - - `BIDIRECTIONAL ON {destination uri}`: Sets up a bidirectional stream from the original destination to the original source. [#138297][#138297] -- Logical data replication (LDR) and physical cluster replication (PCR) may now use the `crdb_route=gateway` query option to route the replication streams over a load balancer. [#138697][#138697] -- Updated the column name `description` to `command` in the `SHOW LOGICAL REPLICATION JOBS` responses. [#139030][#139030] + - `BIDIRECTIONAL ON {destination uri}`: Sets up a bidirectional stream from the original destination to the original source. #138297 +- Logical data replication (LDR) and physical cluster replication (PCR) may now use the `crdb_route=gateway` query option to route the replication streams over a load balancer. #138697 +- Updated the column name `description` to `command` in the `SHOW LOGICAL REPLICATION JOBS` responses. #139030

Operational changes

-- The `node decommission` CLI command now waits until the target node is fully drained before marking it as decommissioned. Previously, the command would initiate the drain process but not wait for its completion, leaving the target node in a state where it could not communicate with the cluster but would still accept client requests, causing them to hang or encounter unexpected errors. [#138732][#138732] -- The cluster setting `changefeed.frontier_highwater_lag_checkpoint_threshold` has been renamed to `changefeed.span_checkpoint.lag_threshold`. The previous name is still available for backward compatibility. [#139064][#139064] +- The `node decommission` CLI command now waits until the target node is fully drained before marking it as decommissioned. Previously, the command would initiate the drain process but not wait for its completion, leaving the target node in a state where it could not communicate with the cluster but would still accept client requests, causing them to hang or encounter unexpected errors. #138732 +- The cluster setting `changefeed.frontier_highwater_lag_checkpoint_threshold` has been renamed to `changefeed.span_checkpoint.lag_threshold`. The previous name is still available for backward compatibility. #139064

Bug fixes

-- Fixed a bounded memory leak that could occur when evaluating some memory-intensive queries using the vectorized engine. This leak had been present since v20.2. [#138804][#138804] -- Fixed a bug where columns created with `GENERATED ... BY IDENTITY` with the `SERIAL` type could incorrectly fail internal validations. [#139084][#139084] - -[#136879]: https://github.com/cockroachdb/cockroach/pull/136879 -[#137319]: https://github.com/cockroachdb/cockroach/pull/137319 -[#138283]: https://github.com/cockroachdb/cockroach/pull/138283 -[#138297]: https://github.com/cockroachdb/cockroach/pull/138297 -[#138697]: https://github.com/cockroachdb/cockroach/pull/138697 -[#138732]: https://github.com/cockroachdb/cockroach/pull/138732 -[#138804]: https://github.com/cockroachdb/cockroach/pull/138804 -[#138849]: https://github.com/cockroachdb/cockroach/pull/138849 -[#139030]: https://github.com/cockroachdb/cockroach/pull/139030 -[#139041]: https://github.com/cockroachdb/cockroach/pull/139041 -[#139042]: https://github.com/cockroachdb/cockroach/pull/139042 -[#139064]: https://github.com/cockroachdb/cockroach/pull/139064 -[#139084]: https://github.com/cockroachdb/cockroach/pull/139084 -[#139092]: https://github.com/cockroachdb/cockroach/pull/139092 +- Fixed a bounded memory leak that could occur when evaluating some memory-intensive queries using the vectorized engine. This leak had been present since v20.2. #138804 +- Fixed a bug where columns created with `GENERATED ... BY IDENTITY` with the `SERIAL` type could incorrectly fail internal validations. #139084 + diff --git a/src/current/_includes/releases/v25.1/v25.1.0-beta.2.md b/src/current/_includes/releases/v25.1/v25.1.0-beta.2.md index 139dce7dc5c..71d1e526560 100644 --- a/src/current/_includes/releases/v25.1/v25.1.0-beta.2.md +++ b/src/current/_includes/releases/v25.1/v25.1.0-beta.2.md @@ -6,32 +6,24 @@ Release Date: January 27, 2025

SQL language changes

-- `DROP INDEX` is now labeled a [potentially unsafe SQL statement]({% link v25.1/cockroach-sql.md %}#allow-potentially-unsafe-sql-statements), so it can only be run when the `sql_safe_updates` session variable or `--safe_updates` flag is set to `false`. [#139505][#139505] -- `SHOW JOBS` is now based on a new mechanism for storing information about the progress and status of running jobs. [#139230][#139230] -- `SHOW TENANT WITH REPLICATION STATUS` now includes an `ingestion_job_id` column that displays the ID of the Physical Cluster Replication (PCR) [ingestion job]({% link v25.1/physical-cluster-replication-monitoring.md %}). [#139275][#139275] +- `DROP INDEX` is now labeled a [potentially unsafe SQL statement]({% link v25.1/cockroach-sql.md %}#allow-potentially-unsafe-sql-statements), so it can only be run when the `sql_safe_updates` session variable or `--safe_updates` flag is set to `false`. #139505 +- `SHOW JOBS` is now based on a new mechanism for storing information about the progress and status of running jobs. #139230 +- `SHOW TENANT WITH REPLICATION STATUS` now includes an `ingestion_job_id` column that displays the ID of the Physical Cluster Replication (PCR) [ingestion job]({% link v25.1/physical-cluster-replication-monitoring.md %}). #139275

Operational changes

-- Customers must pass URIs as [external connections]({% link v25.1/create-external-connection.md %}) when [setting up Logical Data Replication (LDR)]({% link v25.1/set-up-logical-data-replication.md %}). [#139506][#139506] +- Customers must pass URIs as [external connections]({% link v25.1/create-external-connection.md %}) when [setting up Logical Data Replication (LDR)]({% link v25.1/set-up-logical-data-replication.md %}). #139506

DB Console changes

-- The DB Console Cluster Settings page now uses the same redaction logic as `SHOW CLUSTER SETTINGS` when the setting [`server.redact_sensitive_settings.enabled`]({% link v25.1/cluster-settings.md %}#setting-server-redact-sensitive-settings-enabled) is set to `true`. [#139277][#139277] -- The [Overload dashboard]({% link v25.1/ui-overload-dashboard.md %}) in DB Console now displays only V2 Replication Admission Control metrics and omits their previous V2 prefix. Previously, both V1 and V2 metrics were displayed. Additionally, the aggregate size of queued replication entries is now displayed. [#139566][#139566] +- The DB Console Cluster Settings page now uses the same redaction logic as `SHOW CLUSTER SETTINGS` when the setting [`server.redact_sensitive_settings.enabled`]({% link v25.1/cluster-settings.md %}#setting-server-redact-sensitive-settings-enabled) is set to `true`. #139277 +- The [Overload dashboard]({% link v25.1/ui-overload-dashboard.md %}) in DB Console now displays only V2 Replication Admission Control metrics and omits their previous V2 prefix. Previously, both V1 and V2 metrics were displayed. Additionally, the aggregate size of queued replication entries is now displayed. #139566

Bug fixes

-- Fixed a bug where the error "batch timestamp T must be after replica GC threshold" could occur during a schema change backfill operation, and cause the schema change job to retry infinitely. Now this error is treated as permanent, and will cause the job to enter the failed state. [#139251][#139251] +- Fixed a bug where the error "batch timestamp T must be after replica GC threshold" could occur during a schema change backfill operation, and cause the schema change job to retry infinitely. Now this error is treated as permanent, and will cause the job to enter the failed state. #139251

Performance improvements

-- The cluster setting `rpc.batch_stream_pool.enabled` now defaults to `false`. This supersedes an earlier release note. This cluster setting is experimental and is not listed as `public`. [#139481][#139481] +- The cluster setting `rpc.batch_stream_pool.enabled` now defaults to `false`. This supersedes an earlier release note. This cluster setting is experimental and is not listed as `public`. #139481 -[#139230]: https://github.com/cockroachdb/cockroach/pull/139230 -[#139251]: https://github.com/cockroachdb/cockroach/pull/139251 -[#139275]: https://github.com/cockroachdb/cockroach/pull/139275 -[#139277]: https://github.com/cockroachdb/cockroach/pull/139277 -[#139481]: https://github.com/cockroachdb/cockroach/pull/139481 -[#139505]: https://github.com/cockroachdb/cockroach/pull/139505 -[#139506]: https://github.com/cockroachdb/cockroach/pull/139506 -[#139566]: https://github.com/cockroachdb/cockroach/pull/139566 diff --git a/src/current/_includes/releases/v25.1/v25.1.0-beta.3.md b/src/current/_includes/releases/v25.1/v25.1.0-beta.3.md index 53f105b3244..a07e538c9cb 100644 --- a/src/current/_includes/releases/v25.1/v25.1.0-beta.3.md +++ b/src/current/_includes/releases/v25.1/v25.1.0-beta.3.md @@ -6,23 +6,18 @@ Release Date: February 3, 2025

Operational changes

-- Reduced noise when using dynamically provisioned logging sinks. [#139644][#139644] +- Reduced noise when using dynamically provisioned logging sinks. #139644 - Added new metrics for monitoring changefeed span-level checkpoint creation: - `changefeed.checkpoint.create_nanos`, which measures the time it takes to create a changefeed checkpoint. - `changefeed.checkpoint.total_bytes`, which measures the total size of a changefeed checkpoint in bytes. - - `changefeed.checkpoint.span_count`, which measures the number of spans in a changefeed checkpoint. [#139707][#139707] + - `changefeed.checkpoint.span_count`, which measures the number of spans in a changefeed checkpoint. #139707

Command-line changes

-- Improved the performance of the debug zip query that collects `transaction_contention_events` data, which reduces the chance of `"memory budget exceeded"` or `"query execution canceled due to statement timeout"` errors. [#139755][#139755] +- Improved the performance of the debug zip query that collects `transaction_contention_events` data, which reduces the chance of `"memory budget exceeded"` or `"query execution canceled due to statement timeout"` errors. #139755

Bug fixes

-- Fixed a bug where sometimes activating diagnostics for SQL activity appears unresponsive, with no state or status update upon activating. Now, the status should always reflect that diagnostics are active, or that a statement bundle is downloadable. [#139587][#139587] -- Fixed a bug where the `plan.txt` file would be incomplete whenever CockroachDB collected a statement bundle with plan-gist-based matching. The bug had been present since the introduction of plan-gist-based matching feature in v23.1, but was partially addressed in v24.2. [#139268][#139268] +- Fixed a bug where sometimes activating diagnostics for SQL activity appears unresponsive, with no state or status update upon activating. Now, the status should always reflect that diagnostics are active, or that a statement bundle is downloadable. #139587 +- Fixed a bug where the `plan.txt` file would be incomplete whenever CockroachDB collected a statement bundle with plan-gist-based matching. The bug had been present since the introduction of plan-gist-based matching feature in v23.1, but was partially addressed in v24.2. #139268 -[#139268]: https://github.com/cockroachdb/cockroach/pull/139268 -[#139587]: https://github.com/cockroachdb/cockroach/pull/139587 -[#139644]: https://github.com/cockroachdb/cockroach/pull/139644 -[#139707]: https://github.com/cockroachdb/cockroach/pull/139707 -[#139755]: https://github.com/cockroachdb/cockroach/pull/139755 diff --git a/src/current/_includes/releases/v25.1/v25.1.0-rc.1.md b/src/current/_includes/releases/v25.1/v25.1.0-rc.1.md index 125ade072bc..f75d19e8239 100644 --- a/src/current/_includes/releases/v25.1/v25.1.0-rc.1.md +++ b/src/current/_includes/releases/v25.1/v25.1.0-rc.1.md @@ -14,36 +14,21 @@ Release Date: February 10, 2025 ``` ALTER ROLE ALL SET autocommit_before_ddl = false; ``` - [#140156][#140156] + #140156

SQL language changes

-- Since v23.2, table statistics histograms have been collected for non-indexed JSON columns. Histograms are no longer collected for these columns. This reduces memory usage during table statistics collection, for both automatic and manual collection via `ANALYZE` and `CREATE STATISTICS`. The previous behavior can be re-enabled by setting the cluster setting `sql.stats.non_indexed_json_histograms.enabled` to `true`. [#139898][#139898] -- Added the session setting `optimizer_prefer_bounded_cardinality` which instructs the optimizer to prefer query plans where every expression has a guaranteed upper-bound on the number of rows it will process. This may help the optimizer produce better query plans in some cases. This setting is disabled by default. [#140245][#140245] -- Added the session setting `optimizer_min_row_count` which sets a lower bound on row-count estimates for relational expressions during query planning. A value of zero, which is the default, indicates no lower bound. Note that if this is set to a value greater than zero, a row count of zero can still be estimated for expressions with a cardinality of zero, e.g., for a contradictory filter. Setting this to a value higher than 0, such as 1, may yield better query plans in some cases, such as when statistics are frequently stale and inaccurate. [#140245][#140245] -- Fixed a bug existing only in testing releases of v25.1 that could cause unexpected errors during planning for `VALUES` expressions containing function calls with multiple overloads. [#140306][#140306] -- The default setting for `plan_cache_mode` has been reverted to `force_custom_plan`, after being changed to `auto` in a [prior testing release]({% link releases/v25.1.md %}#v25-1-0-alpha-1). You can disregard the [previous release note]({% link releases/v25.1.md %}#v25-1-0-alpha-1-performance-improvements). [#140405][#140405] +- Since v23.2, table statistics histograms have been collected for non-indexed JSON columns. 
Histograms are no longer collected for these columns. This reduces memory usage during table statistics collection, for both automatic and manual collection via `ANALYZE` and `CREATE STATISTICS`. The previous behavior can be re-enabled by setting the cluster setting `sql.stats.non_indexed_json_histograms.enabled` to `true`. #139898 +- Added the session setting `optimizer_prefer_bounded_cardinality` which instructs the optimizer to prefer query plans where every expression has a guaranteed upper-bound on the number of rows it will process. This may help the optimizer produce better query plans in some cases. This setting is disabled by default. #140245 +- Added the session setting `optimizer_min_row_count` which sets a lower bound on row-count estimates for relational expressions during query planning. A value of zero, which is the default, indicates no lower bound. Note that if this is set to a value greater than zero, a row count of zero can still be estimated for expressions with a cardinality of zero, e.g., for a contradictory filter. Setting this to a value higher than 0, such as 1, may yield better query plans in some cases, such as when statistics are frequently stale and inaccurate. #140245 +- Fixed a bug existing only in testing releases of v25.1 that could cause unexpected errors during planning for `VALUES` expressions containing function calls with multiple overloads. #140306 +- The default setting for `plan_cache_mode` has been reverted to `force_custom_plan`, after being changed to `auto` in a [prior testing release]({% link releases/v25.1.md %}#v25-1-0-alpha-1). You can disregard the [previous release note]({% link releases/v25.1.md %}#v25-1-0-alpha-1-performance-improvements). #140405

Bug fixes

-- Fixed a bug that could cause `SHOW TABLES` and other introspection operations to encounter a `batch timestamp ... must be after replica GC threshold` error. [#140078][#140078] -- Fixed a bug existing only in testing releases of v25.1 that could cause the creation of a PL/pgSQL routine with a CTE to fail with an error similar to: `unexpected root expression: with`. [#140145][#140145] -- Fixed a rare bug in which a query might fail with error `could not find computed column expression for column ... in table` while dropping a virtual computed column from the table. This bug was introduced in v23.2.4. [#139834][#139834] -- Configuring replication controls on a partition name of an index that is not unique across all indexes will now correctly impact only that partition. [#140293][#140293] -- The Data Distribution page in Advanced Debug will no longer crash if there are `NULL` values for `raw_sql_config` in `crdb_internal.zones`. [#140609][#140609] - -[#139834]: https://github.com/cockroachdb/cockroach/pull/139834 -[#139898]: https://github.com/cockroachdb/cockroach/pull/139898 -[#140078]: https://github.com/cockroachdb/cockroach/pull/140078 -[#140089]: https://github.com/cockroachdb/cockroach/pull/140089 -[#140145]: https://github.com/cockroachdb/cockroach/pull/140145 -[#140156]: https://github.com/cockroachdb/cockroach/pull/140156 -[#140245]: https://github.com/cockroachdb/cockroach/pull/140245 -[#140252]: https://github.com/cockroachdb/cockroach/pull/140252 -[#140293]: https://github.com/cockroachdb/cockroach/pull/140293 -[#140306]: https://github.com/cockroachdb/cockroach/pull/140306 -[#140405]: https://github.com/cockroachdb/cockroach/pull/140405 -[#140609]: https://github.com/cockroachdb/cockroach/pull/140609 -[2ea91e321]: https://github.com/cockroachdb/cockroach/commit/2ea91e321 -[62c075413]: https://github.com/cockroachdb/cockroach/commit/62c075413 -[76944423e]: https://github.com/cockroachdb/cockroach/commit/76944423e +- Fixed a bug that could cause `SHOW 
TABLES` and other introspection operations to encounter a `batch timestamp ... must be after replica GC threshold` error. #140078 +- Fixed a bug existing only in testing releases of v25.1 that could cause the creation of a PL/pgSQL routine with a CTE to fail with an error similar to: `unexpected root expression: with`. #140145 +- Fixed a rare bug in which a query might fail with error `could not find computed column expression for column ... in table` while dropping a virtual computed column from the table. This bug was introduced in v23.2.4. #139834 +- Configuring replication controls on a partition name of an index that is not unique across all indexes will now correctly impact only that partition. #140293 +- The Data Distribution page in Advanced Debug will no longer crash if there are `NULL` values for `raw_sql_config` in `crdb_internal.zones`. #140609 + diff --git a/src/current/_includes/releases/v25.1/v25.1.0.md b/src/current/_includes/releases/v25.1/v25.1.0.md index bfb6205b994..de85dab0b80 100644 --- a/src/current/_includes/releases/v25.1/v25.1.0.md +++ b/src/current/_includes/releases/v25.1/v25.1.0.md @@ -95,4 +95,3 @@ Docs | [SQL Feature Support]({% link v25.1/sql-feature-support.m Docs | [Change Data Capture Overview]({% link v25.1/change-data-capture-overview.md %}) | This page summarizes CockroachDB's data streaming capabilities. Change data capture (CDC) provides efficient, distributed, row-level changefeeds into a configurable sink for downstream processing such as reporting, caching, or full-text indexing. Docs | [Backup Architecture]({% link v25.1/backup-architecture.md %}) | This page describes the backup job workflow with a high-level overview, diagrams, and more details on each phase of the job. 
-[#115166]: https://github.com/cockroachdb/cockroach/pull/115166 diff --git a/src/current/_includes/releases/v25.1/v25.1.1.md b/src/current/_includes/releases/v25.1/v25.1.1.md index 6be1de3139a..daaa363d41a 100644 --- a/src/current/_includes/releases/v25.1/v25.1.1.md +++ b/src/current/_includes/releases/v25.1/v25.1.1.md @@ -7,7 +7,6 @@ Release Date: March 12, 2025

Bug fixes

- Improved S3 credential caching for STS credentials to avoid exceeding the Amazon metadata service rate limit and encountering errors related to AssumeRole API calls when accessing large numbers of files in larger clusters. - [#142677][#142677] + #142677 -[#142677]: https://github.com/cockroachdb/cockroach/pull/142677 diff --git a/src/current/_includes/releases/v25.1/v25.1.10.md b/src/current/_includes/releases/v25.1/v25.1.10.md index 1d8c980475c..61acb0a559c 100644 --- a/src/current/_includes/releases/v25.1/v25.1.10.md +++ b/src/current/_includes/releases/v25.1/v25.1.10.md @@ -7,7 +7,6 @@ Release Date: August 1, 2025

Bug fixes

- Fixed a bug that could cause some errors returned by attempts to upload backup data to external storage providers to be undetected, potentially causing incomplete backups. - [#151097][#151097] + #151097 -[#151097]: https://github.com/cockroachdb/cockroach/pull/151097 diff --git a/src/current/_includes/releases/v25.1/v25.1.2.md b/src/current/_includes/releases/v25.1/v25.1.2.md index fb6e996df62..319b22db157 100644 --- a/src/current/_includes/releases/v25.1/v25.1.2.md +++ b/src/current/_includes/releases/v25.1/v25.1.2.md @@ -7,46 +7,34 @@ Release Date: March 12, 2025

SQL language changes

- Added the `optimizer_check_input_min_row_count` session setting to control the minimum row count estimate for buffer scans of foreign key and uniqueness checks. It defaults to `0`. - [#141374][#141374] + #141374

DB Console changes

- The **Paused Follower** graph has been removed from the **Replication Dashboard** in DB Console because followers are no longer paused by default in CockroachDB v25.1 and later. - [#141502][#141502] + #141502

Bug fixes

- Fixed a bug that prevented starting multi-table logical data replication (LDR) streams on tables that contained user-defined types. - [#141643][#141643] + #141643 - Fixed a bug where dropping a table with a trigger using the legacy schema changer could leave an orphaned reference in the descriptor. This issue occurred when two tables depended on each other via a trigger, and the table containing the trigger was dropped. - [#141181][#141181] + #141181 - Fixed a bug that could cause the upgrade to v25.1 to crash if a job was missing from the virtual table, such as when a malformed job had no payload information. - [#142312][#142312] + #142312 - A step in the v25.1 upgrade finalization process that required backfilling jobs now uses locks to ensure it makes progress even when there is contention on the jobs table, which prevents the possibility of becoming stuck under heavy load. - [#141460][#141460] + #141460 - Fixed a bug that could cause concurrent DML statements to prevent primary key changes from succeeding. - [#141383][#141383] + #141383 - Fixed a bug that could cause gateway nodes to panic when performing an `UPSERT` on a table with a `BOOL` primary key column and a partial index where the primary key column is used as the predicate expression. - [#141822][#141822] + #141822 - Fixed a bug that could cause `SHOW CREATE TABLE` to fail if a database was offline (e.g., due to a `RESTORE` on that database). - [#141505][#141505] + #141505 - Fixed a bug that prevented transaction retry errors encountered during implicit transactions from being automatically retried internally if the `autocommit_before_ddl` session variable was enabled and the statement was a schema change. - [#141393][#141393] + #141393 - Fixed a bug that could cause `nil pointer dereference` errors when executing statements with user-defined functions (UDFs) or certain built-in functions, such as `obj_description`. 
- [#141653][#141653] + #141653 - Improved S3 credential caching for STS credentials to avoid exceeding the Amazon metadata service rate limit and encountering errors related to AssumeRole API calls when accessing large numbers of files in larger clusters. - [#142690][#142690] - - -[#142690]: https://github.com/cockroachdb/cockroach/pull/142690 -[#141502]: https://github.com/cockroachdb/cockroach/pull/141502 -[#141643]: https://github.com/cockroachdb/cockroach/pull/141643 -[#141181]: https://github.com/cockroachdb/cockroach/pull/141181 -[#142312]: https://github.com/cockroachdb/cockroach/pull/142312 -[#141460]: https://github.com/cockroachdb/cockroach/pull/141460 -[#141505]: https://github.com/cockroachdb/cockroach/pull/141505 -[#141653]: https://github.com/cockroachdb/cockroach/pull/141653 -[#141374]: https://github.com/cockroachdb/cockroach/pull/141374 -[#141383]: https://github.com/cockroachdb/cockroach/pull/141383 -[#141822]: https://github.com/cockroachdb/cockroach/pull/141822 -[#141393]: https://github.com/cockroachdb/cockroach/pull/141393 + #142690 + + diff --git a/src/current/_includes/releases/v25.1/v25.1.3.md b/src/current/_includes/releases/v25.1/v25.1.3.md index 4ecfdd67e6f..3559f1c6ddf 100644 --- a/src/current/_includes/releases/v25.1/v25.1.3.md +++ b/src/current/_includes/releases/v25.1/v25.1.3.md @@ -7,89 +7,65 @@ Release Date: April 2, 2025

General changes

- When changefeeds are created with a `resolved` option lower than the `min_checkpoint_frequency` option, a message was printed to inform the user. This message is now a notice, and includes extra information if either option was a default. - [#142154][#142154] + #142154

Operational changes

- Added the cluster setting `server.child_metrics.include_aggregate.enabled` (default: `true`) that controls the behavior of Prometheus child metrics reporting (`/_status/vars`). When set to `true`, child metrics include an aggregate time series, maintaining the existing behavior. When set to `false`, it stops reporting the aggregate time series, preventing double counting when querying metrics. - [#142746][#142746] + #142746 - Added the `sql.statement_timeout.count` metric to track the number of SQL statements that fail due to exceeding the statement timeout. - [#142155][#142155] + #142155 - Added the `sql.transaction_timeout.count` metric to track the number of SQL statements that fail due to exceeding the transaction timeout. - [#142155][#142155] + #142155

Bug fixes

- Fixed a crash due to `use of enum metadata before hydration` when using logical data replication (LDR) with user-defined types. - [#143388][#143388] + #143388 - Fixed an issue where dropping a database with triggers could fail due to an undropped backreference to a trigger function. - [#142727][#142727] + #142727 - Fixed a bug in `v24.1.14`, `v24.3.7`, `v24.3.8`, and `v25.1` that could cause a nil-pointer error when a column's default expression contained a volatile expression (like `nextval`) as a UDF argument. - [#143632][#143632] + #143632 - A step in the 25.1 upgrade finalization process that required backfilling jobs now uses locks to ensure it makes progress even when there is contention on the jobs table to prevent the possibility of becoming stuck under heavy load. - [#142223][#142223] + #142223 - Fixed a bug where the declarative schema changer allowed `CREATE SEQUENCE` operations to proceed even while a `DROP SCHEMA` or `DROP DATABASE` was in progress. Such operations now retry if the parent object has a schema change in progress, preventing new child objects from being created under deleted parent objects. - [#142764][#142764] + #142764 - Fixed a bug when running with the `autocommit_before_ddl` session variable that could cause a runtime error when binding a previously prepared DDL statement. - [#141851][#141851] + #141851 - Fixed a bug that would prevent `CREATE TRIGGER` and `DROP TRIGGER` statements from working if the `autocommit_before_ddl` setting was enabled, and if the statement was either sent as a prepared statement or as part of a batch of multiple statements. - [#142302][#142302] + #142302 - Fixed a bug where CockroachDB could incorrectly evaluate casts to some OID types (like `REGCLASS`) in some cases. The bug had been present since at least v22.1. 
- [#141956][#141956] + #141956 - Fixed a bug where replication controls on indexes and partitions would not get properly updated with their new IDs during index backfills, effectively discarding the replication controls set on them before the backfill. - [#142844][#142844] + #142844 - Fixed a bug where `EXPLAIN ANALYZE` output could incorrectly show `distribution: full` and not `distribution: local` in some cases when the physical plan was only running on the gateway node. The bug had been present since before v23.1, and did not apply to `EXPLAIN` statements. - [#142938][#142938] + #142938 - The TTL deletion job now includes a retry mechanism that progressively reduces the batch size when encountering contention. This improves the chances of successful deletion without requiring manual adjustments to TTL knobs. Also added the `jobs.row_level_ttl.num_delete_batch_retries` metric to track the number of times the TTL job had to reduce the batch size and try again. - [#142324][#142324] + #142324 - Fixed a bug where the fraction completed and internal checkpoints during an index backfill operation would stop getting written if any of the periodic fraction/checkpoint write operations failed. Additional logging was added so that progress is logged in addition to being written to the job record. This bug affected schema change operations such as creating an index or adding a non-nullable column to a table. - [#141788][#141788] + #141788 - Fixed a bug which would send a replica outside of a tenant's known region when `SURVIVE REGION FAILURE` was set and exactly 3 regions were configured. - [#142980][#142980] + #142980 - Fixed a bug that could cause the upgrade to v25.1 to crash if a job was missing from the virtual table. For example, if a malformed job had no payload information. 
- [#142311][#142311] + #142311 - Fixed a bug where during validation of a table-level zone config, inherited values were incorrectly populated from the default range instead of from the parent database. - [#142865][#142865] + #142865 - Fixed a bug in client certificate expiration metrics, `security.certificate.expiration.client` and `security.certificate.ttl.client`. - [#142916][#142916] + #142916 - Fixed a bug where a node that was drained as part of decommissioning may have interrupted SQL connections that were still active during drain (and for which drain would have been expected to wait). - [#141769][#141769] + #141769 - Physical Cluster Replication (PCR) reader catalogs could have orphaned rows in `system.namespace` after an object is renamed. - [#142872][#142872] + #142872

Miscellaneous

- Updated the `CREATE TRIGGER` `only implemented in the declarative schema changer` error message to include a helpful suggestion and link to relevant docs. - [#141872][#141872] + #141872 - When configuring the `sql.ttl.default_delete_rate_limit` cluster setting, a notice is displayed informing the user that the TTL rate limit is per leaseholder per table with a link to the docs. - [#142834][#142834] + #142834 - When configuring the `sql.ttl.default_delete_rate_limit` cluster setting, a notice is displayed informing the user that the TTL rate limit is per leaseholder per table with a link to the docs. - [#142834][#142834] + #142834 - Improved S3 credential caching for STS credentials to avoid exceeding the Amazon metadata service rate limit and encountering errors related to AssumeRole API calls when accessing large numbers of files in larger clusters. - [#142438][#142438] + #142438 -[#142223]: https://github.com/cockroachdb/cockroach/pull/142223 -[#141788]: https://github.com/cockroachdb/cockroach/pull/141788 -[#142311]: https://github.com/cockroachdb/cockroach/pull/142311 -[#141769]: https://github.com/cockroachdb/cockroach/pull/141769 -[#141872]: https://github.com/cockroachdb/cockroach/pull/141872 -[#142438]: https://github.com/cockroachdb/cockroach/pull/142438 -[#142154]: https://github.com/cockroachdb/cockroach/pull/142154 -[#142155]: https://github.com/cockroachdb/cockroach/pull/142155 -[#142302]: https://github.com/cockroachdb/cockroach/pull/142302 -[#141956]: https://github.com/cockroachdb/cockroach/pull/141956 -[#142324]: https://github.com/cockroachdb/cockroach/pull/142324 -[#142980]: https://github.com/cockroachdb/cockroach/pull/142980 -[#142746]: https://github.com/cockroachdb/cockroach/pull/142746 -[#143632]: https://github.com/cockroachdb/cockroach/pull/143632 -[#142865]: https://github.com/cockroachdb/cockroach/pull/142865 -[#142916]: https://github.com/cockroachdb/cockroach/pull/142916 -[#142872]: 
https://github.com/cockroachdb/cockroach/pull/142872 -[#142834]: https://github.com/cockroachdb/cockroach/pull/142834 -[#141851]: https://github.com/cockroachdb/cockroach/pull/141851 -[#142844]: https://github.com/cockroachdb/cockroach/pull/142844 -[#142764]: https://github.com/cockroachdb/cockroach/pull/142764 -[#142938]: https://github.com/cockroachdb/cockroach/pull/142938 -[#143388]: https://github.com/cockroachdb/cockroach/pull/143388 -[#142727]: https://github.com/cockroachdb/cockroach/pull/142727 diff --git a/src/current/_includes/releases/v25.1/v25.1.4.md b/src/current/_includes/releases/v25.1/v25.1.4.md index 63b2e689f44..1da97291866 100644 --- a/src/current/_includes/releases/v25.1/v25.1.4.md +++ b/src/current/_includes/releases/v25.1/v25.1.4.md @@ -7,7 +7,6 @@ Release Date: April 9, 2025

Bug fixes

- Fixed a bug that could cause a stack overflow during execution of a prepared statement that invoked a PL/pgSQL routine with a loop. The bug existed in versions v23.2.22, v24.1.15, v24.3.9, v25.1.2, v25.1.3, and pre-release versions of v25.2 prior to v25.2.0-alpha.3. - [#144059][#144059] + #144059 -[#144059]: https://github.com/cockroachdb/cockroach/pull/144059 diff --git a/src/current/_includes/releases/v25.1/v25.1.5.md b/src/current/_includes/releases/v25.1/v25.1.5.md index 0d31ec07561..456e5ca6bff 100644 --- a/src/current/_includes/releases/v25.1/v25.1.5.md +++ b/src/current/_includes/releases/v25.1/v25.1.5.md @@ -6,6 +6,5 @@ Release Date: April 28, 2025

Bug fixes

-- Fixed a rare corruption bug that impacts import and materialized views. [#144662][#144662] +- Fixed a rare corruption bug that impacts import and materialized views. #144662 -[#144662]: https://github.com/cockroachdb/cockroach/pull/144662 diff --git a/src/current/_includes/releases/v25.1/v25.1.6.md b/src/current/_includes/releases/v25.1/v25.1.6.md index 1c4579338be..7ea6b7159df 100644 --- a/src/current/_includes/releases/v25.1/v25.1.6.md +++ b/src/current/_includes/releases/v25.1/v25.1.6.md @@ -7,73 +7,53 @@ Release Date: April 30th, 2025

SQL language changes

- Added the `WITH IGNORE_FOREIGN_KEYS` option to `SHOW CREATE TABLE` which omits foreign key constraints from the output schema. This option is also allowed in `SHOW CREATE VIEW`, but has no effect. It cannot be combined with the `WITH REDACT` option. - [#142161][#142161] + #142161 - `EXPLAIN ANALYZE` statements now display the number of transaction retries and time spent retrying, if non-zero, in the plan output. - [#142928][#142928] + #142928 - A new `execution time` statistic is now reported on `EXPLAIN ANALYZE` output for most operators. Previously, this statistic was only available on the DistSQL diagrams in `EXPLAIN ANALYZE (DISTSQL)` output. - [#143895][#143895] + #143895

Operational changes

- The `sys.cpu.host.combined.percent-normalized` metric has been updated to include additional counters for more accurate host CPU measurement and to reduce underreporting. It now accounts for time spent processing hardware (`irq`) and software (`softirq`) interrupts, as well as `nice` time, which represents low-priority user-mode activity. - [#142906][#142906] + #142906 - The `server.client_cert_expiration_cache.capacity` cluster setting has been removed. The `security.certificate.expiration.client` and `security.certificate.ttl.client` metrics now report the lowest value observed for a user in the last 24 hours. - [#143729][#143729] + #143729

Bug fixes

- Previously, fast failback for physical cluster replication (PCR) could succeed even if the destination cluster protected timestamp had been removed, causing the reverse stream to enter a crashing loop. This fix ensures the failback command fast fails. - [#142922][#142922] + #142922 - The reader virtual cluster now starts if the user begins a physical cluster replication (PCR) stream from a cursor via `ALTER VIRTUAL CLUSTER virtual_cluster START REPLICATION OF virtual_cluster ON physical_cluster WITH READ VIRTUAL CLUSTER`. - [#143141][#143141] + #143141 - Fixed a bug that caused changefeeds to fail on startup when scanning a single key. - [#143150][#143150] + #143150 - MVCC garbage collection is now fully subject to IO admission control. Previously, it was possible for MVCC GC to cause store overload (such as LSM inversion) when a large amount of data would become eligible for garbage collection. Should any issues arise from subjecting MVCC GC to admission control, the `kv.mvcc_gc.queue_kv_admission_control.enabled` cluster setting can be set to `false` to restore the previous behavior. - [#143277][#143277] + #143277 - Fixed a bug where calling a stored procedure could drop the procedure if it had `OUT` parameters that were not used by the calling routine. This bug had existed since PL/pgSQL `CALL` statements were introduced in v24.1. - [#143290][#143290] + #143290 - Fixed a bug where CockroachDB would encounter an internal error when decoding the gists of plans with `CALL` statements. The bug had been present since v23.2. - [#143315][#143315] + #143315 - Fixed a crash due to `use of enum metadata before hydration` when using logical data replication (LDR) with user-defined types. - [#143373][#143373] + #143373 - Fixed a bug where a GC threshold error (which appears as "batch timestamp must be after replica GC threshold ...") could cause a schema change that backfills data to fail. 
Now, the error will cause the backfill to be retried at a higher timestamp to avoid the error. - [#143520][#143520] + #143520 - Fixed a bug in `v24.1.14`, `v24.3.7`, `v24.3.8`, and `v25.1` that could cause a nil-pointer error when a column's default expression contained a volatile expression (like `nextval`) as a UDF argument. - [#143634][#143634] + #143634 - Fixed a potential deadlock that could occur during client certificate updates while metrics were being collected. This issue affected the reliability of certificate expiration reporting. - [#143729][#143729] + #143729 - Previously, the fields `maximum memory usage` and `max sql temp disk usage` in the `EXPLAIN ANALYZE` output could be under-reported for distributed plans when memory-intensive operations were fully performed on the remote nodes. This is now fixed. The bug existed in v22.1 and later. - [#143794][#143794] + #143794 - The `ALTER VIRTUAL CLUSTER SET REPLICATION READ VIRTUAL CLUSTER` syntax is now supported for adding a reader virtual cluster for an existing Physical Cluster Replication (PCR) standby. - [#143853][#143853] + #143853 - Fixed a bug where CockroachDB could encounter a `cannot specify timestamp older than ...` error during table statistics collection in some cases (e.g., when the cluster is overloaded). The bug was present since v19.1. - [#144018][#144018] + #144018 - Fixed a bug that could cause a stack overflow during execution of a prepared statement that invoked a PL/pgSQL routine with a loop. The bug existed in versions v23.2.22, v24.1.15, v24.3.9, v25.1.2, v25.1.3, and pre-release versions of v25.2 prior to v25.2.0-alpha.3. - [#144031][#144031] + #144031 - Fixed a bug that could lead to a node stall. - [#146410][#146410] + #146410

Performance improvements

- Schema changes that require data to be backfilled no longer hold a protected timestamp for the entire duration of the backfill; this means there is less overhead caused by MVCC garbage collection after the backfill completes. - [#143520][#143520] - -[#142928]: https://github.com/cockroachdb/cockroach/pull/142928 -[#143729]: https://github.com/cockroachdb/cockroach/pull/143729 -[#143277]: https://github.com/cockroachdb/cockroach/pull/143277 -[#143853]: https://github.com/cockroachdb/cockroach/pull/143853 -[#144018]: https://github.com/cockroachdb/cockroach/pull/144018 -[#144031]: https://github.com/cockroachdb/cockroach/pull/144031 -[#143895]: https://github.com/cockroachdb/cockroach/pull/143895 -[#143141]: https://github.com/cockroachdb/cockroach/pull/143141 -[#143150]: https://github.com/cockroachdb/cockroach/pull/143150 -[#143290]: https://github.com/cockroachdb/cockroach/pull/143290 -[#143373]: https://github.com/cockroachdb/cockroach/pull/143373 -[#142161]: https://github.com/cockroachdb/cockroach/pull/142161 -[#142906]: https://github.com/cockroachdb/cockroach/pull/142906 -[#142922]: https://github.com/cockroachdb/cockroach/pull/142922 -[#143315]: https://github.com/cockroachdb/cockroach/pull/143315 -[#143520]: https://github.com/cockroachdb/cockroach/pull/143520 -[#143634]: https://github.com/cockroachdb/cockroach/pull/143634 -[#143794]: https://github.com/cockroachdb/cockroach/pull/143794 -[#146410]: https://github.com/cockroachdb/cockroach/pull/146410 \ No newline at end of file + #143520 diff --git a/src/current/_includes/releases/v25.1/v25.1.7.md b/src/current/_includes/releases/v25.1/v25.1.7.md index 2b105097149..d773a001679 100644 --- a/src/current/_includes/releases/v25.1/v25.1.7.md +++ b/src/current/_includes/releases/v25.1/v25.1.7.md @@ -7,72 +7,51 @@ Release Date: May 28, 2025

DB Console changes

- Schema insights that recommend replacing an index were previously a two-statement command consisting of a `CREATE INDEX` and a `DROP INDEX` statement. When these two DDL statements were run as a single batched command, it was possible for one statement to succeed and one to fail. This is because DDL statements do not have the same atomicity guarantees as other SQL statements in CockroachDB. Index-replacement insights are now a single `CREATE INDEX` statement followed by a comment with additional DDL statements to be run manually: an `ALTER INDEX ... NOT VISIBLE` statement, which makes the old index invisible to the optimizer, followed by a `DROP INDEX` statement that should only be run after making the old index invisible and verifying that workload performance is satisfactory. - [#145989][#145989] + #145989

Operational changes

- SQL queries run on the source cluster by logical data replication (LDR) and physical cluster replication (PCR) will now account to internal metrics like `sql.statements.active.internal` instead of metrics like `sql.statements.active` that are used to monitor application workload. - [#145110][#145110] + #145110

Bug fixes

- Fixed a bug where using values `changefeed.aggregator.flush_jitter` and `min_checkpoint_frequency` such that `changefeed.aggregator.flush_jitter * min_checkpoint_frequency < 1` would cause a panic. Jitter will now be disabled in this case. - [#144426][#144426] + #144426 - Fixed a bug in the DB Console where the **Drop unused index** tag appeared multiple times for an index on the **Indexes** tab of the table details page. - [#144655][#144655] + #144655 - Fixed a bug in the DB Console where tables with page size dropdowns failed to update when a new page size option was selected. Tables now update correctly. - [#144766][#144766] -- Fixed a bug that could potentially cause a changefeed to erroneously complete when one of its watched tables encounters a schema change. [#144779][#144779] -- Fixed a bug where the **Schedules** page displayed only a subset of a cluster's schedules. The **Schedules** page now correctly displays all schedules. [#144806][#144806] + #144766 +- Fixed a bug that could potentially cause a changefeed to erroneously complete when one of its watched tables encounters a schema change. #144779 +- Fixed a bug where the **Schedules** page displayed only a subset of a cluster's schedules. The **Schedules** page now correctly displays all schedules. #144806 - Fixed a bug where manually updating the `show` or `status` parameters in the URL (e.g., `http://127.0.0.1:8080/#/schedules?status=ACTIVE&show=50`) caused the **Schedules** page to fail to load. - [#144806][#144806] + #144806 - Fixed a bug in the **SQL Activity Statements** page where filtering by **Statement Type** returned no results. The filter now works as expected. - [#144852][#144852] + #144852 - Improve the performance of `SHOW CREATE TABLE` on multi-region databases with large numbers of objects. - [#145070][#145070] + #145070 - Fixed a bug that could cause queries that perform work in parallel to ignore the requested quality-of-service level. 
Affected operations include lookup joins, DistSQL execution, and foreign-key checks. - [#145371][#145371] + #145371 - Fixed a bug where running `DROP INDEX` on a hash-sharded index did not properly detect dependencies from functions and procedures on the shard column. This caused the `DROP INDEX` statement to fail with an internal validation error. Now the statement returns a correct error message, and using `DROP INDEX ... CASCADE` works as expected by dropping the dependent functions and procedures. - [#145384][#145384] + #145384 - Fixed a bug that could lead to schema changes hanging after a cluster recovered from availability issues. - [#145544][#145544] + #145544 - Previously, on a table with multiple column families, CockroachDB could encounter a `Non-nullable column "‹×›:‹×›" with no value` error in rare cases during table statistics collection. The bug was present since v19.2 and is now fixed. - [#145575][#145575] + #145575 - Fixed a bug that could cause a row-level TTL job to fail with the error "comparison of two different versions of enum" if an `ENUM` type referenced by the table experienced a schema change. - [#145916][#145916] + #145916 - Fixed a bug where the physical cluster replication (PCR) reader catalog job could hit validation errors when schema objects had dependencies between them (for example, when a sequence's default expression was being removed). - [#145998][#145998] + #145998 - Fixed a bug where orphaned leases were not properly cleaned up. - [#146088][#146088] + #146088 - Fixed an internal assertion failure that could occur during operations like `ALTER TYPE` or `ALTER DATABASE ... ADD REGION` when temporary tables were present. - [#146197][#146197] + #146197 - Fixed a bug that prevented `TRUNCATE` from succeeding if any indexes on the table had back-reference dependencies, such as from a view or function referencing the index. - [#146325][#146325] + #146325 - Fixed a bug that could lead to a node stall. 
- [#146408][#146408] + #146408 - Fixed a bug where an invalid comment in the `system.comment` table for a schema object could make it inacessible. - [#146417][#146417] + #146417 - Fixed a rare corruption bug that impacts import and materialized views. - [#144662][#144662] + #144662 -[#146325]: https://github.com/cockroachdb/cockroach/pull/146325 -[#146408]: https://github.com/cockroachdb/cockroach/pull/146408 -[#144852]: https://github.com/cockroachdb/cockroach/pull/144852 -[#145575]: https://github.com/cockroachdb/cockroach/pull/145575 -[#144766]: https://github.com/cockroachdb/cockroach/pull/144766 -[#144779]: https://github.com/cockroachdb/cockroach/pull/144779 -[#144806]: https://github.com/cockroachdb/cockroach/pull/144806 -[#145384]: https://github.com/cockroachdb/cockroach/pull/145384 -[#145544]: https://github.com/cockroachdb/cockroach/pull/145544 -[#145916]: https://github.com/cockroachdb/cockroach/pull/145916 -[#145989]: https://github.com/cockroachdb/cockroach/pull/145989 -[#144426]: https://github.com/cockroachdb/cockroach/pull/144426 -[#145998]: https://github.com/cockroachdb/cockroach/pull/145998 -[#146417]: https://github.com/cockroachdb/cockroach/pull/146417 -[#145070]: https://github.com/cockroachdb/cockroach/pull/145070 -[#144662]: https://github.com/cockroachdb/cockroach/pull/144662 -[#146088]: https://github.com/cockroachdb/cockroach/pull/146088 -[#146197]: https://github.com/cockroachdb/cockroach/pull/146197 -[#145110]: https://github.com/cockroachdb/cockroach/pull/145110 -[#144655]: https://github.com/cockroachdb/cockroach/pull/144655 -[#145371]: https://github.com/cockroachdb/cockroach/pull/145371 diff --git a/src/current/_includes/releases/v25.1/v25.1.8.md b/src/current/_includes/releases/v25.1/v25.1.8.md index 3122e133a45..87f6906afa6 100644 --- a/src/current/_includes/releases/v25.1/v25.1.8.md +++ b/src/current/_includes/releases/v25.1/v25.1.8.md @@ -7,42 +7,30 @@ Release Date: June 25, 2025

Bug fixes

- Fixed a bug where a CockroachDB node could crash when executing `DO` statements that contain currently unsupported DDL statements like `CREATE TYPE` in a non-default configuration (additional logging needed to be enabled, e.g., via the `sql.log.all_statements.enabled` cluster setting). This bug was introduced in v25.1. - [#146509][#146509] + #146509 - Fixed a bug where the `kv.rangefeed.closed_timestamp.slow_ranges` would not be incremented when a rangefeed closed timestamp was slower than the target threshold. - [#146973][#146973] + #146973 - Fixed a bug that caused the SQL Activity > Statement Fingerprint page to fail to load details for statements run with application names containing a `#` character. - [#147222][#147222] + #147222 - Fixed a bug that could cause an `AFTER` trigger to fail with `client already committed or rolled back the transaction` if the query also contained foreign-key cascades. The bug had existed since `AFTER` triggers were introduced in v24.3. - [#147310][#147310] + #147310 - Fixed a bug that could cause the `cockroach` process to `segfault` when collecting runtime execution traces (typically collected via the **Advanced Debug** page in the Console). - [#147332][#147332] + #147332 - Previously, CockroachDB could incorrectly evaluate the `to_regclass`, `to_regnamespace`, `to_regproc`, `to_regprocedure`, `to_regrole`, and `to_regtype` built-in functions when the query using them was evaluated in a distributed fashion. The bug was introduced with these built-in functions in v23.1 and is now fixed. - [#147375][#147375] + #147375 - Fixed a bug that caused the optimizer to ignore index hints when optimizing some forms of prepared statements. This could result in one of two unexpected behaviors: a query errors with the message `index cannot be used for this query` when the index can actually be used; or a query uses an index that does not adhere to the hint. 
The hints relevant to this bug are regular index hints, e.g., `SELECT * FROM tab@index`, `FORCE_INVERTED_INDEX`, and `FORCE_ZIGZAG`. - [#147416][#147416] + #147416 - Fixed a bug that could cause stable expressions to be folded in cached query plans. The bug could cause stable expressions like `current_setting` to return the wrong result if used in a prepared statement. The bug was introduced in v23.2.22, v24.1.14, v24.3.9, v25.1.2, and the v25.2 alpha. - [#147459][#147459] + #147459 - Fixed a bug where prepared statements on schema changes could fail with runtime errors. - [#147670][#147670] + #147670 - Fixed a bug where `ALTER TABLE` was modifying identity attributes on columns not backed by a sequence. - [#147710][#147710] + #147710 - Fixed an issue with logical data replication where the presence of a unique index may cause spurious dead-letter queue (DLQ) entries if the unique index has a smaller index ID than the primary key index. - [#147353][#147353] + #147353

Performance improvements

- TTL jobs now respond to cluster topology changes by restarting and rebalancing across available nodes. - [#147208][#147208] + #147208 -[#147353]: https://github.com/cockroachdb/cockroach/pull/147353 -[#147332]: https://github.com/cockroachdb/cockroach/pull/147332 -[#147375]: https://github.com/cockroachdb/cockroach/pull/147375 -[#147416]: https://github.com/cockroachdb/cockroach/pull/147416 -[#147670]: https://github.com/cockroachdb/cockroach/pull/147670 -[#147208]: https://github.com/cockroachdb/cockroach/pull/147208 -[#147710]: https://github.com/cockroachdb/cockroach/pull/147710 -[#146509]: https://github.com/cockroachdb/cockroach/pull/146509 -[#146973]: https://github.com/cockroachdb/cockroach/pull/146973 -[#147222]: https://github.com/cockroachdb/cockroach/pull/147222 -[#147310]: https://github.com/cockroachdb/cockroach/pull/147310 -[#147459]: https://github.com/cockroachdb/cockroach/pull/147459 diff --git a/src/current/_includes/releases/v25.1/v25.1.9.md b/src/current/_includes/releases/v25.1/v25.1.9.md index 62eeb044152..111ed02f60c 100644 --- a/src/current/_includes/releases/v25.1/v25.1.9.md +++ b/src/current/_includes/releases/v25.1/v25.1.9.md @@ -7,23 +7,23 @@ Release Date: July 28, 2025

SQL language changes

- Added a session variable `initial_retry_backoff_for_read_committed` that controls the initial backoff duration when retrying an individual statement in an explicit `READ COMMITTED` transaction. A duration of `0` disables exponential backoff. If a statement in an explicit `READ COMMITTED` transaction is failing with the `40001` error `ERROR: restart transaction: read committed retry limit exceeded; set by max_retries_for_read_committed=...`, then you should set `initial_retry_backoff_for_read_committed` to a duration proportional to the typical execution time of the statement (in addition to also increasing `max_retries_for_read_committed`). - [#148223][#148223] + #148223 - Added the metrics `sql.txn.auto_retry.count` and `sql.statements.auto_retry.count`, which count the number of automatic retries of SQL transactions and statements, respectively, within the database. These metrics differ from the related `txn.restarts.*` metrics, which count retryable errors emitted by the KV layer that must be retried. The new `sql.txn.auto_retry.count` and `sql.statements.auto_retry.count` metrics count auto-retry actions taken by the SQL layer in response to some of those retryable errors. - [#148223][#148223] + #148223

DB Console changes

- Updated the "Learn more" link on the **Hot Ranges** page to direct users to a newer, more comprehensive reference guide about hotspots. - [#148521][#148521] + #148521

Bug fixes

- Fixed a data race in the `cloudstorage` sink. - [#147162][#147162] + #147162 - Fixed an error in `crdb_internal.table_spans` that could occur when a table's schema had been dropped. - [#147976][#147976] + #147976 - Fixed a bug where `libpq` clients using the async API could hang with large result sets (Python: psycopg; Ruby: ActiveRecord, ruby-pg). - [#148469][#148469] + #148469 - The `RESET ALL` statement no longer affects the following session variables: - `is_superuser` - `role` @@ -34,21 +34,12 @@ Release Date: July 28, 2025 - `transaction_read_only` This better matches PostgreSQL behavior for `RESET ALL`. In addition, the `DISCARD ALL` statement no longer errors when `default_transaction_use_follower_reads` is enabled. - [#149429][#149429] + #149429 - In v25.1, automatic partial statistics collection was enabled by default (by setting the `sql.stats.automatic_partial_collection.enabled` cluster setting to `true`). Partial statistics collection may encounter certain expected scenarios that were previously reported as failed stats jobs with PostgreSQL error code `55000`. These errors are benign and are no longer reported. Instead, the stats job will be marked as "succeeded," though no new statistics will be created. - [#149624][#149624] + #149624 - Fixed a slow memory leak that was introduced in v25.1.8, v25.2.1, v25.2.2, and v25.3 betas. The leak would accumulate whenever a node executed a part of the distributed plan (the gateway node of the plan was not affected), and could only be mitigated by restarting the node. - [#149925][#149925] + #149925 - Fixed a bug that would allow a race condition in foreign key cascades under `READ COMMITTED` and `REPEATABLE READ` isolation levels. 
- [#150339][#150339] - - -[#148521]: https://github.com/cockroachdb/cockroach/pull/148521 -[#147162]: https://github.com/cockroachdb/cockroach/pull/147162 -[#147976]: https://github.com/cockroachdb/cockroach/pull/147976 -[#148469]: https://github.com/cockroachdb/cockroach/pull/148469 -[#149925]: https://github.com/cockroachdb/cockroach/pull/149925 -[#150339]: https://github.com/cockroachdb/cockroach/pull/150339 -[#148223]: https://github.com/cockroachdb/cockroach/pull/148223 -[#149624]: https://github.com/cockroachdb/cockroach/pull/149624 -[#149429]: https://github.com/cockroachdb/cockroach/pull/149429 + #150339 + + diff --git a/src/current/_includes/releases/v25.2/backward-incompatible.md b/src/current/_includes/releases/v25.2/backward-incompatible.md index 33e6451cfa3..0631ff37936 100644 --- a/src/current/_includes/releases/v25.2/backward-incompatible.md +++ b/src/current/_includes/releases/v25.2/backward-incompatible.md @@ -1,11 +1,6 @@ Before [upgrading to CockroachDB v25.2]({% link v25.2/upgrade-cockroach-version.md %}), be sure to review the following backward-incompatible changes, as well as [key cluster setting changes](#v25-2-0-cluster-settings), and adjust your deployment as necessary. -- The default value of the `autocommit_before_ddl` session variable is now `true`. This will cause any schema change statement that is sent during a transaction to make the current transaction commit before executing the schema change in a separate transaction. Users who do not want the autocommit behavior can preserve the previous behavior by changing the default value of `autocommit_before_ddl` with: `ALTER ROLE ALL SET autocommit_before_ddl = false;`. [#139871] -- `DROP INDEX` can now only be run when `sql_safe_updates` is set to `false`. [#139456] -- Vector indexes do not support mutation while being created with `CREATE INDEX` or rebuilt with `ALTER PRIMARY KEY`. 
To prevent inadvertent application downtime, set the `sql_safe_updates` session setting to `false` when using `CREATE INDEX` or `ALTER PRIMARY KEY` with a vector index. [#144601] -- The variable arguments of polymorphic built-in functions (e.g., `concat`, `num_nulls`, `format`, `concat_ws`, etc.) no longer need to have the same type, matching PostgreSQL behavior. As a result, CockroachDB's type inference engine will no longer be able to infer argument types in some cases where it previously could, and there is a possibility that CockroachDB applications will encounter new errors. The new session variable `use_pre_25_2_variadic_builtins` restores the previous behavior (and limitations). [#144600] - -[#139871]: https://github.com/cockroachdb/cockroach/pull/139871 -[#139456]: https://github.com/cockroachdb/cockroach/pull/139456 -[#144601]: https://github.com/cockroachdb/cockroach/pull/144601 -[#144600]: https://github.com/cockroachdb/cockroach/pull/144600 \ No newline at end of file +- The default value of the `autocommit_before_ddl` session variable is now `true`. This will cause any schema change statement that is sent during a transaction to make the current transaction commit before executing the schema change in a separate transaction. Users who do not want the autocommit behavior can preserve the previous behavior by changing the default value of `autocommit_before_ddl` with: `ALTER ROLE ALL SET autocommit_before_ddl = false;`. #139871 +- `DROP INDEX` can now only be run when `sql_safe_updates` is set to `false`. #139456 +- Vector indexes do not support mutation while being created with `CREATE INDEX` or rebuilt with `ALTER PRIMARY KEY`. To prevent inadvertent application downtime, set the `sql_safe_updates` session setting to `false` when using `CREATE INDEX` or `ALTER PRIMARY KEY` with a vector index. #144601 +- The variable arguments of polymorphic built-in functions (e.g., `concat`, `num_nulls`, `format`, `concat_ws`, etc.) 
no longer need to have the same type, matching PostgreSQL behavior. As a result, CockroachDB's type inference engine will no longer be able to infer argument types in some cases where it previously could, and there is a possibility that CockroachDB applications will encounter new errors. The new session variable `use_pre_25_2_variadic_builtins` restores the previous behavior (and limitations). #144600 diff --git a/src/current/_includes/releases/v25.2/cluster-setting-changes.md b/src/current/_includes/releases/v25.2/cluster-setting-changes.md index a2ee5086953..aa66b76fb9f 100644 --- a/src/current/_includes/releases/v25.2/cluster-setting-changes.md +++ b/src/current/_includes/releases/v25.2/cluster-setting-changes.md @@ -12,7 +12,7 @@ Changes to [cluster settings]({% link v25.2/cluster-settings.md %}) should be re The following settings are now marked `public` after previously being `reserved`. Reserved settings are not documented and their tuning by customers is not supported. -- `sql.stats.detailed_latency_metrics.enabled` - Percentile latencies are no longer available for **SQL Activity**. The implementation of these percentiles was error-prone and difficult to understand because it was computed differently from the other SQL statistics collected. Customers interested in viewing percentile latencies per statement fingerprint are encouraged to use the experimental per-fingerprint histograms that can be enabled with the `sql.stats.detailed_latency_metrics.enabled` cluster setting. This will enable externalized histogram metrics via the Prometheus scrape endpoint. [#139500](https://github.com/cockroachdb/cockroach/pulls/139500) +- `sql.stats.detailed_latency_metrics.enabled` - Percentile latencies are no longer available for **SQL Activity**. The implementation of these percentiles was error-prone and difficult to understand because it was computed differently from the other SQL statistics collected. 
Customers interested in viewing percentile latencies per statement fingerprint are encouraged to use the experimental per-fingerprint histograms that can be enabled with the `sql.stats.detailed_latency_metrics.enabled` cluster setting. This will enable externalized histogram metrics via the Prometheus scrape endpoint. #139500
Settings requiring operational changes
diff --git a/src/current/_includes/releases/v25.2/v25.2.0-alpha.1.md b/src/current/_includes/releases/v25.2/v25.2.0-alpha.1.md index ab147b02fea..fb3a3c9760a 100644 --- a/src/current/_includes/releases/v25.2/v25.2.0-alpha.1.md +++ b/src/current/_includes/releases/v25.2/v25.2.0-alpha.1.md @@ -6,258 +6,152 @@ Release Date: March 24, 2025

Backward-incompatible changes

-- The default value of the `autocommit_before_ddl` session variable is now `true`. This will cause any schema change statement that is sent during a transaction to make the current transaction commit before executing the schema change in a separate transaction. CockroachDB does not have full support for multiple [schema changes in a transaction]({% link v25.1/online-schema-changes.md %}#schema-changes-within-transactions). Users who do not want the autocommit behavior can preserve the previous behavior by changing the default value of `autocommit_before_ddl` with: `ALTER ROLE ALL SET autocommit_before_ddl = false;`. [#139871][#139871] +- The default value of the `autocommit_before_ddl` session variable is now `true`. This will cause any schema change statement that is sent during a transaction to make the current transaction commit before executing the schema change in a separate transaction. CockroachDB does not have full support for multiple [schema changes in a transaction]({% link v25.1/online-schema-changes.md %}#schema-changes-within-transactions). Users who do not want the autocommit behavior can preserve the previous behavior by changing the default value of `autocommit_before_ddl` with: `ALTER ROLE ALL SET autocommit_before_ddl = false;`. #139871

Security updates

-- Added the `server.oidc_authentication.provider.custom_ca` cluster setting to support custom root CA for verifying certificates while authenticating with the OIDC provider. [#140583][#140583] +- Added the `server.oidc_authentication.provider.custom_ca` cluster setting to support custom root CA for verifying certificates while authenticating with the OIDC provider. #140583

General changes

-- When changefeeds are created with a `resolved` option lower than the `min_checkpoint_frequency` option, an error message was displayed to inform the user. This message is now a notice and includes extra information if either option was set to its default value. [#142094][#142094] -- Added the logging of `changefeed_canceled` events to the telemetry log. [#142139][#142139] -- Updated the response headers of HTTP requests to include `"Cache-control: no-store"` instead of `"Cache-control:no-cache"`, which means that HTTP requests to the server will no longer be cached in the client. Requests for UI assets, such as `bundle.js` and fonts, will still include `"Cache-control:no-cache"` to ensure they are cached and that the DB console loads quickly. [#142277][#142277] -- Added the `headers_json_column_name` option to the Kafka sink, allowing users to specify a column in their table(s) of type `JSONB` to be used as the Kafka headers for each row. [#142092][#142092] -- Improved S3 credential caching for STS credentials. [#142434][#142434] +- When changefeeds are created with a `resolved` option lower than the `min_checkpoint_frequency` option, an error message was displayed to inform the user. This message is now a notice and includes extra information if either option was set to its default value. #142094 +- Added the logging of `changefeed_canceled` events to the telemetry log. #142139 +- Updated the response headers of HTTP requests to include `"Cache-control: no-store"` instead of `"Cache-control:no-cache"`, which means that HTTP requests to the server will no longer be cached in the client. Requests for UI assets, such as `bundle.js` and fonts, will still include `"Cache-control:no-cache"` to ensure they are cached and that the DB console loads quickly. #142277 +- Added the `headers_json_column_name` option to the Kafka sink, allowing users to specify a column in their table(s) of type `JSONB` to be used as the Kafka headers for each row. 
#142092 +- Improved S3 credential caching for STS credentials. #142434

SQL language changes

-- The `plan_cache_mode` session setting now defaults to `auto`, enabling generic query plans for some queries. [#135668][#135668] -- `SHOW JOBS` is now based on a new mechanism for storing information about the progress and status of running jobs. [#138104][#138104] -- `SHOW VIRTUAL CLUSTER WITH REPLICATION STATUS` now displays the `ingestion_job_id` column after the `name` column. [#138967][#138967] -- Since v23.2 table statistics histograms have been collected for non-indexed JSON columns. Histograms are no longer collected for these columns. This reduces memory usage during table statistics collection, for both automatic and manual collection via `ANALYZE` and `CREATE STATISTICS`. This can be reverted by setting the cluster setting `sql.stats.non_indexed_json_histograms.enabled` to `true`. [#139766][#139766] -- `optimizer_use_merged_partial_statistics` is now enabled by default, meaning the optimizer will use partial stats if available to estimate more up-to-date statistics. [#139925][#139925] -- The `optimizer_prefer_bounded_cardinality` session setting has been added that instructs the optimizer to prefer query plans where every expression has a guaranteed upper-bound on the number of rows it will process. This may help the optimizer produce better query plans in some cases. This setting is disabled by default. [#139985][#139985] -- The `optimizer_min_row_count` session setting has been added that sets a lower bound on row count estimates for relational expressions during query planning. A value of `0`, which is the default, indicates no lower bound. If this is set to a value greater than `0`, a row count of `0` can still be estimated for expressions with a cardinality of `0`, e.g., for a contradictory filter. Setting this to a value higher than `0`, such as `1`, may yield better query plans in some cases, such as when statistics are frequently stale and inaccurate. 
[#140065][#140065] -- Fixed a bug existing only in pre-release versions of v25.1 that could cause unexpected errors during planning for `VALUES` expressions containing function calls with multiple overloads. [#140277][#140277] -- The `optimizer_check_input_min_row_count` session setting has been added to control the minimum row count estimate for buffer scans of foreign key and uniqueness checks. It defaults to `0`. [#140735][#140735] -- Added the `jsonpath` type, without parsing, evaluation, or table creation. Currently accepts any non-empty string. [#140204][#140204] -- Added the `substring_index` built-in function, which extracts a portion of a string based on a specified delimiter and occurrence count, which follows MySQL behavior. [#141929][#141929] -- Added compression support for changefeed webhook sinks. This reduces network bandwidth and storage usage, improving performance and lowering costs. Users can enable compression by setting the `compression=` option. Supported algorithms are `gzip` and `zstd`. [#138872][#138872] -- Holdable cursors declared using `CURSOR WITH HOLD` are now supported. A holdable cursor fully executes a query upon transaction commit and stores the result in a row container, which is maintained by the session. [#141943][#141943] -- The `split_part` built-in function now supports negative `return_index_pos` values, returning the |n|th field from the end when specified. [#141944][#141944] -- Added a parser for the `jsonpath` type. Accepts setting mode (`strict/lax`), key accessors (`.name`), and array wildcards (`[*]`). [#142010][#142010] -- Added the new option `WITH IGNORE_FOREIGN_KEYS` to the `SHOW CREATE TABLE` statement so that foreign key constraints are not included in the output schema. This option is also acceptable in `SHOW CREATE VIEW`, but has no influence there. It cannot be combined with the existing `WITH REDACT` option. [#142151][#142151] -- `CREATE TABLE AS SELECT ... FROM ... AS OF SYSTEM TIME x` is now supported. 
It cannot be executed within an explicit transaction. [#142147][#142147] -- Invocations of stored procedures via `CALL` statements will now be counted toward the newly added `sql.call_stored_proc.count.started` and `sql.call_stored_proc.count` metrics. Previously, they were counted against the `sql.misc.count.started` and `sql.misc.count` metrics. [#142249][#142249] -- Statements such as `REFRESH MATERIALIZED VIEW` and `CREATE MATERIALIZED VIEW` can now be executed with an `AS OF SYSTEM TIME` clause. These statements can still not be used in an explicit transaction. [#142259][#142259] +- The `plan_cache_mode` session setting now defaults to `auto`, enabling generic query plans for some queries. #135668 +- `SHOW JOBS` is now based on a new mechanism for storing information about the progress and status of running jobs. #138104 +- `SHOW VIRTUAL CLUSTER WITH REPLICATION STATUS` now displays the `ingestion_job_id` column after the `name` column. #138967 +- Since v23.2 table statistics histograms have been collected for non-indexed JSON columns. Histograms are no longer collected for these columns. This reduces memory usage during table statistics collection, for both automatic and manual collection via `ANALYZE` and `CREATE STATISTICS`. This can be reverted by setting the cluster setting `sql.stats.non_indexed_json_histograms.enabled` to `true`. #139766 +- `optimizer_use_merged_partial_statistics` is now enabled by default, meaning the optimizer will use partial stats if available to estimate more up-to-date statistics. #139925 +- The `optimizer_prefer_bounded_cardinality` session setting has been added that instructs the optimizer to prefer query plans where every expression has a guaranteed upper-bound on the number of rows it will process. This may help the optimizer produce better query plans in some cases. This setting is disabled by default. 
#139985 +- The `optimizer_min_row_count` session setting has been added that sets a lower bound on row count estimates for relational expressions during query planning. A value of `0`, which is the default, indicates no lower bound. If this is set to a value greater than `0`, a row count of `0` can still be estimated for expressions with a cardinality of `0`, e.g., for a contradictory filter. Setting this to a value higher than `0`, such as `1`, may yield better query plans in some cases, such as when statistics are frequently stale and inaccurate. #140065 +- Fixed a bug existing only in pre-release versions of v25.1 that could cause unexpected errors during planning for `VALUES` expressions containing function calls with multiple overloads. #140277 +- The `optimizer_check_input_min_row_count` session setting has been added to control the minimum row count estimate for buffer scans of foreign key and uniqueness checks. It defaults to `0`. #140735 +- Added the `jsonpath` type, without parsing, evaluation, or table creation. Currently accepts any non-empty string. #140204 +- Added the `substring_index` built-in function, which extracts a portion of a string based on a specified delimiter and occurrence count, which follows MySQL behavior. #141929 +- Added compression support for changefeed webhook sinks. This reduces network bandwidth and storage usage, improving performance and lowering costs. Users can enable compression by setting the `compression=` option. Supported algorithms are `gzip` and `zstd`. #138872 +- Holdable cursors declared using `CURSOR WITH HOLD` are now supported. A holdable cursor fully executes a query upon transaction commit and stores the result in a row container, which is maintained by the session. #141943 +- The `split_part` built-in function now supports negative `return_index_pos` values, returning the |n|th field from the end when specified. #141944 +- Added a parser for the `jsonpath` type. 
Accepts setting mode (`strict/lax`), key accessors (`.name`), and array wildcards (`[*]`). #142010 +- Added the new option `WITH IGNORE_FOREIGN_KEYS` to the `SHOW CREATE TABLE` statement so that foreign key constraints are not included in the output schema. This option is also acceptable in `SHOW CREATE VIEW`, but has no influence there. It cannot be combined with the existing `WITH REDACT` option. #142151 +- `CREATE TABLE AS SELECT ... FROM ... AS OF SYSTEM TIME x` is now supported. It cannot be executed within an explicit transaction. #142147 +- Invocations of stored procedures via `CALL` statements will now be counted toward the newly added `sql.call_stored_proc.count.started` and `sql.call_stored_proc.count` metrics. Previously, they were counted against the `sql.misc.count.started` and `sql.misc.count` metrics. #142249 +- Statements such as `REFRESH MATERIALIZED VIEW` and `CREATE MATERIALIZED VIEW` can now be executed with an `AS OF SYSTEM TIME` clause. These statements can still not be used in an explicit transaction. #142259 - Added support for the following in the `jsonpath` parser: - Double-quoted key accessors within `jsonpath` (`SELECT '$."1key"."key2"'::JSONPATH;`). - Array integer indexing (ex. `$.a[1]`). - Array ranges (ex. `$.a[1 to 3]`). - - Array unions (ex `$.a[1, 2 to 4, 7, 8]`). [#142241][#142241] -- Fixed a regression due to join-elimination rules that left a Project operator below a `JOIN`, preventing optimizer rules from applying. [#142252][#142252] -- Added `ALTER VIRTUAL CLUSTER .. SET REPLICATION SOURCE` so users can configure the producer jobs on the source cluster for physical cluster replication (PCR). Currently, they can only configure the `EXPIRATION WINDOW`. This patch also removes the `EXPIRATION WINDOW` option from the consumer side of the statement, `ALTER VIRTUAL CLUSTER SET REPLICATION`. 
[#142501][#142501] -- Added the `jsonb_path_query` function, which takes in a JSON object and a `jsonpath` query, and returns the resulting JSON object. [#142336][#142336] -- Updated the `CREATE TRIGGER` statement `only implemented in the declarative schema changer` error message to include a helpful suggestion and link to relevant docs. [#141738][#141738] + - Array unions (ex `$.a[1, 2 to 4, 7, 8]`). #142241 +- Fixed a regression due to join-elimination rules that left a Project operator below a `JOIN`, preventing optimizer rules from applying. #142252 +- Added `ALTER VIRTUAL CLUSTER .. SET REPLICATION SOURCE` so users can configure the producer jobs on the source cluster for physical cluster replication (PCR). Currently, they can only configure the `EXPIRATION WINDOW`. This patch also removes the `EXPIRATION WINDOW` option from the consumer side of the statement, `ALTER VIRTUAL CLUSTER SET REPLICATION`. #142501 +- Added the `jsonb_path_query` function, which takes in a JSON object and a `jsonpath` query, and returns the resulting JSON object. #142336 +- Updated the `CREATE TRIGGER` statement `only implemented in the declarative schema changer` error message to include a helpful suggestion and link to relevant docs. #141738

Operational changes

-- Removed the `storage.queue.store-failures` metric. [#139150][#139150] -- Customers must provide URIs as external connections to create logical data replication (LDR) statements. [#139383][#139383] +- Removed the `storage.queue.store-failures` metric. #139150 +- Customers must provide URIs as external connections to create logical data replication (LDR) statements. #139383 - The following cluster settings have been deprecated: - `sql.metrics.statement_details.plan_collection.enabled` - - `sql.metrics.statement_details.plan_collection.period` [#138042][#138042] -- Reduced noise when using dynamically provisioned logging sinks. [#139565][#139565] + - `sql.metrics.statement_details.plan_collection.period` #138042 +- Reduced noise when using dynamically provisioned logging sinks. #139565 - Added metrics for monitoring changefeed span-level checkpoint creation: - `changefeed.checkpoint.create_nanos`, which measures the time it takes to create a changefeed checkpoint. - `changefeed.checkpoint.total_bytes`, which measures the total size of a changefeed checkpoint in bytes. - - `changefeed.checkpoint.span_count`, which measures the number of spans in a changefeed checkpoint. [#139375][#139375] + - `changefeed.checkpoint.span_count`, which measures the number of spans in a changefeed checkpoint. #139375 - The following schema changes are now allowlisted to run during LDR. - `ALTER INDEX RENAME`. - `ALTER INDEX .. NOT VISIBLE`. - `ALTER TABLE .. ALTER COLUMN .. SET DEFAULT`. - `ALTER TABLE .. ALTER COLUMN .. DROP DEFAULT`. - - `ALTER TABLE .. ALTER COLUMN SET VISIBLE`. [#141858][#141858] -- Added `sql.statement_timeout.count` to track the number of SQL statements that fail due to exceeding the statement timeout. [#142078][#142078] -- Added the `sql.transaction_timeout.count` metric to track the number of SQL statements that fail due to exceeding the transaction timeout. 
[#142105][#142105] -- Added the `jobs.row_level_ttl.num_delete_batch_retries` metric to track the number of times the TTL job had to reduce the batch size and try again. [#141953][#141953] -- To create a logical data replication (LDR) stream, users require the `REPLICATIONDEST` privilege, instead of the `REPLICATION` privilege. [#142345][#142345] -- To create a physical cluster replication (PCR) stream, users require the `REPLICATIONDEST` privilege, in addition to the already required `MANAGEVIRTUALCLUSTER` privilege. [#142345][#142345] -- Removed the `kv.snapshot_receiver.excise.enable` cluster setting. Excise is now enabled unconditionally. [#142651][#142651] -- Introduced the cluster setting `server.child_metrics.include_aggregate.enabled`, which modifies the behavior of Prometheus metric reporting (`/_status/vars`). By default, it is set to `true`, which maintains the existing behavior. It can be sert to `false` to stop the reporting of the aggregate time series that prevents issues with double counting when querying metrics. [#141601][#141601] -- When configuring the `sql.ttl.default_delete_rate_limit` cluster setting, a notice is displayed informing that the TTL rate limit is per leaseholder per node with a link to the docs. [#142061][#142061] -- Added a new `envelope` type `enriched` for changefeeds. [#140112][#140112] -- Added support for the `enriched` envelope type to Avro format changefeeds. [#140525][#140525] -- The cluster setting `changefeed.new_webhook_sink_enabled`/`changefeed.new_webhook_sink.enabled` is no longer supported. The new webhook sink has been enabled by default since v23.2, and the first version webhook sink has been removed. [#141940][#141940] -- The cluster setting `changefeed.new_pubsub_sink_enabled`/`changefeed.new_pubsub_sink.enabled` is no longer supported. The new Google Cloud Pub/Sub sink has been enabled by default since v23.2, and the first version Pub/Sub sink has been removed. 
[#141948][#141948] -- `DROP INDEX` can now only be run when `sql_safe_updates` is set to `false`. [#139456][#139456] + - `ALTER TABLE .. ALTER COLUMN SET VISIBLE`. #141858 +- Added `sql.statement_timeout.count` to track the number of SQL statements that fail due to exceeding the statement timeout. #142078 +- Added the `sql.transaction_timeout.count` metric to track the number of SQL statements that fail due to exceeding the transaction timeout. #142105 +- Added the `jobs.row_level_ttl.num_delete_batch_retries` metric to track the number of times the TTL job had to reduce the batch size and try again. #141953 +- To create a logical data replication (LDR) stream, users require the `REPLICATIONDEST` privilege, instead of the `REPLICATION` privilege. #142345 +- To create a physical cluster replication (PCR) stream, users require the `REPLICATIONDEST` privilege, in addition to the already required `MANAGEVIRTUALCLUSTER` privilege. #142345 +- Removed the `kv.snapshot_receiver.excise.enable` cluster setting. Excise is now enabled unconditionally. #142651 +- Introduced the cluster setting `server.child_metrics.include_aggregate.enabled`, which modifies the behavior of Prometheus metric reporting (`/_status/vars`). By default, it is set to `true`, which maintains the existing behavior. It can be set to `false` to stop the reporting of the aggregate time series that prevents issues with double counting when querying metrics. #141601 +- When configuring the `sql.ttl.default_delete_rate_limit` cluster setting, a notice is displayed informing that the TTL rate limit is per leaseholder per node with a link to the docs. #142061 +- Added a new `envelope` type `enriched` for changefeeds. #140112 +- Added support for the `enriched` envelope type to Avro format changefeeds. #140525 +- The cluster setting `changefeed.new_webhook_sink_enabled`/`changefeed.new_webhook_sink.enabled` is no longer supported.
The new webhook sink has been enabled by default since v23.2, and the first version webhook sink has been removed. #141940 +- The cluster setting `changefeed.new_pubsub_sink_enabled`/`changefeed.new_pubsub_sink.enabled` is no longer supported. The new Google Cloud Pub/Sub sink has been enabled by default since v23.2, and the first version Pub/Sub sink has been removed. #141948 +- `DROP INDEX` can now only be run when `sql_safe_updates` is set to `false`. #139456

Command-line changes

-- Improved the performance of the debug zip query that collects `transaction_contention_events` data, reducing the chances of `memory budget exceeded` or `query execution canceled due to statement timeout` errors. [#139735][#139735] -- Removed the deprecated `--storage-engine` parameter from the CLI. [#139744][#139744] +- Improved the performance of the debug zip query that collects `transaction_contention_events` data, reducing the chances of `memory budget exceeded` or `query execution canceled due to statement timeout` errors. #139735 +- Removed the deprecated `--storage-engine` parameter from the CLI. #139744

DB Console changes

-- The `/_admin/v1/settings` API (and therefore cluster settings console page) now returns cluster settings using the same redaction logic as querying `SHOW CLUSTER SETTINGS` and `crdb_internal.cluster_settings`. This means that only settings flagged as "sensitive" will be redacted, all other settings will be visible. The same authorization is required for this endpoint, meaning the user must be an `admin`, have `MODIFYCLUSTERSETTINGS`, or `VIEWCLUSTERSETTINGS` roles to use this API. The exception is that if the user has `VIEWACTIVITY` or `VIEWACTIVITYREDACTED`, they will see console-only settings. [#138688][#138688] -- The **Overload** dashboard in the DB Console now shows only the v2 replication admission control metrics, where previously it displayed both v1 and v2 metrics. Additionally, the aggregate size of queued replication entries is now shown. [#139066][#139066] -- Jobs can now choose to emit messages that are shown on the **Jobs Details** page in v25.1 and later. [#139246][#139246] -- An event is posted when a store is getting close to full capacity. [#139199][#139199] -- Percentile latencies are no longer available for **SQL Activity**. The implementation of these percentiles was error-prone and difficult to understand because it was computed differently from the other SQL statistics collected. Customers interested in viewing percentile latencies per statement fingerprint are encouraged to use the experimental per-fingerprint histograms that can be enabled with the `sql.stats.detailed_latency_metrics.enabled` cluster setting. This will enable externalized histogram metrics via the Prometheus scrape endpoint. [#139500][#139500] -- Surfaced commit latency on the **Transactions** pages [#139946][#139946] -- Removed the **Paused Follower** graph from the **Replication** dashboard in the DB Console as followers are no longer paused by default from v25.1. 
[#141427][#141427] -- DB console's `index.html` page now includes a Content-Security-Policy (CSP) header to help prevent malicious XSS attacks. [#142282][#142282] +- The `/_admin/v1/settings` API (and therefore cluster settings console page) now returns cluster settings using the same redaction logic as querying `SHOW CLUSTER SETTINGS` and `crdb_internal.cluster_settings`. This means that only settings flagged as "sensitive" will be redacted, all other settings will be visible. The same authorization is required for this endpoint, meaning the user must be an `admin`, have `MODIFYCLUSTERSETTINGS`, or `VIEWCLUSTERSETTINGS` roles to use this API. The exception is that if the user has `VIEWACTIVITY` or `VIEWACTIVITYREDACTED`, they will see console-only settings. #138688 +- The **Overload** dashboard in the DB Console now shows only the v2 replication admission control metrics, where previously it displayed both v1 and v2 metrics. Additionally, the aggregate size of queued replication entries is now shown. #139066 +- Jobs can now choose to emit messages that are shown on the **Jobs Details** page in v25.1 and later. #139246 +- An event is posted when a store is getting close to full capacity. #139199 +- Percentile latencies are no longer available for **SQL Activity**. The implementation of these percentiles was error-prone and difficult to understand because it was computed differently from the other SQL statistics collected. Customers interested in viewing percentile latencies per statement fingerprint are encouraged to use the experimental per-fingerprint histograms that can be enabled with the `sql.stats.detailed_latency_metrics.enabled` cluster setting. This will enable externalized histogram metrics via the Prometheus scrape endpoint. #139500 +- Surfaced commit latency on the **Transactions** pages #139946 +- Removed the **Paused Follower** graph from the **Replication** dashboard in the DB Console as followers are no longer paused by default from v25.1. 
#141427 +- The DB Console's `index.html` page now includes a Content-Security-Policy (CSP) header to help prevent malicious XSS attacks. #142282

Bug fixes

-- Previously, storage parameters with the same key would lead to ambiguity. This has now been fixed and an error surfaced if duplicate storage parameters are specified. [#139172][#139172] -- Fixed a bug where the error `batch timestamp T must be after replica GC threshold` could occur during a schema change backfill operation, causing the schema change job to retry infinitely. Now, this error is treated as permanent, and will cause the job to enter the `failed` state. [#139203][#139203] -- Previously, whenever CockroachDB collected a statement bundle when plan-gist-based matching was used, the `plan.txt` file would be incomplete. This bug is now fixed—it had been present since the introduction of the plan-gist-based matching feature in v23.1, but was partially addressed in the v24.2 release. [#127604][#127604] -- Previously, `EXPLAIN ANALYZE` of mutation statements would always get `actual row count: 1` execution statistic for the corresponding mutation node in the plan, regardless of how many rows were actually modified. The bug has been present since before v22.2 and is now fixed. [#139278][#139278] -- Fixed a bug where sometimes activating diagnostics for SQL activity appeared unresponsive, with no state or status update upon activating. Now, the status should always reflect that diagnostics are active or that a statement bundle is downloadable. [#139342][#139342] -- The `to_regclass`, `to_regtype`, `to_regrole`, and related functions now return `NULL` for any numerical input argument. [#139777][#139777] -- Fixed a rare bug in which a query might fail with the error `could not find computed column expression for column in table` while dropping a virtual computed column from the table. This bug was introduced in v23.2.4. [#139388][#139388] -- The optimizer could produce incorrect query plans for queries using trigram similarity filters (e.g., `col % 'val'`) when `pg_trgm.similarity_threshold` was set to `0`. This bug was introduced in v22.2.0 and is now fixed. 
Note that this issue does not affect v24.2.0 and later releases when the `optimizer_use_trigram_similarity_optimization` session variable (introduced in v24.2.0) is set to its default value `true`, as it would skip this behavior. [#139265][#139265] -- Fixed a bug that could cause `SHOW TABLES` and other introspection operations to encounter a `batch timestamp must be after replica GC threshold` error. [#139532][#139532] -- Fixed a bug that existed only in pre-release versions of v25.1. The bug could cause creation of a PL/pgSQL routine with a common table expression (CTE) to fail with an error like the following: `unexpected root expression: with`. [#140083][#140083] -- Configuring replication controls on a partition name of an index that is not unique across all indexes will correctly impact only that partition. [#140167][#140167] -- The **Data distribution** page in Advanced Debug will no longer crash if there are `NULL` values for `raw_sql_config` in `crdb_internal.zones`. [#140066][#140066] -- Fixed a bug where dropping a table with a trigger using the legacy schema changer could leave an orphaned reference in the descriptor. This occurred when two tables were dependent on each other via a trigger, and the table containing the trigger was dropped. [#140995][#140995] -- Addressed a bug that could cause concurrent DML operations to prevent primary key changes from succeeding. [#141189][#141189] -- Fixed a bug that prevented transaction retry errors encountered during implicit transactions from being automatically retried internally if the `autocommit_before_ddl` session variable was enabled and the statement was a schema change. [#141369][#141369] -- A step in the v25.1 upgrade finalization process that required backfilling jobs now uses locks to ensure it makes progress even when there is contention on the jobs table, which will prevent the possibility of becoming stuck under heavy load. 
[#141420][#141420] -- Fixed a bug that could prevent `SHOW CREATE TABLE` from working if a database was offline (e.g., due to a `RESTORE` on that database). [#141195][#141195] -- Fixed a bug that prevented starting multi-table logical data replication (LDR) streams on tables that used user-defined types. [#141634][#141634] -- Fixed a bug that could cause `nil pointer dereference` errors when executing statements with UDFs. The error could also occur when executing statements with some built-in functions, like `obj_description`. [#141596][#141596] -- Fixed a bug where a node that was drained as part of decommissioning may have interrupted SQL connections that were still active during drain (and for which drain would have been expected to wait). [#141411][#141411] -- Fixed a bug where the fraction completed and internal checkpoints during an index backfill operation would stop getting written if any of the periodic fraction/checkpoint write operations failed. Additional logging was added so that progress is logged in addition to being written to the job record. This bug affected schema change operations, such as creating an index or adding a non-nullable column to a table. [#141714][#141714] -- Fixed a bug that could cause gateway nodes to panic when performing an `UPSERT` on a table with a `BOOL` primary key column and a partial index with the primary key column as the predicate expression. [#141728][#141728] -- Fixed a bug where CockroachDB could incorrectly evaluate casts to some `OID` types (like `REGCLASS`) in some cases. The bug has been present since at least v22.1. [#141946][#141946] -- Transactions that enter the `aborted` state now release locks they are holding immediately, provided there is no `SAVEPOINT` active in the transaction. [#140160][#140160] -- Fixed a bug when running with `autocommit_before_ddl` that could cause a runtime error when binding a previously prepared DDL statement. 
[#142034][#142034] -- Fixed a bug where orphaned leases were not properly cleaned up. [#141429][#141429] -- Previously, the `CREATE LOGICALLY REPLICATED` syntax would always create the destination side table with the source side name, instead of the user-provided name. This change ensures the user-provided name is used. [#142235][#142235] -- Fixed a bug that would prevent `CREATE TRIGGER` and `DROP TRIGGER` statements from working if the `autocommit_before_ddl` setting was enabled, and if the statement was either sent as a prepared statement or as part of a batch of multiple statements. [#142202][#142202] -- Fixed a bug that could cause the upgrade to v25.1 to crash if a job was missing from the virtual table, for example, if a malformed job had no payload information. [#142284][#142284] -- The TTL deletion job now includes a retry mechanism that progressively reduces the batch size when encountering contention. This improves the chances of successful deletion without requiring manual adjustments to TTL job settings. [#141953][#141953] -- Fixed an issue where removed nodes could leave persistent entries in `crdb_internal.gossip_alerts`. [#142385][#142385] -- Invalid default expressions could cause backfilling schema changes to retry forever. [#142490][#142490] -- Fast failback could succeed even if the destination cluster's protected timestamp had been removed, causing the reverse stream to enter a crashing loop. This fix ensures the failback command fast fails. [#142231][#142231] -- Fixed an issue where dropping a database with triggers could fail due to an undropped back reference to a trigger function. [#142670][#142670] -- Fixed a bug where replication controls on indexes and partitions would not get properly updated during an index backfill (in the declarative schema changer) to its new ID; effectively discarding the replication controls set on it before the backfill. 
[#141800][#141800] -- Addressed a bug where `CREATE SEQUENCE` could succeed under with a `DROP SCHEMA` or `DROP DATABASE` in progress. [#142696][#142696] -- Fixed a bug in client certificate expiration metrics. [#142682][#142682] -- Physical cluster replication (PCR) reader catalogs could have orphan rows in `system.namespace` after an object is renamed. [#142829][#142829] -- Fixed a bug where during validation of a table-level zone configuration, inherited values were incorrectly populated from the default range instead of from the parent database. [#142760][#142760] -- Fixed a bug that would send a replica outside of a tenant known region, when `SURVIVE REGION FAILURE` was set and exactly 3 regions were configured. [#142838][#142838] +- Previously, storage parameters with the same key would lead to ambiguity. This has now been fixed and an error surfaced if duplicate storage parameters are specified. #139172 +- Fixed a bug where the error `batch timestamp T must be after replica GC threshold` could occur during a schema change backfill operation, causing the schema change job to retry infinitely. Now, this error is treated as permanent, and will cause the job to enter the `failed` state. #139203 +- Previously, whenever CockroachDB collected a statement bundle when plan-gist-based matching was used, the `plan.txt` file would be incomplete. This bug is now fixed—it had been present since the introduction of the plan-gist-based matching feature in v23.1, but was partially addressed in the v24.2 release. #127604 +- Previously, `EXPLAIN ANALYZE` of mutation statements would always get `actual row count: 1` execution statistic for the corresponding mutation node in the plan, regardless of how many rows were actually modified. The bug has been present since before v22.2 and is now fixed. #139278 +- Fixed a bug where sometimes activating diagnostics for SQL activity appeared unresponsive, with no state or status update upon activating. 
Now, the status should always reflect that diagnostics are active or that a statement bundle is downloadable. #139342 +- The `to_regclass`, `to_regtype`, `to_regrole`, and related functions now return `NULL` for any numerical input argument. #139777 +- Fixed a rare bug in which a query might fail with the error `could not find computed column expression for column in table` while dropping a virtual computed column from the table. This bug was introduced in v23.2.4. #139388 +- The optimizer could produce incorrect query plans for queries using trigram similarity filters (e.g., `col % 'val'`) when `pg_trgm.similarity_threshold` was set to `0`. This bug was introduced in v22.2.0 and is now fixed. Note that this issue does not affect v24.2.0 and later releases when the `optimizer_use_trigram_similarity_optimization` session variable (introduced in v24.2.0) is set to its default value `true`, as it would skip this behavior. #139265 +- Fixed a bug that could cause `SHOW TABLES` and other introspection operations to encounter a `batch timestamp must be after replica GC threshold` error. #139532 +- Fixed a bug that existed only in pre-release versions of v25.1. The bug could cause creation of a PL/pgSQL routine with a common table expression (CTE) to fail with an error like the following: `unexpected root expression: with`. #140083 +- Configuring replication controls on a partition name of an index that is not unique across all indexes will correctly impact only that partition. #140167 +- The **Data distribution** page in Advanced Debug will no longer crash if there are `NULL` values for `raw_sql_config` in `crdb_internal.zones`. #140066 +- Fixed a bug where dropping a table with a trigger using the legacy schema changer could leave an orphaned reference in the descriptor. This occurred when two tables were dependent on each other via a trigger, and the table containing the trigger was dropped. 
#140995 +- Addressed a bug that could cause concurrent DML operations to prevent primary key changes from succeeding. #141189 +- Fixed a bug that prevented transaction retry errors encountered during implicit transactions from being automatically retried internally if the `autocommit_before_ddl` session variable was enabled and the statement was a schema change. #141369 +- A step in the v25.1 upgrade finalization process that required backfilling jobs now uses locks to ensure it makes progress even when there is contention on the jobs table, which will prevent the possibility of becoming stuck under heavy load. #141420 +- Fixed a bug that could prevent `SHOW CREATE TABLE` from working if a database was offline (e.g., due to a `RESTORE` on that database). #141195 +- Fixed a bug that prevented starting multi-table logical data replication (LDR) streams on tables that used user-defined types. #141634 +- Fixed a bug that could cause `nil pointer dereference` errors when executing statements with UDFs. The error could also occur when executing statements with some built-in functions, like `obj_description`. #141596 +- Fixed a bug where a node that was drained as part of decommissioning may have interrupted SQL connections that were still active during drain (and for which drain would have been expected to wait). #141411 +- Fixed a bug where the fraction completed and internal checkpoints during an index backfill operation would stop getting written if any of the periodic fraction/checkpoint write operations failed. Additional logging was added so that progress is logged in addition to being written to the job record. This bug affected schema change operations, such as creating an index or adding a non-nullable column to a table. #141714 +- Fixed a bug that could cause gateway nodes to panic when performing an `UPSERT` on a table with a `BOOL` primary key column and a partial index with the primary key column as the predicate expression. 
#141728 +- Fixed a bug where CockroachDB could incorrectly evaluate casts to some `OID` types (like `REGCLASS`) in some cases. The bug has been present since at least v22.1. #141946 +- Transactions that enter the `aborted` state now release locks they are holding immediately, provided there is no `SAVEPOINT` active in the transaction. #140160 +- Fixed a bug when running with `autocommit_before_ddl` that could cause a runtime error when binding a previously prepared DDL statement. #142034 +- Fixed a bug where orphaned leases were not properly cleaned up. #141429 +- Previously, the `CREATE LOGICALLY REPLICATED` syntax would always create the destination side table with the source side name, instead of the user-provided name. This change ensures the user-provided name is used. #142235 +- Fixed a bug that would prevent `CREATE TRIGGER` and `DROP TRIGGER` statements from working if the `autocommit_before_ddl` setting was enabled, and if the statement was either sent as a prepared statement or as part of a batch of multiple statements. #142202 +- Fixed a bug that could cause the upgrade to v25.1 to crash if a job was missing from the virtual table, for example, if a malformed job had no payload information. #142284 +- The TTL deletion job now includes a retry mechanism that progressively reduces the batch size when encountering contention. This improves the chances of successful deletion without requiring manual adjustments to TTL job settings. #141953 +- Fixed an issue where removed nodes could leave persistent entries in `crdb_internal.gossip_alerts`. #142385 +- Invalid default expressions could cause backfilling schema changes to retry forever. #142490 +- Fast failback could succeed even if the destination cluster's protected timestamp had been removed, causing the reverse stream to enter a crashing loop. This fix ensures the failback command fast fails. 
#142231 +- Fixed an issue where dropping a database with triggers could fail due to an undropped back reference to a trigger function. #142670 +- Fixed a bug where replication controls on indexes and partitions would not get properly updated during an index backfill (in the declarative schema changer) to its new ID; effectively discarding the replication controls set on it before the backfill. #141800 +- Addressed a bug where `CREATE SEQUENCE` could succeed with a `DROP SCHEMA` or `DROP DATABASE` in progress. #142696 +- Fixed a bug in client certificate expiration metrics. #142682 +- Physical cluster replication (PCR) reader catalogs could have orphan rows in `system.namespace` after an object is renamed. #142829 +- Fixed a bug where during validation of a table-level zone configuration, inherited values were incorrectly populated from the default range instead of from the parent database. #142760 +- Fixed a bug that would send a replica outside of a tenant known region, when `SURVIVE REGION FAILURE` was set and exactly 3 regions were configured. #142838

Performance improvements

-- Improved directory traversal performance by switching from `filepath.Walk` to `filepath.WalkDir`. [#139108][#139108] -- Removed a potential storage read from the Raft commit pipeline. This reduces the worst-case KV write latency. [#139609][#139609] -- The `optimizer_check_input_min_row_count` session setting now defaults to `1`, resulting in better query plans for foreign key and uniqueness checks. [#140735][#140735] -- This change restores the changefeed checkpoint immediately to the change frontier. This potentially reduces duplicate messages in the event that the frontier writes a checkpoint before it receives updates and covers the previous checkpoint from the aggregators, overwriting the checkpoint with less information. [#139969][#139969] +- Improved directory traversal performance by switching from `filepath.Walk` to `filepath.WalkDir`. #139108 +- Removed a potential storage read from the Raft commit pipeline. This reduces the worst-case KV write latency. #139609 +- The `optimizer_check_input_min_row_count` session setting now defaults to `1`, resulting in better query plans for foreign key and uniqueness checks. #140735 +- This change restores the changefeed checkpoint immediately to the change frontier. This potentially reduces duplicate messages in the event that the frontier writes a checkpoint before it receives updates and covers the previous checkpoint from the aggregators, overwriting the checkpoint with less information. #139969

Build changes

-- Upgraded to Go v1.23.6. [#140626][#140626] -- Enabled the use of profile-guided optimization in the `cockroach` binary. [#142697][#142697] -- Upgraded to Go v1.23.7. [#142698][#142698] +- Upgraded to Go v1.23.6. #140626 +- Enabled the use of profile-guided optimization in the `cockroach` binary. #142697 +- Upgraded to Go v1.23.7. #142698 -[#127604]: https://github.com/cockroachdb/cockroach/pull/127604 -[#138042]: https://github.com/cockroachdb/cockroach/pull/138042 -[#138104]: https://github.com/cockroachdb/cockroach/pull/138104 -[#138688]: https://github.com/cockroachdb/cockroach/pull/138688 -[#138872]: https://github.com/cockroachdb/cockroach/pull/138872 -[#138967]: https://github.com/cockroachdb/cockroach/pull/138967 -[#139066]: https://github.com/cockroachdb/cockroach/pull/139066 -[#139108]: https://github.com/cockroachdb/cockroach/pull/139108 -[#139150]: https://github.com/cockroachdb/cockroach/pull/139150 -[#139172]: https://github.com/cockroachdb/cockroach/pull/139172 -[#139199]: https://github.com/cockroachdb/cockroach/pull/139199 -[#139203]: https://github.com/cockroachdb/cockroach/pull/139203 -[#139246]: https://github.com/cockroachdb/cockroach/pull/139246 -[#139265]: https://github.com/cockroachdb/cockroach/pull/139265 -[#139278]: https://github.com/cockroachdb/cockroach/pull/139278 -[#139342]: https://github.com/cockroachdb/cockroach/pull/139342 -[#139365]: https://github.com/cockroachdb/cockroach/pull/139365 -[#139375]: https://github.com/cockroachdb/cockroach/pull/139375 -[#139383]: https://github.com/cockroachdb/cockroach/pull/139383 -[#139388]: https://github.com/cockroachdb/cockroach/pull/139388 -[#139456]: https://github.com/cockroachdb/cockroach/pull/139456 -[#139500]: https://github.com/cockroachdb/cockroach/pull/139500 -[#139532]: https://github.com/cockroachdb/cockroach/pull/139532 -[#139565]: https://github.com/cockroachdb/cockroach/pull/139565 -[#139609]: https://github.com/cockroachdb/cockroach/pull/139609 -[#139735]: 
https://github.com/cockroachdb/cockroach/pull/139735 -[#139744]: https://github.com/cockroachdb/cockroach/pull/139744 -[#139766]: https://github.com/cockroachdb/cockroach/pull/139766 -[#139777]: https://github.com/cockroachdb/cockroach/pull/139777 -[#139871]: https://github.com/cockroachdb/cockroach/pull/139871 -[#139925]: https://github.com/cockroachdb/cockroach/pull/139925 -[#139946]: https://github.com/cockroachdb/cockroach/pull/139946 -[#139969]: https://github.com/cockroachdb/cockroach/pull/139969 -[#139985]: https://github.com/cockroachdb/cockroach/pull/139985 -[#140065]: https://github.com/cockroachdb/cockroach/pull/140065 -[#140066]: https://github.com/cockroachdb/cockroach/pull/140066 -[#140083]: https://github.com/cockroachdb/cockroach/pull/140083 -[#140112]: https://github.com/cockroachdb/cockroach/pull/140112 -[#140160]: https://github.com/cockroachdb/cockroach/pull/140160 -[#140167]: https://github.com/cockroachdb/cockroach/pull/140167 -[#140204]: https://github.com/cockroachdb/cockroach/pull/140204 -[#140277]: https://github.com/cockroachdb/cockroach/pull/140277 -[#140525]: https://github.com/cockroachdb/cockroach/pull/140525 -[#140583]: https://github.com/cockroachdb/cockroach/pull/140583 -[#140626]: https://github.com/cockroachdb/cockroach/pull/140626 -[#140735]: https://github.com/cockroachdb/cockroach/pull/140735 -[#140995]: https://github.com/cockroachdb/cockroach/pull/140995 -[#141189]: https://github.com/cockroachdb/cockroach/pull/141189 -[#141195]: https://github.com/cockroachdb/cockroach/pull/141195 -[#141369]: https://github.com/cockroachdb/cockroach/pull/141369 -[#141411]: https://github.com/cockroachdb/cockroach/pull/141411 -[#141420]: https://github.com/cockroachdb/cockroach/pull/141420 -[#141427]: https://github.com/cockroachdb/cockroach/pull/141427 -[#141429]: https://github.com/cockroachdb/cockroach/pull/141429 -[#141596]: https://github.com/cockroachdb/cockroach/pull/141596 -[#141601]: 
https://github.com/cockroachdb/cockroach/pull/141601 -[#141634]: https://github.com/cockroachdb/cockroach/pull/141634 -[#141714]: https://github.com/cockroachdb/cockroach/pull/141714 -[#141728]: https://github.com/cockroachdb/cockroach/pull/141728 -[#141738]: https://github.com/cockroachdb/cockroach/pull/141738 -[#141800]: https://github.com/cockroachdb/cockroach/pull/141800 -[#141858]: https://github.com/cockroachdb/cockroach/pull/141858 -[#141929]: https://github.com/cockroachdb/cockroach/pull/141929 -[#141940]: https://github.com/cockroachdb/cockroach/pull/141940 -[#141943]: https://github.com/cockroachdb/cockroach/pull/141943 -[#141944]: https://github.com/cockroachdb/cockroach/pull/141944 -[#141946]: https://github.com/cockroachdb/cockroach/pull/141946 -[#141948]: https://github.com/cockroachdb/cockroach/pull/141948 -[#141953]: https://github.com/cockroachdb/cockroach/pull/141953 -[#142010]: https://github.com/cockroachdb/cockroach/pull/142010 -[#142034]: https://github.com/cockroachdb/cockroach/pull/142034 -[#142061]: https://github.com/cockroachdb/cockroach/pull/142061 -[#142078]: https://github.com/cockroachdb/cockroach/pull/142078 -[#142092]: https://github.com/cockroachdb/cockroach/pull/142092 -[#142094]: https://github.com/cockroachdb/cockroach/pull/142094 -[#142105]: https://github.com/cockroachdb/cockroach/pull/142105 -[#142139]: https://github.com/cockroachdb/cockroach/pull/142139 -[#142147]: https://github.com/cockroachdb/cockroach/pull/142147 -[#142151]: https://github.com/cockroachdb/cockroach/pull/142151 -[#142202]: https://github.com/cockroachdb/cockroach/pull/142202 -[#142231]: https://github.com/cockroachdb/cockroach/pull/142231 -[#142235]: https://github.com/cockroachdb/cockroach/pull/142235 -[#142241]: https://github.com/cockroachdb/cockroach/pull/142241 -[#142249]: https://github.com/cockroachdb/cockroach/pull/142249 -[#142252]: https://github.com/cockroachdb/cockroach/pull/142252 -[#142259]: 
https://github.com/cockroachdb/cockroach/pull/142259 -[#142277]: https://github.com/cockroachdb/cockroach/pull/142277 -[#142282]: https://github.com/cockroachdb/cockroach/pull/142282 -[#142284]: https://github.com/cockroachdb/cockroach/pull/142284 -[#142336]: https://github.com/cockroachdb/cockroach/pull/142336 -[#142345]: https://github.com/cockroachdb/cockroach/pull/142345 -[#142385]: https://github.com/cockroachdb/cockroach/pull/142385 -[#142434]: https://github.com/cockroachdb/cockroach/pull/142434 -[#142476]: https://github.com/cockroachdb/cockroach/pull/142476 -[#142490]: https://github.com/cockroachdb/cockroach/pull/142490 -[#142501]: https://github.com/cockroachdb/cockroach/pull/142501 -[#142651]: https://github.com/cockroachdb/cockroach/pull/142651 -[#142670]: https://github.com/cockroachdb/cockroach/pull/142670 -[#142682]: https://github.com/cockroachdb/cockroach/pull/142682 -[#142696]: https://github.com/cockroachdb/cockroach/pull/142696 -[#142697]: https://github.com/cockroachdb/cockroach/pull/142697 -[#142698]: https://github.com/cockroachdb/cockroach/pull/142698 -[#142760]: https://github.com/cockroachdb/cockroach/pull/142760 -[#142829]: https://github.com/cockroachdb/cockroach/pull/142829 -[#142838]: https://github.com/cockroachdb/cockroach/pull/142838 -[#135668]: https://github.com/cockroachdb/cockroach/pull/135668 diff --git a/src/current/_includes/releases/v25.2/v25.2.0-alpha.2.md b/src/current/_includes/releases/v25.2/v25.2.0-alpha.2.md index fe2123e53f8..26317e62bf8 100644 --- a/src/current/_includes/releases/v25.2/v25.2.0-alpha.2.md +++ b/src/current/_includes/releases/v25.2/v25.2.0-alpha.2.md @@ -7,90 +7,70 @@ Release Date: March 31, 2025

SQL language changes

- `num_nulls()` and `num_nonnulls()` no longer require that all arguments have the same type. - [#141193][#141193] + #141193 - `concat()` no longer requires that all arguments have the same type. - [#141193][#141193] + #141193 - `pg_column_size()` no longer requires that all arguments have the same type. - [#141193][#141193] + #141193 - Users can now begin logical data replication (LDR) on an existing table if the user has a table-level `REPLICATIONDEST` privilege. Furthermore, users can now begin LDR onto an automatically created table if the user has the parent database level `CREATE` privilege. Finally, during bidirectional LDR, the user in the original source URI, who will begin the reverse LDR stream, will authorize via this table-level `REPLICATIONDEST` privilege. - [#142840][#142840] + #142840 - `concat_ws()` now accepts arguments of any type in the second and later positions (the separator must still be a string). - [#141193][#141193] + #141193 - Filters are now supported in JSONPath queries, using the format `$ ? (predicate)`. This allows results to be filtered. For example, `SELECT jsonb_path_query('{"a": [1,2,3]}', '$.a ? (1 == 1)');`. - [#143097][#143097] + #143097 - `format()` no longer requires that all post-format string arguments have the same type. - [#141193][#141193] + #141193 - `json_build_object()`, `jsonb_build_object()`, `json_build_array()`, and `jsonb_build_array()` no longer require that all arguments have the same type. - [#141193][#141193] + #141193 - Added the `jsonb_path_exists` function, which accepts a JSON object and JSONPath query and returns whether the query returned any items. - [#143028][#143028] + #143028 - Addition, subtraction, multiplication, division, and modulo operators are now supported in JSONPath queries. - [#143210][#143210] + #143210

Operational changes

- All `ALTER VIRTUAL CLUSTER REPLICATION JOB` commands for physical cluster replication (PCR), except for `ALTER VIRTUAL CLUSTER SET REPLICATION SOURCE`, will require the `REPLICATIONDEST` privilege, in addition to `MANAGEVIRTUALCLUSTER`. `ALTER VIRTUAL CLUSTER SET REPLICATION SOURCE` now requires the `REPLICATIONSOURCE` privilege. If the ingestion job was created before v25.1, the user can still alter the replication job without the `REPLICATIONDEST` privilege. - [#142772][#142772] + #142772

DB Console changes

- The lock and latch wait time components of a query's cumulative contention time are now tracked separately and surfaced as annotations in `EXPLAIN ANALYZE` output. - [#113649][#113649] + #113649 - The metric that measures cumulative contention time now includes time spent waiting to acquire latches, in addition to time spent acquiring locks. This metric is displayed in both the DB Console and the `EXPLAIN ANALYZE` output. - [#113649][#113649] -- The Replica Quiescence graph on the Replication dashboard in the DB Console now displays the number of replicas quiesced with leader leases. [#143215][#143215] + #113649 +- The Replica Quiescence graph on the Replication dashboard in the DB Console now displays the number of replicas quiesced with leader leases. #143215

Bug fixes

- Fixed a bug where index backfills unnecessarily merged new data written to an index, which could lead to extra contention. - [#142768][#142768] + #142768 - Fixed a bug that could leave behind a dangling reference to a dropped role if that role had default privileges granted to itself. The bug was caused by defining privileges such as: `ALTER DEFAULT - PRIVILEGES FOR ROLE self_referencing_role GRANT INSERT ON TABLES TO self_referencing_role`. [#143287][#143287] + PRIVILEGES FOR ROLE self_referencing_role GRANT INSERT ON TABLES TO self_referencing_role`. #143287 - Fixed a bug that caused changefeeds to fail on startup when scanning a single key. - [#143102][#143102] + #143102 - Fixed a bug where secondary indexes could be unusable by DML statements while a primary key swap was occurring, if the new primary key did not contain columns from the old primary key. - [#141850][#141850] -- Fixed a crash due to `use of enum metadata before hydration` when using LDR with user-defined types. [#143311][#143311] -- MVCC garbage collection is now fully subject to IO admission control. Previously, it was possible for MVCC GC to cause store overload (such as LSM inversion) when a large amounts of data would become eligible for garbage collection. Should any issues arise from subjecting MVCC GC to admission control, the `kv.mvcc_gc.queue_kv_admission_control.enabled` cluster setting can be set to `false` to restore the previous behavior. [#143122][#143122] + #141850 +- Fixed a crash due to `use of enum metadata before hydration` when using LDR with user-defined types. #143311 +- MVCC garbage collection is now fully subject to IO admission control. Previously, it was possible for MVCC GC to cause store overload (such as LSM inversion) when a large amount of data would become eligible for garbage collection. 
Should any issues arise from subjecting MVCC GC to admission control, the `kv.mvcc_gc.queue_kv_admission_control.enabled` cluster setting can be set to `false` to restore the previous behavior. #143122 - Fixed a bug where CockroachDB would encounter an internal error when decoding the gists of plans with `CALL` statements. The bug had been present since v23.2. - [#143252][#143252] + #143252 - Fixed a bug where calling a stored procedure could drop the procedure if it had `OUT` parameters that were not used by the calling routine. This bug had existed since PL/pgSQL `CALL` statements were introduced in v24.1. - [#143173][#143173] + #143173 - Fixed a bug where CockroachDB incorrectly resolved routine overloads in some cases. Previously, it allowed creating routines with signatures differing only in type width (e.g., `f(p VARCHAR(1))` and `f(p VARCHAR(2))`), which is not permitted in PostgreSQL. This required precise type casting during invocation. Similarly, when dropping a routine, CockroachDB previously required exact types, unlike PostgreSQL, which is more lenient (e.g., `DROP FUNCTION f(VARCHAR)` would fail in the preceding example). This bug had existed since v23.1. - [#143159][#143159] + #143159 - The reader virtual cluster now starts if the user begins a physical cluster replication (PCR) stream from a cursor via `ALTER VIRTUAL CLUSTER virtual_cluster START REPLICATION OF physical_cluster ON pgurl WITH READ VIRTUAL CLUSTER`. - [#143072][#143072] + #143072

Performance improvements

- Index backfills and row-level TTL deletions that encounter transaction contention will now be retried with smaller batch sizes more quickly, which reduces the latency of these jobs under high-contention workloads. - [#142702][#142702] + #142702 - Queries that use `SHOW TABLES` without using the `estimated_row_count` column no longer need to look up the table statistics. - [#59838][#59838] + #59838

Miscellaneous

- `pg_column_size()` is now regarded as Stable, matching PostgreSQL. As a result, it will no longer be allowed in computed column expressions or partial index predicate expressions. - [#141193][#141193] - - -[#143287]: https://github.com/cockroachdb/cockroach/pull/143287 -[#143122]: https://github.com/cockroachdb/cockroach/pull/143122 -[#59838]: https://github.com/cockroachdb/cockroach/pull/59838 -[#142772]: https://github.com/cockroachdb/cockroach/pull/142772 -[#113649]: https://github.com/cockroachdb/cockroach/pull/113649 -[#143159]: https://github.com/cockroachdb/cockroach/pull/143159 -[#141193]: https://github.com/cockroachdb/cockroach/pull/141193 -[#143210]: https://github.com/cockroachdb/cockroach/pull/143210 -[#143215]: https://github.com/cockroachdb/cockroach/pull/143215 -[#143102]: https://github.com/cockroachdb/cockroach/pull/143102 -[#143311]: https://github.com/cockroachdb/cockroach/pull/143311 -[#143173]: https://github.com/cockroachdb/cockroach/pull/143173 -[#143072]: https://github.com/cockroachdb/cockroach/pull/143072 -[#142840]: https://github.com/cockroachdb/cockroach/pull/142840 -[#143097]: https://github.com/cockroachdb/cockroach/pull/143097 -[#141850]: https://github.com/cockroachdb/cockroach/pull/141850 -[#143252]: https://github.com/cockroachdb/cockroach/pull/143252 -[#142702]: https://github.com/cockroachdb/cockroach/pull/142702 -[#143028]: https://github.com/cockroachdb/cockroach/pull/143028 -[#142768]: https://github.com/cockroachdb/cockroach/pull/142768 + #141193 + + diff --git a/src/current/_includes/releases/v25.2/v25.2.0-alpha.3.md b/src/current/_includes/releases/v25.2/v25.2.0-alpha.3.md index e6cdbddb144..c98ad2ae235 100644 --- a/src/current/_includes/releases/v25.2/v25.2.0-alpha.3.md +++ b/src/current/_includes/releases/v25.2/v25.2.0-alpha.3.md @@ -7,79 +7,60 @@ Release Date: April 7, 2025

SQL language changes

- `last` is now supported for array indexing in JSONPath queries. For example, `SELECT jsonb_path_query('[1, 2, 3, 4]', '$[1 to last]');`. - [#143658][#143658] + #143658 - String comparisons are now supported in JSONPath queries. For example, `SELECT jsonb_path_query('{}', '"a" < "b"');`. - [#143240][#143240] + #143240 - Added the `ST_3DLength` function, which returns the 3D or 2D length of `LINESTRING` and `MULTILINESTRING` spatial types. - [#139450][#139450] + #139450 - Updated edge cases in the `width_bucket()` function to return `count + 1` for a positive infinity operand, and `0` for a negative infinity operand, instead of an error. - [#142932][#142932] + #142932 - Unary arithmetic operators are now supported in JSONPath queries. For example, `SELECT jsonb_path_query('[1, 2, 3]', '-$');`. - [#143613][#143613] + #143613 - Implemented various `power()` and `^` edge cases to match PostgreSQL behaviour. Some expressions that previously returned `NaN` now return specific numbers; some expressions that previously returned `Infinity` or `NaN` now return errors; and some expressions with infinite exponents now return different results. - [#142932][#142932] + #142932 - Null comparisons are now supported in JSONPath queries. For example, `SELECT jsonb_path_query('{}', 'null != 1');`. - [#143240][#143240] + #143240 - Wildcard key accessors are now supported in JSONPath queries. For example, `SELECT jsonb_path_query('{"a": 1, "b": true}', '$.*');`. - [#143588][#143588] + #143588 - `like_regex` predicate evaluation is now supported in JSONPath queries. For example, `SELECT jsonb_path_query('{}', '"hello" like_regex "^he.*$"');`. - [#143240][#143240] + #143240

Operational changes

- The `server.client_cert_expiration_cache.capacity` cluster setting has been removed. The `security.certificate.expiration.client` and `security.certificate.ttl.client` metrics now report the lowest value observed for a user in the last 24 hours. - [#143384][#143384] + #143384 - Previously, the user provided in the source URI in the logical data replication (LDR) stream required the `REPLICATIONSOURCE` privilege at the system level. With this change, the user only needs this privilege on the source tables (i.e., a table-level privilege). - [#143456][#143456] + #143456

DB Console changes

- The lock and latch wait time components of a query's cumulative contention time are now tracked separately and surfaced as annotations in `EXPLAIN ANALYZE` output. - [#113649][#113649] + #113649 - The metric that measures cumulative contention time now includes time spent waiting to acquire latches, in addition to time spent acquiring locks. This metric is displayed in both the DB Console and the `EXPLAIN ANALYZE` output. - [#113649][#113649] + #113649

Bug fixes

- Fixed a bug where index backfills unnecessarily merged new data written to an index, which could lead to extra contention. - [#142768][#142768] -- Column IDs are now validated when starting an `immediate` mode logical data replication stream. [#143773][#143773] + #142768 +- Column IDs are now validated when starting an `immediate` mode logical data replication stream. #143773 - Fixed a bug where a GC threshold error (which appears as "batch timestamp must be after replica GC threshold ...") could cause a schema change that backfills data to fail. Now, the error will cause the backfill to be retried at a higher timestamp to avoid the error. - [#143451][#143451] + #143451 - Fixed a bug where index backfill progress before a `PAUSE`/`RESUME` would not get tracked. - [#142602][#142602] + #142602 - Fixed a bug that could cause a function reference to be left behind if a procedure referred to another procedure that depended on a table, and that table was dropped with `CASCADE`. - [#143538][#143538] + #143538 - Fixed a potential deadlock that could occur during client certificate updates while metrics were being collected. This issue affected the reliability of certificate expiration reporting. - [#143663][#143663] + #143663 - Previously, the fields `maximum memory usage` and `max sql temp disk usage` in the `EXPLAIN ANALYZE` output could be under-reported for distributed plans when memory-intensive operations were fully performed on the remote nodes. This is now fixed. The bug existed in v22.1 and later. - [#143777][#143777] + #143777 - The `ALTER VIRTUAL CLUSTER SET REPLICATION READ VIRTUAL CLUSTER` syntax is now supported for adding a reader virtual cluster for an existing physical cluster replication (PCR) standby cluster. - [#143752][#143752] + #143752

Performance improvements

- Schema changes that require data to be backfilled no longer hold a protected timestamp for the entire duration of the backfill, which means there is less overhead caused by MVCC garbage collection after the backfill completes. - [#143451][#143451] + #143451 - Fixed a bug that caused the optimizer to over-estimate the cost of inverted index scans in some cases. Now, plans with inverted index scans should be selected in more cases where they are optimal. - [#120079][#120079] + #120079 -[#143588]: https://github.com/cockroachdb/cockroach/pull/143588 -[#143384]: https://github.com/cockroachdb/cockroach/pull/143384 -[#143451]: https://github.com/cockroachdb/cockroach/pull/143451 -[#120079]: https://github.com/cockroachdb/cockroach/pull/120079 -[#143613]: https://github.com/cockroachdb/cockroach/pull/143613 -[#142768]: https://github.com/cockroachdb/cockroach/pull/142768 -[#143773]: https://github.com/cockroachdb/cockroach/pull/143773 -[#143538]: https://github.com/cockroachdb/cockroach/pull/143538 -[#143752]: https://github.com/cockroachdb/cockroach/pull/143752 -[#143674]: https://github.com/cockroachdb/cockroach/pull/143674 -[#143240]: https://github.com/cockroachdb/cockroach/pull/143240 -[#142602]: https://github.com/cockroachdb/cockroach/pull/142602 -[#143663]: https://github.com/cockroachdb/cockroach/pull/143663 -[#143777]: https://github.com/cockroachdb/cockroach/pull/143777 -[#143658]: https://github.com/cockroachdb/cockroach/pull/143658 -[#139450]: https://github.com/cockroachdb/cockroach/pull/139450 -[#142932]: https://github.com/cockroachdb/cockroach/pull/142932 -[#143456]: https://github.com/cockroachdb/cockroach/pull/143456 -[#113649]: https://github.com/cockroachdb/cockroach/pull/113649 diff --git a/src/current/_includes/releases/v25.2/v25.2.0-beta.1.md b/src/current/_includes/releases/v25.2/v25.2.0-beta.1.md index bec895e843c..9fec0bcc0da 100644 --- a/src/current/_includes/releases/v25.2/v25.2.0-beta.1.md +++ 
b/src/current/_includes/releases/v25.2/v25.2.0-beta.1.md @@ -7,55 +7,40 @@ Release Date: April 14, 2025

SQL language changes

- Set-returning functions (SRF) are now supported in PL/pgSQL. A PL/pgSQL SRF can be created by declaring the return type as `SETOF ` or `TABLE`. - [#143820][#143820] + #143820 - Usage of `TG_ARGV` in trigger functions is now disallowed by default. The session setting `allow_create_trigger_function_with_argv_references` can be set to `true` to allow usage (with 1-based indexing). - [#143827][#143827] -- The return type of the `workload_index_recs` built-in function now includes two columns. The first column, `index_rec`, remains a `STRING` type and contains the index recommendation. The second column, `fingerprint_ids`, is new and has the `BYTES[]` type. [#142927][#142927] + #143827 +- The return type of the `workload_index_recs` built-in function now includes two columns. The first column, `index_rec`, remains a `STRING` type and contains the index recommendation. The second column, `fingerprint_ids`, is new and has the `BYTES[]` type. #142927 - The job description for `AUTO CREATE PARTIAL STATS` now clearly indicates that the job is for automatic partial statistics collection, improving `system.jobs` visibility and debugging. - [#143283][#143283] + #143283 - A new `execution time` statistic is now reported on `EXPLAIN ANALYZE` output for most operators. Previously, this statistic was only available on the DistSQL diagrams in `EXPLAIN ANALYZE (DISTSQL)` output. - [#143857][#143857] + #143857 - `() is unknown` is now supported in JSONPath queries. For example, `SELECT jsonb_path_query('{}', '($ < 1) is unknown');`. - [#143668][#143668] + #143668 - `starts with ""` is now supported in JSONPath queries. For example, `SELECT jsonb_path_query('"abcdef"', '$ starts with "abc"');`. - [#143675][#143675] + #143675

Operational changes

- The `kv.mvcc_gc.queue_kv_admission_control.enabled` cluster setting was retired. - [#143124][#143124] + #143124 - `debug zip` queries are now attributed to internal SQL metrics. As a result, users will no longer see their impact on the SQL charts in the DB Console. - [#143711][#143711] + #143711

Bug fixes

- Fixed an issue where hot range logging for virtual clusters omitted some hot ranges. - [#143775][#143775] + #143775 - MVCC garbage collection is now fully subject to IO admission control. Previously, it was possible for MVCC GC to cause store overload (such as LSM inversion) when a large amount of data would become eligible for garbage collection. Should any issues arise from subjecting MVCC GC to admission control, the `kv.mvcc_gc.queue_kv_admission_control.enabled` cluster setting can be set to `false` to restore the previous behavior. - [#143122][#143122] + #143122 - Fixed a bug that could cause a stack overflow during execution of a prepared statement that invoked a PL/pgSQL routine with a loop. The bug existed in versions v23.2.22, v24.1.15, v24.3.9, v25.1.2, v25.1.3, and pre-release versions of v25.2 prior to v25.2.0-alpha.3. - [#144027][#144027] + #144027 - Fixed an issue where change data capture queries on tables without columns would fail with an internal error: `unable to determine result columns`. - [#142068][#142068] + #142068 - Previously, statement bundle collection could encounter `not enough privileges` errors when retrieving necessary information (e.g., cluster settings, table statistics, etc.) when the user that requested the bundle was different from the user that actually ran the query. This is now fixed. The bug was present since v20.2 and would result in partially incomplete bundles. - [#144178][#144178] + #144178 - Fixed an issue where databases, tables, and indexes were not appearing on the Hot Ranges page for application virtual clusters. 
- [#143441][#143441] - - -[#143827]: https://github.com/cockroachdb/cockroach/pull/143827 -[#143675]: https://github.com/cockroachdb/cockroach/pull/143675 -[#143124]: https://github.com/cockroachdb/cockroach/pull/143124 -[#143711]: https://github.com/cockroachdb/cockroach/pull/143711 -[#143775]: https://github.com/cockroachdb/cockroach/pull/143775 -[#142068]: https://github.com/cockroachdb/cockroach/pull/142068 -[#143441]: https://github.com/cockroachdb/cockroach/pull/143441 -[#142927]: https://github.com/cockroachdb/cockroach/pull/142927 -[#143283]: https://github.com/cockroachdb/cockroach/pull/143283 -[#143857]: https://github.com/cockroachdb/cockroach/pull/143857 -[#144027]: https://github.com/cockroachdb/cockroach/pull/144027 -[#143820]: https://github.com/cockroachdb/cockroach/pull/143820 -[#143668]: https://github.com/cockroachdb/cockroach/pull/143668 -[#143122]: https://github.com/cockroachdb/cockroach/pull/143122 -[#144178]: https://github.com/cockroachdb/cockroach/pull/144178 + #143441 + + diff --git a/src/current/_includes/releases/v25.2/v25.2.0-beta.2.md b/src/current/_includes/releases/v25.2/v25.2.0-beta.2.md index dff4a3d5ba2..72bd63b482c 100644 --- a/src/current/_includes/releases/v25.2/v25.2.0-beta.2.md +++ b/src/current/_includes/releases/v25.2/v25.2.0-beta.2.md @@ -7,37 +7,30 @@ Release Date: April 23, 2025

SQL language changes

- Added the `jsonb_path_match` function, which returns the result of a predicate query. - [#144271][#144271] + #144271 - The `.type()` method is now supported in JSONPath queries. For example, `SELECT jsonb_path_query('[1, 2, 3]', '$.type()');`. - [#144405][#144405] + #144405 - Removed the `ST_3DLength` function. - [#144549][#144549] + #144549 - Added the `jsonb_path_query_first` function, which returns the first result from `jsonb_path_query`. - [#144271][#144271] + #144271 - Parenthesized expressions are now supported in JSONPath queries. For example, `SELECT jsonb_path_query('{"a": {"b": true}}', '($.a).b');` - [#144298][#144298] + #144298 - The `.size()` method is now supported in JSONPath expressions. For example, `SELECT jsonb_path_query('[1, 2, 3]', '$.size()');`. - [#144405][#144405] + #144405 - Added the `jsonb_path_query_array` function, which returns the result of `jsonb_path_query` wrapped in a JSON array. - [#144271][#144271] + #144271

Operational changes

- Logical data replication (LDR) now supports partial indexes by default. - [#144513][#144513] + #144513

Miscellaneous

- Fixed a rare corruption bug that could affect `IMPORT`, physical cluster replication (PCR), `CREATE TABLE AS` (CTAS), and materialized view refreshes. - [#144663][#144663] + #144663 - Vector indexes created in v25.2.0-beta.1 are not compatible with later releases. Drop and re-create these indexes before using them with later releases. - [#144581][#144581] + #144581 -[#144581]: https://github.com/cockroachdb/cockroach/pull/144581 -[#144271]: https://github.com/cockroachdb/cockroach/pull/144271 -[#144405]: https://github.com/cockroachdb/cockroach/pull/144405 -[#144549]: https://github.com/cockroachdb/cockroach/pull/144549 -[#144298]: https://github.com/cockroachdb/cockroach/pull/144298 -[#144513]: https://github.com/cockroachdb/cockroach/pull/144513 -[#144663]: https://github.com/cockroachdb/cockroach/pull/144663 diff --git a/src/current/_includes/releases/v25.2/v25.2.0-beta.3.md b/src/current/_includes/releases/v25.2/v25.2.0-beta.3.md index 04f425c4891..f43efab78d0 100644 --- a/src/current/_includes/releases/v25.2/v25.2.0-beta.3.md +++ b/src/current/_includes/releases/v25.2/v25.2.0-beta.3.md @@ -6,36 +6,27 @@ Release Date: April 28, 2025

SQL language changes

-- `CREATE VECTOR INDEX` and `ALTER PRIMARY KEY` now send a notice that vector indexes will be offline during the change operation when the `sql_safe_updates` session setting is disabled. [#144601][#144601] -- Vector indexes do not support mutation while being created with `CREATE INDEX` or rebuilt with `ALTER PRIMARY KEY`. To prevent inadvertent application downtime, set the `sql_safe_updates` session setting to `false` when using `CREATE INDEX` or `ALTER PRIMARY KEY` with a vector index. [#144601][#144601] -- The variable arguments of polymorphic built-in functions (e.g., `concat`, `num_nulls`, `format`, `concat_ws`, etc.) no longer need to have the same type, matching PostgreSQL behavior. As a result, CockroachDB's type inference engine will no longer be able to infer argument types in some cases where it previously could, and there is a possibility that CockroachDB applications will encounter new errors. The new session variable `use_pre_25_2_variadic_builtins` restores the previous behavior (and limitations). [#144600][#144600] +- `CREATE VECTOR INDEX` and `ALTER PRIMARY KEY` now send a notice that vector indexes will be offline during the change operation when the `sql_safe_updates` session setting is disabled. #144601 +- Vector indexes do not support mutation while being created with `CREATE INDEX` or rebuilt with `ALTER PRIMARY KEY`. To prevent inadvertent application downtime, set the `sql_safe_updates` session setting to `false` when using `CREATE INDEX` or `ALTER PRIMARY KEY` with a vector index. #144601 +- The variable arguments of polymorphic built-in functions (e.g., `concat`, `num_nulls`, `format`, `concat_ws`, etc.) no longer need to have the same type, matching PostgreSQL behavior. As a result, CockroachDB's type inference engine will no longer be able to infer argument types in some cases where it previously could, and there is a possibility that CockroachDB applications will encounter new errors. 
The new session variable `use_pre_25_2_variadic_builtins` restores the previous behavior (and limitations). #144600

Bug fixes

-- Fixed a bug that could cause a changefeed to complete erroneously when one of its watched tables encounters a schema change. [#144717][#144717] +- Fixed a bug that could cause a changefeed to complete erroneously when one of its watched tables encounters a schema change. #144717 - Fixed a bug in the DB Console where tables with page size dropdowns failed to update when a new page size option was selected. Tables now update correctly. - [#144666][#144666] + #144666 - Fixed the following bugs in the **Schedules** page of the DB Console: - Where the **Schedules** page displayed only a subset of a cluster's schedules. The **Schedules** page now correctly displays all schedules. - Where manually updating the `show` or `status` parameters in the URL (e.g., `http://127.0.0.1:8080/#/schedules?status=ACTIVE&show=50`) caused the **Schedules** page to fail to load. - [#144807][#144807] + #144807 - Fixed a bug in the **SQL Activity Statements** page where filtering by **Statement Type** returned no results. The filter now works as expected. - [#144851][#144851] + #144851 - Fixed a bug in the DB Console where the **Drop unused index** tag appeared multiple times for an index on the **Indexes** tab of the **Table Details** page. - [#144656][#144656] + #144656

Performance improvements

-- Triggers now perform the descriptor lookup for `TG_TABLE_SCHEMA` against a cache. This can significantly reduce trigger planning latency in multi-region databases. [#144521][#144521] -- The vector search optimizer rule now supports additional projections beyond the distance column, including the implicit projections added for virtual columns. [#144583][#144583] +- Triggers now perform the descriptor lookup for `TG_TABLE_SCHEMA` against a cache. This can significantly reduce trigger planning latency in multi-region databases. #144521 +- The vector search optimizer rule now supports additional projections beyond the distance column, including the implicit projections added for virtual columns. #144583 -[#144656]: https://github.com/cockroachdb/cockroach/pull/144656 -[#144521]: https://github.com/cockroachdb/cockroach/pull/144521 -[#144583]: https://github.com/cockroachdb/cockroach/pull/144583 -[#144601]: https://github.com/cockroachdb/cockroach/pull/144601 -[#144600]: https://github.com/cockroachdb/cockroach/pull/144600 -[#144807]: https://github.com/cockroachdb/cockroach/pull/144807 -[#144851]: https://github.com/cockroachdb/cockroach/pull/144851 -[#144717]: https://github.com/cockroachdb/cockroach/pull/144717 -[#144666]: https://github.com/cockroachdb/cockroach/pull/144666 diff --git a/src/current/_includes/releases/v25.2/v25.2.0-rc.1.md b/src/current/_includes/releases/v25.2/v25.2.0-rc.1.md index 7d519f2a400..6d71a019048 100644 --- a/src/current/_includes/releases/v25.2/v25.2.0-rc.1.md +++ b/src/current/_includes/releases/v25.2/v25.2.0-rc.1.md @@ -7,51 +7,39 @@ Release Date: May 12, 2025

SQL language changes

- Non-integer array indices are now supported in JSONPath queries (e.g., `SELECT jsonb_path_query('[1, 2, 3]', '$[2.5]');`). Indices are rounded toward 0. - [#144819][#144819] + #144819 - The `vector_l2_ops` operator class can now be specified for a vector index. Because `vector_l2_ops` is the default, it is possible to omit the operator class from an index definition. - [#144902][#144902] + #144902 - When creating a vector index with the `USING` syntax, `hnsw` can now be specified as the index type, although a `cspann` vector index is still provided. This change increases compatibility with third-party tools. - [#144902][#144902] + #144902 - Added support for numeric JSONPath methods `.abs()`, `.floor()`, `.ceiling()`. For example, `SELECT jsonb_path_query('-0.5', '$.abs()');`. - [#145121][#145121] + #145121 - Disabled `IMPORT INTO` for tables with vector indexes, because importing into vector indexes is not implemented. - [#145262][#145262] + #145262 - Added support for `like_regex` flags in JSONPath queries. For example, `SELECT jsonb_path_query('{}', '"a" like_regex ".*" flag "i"');`. - [#145300][#145300] + #145300 - Vector index creation is now prevented until the entire cluster upgrade has been finalized on v25.2 or later. - [#145449][#145449] + #145449

Bug fixes

- `NULL` vectors can now be inserted into tables with vector indexes. - [#144858][#144858] + #144858 - Fixed a bug that caused vector indexes to return incorrect or no results from a standby reader in a physical cluster replication (PCR) setup. This bug existed in alpha versions of v25.2 and in v25.2.0-beta.1. - [#145157][#145157] + #145157 - Fixed a bug that allowed a set-returning PL/pgSQL function to be created before the version change was finalized. This bug existed in v25.2 alpha and beta releases. - [#145381][#145381] + #145381 - Fixed a bug where CockroachDB could encounter an internal error when fetching from the `WITH HOLD` cursor with `FETCH FIRST` and `FETCH ABSOLUTE`. The bug was only present in v25.2 alpha and beta releases. - [#145409][#145409] + #145409

Performance improvements

- Some internal queries executed by the jobs system are now less likely to perform full table scans of the `system.jobs` table, making them more efficient. This change can be reverted by disabling the `jobs.avoid_full_scans_in_find_running_jobs.enabled` cluster setting. - [#144825][#144825] + #144825

Miscellaneous

- SQL queries run on the source cluster by logical data replication (LDR) and physical cluster replication (PCR) will account to internal metrics like `sql.statements.active.internal` instead of the metrics like `sql.statements.active` that are used to monitor application workload. - [#145111][#145111] - - -[#144819]: https://github.com/cockroachdb/cockroach/pull/144819 -[#145121]: https://github.com/cockroachdb/cockroach/pull/145121 -[#145262]: https://github.com/cockroachdb/cockroach/pull/145262 -[#145300]: https://github.com/cockroachdb/cockroach/pull/145300 -[#145381]: https://github.com/cockroachdb/cockroach/pull/145381 -[#145409]: https://github.com/cockroachdb/cockroach/pull/145409 -[#145111]: https://github.com/cockroachdb/cockroach/pull/145111 -[#144902]: https://github.com/cockroachdb/cockroach/pull/144902 -[#145449]: https://github.com/cockroachdb/cockroach/pull/145449 -[#144858]: https://github.com/cockroachdb/cockroach/pull/144858 -[#145157]: https://github.com/cockroachdb/cockroach/pull/145157 -[#144825]: https://github.com/cockroachdb/cockroach/pull/144825 + #145111 + + diff --git a/src/current/_includes/releases/v25.2/v25.2.1.md b/src/current/_includes/releases/v25.2/v25.2.1.md index 721850ca321..bc669470375 100644 --- a/src/current/_includes/releases/v25.2/v25.2.1.md +++ b/src/current/_includes/releases/v25.2/v25.2.1.md @@ -7,98 +7,69 @@ Release Date: June 4, 2025

SQL language changes

- Added the `sql.metrics.application_name.enabled` and `sql.metrics.database_name.enabled` cluster settings. These settings default to `false`. Set them to `true` to include the application and database name, respectively, in supported metrics. - [#144932][#144932] + #144932

Operational changes

- Added the metric `changefeed.checkpoint.timestamp_count` that measures the number of unique timestamps in a changefeed span-level checkpoint. It may be useful to monitor this metric to determine if quantization settings should be changed. - [#145223][#145223] + #145223 - Logs for hot ranges (`hot_ranges_stats` events) have been moved to the `HEALTH` logging channel. - [#146762][#146762] + #146762

DB Console changes

- Schema insights that recommend replacing an index were previously a two-statement command consisting of a `CREATE INDEX` and a `DROP INDEX` statement. When these two DDL statements were run as a single batched command, it was possible for one statement to succeed and one to fail. This is because DDL statements do not have the same atomicity guarantees as other SQL statements in CockroachDB. Index-replacement insights are now a single `CREATE INDEX` statement followed by a comment with additional DDL statements to be run manually: an `ALTER INDEX ... NOT VISIBLE` statement, which makes the old index invisible to the optimizer, followed by a `DROP INDEX` statement that should only be run after making the old index invisible and verifying that workload performance is satisfactory. - [#145993][#145993] + #145993

Bug fixes

- Improved the performance of `SHOW CREATE TABLE` on multi-region databases with large numbers of objects. - [#145004][#145004] + #145004 - Fixed a bug that could cause queries that perform work in parallel to ignore the requested quality-of-service level. Affected operations include lookup joins, DistSQL execution, and foreign-key checks. - [#145363][#145363] + #145363 - Fixed a bug where running `DROP INDEX` on a hash-sharded index did not properly detect dependencies from functions and procedures on the shard column. This caused the `DROP INDEX` statement to fail with an internal validation error. Now the statement returns a correct error message, and using `DROP INDEX ... CASCADE` works as expected by dropping the dependent functions and procedures. - [#145386][#145386] + #145386 - Fixed a bug that could lead to schema changes hanging after a cluster recovered from availability issues. - [#145545][#145545] + #145545 - Previously, on a table with multiple column families, CockroachDB could encounter a `Non-nullable column "‹×›:‹×›" with no value` error in rare cases during table statistics collection. The bug was present since v19.2 and is now fixed. - [#145576][#145576] + #145576 - Fixed a bug that could cause a row-level TTL job to fail with the error "comparison of two different versions of enum" if an `ENUM` type referenced by the table experienced a schema change. - [#145917][#145917] + #145917 - Fixed a bug where the physical cluster replication (PCR) reader catalog job could hit validation errors when schema objects had dependencies between them (for example, when a sequence's default expression was being removed). - [#145999][#145999] + #145999 - Creating a vector index on a table that contains a `NULL` vector value will no longer cause an internal error. 
- [#146017][#146017] + #146017 - Row-level security (RLS) `SELECT` policies during `UPDATE` operations are now only applied when referenced columns appear in the `SET` or `WHERE` clauses, matching the behavior of PostgreSQL. This improves compatibility. - [#146128][#146128] + #146128 - Fixed an internal assertion failure that could occur during operations like `ALTER TYPE` or `ALTER DATABASE ... ADD REGION` when temporary tables were present. - [#146196][#146196] + #146196 - Fixed incorrect application of row-level security (RLS) `SELECT` policies to `RETURNING` clauses in `INSERT` and `UPDATE` when no table columns were referenced. - [#146292][#146292] + #146292 - Fixed a bug that prevented `TRUNCATE` from succeeding if any indexes on the table had back-reference dependencies, such as from a view or function referencing the index. - [#146326][#146326] + #146326 - Fixed a bug that could lead to a node stall. - [#146409][#146409] + #146409 - Fixed an integer overflow in the `split_part` function when using extremely negative field positions like Go's `math.MinInt64`. - [#146413][#146413] + #146413 - Fixed a bug where an invalid comment in the `system.comment` table for a schema object could make it inaccessible. - [#146418][#146418] + #146418 - Fixed a bug where a CockroachDB node could crash when executing `DO` statements that contain currently unsupported DDL statements like `CREATE TYPE` in a non-default configuration (additional logging needed to be enabled, e.g., via the `sql.log.all_statements.enabled` cluster setting). This bug was introduced in v25.1. - [#146501][#146501] + #146501 - Prevent use of future timestamps when using `AS OF SYSTEM TIME` with `CREATE TABLE ... AS` and materialized views. Previously, such timestamps could cause errors, delays, or hangs. - [#146605][#146605] + #146605 - Fixed a bug where CockroachDB would not use the vectorized fast path for `COPY` when it was supported. The bug was only present in previous v25.2 releases. 
- [#146697][#146697] + #146697 - Fixed an internal error that could be hit when `ADD COLUMN UNIQUE` and `ALTER PRIMARY KEY` were executed within the same transaction. - [#146743][#146743] + #146743 - Fixed a bug where `ALTER TABLE` operations with multiple commands could generate invalid zone configurations. - [#146750][#146750] + #146750 - Fixed a bug in v25.2.0 where a vector search operator could drop user-supplied filters if the same vector column was indexed twice and a vector index with no prefix columns was defined after a vector index with prefix columns. - [#146849][#146849] + #146849 - Fixed an issue where updating child metrics and reinitializing metrics at the same time could cause scrape errors. - [#147531][#147531] + #147531 - Fixed a runtime panic in the `substring_index` function that occurred when the count argument was the minimum 64-bit integer value. - [#147550][#147550] + #147550 - Fixed a memory leak in index backfill jobs where completed spans were duplicated in memory on each progress update after resuming from a checkpoint. This could cause out-of-memory (OOM) errors when backfilling indexes on large tables with many ranges. This bug affected release version v25.2.0 and pre-release versions v25.2.0-alpha.3 through v25.2.0-rc.1. 
- [#147564][#147564] + #147564 -[#146017]: https://github.com/cockroachdb/cockroach/pull/146017 -[#146697]: https://github.com/cockroachdb/cockroach/pull/146697 -[#147564]: https://github.com/cockroachdb/cockroach/pull/147564 -[#147550]: https://github.com/cockroachdb/cockroach/pull/147550 -[#146762]: https://github.com/cockroachdb/cockroach/pull/146762 -[#145363]: https://github.com/cockroachdb/cockroach/pull/145363 -[#146326]: https://github.com/cockroachdb/cockroach/pull/146326 -[#146409]: https://github.com/cockroachdb/cockroach/pull/146409 -[#145386]: https://github.com/cockroachdb/cockroach/pull/145386 -[#145917]: https://github.com/cockroachdb/cockroach/pull/145917 -[#146501]: https://github.com/cockroachdb/cockroach/pull/146501 -[#146605]: https://github.com/cockroachdb/cockroach/pull/146605 -[#145223]: https://github.com/cockroachdb/cockroach/pull/145223 -[#145999]: https://github.com/cockroachdb/cockroach/pull/145999 -[#146849]: https://github.com/cockroachdb/cockroach/pull/146849 -[#147531]: https://github.com/cockroachdb/cockroach/pull/147531 -[#144610]: https://github.com/cockroachdb/cockroach/pull/144610 -[#146196]: https://github.com/cockroachdb/cockroach/pull/146196 -[#145993]: https://github.com/cockroachdb/cockroach/pull/145993 -[#146128]: https://github.com/cockroachdb/cockroach/pull/146128 -[#146413]: https://github.com/cockroachdb/cockroach/pull/146413 -[#146743]: https://github.com/cockroachdb/cockroach/pull/146743 -[#146750]: https://github.com/cockroachdb/cockroach/pull/146750 -[#144932]: https://github.com/cockroachdb/cockroach/pull/144932 -[#145004]: https://github.com/cockroachdb/cockroach/pull/145004 -[#145545]: https://github.com/cockroachdb/cockroach/pull/145545 -[#145576]: https://github.com/cockroachdb/cockroach/pull/145576 -[#146292]: https://github.com/cockroachdb/cockroach/pull/146292 -[#146418]: https://github.com/cockroachdb/cockroach/pull/146418 diff --git a/src/current/_includes/releases/v25.2/v25.2.10.md 
b/src/current/_includes/releases/v25.2/v25.2.10.md index b9f72be98d8..9baa79d8626 100644 --- a/src/current/_includes/releases/v25.2/v25.2.10.md +++ b/src/current/_includes/releases/v25.2/v25.2.10.md @@ -6,25 +6,20 @@ Release Date: December 12, 2025

DB Console changes

-- The background (elastic) store graphs for exhausted duration, and the wait duration histogram, have been separated from the foreground (regular) graphs. [#156868][#156868] +- The background (elastic) store graphs for exhausted duration, and the wait duration histogram, have been separated from the foreground (regular) graphs. #156868

Bug fixes

-- A mechanism that prevents unsafe replication changes from causing loss of quorum now functions correctly. An internal function has been fixed to properly return errors, enhancing the reliability of replication safeguards. [#156521][#156521] -- Fixed a bug that could cause internal errors for queries using generic query plans with `NULL` placeholder values. [#156977][#156977] +- A mechanism that prevents unsafe replication changes from causing loss of quorum now functions correctly. An internal function has been fixed to properly return errors, enhancing the reliability of replication safeguards. #156521 +- Fixed a bug that could cause internal errors for queries using generic query plans with `NULL` placeholder values. #156977

Performance improvements

-- The cost of generic query plans is now calculated based on worst-case selectivities for placeholder equalities (e.g., `x = $1`). This reduces the chance of suboptimal generic query plans being chosen when `plan_cache_mode=auto`. [#156797][#156797] +- The cost of generic query plans is now calculated based on worst-case selectivities for placeholder equalities (e.g., `x = $1`). This reduces the chance of suboptimal generic query plans being chosen when `plan_cache_mode=auto`. #156797

Miscellaneous

- Span config reconciliation jobs no longer fail on the - destination after failover from a PCR stream of a system virtual cluster. [#156810][#156810] + destination after failover from a PCR stream of a system virtual cluster. #156810 -[#156797]: https://github.com/cockroachdb/cockroach/pull/156797 -[#156810]: https://github.com/cockroachdb/cockroach/pull/156810 -[#156868]: https://github.com/cockroachdb/cockroach/pull/156868 -[#156521]: https://github.com/cockroachdb/cockroach/pull/156521 -[#156977]: https://github.com/cockroachdb/cockroach/pull/156977 diff --git a/src/current/_includes/releases/v25.2/v25.2.11.md b/src/current/_includes/releases/v25.2/v25.2.11.md index cb6ad30c39f..04c7b9661cc 100644 --- a/src/current/_includes/releases/v25.2/v25.2.11.md +++ b/src/current/_includes/releases/v25.2/v25.2.11.md @@ -6,35 +6,25 @@ Release Date: January 9, 2026

Operational changes

-- Successfully completed automatic SQL stats collecton jobs are now automatically purged rather than being retained for the full default job retention period. [#159411][#159411] +- Successfully completed automatic SQL stats collection jobs are now automatically purged rather than being retained for the full default job retention period. #159411

Bug fixes

-- Fixed a bug where a SQL statement with side effects (e.g., `INSERT`) inside a PL/pgSQL routine could be dropped if it used an `INTO` clause and none of the target variables were referenced. This bug had been present since v23.2. [#158344][#158344] -- Attempting to create a vector index with the legacy schema changer will now fail gracefully instead of crashing the node. [#158718][#158718] +- Fixed a bug where a SQL statement with side effects (e.g., `INSERT`) inside a PL/pgSQL routine could be dropped if it used an `INTO` clause and none of the target variables were referenced. This bug had been present since v23.2. #158344 +- Attempting to create a vector index with the legacy schema changer will now fail gracefully instead of crashing the node. #158718 - Fixed a bug that could cause incorrect query results when using prepared statements with _NULL_ placeholders. The bug has existed since v21.2 and violated SQL _NULL_-equality semantics by returning rows with _NULL_ values when the result set should have been empty. From v21.2 to v25.3, the bug occurred when all of the following were true: - The query was run with an explicit or implicit prepared statement - The query had an equality filter on a placeholder and a `UNIQUE` column - The column contained _NULL_ values - The placeholder was assigned to _NULL_ during execution - - Starting in v25.4, the requirements to trigger the bug were loosened: the column no longer needed to be `UNIQUE`, and the bug could be reproduced if the column was included in any index. [#159070][#159070] -- Fixed a race condition that could occur during context cancellation of an incoming snapshot. [#159537][#159537] -- Fixed a bug that could cause a panic during changefeed startup if an error occurred while initializing the metrics controller. 
[#159546][#159546] -- Fixed a bug causing a query predicate to be ignored when the predicate was on a column following one or more `ENUM` columns in an index, the predicate constrained the column to multiple values, and a lookup join to the index was chosen for the query plan. This bug was introduced in v24.3.0 and was present in all versions since. [#159775][#159775] -- Fixed a deadlock that could occur when a statistics creation task panicked. [#160582][#160582] + - Starting in v25.4, the requirements to trigger the bug were loosened: the column no longer needed to be `UNIQUE`, and the bug could be reproduced if the column was included in any index. #159070 +- Fixed a race condition that could occur during context cancellation of an incoming snapshot. #159537 +- Fixed a bug that could cause a panic during changefeed startup if an error occurred while initializing the metrics controller. #159546 +- Fixed a bug causing a query predicate to be ignored when the predicate was on a column following one or more `ENUM` columns in an index, the predicate constrained the column to multiple values, and a lookup join to the index was chosen for the query plan. This bug was introduced in v24.3.0 and was present in all versions since. #159775 +- Fixed a deadlock that could occur when a statistics creation task panicked. #160582

Performance improvements

-- `AFTER` triggers now use a cache for descriptor lookups of `TG_TABLE_SCHEMA`, which can significantly reduce trigger planning latency. [#159387][#159387] -- Added a new session variable, `distsql_prevent_partitioning_soft_limited_scans`, which, when true, prevents scans with soft limits from being planned as multiple `TableReaders` by the physical planner. This should decrease the initial setup costs of some fully-distributed query plans. [#160605][#160605] - -[#159411]: https://github.com/cockroachdb/cockroach/pull/159411 -[#158344]: https://github.com/cockroachdb/cockroach/pull/158344 -[#158718]: https://github.com/cockroachdb/cockroach/pull/158718 -[#159775]: https://github.com/cockroachdb/cockroach/pull/159775 -[#160582]: https://github.com/cockroachdb/cockroach/pull/160582 -[#159070]: https://github.com/cockroachdb/cockroach/pull/159070 -[#159537]: https://github.com/cockroachdb/cockroach/pull/159537 -[#159546]: https://github.com/cockroachdb/cockroach/pull/159546 -[#159387]: https://github.com/cockroachdb/cockroach/pull/159387 -[#160605]: https://github.com/cockroachdb/cockroach/pull/160605 +- `AFTER` triggers now use a cache for descriptor lookups of `TG_TABLE_SCHEMA`, which can significantly reduce trigger planning latency. #159387 +- Added a new session variable, `distsql_prevent_partitioning_soft_limited_scans`, which, when true, prevents scans with soft limits from being planned as multiple `TableReaders` by the physical planner. This should decrease the initial setup costs of some fully-distributed query plans. #160605 + diff --git a/src/current/_includes/releases/v25.2/v25.2.12.md b/src/current/_includes/releases/v25.2/v25.2.12.md index 89d1e183346..e0713860922 100644 --- a/src/current/_includes/releases/v25.2/v25.2.12.md +++ b/src/current/_includes/releases/v25.2/v25.2.12.md @@ -6,30 +6,23 @@ Release Date: February 11, 2026

General changes

-- Changefeeds now support the `partition_alg` option for specifying a kafka partitioning algorithm. Currently `fnv-1a` (default) and `murmur2` are supported. The option is only valid on kafka v2 sinks. This is protected by the cluster setting `changefeed.partition_alg.enabled`. An example usage: `SET CLUSTER SETTING changefeed.partition_alg.enabled=true; CREATE CHANGEFEED ... INTO 'kafka://...' WITH partition_alg='murmur2';` Note that if a changefeed is created using the `murmur2` algorithm, and then the cluster setting is disabled, the changefeed continues using the `murmur2` algorithm unless the changefeed is altered to use a differed `partition_alg`. [#161525][#161525] +- Changefeeds now support the `partition_alg` option for specifying a kafka partitioning algorithm. Currently `fnv-1a` (default) and `murmur2` are supported. The option is only valid on kafka v2 sinks. This is protected by the cluster setting `changefeed.partition_alg.enabled`. An example usage: `SET CLUSTER SETTING changefeed.partition_alg.enabled=true; CREATE CHANGEFEED ... INTO 'kafka://...' WITH partition_alg='murmur2';` Note that if a changefeed is created using the `murmur2` algorithm, and then the cluster setting is disabled, the changefeed continues using the `murmur2` algorithm unless the changefeed is altered to use a different `partition_alg`. #161525

Bug fixes

-- Fixed a deadlock that could occur when a statistics creation task panicked. [#160419][#160419] -- Fixed a bug where dropping a trigger on a table with a self-referencing foreign key could cause a missing reference between the objects. [#161025][#161025] -- Fixed a bug where `IMPORT` with Avro data using `OCF` format could silently lose data if the underlying storage (e.g., S3) returned an error during read. Such errors are now properly reported. Other formats (specified via `data_as_binary_records` and `data_as_json_records` options) are unaffected. The bug has been present since approximately v20.1. [#161322][#161322] +- Fixed a deadlock that could occur when a statistics creation task panicked. #160419 +- Fixed a bug where dropping a trigger on a table with a self-referencing foreign key could cause a missing reference between the objects. #161025 +- Fixed a bug where `IMPORT` with Avro data using `OCF` format could silently lose data if the underlying storage (e.g., S3) returned an error during read. Such errors are now properly reported. Other formats (specified via `data_as_binary_records` and `data_as_json_records` options) are unaffected. The bug has been present since approximately v20.1. #161322 - Fixed an error that occurred when using generic plan that generates a lookup join on indexes containing identity - computed columns. [#162339][#162339] + computed columns. #162339

Performance improvements

-- Added a new session variable, `distsql_prevent_partitioning_soft_limited_scans`, which, when true, prevents scans with soft limits from being planned as multiple TableReaders by the physical planner. This should decrease the initial setup costs of some fully-distributed query plans. [#160630][#160630] +- Added a new session variable, `distsql_prevent_partitioning_soft_limited_scans`, which, when true, prevents scans with soft limits from being planned as multiple TableReaders by the physical planner. This should decrease the initial setup costs of some fully-distributed query plans. #160630

Miscellaneous

-- `kv.transaction.write_buffering.enabled` is removed from the public cluster settings to better reflect its preview status. [#162027][#162027] +- `kv.transaction.write_buffering.enabled` is removed from the public cluster settings to better reflect its preview status. #162027 -[#161525]: https://github.com/cockroachdb/cockroach/pull/161525 -[#160419]: https://github.com/cockroachdb/cockroach/pull/160419 -[#161025]: https://github.com/cockroachdb/cockroach/pull/161025 -[#161322]: https://github.com/cockroachdb/cockroach/pull/161322 -[#162339]: https://github.com/cockroachdb/cockroach/pull/162339 -[#160630]: https://github.com/cockroachdb/cockroach/pull/160630 -[#162027]: https://github.com/cockroachdb/cockroach/pull/162027 diff --git a/src/current/_includes/releases/v25.2/v25.2.13.md b/src/current/_includes/releases/v25.2/v25.2.13.md index 430041c5f5b..b4dba656225 100644 --- a/src/current/_includes/releases/v25.2/v25.2.13.md +++ b/src/current/_includes/releases/v25.2/v25.2.13.md @@ -6,9 +6,7 @@ Release Date: February 19, 2026

Bug fixes

-- Fixed a rare race condition between range splits and MVCC garbage collection where a GCRequest could target keys outside its declared span. In rare cases, this could result in data on the post-split right-hand side (RHS) being incorrectly garbage collected, potentially leading to lost writes. The system now detects and rejects such malformed GC requests. [#163773][#163773] -- Fixed a bug where generating a debug zip could trigger an out-of-memory (OOM) condition on a node if malformed log entries were present in logs using json or json-compact formatting. Debug zip generation now safely handles malformed log lines and prevents excessive memory consumption. [#163801][#163801] +- Fixed a rare race condition between range splits and MVCC garbage collection where a GCRequest could target keys outside its declared span. In rare cases, this could result in data on the post-split right-hand side (RHS) being incorrectly garbage collected, potentially leading to lost writes. The system now detects and rejects such malformed GC requests. #163773 +- Fixed a bug where generating a debug zip could trigger an out-of-memory (OOM) condition on a node if malformed log entries were present in logs using json or json-compact formatting. Debug zip generation now safely handles malformed log lines and prevents excessive memory consumption. #163801 -[#163773]: https://github.com/cockroachdb/cockroach/pull/163773 -[#163801]: https://github.com/cockroachdb/cockroach/pull/163801 diff --git a/src/current/_includes/releases/v25.2/v25.2.14.md b/src/current/_includes/releases/v25.2/v25.2.14.md index 7392352f439..4f2babc8085 100644 --- a/src/current/_includes/releases/v25.2/v25.2.14.md +++ b/src/current/_includes/releases/v25.2/v25.2.14.md @@ -6,7 +6,6 @@ Release Date: March 5, 2026

Bug fixes

-- Fixed a bug where an internal context structure could grow unboundedly over time. In rare cases, on nodes running continuously for several months or more, this could cause the `cockroach` process to appear stalled when a CPU profile was requested.​​​​​​​​​​​​​​​​ [#164779][#164779] +- Fixed a bug where an internal context structure could grow unboundedly over time. In rare cases, on nodes running continuously for several months or more, this could cause the `cockroach` process to appear stalled when a CPU profile was requested.​​​​​​​​​​​​​​​​ #164779 -[#164779]: https://github.com/cockroachdb/cockroach/pull/164779 diff --git a/src/current/_includes/releases/v25.2/v25.2.15.md b/src/current/_includes/releases/v25.2/v25.2.15.md index 81362e6b0b9..0b87e784f44 100644 --- a/src/current/_includes/releases/v25.2/v25.2.15.md +++ b/src/current/_includes/releases/v25.2/v25.2.15.md @@ -6,9 +6,7 @@ Release Date: March 9, 2026

Bug fixes

-- Fixed a bug that could cause changefeeds using Kafka v1 sinks to hang when the changefeed was cancelled. [#162118][#162118] -- Fixed a bug where generating a debug zip could trigger an out-of-memory (OOM) condition on a node if malformed log entries were present in logs using `json` or `json-compact` formatting. This bug was introduced in v24.1. [#163357][#163357] +- Fixed a bug that could cause changefeeds using Kafka v1 sinks to hang when the changefeed was cancelled. #162118 +- Fixed a bug where generating a debug zip could trigger an out-of-memory (OOM) condition on a node if malformed log entries were present in logs using `json` or `json-compact` formatting. This bug was introduced in v24.1. #163357 -[#162118]: https://github.com/cockroachdb/cockroach/pull/162118 -[#163357]: https://github.com/cockroachdb/cockroach/pull/163357 diff --git a/src/current/_includes/releases/v25.2/v25.2.16.md b/src/current/_includes/releases/v25.2/v25.2.16.md index 1a7dba0bc6e..5c0637f1575 100644 --- a/src/current/_includes/releases/v25.2/v25.2.16.md +++ b/src/current/_includes/releases/v25.2/v25.2.16.md @@ -6,8 +6,6 @@ Release Date: April 3, 2026

Bug fixes

-- Fixed a bug where CockroachDB did not always promptly respond to the statement timeout when performing a hash join with an `ON` filter that is mostly `false`. [#164889][#164889] -- Fixed a bug that could cause row sampling for table statistics to crash a node due to a data race when processing a collated string column with values larger than 400 bytes. This bug has existed since before v23.1. [#165570][#165570] +- Fixed a bug where CockroachDB did not always promptly respond to the statement timeout when performing a hash join with an `ON` filter that is mostly `false`. #164889 +- Fixed a bug that could cause row sampling for table statistics to crash a node due to a data race when processing a collated string column with values larger than 400 bytes. This bug has existed since before v23.1. #165570 -[#164889]: https://github.com/cockroachdb/cockroach/pull/164889 -[#165570]: https://github.com/cockroachdb/cockroach/pull/165570 diff --git a/src/current/_includes/releases/v25.2/v25.2.17.md b/src/current/_includes/releases/v25.2/v25.2.17.md index 235edd72e65..31463e52f83 100644 --- a/src/current/_includes/releases/v25.2/v25.2.17.md +++ b/src/current/_includes/releases/v25.2/v25.2.17.md @@ -6,9 +6,7 @@ Release Date: April 20, 2026

Bug fixes

-- Fixed a bug where concurrent updates to a table using multiple column families during a partial index creation could result in data loss, incorrect `NULL` values, or validation failures in the resulting index. [#168473][#168473] -- Fixed a bug that caused Logical Data Replication (LDR) job creation to fail during rolling upgrades from v24.3 directly to v25.2. [#167433][#167433] +- Fixed a bug where concurrent updates to a table using multiple column families during a partial index creation could result in data loss, incorrect `NULL` values, or validation failures in the resulting index. #168473 +- Fixed a bug that caused Logical Data Replication (LDR) job creation to fail during rolling upgrades from v24.3 directly to v25.2. #167433 -[#168473]: https://github.com/cockroachdb/cockroach/pull/168473 -[#167433]: https://github.com/cockroachdb/cockroach/pull/167433 diff --git a/src/current/_includes/releases/v25.2/v25.2.2.md b/src/current/_includes/releases/v25.2/v25.2.2.md index 7bc53ea041f..4851cef85e8 100644 --- a/src/current/_includes/releases/v25.2/v25.2.2.md +++ b/src/current/_includes/releases/v25.2/v25.2.2.md @@ -7,68 +7,50 @@ Release Date: June 25, 2025

Operational changes

- When `server.telemetry.hot_ranges_stats.enabled` cluster setting is enabled, nodes now log hot ranges every minute if they exceed 250ms of CPU time per second. In multi-tenant deployments, this check occurs every 5 minutes at the cluster level, improving visibility into transient performance issues. - [#146887][#146887] + #146887 - Added a new metric, `kv.loadsplitter.cleardirection`, which increments when the load-based splitter observes that more than 80% of replica access samples are moving in a single direction (either left/descending or right/ascending). - [#147169][#147169] + #147169

DB Console changes

- The Hot Ranges page node filter has been moved out of the main filter container and now filters nodes on the backend to reduce load time. - [#147778][#147778] + #147778

Bug fixes

- Fixed a bug that could cause the `cockroach` process to `segfault` when collecting runtime execution traces (typically collected via the **Advanced Debug** page in the Console). - [#146886][#146886] + #146886 - Fixed a bug where the `kv.rangefeed.closed_timestamp.slow_ranges` would not be incremented when a rangefeed closed timestamp was slower than the target threshold. - [#146975][#146975] + #146975 - Fixed a bug that could cause an `AFTER` trigger to fail with `client already committed or rolled back the transaction` if the query also contained foreign-key cascades. The bug had existed since `AFTER` triggers were introduced in v24.3. - [#146977][#146977] + #146977 - Fixed a bug that caused the SQL Activity > Statement Fingerprint page to fail to load details for statements run with application names containing a `#` character. - [#147223][#147223] + #147223 - Previously, CockroachDB could incorrectly evaluate the `to_regclass`, `to_regnamespace`, `to_regproc`, `to_regprocedure`, `to_regrole`, and `to_regtype` built-in functions when the query using them was evaluated in a distributed fashion. The bug was introduced with these built-in functions in v23.1 and is now fixed. - [#147376][#147376] + #147376 - Fixed a bug that caused the optimizer to ignore index hints when optimizing some forms of prepared statements. This could result in one of two unexpected behaviors: a query errors with the message `index cannot be used for this query` when the index can actually be used; or a query uses an index that does not adhere to the hint. The hints relevant to this bug are regular index hints, e.g., `SELECT * FROM tab@index`, `FORCE_INVERTED_INDEX`, and `FORCE_ZIGZAG`. - [#147417][#147417] + #147417 - Fixed a bug where the `pg_catalog.pg_policy` table could contain duplicate OID values when multiple tables had policies with the same policy ID. All rows in `pg_policy` now have unique OIDs as required. 
- [#147438][#147438] + #147438 - Fixed a bug that could cause stable expressions to be folded in cached query plans. The bug could cause stable expressions like `current_setting` to return the wrong result if used in a prepared statement. The bug was introduced in v23.2.22, v24.1.14, v24.3.9, v25.1.2, and the v25.2 alpha. - [#147460][#147460] + #147460 - Fixed a runtime panic in the `substring_index` function that occurred when the count argument was the minimum 64-bit integer value. - [#147549][#147549] + #147549 - Fixed a memory leak in index backfill jobs where completed spans were duplicated in memory on each progress update after resuming from a checkpoint. This could cause out-of-memory (OOM) errors when backfilling indexes on large tables with many ranges. This bug affected release version v25.2.0 and pre-release versions v25.2.0-alpha.3 through v25.2.0-rc.1. - [#147563][#147563] + #147563 - Fixed a bug where prepared statements on schema changes could fail with runtime errors. - [#147671][#147671] + #147671 - Fixed a bug where `ALTER TABLE` was modifying identity attributes on columns not backed by a sequence. - [#147711][#147711] + #147711

Performance improvements

- TTL jobs now respond to cluster topology changes by restarting and rebalancing across available nodes. - [#147083][#147083] + #147083

Miscellaneous

- Fixed an issue in Logical Data Replication (LDR) where unique indexes with lower index IDs than the primary key could cause incorrect DLQ entries during replication. - [#147350][#147350] + #147350 -[#147169]: https://github.com/cockroachdb/cockroach/pull/147169 -[#147778]: https://github.com/cockroachdb/cockroach/pull/147778 -[#146886]: https://github.com/cockroachdb/cockroach/pull/146886 -[#146975]: https://github.com/cockroachdb/cockroach/pull/146975 -[#147223]: https://github.com/cockroachdb/cockroach/pull/147223 -[#147417]: https://github.com/cockroachdb/cockroach/pull/147417 -[#146887]: https://github.com/cockroachdb/cockroach/pull/146887 -[#147671]: https://github.com/cockroachdb/cockroach/pull/147671 -[#147350]: https://github.com/cockroachdb/cockroach/pull/147350 -[#147376]: https://github.com/cockroachdb/cockroach/pull/147376 -[#147438]: https://github.com/cockroachdb/cockroach/pull/147438 -[#147460]: https://github.com/cockroachdb/cockroach/pull/147460 -[#147532]: https://github.com/cockroachdb/cockroach/pull/147532 -[#147563]: https://github.com/cockroachdb/cockroach/pull/147563 -[#146977]: https://github.com/cockroachdb/cockroach/pull/146977 -[#147711]: https://github.com/cockroachdb/cockroach/pull/147711 -[#147083]: https://github.com/cockroachdb/cockroach/pull/147083 -[#147549]: https://github.com/cockroachdb/cockroach/pull/147549 diff --git a/src/current/_includes/releases/v25.2/v25.2.3.md b/src/current/_includes/releases/v25.2/v25.2.3.md index fa12edf8aea..6eddd1a6c05 100644 --- a/src/current/_includes/releases/v25.2/v25.2.3.md +++ b/src/current/_includes/releases/v25.2/v25.2.3.md @@ -7,36 +7,36 @@ Release Date: July 28, 2025

General changes

- Changefeeds emitting to Kafka sinks that were created in CockroachDB v24.2.1+, or v23.2.10+ and v24.1.4+ with the `changefeed.new_kafka_sink.enabled` cluster setting enabled now include the message key, size, and MVCC timestamp in message too large error logs. - [#147929][#147929] + #147929

SQL language changes

- Added the metrics `sql.txn.auto_retry.count` and `sql.statements.auto_retry.count`, which count the number of automatic retries of SQL transactions and statements, respectively, within the database. These metrics differ from the related `txn.restarts.*` metrics, which count retryable errors emitted by the KV layer that must be retried. The new `sql.txn.auto_retry.count` and `sql.statements.auto_retry.count` metrics count auto-retry actions taken by the SQL layer in response to some of those retryable errors. - [#148207][#148207] + #148207 - Added a session variable `initial_retry_backoff_for_read_committed` that controls the initial backoff duration when retrying an individual statement in an explicit `READ COMMITTED` transaction. A duration of `0` disables exponential backoff. If a statement in an explicit `READ COMMITTED` transaction is failing with the `40001` error `ERROR: restart transaction: read committed retry limit exceeded; set by max_retries_for_read_committed=...`, then you should set `initial_retry_backoff_for_read_committed` to a duration proportional to the typical execution time of the statement (in addition to also increasing `max_retries_for_read_committed`). - [#148207][#148207] + #148207

DB Console changes

- Updated the "Learn more" link on the **Hot Ranges** page to direct users to a newer, more comprehensive reference guide about hotspots. - [#148522][#148522] + #148522

Bug fixes

- Fixed a data race in the `cloudstorage` sink. - [#147163][#147163] + #147163 - Fixed a bug where searching a vector with a query vector that doesn't match the dimensions of the vector column in the table would cause a node to crash. - [#147875][#147875] + #147875 - Fixed a bug where functions lost their row-level security (RLS) policy backreferences, leading to schema change failures. - [#147905][#147905] + #147905 - Fixed an error in `crdb_internal.table_spans` that could occur when a table's schema had been dropped. - [#147977][#147977] + #147977 - Fixed a bug where adding multiple columns in a single statement with `AddGeometryColumn` would cause runtime errors. - [#148146][#148146] + #148146 - Fixed a bug where `libpq` clients using the async API could hang with large result sets (Python: psycopg; Ruby: ActiveRecord, ruby-pg). - [#148468][#148468] + #148468 - Previously, CockroachDB could hit an internal error when performing a `DELETE`, `UPDATE`, or `UPSERT` where the initial scan of the mutation is locking and is on a table different from the one being mutated. A possible workaround was `SET enable_implicit_select_for_update = false`, but this could increase contention. The bug was introduced in v25.2 and is now fixed. - [#149301][#149301] + #149301 - The `RESET ALL` statement no longer affects the following session variables: - `is_superuser` - `role` @@ -47,38 +47,21 @@ Release Date: July 28, 2025 - `transaction_read_only` This better matches PostgreSQL behavior for `RESET ALL`. In addition, the `DISCARD ALL` statement no longer errors when `default_transaction_use_follower_reads` is enabled. - [#149388][#149388] + #149388 - In v25.1, automatic partial statistics collection was enabled by default (by setting the `sql.stats.automatic_partial_collection.enabled` cluster setting to `true`). Partial statistics collection may encounter certain expected scenarios that were previously reported as failed stats jobs with PostgreSQL error code `55000`. 
These errors are benign and are no longer reported. Instead, the stats job will be marked as "succeeded," though no new statistics will be created. - [#149625][#149625] + #149625 - Fixed a slow memory leak that was introduced in v25.1.8, v25.2.1, v25.2.2, and v25.3 betas. The leak would accumulate whenever a node executed a part of the distributed plan (the gateway node of the plan was not affected), and could only be mitigated by restarting the node. - [#149921][#149921] + #149921 - Fixed an issue where some SQL metrics were not reported when `server.child_metrics.enabled` was enabled, `server.child_metrics.include_aggregate.enabled` was disabled, and `sql.metrics.application_name.enabled` and `sql.metrics.database_name.enabled` were also disabled. Specifically, metrics with no children now report their aggregate metrics regardless of the `server.child_metrics.include_aggregate.enabled` cluster setting. - [#150185][#150185] + #150185 - Fixed a bug that would allow a race condition in foreign key cascades under `READ COMMITTED` and `REPEATABLE READ` isolation levels. - [#150338][#150338] + #150338 - Fixed a bug where the entire schema would become inaccessible if a table was referenced as an implicit record type by a user-defined function (UDF) while the table was undergoing an `IMPORT`. - [#150441][#150441] + #150441

Miscellaneous

- Restore no longer gets stuck in the reverting state after failed cleanup of dropped temporary system tables. - [#148485][#148485] + #148485 -[#148522]: https://github.com/cockroachdb/cockroach/pull/148522 -[#147977]: https://github.com/cockroachdb/cockroach/pull/147977 -[#148485]: https://github.com/cockroachdb/cockroach/pull/148485 -[#148207]: https://github.com/cockroachdb/cockroach/pull/148207 -[#147875]: https://github.com/cockroachdb/cockroach/pull/147875 -[#147905]: https://github.com/cockroachdb/cockroach/pull/147905 -[#149625]: https://github.com/cockroachdb/cockroach/pull/149625 -[#150441]: https://github.com/cockroachdb/cockroach/pull/150441 -[#147929]: https://github.com/cockroachdb/cockroach/pull/147929 -[#147163]: https://github.com/cockroachdb/cockroach/pull/147163 -[#150185]: https://github.com/cockroachdb/cockroach/pull/150185 -[#148146]: https://github.com/cockroachdb/cockroach/pull/148146 -[#148468]: https://github.com/cockroachdb/cockroach/pull/148468 -[#149301]: https://github.com/cockroachdb/cockroach/pull/149301 -[#149388]: https://github.com/cockroachdb/cockroach/pull/149388 -[#149921]: https://github.com/cockroachdb/cockroach/pull/149921 -[#150338]: https://github.com/cockroachdb/cockroach/pull/150338 diff --git a/src/current/_includes/releases/v25.2/v25.2.4.md b/src/current/_includes/releases/v25.2/v25.2.4.md index c6c6a2e1bfd..8307f1eb536 100644 --- a/src/current/_includes/releases/v25.2/v25.2.4.md +++ b/src/current/_includes/releases/v25.2/v25.2.4.md @@ -7,7 +7,6 @@ Release Date: August 1, 2025

Bug fixes

- Fixed a bug that could cause some errors returned by attempts to upload backup data to external storage providers to be undetected, potentially causing incomplete backups. - [#151095][#151095] + #151095 -[#151095]: https://github.com/cockroachdb/cockroach/pull/151095 diff --git a/src/current/_includes/releases/v25.2/v25.2.5.md b/src/current/_includes/releases/v25.2/v25.2.5.md index 1224dcff43c..5bb538d0555 100644 --- a/src/current/_includes/releases/v25.2/v25.2.5.md +++ b/src/current/_includes/releases/v25.2/v25.2.5.md @@ -8,50 +8,34 @@ Release Date: August 22, 2025 - Kafka v2 changefeed sinks now support a cluster setting that enables detailed error logging for messages - exceeding Kafka v2 size limit. [#149829][#149829] + exceeding Kafka v2 size limit. #149829

Operational changes

-- Introduced a cluster setting, `sql.stats.error_on_concurrent_create_stats.enabled`, which modifies how CockroachDB reacts to concurrent auto stats jobs. The default, `true`, maintains the previous behavior. Setting `sql.stats.error_on_concurrent_create_stats.enabled` to `false` will cause the concurrent auto stats job to be skipped with just a log entry and no increased error counters. [#149837][#149837] +- Introduced a cluster setting, `sql.stats.error_on_concurrent_create_stats.enabled`, which modifies how CockroachDB reacts to concurrent auto stats jobs. The default, `true`, maintains the previous behavior. Setting `sql.stats.error_on_concurrent_create_stats.enabled` to `false` will cause the concurrent auto stats job to be skipped with just a log entry and no increased error counters. #149837

Bug fixes

-- Fixed an issue where the mvcc_timestamp field was incorrectly returning zero values when used with CDC queries. The timestamp is now emitted correctly. [#147114][#147114] +- Fixed an issue where the mvcc_timestamp field was incorrectly returning zero values when used with CDC queries. The timestamp is now emitted correctly. #147114 - Fixed a bug where database login could fail during LDAP, JWT, or OIDC authentication if the user's external group memberships did not correspond to any existing roles in the database. The login will now succeed, and no roles will be granted or - revoked in this scenario. [#149747][#149747] -- Fixed a bug that would cause a `CALL` statement executed via a portal in the extended wire protocol to result in an error like `unknown portal ""` if the stored procedure contained `COMMIT` or `ROLLBACK` statements. The bug had existed since PL/pgSQL transaction control statements were introduced in v24.1. The fix will be off by default in versions prior to v25.3, and can be toggled on by setting `use_proc_txn_control_extended_protocol_fix = true`. [#149851][#149851] -- Fixed a slow memory leak that was introduced in v25.1.8, v25.2.1, v25.2.2, and v25.3 betas. The leak would accumulate whenever a node executed a part of the distributed plan (the gateway node of the plan was not affected), and could only be mitigated by restarting the node. [#149919][#149919] -- Fixed an issue where some SQL metrics were not reported when `server.child_metrics.enabled` was enabled, `server.child_metrics.include_aggregate.enabled` was disabled, and `sql.metrics.application_name.enabled` and `sql.metrics.database_name.enabled` were also disabled. Specifically, metrics with no children now report their aggregate metrics regardless of the `server.child_metrics.include_aggregate.enabled` cluster setting. [#149937][#149937] -- Fixed a bug that would allow a race condition in foreign key cascades under `READ COMMITTED` and `REPEATABLE READ` isolation levels. 
[#150295][#150295] -- Fixed an issue where discarding zone configs on sequences did not actually remove the configuration. [#150359][#150359] -- Fixed a bug where the entire schema would become inaccessible if a table was referenced as an implicit record type by a user-defined function (UDF) while the table was undergoing an `IMPORT`. [#150439][#150439] -- Fixed invalid zone configurations that were generated when adding a super region to a 3-region database with a secondary region and region survivability. Previously, this could result in assigning more than the allowed number of replicas. [#150619][#150619] -- Fixed a bug that could cause some errors returned by attempts to upload backup data to external storage providers to be undetected, potentially causing incomplete backups. [#151082][#151082] -- Fixed a memory accounting issue in the client certificate cache that caused multiple allocations to be reported for the same certificate. The cache now accurately tracks memory usage and includes a safeguard to prevent it from negatively affecting SQL operations. [#151146][#151146] -- Previously, CockroachDB could encounter an internal error `trying to add a column of UNKNOWN type at ...` in rare cases when handling `CASE` or `OR` operations. This bug was present since v20.2 and is now fixed. [#151161][#151161] -- Previously, CockroachDB could hit an error `ERROR: span with results after resume span...` when evaluating some queries with `ORDER BY ... DESC` in an edge case. This bug was present since v22.1 and is now fixed. [#152185][#152185] + revoked in this scenario. #149747 +- Fixed a bug that would cause a `CALL` statement executed via a portal in the extended wire protocol to result in an error like `unknown portal ""` if the stored procedure contained `COMMIT` or `ROLLBACK` statements. The bug had existed since PL/pgSQL transaction control statements were introduced in v24.1. 
The fix will be off by default in versions prior to v25.3, and can be toggled on by setting `use_proc_txn_control_extended_protocol_fix = true`. #149851 +- Fixed a slow memory leak that was introduced in v25.1.8, v25.2.1, v25.2.2, and v25.3 betas. The leak would accumulate whenever a node executed a part of the distributed plan (the gateway node of the plan was not affected), and could only be mitigated by restarting the node. #149919 +- Fixed an issue where some SQL metrics were not reported when `server.child_metrics.enabled` was enabled, `server.child_metrics.include_aggregate.enabled` was disabled, and `sql.metrics.application_name.enabled` and `sql.metrics.database_name.enabled` were also disabled. Specifically, metrics with no children now report their aggregate metrics regardless of the `server.child_metrics.include_aggregate.enabled` cluster setting. #149937 +- Fixed a bug that would allow a race condition in foreign key cascades under `READ COMMITTED` and `REPEATABLE READ` isolation levels. #150295 +- Fixed an issue where discarding zone configs on sequences did not actually remove the configuration. #150359 +- Fixed a bug where the entire schema would become inaccessible if a table was referenced as an implicit record type by a user-defined function (UDF) while the table was undergoing an `IMPORT`. #150439 +- Fixed invalid zone configurations that were generated when adding a super region to a 3-region database with a secondary region and region survivability. Previously, this could result in assigning more than the allowed number of replicas. #150619 +- Fixed a bug that could cause some errors returned by attempts to upload backup data to external storage providers to be undetected, potentially causing incomplete backups. #151082 +- Fixed a memory accounting issue in the client certificate cache that caused multiple allocations to be reported for the same certificate. 
The cache now accurately tracks memory usage and includes a safeguard to prevent it from negatively affecting SQL operations. #151146 +- Previously, CockroachDB could encounter an internal error `trying to add a column of UNKNOWN type at ...` in rare cases when handling `CASE` or `OR` operations. This bug was present since v20.2 and is now fixed. #151161 +- Previously, CockroachDB could hit an error `ERROR: span with results after resume span...` when evaluating some queries with `ORDER BY ... DESC` in an edge case. This bug was present since v22.1 and is now fixed. #152185

Miscellaneous

-- Upgrade to Go 1.23.11 [#150988][#150988] - - -[#149837]: https://github.com/cockroachdb/cockroach/pull/149837 -[#152185]: https://github.com/cockroachdb/cockroach/pull/152185 -[#150988]: https://github.com/cockroachdb/cockroach/pull/150988 -[#151082]: https://github.com/cockroachdb/cockroach/pull/151082 -[#151146]: https://github.com/cockroachdb/cockroach/pull/151146 -[#149919]: https://github.com/cockroachdb/cockroach/pull/149919 -[#149937]: https://github.com/cockroachdb/cockroach/pull/149937 -[#150619]: https://github.com/cockroachdb/cockroach/pull/150619 -[#150359]: https://github.com/cockroachdb/cockroach/pull/150359 -[#151161]: https://github.com/cockroachdb/cockroach/pull/151161 -[#147114]: https://github.com/cockroachdb/cockroach/pull/147114 -[#149851]: https://github.com/cockroachdb/cockroach/pull/149851 -[#150295]: https://github.com/cockroachdb/cockroach/pull/150295 -[#149829]: https://github.com/cockroachdb/cockroach/pull/149829 -[#149747]: https://github.com/cockroachdb/cockroach/pull/149747 -[#150439]: https://github.com/cockroachdb/cockroach/pull/150439 +- Upgrade to Go 1.23.11 #150988 + + diff --git a/src/current/_includes/releases/v25.2/v25.2.6.md b/src/current/_includes/releases/v25.2/v25.2.6.md index 5890f35e254..dc7831835c7 100644 --- a/src/current/_includes/releases/v25.2/v25.2.6.md +++ b/src/current/_includes/releases/v25.2/v25.2.6.md @@ -6,53 +6,37 @@ Release Date: September 22, 2025

SQL language changes

-- Added a new session variable, `disable_optimizer_rules`, which allows users to provide a comma-separated list of optimizer rules to disable during query optimization. This allows users to avoid rules that are known to create a suboptimal query plan for specific queries. [#152349][#152349] -- When `sql_safe_updates` is enabled, the `ALTER TABLE ... LOCALITY` statement will be blocked when trying to convert an existing table to `REGIONAL BY ROW`, unless a region column has been added to the table. This protects against undesired behavior that caused `UPDATE` or `DELETE` statements to fail against the table while the locality change was in progress. [#152600][#152600] +- Added a new session variable, `disable_optimizer_rules`, which allows users to provide a comma-separated list of optimizer rules to disable during query optimization. This allows users to avoid rules that are known to create a suboptimal query plan for specific queries. #152349 +- When `sql_safe_updates` is enabled, the `ALTER TABLE ... LOCALITY` statement will be blocked when trying to convert an existing table to `REGIONAL BY ROW`, unless a region column has been added to the table. This protects against undesired behavior that caused `UPDATE` or `DELETE` statements to fail against the table while the locality change was in progress. #152600

Operational changes

-- Updated TTL job replanning to be less sensitive by focusing specifically on detecting when nodes become unavailable rather than reacting to all plan differences. The cluster setting `sql.ttl.replan_flow_threshold` may have been set to `0` to work around the TTL replanner being too sensitive; this fix will alleviate that and any instance that had set `replan_flow_threshold` to `0` can be reset back to the default. [#151485][#151485] +- Updated TTL job replanning to be less sensitive by focusing specifically on detecting when nodes become unavailable rather than reacting to all plan differences. The cluster setting `sql.ttl.replan_flow_threshold` may have been set to `0` to work around the TTL replanner being too sensitive; this fix will alleviate that and any instance that had set `replan_flow_threshold` to `0` can be reset back to the default. #151485

Bug fixes

-- Fixed a bug where `debug.zip` files collected from clusters with `disallow_full_table_scans` enabled were missing system table data. [#151224][#151224] -- Addressed a bug on `schema_locked` tables when a column is dropped, and `schema_locked` is toggled for the user. [#151528][#151528] -- Fixed a bug that could cause excessive memory allocations when compacting timeseries keys. [#151814][#151814] -- Fixed a bug where `DROP USER` succeeded even though a role owned default privileges, which could leave invalid privilege entries in the system. [#151879][#151879] -- Previously, CockroachDB could hit an error `ERROR: span with results after resume span...` when evaluating some queries with `ORDER BY ... DESC` in an edge case. This bug was present since v22.1 and is now fixed. [#152184][#152184] -- Fixed a bug where `SHOW TABLES` would show inaccurate row counts if the most recent statistics collection was partial. [#152186][#152186] -- Fixed a bug where updating column default expressions would incorrectly remove sequence ownerships for the affected column. [#152314][#152314] +- Fixed a bug where `debug.zip` files collected from clusters with `disallow_full_table_scans` enabled were missing system table data. #151224 +- Addressed a bug on `schema_locked` tables when a column is dropped, and `schema_locked` is toggled for the user. #151528 +- Fixed a bug that could cause excessive memory allocations when compacting timeseries keys. #151814 +- Fixed a bug where `DROP USER` succeeded even though a role owned default privileges, which could leave invalid privilege entries in the system. #151879 +- Previously, CockroachDB could hit an error `ERROR: span with results after resume span...` when evaluating some queries with `ORDER BY ... DESC` in an edge case. This bug was present since v22.1 and is now fixed. #152184 +- Fixed a bug where `SHOW TABLES` would show inaccurate row counts if the most recent statistics collection was partial. 
#152186 +- Fixed a bug where updating column default expressions would incorrectly remove sequence ownerships for the affected column. #152314 - Fixed a bug that allowed foreign-key violations to result from some combinations of concurrent `READ COMMITTED` and `SERIALIZABLE` transactions. If both `SERIALIZABLE` and weaker-isolation transactions will concurrently modify rows involved in foreign-key relationships, the `SERIALIZABLE` transactions must have the following session variables set in order to prevent any possible foreign-key violations: - `SET enable_implicit_fk_locking_for_serializable = on;` - `SET enable_shared_locking_for_serializable = on;` - - `SET enable_durable_locking_for_serializable = on;` [#152375][#152375] -- Added an automatic repair for dangling or invalid entries in the `system.comments` table. [#152471][#152471] -- Added the `use_soft_limit_for_distribute_scan` session variable (default: `false`), which controls whether CockroachDB uses the soft row count estimate when deciding whether an execution plan should be distributed. In v25.1, the physical planning heuristics were changed such that large constrained table scans, estimated to scan at least 10,000 rows (controlled via `distribute_scan_row_count_threshold`), would force plan distribution when `distsql=auto`. However, if the scan had a "soft limit" CockroachDB would still use the full estimate (for example, `10,000` in `estimated row count: 100–10,000`), sometimes unnecessarily distributing queries and increasing latency. The `use_soft_limit_for_distribute_scan` session variable addresses this by allowing the planner to use the soft limit when deciding whether a scan is "large". [#152559][#152559] -- Fixed a bug where views could not reference the `crdb_region` column from their underlying tables in expressions. [#152679][#152679] + - `SET enable_durable_locking_for_serializable = on;` #152375 +- Added an automatic repair for dangling or invalid entries in the `system.comments` table. 
#152471 +- Added the `use_soft_limit_for_distribute_scan` session variable (default: `false`), which controls whether CockroachDB uses the soft row count estimate when deciding whether an execution plan should be distributed. In v25.1, the physical planning heuristics were changed such that large constrained table scans, estimated to scan at least 10,000 rows (controlled via `distribute_scan_row_count_threshold`), would force plan distribution when `distsql=auto`. However, if the scan had a "soft limit" CockroachDB would still use the full estimate (for example, `10,000` in `estimated row count: 100–10,000`), sometimes unnecessarily distributing queries and increasing latency. The `use_soft_limit_for_distribute_scan` session variable addresses this by allowing the planner to use the soft limit when deciding whether a scan is "large". #152559 +- Fixed a bug where views could not reference the `crdb_region` column from their underlying tables in expressions. #152679

Performance improvements

-- Lookup joins can now be used on tables with virtual columns even if the type of the search argument is not identical to the column type referenced in the virtual column. [#152631][#152631] +- Lookup joins can now be used on tables with virtual columns even if the type of the search argument is not identical to the column type referenced in the virtual column. #152631

Miscellaneous

- Tunes S3 client retry behavior to be more reliable in the - presence of correlated errors. [#151874][#151874] - - -[#151224]: https://github.com/cockroachdb/cockroach/pull/151224 -[#152314]: https://github.com/cockroachdb/cockroach/pull/152314 -[#152375]: https://github.com/cockroachdb/cockroach/pull/152375 -[#152631]: https://github.com/cockroachdb/cockroach/pull/152631 -[#152349]: https://github.com/cockroachdb/cockroach/pull/152349 -[#151485]: https://github.com/cockroachdb/cockroach/pull/151485 -[#152679]: https://github.com/cockroachdb/cockroach/pull/152679 -[#151814]: https://github.com/cockroachdb/cockroach/pull/151814 -[#151879]: https://github.com/cockroachdb/cockroach/pull/151879 -[#152471]: https://github.com/cockroachdb/cockroach/pull/152471 -[#151874]: https://github.com/cockroachdb/cockroach/pull/151874 -[#152600]: https://github.com/cockroachdb/cockroach/pull/152600 -[#151528]: https://github.com/cockroachdb/cockroach/pull/151528 -[#152559]: https://github.com/cockroachdb/cockroach/pull/152559 -[#152184]: https://github.com/cockroachdb/cockroach/pull/152184 -[#152186]: https://github.com/cockroachdb/cockroach/pull/152186 + presence of correlated errors. #151874 + + diff --git a/src/current/_includes/releases/v25.2/v25.2.7.md b/src/current/_includes/releases/v25.2/v25.2.7.md index b0daaf57abf..388bfefb8ac 100644 --- a/src/current/_includes/releases/v25.2/v25.2.7.md +++ b/src/current/_includes/releases/v25.2/v25.2.7.md @@ -6,21 +6,13 @@ Release Date: October 17, 2025

Bug fixes

-- Fixed a bug where an `INSERT` statement could fail with a type checking error while adding a `BIT(n)` column. [#152964][#152964] -- Fixed a bug where index creation could fail due to validation errors if the schema change was retried or paused/resumed during the backfill. [#153596][#153596] -- Fixed a bug introduced in v25.1.0 that would cause a node panic if a `SIGINT` signal was sent during the execution of a `CHECK EXTERNAL CONNECTION` command. [#153601][#153601] -- Fixed a bug where `ALTER POLICY` was incorrectly dropping dependency tracking for functions, sequences, or types in policy expressions. [#153808][#153808] -- Fixed a runtime error that could be hit if a new secondary index had a name collision with a primary index. [#154015][#154015] -- Fixed a bug that caused panics when executing `COPY` into a table with hidden columns and expression indexes. The panic only occurred when the session setting `expect_and_ignore_not_visible_columns_in_copy` was enabled. This bug was introduced with `expect_and_ignore_not_visible_columns_in_copy` in v22.1.0. [#154290][#154290] -- Disabled the `kv.lock_table.unreplicated_lock_reliability.split.enabled` feature, which could lead to a node crash. [#155418][#155418] -- Fixed a bug where the presence of duplicate temporary tables in a backup caused the restore to fail with an error containing the text `restoring table desc and namespace entries: table already exists`. [#154398][#154398] +- Fixed a bug where an `INSERT` statement could fail with a type checking error while adding a `BIT(n)` column. #152964 +- Fixed a bug where index creation could fail due to validation errors if the schema change was retried or paused/resumed during the backfill. #153596 +- Fixed a bug introduced in v25.1.0 that would cause a node panic if a `SIGINT` signal was sent during the execution of a `CHECK EXTERNAL CONNECTION` command. 
#153601 +- Fixed a bug where `ALTER POLICY` was incorrectly dropping dependency tracking for functions, sequences, or types in policy expressions. #153808 +- Fixed a runtime error that could be hit if a new secondary index had a name collision with a primary index. #154015 +- Fixed a bug that caused panics when executing `COPY` into a table with hidden columns and expression indexes. The panic only occurred when the session setting `expect_and_ignore_not_visible_columns_in_copy` was enabled. This bug was introduced with `expect_and_ignore_not_visible_columns_in_copy` in v22.1.0. #154290 +- Disabled the `kv.lock_table.unreplicated_lock_reliability.split.enabled` feature, which could lead to a node crash. #155418 +- Fixed a bug where the presence of duplicate temporary tables in a backup caused the restore to fail with an error containing the text `restoring table desc and namespace entries: table already exists`. #154398 -[#153808]: https://github.com/cockroachdb/cockroach/pull/153808 -[#154015]: https://github.com/cockroachdb/cockroach/pull/154015 -[#154290]: https://github.com/cockroachdb/cockroach/pull/154290 -[#155418]: https://github.com/cockroachdb/cockroach/pull/155418 -[#154398]: https://github.com/cockroachdb/cockroach/pull/154398 -[#152964]: https://github.com/cockroachdb/cockroach/pull/152964 -[#153596]: https://github.com/cockroachdb/cockroach/pull/153596 -[#153601]: https://github.com/cockroachdb/cockroach/pull/153601 diff --git a/src/current/_includes/releases/v25.2/v25.2.8.md b/src/current/_includes/releases/v25.2/v25.2.8.md index f8bd938f294..ea4dbb7b718 100644 --- a/src/current/_includes/releases/v25.2/v25.2.8.md +++ b/src/current/_includes/releases/v25.2/v25.2.8.md @@ -6,11 +6,9 @@ Release Date: October 30, 2025

Operational changes

-- In order to selectively capture traces for transactions running in an active workload without having to capture them via statement diagnostic bundles, customers can now use the `sql.trace.txn.sample_rate` cluster setting to enable tracing for a fraction of their workload. The `sql.trace.txn.enable_threshold` will still need to be set to a positive value to provide a filter for how slow a transaction needs to be after being sampled to merit emitting a trace. Traces are emitted to the `SQL_EXEC` logging channel. [#156409][#156409] +- In order to selectively capture traces for transactions running in an active workload without having to capture them via statement diagnostic bundles, customers can now use the `sql.trace.txn.sample_rate` cluster setting to enable tracing for a fraction of their workload. The `sql.trace.txn.enable_threshold` will still need to be set to a positive value to provide a filter for how slow a transaction needs to be after being sampled to merit emitting a trace. Traces are emitted to the `SQL_EXEC` logging channel. #156409

Bug fixes

-- Fixed a bug in the `cockroach node drain` command where draining a node using virtual clusters (such as clusters running Physical Cluster Replication (PCR)) could return before the drain was complete, possibly resulting in shutting down the node while it still had active SQL clients and range leases. [#156312][#156312] +- Fixed a bug in the `cockroach node drain` command where draining a node using virtual clusters (such as clusters running Physical Cluster Replication (PCR)) could return before the drain was complete, possibly resulting in shutting down the node while it still had active SQL clients and range leases. #156312 -[#156409]: https://github.com/cockroachdb/cockroach/pull/156409 -[#156312]: https://github.com/cockroachdb/cockroach/pull/156312 diff --git a/src/current/_includes/releases/v25.2/v25.2.9.md b/src/current/_includes/releases/v25.2/v25.2.9.md index cb9d401edf0..04dd68b0f25 100644 --- a/src/current/_includes/releases/v25.2/v25.2.9.md +++ b/src/current/_includes/releases/v25.2/v25.2.9.md @@ -6,14 +6,12 @@ Release Date: November 14, 2025

SQL language changes

-- Added the `sql.statements.rows_read.count` metric that counts the number of index rows read by SQL statements. [#156595][#156595] -- Added the `sql.statements.index_rows_written.count` metric that counts the number of primary and secondary index rows modified by SQL statements. [#156595][#156595] -- Added the `sql.statements.index_bytes_written.count` metric that counts the number of primary and secondary index bytes modified by SQL statements. [#156595][#156595] -- Added the `sql.statements.bytes_read.count` metric that counts the number of bytes scanned by SQL statements. [#156595][#156595] +- Added the `sql.statements.rows_read.count` metric that counts the number of index rows read by SQL statements. #156595 +- Added the `sql.statements.index_rows_written.count` metric that counts the number of primary and secondary index rows modified by SQL statements. #156595 +- Added the `sql.statements.index_bytes_written.count` metric that counts the number of primary and secondary index bytes modified by SQL statements. #156595 +- Added the `sql.statements.bytes_read.count` metric that counts the number of bytes scanned by SQL statements. #156595

Bug fixes

-- Fixed a bug where changefeeds using CDC queries could sometimes unexpectedly fail after a schema change with a descriptor retrieval error. [#156552][#156552] +- Fixed a bug where changefeeds using CDC queries could sometimes unexpectedly fail after a schema change with a descriptor retrieval error. #156552 -[#156595]: https://github.com/cockroachdb/cockroach/pull/156595 -[#156552]: https://github.com/cockroachdb/cockroach/pull/156552 diff --git a/src/current/_includes/releases/v25.3/cluster-setting-changes.md b/src/current/_includes/releases/v25.3/cluster-setting-changes.md index 31a33f1e515..1b99683d976 100644 --- a/src/current/_includes/releases/v25.3/cluster-setting-changes.md +++ b/src/current/_includes/releases/v25.3/cluster-setting-changes.md @@ -2,19 +2,14 @@ Changes to [cluster settings]({% link v25.3/cluster-settings.md %}) should be re
New settings
-- `sql.metrics.application_name.enabled` - Default to `false` and can be set to `true` to display the application name on supported metrics. [#144610][#144610] -- `sql.metrics.database_name.enabled` - Default to `false` and can be set to `true` to display the database name on supported metrics. [#144610][#144610] -- `sql.sqlcommenter.enabled` - This feature is disabled by default and can be enabled using the `sql.sqlcommenter.enabled` cluster setting. Comments must follow the [SQLCommenter specification](https://google.github.io/sqlcommenter/spec/). [#145435][#145435] +- `sql.metrics.application_name.enabled` - Defaults to `false` and can be set to `true` to display the application name on supported metrics. #144610 +- `sql.metrics.database_name.enabled` - Defaults to `false` and can be set to `true` to display the database name on supported metrics. #144610 +- `sql.sqlcommenter.enabled` - This feature is disabled by default and can be enabled using the `sql.sqlcommenter.enabled` cluster setting. Comments must follow the [SQLCommenter specification](https://google.github.io/sqlcommenter/spec/). #145435 - `sql.trace.txn.sample_rate` and `sql.trace.txn.enable_threshold` - In order to selectively capture traces for transactions running in an active workload without having to capture them via statement diagnostic bundles, customers can now use the `sql.trace.txn.sample_rate` cluster setting to enable tracing for a fraction of their workload. The `sql.trace.txn.enable_threshold` will still need to be set to a positive value to provide a filter for how slow a transaction needs to be after being sampled to merit emitting a trace. Traces are emitted to the `SQL_EXEC` logging channel.
Setting changes
-- The value of `sql.stats.error_on_concurrent_create_stats.enabled` now defaults to `false`, suppressing error counters for auto stats jobs that fail due to concurrent stats jobs in progress. [#149857][#149857] -- The cluster setting `server.client_cert_expiration_cache.capacity` has been deprecated. The client certificate cache now evicts client certificates based on expiration time. [#144181][#144181] -- To prevent unnecessary queuing in admission control CPU queues, the `goschedstats.always_use_short_sample_period.enabled` setting default was changed to `true` [#146014][#146014] +- The value of `sql.stats.error_on_concurrent_create_stats.enabled` now defaults to `false`, suppressing error counters for auto stats jobs that fail due to concurrent stats jobs in progress. #149857 +- The cluster setting `server.client_cert_expiration_cache.capacity` has been deprecated. The client certificate cache now evicts client certificates based on expiration time. #144181 +- To prevent unnecessary queuing in admission control CPU queues, the `goschedstats.always_use_short_sample_period.enabled` setting default was changed to `true` #146014 -[#144181]: https://github.com/cockroachdb/cockroach/pull/144181 -[#144610]: https://github.com/cockroachdb/cockroach/pull/144610 -[#145435]: https://github.com/cockroachdb/cockroach/pull/145435 -[#146014]: https://github.com/cockroachdb/cockroach/pull/146014 -[#149857]: https://github.com/cockroachdb/cockroach/pull/149857 diff --git a/src/current/_includes/releases/v25.3/deprecations.md b/src/current/_includes/releases/v25.3/deprecations.md index bd78876be3b..e23a8afd853 100644 --- a/src/current/_includes/releases/v25.3/deprecations.md +++ b/src/current/_includes/releases/v25.3/deprecations.md @@ -1,10 +1,6 @@ The following deprecations/removals are announced in v25.3. -- The cluster setting `server.client_cert_expiration_cache.capacity` has been deprecated. 
The client certificate cache now evicts client certificates based on expiration time. [#144181][#144181] -- `IMPORT TABLE` as well `PGDUMP` and `MYSQLDUMP` formats of `IMPORT` are now fully removed. These have been deprecated since v23.2. [#148248][#148248] +- The cluster setting `server.client_cert_expiration_cache.capacity` has been deprecated. The client certificate cache now evicts client certificates based on expiration time. #144181 +- `IMPORT TABLE` as well `PGDUMP` and `MYSQLDUMP` formats of `IMPORT` are now fully removed. These have been deprecated since v23.2. #148248 - Removed the 'started' column in `SHOW JOBS`, which was a duplicate of the 'created' column. - [#148464][#148464] - -[#144181]: https://github.com/cockroachdb/cockroach/pull/144181 -[#148248]: https://github.com/cockroachdb/cockroach/pull/148248 -[#148464]: https://github.com/cockroachdb/cockroach/pull/148464 \ No newline at end of file + #148464 diff --git a/src/current/_includes/releases/v25.3/v25.3.0-alpha.1.md b/src/current/_includes/releases/v25.3/v25.3.0-alpha.1.md index bb3ab0397b7..d1c04317405 100644 --- a/src/current/_includes/releases/v25.3/v25.3.0-alpha.1.md +++ b/src/current/_includes/releases/v25.3/v25.3.0-alpha.1.md @@ -5,50 +5,50 @@ Release Date: June 9, 2025 {% include releases/new-release-downloads-docker-image.md release=include.release %}

Security updates

-- The client for the SQL connection will now receive an error along with an error in the `OPS` channel if trying to connect with an unsupported cipher. [#146522][#146522] +- The client for the SQL connection will now receive an error along with an error in the `OPS` channel if trying to connect with an unsupported cipher. #146522

General changes

- Enhanced the `/status/v2/hotranges` endpoint by adding two new filtering options: - `per_node_limit` (`int32`): Specifies the maximum number of hot ranges to return per node. Defaults to `128` if not set. - - `stats_only` (`bool`): When set to `true`, returns only the statistics for hot ranges without fetching descriptor information, such as databases, tables, and indexes. [#144091][#144091] -- Changefeeds now round down the progress of each range to 1 second, in order to cover more ranges in fine-grained checkpointing. [#146979][#146979] -- Reduced the maximum backoff for changefeed retries from 10 minutes to 1 minute, which results in faster recovery from transient errors. [#146448][#146448] -- The secret keys in Azure cloud storage URIs are now redacted. [#147022][#147022] + - `stats_only` (`bool`): When set to `true`, returns only the statistics for hot ranges without fetching descriptor information, such as databases, tables, and indexes. #144091 +- Changefeeds now round down the progress of each range to 1 second, in order to cover more ranges in fine-grained checkpointing. #146979 +- Reduced the maximum backoff for changefeed retries from 10 minutes to 1 minute, which results in faster recovery from transient errors. #146448 +- The secret keys in Azure cloud storage URIs are now redacted. #147022

SQL language changes

-- Added a new session variable `create_table_with_schema_locked`, which can be used to ensure all tables created by a session have the storage parameter `schema_locked` set. [#143892][#143892] +- Added a new session variable `create_table_with_schema_locked`, which can be used to ensure all tables created by a session have the storage parameter `schema_locked` set. #143892 - The following syntax is now supported: - `GRANT ... ON ALL ROUTINES IN SCHEMA ...` - `REVOKE ... ON ALL ROUTINES IN SCHEMA ...` - `ALTER DEFAULT PRIVILEGES GRANT ... ON ROUTINES ...` - `ALTER DEFAULT PRIVILEGES REVOKE ... ON ROUTINES ...` - The `ROUTINES` keyword makes the command apply to both functions and stored procedures. Note that `ALTER DEFAULT PRIVILEGES ... ON FUNCTIONS` already applied to stored procedures (which aligns with the PostgreSQL behavior), and that is not changing. [#144189][#144189] -- The variable arguments of polymorphic built-in functions (e.g., `concat`, `num_nulls`, `format`, `concat_ws`) no longer need to have the same type, matching PostgreSQL behavior. As a result, CockroachDB's type inference engine will no longer be able to infer argument types in some cases where it previously could, and there is a possibility that CockroachDB applications will encounter new errors. The new session variable `use_pre_25_2_variadic_builtins` restores the previous behavior (and limitations). [#144522][#144522] -- Added new cluster settings: `sql.metrics.application_name.enabled` and `sql.metrics.database_name.enabled`. These settings default to `false` and can be set to `true` to display the application name and database name, respectively, on supported metrics. [#144610][#144610] + The `ROUTINES` keyword makes the command apply to both functions and stored procedures. Note that `ALTER DEFAULT PRIVILEGES ... ON FUNCTIONS` already applied to stored procedures (which aligns with the PostgreSQL behavior), and that is not changing. 
#144189 +- The variable arguments of polymorphic built-in functions (e.g., `concat`, `num_nulls`, `format`, `concat_ws`) no longer need to have the same type, matching PostgreSQL behavior. As a result, CockroachDB's type inference engine will no longer be able to infer argument types in some cases where it previously could, and there is a possibility that CockroachDB applications will encounter new errors. The new session variable `use_pre_25_2_variadic_builtins` restores the previous behavior (and limitations). #144522 +- Added new cluster settings: `sql.metrics.application_name.enabled` and `sql.metrics.database_name.enabled`. These settings default to `false` and can be set to `true` to display the application name and database name, respectively, on supported metrics. #144610 - Added support for query tagging, which allows users to add query tags to their SQL statements via comments. These query tags are included in: - All log entries generated during the execution of a SQL statement and are prefixed by `querytag-`. - Traces and are prefixed by `querytag-`. - In the `crdb_internal.cluster_execution_insights` and `crdb_internal.node_execution_insights` virtual tables in a new `query_tags` JSONB column. - This feature is disabled by default and can be enabled using the `sql.sqlcommenter.enabled` cluster setting. Comments must follow the [SQLCommenter specification](https://google.github.io/sqlcommenter/spec/). [#145435][#145435] -- `~~*` and `!~~*` are now supported aliases for `ILIKE` and `NOT ILIKE`. [#146764][#146764] -- The `information_schema.triggers` table is now populated with trigger metadata. Users can query this table to see all triggers defined in their database, including the trigger name, timing (`BEFORE`/`AFTER`), event type (`INSERT`/`UPDATE`/`DELETE`), and associated function. Each trigger event appears as a separate row in the table. [#147237][#147237] -- The `pg_catalog.pg_trigger` table now returns metadata about database triggers. 
[#147248][#147248] -- Deterministic collations are now supported with `LIKE`. A deterministic collation considers strings to be equal only if they consist of the same byte sequence. [#147045][#147045] -- Assigning to an element of a composite-typed variable in a PL/pgSQL routine now respects case-sensitivity rules. For example, a field named `"FOO_Bar"` can be assigned like `NEW."FOO_Bar" = 100`. [#143579][#143579] + This feature is disabled by default and can be enabled using the `sql.sqlcommenter.enabled` cluster setting. Comments must follow the [SQLCommenter specification](https://google.github.io/sqlcommenter/spec/). #145435 +- `~~*` and `!~~*` are now supported aliases for `ILIKE` and `NOT ILIKE`. #146764 +- The `information_schema.triggers` table is now populated with trigger metadata. Users can query this table to see all triggers defined in their database, including the trigger name, timing (`BEFORE`/`AFTER`), event type (`INSERT`/`UPDATE`/`DELETE`), and associated function. Each trigger event appears as a separate row in the table. #147237 +- The `pg_catalog.pg_trigger` table now returns metadata about database triggers. #147248 +- Deterministic collations are now supported with `LIKE`. A deterministic collation considers strings to be equal only if they consist of the same byte sequence. #147045 +- Assigning to an element of a composite-typed variable in a PL/pgSQL routine now respects case-sensitivity rules. For example, a field named `"FOO_Bar"` can be assigned like `NEW."FOO_Bar" = 100`. #143579

Operational changes

-- Prometheus metrics are now also available at the `/metrics` endpoint, in addition to the existing `/_status/vars` endpoint. The new `/metrics` endpoint emits statically labeled metrics and will evolve more rapidly as CockroachDB migrates metrics to use labels instead of defining different metric names. For compatibility, users can continue to use `/_status/vars`, where metric names will remain stable. [#143536][#143536] -- Added the new latency metrics: `sql.service.latency.historical`, `sql.service.latency.consistent`, `sql.exec.latency.historical`, and `sql.exec.latency.consistent` for easier query optimizations. [#142826][#142826] -- Partial index schema changes are supported in replicating tables when `logical_replication.consumer.immediate_mode_writer` is not set to `legacy-kv`. [#144508][#144508] -- The cluster setting `server.client_cert_expiration_cache.capacity` has been deprecated. The client certificate cache now evicts client certificates based on expiration time. [#144181][#144181] -- Logs for hot ranges (`hot_ranges_stats` events) have been moved to the `HEALTH` logging channel. [#144567][#144567] -- Added a new metric, `kv.loadsplitter.cleardirection`, which increments when the load-based splitter observes that more than 80% of replica access samples are moving in a single direction (either left/descending or right/ascending). [#143927][#143927] -- When the `server.telemetry.hot_ranges_stats.enabled` cluster setting is enabled, nodes check for hot ranges every minute instead of every 4 hours. A node logs its hot ranges when any single replica exceeds 250 ms of CPU time per second. In multi-tenant deployments, the check runs every 5 minutes and logs hot ranges for the entire cluster. [#144414][#144414] -- Added the metric `changefeed.checkpoint.timestamp_count` that measures the number of unique timestamps in a changefeed span-level checkpoint. It may be useful to monitor this metric to determine if quantization settings should be changed. 
[#145117][#145117] +- Prometheus metrics are now also available at the `/metrics` endpoint, in addition to the existing `/_status/vars` endpoint. The new `/metrics` endpoint emits statically labeled metrics and will evolve more rapidly as CockroachDB migrates metrics to use labels instead of defining different metric names. For compatibility, users can continue to use `/_status/vars`, where metric names will remain stable. #143536 +- Added the new latency metrics: `sql.service.latency.historical`, `sql.service.latency.consistent`, `sql.exec.latency.historical`, and `sql.exec.latency.consistent` for easier query optimizations. #142826 +- Partial index schema changes are supported in replicating tables when `logical_replication.consumer.immediate_mode_writer` is not set to `legacy-kv`. #144508 +- The cluster setting `server.client_cert_expiration_cache.capacity` has been deprecated. The client certificate cache now evicts client certificates based on expiration time. #144181 +- Logs for hot ranges (`hot_ranges_stats` events) have been moved to the `HEALTH` logging channel. #144567 +- Added a new metric, `kv.loadsplitter.cleardirection`, which increments when the load-based splitter observes that more than 80% of replica access samples are moving in a single direction (either left/descending or right/ascending). #143927 +- When the `server.telemetry.hot_ranges_stats.enabled` cluster setting is enabled, nodes check for hot ranges every minute instead of every 4 hours. A node logs its hot ranges when any single replica exceeds 250 ms of CPU time per second. In multi-tenant deployments, the check runs every 5 minutes and logs hot ranges for the entire cluster. #144414 +- Added the metric `changefeed.checkpoint.timestamp_count` that measures the number of unique timestamps in a changefeed span-level checkpoint. It may be useful to monitor this metric to determine if quantization settings should be changed. 
#145117 - In a physical cluster replication (PCR) deployment, it is not possible for the standby system virtual cluster, or the reader virtual cluster to upgrade the reader virtual cluster by setting the version cluster setting. It is necessary to: 1. Upgrade the standby system virtual cluster. 1. Upgrade the primary system virtual cluster. @@ -56,156 +56,74 @@ Release Date: June 9, 2025 1. Wait for the replicated time to advance past the time the primary virtual cluster upgraded. 1. Shut down the reader virtual cluster. 1. Upgrade the destination host cluster. - 1. Re-initialize the reader virtual cluster with `ALTER VIRTUAL CLUSTER SET REPLICATION READ VIRTUAL CLUSTER`. [#146127][#146127] -- Added job tracing support to changefeeds. [#144412][#144412] + 1. Re-initialize the reader virtual cluster with `ALTER VIRTUAL CLUSTER SET REPLICATION READ VIRTUAL CLUSTER`. #146127 +- Added job tracing support to changefeeds. #144412

Command-line changes

-- Node attributes (`attrs`) will now appear in the `node status` CLI command. [#143421][#143421] -- Updated the `\d ` command to show policy and Row Level Security information similar to what is shown in the output of `SHOW CREATE TABLE`. [#146215][#146215] -- Added the `--validate-zip-file` flag to the `cockroach debug zip` command. This flag performs a quick validation check to ensure that the generated zip file is not corrupted. The flag is enabled by default. [#146192][#146192] -- The SQL shell now supports the compact output mode when `auto_trace` is enabled. [#146432][#146432] +- Node attributes (`attrs`) will now appear in the `node status` CLI command. #143421 +- Updated the `\d
` command to show policy and Row Level Security information similar to what is shown in the output of `SHOW CREATE TABLE`. #146215 +- Added the `--validate-zip-file` flag to the `cockroach debug zip` command. This flag performs a quick validation check to ensure that the generated zip file is not corrupted. The flag is enabled by default. #146192 +- The SQL shell now supports the compact output mode when `auto_trace` is enabled. #146432

DB Console changes

-- Schema insights that recommend replacing an index were previously a two-statement command consisting of a `CREATE INDEX` and a `DROP INDEX` statement. When these two DDL statements were run as a single batched command, it was possible for one statement to succeed and one to fail. This is because DDL statements do not have the same atomicity guarantees as other SQL statements in CockroachDB. Index-replacement insights are now a single `CREATE INDEX` statement followed by a comment with additional DDL statements to be run manually: an `ALTER INDEX ... NOT VISIBLE` statement, which makes the old index invisible to the optimizer, followed by a `DROP INDEX` statement that should only be run after making the old index invisible and verifying that workload performance is satisfactory. [#144101][#144101] -- Updated the titles of the disk throughput graphs on the Metrics page Hardware dashboard to display only "Bytes/s" instead of including a specific magnitude, "MiB/s". The titles of the graphs are now “"Disk Read Bytes/s" and "Disk Write Bytes/s". [#147462][#147462] +- Schema insights that recommend replacing an index were previously a two-statement command consisting of a `CREATE INDEX` and a `DROP INDEX` statement. When these two DDL statements were run as a single batched command, it was possible for one statement to succeed and one to fail. This is because DDL statements do not have the same atomicity guarantees as other SQL statements in CockroachDB. Index-replacement insights are now a single `CREATE INDEX` statement followed by a comment with additional DDL statements to be run manually: an `ALTER INDEX ... NOT VISIBLE` statement, which makes the old index invisible to the optimizer, followed by a `DROP INDEX` statement that should only be run after making the old index invisible and verifying that workload performance is satisfactory. 
#144101 +- Updated the titles of the disk throughput graphs on the Metrics page Hardware dashboard to display only "Bytes/s" instead of including a specific magnitude, "MiB/s". The titles of the graphs are now "Disk Read Bytes/s" and "Disk Write Bytes/s". #147462

Bug fixes

-- Fixed a bug where using values `changefeed.aggregator.flush_jitter`, `min_checkpoint_frequency` such that `changefeed.aggregator.flush_jitter * min_checkpoint_frequency < 1` would cause a panic. Jitter will now be disabled in this case. [#144304][#144304] -- Fixed a bug that could cause queries that perform work in parallel to ignore the requested quality-of-service level. Affected operations include lookup joins, DistSQL execution, and foreign-key checks. [#144427][#144427] -- Improved the performance of `SHOW CREATE TABLE` on multi-region databases with large numbers of objects. [#144900][#144900] -- Fixed a bug where running `DROP INDEX` on a hash-sharded index did not properly detect dependencies from functions and procedures on the shard column. This caused the `DROP INDEX` statement to fail with an internal validation error. Now the statement returns a correct error message, and using `DROP INDEX ... CASCADE` works as expected by dropping the dependent functions and procedures. [#145107][#145107] -- Fixed a bug that prevented variable references using ordinal syntax (like `$1`) from reflecting updates to the variable. Referencing variables declared in PL/pgSQL blocks (instead of parameters) via ordinal syntax is now disallowed. The bug had existed since v24.1. [#144347][#144347] -- Fixed a bug that caused index expression elements of primary keys to be shown incorrectly in the output of `SHOW CREATE TABLE`. [#144716][#144716] -- Fixed a bug that could lead to schema changes hanging after a cluster recovered from availability issues. [#145462][#145462] -- Previously, on a table with multiple column families, CockroachDB could encounter a `Non-nullable column "‹×›:‹×›" with no value` error in rare cases during table statistics collection. The bug was present since v19.2 and is now fixed. 
[#145481][#145481] -- Fixed a bug that could cause a row-level TTL job to fail with the error "comparison of two different versions of enum" if an `ENUM` type referenced by the table experienced a schema change. [#145374][#145374] -- Fixed a bug where the physical cluster replication (PCR) reader catalog job could hit validation errors when schema objects had dependencies between them (for example, when a sequence's default expression was being removed). [#145972][#145972] -- Creating a vector index on a table that contains a `NULL` vector value will no longer cause an internal error. [#145983][#145983] -- Fixed an internal assertion failure that could occur during operations like `ALTER TYPE` or `ALTER DATABASE ... ADD REGION` when temporary tables were present. [#145551][#145551] -- Row-level security (RLS) `SELECT` policies during `UPDATE` operations are now only applied when referenced columns appear in the `SET` or `WHERE` clauses, matching the behavior of PostgreSQL. This improves compatibility. [#145344][#145344] -- Fixed an issue where using inline log configuration could cause internal errors on the DB Console's Logs page for a node at `#/node/{nodeID}/logs`. [#145329][#145329] -- Fixed an integer overflow in the `split_part` function when using extremely negative field positions like Go's `math.MinInt64`. [#146271][#146271] -- Fixed incorrect application of `SELECT` policies to `RETURNING` clauses in `INSERT` and `UPDATE` when no table columns were referenced. [#145890][#145890] -- Fixed a bug that prevented `TRUNCATE` from succeeding if any indexes on the table had back-reference dependencies, such as from a view or function referencing the index. [#146287][#146287] -- Fixed a bug where `ALTER TABLE` operations with multiple commands could generate invalid zone configurations. [#146369][#146369] -- Fixed a bug where an invalid comment in the `system.comment` table for a schema object could make it inaccessible. 
[#146213][#146213] -- Fixed a bug where a CockroachDB node could crash when executing `DO` statements that contain currently unsupported DDL statements like `CREATE TYPE` in a non-default configuration (additional logging needed to be enabled, e.g., via the `sql.log.all_statements.enabled` cluster setting). This bug was introduced in v25.1. [#146406][#146406] -- Prevent use of future timestamps when using `AS OF SYSTEM TIME` with `CREATE TABLE ... AS` and materialized views. Previously, such timestamps could cause errors, delays, or hangs. [#146446][#146446] -- Fixed an internal error that could be hit when `ADD COLUMN UNIQUE` and `ALTER PRIMARY KEY` were executed within the same transaction. [#146567][#146567] -- Fixed a bug that prevented temporary views and sequences from being created if the `pg_temp` schema was explicitly specified in the qualified name of the object being created. [#146586][#146586] -- Fixed a bug where CockroachDB would not use the vectorized fast path for `COPY` when it was supported. The bug was only present in previous v25.2 releases. [#146696][#146696] -- Errors triggered by DB Console activity no longer cause the node to crash. [#145563][#145563] -- Fixed a bug to prevent HTTP connections from stopping server shutdown. [#146744][#146744] -- The MVCC timestamp is now emitted correctly when the `mvcc_timestamp` is used with CDC queries. [#146836][#146836] -- Fixed a bug in v25.2.0 where a vector search operator could drop user-supplied filters if the same vector column was indexed twice and a vector index with no prefix columns was defined after a vector index with prefix columns. [#146259][#146259] -- Fixed a bug that could cause the `cockroach` process to `segfault` when collecting runtime execution traces (typically collected via the **Advanced Debug** page in the Console). [#146883][#146883] -- Fixed a data race in the `cloudstorage` sink. 
[#146297][#146297] -- Fixed a bug where the `kv.rangefeed.closed_timestamp.slow_ranges` would not be incremented when a rangefeed closed timestamp was slower than the target threshold. [#146949][#146949] -- Fixed a bug that could cause an `AFTER` trigger to fail with `client already committed or rolled back the transaction` if the query also contained foreign-key cascades. The bug had existed since `AFTER` triggers were introduced in v24.3. [#146890][#146890] -- Prevent dropping columns or indexes that are still referenced by triggers. Previously, these operations could succeed silently, potentially breaking trigger functionality. [#146683][#146683] -- Fixed a bug where searching a vector with a query vector that doesn't match the dimensions of the vector column in the table would cause a node to crash. [#146848][#146848] -- Specifying types for a subset of columns in a generator function's column definition list now results in a syntax error instead of an internal error. [#145492][#145492] -- Fixed a bug that caused the SQL Activity > Statement Fingerprint page to fail to load details for statements run with application names containing a `#` character. [#147021][#147021] -- CockroachDB could previously incorrectly evaluate `to_regclass`, `to_regnamespace`, `to_regproc`, `to_regprocedure`, `to_regrole`, and `to_regtype` builtin functions when the query using them happened to be evaluated in distributed fashion. The bug has been present since the introduction of these builtins in v23.1 and is now fixed. [#147362][#147362] -- Fixed a bug that caused the optimizer to ignore index hints when optimizing some forms of prepared statements. This could result in one of two unexpected behaviors: a query errors with the message `index cannot be used for this query` when the index can actually be used; or a query uses an index that does not adhere to the hint. 
The hints relevant to this bug are regular index hints, e.g., `SELECT * FROM tab@index`, `FORCE_INVERTED_INDEX`, and `FORCE_ZIGZAG`. [#147368][#147368] -- Fixed a bug where the `pg_catalog.pg_policy` table could contain duplicate OID values when multiple tables had policies with the same policy ID. All rows in `pg_policy` now have unique OIDs as required. [#147373][#147373] -- Fixed a bug where the `rolbypassrls` column in `pg_roles` and `pg_authid` tables always returned false, even for roles with the `BYPASSRLS` option. [#147357][#147357] -- Fixed a bug that could cause stable expressions to be folded in cached query plans. The bug could cause stable expressions like `current_setting` to return the wrong result if used in a prepared statement. The bug was introduced in point releases v23.2.22, v24.1.14, v24.3.9, and v25.1.2, and the v25.2 alpha. [#147187][#147187] -- Fixed an issue where updating child metrics and reinitializing metrics at the same time could cause scrape errors. [#147486][#147486] -- Fixed a runtime panic in the `substring_index` function that occurred when the count argument was the minimum 64-bit integer value. [#147546][#147546] -- Fixed a memory leak in index backfill jobs where completed spans were duplicated in memory on each progress update after resuming from a checkpoint. This could cause out-of-memory (OOM) errors when backfilling indexes on large tables with many ranges. This bug affected release version v25.2.0 and pre-release versions v25.2.0-alpha.3 through v25.2.0-rc.1. [#147511][#147511] -- Fixed a bug where prepared statements on schema changes could fail with runtime errors. [#147658][#147658] -- Fixed an issue with logical data replication (LDR) where the presence of a unique index may cause spurious dead letter queue (DLQ) entries if the unique index has a smaller index ID than the primary key index. [#147117][#147117] -- Scheduled backups now prevent multiple compaction jobs from running in parallel on its backups. 
[#145930][#145930] -- Removal of triggers during a restore now accounts for back references that existed because of triggers. [#147306][#147306] +- Fixed a bug where using values `changefeed.aggregator.flush_jitter`, `min_checkpoint_frequency` such that `changefeed.aggregator.flush_jitter * min_checkpoint_frequency < 1` would cause a panic. Jitter will now be disabled in this case. #144304 +- Fixed a bug that could cause queries that perform work in parallel to ignore the requested quality-of-service level. Affected operations include lookup joins, DistSQL execution, and foreign-key checks. #144427 +- Improved the performance of `SHOW CREATE TABLE` on multi-region databases with large numbers of objects. #144900 +- Fixed a bug where running `DROP INDEX` on a hash-sharded index did not properly detect dependencies from functions and procedures on the shard column. This caused the `DROP INDEX` statement to fail with an internal validation error. Now the statement returns a correct error message, and using `DROP INDEX ... CASCADE` works as expected by dropping the dependent functions and procedures. #145107 +- Fixed a bug that prevented variable references using ordinal syntax (like `$1`) from reflecting updates to the variable. Referencing variables declared in PL/pgSQL blocks (instead of parameters) via ordinal syntax is now disallowed. The bug had existed since v24.1. #144347 +- Fixed a bug that caused index expression elements of primary keys to be shown incorrectly in the output of `SHOW CREATE TABLE`. #144716 +- Fixed a bug that could lead to schema changes hanging after a cluster recovered from availability issues. #145462 +- Previously, on a table with multiple column families, CockroachDB could encounter a `Non-nullable column "‹×›:‹×›" with no value` error in rare cases during table statistics collection. The bug was present since v19.2 and is now fixed. 
#145481 +- Fixed a bug that could cause a row-level TTL job to fail with the error "comparison of two different versions of enum" if an `ENUM` type referenced by the table experienced a schema change. #145374 +- Fixed a bug where the physical cluster replication (PCR) reader catalog job could hit validation errors when schema objects had dependencies between them (for example, when a sequence's default expression was being removed). #145972 +- Creating a vector index on a table that contains a `NULL` vector value will no longer cause an internal error. #145983 +- Fixed an internal assertion failure that could occur during operations like `ALTER TYPE` or `ALTER DATABASE ... ADD REGION` when temporary tables were present. #145551 +- Row-level security (RLS) `SELECT` policies during `UPDATE` operations are now only applied when referenced columns appear in the `SET` or `WHERE` clauses, matching the behavior of PostgreSQL. This improves compatibility. #145344 +- Fixed an issue where using inline log configuration could cause internal errors on the DB Console's Logs page for a node at `#/node/{nodeID}/logs`. #145329 +- Fixed an integer overflow in the `split_part` function when using extremely negative field positions like Go's `math.MinInt64`. #146271 +- Fixed incorrect application of `SELECT` policies to `RETURNING` clauses in `INSERT` and `UPDATE` when no table columns were referenced. #145890 +- Fixed a bug that prevented `TRUNCATE` from succeeding if any indexes on the table had back-reference dependencies, such as from a view or function referencing the index. #146287 +- Fixed a bug where `ALTER TABLE` operations with multiple commands could generate invalid zone configurations. #146369 +- Fixed a bug where an invalid comment in the `system.comment` table for a schema object could make it inaccessible. 
#146213 +- Fixed a bug where a CockroachDB node could crash when executing `DO` statements that contain currently unsupported DDL statements like `CREATE TYPE` in a non-default configuration (additional logging needed to be enabled, e.g., via the `sql.log.all_statements.enabled` cluster setting). This bug was introduced in v25.1. #146406 +- Prevent use of future timestamps when using `AS OF SYSTEM TIME` with `CREATE TABLE ... AS` and materialized views. Previously, such timestamps could cause errors, delays, or hangs. #146446 +- Fixed an internal error that could be hit when `ADD COLUMN UNIQUE` and `ALTER PRIMARY KEY` were executed within the same transaction. #146567 +- Fixed a bug that prevented temporary views and sequences from being created if the `pg_temp` schema was explicitly specified in the qualified name of the object being created. #146586 +- Fixed a bug where CockroachDB would not use the vectorized fast path for `COPY` when it was supported. The bug was only present in previous v25.2 releases. #146696 +- Errors triggered by DB Console activity no longer cause the node to crash. #145563 +- Fixed a bug to prevent HTTP connections from stopping server shutdown. #146744 +- The MVCC timestamp is now emitted correctly when the `mvcc_timestamp` is used with CDC queries. #146836 +- Fixed a bug in v25.2.0 where a vector search operator could drop user-supplied filters if the same vector column was indexed twice and a vector index with no prefix columns was defined after a vector index with prefix columns. #146259 +- Fixed a bug that could cause the `cockroach` process to `segfault` when collecting runtime execution traces (typically collected via the **Advanced Debug** page in the Console). #146883 +- Fixed a data race in the `cloudstorage` sink. #146297 +- Fixed a bug where the `kv.rangefeed.closed_timestamp.slow_ranges` would not be incremented when a rangefeed closed timestamp was slower than the target threshold. 
#146949 +- Fixed a bug that could cause an `AFTER` trigger to fail with `client already committed or rolled back the transaction` if the query also contained foreign-key cascades. The bug had existed since `AFTER` triggers were introduced in v24.3. #146890 +- Prevent dropping columns or indexes that are still referenced by triggers. Previously, these operations could succeed silently, potentially breaking trigger functionality. #146683 +- Fixed a bug where searching a vector with a query vector that doesn't match the dimensions of the vector column in the table would cause a node to crash. #146848 +- Specifying types for a subset of columns in a generator function's column definition list now results in a syntax error instead of an internal error. #145492 +- Fixed a bug that caused the SQL Activity > Statement Fingerprint page to fail to load details for statements run with application names containing a `#` character. #147021 +- CockroachDB could previously incorrectly evaluate `to_regclass`, `to_regnamespace`, `to_regproc`, `to_regprocedure`, `to_regrole`, and `to_regtype` builtin functions when the query using them happened to be evaluated in distributed fashion. The bug has been present since the introduction of these builtins in v23.1 and is now fixed. #147362 +- Fixed a bug that caused the optimizer to ignore index hints when optimizing some forms of prepared statements. This could result in one of two unexpected behaviors: a query errors with the message `index cannot be used for this query` when the index can actually be used; or a query uses an index that does not adhere to the hint. The hints relevant to this bug are regular index hints, e.g., `SELECT * FROM tab@index`, `FORCE_INVERTED_INDEX`, and `FORCE_ZIGZAG`. #147368 +- Fixed a bug where the `pg_catalog.pg_policy` table could contain duplicate OID values when multiple tables had policies with the same policy ID. All rows in `pg_policy` now have unique OIDs as required. 
#147373 +- Fixed a bug where the `rolbypassrls` column in `pg_roles` and `pg_authid` tables always returned false, even for roles with the `BYPASSRLS` option. #147357 +- Fixed a bug that could cause stable expressions to be folded in cached query plans. The bug could cause stable expressions like `current_setting` to return the wrong result if used in a prepared statement. The bug was introduced in point releases v23.2.22, v24.1.14, v24.3.9, and v25.1.2, and the v25.2 alpha. #147187 +- Fixed an issue where updating child metrics and reinitializing metrics at the same time could cause scrape errors. #147486 +- Fixed a runtime panic in the `substring_index` function that occurred when the count argument was the minimum 64-bit integer value. #147546 +- Fixed a memory leak in index backfill jobs where completed spans were duplicated in memory on each progress update after resuming from a checkpoint. This could cause out-of-memory (OOM) errors when backfilling indexes on large tables with many ranges. This bug affected release version v25.2.0 and pre-release versions v25.2.0-alpha.3 through v25.2.0-rc.1. #147511 +- Fixed a bug where prepared statements on schema changes could fail with runtime errors. #147658 +- Fixed an issue with logical data replication (LDR) where the presence of a unique index may cause spurious dead letter queue (DLQ) entries if the unique index has a smaller index ID than the primary key index. #147117 +- Scheduled backups now prevent multiple compaction jobs from running in parallel on their backups. #145930 +- Removal of triggers during a restore now accounts for back references that existed because of triggers. #147306

Performance improvements

-- Prepared statements are now more efficiently cached. [#144021][#144021] -- TTL jobs now respond to cluster topology changes by restarting and rebalancing across available nodes. [#145214][#145214] - -[#142826]: https://github.com/cockroachdb/cockroach/pull/142826 -[#143421]: https://github.com/cockroachdb/cockroach/pull/143421 -[#143536]: https://github.com/cockroachdb/cockroach/pull/143536 -[#143579]: https://github.com/cockroachdb/cockroach/pull/143579 -[#143892]: https://github.com/cockroachdb/cockroach/pull/143892 -[#143927]: https://github.com/cockroachdb/cockroach/pull/143927 -[#144021]: https://github.com/cockroachdb/cockroach/pull/144021 -[#144091]: https://github.com/cockroachdb/cockroach/pull/144091 -[#144101]: https://github.com/cockroachdb/cockroach/pull/144101 -[#144181]: https://github.com/cockroachdb/cockroach/pull/144181 -[#144189]: https://github.com/cockroachdb/cockroach/pull/144189 -[#144304]: https://github.com/cockroachdb/cockroach/pull/144304 -[#144347]: https://github.com/cockroachdb/cockroach/pull/144347 -[#144412]: https://github.com/cockroachdb/cockroach/pull/144412 -[#144414]: https://github.com/cockroachdb/cockroach/pull/144414 -[#144427]: https://github.com/cockroachdb/cockroach/pull/144427 -[#144508]: https://github.com/cockroachdb/cockroach/pull/144508 -[#144522]: https://github.com/cockroachdb/cockroach/pull/144522 -[#144567]: https://github.com/cockroachdb/cockroach/pull/144567 -[#144610]: https://github.com/cockroachdb/cockroach/pull/144610 -[#144716]: https://github.com/cockroachdb/cockroach/pull/144716 -[#144900]: https://github.com/cockroachdb/cockroach/pull/144900 -[#145107]: https://github.com/cockroachdb/cockroach/pull/145107 -[#145117]: https://github.com/cockroachdb/cockroach/pull/145117 -[#145214]: https://github.com/cockroachdb/cockroach/pull/145214 -[#145329]: https://github.com/cockroachdb/cockroach/pull/145329 -[#145344]: https://github.com/cockroachdb/cockroach/pull/145344 -[#145374]: 
https://github.com/cockroachdb/cockroach/pull/145374 -[#145435]: https://github.com/cockroachdb/cockroach/pull/145435 -[#145462]: https://github.com/cockroachdb/cockroach/pull/145462 -[#145481]: https://github.com/cockroachdb/cockroach/pull/145481 -[#145492]: https://github.com/cockroachdb/cockroach/pull/145492 -[#145551]: https://github.com/cockroachdb/cockroach/pull/145551 -[#145563]: https://github.com/cockroachdb/cockroach/pull/145563 -[#145890]: https://github.com/cockroachdb/cockroach/pull/145890 -[#145930]: https://github.com/cockroachdb/cockroach/pull/145930 -[#145972]: https://github.com/cockroachdb/cockroach/pull/145972 -[#145983]: https://github.com/cockroachdb/cockroach/pull/145983 -[#146127]: https://github.com/cockroachdb/cockroach/pull/146127 -[#146192]: https://github.com/cockroachdb/cockroach/pull/146192 -[#146213]: https://github.com/cockroachdb/cockroach/pull/146213 -[#146215]: https://github.com/cockroachdb/cockroach/pull/146215 -[#146259]: https://github.com/cockroachdb/cockroach/pull/146259 -[#146271]: https://github.com/cockroachdb/cockroach/pull/146271 -[#146287]: https://github.com/cockroachdb/cockroach/pull/146287 -[#146297]: https://github.com/cockroachdb/cockroach/pull/146297 -[#146369]: https://github.com/cockroachdb/cockroach/pull/146369 -[#146406]: https://github.com/cockroachdb/cockroach/pull/146406 -[#146432]: https://github.com/cockroachdb/cockroach/pull/146432 -[#146446]: https://github.com/cockroachdb/cockroach/pull/146446 -[#146448]: https://github.com/cockroachdb/cockroach/pull/146448 -[#146522]: https://github.com/cockroachdb/cockroach/pull/146522 -[#146567]: https://github.com/cockroachdb/cockroach/pull/146567 -[#146586]: https://github.com/cockroachdb/cockroach/pull/146586 -[#146683]: https://github.com/cockroachdb/cockroach/pull/146683 -[#146696]: https://github.com/cockroachdb/cockroach/pull/146696 -[#146744]: https://github.com/cockroachdb/cockroach/pull/146744 -[#146764]: 
https://github.com/cockroachdb/cockroach/pull/146764 -[#146836]: https://github.com/cockroachdb/cockroach/pull/146836 -[#146848]: https://github.com/cockroachdb/cockroach/pull/146848 -[#146883]: https://github.com/cockroachdb/cockroach/pull/146883 -[#146890]: https://github.com/cockroachdb/cockroach/pull/146890 -[#146949]: https://github.com/cockroachdb/cockroach/pull/146949 -[#146979]: https://github.com/cockroachdb/cockroach/pull/146979 -[#147021]: https://github.com/cockroachdb/cockroach/pull/147021 -[#147022]: https://github.com/cockroachdb/cockroach/pull/147022 -[#147045]: https://github.com/cockroachdb/cockroach/pull/147045 -[#147117]: https://github.com/cockroachdb/cockroach/pull/147117 -[#147187]: https://github.com/cockroachdb/cockroach/pull/147187 -[#147237]: https://github.com/cockroachdb/cockroach/pull/147237 -[#147248]: https://github.com/cockroachdb/cockroach/pull/147248 -[#147306]: https://github.com/cockroachdb/cockroach/pull/147306 -[#147357]: https://github.com/cockroachdb/cockroach/pull/147357 -[#147362]: https://github.com/cockroachdb/cockroach/pull/147362 -[#147368]: https://github.com/cockroachdb/cockroach/pull/147368 -[#147373]: https://github.com/cockroachdb/cockroach/pull/147373 -[#147462]: https://github.com/cockroachdb/cockroach/pull/147462 -[#147486]: https://github.com/cockroachdb/cockroach/pull/147486 -[#147511]: https://github.com/cockroachdb/cockroach/pull/147511 -[#147546]: https://github.com/cockroachdb/cockroach/pull/147546 -[#147548]: https://github.com/cockroachdb/cockroach/pull/147548 -[#147658]: https://github.com/cockroachdb/cockroach/pull/147658 +- Prepared statements are now more efficiently cached. #144021 +- TTL jobs now respond to cluster topology changes by restarting and rebalancing across available nodes. 
#145214 + diff --git a/src/current/_includes/releases/v25.3/v25.3.0-alpha.2.md b/src/current/_includes/releases/v25.3/v25.3.0-alpha.2.md index 0d2d3df181f..94f30d9625a 100644 --- a/src/current/_includes/releases/v25.3/v25.3.0-alpha.2.md +++ b/src/current/_includes/releases/v25.3/v25.3.0-alpha.2.md @@ -7,9 +7,9 @@ Release Date: June 16, 2025

General changes

- Changefeed source metadata now includes the `crdb_internal_table_id` field, enabling downstream consumers to uniquely identify tables even if table names change. - [#147341][#147341] + #147341 - Changefeeds emitting to Kafka sinks that were created in CockroachDB v24.2.1+, or v23.2.10+ and v24.1.4+ with the `changefeed.new_kafka_sink.enabled` cluster setting enabled now include the message key, size, and MVCC timestamp in message too large error logs. - [#147543][#147543] + #147543

SQL language changes

@@ -17,60 +17,40 @@ Release Date: June 16, 2025 - All log entries generated during the execution of a SQL statement and are prefixed by `querytag-`. - Traces and are prefixed by `querytag-`. - In the `crdb_internal.cluster_execution_insights` and `crdb_internal.node_execution_insights` virtual tables in a new `query_tags` JSONB column. - This feature is disabled by default and can be enabled using the `sql.sqlcommenter.enabled` cluster setting. Comments must follow the [SQLCommenter specification](https://google.github.io/sqlcommenter/spec/). [#145435][#145435] -- Added a session variable `initial_retry_backoff_for_read_committed` that controls the initial backoff duration when retrying an individual statement in an explicit `READ COMMITTED` transaction. A duration of `0` disables exponential backoff. If a statement in an explicit `READ COMMITTED` transaction is failing with the `40001` error `ERROR: restart transaction: read committed retry limit exceeded; set by max_retries_for_read_committed=...`, then you should set `initial_retry_backoff_for_read_committed` to a duration proportional to the typical execution time of the statement (in addition to also increasing `max_retries_for_read_committed`). [#146860][#146860] + This feature is disabled by default and can be enabled using the `sql.sqlcommenter.enabled` cluster setting. Comments must follow the [SQLCommenter specification](https://google.github.io/sqlcommenter/spec/). #145435 +- Added a session variable `initial_retry_backoff_for_read_committed` that controls the initial backoff duration when retrying an individual statement in an explicit `READ COMMITTED` transaction. A duration of `0` disables exponential backoff. 
If a statement in an explicit `READ COMMITTED` transaction is failing with the `40001` error `ERROR: restart transaction: read committed retry limit exceeded; set by max_retries_for_read_committed=...`, then you should set `initial_retry_backoff_for_read_committed` to a duration proportional to the typical execution time of the statement (in addition to also increasing `max_retries_for_read_committed`). #146860 - Added the `SHOW CREATE ALL ROUTINES` statement, which can be used to show `CREATE` statements for all user-defined functions (UDFs) and procedures in the current database. - [#147452][#147452] + #147452 - Added the metrics `sql.txn.auto_retry.count` and `sql.statements.auto_retry.count`, which count the number of automatic retries of SQL transactions and statements, respectively, within the database. These metrics differ from the related `txn.restarts.*` metrics, which count retryable errors emitted by the KV layer that must be retried. The new `sql.txn.auto_retry.count` and `sql.statements.auto_retry.count` metrics count auto-retry actions taken by the SQL layer in response to some of those retryable errors. - [#147682][#147682] -- Increased the default value for the `max_retries_for_read_committed` session variable from `10` to `100`. Testing has shown that some high-contention workloads running under `READ COMMITTED` isolation benefit from more statement retries. [#147869][#147869] -- The session variable `initial_retry_backoff_for_read_committed` now defaults to `2` (milliseconds). Testing has shown that some high-contention workloads running under `READ COMMITTED` isolation benefit from exponential backoff. `2` might be too quick of an initial backoff for longer-running statements, but setting this value much higher than the normal duration of execution will cause excessive delay. [#147869][#147869] + #147682 +- Increased the default value for the `max_retries_for_read_committed` session variable from `10` to `100`. 
Testing has shown that some high-contention workloads running under `READ COMMITTED` isolation benefit from more statement retries. #147869 +- The session variable `initial_retry_backoff_for_read_committed` now defaults to `2` (milliseconds). Testing has shown that some high-contention workloads running under `READ COMMITTED` isolation benefit from exponential backoff. `2` might be too quick of an initial backoff for longer-running statements, but setting this value much higher than the normal duration of execution will cause excessive delay. #147869

Operational changes

- Added an `alter_changefeed` structured log event to provide more visibility into when an `ALTER CHANGEFEED` event occurred and what changed. - [#147679][#147679] + #147679 - Added new timeseries metrics to the `storage.value_separation.*` namespace for observing the behavior of [storage engine value separation]({% link v25.3/architecture/storage-layer.md %}#value-separation). - [#147728][#147728] + #147728

DB Console changes

- The Hot Ranges page node filter has been moved out of the main filter container and now filters nodes on the backend to reduce load time. - [#147089][#147089] + #147089 - The Insights page in the DB Console now displays SQL commenter query tags for statement executions. These tags provide application context (such as application name, user ID, or feature flags) embedded in SQL comments using the `sqlcommenter` format. This information can help correlate slow query performance with specific application states. The Query Tags column is available in the Statement Executions view's Statement Insights table, but it is hidden by default. To display it, use the Columns selector. - [#147439][#147439] + #147439 - Retry counts for statements executing under `READ COMMITTED` isolation are now more accurate. - [#147682][#147682] + #147682

Bug fixes

-- Fixed an issue where self-referencing triggers did not have their dependencies properly recorded, which could lead to broken dependencies. [#147018][#147018] -- Fixed a security issue where optimizer predicate reordering could leak information about hidden rows protected by row-level security (RLS) policies. [#147348][#147348] -- Fixed a bug on the SQL Activity Statements and Transactions pages where the time picker failed to support sub-hour time ranges when `sql.stats.aggregation.interval` was set to a value under 1 hour. Previously, selecting a short time window (e.g., 10 minutes) would query for a full hour of data. This fix ensures that the selected time range is respected, enabling more precise analysis of recent activity. [#147447][#147447] -- `FUNCTION` and `PROCEDURE` are now shown via `\h SHOW CREATE` in the CLI doc. [#147666][#147666] -- Fixed a bug where functions lost their row-level security (RLS) policy backreferences, leading to schema change failures. [#147696][#147696] -- Fixed a bug where `ALTER TABLE` was modifying identity attributes on columns not backed by a sequence. [#147698][#147698] -- Fixed an error in `crdb_internal.table_spans` when a table's schema had been dropped. [#147766][#147766] -- Fixed a bug where introspection queries (e.g., querying the `crdb_internal` system catalog) could fail if a dropped constraint referenced a column that was also being dropped. [#147773][#147773] -- Fixed a bug where adding multiple columns in a single statement with `AddGeometryColumn` would cause runtime errors. [#147998][#147998] +- Fixed an issue where self-referencing triggers did not have their dependencies properly recorded, which could lead to broken dependencies. #147018 +- Fixed a security issue where optimizer predicate reordering could leak information about hidden rows protected by row-level security (RLS) policies. #147348 +- Fixed a bug on the SQL Activity Statements and Transactions pages where the time picker failed to support sub-hour time ranges when `sql.stats.aggregation.interval` was set to a value under 1 hour. Previously, selecting a short time window (e.g., 10 minutes) would query for a full hour of data. This fix ensures that the selected time range is respected, enabling more precise analysis of recent activity. #147447 +- `FUNCTION` and `PROCEDURE` are now shown via `\h SHOW CREATE` in the CLI doc. #147666 +- Fixed a bug where functions lost their row-level security (RLS) policy backreferences, leading to schema change failures. #147696 +- Fixed a bug where `ALTER TABLE` was modifying identity attributes on columns not backed by a sequence. #147698 +- Fixed an error in `crdb_internal.table_spans` when a table's schema had been dropped. #147766 +- Fixed a bug where introspection queries (e.g., querying the `crdb_internal` system catalog) could fail if a dropped constraint referenced a column that was also being dropped. #147773 +- Fixed a bug where adding multiple columns in a single statement with `AddGeometryColumn` would cause runtime errors. #147998 -[#147018]: https://github.com/cockroachdb/cockroach/pull/147018 -[#147348]: https://github.com/cockroachdb/cockroach/pull/147348 -[#147447]: https://github.com/cockroachdb/cockroach/pull/147447 -[#147543]: https://github.com/cockroachdb/cockroach/pull/147543 -[#146860]: https://github.com/cockroachdb/cockroach/pull/146860 -[#147682]: https://github.com/cockroachdb/cockroach/pull/147682 -[#147679]: https://github.com/cockroachdb/cockroach/pull/147679 -[#147089]: https://github.com/cockroachdb/cockroach/pull/147089 -[#147666]: https://github.com/cockroachdb/cockroach/pull/147666 -[#147698]: https://github.com/cockroachdb/cockroach/pull/147698 -[#147773]: https://github.com/cockroachdb/cockroach/pull/147773 -[#147998]: https://github.com/cockroachdb/cockroach/pull/147998 -[#145435]: https://github.com/cockroachdb/cockroach/pull/145435 -[#147452]: https://github.com/cockroachdb/cockroach/pull/147452 -[#147869]: https://github.com/cockroachdb/cockroach/pull/147869 -[#147766]: https://github.com/cockroachdb/cockroach/pull/147766 -[#147439]: https://github.com/cockroachdb/cockroach/pull/147439 -[#147341]: https://github.com/cockroachdb/cockroach/pull/147341 -[#147728]: https://github.com/cockroachdb/cockroach/pull/147728 -[#147696]: https://github.com/cockroachdb/cockroach/pull/147696 diff --git a/src/current/_includes/releases/v25.3/v25.3.0-alpha.3.md b/src/current/_includes/releases/v25.3/v25.3.0-alpha.3.md index 6e3cef22ab5..64d1ad8368e 100644 --- a/src/current/_includes/releases/v25.3/v25.3.0-alpha.3.md +++ b/src/current/_includes/releases/v25.3/v25.3.0-alpha.3.md @@ -6,30 +6,23 @@ Release Date: June 23, 2025

Security updates

-- CockroachDB can now synchronize SQL role membership from the groups claim contained in a JWT when the cluster setting `server.jwt_authentication.authorization.enabled` is set to `true`. The claim name and the fallback `userinfo` JSON key are configurable by the cluster settings `server.jwt_authentication.group_claim` and `server.jwt_authentication.userinfo_group_key` respectively. This behavior matches the existing LDAP role-sync feature. [#147318][#147318] +- CockroachDB can now synchronize SQL role membership from the groups claim contained in a JWT when the cluster setting `server.jwt_authentication.authorization.enabled` is set to `true`. The claim name and the fallback `userinfo` JSON key are configurable by the cluster settings `server.jwt_authentication.group_claim` and `server.jwt_authentication.userinfo_group_key` respectively. This behavior matches the existing LDAP role-sync feature. #147318

SQL language changes

-- Fixed a bug that caused a routine with an `INSERT` statement to unnecessarily block dropping a hash-sharded index or computed column on the target table. This fix applies only to newly created routines. In releases prior to v25.3, the fix must be enabled by setting the session variable `use_improved_routine_dependency_tracking` to `on`. [#146250][#146250] -- Partial indexes can now reference user-defined functions. [#147817][#147817] -- Computed column expressions and `ON UPDATE` expressions can now reference user-defined functions. [#147817][#147817] -- `IMPORT TABLE` as well `PGDUMP` and `MYSQLDUMP` formats of `IMPORT` are now fully removed. These have been deprecated since v23.2. [#148248][#148248] +- Fixed a bug that caused a routine with an `INSERT` statement to unnecessarily block dropping a hash-sharded index or computed column on the target table. This fix applies only to newly created routines. In releases prior to v25.3, the fix must be enabled by setting the session variable `use_improved_routine_dependency_tracking` to `on`. #146250 +- Partial indexes can now reference user-defined functions. #147817 +- Computed column expressions and `ON UPDATE` expressions can now reference user-defined functions. #147817 +- `IMPORT TABLE` as well as `PGDUMP` and `MYSQLDUMP` formats of `IMPORT` are now fully removed. These have been deprecated since v23.2. #148248

Command-line changes

-- Removed the stale `--oss` flag from the `dev ui watch` subcommand. This flag was no longer in use, as the UI development workflow now exclusively targets the CCL build. This change simplifies the tool by removing an unused build path and potential confusion for developers. [#147978][#147978] +- Removed the stale `--oss` flag from the `dev ui watch` subcommand. This flag was no longer in use, as the UI development workflow now exclusively targets the CCL build. This change simplifies the tool by removing an unused build path and potential confusion for developers. #147978

Bug fixes

-- Fixed a bug that allowed a column to be dropped from a table even if it was referenced in the `RETURNING` clause of an `UPDATE` or `DELETE` statement in a routine. In releases prior to v25.3, the fix must be enabled by setting the session variable `use_improved_routine_dependency_tracking` to `on`. [#146250][#146250] -- Fixed a bug where `libpq` clients using the async API could hang with large result sets (Python: psycopg; Ruby: ActiveRecord, ruby-pg). [#148222][#148222] +- Fixed a bug that allowed a column to be dropped from a table even if it was referenced in the `RETURNING` clause of an `UPDATE` or `DELETE` statement in a routine. In releases prior to v25.3, the fix must be enabled by setting the session variable `use_improved_routine_dependency_tracking` to `on`. #146250 +- Fixed a bug where `libpq` clients using the async API could hang with large result sets (Python: psycopg; Ruby: ActiveRecord, ruby-pg). #148222 - Restore no longer gets stuck in the `reverting` state after failed cleanup of dropped temporary system tables. - [#148098][#148098] - -[#147318]: https://github.com/cockroachdb/cockroach/pull/147318 -[#146250]: https://github.com/cockroachdb/cockroach/pull/146250 -[#147817]: https://github.com/cockroachdb/cockroach/pull/147817 -[#148248]: https://github.com/cockroachdb/cockroach/pull/148248 -[#147978]: https://github.com/cockroachdb/cockroach/pull/147978 -[#148222]: https://github.com/cockroachdb/cockroach/pull/148222 -[#148098]: https://github.com/cockroachdb/cockroach/pull/148098 + #148098 + diff --git a/src/current/_includes/releases/v25.3/v25.3.0-beta.1.md b/src/current/_includes/releases/v25.3/v25.3.0-beta.1.md index d42e715d618..bdd4db6ae6b 100644 --- a/src/current/_includes/releases/v25.3/v25.3.0-beta.1.md +++ b/src/current/_includes/releases/v25.3/v25.3.0-beta.1.md @@ -7,52 +7,39 @@ Release Date: July 2, 2025

SQL language changes

- Directionality may no longer be assigned to any vector index column. Prefix columns are not scannable in a vector index, so directionality is not relevant to them. - [#147307][#147307] + #147307 - Changed the basic sequence caching option to cache at the per-node level by default. The `PER SESSION CACHE` sequence option is added to provide the previous per-session cache behavior. - [#148290][#148290] + #148290 - Removed the 'started' column in `SHOW JOBS`, which was a duplicate of the 'created' column. - [#148464][#148464] + #148464

Operational changes

- Introduced the following cluster settings for enabling and configuring [value separation in the storage engine]({% link v25.3/architecture/storage-layer.md %}#value-separation): `storage.value_separation.enabled`, `storage.value_separation.minimum_size`, and `storage.value_separation.max_reference_depth`. - [#148535][#148535] + #148535 - Non-admin users no longer have access to changefeed jobs they do not own and which are not owned by a role of which they are a member, regardless of whether they have the `CHANGEFEED` privilege on the table or tables those jobs may be watching. Admin users, or those with global `SHOWJOB` / `CONTROLJOB` privileges, can still interact with all jobs, regardless of ownership. - [#148537][#148537] + #148537 - In order to selectively capture traces for transactions running in an active workload without having to capture them via statement diagnostic bundles, customers can now use the `sql.trace.txn.sample_rate` cluster setting to enable tracing for a fraction of their workload. The `sql.trace.txn.enable_threshold` will still need to be set to a positive value to provide a filter for how slow a transaction needs to be after being sampled to merit emitting a trace. Traces are emitted to the `SQL_EXEC` logging channel. - [#148542][#148542] + #148542 - Added the following cluster settings for configuring blob file rewrite compactions: `storage.value_separation.rewrite_minimum_age` and `storage.value_separation.compaction_garbage_threshold`. - [#148837][#148837] + #148837 - Added the `replicas.cpunanospersecond` metric. Notably, when child labels are enabled, this metric exposes evaluation-related Replica CPU usage by tenant. - [#146526][#146526] + #146526 - CockroachDB now raises an error when encountering improper inline SSL credentials instead of panicking. - [#148242][#148242] + #148242 - Restore will now re-attempt `AdminSplit` KV requests instead of immediately failing and pausing the job. - [#148484][#148484] + #148484

Bug fixes

- Fixed a bug where using column families on tables with vector indexes would cause the index to fail to return results. - [#147307][#147307] + #147307 - Large mutation statements (`INSERT`, `UPDATE`, `DELETE`, `UPSERT`) are now less likely to encounter contention with automatic table statistics collection in some cases. The bug was present since v19.1. - [#148488][#148488] + #148488

Performance improvements

- The optimizer will no longer apply a fast-path to deletes cascading to `REGIONAL BY ROW` tables. This prevents the cascading delete from accessing more regions than necessary. - [#148105][#148105] - - -[#148535]: https://github.com/cockroachdb/cockroach/pull/148535 -[#148542]: https://github.com/cockroachdb/cockroach/pull/148542 -[#148105]: https://github.com/cockroachdb/cockroach/pull/148105 -[#146526]: https://github.com/cockroachdb/cockroach/pull/146526 -[#147307]: https://github.com/cockroachdb/cockroach/pull/147307 -[#148464]: https://github.com/cockroachdb/cockroach/pull/148464 -[#148537]: https://github.com/cockroachdb/cockroach/pull/148537 -[#148837]: https://github.com/cockroachdb/cockroach/pull/148837 -[#148488]: https://github.com/cockroachdb/cockroach/pull/148488 -[#148242]: https://github.com/cockroachdb/cockroach/pull/148242 -[#148484]: https://github.com/cockroachdb/cockroach/pull/148484 -[#148839]: https://github.com/cockroachdb/cockroach/pull/148839 -[#148290]: https://github.com/cockroachdb/cockroach/pull/148290 + #148105 + + diff --git a/src/current/_includes/releases/v25.3/v25.3.0-beta.2.md b/src/current/_includes/releases/v25.3/v25.3.0-beta.2.md index 87c671027c0..f5ecd3c9395 100644 --- a/src/current/_includes/releases/v25.3/v25.3.0-beta.2.md +++ b/src/current/_includes/releases/v25.3/v25.3.0-beta.2.md @@ -7,16 +7,13 @@ Release Date: July 9, 2025

General changes

- For virtual clusters, hot range logging is now performed by a single job on one node, rather than by tasks on every node. - [#148926][#148926] + #148926

Bug fixes

- CockroachDB now prohibits `ORDER BY` and join equality operations on `REFCURSOR` types, matching PostgreSQL behavior. - [#149292][#149292] + #149292 - Fixed an issue where CockroachDB could hit an internal error when performing a `DELETE`, `UPDATE`, or `UPSERT` where the initial scan of the mutation is locking and is on a table different from the one being mutated. A possible workaround was `SET enable_implicit_select_for_update = false`, but this could increase contention. The bug was introduced in v25.2 and is now fixed. - [#149302][#149302] + #149302 -[#148926]: https://github.com/cockroachdb/cockroach/pull/148926 -[#149292]: https://github.com/cockroachdb/cockroach/pull/149292 -[#149302]: https://github.com/cockroachdb/cockroach/pull/149302 diff --git a/src/current/_includes/releases/v25.3/v25.3.0-beta.3.md b/src/current/_includes/releases/v25.3/v25.3.0-beta.3.md index e3da83743b4..287c319c7d1 100644 --- a/src/current/_includes/releases/v25.3/v25.3.0-beta.3.md +++ b/src/current/_includes/releases/v25.3/v25.3.0-beta.3.md @@ -7,19 +7,15 @@ Release Date: July 14, 2025

SQL language changes

- Added support for invoking a UDF from a view query. Renaming or setting the schema on the routine is currently not allowed if it is referenced by a view. - [#149514][#149514] + #149514 - The session setting `optimizer_prefer_bounded_cardinality` is now enabled by default. This setting instructs the optimizer to prefer query plans where every expression has a guaranteed upper-bound on the number of rows it will process. - [#149675][#149675] + #149675

Bug fixes

- Fixed a bug that would cause a `CALL` statement executed via a portal in the extended wire protocol to result in an error like `unknown portal ""` if the stored procedure contained `COMMIT` or `ROLLBACK` statements. The bug had existed since PL/pgSQL transaction control statements were introduced in v24.1. The fix is off by default in versions prior to v25.3. - [#149385][#149385] + #149385 - In v25.1, automatic partial statistics collection was enabled by default (by setting the `sql.stats.automatic_partial_collection.enabled` cluster setting to `true`). Partial statistics collection may encounter certain expected scenarios that were previously reported as failed stats jobs with PostgreSQL error code `55000`. These errors are benign and are no longer reported. Instead, the stats job will be marked as "succeeded," though no new statistics will be created. - [#149626][#149626] + #149626 -[#149514]: https://github.com/cockroachdb/cockroach/pull/149514 -[#149675]: https://github.com/cockroachdb/cockroach/pull/149675 -[#149385]: https://github.com/cockroachdb/cockroach/pull/149385 -[#149626]: https://github.com/cockroachdb/cockroach/pull/149626 diff --git a/src/current/_includes/releases/v25.3/v25.3.0-rc.1.md b/src/current/_includes/releases/v25.3/v25.3.0-rc.1.md index 07a26fe51d2..03bbc98b2d4 100644 --- a/src/current/_includes/releases/v25.3/v25.3.0-rc.1.md +++ b/src/current/_includes/releases/v25.3/v25.3.0-rc.1.md @@ -7,15 +7,15 @@ Release Date: July 23, 2025

SQL language changes

- The `CITEXT` data type is now supported, enabling case-insensitive comparisons for `CITEXT` columns. Internally, `CITEXT` is equivalent to using the undetermined level 2 collation `und-u-ks-level2`. For example, under `CITEXT`, the expression `'test' = 'TEST'` returns `TRUE`. - [#149819][#149819] + #149819 - Added support for automatically determining the region column for a `REGIONAL BY ROW` table using a foreign key constraint. The foreign key is specified by setting a new table storage parameter `infer_rbr_region_col_using_constraint`, and must contain the region column. This can be useful for applications that are unable to guarantee that a child row is inserted or updated from the same region as the matching parent row. - [#150366][#150366] + #150366 - The session setting `optimizer_min_row_count`, which sets a lower bound on row count estimates for relational expressions during query planning, is now set to `1` by default. - [#150376][#150376] + #150376 - The `options` column in the output of `SHOW ROLES` and `SHOW USERS` is now returned as an array of strings (e.g., `{NOLOGIN,CREATEDB}`) rather than as a single comma-separated string. This enables more efficient querying of role options using array functions like `unnest()`. For example: `SELECT * FROM [SHOW ROLES] AS r WHERE EXISTS (SELECT 1 FROM unnest(r.options) AS m(option) WHERE option LIKE 'SUBJECT=cn%');` - [#149537][#149537] + #149537 - The `SHOW ROLES` and `SHOW USERS` commands now include an `estimated_last_login_time` column that displays the estimated timestamp of when each user last authenticated to the database. This column shows `NULL` for users who have never logged in, and for existing users after upgrading to v25.3 until their next login. The tracking is performed on a best-effort basis and may not capture every login event. - [#149537][#149537] + #149537

Operational changes

@@ -23,35 +23,24 @@ Release Date: July 23, 2025 - `obs.execution_tracer.interval`: Enables the tracer and sets the interval for capturing traces. Set to a value greater than 0 to activate. - `obs.execution_tracer.duration`: Specifies the duration for each captured trace. - `obs.execution_tracer.total_dump_size_limit`: Sets the maximum disk space allowed for storing execution traces. Older traces are automatically deleted when this limit is reached. - [#149705][#149705] + #149705 - The value of `sql.stats.error_on_concurrent_create_stats.enabled` now defaults to `false`, suppressing error counters for auto stats jobs that fail due to concurrent stats jobs in progress. - [#149857][#149857] + #149857

Bug fixes

- Fixed a slow memory leak that was introduced in v25.1.8, v25.2.1, v25.2.2, and v25.3 betas. The leak would accumulate whenever a node executed a part of the distributed plan (the gateway node of the plan was not affected), and could only be mitigated by restarting the node. - [#149920][#149920] + #149920 - Fixed an issue where some SQL metrics were not reported when `server.child_metrics.enabled` was enabled, `server.child_metrics.include_aggregate.enabled` was disabled, and `sql.metrics.application_name.enabled` and `sql.metrics.database_name.enabled` were also disabled. Specifically, metrics with no children now report their aggregate metrics regardless of the `server.child_metrics.include_aggregate.enabled` cluster setting. - [#149929][#149929] + #149929 - Fixed a bug that would allow a race condition in foreign key cascades under `READ COMMITTED` and `REPEATABLE READ` isolation levels. - [#150296][#150296] + #150296 - Fixed an issue where discarding zone configs on sequences did not actually remove the configuration. - [#150360][#150360] + #150360

Performance improvements

- Mutation statements (`UPDATE` and `DELETE`) that perform lookup joins into multi-region tables (perhaps as part of a `CASCADE`) are now more likely to parallelize the lookups across ranges, improving their performance. - [#150016][#150016] - - -[#150376]: https://github.com/cockroachdb/cockroach/pull/150376 -[#149705]: https://github.com/cockroachdb/cockroach/pull/149705 -[#149857]: https://github.com/cockroachdb/cockroach/pull/149857 -[#149929]: https://github.com/cockroachdb/cockroach/pull/149929 -[#150360]: https://github.com/cockroachdb/cockroach/pull/150360 -[#149819]: https://github.com/cockroachdb/cockroach/pull/149819 -[#150366]: https://github.com/cockroachdb/cockroach/pull/150366 -[#149920]: https://github.com/cockroachdb/cockroach/pull/149920 -[#150296]: https://github.com/cockroachdb/cockroach/pull/150296 -[#150016]: https://github.com/cockroachdb/cockroach/pull/150016 -[#149537]: https://github.com/cockroachdb/cockroach/pull/149537 + #150016 + + diff --git a/src/current/_includes/releases/v25.3/v25.3.1.md b/src/current/_includes/releases/v25.3/v25.3.1.md index 927277f1528..384017386a3 100644 --- a/src/current/_includes/releases/v25.3/v25.3.1.md +++ b/src/current/_includes/releases/v25.3/v25.3.1.md @@ -6,32 +6,21 @@ Release Date: August 29, 2025

Operational changes

-- Updated TTL job replanning to be less sensitive by focusing specifically on detecting when nodes become unavailable rather than reacting to all plan differences. The cluster setting `sql.ttl.replan_flow_threshold` may have been set to `0` to work around the TTL replanner being too sensitive; this fix will alleviate that and any instance that had set `replan_flow_threshold` to `0` can be reset back to the default. [#151483][#151483] +- Updated TTL job replanning to be less sensitive by focusing specifically on detecting when nodes become unavailable rather than reacting to all plan differences. The cluster setting `sql.ttl.replan_flow_threshold` may have been set to `0` to work around the TTL replanner being too sensitive; this fix will alleviate that and any instance that had set `replan_flow_threshold` to `0` can be reset back to the default. #151483

Bug fixes

-- Fixed a bug where the entire schema would become inaccessible if a table was referenced as an implicit record type by a user-defined function (UDF) while the table was undergoing an `IMPORT`. [#150440][#150440] -- Fixed invalid zone configurations that were generated when adding a super region to a 3-region database with a secondary region and region survivability. Previously, this could result in assigning more than the allowed number of replicas. [#150620][#150620] -- Fixed a bug that could cause some errors returned by attempts to upload backup data to external storage providers to go undetected, potentially causing incomplete backups. [#151080][#151080] -- Fixed a memory accounting issue in the client certificate cache that caused multiple allocations to be reported for the same certificate. The cache now accurately tracks memory usage and includes a safeguard to prevent it from negatively affecting SQL operations. [#151136][#151136] -- Previously, CockroachDB could encounter an internal error `trying to add a column of UNKNOWN type at ...` in rare cases when handling `CASE` or `OR` operations. This bug was present since v20.2 and is now fixed. [#151160][#151160] -- Fixed a bug where `debug.zip` files collected from clusters with `disallow_full_table_scans` enabled were missing system table data. [#151247][#151247] -- Fixed a bug where sequences could lose references to triggers, allowing them to be dropped incorrectly. [#151593][#151593] -- Previously, CockroachDB could hit an error `ERROR: span with results after resume span...` when evaluating some queries with `ORDER BY ... DESC` in an edge case. This bug was present since v22.1 and is now fixed. [#152183][#152183] +- Fixed a bug where the entire schema would become inaccessible if a table was referenced as an implicit record type by a user-defined function (UDF) while the table was undergoing an `IMPORT`. #150440 +- Fixed invalid zone configurations that were generated when adding a super region to a 3-region database with a secondary region and region survivability. Previously, this could result in assigning more than the allowed number of replicas. #150620 +- Fixed a bug that could cause some errors returned by attempts to upload backup data to external storage providers to go undetected, potentially causing incomplete backups. #151080 +- Fixed a memory accounting issue in the client certificate cache that caused multiple allocations to be reported for the same certificate. The cache now accurately tracks memory usage and includes a safeguard to prevent it from negatively affecting SQL operations. #151136 +- Previously, CockroachDB could encounter an internal error `trying to add a column of UNKNOWN type at ...` in rare cases when handling `CASE` or `OR` operations. This bug was present since v20.2 and is now fixed. #151160 +- Fixed a bug where `debug.zip` files collected from clusters with `disallow_full_table_scans` enabled were missing system table data. #151247 +- Fixed a bug where sequences could lose references to triggers, allowing them to be dropped incorrectly. #151593 +- Previously, CockroachDB could hit an error `ERROR: span with results after resume span...` when evaluating some queries with `ORDER BY ... DESC` in an edge case. This bug was present since v22.1 and is now fixed. #152183

Miscellaneous

-- Updated Go version to 1.23.11. [#150868][#150868] +- Updated Go version to 1.23.11. #150868 -[#151160]: https://github.com/cockroachdb/cockroach/pull/151160 -[#151593]: https://github.com/cockroachdb/cockroach/pull/151593 -[#150868]: https://github.com/cockroachdb/cockroach/pull/150868 -[#150440]: https://github.com/cockroachdb/cockroach/pull/150440 -[#150620]: https://github.com/cockroachdb/cockroach/pull/150620 -[#151136]: https://github.com/cockroachdb/cockroach/pull/151136 -[#151247]: https://github.com/cockroachdb/cockroach/pull/151247 -[#152183]: https://github.com/cockroachdb/cockroach/pull/152183 -[#150579]: https://github.com/cockroachdb/cockroach/pull/150579 -[#151483]: https://github.com/cockroachdb/cockroach/pull/151483 -[#151080]: https://github.com/cockroachdb/cockroach/pull/151080 diff --git a/src/current/_includes/releases/v25.3/v25.3.2.md b/src/current/_includes/releases/v25.3/v25.3.2.md index 63b92896932..db216b936f1 100644 --- a/src/current/_includes/releases/v25.3/v25.3.2.md +++ b/src/current/_includes/releases/v25.3/v25.3.2.md @@ -10,53 +10,37 @@ Release Date: September 22, 2025

SQL language changes

-- Added a new session variable, `disable_optimizer_rules`, which allows users to provide a comma-separated list of optimizer rules to disable during query optimization. This allows users to avoid rules that are known to create a suboptimal query plan for specific queries. [#152350][#152350] -- When `sql_safe_updates` is enabled, the `ALTER TABLE ... LOCALITY` statement will be blocked when trying to convert an existing table to `REGIONAL BY ROW`, unless a region column has been added to the table. This protects against undesired behavior that caused `UPDATE` or `DELETE` statements to fail against the table while the locality change was in progress. [#152594][#152594] +- Added a new session variable, `disable_optimizer_rules`, which allows users to provide a comma-separated list of optimizer rules to disable during query optimization. This allows users to avoid rules that are known to create a suboptimal query plan for specific queries. #152350 +- When `sql_safe_updates` is enabled, the `ALTER TABLE ... LOCALITY` statement will be blocked when trying to convert an existing table to `REGIONAL BY ROW`, unless a region column has been added to the table. This protects against undesired behavior that caused `UPDATE` or `DELETE` statements to fail against the table while the locality change was in progress. #152594

Operational changes

-- `auth.ldap.conn.latency.internal` has been added to denote the internal authentication time for ldap auth method. [#152341][#152341] +- `auth.ldap.conn.latency.internal` has been added to denote the internal authentication time for the LDAP auth method. #152341

Bug fixes

-- Addressed a bug on `schema_locked` tables when a column is dropped, and `schema_locked` is toggled for the user. [#151527][#151527] -- Fixed a bug that could cause excessive memory allocations when compacting timeseries keys. [#151815][#151815] -- Fixed a bug where `DROP USER` succeeded even though a role owned default privileges, which could leave invalid privilege entries in the system. [#151818][#151818] -- Previously, CockroachDB could hit an error `ERROR: span with results after resume span...` when evaluating some queries with `ORDER BY ... DESC` in an edge case. This bug was present since v22.1 and is now fixed. [#152138][#152138] -- Fixed a bug where `SHOW TABLES` would show inaccurate row counts if the most recent statistics collection was partial. [#152200][#152200] -- Fixed a bug that prevented `RESTORE` from working if there were computed columns or `ON UPDATE` expressions that referenced user-defined functions (UDFs). This bug was introduced in v25.3.0. [#152217][#152217] -- Fixed a bug where updating column default expressions would incorrectly remove sequence ownerships for the affected column. [#152315][#152315] +- Addressed a bug on `schema_locked` tables when a column is dropped, and `schema_locked` is toggled for the user. #151527 +- Fixed a bug that could cause excessive memory allocations when compacting timeseries keys. #151815 +- Fixed a bug where `DROP USER` succeeded even though a role owned default privileges, which could leave invalid privilege entries in the system. #151818 +- Previously, CockroachDB could hit an error `ERROR: span with results after resume span...` when evaluating some queries with `ORDER BY ... DESC` in an edge case. This bug was present since v22.1 and is now fixed. #152138 +- Fixed a bug where `SHOW TABLES` would show inaccurate row counts if the most recent statistics collection was partial. #152200 +- Fixed a bug that prevented `RESTORE` from working if there were computed columns or `ON UPDATE` expressions that referenced user-defined functions (UDFs). This bug was introduced in v25.3.0. #152217 +- Fixed a bug where updating column default expressions would incorrectly remove sequence ownerships for the affected column. #152315 - Fixed a bug that allowed foreign-key violations to result from some combinations of concurrent `READ COMMITTED` and `SERIALIZABLE` transactions. If both `SERIALIZABLE` and weaker-isolation transactions will concurrently modify rows involved in foreign-key relationships, the `SERIALIZABLE` transactions must have the following session variables set in order to prevent any possible foreign-key violations: - `SET enable_implicit_fk_locking_for_serializable = on;` - `SET enable_shared_locking_for_serializable = on;` - - `SET enable_durable_locking_for_serializable = on;` [#152374][#152374] -- Added an automatic repair for dangling or invalid entries in the `system.comments` table. [#152473][#152473] -- Added the `use_soft_limit_for_distribute_scan` session variable (default: `false`), which controls whether CockroachDB uses the soft row count estimate when deciding whether an execution plan should be distributed. In v25.1, the physical planning heuristics were changed such that large constrained table scans, estimated to scan at least 10,000 rows (controlled via `distribute_scan_row_count_threshold`), would force plan distribution when `distsql=auto`. However, if the scan had a "soft limit" CockroachDB would still use the full estimate (for example, `10,000` in `estimated row count: 100–10,000`), sometimes unnecessarily distributing queries and increasing latency. The `use_soft_limit_for_distribute_scan` session variable addresses this by allowing the planner to use the soft limit when deciding whether a scan is "large". [#152556][#152556] -- Fixed a bug where views could not reference the `crdb_region` column from their underlying tables in expressions. [#152680][#152680] + - `SET enable_durable_locking_for_serializable = on;` #152374 +- Added an automatic repair for dangling or invalid entries in the `system.comments` table. #152473 +- Added the `use_soft_limit_for_distribute_scan` session variable (default: `false`), which controls whether CockroachDB uses the soft row count estimate when deciding whether an execution plan should be distributed. In v25.1, the physical planning heuristics were changed such that large constrained table scans, estimated to scan at least 10,000 rows (controlled via `distribute_scan_row_count_threshold`), would force plan distribution when `distsql=auto`. However, if the scan had a "soft limit" CockroachDB would still use the full estimate (for example, `10,000` in `estimated row count: 100–10,000`), sometimes unnecessarily distributing queries and increasing latency. The `use_soft_limit_for_distribute_scan` session variable addresses this by allowing the planner to use the soft limit when deciding whether a scan is "large". #152556 +- Fixed a bug where views could not reference the `crdb_region` column from their underlying tables in expressions. #152680

Performance improvements

-- Lookup joins can now be used on tables with virtual columns even if the type of the search argument is not identical to the column type referenced in the virtual column. [#152632][#152632] +- Lookup joins can now be used on tables with virtual columns even if the type of the search argument is not identical to the column type referenced in the virtual column. #152632

Miscellaneous

- Tunes S3 client retry behavior to be more reliable in the - presence of correlated errors. [#151873][#151873] - - -[#152594]: https://github.com/cockroachdb/cockroach/pull/152594 -[#151815]: https://github.com/cockroachdb/cockroach/pull/151815 -[#151818]: https://github.com/cockroachdb/cockroach/pull/151818 -[#152374]: https://github.com/cockroachdb/cockroach/pull/152374 -[#152473]: https://github.com/cockroachdb/cockroach/pull/152473 -[#152680]: https://github.com/cockroachdb/cockroach/pull/152680 -[#152138]: https://github.com/cockroachdb/cockroach/pull/152138 -[#151873]: https://github.com/cockroachdb/cockroach/pull/151873 -[#151527]: https://github.com/cockroachdb/cockroach/pull/151527 -[#152200]: https://github.com/cockroachdb/cockroach/pull/152200 -[#152556]: https://github.com/cockroachdb/cockroach/pull/152556 -[#152350]: https://github.com/cockroachdb/cockroach/pull/152350 -[#152341]: https://github.com/cockroachdb/cockroach/pull/152341 -[#152217]: https://github.com/cockroachdb/cockroach/pull/152217 -[#152315]: https://github.com/cockroachdb/cockroach/pull/152315 -[#152632]: https://github.com/cockroachdb/cockroach/pull/152632 + presence of correlated errors. #151873 + + diff --git a/src/current/_includes/releases/v25.3/v25.3.3.md b/src/current/_includes/releases/v25.3/v25.3.3.md index 2678277747e..957e8c27688 100644 --- a/src/current/_includes/releases/v25.3/v25.3.3.md +++ b/src/current/_includes/releases/v25.3/v25.3.3.md @@ -6,30 +6,19 @@ Release Date: October 17, 2025

Operational changes

-- Added the cluster setting `kvadmission.use_range_tenant_id_for_non_admin.enabled`, which can be used to disable the behavior where admission control uses the range's tenant ID for non-admin requests. This behavior is disabled by default. [#153460][#153460] +- Added the cluster setting `kvadmission.use_range_tenant_id_for_non_admin.enabled`, which can be used to disable the behavior where admission control uses the range's tenant ID for non-admin requests. This behavior is disabled by default. #153460

Bug fixes

-- Fixed a bug where an `INSERT` statement could fail with a type checking error while adding a `BIT(n)` column. [#152965][#152965] -- Fixed a bug where index creation could fail due to validation errors if the schema change was retried or paused/resumed during the backfill. [#153597][#153597] -- Fixed a bug introduced in v25.1.0 that would cause a node panic if a `SIGINT` signal was sent during the execution of a `CHECK EXTERNAL CONNECTION` command. [#153602][#153602] -- Fixed a bug where `ALTER POLICY` was incorrectly dropping dependency tracking for functions, sequences, or types in policy expressions. [#153804][#153804] -- Fixed a bug introduced in v25.1 where CockroachDB nodes could crash when executing `DO` statements that referenced (possibly nonexistent) user-defined types in non-default configurations. The crash only occurred if additional logging was enabled (for example, with the `sql.log.all_statements.enabled` cluster setting). [#153913][#153913] -- Fixed a runtime error that could be hit if a new secondary index had a name collision with a primary index. [#154016][#154016] -- Fixed a bug where the `schema_locked` storage parameter was not being enforced on the `TRUNCATE` command, which could cause changefeed jobs to fail. [#154041][#154041] -- Fixed a bug that caused panics when executing `COPY` into a table with hidden columns and expression indexes. The panic only occurred when the session setting `expect_and_ignore_not_visible_columns_in_copy` was enabled. This bug was introduced with `expect_and_ignore_not_visible_columns_in_copy` in v22.1.0. [#154286][#154286] -- Disabled the `kv.lock_table.unreplicated_lock_reliability.split.enabled` feature, which could lead to a node crash. [#155414][#155414] -- Fixed a bug where the presence of duplicate temporary tables in a backup caused the restore to fail with an error containing the text `restoring table desc and namespace entries: table already exists`. 
[#154397][#154397] - - -[#153597]: https://github.com/cockroachdb/cockroach/pull/153597 -[#153602]: https://github.com/cockroachdb/cockroach/pull/153602 -[#153913]: https://github.com/cockroachdb/cockroach/pull/153913 -[#154016]: https://github.com/cockroachdb/cockroach/pull/154016 -[#155414]: https://github.com/cockroachdb/cockroach/pull/155414 -[#154397]: https://github.com/cockroachdb/cockroach/pull/154397 -[#153460]: https://github.com/cockroachdb/cockroach/pull/153460 -[#152965]: https://github.com/cockroachdb/cockroach/pull/152965 -[#153804]: https://github.com/cockroachdb/cockroach/pull/153804 -[#154041]: https://github.com/cockroachdb/cockroach/pull/154041 -[#154286]: https://github.com/cockroachdb/cockroach/pull/154286 +- Fixed a bug where an `INSERT` statement could fail with a type checking error while adding a `BIT(n)` column. #152965 +- Fixed a bug where index creation could fail due to validation errors if the schema change was retried or paused/resumed during the backfill. #153597 +- Fixed a bug introduced in v25.1.0 that would cause a node panic if a `SIGINT` signal was sent during the execution of a `CHECK EXTERNAL CONNECTION` command. #153602 +- Fixed a bug where `ALTER POLICY` was incorrectly dropping dependency tracking for functions, sequences, or types in policy expressions. #153804 +- Fixed a bug introduced in v25.1 where CockroachDB nodes could crash when executing `DO` statements that referenced (possibly nonexistent) user-defined types in non-default configurations. The crash only occurred if additional logging was enabled (for example, with the `sql.log.all_statements.enabled` cluster setting). #153913 +- Fixed a runtime error that could be hit if a new secondary index had a name collision with a primary index. #154016 +- Fixed a bug where the `schema_locked` storage parameter was not being enforced on the `TRUNCATE` command, which could cause changefeed jobs to fail. 
#154041 +- Fixed a bug that caused panics when executing `COPY` into a table with hidden columns and expression indexes. The panic only occurred when the session setting `expect_and_ignore_not_visible_columns_in_copy` was enabled. This bug was introduced with `expect_and_ignore_not_visible_columns_in_copy` in v22.1.0. #154286 +- Disabled the `kv.lock_table.unreplicated_lock_reliability.split.enabled` feature, which could lead to a node crash. #155414 +- Fixed a bug where the presence of duplicate temporary tables in a backup caused the restore to fail with an error containing the text `restoring table desc and namespace entries: table already exists`. #154397 + + diff --git a/src/current/_includes/releases/v25.3/v25.3.4.md b/src/current/_includes/releases/v25.3/v25.3.4.md index 517ccd73278..5c313524198 100644 --- a/src/current/_includes/releases/v25.3/v25.3.4.md +++ b/src/current/_includes/releases/v25.3/v25.3.4.md @@ -6,6 +6,5 @@ Release Date: October 30, 2025

Bug fixes

-- Fixed a bug in the `cockroach node drain` command where draining a node using virtual clusters (such as clusters running Physical Cluster Replication (PCR)) could return before the drain was complete, possibly resulting in shutting down the node while it still had active SQL clients and range leases. [#156313][#156313] +- Fixed a bug in the `cockroach node drain` command where draining a node using virtual clusters (such as clusters running Physical Cluster Replication (PCR)) could return before the drain was complete, possibly resulting in shutting down the node while it still had active SQL clients and range leases. #156313 -[#156313]: https://github.com/cockroachdb/cockroach/pull/156313 diff --git a/src/current/_includes/releases/v25.3/v25.3.5.md b/src/current/_includes/releases/v25.3/v25.3.5.md index 3d7c169ba60..b75ed0214a3 100644 --- a/src/current/_includes/releases/v25.3/v25.3.5.md +++ b/src/current/_includes/releases/v25.3/v25.3.5.md @@ -6,18 +6,14 @@ Release Date: November 14, 2025

SQL language changes

-- Added the `sql.statements.bytes_read.count` metric that counts the number of bytes scanned by SQL statements. [#156592][#156592] -- Added the `sql.statements.index_rows_written.count` metric that counts the number of primary and secondary index rows modified by SQL statements. [#156592][#156592] -- Added the `sql.statements.rows_read.count` metric that counts the number of index rows read by SQL statements. [#156592][#156592] -- Added the `sql.statements.index_bytes_written.count` metric that counts the number of primary and secondary index bytes modified by SQL statements. [#156592][#156592] +- Added the `sql.statements.bytes_read.count` metric that counts the number of bytes scanned by SQL statements. #156592 +- Added the `sql.statements.index_rows_written.count` metric that counts the number of primary and secondary index rows modified by SQL statements. #156592 +- Added the `sql.statements.rows_read.count` metric that counts the number of index rows read by SQL statements. #156592 +- Added the `sql.statements.index_bytes_written.count` metric that counts the number of primary and secondary index bytes modified by SQL statements. #156592

Bug fixes

-- Fixed a bug where the job responsible for compacting stats for the SQL activity state could enter an unschedulable state. [#155964][#155964] -- Fixed a bug where CockroachDB didn't include reads and writes performed by routines (user-defined functions and stored procedures) as well as apply joins into the `bytes read`, `rows read`, and `rows written` statement execution statistics. The bug had been present since before v23.2. [#156501][#156501] -- Fixed a bug where changefeeds using CDC queries could sometimes unexpectedly fail after a schema change with a descriptor retrieval error. [#156551][#156551] +- Fixed a bug where the job responsible for compacting stats for the SQL activity state could enter an unschedulable state. #155964 +- Fixed a bug where CockroachDB didn't include reads and writes performed by routines (user-defined functions and stored procedures) as well as apply joins into the `bytes read`, `rows read`, and `rows written` statement execution statistics. The bug had been present since before v23.2. #156501 +- Fixed a bug where changefeeds using CDC queries could sometimes unexpectedly fail after a schema change with a descriptor retrieval error. #156551 -[#156592]: https://github.com/cockroachdb/cockroach/pull/156592 -[#155964]: https://github.com/cockroachdb/cockroach/pull/155964 -[#156501]: https://github.com/cockroachdb/cockroach/pull/156501 -[#156551]: https://github.com/cockroachdb/cockroach/pull/156551 diff --git a/src/current/_includes/releases/v25.3/v25.3.6.md b/src/current/_includes/releases/v25.3/v25.3.6.md index d561a1b3f7e..655f0424a44 100644 --- a/src/current/_includes/releases/v25.3/v25.3.6.md +++ b/src/current/_includes/releases/v25.3/v25.3.6.md @@ -6,30 +6,21 @@ Release Date: December 12, 2025

Bug fixes

-- A mechanism that prevents unsafe replication changes from causing loss of quorum now functions correctly. An internal function has been fixed to properly return errors, enhancing the reliability of replication safeguards. [#156522][#156522] -- Fixed a bug where the "atomic" `COPY` command (controlled via the `copy_from_atomic_enabled` session setting, `true` by default) could encounter `RETRY_COMMIT_DEADLINE_EXCEEDED` transaction errors if the whole command took 1 minute or more. This bug occurred only when the vectorized engine was used for `COPY`. [#156694][#156694] -- Attempting to create a vector index with the legacy schema changer will now fail gracefully instead of crashing the node. [#156781][#156781] -- Fixed a bug that could cause internal errors for queries using generic query plans with `NULL` placeholder values. [#156978][#156978] -- Fixed a bug where CockroachDB could encounter an internal error when evaluating a `COPY FROM` command in a transaction after it was rolled back to a savepoint. The bug was present since before v23.2. [#157038][#157038] -- Fixed a bug where CockroachDB could encounter a `vector encoder doesn't support ForcePut yet` error when executing `COPY` commands concurrently with certain schema changes. The bug had existed since before v23.2. [#157199][#157199] -- Fixed a bug that could cause a schema change to be stuck in the reverting state if the `infer_rbr_region_col_using_constraint` storage parameter was being set at the same time as adding a constraint that had a foreign key violation. [#157843][#157843] +- A mechanism that prevents unsafe replication changes from causing loss of quorum now functions correctly. An internal function has been fixed to properly return errors, enhancing the reliability of replication safeguards. 
#156522 +- Fixed a bug where the "atomic" `COPY` command (controlled via the `copy_from_atomic_enabled` session setting, `true` by default) could encounter `RETRY_COMMIT_DEADLINE_EXCEEDED` transaction errors if the whole command took 1 minute or more. This bug occurred only when the vectorized engine was used for `COPY`. #156694 +- Attempting to create a vector index with the legacy schema changer will now fail gracefully instead of crashing the node. #156781 +- Fixed a bug that could cause internal errors for queries using generic query plans with `NULL` placeholder values. #156978 +- Fixed a bug where CockroachDB could encounter an internal error when evaluating a `COPY FROM` command in a transaction after it was rolled back to a savepoint. The bug was present since before v23.2. #157038 +- Fixed a bug where CockroachDB could encounter a `vector encoder doesn't support ForcePut yet` error when executing `COPY` commands concurrently with certain schema changes. The bug had existed since before v23.2. #157199 +- Fixed a bug that could cause a schema change to be stuck in the reverting state if the `infer_rbr_region_col_using_constraint` storage parameter was being set at the same time as adding a constraint that had a foreign key violation. #157843

Performance improvements

-- The cost of generic query plans is now calculated based on worst-case selectivities for placeholder equalities (e.g., `x = $1`). This reduces the chance of suboptimal generic query plans being chosen when `plan_cache_mode=auto`. [#156791][#156791] +- The cost of generic query plans is now calculated based on worst-case selectivities for placeholder equalities (e.g., `x = $1`). This reduces the chance of suboptimal generic query plans being chosen when `plan_cache_mode=auto`. #156791

Miscellaneous

- Span config reconciliation jobs no longer fail on the - destination after failover from a PCR stream of a system virtual cluster. [#156811][#156811] - - -[#156791]: https://github.com/cockroachdb/cockroach/pull/156791 -[#156781]: https://github.com/cockroachdb/cockroach/pull/156781 -[#156978]: https://github.com/cockroachdb/cockroach/pull/156978 -[#157038]: https://github.com/cockroachdb/cockroach/pull/157038 -[#157843]: https://github.com/cockroachdb/cockroach/pull/157843 -[#156522]: https://github.com/cockroachdb/cockroach/pull/156522 -[#156694]: https://github.com/cockroachdb/cockroach/pull/156694 -[#157199]: https://github.com/cockroachdb/cockroach/pull/157199 -[#156811]: https://github.com/cockroachdb/cockroach/pull/156811 + destination after failover from a PCR stream of a system virtual cluster. #156811 + + diff --git a/src/current/_includes/releases/v25.3/v25.3.7.md b/src/current/_includes/releases/v25.3/v25.3.7.md index 3a0c895129c..a368b230cb5 100644 --- a/src/current/_includes/releases/v25.3/v25.3.7.md +++ b/src/current/_includes/releases/v25.3/v25.3.7.md @@ -6,32 +6,23 @@ Release Date: January 9, 2026

Bug fixes

-- Fixed a bug where a SQL statement with side effects (e.g., `INSERT`) inside a PL/pgSQL routine could be dropped if it used an `INTO` clause and none of the target variables were referenced. This bug had been present since v23.2. [#158345][#158345] +- Fixed a bug where a SQL statement with side effects (e.g., `INSERT`) inside a PL/pgSQL routine could be dropped if it used an `INTO` clause and none of the target variables were referenced. This bug had been present since v23.2. #158345 - Fixed a bug that could cause incorrect query results when using prepared statements with _NULL_ placeholders. The bug has existed since v21.2 and violated SQL _NULL_-equality semantics by returning rows with _NULL_ values when the result set should have been empty. From v21.2 to v25.3, the bug occurred when all of the following were true: - The query was run with an explicit or implicit prepared statement - The query had an equality filter on a placeholder and a `UNIQUE` column - The column contained _NULL_ values - The placeholder was assigned to _NULL_ during execution - - Starting in v25.4, the requirements to trigger the bug were loosened: the column no longer needed to be `UNIQUE`, and the bug could be reproduced if the column was included in any index. [#159069][#159069] -- Fixed a bug that allowed columns to be dropped despite being referenced by a routine. This could occur when a column was only referenced as a target column in the `SET` clause of an `UPDATE` statement within the routine. This fix only applies to newly-created routines. In versions prior to v26.1, the fix must be enabled by setting the session variable `prevent_update_set_column_drop`. [#159442][#159442] -- Fixed a bug where creating a routine could create unnecessary column dependencies when the routine references columns through `CHECK` constraints (including those for RLS policies and hash-sharded indexes) or partial index predicates. 
These unnecessary dependencies prevented dropping the column without first dropping the routine. The fix is gated behind the session setting `use_improved_routine_deps_triggers_and_computed_cols`, which is off by default in versions prior to v26.1. [#159442][#159442] -- Fixed a bug that caused newly-created routines to incorrectly prevent dropping columns that were not directly referenced, most notably columns referenced by computed column expressions. The fix is gated behind the session setting `use_improved_routine_deps_triggers_and_computed_cols`, which is off by default prior to v26.1. [#159442][#159442] -- Fixed a bug that could cause a panic during changefeed startup if an error occurred while initializing the metrics controller. [#159547][#159547] -- Fixed a bug causing a query predicate to be ignored when the predicate was on a column following one or more `ENUM` columns in an index, the predicate constrained the column to multiple values, and a lookup join to the index was chosen for the query plan. This bug was introduced in 24.3.0 and has been present in all versions since. [#159776][#159776] -- Fixed a bug where renaming a column that participated in multiple hash-sharded indexes would fail. [#160089][#160089] -- Fixed a deadlock that could occur when a statistics creation task panicked.[#160583][#160583] + - Starting in v25.4, the requirements to trigger the bug were loosened: the column no longer needed to be `UNIQUE`, and the bug could be reproduced if the column was included in any index. #159069 +- Fixed a bug that allowed columns to be dropped despite being referenced by a routine. This could occur when a column was only referenced as a target column in the `SET` clause of an `UPDATE` statement within the routine. This fix only applies to newly-created routines. In versions prior to v26.1, the fix must be enabled by setting the session variable `prevent_update_set_column_drop`. 
#159442 +- Fixed a bug where creating a routine could create unnecessary column dependencies when the routine references columns through `CHECK` constraints (including those for RLS policies and hash-sharded indexes) or partial index predicates. These unnecessary dependencies prevented dropping the column without first dropping the routine. The fix is gated behind the session setting `use_improved_routine_deps_triggers_and_computed_cols`, which is off by default in versions prior to v26.1. #159442 +- Fixed a bug that caused newly-created routines to incorrectly prevent dropping columns that were not directly referenced, most notably columns referenced by computed column expressions. The fix is gated behind the session setting `use_improved_routine_deps_triggers_and_computed_cols`, which is off by default prior to v26.1. #159442 +- Fixed a bug that could cause a panic during changefeed startup if an error occurred while initializing the metrics controller. #159547 +- Fixed a bug causing a query predicate to be ignored when the predicate was on a column following one or more `ENUM` columns in an index, the predicate constrained the column to multiple values, and a lookup join to the index was chosen for the query plan. This bug was introduced in v24.3.0 and has been present in all versions since. #159776 +- Fixed a bug where renaming a column that participated in multiple hash-sharded indexes would fail. #160089 +- Fixed a deadlock that could occur when a statistics creation task panicked. #160583

Performance improvements

-- `AFTER` triggers now use a cache for descriptor lookups of `TG_TABLE_SCHEMA`, which can significantly reduce trigger planning latency. [#159386][#159386] -- Added a new session variable, `distsql_prevent_partitioning_soft_limited_scans`, which, when true, prevents scans with soft limits from being planned as multiple `TableReaders` by the physical planner. This should decrease the initial setup costs of some fully-distributed query plans. [#160604][#160604] +- `AFTER` triggers now use a cache for descriptor lookups of `TG_TABLE_SCHEMA`, which can significantly reduce trigger planning latency. #159386 +- Added a new session variable, `distsql_prevent_partitioning_soft_limited_scans`, which, when true, prevents scans with soft limits from being planned as multiple `TableReaders` by the physical planner. This should decrease the initial setup costs of some fully-distributed query plans. #160604 -[#159442]: https://github.com/cockroachdb/cockroach/pull/159442 -[#159547]: https://github.com/cockroachdb/cockroach/pull/159547 -[#160583]: https://github.com/cockroachdb/cockroach/pull/160583 -[#160604]: https://github.com/cockroachdb/cockroach/pull/160604 -[#158345]: https://github.com/cockroachdb/cockroach/pull/158345 -[#159069]: https://github.com/cockroachdb/cockroach/pull/159069 -[#159776]: https://github.com/cockroachdb/cockroach/pull/159776 -[#160089]: https://github.com/cockroachdb/cockroach/pull/160089 -[#159386]: https://github.com/cockroachdb/cockroach/pull/159386 diff --git a/src/current/_includes/releases/v25.4/backward-incompatible.md b/src/current/_includes/releases/v25.4/backward-incompatible.md index 87eacf5d216..9826c0c2da9 100644 --- a/src/current/_includes/releases/v25.4/backward-incompatible.md +++ b/src/current/_includes/releases/v25.4/backward-incompatible.md @@ -1,13 +1,10 @@ - `bulkio.backup.deprecated_full_backup_with_subdir.enabled` - Removed the `bulkio.backup.deprecated_full_backup_with_subdir.enabled` cluster setting. 
This optional ability to specify a target subdirectory with the `BACKUP` command when creating a full backup was deprecated in v22.1. [#153628][#153628] + Removed the `bulkio.backup.deprecated_full_backup_with_subdir.enabled` cluster setting. This optional ability to specify a target subdirectory with the `BACKUP` command when creating a full backup was deprecated in v22.1. #153628 - `sql.schema.approx_max_object_count` (default: `20000`) - Added cluster setting `sql.schema.approx_max_object_count` to prevent creation of new schema objects when the limit is exceeded. The check uses cached table statistics for performance and is approximate - it may not be immediately accurate until table statistics are updated by the background statistics refreshing job. Clusters that have been running stably with a larger object count should raise the limit or disable the limit by setting the value to `0`. In future releases, the default value for this setting will be raised as more CockroachDB features support larger object counts. [#154576][#154576] + Added cluster setting `sql.schema.approx_max_object_count` to prevent creation of new schema objects when the limit is exceeded. The check uses cached table statistics for performance and is approximate - it may not be immediately accurate until table statistics are updated by the background statistics refreshing job. Clusters that have been running stably with a larger object count should raise the limit or disable the limit by setting the value to `0`. In future releases, the default value for this setting will be raised as more CockroachDB features support larger object counts. #154576 -- This release includes a fix in Kafka topic creation, resolving a bug where changefeeds using a field or table name containing capital letters would result in the unicode value for a quotation mark `_u0022_` being used in the topic name instead of the intended double quote `"` character. 
As a result of this fix, existing changefeed topics that contain the `_u0022_` unicode are updated in v25.4 to automatically have the topic name updated to use double quote characters instead. Update your Kafka sinks to account for the topic name change. [#149438][#149438] +- This release includes a fix in Kafka topic creation, resolving a bug where changefeeds using a field or table name containing capital letters would result in the unicode value for a quotation mark `_u0022_` being used in the topic name instead of the intended double quote `"` character. As a result of this fix, existing changefeed topics that contain the `_u0022_` unicode are updated in v25.4 to automatically have the topic name updated to use double quote characters instead. Update your Kafka sinks to account for the topic name change. #149438 -[#149438]: https://github.com/cockroachdb/cockroach/pull/149438 -[#153628]: https://github.com/cockroachdb/cockroach/pull/153628 -[#154576]: https://github.com/cockroachdb/cockroach/pull/154576 diff --git a/src/current/_includes/releases/v25.4/cluster-setting-changes.md b/src/current/_includes/releases/v25.4/cluster-setting-changes.md index 4d1daf5cffa..b6e5c976d82 100644 --- a/src/current/_includes/releases/v25.4/cluster-setting-changes.md +++ b/src/current/_includes/releases/v25.4/cluster-setting-changes.md @@ -4,77 +4,61 @@ Changes to [cluster settings]({% link v25.4/cluster-settings.md %}) should be re - `changefeed.progress.frontier_persistence.interval` (default: `30s`) - Changefeeds will now periodically persist their entire span frontiers so that fewer duplicates will need to be emitted during restarts. The default persistence interval is 30s, but this can be configured with the `changefeed.progress.frontier_persistence.interval` cluster setting. [#153491][#153491] + Changefeeds will now periodically persist their entire span frontiers so that fewer duplicates will need to be emitted during restarts. 
The default persistence interval is 30s, but this can be configured with the `changefeed.progress.frontier_persistence.interval` cluster setting. #153491 - `log.channel_compatibility_mode.enabled` (default: `true`) - - In a future major release, changefeed events will be logged to the `CHANGEFEED` logging channel instead of `TELEMETRY`. To test the impact of this change before upgrading, set the cluster setting `log.channel_compatibility_mode.enabled` to `false`. This redirects changefeed logs to the `CHANGEFEED` channel and should be tested only in non-production environments. [#151807][#151807] - - In a future major release, SQL performance events will be logged to the `SQL_EXEC` channel instead of the `SQL_PERF` and `SQL_INTERNAL_PERF` channels. To test the impact of this change, you can set the new cluster setting `log.channel_compatibility_mode.enabled` to `false`. This redirects SQL performance logs to the `SQL_EXEC` channel. This setting should not be used in production environments, as it may affect downstream logging pipelines. [#151827][#151827] - - In a future major release, `sampled_query` and `sampled_transaction` events will move from the `TELEMETRY` channel to the `SQL_EXEC` logging channel. To test for potential logging pipeline impacts of these changes, set `log.channel_compatibility_mode.enabled` to `false`. Avoid testing in production, as this setting changes live log behavior. [#151949][#151949] + - In a future major release, changefeed events will be logged to the `CHANGEFEED` logging channel instead of `TELEMETRY`. To test the impact of this change before upgrading, set the cluster setting `log.channel_compatibility_mode.enabled` to `false`. This redirects changefeed logs to the `CHANGEFEED` channel and should be tested only in non-production environments. #151807 + - In a future major release, SQL performance events will be logged to the `SQL_EXEC` channel instead of the `SQL_PERF` and `SQL_INTERNAL_PERF` channels. 
To test the impact of this change, you can set the new cluster setting `log.channel_compatibility_mode.enabled` to `false`. This redirects SQL performance logs to the `SQL_EXEC` channel. This setting should not be used in production environments, as it may affect downstream logging pipelines. #151827 + - In a future major release, `sampled_query` and `sampled_transaction` events will move from the `TELEMETRY` channel to the `SQL_EXEC` logging channel. To test for potential logging pipeline impacts of these changes, set `log.channel_compatibility_mode.enabled` to `false`. Avoid testing in production, as this setting changes live log behavior. #151949 - `sql.catalog.allow_leased_descriptors.enabled` (default: `false`) - Added the `sql.catalog.allow_leased_descriptors.enabled` cluster setting, which is false by default. When set to true, queries that access the `pg_catalog` or `information_schema` can use cached leased descriptors to populate the data in those tables, with the tradeoff that some of the data could be stale. [#154491][#154491] + Added the `sql.catalog.allow_leased_descriptors.enabled` cluster setting, which is false by default. When set to true, queries that access the `pg_catalog` or `information_schema` can use cached leased descriptors to populate the data in those tables, with the tradeoff that some of the data could be stale. #154491 - `sql.log.scan_row_count_misestimate.enabled` (default: `false`) - Added a cluster setting (`sql.log.scan_row_count_misestimate.enabled`) that enables logging a warning on the gateway node when optimizer estimates for scans are inaccurate. The log message includes the table and index being scanned, the estimated and actual row counts, the time since the last table stats collection, and the table's estimated staleness. [#155123][#155123] + Added a cluster setting (`sql.log.scan_row_count_misestimate.enabled`) that enables logging a warning on the gateway node when optimizer estimates for scans are inaccurate. 
The log message includes the table and index being scanned, the estimated and actual row counts, the time since the last table stats collection, and the table's estimated staleness. #155123 - `sql.stats.error_on_concurrent_create_stats.enabled` (default: `true`) - Introduced the cluster setting `sql.stats.error_on_concurrent_create_stats.enabled`, which modifies how CockroachDB reacts to concurrent auto stats jobs. The default, `true`, maintains the previous behavior. Setting `sql.stats.error_on_concurrent_create_stats.enabled` to `false` will cause the concurrent auto stats job to be skipped with just a log entry and no increased error counters. [#149538][#149538] + Introduced the cluster setting `sql.stats.error_on_concurrent_create_stats.enabled`, which modifies how CockroachDB reacts to concurrent auto stats jobs. The default, `true`, maintains the previous behavior. Setting `sql.stats.error_on_concurrent_create_stats.enabled` to `false` will cause the concurrent auto stats job to be skipped with just a log entry and no increased error counters. #149538 - `sql.trace.txn.include_internal.enabled` (default: `true`) - You can now exclude internal transactions from probabilistic transaction tracing and latency-based logging by setting the `sql.trace.txn.include_internal.enabled` cluster setting to false. This setting is enabled by default to preserve the current behavior, but disabling it is recommended when debugging customer workloads to reduce noise in trace output. [#151433][#151433] + You can now exclude internal transactions from probabilistic transaction tracing and latency-based logging by setting the `sql.trace.txn.include_internal.enabled` cluster setting to false. This setting is enabled by default to preserve the current behavior, but disabling it is recommended when debugging customer workloads to reduce noise in trace output. 
#151433 - `sql.trace.txn.jaeger_json_output.enabled` (default: `false`) - You can now output transaction traces to the logs in Jaeger-compatible JSON format. This is controlled by the `sql.trace.txn.jaeger_json_output.enabled` cluster setting, which is disabled by default. When enabled, traces triggered by probabilistic sampling or statement latency thresholds will be formatted for easier ingestion by tools that support the Jaeger tracing format. [#151414][#151414] + You can now output transaction traces to the logs in Jaeger-compatible JSON format. This is controlled by the `sql.trace.txn.jaeger_json_output.enabled` cluster setting, which is disabled by default. When enabled, traces triggered by probabilistic sampling or statement latency thresholds will be formatted for easier ingestion by tools that support the Jaeger tracing format. #151414 - `storage.unhealthy_write_duration` (default: `20s`) - Added the cluster setting `storage.unhealthy_write_duration` (defaults to 20s), which is used to indicate to the allocator that a store's disk is unhealthy. The cluster setting `kv.allocator.disk_unhealthy_io_overload_score` controls the overload score assigned to a store with an unhealthy disk, where a higher score results in preventing lease or replica transfers to the store, or shedding of leases by the store. The default value of that setting is 0, so the allocator behavior is unaffected. [#154459][#154459] + Added the cluster setting `storage.unhealthy_write_duration` (defaults to 20s), which is used to indicate to the allocator that a store's disk is unhealthy. The cluster setting `kv.allocator.disk_unhealthy_io_overload_score` controls the overload score assigned to a store with an unhealthy disk, where a higher score results in preventing lease or replica transfers to the store, or shedding of leases by the store. The default value of that setting is 0, so the allocator behavior is unaffected. #154459
Settings with changed defaults
-- `feature.vector_index.enabled` now defaults to `true`. Vector indexing is now enabled by default. [#155561][#155561] +- `feature.vector_index.enabled` now defaults to `true`. Vector indexing is now enabled by default. #155561 -- `storage.value_separation.enabled` now defaults to `true`. This enables [value separation]({% link v25.4/architecture/storage-layer.md %}#value-separation) for SSTables, where values exceeding a certain size threshold are stored in separate blob files rather than inline in the SSTable. This helps improve write performance (write amplification) by avoiding rewriting such values during compactions. [#148857][#148857] +- `storage.value_separation.enabled` now defaults to `true`. This enables [value separation]({% link v25.4/architecture/storage-layer.md %}#value-separation) for SSTables, where values exceeding a certain size threshold are stored in separate blob files rather than inline in the SSTable. This helps improve write performance (write amplification) by avoiding rewriting such values during compactions. #148857
Removed settings
- `bulkio.backup.deprecated_full_backup_with_subdir.enabled` - Removed the `bulkio.backup.deprecated_full_backup_with_subdir.enabled` cluster setting. This optional ability to specify a target subdirectory with the `BACKUP` command when creating a full backup was deprecated in v22.1. [#153628][#153628] + Removed the `bulkio.backup.deprecated_full_backup_with_subdir.enabled` cluster setting. This optional ability to specify a target subdirectory with the `BACKUP` command when creating a full backup was deprecated in v22.1. #153628 - `storage.columnar_blocks.enabled` {% comment %}TODO: Verify with jbowens - Is this backward-incompatible since it can't be disabled anymore?{% endcomment %} - Removed the `storage.columnar_blocks.enabled` cluster setting; columnar blocks are always enabled. [#149371][#149371] + Removed the `storage.columnar_blocks.enabled` cluster setting; columnar blocks are always enabled. #149371
Other setting changes
- `sql.ttl.replan_flow_threshold` {% comment %}Verify with spilchen{% endcomment %} - Updated TTL job replanning to be less sensitive by focusing specifically on detecting when nodes become unavailable rather than reacting to all plan differences. The cluster setting `sql.ttl.replan_flow_threshold` may have been set to `0` to work around the TTL replanner being too sensitive; this fix will alleviate that and any instance that had set `replan_flow_threshold` to `0` can be reset back to the default. [#150771][#150771] - -- Updated the redaction policy for cluster settings in `debug zip` output. All "sensitive" settings are now redacted in all debug zips, whether or not redaction is explicitly requested. In redacted debug zips, both "sensitive" and "non-reportable" settings are redacted. This replaces the previous behavior, which redacted all string-type settings only in redacted debug zips. [#150364][#150364] - -- Added a new file, `cluster_settings_history.txt`, to debug zips. This file contains a history of cluster setting changes based on the system event log table. The history is only available while the corresponding events remain in the table. Sensitive settings are always redacted, and non-reportable settings are redacted when the debug zip is generated with redaction enabled. 
[#151066][#151066] - -[#149371]: https://github.com/cockroachdb/cockroach/pull/149371 -[#149538]: https://github.com/cockroachdb/cockroach/pull/149538 -[#150364]: https://github.com/cockroachdb/cockroach/pull/150364 -[#150771]: https://github.com/cockroachdb/cockroach/pull/150771 -[#151066]: https://github.com/cockroachdb/cockroach/pull/151066 -[#151414]: https://github.com/cockroachdb/cockroach/pull/151414 -[#151433]: https://github.com/cockroachdb/cockroach/pull/151433 -[#151807]: https://github.com/cockroachdb/cockroach/pull/151807 -[#151827]: https://github.com/cockroachdb/cockroach/pull/151827 -[#151949]: https://github.com/cockroachdb/cockroach/pull/151949 -[#153491]: https://github.com/cockroachdb/cockroach/pull/153491 -[#153628]: https://github.com/cockroachdb/cockroach/pull/153628 -[#154459]: https://github.com/cockroachdb/cockroach/pull/154459 -[#154491]: https://github.com/cockroachdb/cockroach/pull/154491 -[#155123]: https://github.com/cockroachdb/cockroach/pull/155123 -[#155561]: https://github.com/cockroachdb/cockroach/pull/155561 + Updated TTL job replanning to be less sensitive by focusing specifically on detecting when nodes become unavailable rather than reacting to all plan differences. The cluster setting `sql.ttl.replan_flow_threshold` may have been set to `0` to work around the TTL replanner being too sensitive; this fix will alleviate that and any instance that had set `replan_flow_threshold` to `0` can be reset back to the default. #150771 + +- Updated the redaction policy for cluster settings in `debug zip` output. All "sensitive" settings are now redacted in all debug zips, whether or not redaction is explicitly requested. In redacted debug zips, both "sensitive" and "non-reportable" settings are redacted. This replaces the previous behavior, which redacted all string-type settings only in redacted debug zips. #150364 + +- Added a new file, `cluster_settings_history.txt`, to debug zips. 
This file contains a history of cluster setting changes based on the system event log table. The history is only available while the corresponding events remain in the table. Sensitive settings are always redacted, and non-reportable settings are redacted when the debug zip is generated with redaction enabled. #151066 + diff --git a/src/current/_includes/releases/v25.4/deprecations.md b/src/current/_includes/releases/v25.4/deprecations.md index be5c5b1192e..4197ea14e9f 100644 --- a/src/current/_includes/releases/v25.4/deprecations.md +++ b/src/current/_includes/releases/v25.4/deprecations.md @@ -1,13 +1,9 @@ The following deprecations/removals are announced in v25.4. -- The functionality provided by session variable `enforce_home_region_follower_reads_enabled` was deprecated in v24.2.4 and is now removed. {% comment %}TODO: Verify with michae2 - Is this backward-incompatible?{% endcomment %}(The variable itself remains for backward compatibility but has no effect.) Note that the related session variable `enforce_home_region` is **not** deprecated and still functions normally. [#148314][#148314] +- The functionality provided by session variable `enforce_home_region_follower_reads_enabled` was deprecated in v24.2.4 and is now removed. {% comment %}TODO: Verify with michae2 - Is this backward-incompatible?{% endcomment %}(The variable itself remains for backward compatibility but has no effect.) Note that the related session variable `enforce_home_region` is **not** deprecated and still functions normally. #148314 - The cluster settings `storage.columnar_blocks.enabled` and `bulkio.backup.deprecated_full_backup_with_subdir.enabled` have been removed. For details, refer to [Removed settings](#v25-4-0-settings-removed). -- The bespoke restore and import event logs have been deprecated. For any deployment that is reliant on those logs, use the status change event log which now plumbs the SQL user that owns the job. 
[#153889][#153889] +- The bespoke restore and import event logs have been deprecated. For any deployment that is reliant on those logs, use the status change event log which now plumbs the SQL user that owns the job. #153889 -- The `incremental_location` backup option is now deprecated and will be removed in a future release. This feature was added so customers could define different TTL policies for incremental backups vs full backups. Users can still do this since incremental backups are by default stored in a distinct directory relative to full backups (`{collection_root}/incrementals`). [#153890][#153890] - -[#148314]: https://github.com/cockroachdb/cockroach/pull/148314 -[#153889]: https://github.com/cockroachdb/cockroach/pull/153889 -[#153890]: https://github.com/cockroachdb/cockroach/pull/153890 \ No newline at end of file +- The `incremental_location` backup option is now deprecated and will be removed in a future release. This feature was added so customers could define different TTL policies for incremental backups vs full backups. Users can still do this since incremental backups are by default stored in a distinct directory relative to full backups (`{collection_root}/incrementals`). #153890 diff --git a/src/current/_includes/releases/v25.4/v25.4.0-alpha.1.md b/src/current/_includes/releases/v25.4/v25.4.0-alpha.1.md index 09af6856d1f..3291a1f7cc6 100644 --- a/src/current/_includes/releases/v25.4/v25.4.0-alpha.1.md +++ b/src/current/_includes/releases/v25.4/v25.4.0-alpha.1.md @@ -12,7 +12,7 @@ Release Date: September 17, 2025 name and the fallback *userinfo* JSON key are configurable by `server.jwt_authentication.group_claim` and `server.jwt_authentication.userinfo_group_key` respectively. - The behavior matches the existing LDAP role-sync feature. [#147318][#147318] + The behavior matches the existing LDAP role-sync feature. 
#147318 - CockroachDB can now synchronize SQL role membership from the `groups` claim provided by an OpenID Connect (OIDC) Identity Provider when @@ -22,10 +22,10 @@ Release Date: September 17, 2025 verified ID token and, when available, the access token (if a JWT). Any groups found in either token are combined and deduplicated. If no claim is present in either, the provider's `/userinfo` endpoint is queried for groups, - as a final fallback. [#147706][#147706] + as a final fallback. #147706 - The JWT Authorization settings which - were added in [#147318](https://github.com/cockroachdb/cockroach/pull/147318) are no longer visible to users in v25.3. They - will be re-introduced in v25.4. [#149189][#149189] + were added in #147318 are no longer visible to users in v25.3. They + will be re-introduced in v25.4. #149189 - The following provisioning usability metric counters were added for LDAP-based user provisioning. @@ -36,55 +36,55 @@ Release Date: September 17, 2025 - A counter for the number of auto-provisioned users (`auth.provisioning.ldap.success`). - A telemetry counter for number of logins performed by provisioned users - (`auth.provisioning.login_success`). [#150476][#150476] + (`auth.provisioning.login_success`). #150476

General changes

-- For virtual clusters, hot range logging is now performed by a single job on one node, rather than by tasks on every node. [#145549][#145549] +- For virtual clusters, hot range logging is now performed by a single job on one node, rather than by tasks on every node. #145549 - The CREATE CHANGEFEED statement now supports the `extra_headers` option, which can be used to specify extra headers for webhook and kafka sinks. This can be used to add headers to all - messages sent to the sink. [#146813][#146813] -- Added new metrics: `changefeed.stage.pts.create.latency`, `changefeed.stage.pts.manage.latency`, `changefeed.stage.pts.manage_error.latency`, to measure the performance of managing protected ts records. [#148471][#148471] -- Added an OTLP log sink that exports logs in OpenTelemetry Protocol format over gRPC to compatible targets such as `otel-collector`, Datadog, and Loki. [#148525][#148525] + messages sent to the sink. #146813 +- Added new metrics: `changefeed.stage.pts.create.latency`, `changefeed.stage.pts.manage.latency`, `changefeed.stage.pts.manage_error.latency`, to measure the performance of managing protected ts records. #148471 +- Added an OTLP log sink that exports logs in OpenTelemetry Protocol format over gRPC to compatible targets such as `otel-collector`, Datadog, and Loki. #148525 - Kafka v2 changefeed sinks now support a cluster setting that enables detailed error logging for messages - exceeding Kafka v2 size limit. [#148753][#148753] -- The CockroachDB spatial libraries now rely on GEOS 3.12 instead of GEOS 3.11. [#148859][#148859] -- Changefeeds with the protobuf format now support the `resolved` option for emitting resolved timestamps. [#149622][#149622] + exceeding Kafka v2 size limit. #148753 +- The CockroachDB spatial libraries now rely on GEOS 3.12 instead of GEOS 3.11. #148859 +- Changefeeds with the protobuf format now support the `resolved` option for emitting resolved timestamps. 
#149622 - Changefeeds using the protobuf format - now support wrapped envelopes in kafka sinks [#149696][#149696] + now support wrapped envelopes in kafka sinks #149696 - Restore jobs now log errors on retry to - the job messages table. [#149821][#149821] + the job messages table. #149821 - A warning is now emitted when creating or altering a changefeed with `resolved` or `min_checkpoint_frequency` set below 500ms. This helps - users understand the tradeoff between message latency and cluster CPU usage. [#149975][#149975] + users understand the tradeoff between message latency and cluster CPU usage. #149975 - The protobuf format for changefeeds - now support enriched envelopes. [#150501][#150501] -- Added HTTP mode to the OTLP sink, allowing logs to be exported to OpenTelemetry Protocol (OTLP) targets over HTTP. This enhancement enables agentless deployments, where logs can be sent directly to supported targets like Datadog or Grafana, without requiring an intermediary such as the OpenTelemetry Collector or Datadog Agent. [#150655][#150655] -- Added `headers` configuration option to OTLP log sink. [#150696][#150696] -- CockroachDB spatial libraries now rely on GEOS 3.13 instead of GEOS 3.12. [#151186][#151186] -- Reduced the maximum backoff for changefeed retries from 10 minutes to 1 minute, which results in faster recovery from transient errors. [#146448][#146448] -- Added `changefeed.sink_backpressure_nanos` metric to track time spent waiting for quota when emitting to the sink. [#150666][#150666] -- The download phase of restore operations now will retry downloads before giving up, when faced with an error. [#148821][#148821] -- Fixed a memory accounting issue in the client certificate cache that caused multiple allocations to be reported for the same certificate. The cache now accurately tracks memory usage and includes a safeguard to prevent it from negatively affecting SQL operations. 
[#151041][#151041] -- Fixed a rare bug in restore where an object storage error on restore start could cause restore to report success without creating the restored tables or databases. [#151148][#151148] -- Tuned S3 client retry behavior to be more reliable in the presence of correlated errors. [#151817][#151817] + now supports enriched envelopes. #150501 +- Added HTTP mode to the OTLP sink, allowing logs to be exported to OpenTelemetry Protocol (OTLP) targets over HTTP. This enhancement enables agentless deployments, where logs can be sent directly to supported targets like Datadog or Grafana, without requiring an intermediary such as the OpenTelemetry Collector or Datadog Agent. #150655 +- Added `headers` configuration option to OTLP log sink. #150696 +- CockroachDB spatial libraries now rely on GEOS 3.13 instead of GEOS 3.12. #151186 +- Reduced the maximum backoff for changefeed retries from 10 minutes to 1 minute, which results in faster recovery from transient errors. #146448 +- Added `changefeed.sink_backpressure_nanos` metric to track time spent waiting for quota when emitting to the sink. #150666 +- The download phase of restore operations will now retry downloads before giving up when faced with an error. #148821 +- Fixed a memory accounting issue in the client certificate cache that caused multiple allocations to be reported for the same certificate. The cache now accurately tracks memory usage and includes a safeguard to prevent it from negatively affecting SQL operations. #151041 +- Fixed a rare bug in restore where an object storage error on restore start could cause restore to report success without creating the restored tables or databases. #151148 +- Tuned S3 client retry behavior to be more reliable in the presence of correlated errors. #151817

SQL language changes

-- Implemented the `levenshtein_less_equal(string, string, int)` and `levenshtein_less_equal(string, string, int, int, int, int)` built-in functions, which calculate the Levenshtein distance between two strings. [#104649][#104649] -- The owner of a database can now set default session variables per database using the `ALTER ROLE ALL IN DATABASE ... SET` or `ALTER DATABASE ... SET` commands. [#130547][#130547] +- Implemented the `levenshtein_less_equal(string, string, int)` and `levenshtein_less_equal(string, string, int, int, int, int)` built-in functions, which calculate the Levenshtein distance between two strings. #104649 +- The owner of a database can now set default session variables per database using the `ALTER ROLE ALL IN DATABASE ... SET` or `ALTER DATABASE ... SET` commands. #130547 - Added support for camelCase parameter names (e.g., `SharedAccessKeyName`) in Azure Event Hub Kafka sink - configuration [#144735][#144735] + configuration #144735 - Added a new `PROVISIONSRC` role option. This role option should be prefixed with the HBA auth method for provisioning, i.e. `ldap` followed by the IDP URI, for example `ldap:ldap.example.com`. This is intended to be used only internally for user provisioning and should be - view-only when checking set role options for a user. [#147272][#147272] + view-only when checking set role options for a user. #147272 - Added a new cluster setting `server.provisioning.ldap.enabled` which can be set to `true` to conditionally enable user provisioning during SQL cluster authentication. The user @@ -93,122 +93,122 @@ Release Date: September 17, 2025 privileged to perform SQL authentication and will mandatory have a role option for `PROVISIONSRC` set to `ldap:`. Any group roles that are to be assigned via LDAP authorization must be pre created prior to the authentication - start. [#148200][#148200] + start. #148200 - Added the ability to automatically provision users authenticating via JWT. 
This is controlled by the new cluster setting `security.provisioning.jwt.enabled`. When set to `true`, a successful JWT authentication for a non-existent user will create that user in CockroachDB. The newly created role will have the `PROVISIONSRC` role option set to `jwt_token:`, identifying the - token's issuer as the source of the provisioned user. [#149415][#149415] -- The `CITEXT` data type is now supported, enabling case-insensitive comparisons for `CITEXT` columns. Internally, `CITEXT` is equivalent to using the undetermined level 2 collation `und-u-ks-level2`. For example, under `CITEXT`, the expression `'test' = 'TEST'` returns `TRUE`. [#147864][#147864] -- The functionality provided by session variable `enforce_home_region_follower_reads_enabled` was deprecated in v24.2.4 and is now removed. (The variable itself remains for backward compatibility but has no effect.) Note that the related session variable `enforce_home_region` is **not** deprecated and still functions normally. [#148314][#148314] -- Added support for automatically determining the region column for a `REGIONAL BY ROW` table using a foreign key constraint. The foreign key is specified by setting a new table storage parameter `infer_rbr_region_col_using_constraint`, and must contain the region column. This can be useful for applications that are unable to guarantee that a child row is inserted or updated from the same region as the matching parent row. [#148540][#148540] -- Added support for invoking a UDF from a view query. Renaming or setting the schema on the routine is currently not allowed if it is referenced by a view. [#148616][#148616] -- Updated the `SHOW CREATE FUNCTION` and `SHOW CREATE PROCEDURE` statements to show fully qualified table names rather than assuming they are qualified with the current database. [#148746][#148746] -- Added the `has_system_privilege` builtin function, which can be used to check if a user has the given system privilege. 
[#149051][#149051] -- Updated schema change job status messages to be more user-friendly and descriptive, instead of using internal schema change architecture terminology. [#149096][#149096] -- The logical cluster now uses an external connection and automatically updates its configuration when that connection changes. [#149261][#149261] + token's issuer as the source of the provisioned user. #149415 +- The `CITEXT` data type is now supported, enabling case-insensitive comparisons for `CITEXT` columns. Internally, `CITEXT` is equivalent to using the undetermined level 2 collation `und-u-ks-level2`. For example, under `CITEXT`, the expression `'test' = 'TEST'` returns `TRUE`. #147864 +- The functionality provided by session variable `enforce_home_region_follower_reads_enabled` was deprecated in v24.2.4 and is now removed. (The variable itself remains for backward compatibility but has no effect.) Note that the related session variable `enforce_home_region` is **not** deprecated and still functions normally. #148314 +- Added support for automatically determining the region column for a `REGIONAL BY ROW` table using a foreign key constraint. The foreign key is specified by setting a new table storage parameter `infer_rbr_region_col_using_constraint`, and must contain the region column. This can be useful for applications that are unable to guarantee that a child row is inserted or updated from the same region as the matching parent row. #148540 +- Added support for invoking a UDF from a view query. Renaming or setting the schema on the routine is currently not allowed if it is referenced by a view. #148616 +- Updated the `SHOW CREATE FUNCTION` and `SHOW CREATE PROCEDURE` statements to show fully qualified table names rather than assuming they are qualified with the current database. #148746 +- Added the `has_system_privilege` builtin function, which can be used to check if a user has the given system privilege. 
#149051 +- Updated schema change job status messages to be more user-friendly and descriptive, instead of using internal schema change architecture terminology. #149096 +- The logical cluster now uses an external connection and automatically updates its configuration when that connection changes. #149261 - Fixed a bug where extra quotes or escaped quote characters would be added to topic names in changefeeds. Can be turned off by setting `feature.changefeed.bare_table_names` to - false. [#149438][#149438] + false. #149438 - The users with the role option `PROVISIONSRC` assigned to them will be unable to change their own password overriding any config set for sql.auth.change_own_password.enabled cluster setting. Changing other role options still has the same privilege requirements as before (either CREATEROLE or CREATELOGIN, depending on the option). The role option for PROVISIONSRC is also only assignable and cannot be altered using `ALTER role` - command. [#149463][#149463] -- The session setting `optimizer_prefer_bounded_cardinality` is now enabled by default. This setting instructs the optimizer to prefer query plans where every expression has a guaranteed upper-bound on the number of rows it will process. [#149486][#149486] -- The session setting `optimizer_min_row_count`, which sets a lower bound on row count estimates for relational expressions during query planning, is now set to `1` by default. [#149602][#149602] + command. #149463 +- The session setting `optimizer_prefer_bounded_cardinality` is now enabled by default. This setting instructs the optimizer to prefer query plans where every expression has a guaranteed upper-bound on the number of rows it will process. #149486 +- The session setting `optimizer_min_row_count`, which sets a lower bound on row count estimates for relational expressions during query planning, is now set to `1` by default. #149602 - WITH header_row flag is added to EXPORT. Returns error for non-csv type. 
Another row is prepended to the csv file - with the column names. [#149686][#149686] + with the column names. #149686 - Users can now ALTER EXTERNAL CONNECTION to change the external connection URI when granted UPDATE privilege on EXTERNAL CONNECTION. - Fixes #98610 [#149869][#149869] -- The `json ? string`, `json ?& array`, `json ?| array`, and `array && array` operators are now index-accelerated for `INVERTED JOIN` statements if there is an inverted index on the JSON column referenced on the left-hand side of the expression. [#149898][#149898] -- The `SHOW ROLES` and `SHOW USERS` commands now include an `estimated_last_login_time` column that displays the estimated timestamp of when each user last authenticated to the database. This column shows `NULL` for users who have never logged in, and for existing users after upgrading to v25.3 until their next login. The tracking is performed on a best-effort basis and may not capture every login event. [#150105][#150105] -- The `options` column in the output of `SHOW ROLES` and `SHOW USERS` is now returned as an array of strings (e.g., `{NOLOGIN,CREATEDB}`) rather than as a single comma-separated string. This enables more efficient querying of role options using array functions like `unnest()`. For example: `SELECT * FROM [SHOW ROLES] AS r WHERE EXISTS (SELECT 1 FROM unnest(r.options) AS m(option) WHERE option LIKE 'SUBJECT=cn%');` [#148532][#148532] -- The session setting `optimizer_min_row_count`, which sets a lower bound on row count estimates for relational expressions during query planning, is now set to `1` by default. [#150376][#150376] -- `LTREE` is now supported with ancestry operators and with the `concat` operator. Specifically, CockroachDB now allows `ltree @> ltree`, `ltree[] @> ltree`, `ltree @> ltree[]`, `ltree <@ ltree`, `ltree[] <@ ltree`, and `ltree <@ ltree[]` binary comparisons, as well as `ltree[] ?@> ltree`, `ltree[] ?<@ ltree`, and `ltree || ltree` binary operations. 
The `?@>` and `?<@` are new binary operators that return the first ltree (or `NULL`) that is an ancestor or descendant of the right ltree argument in the array. [#150598][#150598] -- Clusters utilizing cluster virtualization, such as those running Physical Cluster Replication (PCR), apply the same admission control (AC) pacing to various bulk operations used by clusters that are not running with cluster virtualization. [#150633][#150633] -- All PostgreSQL built-in functions for `LTREE` are now supported: `subltree()`, `subpath()`, `nlevel()`, `index()`, `text2ltree()`, `ltree2text()`, and `lca()`. While the `lca()` function in PostgreSQL specifically limits up to 8 LTREE args, the CockroachDB `lca()` function accepts any variable number of ltree args. [#150647][#150647] -- `CREATE USER` and `GRANT` role operations now wait for full-cluster visibility of the new user table version rather than blocking on convergence. [#150747][#150747] -- Improved the optimizer to hoist projections above joins in more cases, which can lead to better query plans. This behavior can be enabled with the new session variable `optimizer_use_improved_hoist_join_project`. [#150887][#150887] -- Previously, using a pausable portal with a procedure call could cause a panic, depending on the function body. Now, transaction control statements such as procedure calls (e.g., `CALL myfunc()`) are disallowed within pausable portals. [#151153][#151153] -- Added the `allow_unsafe_internals` session variable to gate access to system database internals. Default access is allowed to support testing. [#151362][#151362] -- When `sql_safe_updates` is enabled, the `ALTER TABLE ... LOCALITY` statement will be blocked when trying to convert an existing table to `REGIONAL BY ROW`, unless a region column has been added to the table. This protects against undesired behavior that caused `UPDATE` or `DELETE` statements to fail against the table while the locality change was in progress. 
[#151423][#151423] -- Added metrics for statements executed within a stored procedure or function. The following metrics count statements that began execution, including those that failed: `sql_routine_select_started_count`, `sql_routine_update_started_count`, `sql_routine_insert_started_count`, and `sql_routine_delete_started_count`. The following metrics count only successful executions: `sql_routine_select_count`, `sql_routine_update_count`, `sql_routine_insert_count`, and `sql_routine_delete_count`. All counters are global and increment before the transaction is committed or aborted. [#151689][#151689] -- Introduced the `inspect_errors` system table. [#151821][#151821] -- Added a new session variable, `disable_optimizer_rules`, which allows users to provide a comma-separated list of optimizer rules to disable during query optimization. This allows users to avoid rules that are known to create a suboptimal query plan for specific queries. [#151959][#151959] -- The SQL observability statements `SHOW TRANSACTIONS`, `SHOW QUERIES`, and `SHOW SESSIONS` now include an `isolation_level` column that shows the isolation level of the active transaction, or the session's default isolation level when there is no active transaction. [#152352][#152352] -- The default value of `use_soft_limit_for_distribute_scan` session variable is now `true`. This means that, by default, the soft limit (if available) will be used to determine whether a scan is "large" and, thus, should be distributed. For example, with `estimated row count: 100 - 10,000`, CockroachDB will use `100` as the estimate to compare against the value of `distribute_scan_row_count_threshold`. [#152557][#152557] + Fixes #98610 #149869 +- The `json ? string`, `json ?& array`, `json ?| array`, and `array && array` operators are now index-accelerated for `INVERTED JOIN` statements if there is an inverted index on the JSON column referenced on the left-hand side of the expression. 
#149898 +- The `SHOW ROLES` and `SHOW USERS` commands now include an `estimated_last_login_time` column that displays the estimated timestamp of when each user last authenticated to the database. This column shows `NULL` for users who have never logged in, and for existing users after upgrading to v25.3 until their next login. The tracking is performed on a best-effort basis and may not capture every login event. #150105 +- The `options` column in the output of `SHOW ROLES` and `SHOW USERS` is now returned as an array of strings (e.g., `{NOLOGIN,CREATEDB}`) rather than as a single comma-separated string. This enables more efficient querying of role options using array functions like `unnest()`. For example: `SELECT * FROM [SHOW ROLES] AS r WHERE EXISTS (SELECT 1 FROM unnest(r.options) AS m(option) WHERE option LIKE 'SUBJECT=cn%');` #148532 +- The session setting `optimizer_min_row_count`, which sets a lower bound on row count estimates for relational expressions during query planning, is now set to `1` by default. #150376 +- `LTREE` is now supported with ancestry operators and with the `concat` operator. Specifically, CockroachDB now allows `ltree @> ltree`, `ltree[] @> ltree`, `ltree @> ltree[]`, `ltree <@ ltree`, `ltree[] <@ ltree`, and `ltree <@ ltree[]` binary comparisons, as well as `ltree[] ?@> ltree`, `ltree[] ?<@ ltree`, and `ltree || ltree` binary operations. The `?@>` and `?<@` are new binary operators that return the first ltree (or `NULL`) that is an ancestor or descendant of the right ltree argument in the array. #150598 +- Clusters utilizing cluster virtualization, such as those running Physical Cluster Replication (PCR), apply the same admission control (AC) pacing to various bulk operations used by clusters that are not running with cluster virtualization. #150633 +- All PostgreSQL built-in functions for `LTREE` are now supported: `subltree()`, `subpath()`, `nlevel()`, `index()`, `text2ltree()`, `ltree2text()`, and `lca()`. 
While the `lca()` function in PostgreSQL specifically limits up to 8 LTREE args, the CockroachDB `lca()` function accepts any variable number of ltree args. #150647 +- `CREATE USER` and `GRANT` role operations now wait for full-cluster visibility of the new user table version rather than blocking on convergence. #150747 +- Improved the optimizer to hoist projections above joins in more cases, which can lead to better query plans. This behavior can be enabled with the new session variable `optimizer_use_improved_hoist_join_project`. #150887 +- Previously, using a pausable portal with a procedure call could cause a panic, depending on the function body. Now, transaction control statements such as procedure calls (e.g., `CALL myfunc()`) are disallowed within pausable portals. #151153 +- Added the `allow_unsafe_internals` session variable to gate access to system database internals. Default access is allowed to support testing. #151362 +- When `sql_safe_updates` is enabled, the `ALTER TABLE ... LOCALITY` statement will be blocked when trying to convert an existing table to `REGIONAL BY ROW`, unless a region column has been added to the table. This protects against undesired behavior that caused `UPDATE` or `DELETE` statements to fail against the table while the locality change was in progress. #151423 +- Added metrics for statements executed within a stored procedure or function. The following metrics count statements that began execution, including those that failed: `sql_routine_select_started_count`, `sql_routine_update_started_count`, `sql_routine_insert_started_count`, and `sql_routine_delete_started_count`. The following metrics count only successful executions: `sql_routine_select_count`, `sql_routine_update_count`, `sql_routine_insert_count`, and `sql_routine_delete_count`. All counters are global and increment before the transaction is committed or aborted. #151689 +- Introduced the `inspect_errors` system table. 
#151821 +- Added a new session variable, `disable_optimizer_rules`, which allows users to provide a comma-separated list of optimizer rules to disable during query optimization. This allows users to avoid rules that are known to create a suboptimal query plan for specific queries. #151959 +- The SQL observability statements `SHOW TRANSACTIONS`, `SHOW QUERIES`, and `SHOW SESSIONS` now include an `isolation_level` column that shows the isolation level of the active transaction, or the session's default isolation level when there is no active transaction. #152352 +- The default value of `use_soft_limit_for_distribute_scan` session variable is now `true`. This means that, by default, the soft limit (if available) will be used to determine whether a scan is "large" and, thus, should be distributed. For example, with `estimated row count: 100 - 10,000`, CockroachDB will use `100` as the estimate to compare against the value of `distribute_scan_row_count_threshold`. #152557

Operational changes

- The `/health/restart_safety` endpoint indicates - when it is unsafe to terminate a node. [#142930][#142930] -- Added the following cluster settings for configuring blob file rewrite compactions: `storage.value_separation.rewrite_minimum_age` and `storage.value_separation.compaction_garbage_threshold`. [#148782][#148782] -- The default value of `server.mem_profile.total_dump_size_limit` (which controls how much space can be used by automatically collected heap profiles) has been increased from 256MiB to 512MiB. [#148848][#148848] -- Added new experimental values for compression cluster settings to the storage engine. [#148849][#148849] -- The `storage.value_separation.enabled` cluster setting is now enabled by default. This enables value separation for SSTables, where values exceeding a certain size threshold are stored in separate blob files rather than inline in the SSTable. This helps improve write performance (write amplification) by avoiding rewriting such values during compactions. [#148857][#148857] -- A structured event is now logged to the `SQL_SCHEMA` channel when the `REFRESH MATERIALIZED VIEW` statement is executed. [#149153][#149153] -- Removed the `storage.columnar_blocks.enabled` cluster setting; columnar blocks are always enabled. [#149371][#149371] + when it is unsafe to terminate a node. #142930 +- Added the following cluster settings for configuring blob file rewrite compactions: `storage.value_separation.rewrite_minimum_age` and `storage.value_separation.compaction_garbage_threshold`. #148782 +- The default value of `server.mem_profile.total_dump_size_limit` (which controls how much space can be used by automatically collected heap profiles) has been increased from 256MiB to 512MiB. #148848 +- Added new experimental values for compression cluster settings to the storage engine. #148849 +- The `storage.value_separation.enabled` cluster setting is now enabled by default. 
This enables value separation for SSTables, where values exceeding a certain size threshold are stored in separate blob files rather than inline in the SSTable. This helps improve write performance (write amplification) by avoiding rewriting such values during compactions. #148857 +- A structured event is now logged to the `SQL_SCHEMA` channel when the `REFRESH MATERIALIZED VIEW` statement is executed. #149153 +- Removed the `storage.columnar_blocks.enabled` cluster setting; columnar blocks are always enabled. #149371 - A new feature is now available that automatically captures Go execution traces on a scheduled interval. This feature incurs a performance penalty and is generally intended for use under the guidance of Cockroach Labs Support. It can be configured using the following cluster settings: - `obs.execution_tracer.interval`: Enables the tracer and sets the interval for capturing traces. Set to a value greater than 0 to activate. - `obs.execution_tracer.duration`: Specifies the duration for each captured trace. - - `obs.execution_tracer.total_dump_size_limit`: Sets the maximum disk space allowed for storing execution traces. Older traces are automatically deleted when this limit is reached. [#149373][#149373] -- Introduced the cluster setting `sql.stats.error_on_concurrent_create_stats.enabled`, which modifies how CockroachDB reacts to concurrent auto stats jobs. The default, `true`, maintains the previous behavior. Setting `sql.stats.error_on_concurrent_create_stats.enabled` to `false` will cause the concurrent auto stats job to be skipped with just a log entry and no increased error counters. [#149538][#149538] -- The value of `sql.stats.error_on_concurrent_create_stats.enabled` now defaults to `false`, suppressing error counters for auto stats jobs that fail due to concurrent stats jobs in progress. 
[#149848][#149848] -- Updated TTL job replanning to be less sensitive by focusing specifically on detecting when nodes become unavailable rather than reacting to all plan differences. The cluster setting `sql.ttl.replan_flow_threshold` may have been set to `0` to work around the TTL replanner being too sensitive; this fix will alleviate that and any instance that had set `replan_flow_threshold` to `0` can be reset back to the default. [#150771][#150771] -- Added `auth.ldap.conn.latency.internal` metric to denote the internal authentication time for LDAP auth method. [#151105][#151105] -- Introduced two new logging channels: `KV_EXEC` and `CHANGEFEED`. The `KV_EXEC` channel is intended for KV events that do not fall into the `KV_DISTRIBUTION` channel. The `CHANGEFEED` channel is intended for changefeed-related events that are currently logged to the `TELEMETRY` channel. This change does not include logic to move existing logs to the new channels. [#151692][#151692] -- Restricted access to internal tables in the `crdb_internal` schema. Only a predefined allowlist of internal objects is accessible when the session variable `allow_unsafe_internals` is enabled or when the caller is internal. [#151804][#151804] -- In a future major release, changefeed events will be logged to the `CHANGEFEED` logging channel instead of `TELEMETRY`. To test the impact of this change before upgrading, set the cluster setting `log.channel_compatibility_mode.enabled` to `false`. This redirects changefeed logs to the `CHANGEFEED` channel and should be tested only in non-production environments. [#151807][#151807] -- In a future major release, SQL performance events will be logged to the `SQL_EXEC` channel instead of the `SQL_PERF` and `SQL_INTERNAL_PERF` channels. To test the impact of this change, you can set the new cluster setting `log.channel_compatibility_mode.enabled` to `false`. This redirects SQL performance logs to the `SQL_EXEC` channel. 
This setting should not be used in production environments, as it may affect downstream logging pipelines. [#151827][#151827] -- Restricted access to all `crdb_internal` built-ins unless the session variable `allow_unsafe_internals` is set to `true`, or the caller is internal. [#151887][#151887] -- In a future major release, `sampled_query` and `sampled_transaction` events will move from the `TELEMETRY` channel to the `SQL_EXEC` logging channel. To test for potential logging pipeline impacts of these changes, set `log.channel_compatibility_mode.enabled` to `false`. Avoid testing in production, as this setting changes live log behavior. [#151949][#151949] -- Delegate queries (such as `SHOW DATABASES`) are now excluded from unsafe SQL checks that restrict access to the `system` database and `crdb_internal` schema. This change ensures that these commands continue to function even when access to internal components is otherwise restricted. [#152084][#152084] -- The Physical Cluster Replication (PCR) reader tenant is always destroyed on cutover [#152509][#152509] -- `SYSTEM` privileges are inherited in read-only mode in standby Physical Cluster Replication (PCR) clusters. [#149708][#149708] -- You can now output transaction traces to the logs in Jaeger-compatible JSON format. This is controlled by the `sql.trace.txn.jaeger_json_output.enabled` cluster setting, which is disabled by default. When enabled, traces triggered by probabilistic sampling or statement latency thresholds will be formatted for easier ingestion by tools that support the Jaeger tracing format. [#151414][#151414] -- You can now exclude internal transactions from probabilistic transaction tracing and latency-based logging by setting the `sql.trace.txn.include_internal.enabled` cluster setting to false. This setting is enabled by default to preserve the current behavior, but disabling it is recommended when debugging customer workloads to reduce noise in trace output. 
[#151433][#151433] + - `obs.execution_tracer.total_dump_size_limit`: Sets the maximum disk space allowed for storing execution traces. Older traces are automatically deleted when this limit is reached. #149373 +- Introduced the cluster setting `sql.stats.error_on_concurrent_create_stats.enabled`, which modifies how CockroachDB reacts to concurrent auto stats jobs. The default, `true`, maintains the previous behavior. Setting `sql.stats.error_on_concurrent_create_stats.enabled` to `false` will cause the concurrent auto stats job to be skipped with just a log entry and no increased error counters. #149538 +- The value of `sql.stats.error_on_concurrent_create_stats.enabled` now defaults to `false`, suppressing error counters for auto stats jobs that fail due to concurrent stats jobs in progress. #149848 +- Updated TTL job replanning to be less sensitive by focusing specifically on detecting when nodes become unavailable rather than reacting to all plan differences. The cluster setting `sql.ttl.replan_flow_threshold` may have been set to `0` to work around the TTL replanner being too sensitive; this fix will alleviate that and any instance that had set `replan_flow_threshold` to `0` can be reset back to the default. #150771 +- Added `auth.ldap.conn.latency.internal` metric to denote the internal authentication time for LDAP auth method. #151105 +- Introduced two new logging channels: `KV_EXEC` and `CHANGEFEED`. The `KV_EXEC` channel is intended for KV events that do not fall into the `KV_DISTRIBUTION` channel. The `CHANGEFEED` channel is intended for changefeed-related events that are currently logged to the `TELEMETRY` channel. This change does not include logic to move existing logs to the new channels. #151692 +- Restricted access to internal tables in the `crdb_internal` schema. Only a predefined allowlist of internal objects is accessible when the session variable `allow_unsafe_internals` is enabled or when the caller is internal. 
#151804 +- In a future major release, changefeed events will be logged to the `CHANGEFEED` logging channel instead of `TELEMETRY`. To test the impact of this change before upgrading, set the cluster setting `log.channel_compatibility_mode.enabled` to `false`. This redirects changefeed logs to the `CHANGEFEED` channel and should be tested only in non-production environments. #151807 +- In a future major release, SQL performance events will be logged to the `SQL_EXEC` channel instead of the `SQL_PERF` and `SQL_INTERNAL_PERF` channels. To test the impact of this change, you can set the new cluster setting `log.channel_compatibility_mode.enabled` to `false`. This redirects SQL performance logs to the `SQL_EXEC` channel. This setting should not be used in production environments, as it may affect downstream logging pipelines. #151827 +- Restricted access to all `crdb_internal` built-ins unless the session variable `allow_unsafe_internals` is set to `true`, or the caller is internal. #151887 +- In a future major release, `sampled_query` and `sampled_transaction` events will move from the `TELEMETRY` channel to the `SQL_EXEC` logging channel. To test for potential logging pipeline impacts of these changes, set `log.channel_compatibility_mode.enabled` to `false`. Avoid testing in production, as this setting changes live log behavior. #151949 +- Delegate queries (such as `SHOW DATABASES`) are now excluded from unsafe SQL checks that restrict access to the `system` database and `crdb_internal` schema. This change ensures that these commands continue to function even when access to internal components is otherwise restricted. #152084 +- The Physical Cluster Replication (PCR) reader tenant is always destroyed on cutover. #152509 +- `SYSTEM` privileges are inherited in read-only mode in standby Physical Cluster Replication (PCR) clusters. #149708 +- You can now output transaction traces to the logs in Jaeger-compatible JSON format. 
This is controlled by the `sql.trace.txn.jaeger_json_output.enabled` cluster setting, which is disabled by default. When enabled, traces triggered by probabilistic sampling or statement latency thresholds will be formatted for easier ingestion by tools that support the Jaeger tracing format. #151414 +- You can now exclude internal transactions from probabilistic transaction tracing and latency-based logging by setting the `sql.trace.txn.include_internal.enabled` cluster setting to false. This setting is enabled by default to preserve the current behavior, but disabling it is recommended when debugging customer workloads to reduce noise in trace output. #151433

Command-line changes

-- The internal generator used by `cockroach workload` now supports parsing DDL schemas into a structured YAML format, enabling more flexible and detailed workload generation configurations. [#149513][#149513] -- Improved the performance of the `debug zip` query that collects `transaction_contention_events` data. This change reduces the risk of encountering “memory budget exceeded” or “query execution canceled due to statement timeout” errors. [#149570][#149570] -- The `cockroach workload` internals have been updated with built-in generators and wrappers for various SQL types—enabling modular, extensible, and reusable workload data generation. [#149728][#149728] -- Updated the internals of `cockroach workload` so there is one primary CLI entry point for workload generation, wiring together DDL parsing, schema construction, generator factory, and output routines. [#150321][#150321] -- Updated the redaction policy for cluster settings in `debug zip` output. All "sensitive" settings are now redacted in all debug zips, whether or not redaction is explicitly requested. In redacted debug zips, both "sensitive" and "non-reportable" settings are redacted. This replaces the previous behavior, which redacted all string-type settings only in redacted debug zips. [#150364][#150364] -- Added SQL workload extraction and rewriting support to the internals of `cockroach workload`, enabling placeholder‐driven data-generation workflows from CockroachDB debug logs. [#150614][#150614] -- Updated the help text for the `--database` and `--url` CLI flags to document support for virtual cluster syntax. The `--database` flag now shows examples of both simple database names and the `cluster:virtual-cluster/database` format. The `--url` flag examples now include the virtual cluster syntax in PostgreSQL connection URLs. [#150624][#150624] -- Updated `cockroach workload` internals to read init‑time schema and SQL artifacts and run SQL workloads with placeholder‑driven data generation. 
[#150836][#150836] -- Added support for simple `CHECK` constraints and bit/bytes column generators to `cockroach workload`'s workload generator. [#150926][#150926] -- Added a new file, `cluster_settings_history.txt`, to debug zips. This file contains a history of cluster setting changes based on the system event log table. The history is only available while the corresponding events remain in the table. Sensitive settings are always redacted, and non-reportable settings are redacted when the debug zip is generated with redaction enabled. [#151066][#151066] +- The internal generator used by `cockroach workload` now supports parsing DDL schemas into a structured YAML format, enabling more flexible and detailed workload generation configurations. #149513 +- Improved the performance of the `debug zip` query that collects `transaction_contention_events` data. This change reduces the risk of encountering “memory budget exceeded” or “query execution canceled due to statement timeout” errors. #149570 +- The `cockroach workload` internals have been updated with built-in generators and wrappers for various SQL types—enabling modular, extensible, and reusable workload data generation. #149728 +- Updated the internals of `cockroach workload` so there is one primary CLI entry point for workload generation, wiring together DDL parsing, schema construction, generator factory, and output routines. #150321 +- Updated the redaction policy for cluster settings in `debug zip` output. All "sensitive" settings are now redacted in all debug zips, whether or not redaction is explicitly requested. In redacted debug zips, both "sensitive" and "non-reportable" settings are redacted. This replaces the previous behavior, which redacted all string-type settings only in redacted debug zips. #150364 +- Added SQL workload extraction and rewriting support to the internals of `cockroach workload`, enabling placeholder‐driven data-generation workflows from CockroachDB debug logs. 
#150614 +- Updated the help text for the `--database` and `--url` CLI flags to document support for virtual cluster syntax. The `--database` flag now shows examples of both simple database names and the `cluster:virtual-cluster/database` format. The `--url` flag examples now include the virtual cluster syntax in PostgreSQL connection URLs. #150624 +- Updated `cockroach workload` internals to read init‑time schema and SQL artifacts and run SQL workloads with placeholder‑driven data generation. #150836 +- Added support for simple `CHECK` constraints and bit/bytes column generators to `cockroach workload`'s workload generator. #150926 +- Added a new file, `cluster_settings_history.txt`, to debug zips. This file contains a history of cluster setting changes based on the system event log table. The history is only available while the corresponding events remain in the table. Sensitive settings are always redacted, and non-reportable settings are redacted when the debug zip is generated with redaction enabled. #151066

DB Console changes

-- Renamed the 'Hot Ranges' page in the DB Console to 'Top Ranges' to clarify that it shows the highest-ranked ranges by various metrics, not necessarily those experiencing high activity. [#149713][#149713] -- Fixed a bug where **Drop Unused Index** recommendations were not populated on the Schema Insights tab after a hard refresh of the Insights page. [#149838][#149838] -- Updated the DB Console so that the tenant dropdown now appears in insecure mode when multiple virtual clusters are available. [#150535][#150535] +- Renamed the 'Hot Ranges' page in the DB Console to 'Top Ranges' to clarify that it shows the highest-ranked ranges by various metrics, not necessarily those experiencing high activity. #149713 +- Fixed a bug where **Drop Unused Index** recommendations were not populated on the Schema Insights tab after a hard refresh of the Insights page. #149838 +- Updated the DB Console so that the tenant dropdown now appears in insecure mode when multiple virtual clusters are available. #150535

Bug fixes

-- Fixed an issue where hot range logging for virtual clusters omitted some hot ranges. [#143775][#143775] -- Removed unnecessary Kafka topic creation that could cause changefeed startup to fail when using `changefeed.new_kafka_sink_enabled=false`. [#146476][#146476] -- Fixed a bug that would cause a `CALL` statement executed via a portal in the extended wire protocol to result in an error like `unknown portal ""` if the stored procedure contained `COMMIT` or `ROLLBACK` statements. The bug had existed since PL/pgSQL transaction control statements were introduced in v24.1. The fix is off by default in versions prior to v25.3. [#147923][#147923] +- Fixed an issue where hot range logging for virtual clusters omitted some hot ranges. #143775 +- Removed unnecessary Kafka topic creation that could cause changefeed startup to fail when using `changefeed.new_kafka_sink_enabled=false`. #146476 +- Fixed a bug that would cause a `CALL` statement executed via a portal in the extended wire protocol to result in an error like `unknown portal ""` if the stored procedure contained `COMMIT` or `ROLLBACK` statements. The bug had existed since PL/pgSQL transaction control statements were introduced in v24.1. The fix is off by default in versions prior to v25.3. #147923 - Fixed a bug present since v24.1 where the allocator could make rebalancing decisions based on stale data, failing to account for - recent local lease transfers not yet reflected in store capacity or gossip. [#148476][#148476] + recent local lease transfers not yet reflected in store capacity or gossip. #148476 - A bug where a changefeed that was created before v25.2 could fail after upgrading to v25.2 with the error message `both legacy and current checkpoint set on change aggregator spec` - has now been fixed. [#148617][#148617] -- CockroachDB now supports decoding `VECTOR` and `BOX2D` types from the binary format of the PostgreSQL extended protocol (pgwire). [#148719][#148719] + has now been fixed. 
#148617 +- CockroachDB now supports decoding `VECTOR` and `BOX2D` types from the binary format of the PostgreSQL extended protocol (pgwire). #148719 - The `RESET ALL` statement no longer affects the following session variables: - `is_superuser` - `role` @@ -218,237 +218,79 @@ Release Date: September 17, 2025 - `transaction_status` - `transaction_read_only` - This better matches PostgreSQL behavior for `RESET ALL`. In addition, the `DISCARD ALL` statement no longer errors when `default_transaction_use_follower_reads` is enabled. [#148770][#148770] -- CockroachDB now prohibits `ORDER BY` and join equality operations on `REFCURSOR` types, matching PostgreSQL behavior. [#148863][#148863] -- Previously, CockroachDB could hit an internal error when performing a `DELETE`, `UPDATE`, or `UPSERT` where the initial scan of the mutation is locking and is on a table different from the one being mutated. A possible workaround was `SET enable_implicit_select_for_update = false`, but this could increase contention. The bug was introduced in v25.2 and is now fixed. [#149093][#149093] + This better matches PostgreSQL behavior for `RESET ALL`. In addition, the `DISCARD ALL` statement no longer errors when `default_transaction_use_follower_reads` is enabled. #148770 +- CockroachDB now prohibits `ORDER BY` and join equality operations on `REFCURSOR` types, matching PostgreSQL behavior. #148863 +- Previously, CockroachDB could hit an internal error when performing a `DELETE`, `UPDATE`, or `UPSERT` where the initial scan of the mutation is locking and is on a table different from the one being mutated. A possible workaround was `SET enable_implicit_select_for_update = false`, but this could increase contention. The bug was introduced in v25.2 and is now fixed. #149093 - Fixes a race condition when advancing a changefeed aggregator's frontier. When hit, the race condition could result in an internal error that would shut down the kvfeed and cause - the changefeed to retry. 
[#149119][#149119] -- CockroachDB now supports case-insensitive matching for keyword identifiers in JSONPath queries. Note that the special identifiers `TRUE`, `FALSE`, and `NULL` are parsed case-insensitively in CockroachDB, but are case-sensitive in PostgreSQL. For example, `SELECT '$.active == TrUe'::jsonpath;` succeeds in CockroachDB, but fails in PostgreSQL. [#149251][#149251] -- In v25.1, automatic partial statistics collection was enabled by default (by setting the `sql.stats.automatic_partial_collection.enabled` cluster setting to `true`). Partial statistics collection may encounter certain expected scenarios that were previously reported as failed stats jobs with PostgreSQL error code `55000`. These errors are benign and are no longer reported. Instead, the stats job will be marked as "succeeded," though no new statistics will be created. [#149279][#149279] -- Fixed a minor bug that caused inconsistent behavior with the very rarely used `"char"` type (distinct from `CHAR`). [#149433][#149433] -- CockroachDB now allows `EXPLAIN` of mutation statements in read-only transaction mode, matching PostgreSQL behavior. Note that `EXPLAIN ANALYZE` of mutations is still disallowed, since this variant actually executes the statement. [#149449][#149449] -- Fixed an issue where some SQL metrics were not reported when `server.child_metrics.enabled` was enabled, `server.child_metrics.include_aggregate.enabled` was disabled, and `sql.metrics.application_name.enabled` and `sql.metrics.database_name.enabled` were also disabled. Specifically, metrics with no children now report their aggregate metrics regardless of the `server.child_metrics.include_aggregate.enabled` cluster setting. [#149540][#149540] + the changefeed to retry. #149119 +- CockroachDB now supports case-insensitive matching for keyword identifiers in JSONPath queries. 
Note that the special identifiers `TRUE`, `FALSE`, and `NULL` are parsed case-insensitively in CockroachDB, but are case-sensitive in PostgreSQL. For example, `SELECT '$.active == TrUe'::jsonpath;` succeeds in CockroachDB, but fails in PostgreSQL. #149251 +- In v25.1, automatic partial statistics collection was enabled by default (by setting the `sql.stats.automatic_partial_collection.enabled` cluster setting to `true`). Partial statistics collection may encounter certain expected scenarios that were previously reported as failed stats jobs with PostgreSQL error code `55000`. These errors are benign and are no longer reported. Instead, the stats job will be marked as "succeeded," though no new statistics will be created. #149279 +- Fixed a minor bug that caused inconsistent behavior with the very rarely used `"char"` type (distinct from `CHAR`). #149433 +- CockroachDB now allows `EXPLAIN` of mutation statements in read-only transaction mode, matching PostgreSQL behavior. Note that `EXPLAIN ANALYZE` of mutations is still disallowed, since this variant actually executes the statement. #149449 +- Fixed an issue where some SQL metrics were not reported when `server.child_metrics.enabled` was enabled, `server.child_metrics.include_aggregate.enabled` was disabled, and `sql.metrics.application_name.enabled` and `sql.metrics.database_name.enabled` were also disabled. Specifically, metrics with no children now report their aggregate metrics regardless of the `server.child_metrics.include_aggregate.enabled` cluster setting. #149540 - Fixed a bug where database login could fail during LDAP, JWT, or OIDC authentication if the user's external group memberships did not correspond to any existing roles in the database. The login will now succeed, and no roles will be granted or - revoked in this scenario. [#149638][#149638] -- Fixed a slow memory leak that was introduced in v25.1.8, v25.2.1, v25.2.2, and v25.3 betas. 
The leak would accumulate whenever a node executed a part of the distributed plan (although the gateway node of the plan was not affected), and could only be mitigated by restarting the node. [#149800][#149800] -- Attempting to create a vector index with the legacy schema changer will now fail gracefully instead of crashing the node. [#149812][#149812] -- Improved split and scatter behavior for `CREATE INDEX` when statistics are available for key columns. [#150238][#150238] -- Fixed a bug that was preventing the row-level TTL table storage parameters (e.g., `ttl_select_batch_size`, `ttl_delete_batch_size`, `ttl_delete_rate_limit`, `ttl_select_rate_limit`) from being set to `0`, which is their default value. [#150253][#150253] -- Fixed an issue where discarding zone configs on sequences did not actually remove the configuration. [#150255][#150255] + revoked in this scenario. #149638 +- Fixed a slow memory leak that was introduced in v25.1.8, v25.2.1, v25.2.2, and v25.3 betas. The leak would accumulate whenever a node executed a part of the distributed plan (although the gateway node of the plan was not affected), and could only be mitigated by restarting the node. #149800 +- Attempting to create a vector index with the legacy schema changer will now fail gracefully instead of crashing the node. #149812 +- Improved split and scatter behavior for `CREATE INDEX` when statistics are available for key columns. #150238 +- Fixed a bug that was preventing the row-level TTL table storage parameters (e.g., `ttl_select_batch_size`, `ttl_delete_batch_size`, `ttl_delete_rate_limit`, `ttl_select_rate_limit`) from being set to `0`, which is their default value. #150253 +- Fixed an issue where discarding zone configs on sequences did not actually remove the configuration. 
#150255 - Fixed a bug where modifying a changefeed with `ALTER CHANGEFEED` that either unset or left the `gc_protect_expires_after` option unset would cause the changefeed's max PTS age to become unbounded instead of being set to the default value configured by the `changefeed.protect_timestamp.max_age` - cluster setting. [#150283][#150283] -- Fixed a bug that would allow a race condition in foreign key cascades under `READ COMMITTED` and `REPEATABLE READ` isolation levels. [#150291][#150291] -- Fixed a bug where the entire schema would become inaccessible if a table was referenced as an implicit record type by a user-defined function (UDF) while the table was undergoing an `IMPORT`. [#150350][#150350] -- Fixed invalid zone configurations that were generated when adding a super region to a 3-region database with a secondary region and region survivability. Previously, this could result in assigning more than the allowed number of replicas. [#150413][#150413] -- Addressed a bug on `schema_locked` tables when a column is dropped, and `schema_locked` is toggled for the user. [#150435][#150435] -- Fixed the `pg_catalog.pg_type` enties for the "any" and "trigger" pseudotypes. [#150777][#150777] -- Fixed an issue where Row Level Security (RLS) policies with missing dependencies during table-level restores could cause inconsistent state or restore failures. [#151045][#151045] -- Fixed a bug that could cause some errors returned by attempts to upload backup data to external storage providers to go undetected, potentially causing incomplete backups. [#151058][#151058] -- Previously, CockroachDB could encounter an internal error `trying to add a column of UNKNOWN type at ...` in rare cases when handling `CASE` or `OR` operations. This bug was present since v20.2 and is now fixed. [#151093][#151093] -- Fixed a bug where `debug.zip` files collected from clusters with `disallow_full_table_scans` enabled were missing system table data. [#151185][#151185] + cluster setting. 
#150283 +- Fixed a bug that would allow a race condition in foreign key cascades under `READ COMMITTED` and `REPEATABLE READ` isolation levels. #150291 +- Fixed a bug where the entire schema would become inaccessible if a table was referenced as an implicit record type by a user-defined function (UDF) while the table was undergoing an `IMPORT`. #150350 +- Fixed invalid zone configurations that were generated when adding a super region to a 3-region database with a secondary region and region survivability. Previously, this could result in assigning more than the allowed number of replicas. #150413 +- Addressed a bug on `schema_locked` tables when a column is dropped, and `schema_locked` is toggled for the user. #150435 +- Fixed the `pg_catalog.pg_type` entries for the "any" and "trigger" pseudotypes. #150777 +- Fixed an issue where Row Level Security (RLS) policies with missing dependencies during table-level restores could cause inconsistent state or restore failures. #151045 +- Fixed a bug that could cause some errors returned by attempts to upload backup data to external storage providers to go undetected, potentially causing incomplete backups. #151058 +- Previously, CockroachDB could encounter an internal error `trying to add a column of UNKNOWN type at ...` in rare cases when handling `CASE` or `OR` operations. This bug was present since v20.2 and is now fixed. #151093 +- Fixed a bug where `debug.zip` files collected from clusters with `disallow_full_table_scans` enabled were missing system table data. #151185 - Fix handling of empty arrays in JSONPath lax mode comparisons. Empty arrays now return false for comparisons in lax mode and - null in strict mode, matching PostgreSQL behavior. [#151226][#151226] -- Fixed a bug where `DROP USER` succeeded even though a role owned default privileges, which could leave invalid privilege entries in the system.
[#151472][#151472] -- Fixed a bug where sequences could lose references to triggers, allowing them to be dropped incorrectly. [#151546][#151546] -- Previously, CockroachDB could incorrectly elevate the number of rows deleted on tables with multiple column families. The bug was present v19.2 and is now fixed. Note that the data was deleted correctly, but the "rows affected" number was wrong. [#151563][#151563] -- Added an automatic repair for dangling or invalid entries in the `system.comments` table. [#151737][#151737] -- Previously, CockroachDB could hit an error `ERROR: span with results after resume span...` when evaluating some queries with `ORDER BY ... DESC` in an edge case. This bug was present since v22.1 and is now fixed. [#151774][#151774] -- Fixed a bug where updating column default expressions would incorrectly remove sequence ownerships for the affected column. [#151947][#151947] -- Fixed a bug where executing certain statements with `BETWEEN SYMMETRIC` expressions could panic if used with values of different types, such as `... b'bytes' BETWEEN SYMMETRIC 'a' AND 'c'`. [#151951][#151951] -- Fixed a bug where `SHOW TABLES` would show inaccurate row counts if the most recent statistics collection was partial. [#152033][#152033] -- Fixed a bug that prevented `RESTORE` from working if there were computed columns or `ON UPDATE` expressions that referenced user-defined functions (UDFs). This bug was introduced in v25.3.0. [#152193][#152193] + null in strict mode, matching PostgreSQL behavior. #151226 +- Fixed a bug where `DROP USER` succeeded even though a role owned default privileges, which could leave invalid privilege entries in the system. #151472 +- Fixed a bug where sequences could lose references to triggers, allowing them to be dropped incorrectly. #151546 +- Previously, CockroachDB could incorrectly elevate the number of rows deleted on tables with multiple column families. The bug was present since v19.2 and is now fixed.
Note that the data was deleted correctly, but the "rows affected" number was wrong. #151563 +- Added an automatic repair for dangling or invalid entries in the `system.comments` table. #151737 +- Previously, CockroachDB could hit an error `ERROR: span with results after resume span...` when evaluating some queries with `ORDER BY ... DESC` in an edge case. This bug was present since v22.1 and is now fixed. #151774 +- Fixed a bug where updating column default expressions would incorrectly remove sequence ownerships for the affected column. #151947 +- Fixed a bug where executing certain statements with `BETWEEN SYMMETRIC` expressions could panic if used with values of different types, such as `... b'bytes' BETWEEN SYMMETRIC 'a' AND 'c'`. #151951 +- Fixed a bug where `SHOW TABLES` would show inaccurate row counts if the most recent statistics collection was partial. #152033 +- Fixed a bug that prevented `RESTORE` from working if there were computed columns or `ON UPDATE` expressions that referenced user-defined functions (UDFs). This bug was introduced in v25.3.0. #152193 - Fixed a bug that allowed foreign-key violations to result from some combinations of concurrent `READ COMMITTED` and `SERIALIZABLE` transactions. If both `SERIALIZABLE` and weaker-isolation transactions will concurrently modify rows involved in foreign-key relationships, the `SERIALIZABLE` transactions must have the following session variables set in order to prevent any possible foreign-key violations: - `SET enable_implicit_fk_locking_for_serializable = on;` - `SET enable_shared_locking_for_serializable = on;` - - `SET enable_durable_locking_for_serializable = on;` [#152245][#152245] -- Added the `use_soft_limit_for_distribute_scan` session variable (default: `false`), which controls whether CockroachDB uses the soft row count estimate when deciding whether an execution plan should be distributed. 
In v25.1, the physical planning heuristics were changed such that large constrained table scans, estimated to scan at least 10,000 rows (controlled via `distribute_scan_row_count_threshold`), would force plan distribution when `distsql=auto`. However, if the scan had a "soft limit" CockroachDB would still use the full estimate (for example, `10,000` in `estimated row count: 100–10,000`), sometimes unnecessarily distributing queries and increasing latency. The `use_soft_limit_for_distribute_scan` session variable addresses this by allowing the planner to use the soft limit when deciding whether a scan is "large". [#152300][#152300] -- `pg_class.pg_depend` now contains entries with `deptype='i'` (internal) for identity columns that own sequences. These previously had `deptype='a'` (auto). [#152309][#152309] -- Fixed a bug that caused an error when dropping a column and a `UNIQUE WITHOUT INDEX` constraint that referenced it in the same transaction. [#152447][#152447] -- Fixed a bug where views could not reference the `crdb_region` column from their underlying tables in expressions. [#152670][#152670] + - `SET enable_durable_locking_for_serializable = on;` #152245 +- Added the `use_soft_limit_for_distribute_scan` session variable (default: `false`), which controls whether CockroachDB uses the soft row count estimate when deciding whether an execution plan should be distributed. In v25.1, the physical planning heuristics were changed such that large constrained table scans, estimated to scan at least 10,000 rows (controlled via `distribute_scan_row_count_threshold`), would force plan distribution when `distsql=auto`. However, if the scan had a "soft limit" CockroachDB would still use the full estimate (for example, `10,000` in `estimated row count: 100–10,000`), sometimes unnecessarily distributing queries and increasing latency. 
The `use_soft_limit_for_distribute_scan` session variable addresses this by allowing the planner to use the soft limit when deciding whether a scan is "large". #152300 +- `pg_class.pg_depend` now contains entries with `deptype='i'` (internal) for identity columns that own sequences. These previously had `deptype='a'` (auto). #152309 +- Fixed a bug that caused an error when dropping a column and a `UNIQUE WITHOUT INDEX` constraint that referenced it in the same transaction. #152447 +- Fixed a bug where views could not reference the `crdb_region` column from their underlying tables in expressions. #152670

Performance improvements

-- Some queries with filters of the form `x IS NOT DISTINCT FROM y` now have more optimal query plans. [#146494][#146494] -- Mutation statements (`UPDATE` and `DELETE`) that perform lookup joins into multi-region tables (perhaps as part of a `CASCADE`) are now more likely to parallelize the lookups across ranges, improving their performance. [#148186][#148186] -- `LIKE` filter expressions of the form `x LIKE '%'` are now normalized to `TRUE` if `x` is non-`NULL` within a `SELECT` expression. [#148763][#148763] -- Filters of the form `x LIKE '%'` are now normalized to `x IS NOT NULL`, enabling performance improvements on both nullable and non-nullable columns. Previously, such filters were normalized directly to `TRUE`, which only applied to non-`NULL` columns. [#149614][#149614] -- Updated the storage engine to reduce write amplification by storing Raft log values in separate blob files. This reduces write bandwidth, especially on stores with many replicas. This in turn can increase throughput and reduce latency. This behavior is active as long as the `storage.value_separation.enabled` cluster setting is enabled. [#149712][#149712] -- Improved the efficiency and throughput of catch-up scans used by Change Data Capture (CDC) and Physical Cluster Replication (PCR) in cases where substantial catch-up work is required. [#150738][#150738] -- Certain types of simple queries on tables with row-level security enabled are now more efficiently executed. [#151337][#151337] -- `LTREE` is now index-accelerated with the `@>` operator. [#152353][#152353] -- `LTREE` is now index-accelerated with the `<@` operator. [#152353][#152353] -- Lookup joins can now be used on tables with virtual columns even if the type of the search argument is not identical to the column type referenced in the virtual column. [#152399][#152399] +- Some queries with filters of the form `x IS NOT DISTINCT FROM y` now have more optimal query plans. 
#146494 +- Mutation statements (`UPDATE` and `DELETE`) that perform lookup joins into multi-region tables (perhaps as part of a `CASCADE`) are now more likely to parallelize the lookups across ranges, improving their performance. #148186 +- `LIKE` filter expressions of the form `x LIKE '%'` are now normalized to `TRUE` if `x` is non-`NULL` within a `SELECT` expression. #148763 +- Filters of the form `x LIKE '%'` are now normalized to `x IS NOT NULL`, enabling performance improvements on both nullable and non-nullable columns. Previously, such filters were normalized directly to `TRUE`, which only applied to non-`NULL` columns. #149614 +- Updated the storage engine to reduce write amplification by storing Raft log values in separate blob files. This reduces write bandwidth, especially on stores with many replicas. This in turn can increase throughput and reduce latency. This behavior is active as long as the `storage.value_separation.enabled` cluster setting is enabled. #149712 +- Improved the efficiency and throughput of catch-up scans used by Change Data Capture (CDC) and Physical Cluster Replication (PCR) in cases where substantial catch-up work is required. #150738 +- Certain types of simple queries on tables with row-level security enabled are now more efficiently executed. #151337 +- `LTREE` is now index-accelerated with the `@>` operator. #152353 +- `LTREE` is now index-accelerated with the `<@` operator. #152353 +- Lookup joins can now be used on tables with virtual columns even if the type of the search argument is not identical to the column type referenced in the virtual column. #152399

Build changes

-- Upgraded to Go 1.23.12 [#152207][#152207] +- Upgraded to Go 1.23.12 #152207 -[#152399]: https://github.com/cockroachdb/cockroach/pull/152399 -[#150655]: https://github.com/cockroachdb/cockroach/pull/150655 -[#144735]: https://github.com/cockroachdb/cockroach/pull/144735 -[#149463]: https://github.com/cockroachdb/cockroach/pull/149463 -[#149513]: https://github.com/cockroachdb/cockroach/pull/149513 -[#150350]: https://github.com/cockroachdb/cockroach/pull/150350 -[#152023]: https://github.com/cockroachdb/cockroach/pull/152023 -[#148782]: https://github.com/cockroachdb/cockroach/pull/148782 -[#150321]: https://github.com/cockroachdb/cockroach/pull/150321 -[#149838]: https://github.com/cockroachdb/cockroach/pull/149838 -[#149119]: https://github.com/cockroachdb/cockroach/pull/149119 -[#146494]: https://github.com/cockroachdb/cockroach/pull/146494 -[#146813]: https://github.com/cockroachdb/cockroach/pull/146813 -[#151821]: https://github.com/cockroachdb/cockroach/pull/151821 -[#149800]: https://github.com/cockroachdb/cockroach/pull/149800 -[#148753]: https://github.com/cockroachdb/cockroach/pull/148753 -[#146448]: https://github.com/cockroachdb/cockroach/pull/146448 -[#150647]: https://github.com/cockroachdb/cockroach/pull/150647 -[#150535]: https://github.com/cockroachdb/cockroach/pull/150535 -[#151951]: https://github.com/cockroachdb/cockroach/pull/151951 -[#150738]: https://github.com/cockroachdb/cockroach/pull/150738 -[#145549]: https://github.com/cockroachdb/cockroach/pull/145549 -[#148859]: https://github.com/cockroachdb/cockroach/pull/148859 -[#148849]: https://github.com/cockroachdb/cockroach/pull/148849 -[#151105]: https://github.com/cockroachdb/cockroach/pull/151105 -[#149570]: https://github.com/cockroachdb/cockroach/pull/149570 -[#146476]: https://github.com/cockroachdb/cockroach/pull/146476 -[#151337]: https://github.com/cockroachdb/cockroach/pull/151337 -[#147318]: https://github.com/cockroachdb/cockroach/pull/147318 -[#150501]: 
https://github.com/cockroachdb/cockroach/pull/150501 -[#151362]: https://github.com/cockroachdb/cockroach/pull/151362 -[#150364]: https://github.com/cockroachdb/cockroach/pull/150364 -[#151066]: https://github.com/cockroachdb/cockroach/pull/151066 -[#149712]: https://github.com/cockroachdb/cockroach/pull/149712 -[#147706]: https://github.com/cockroachdb/cockroach/pull/147706 -[#148525]: https://github.com/cockroachdb/cockroach/pull/148525 -[#149728]: https://github.com/cockroachdb/cockroach/pull/149728 -[#148719]: https://github.com/cockroachdb/cockroach/pull/148719 -[#149279]: https://github.com/cockroachdb/cockroach/pull/149279 -[#152245]: https://github.com/cockroachdb/cockroach/pull/152245 -[#149371]: https://github.com/cockroachdb/cockroach/pull/149371 -[#149638]: https://github.com/cockroachdb/cockroach/pull/149638 -[#151472]: https://github.com/cockroachdb/cockroach/pull/151472 -[#152033]: https://github.com/cockroachdb/cockroach/pull/152033 -[#152193]: https://github.com/cockroachdb/cockroach/pull/152193 -[#152447]: https://github.com/cockroachdb/cockroach/pull/152447 -[#151148]: https://github.com/cockroachdb/cockroach/pull/151148 -[#149438]: https://github.com/cockroachdb/cockroach/pull/149438 -[#150105]: https://github.com/cockroachdb/cockroach/pull/150105 -[#149153]: https://github.com/cockroachdb/cockroach/pull/149153 -[#149713]: https://github.com/cockroachdb/cockroach/pull/149713 -[#148476]: https://github.com/cockroachdb/cockroach/pull/148476 -[#151058]: https://github.com/cockroachdb/cockroach/pull/151058 -[#151737]: https://github.com/cockroachdb/cockroach/pull/151737 -[#149486]: https://github.com/cockroachdb/cockroach/pull/149486 -[#149898]: https://github.com/cockroachdb/cockroach/pull/149898 -[#151110]: https://github.com/cockroachdb/cockroach/pull/151110 -[#104649]: https://github.com/cockroachdb/cockroach/pull/104649 -[#151949]: https://github.com/cockroachdb/cockroach/pull/151949 -[#149373]: 
https://github.com/cockroachdb/cockroach/pull/149373 -[#143775]: https://github.com/cockroachdb/cockroach/pull/143775 -[#148770]: https://github.com/cockroachdb/cockroach/pull/148770 -[#149189]: https://github.com/cockroachdb/cockroach/pull/149189 -[#147272]: https://github.com/cockroachdb/cockroach/pull/147272 -[#149415]: https://github.com/cockroachdb/cockroach/pull/149415 -[#149096]: https://github.com/cockroachdb/cockroach/pull/149096 -[#150633]: https://github.com/cockroachdb/cockroach/pull/150633 -[#151093]: https://github.com/cockroachdb/cockroach/pull/151093 -[#151226]: https://github.com/cockroachdb/cockroach/pull/151226 -[#148186]: https://github.com/cockroachdb/cockroach/pull/148186 -[#151817]: https://github.com/cockroachdb/cockroach/pull/151817 -[#148746]: https://github.com/cockroachdb/cockroach/pull/148746 -[#150614]: https://github.com/cockroachdb/cockroach/pull/150614 -[#150435]: https://github.com/cockroachdb/cockroach/pull/150435 -[#149251]: https://github.com/cockroachdb/cockroach/pull/149251 -[#152300]: https://github.com/cockroachdb/cockroach/pull/152300 -[#150870]: https://github.com/cockroachdb/cockroach/pull/150870 -[#148314]: https://github.com/cockroachdb/cockroach/pull/148314 -[#149051]: https://github.com/cockroachdb/cockroach/pull/149051 -[#149261]: https://github.com/cockroachdb/cockroach/pull/149261 -[#149869]: https://github.com/cockroachdb/cockroach/pull/149869 -[#149538]: https://github.com/cockroachdb/cockroach/pull/149538 -[#151414]: https://github.com/cockroachdb/cockroach/pull/151414 -[#149821]: https://github.com/cockroachdb/cockroach/pull/149821 -[#151153]: https://github.com/cockroachdb/cockroach/pull/151153 -[#150624]: https://github.com/cockroachdb/cockroach/pull/150624 -[#150926]: https://github.com/cockroachdb/cockroach/pull/150926 -[#152309]: https://github.com/cockroachdb/cockroach/pull/152309 -[#149975]: https://github.com/cockroachdb/cockroach/pull/149975 -[#150747]: 
https://github.com/cockroachdb/cockroach/pull/150747 -[#151689]: https://github.com/cockroachdb/cockroach/pull/151689 -[#149449]: https://github.com/cockroachdb/cockroach/pull/149449 -[#148848]: https://github.com/cockroachdb/cockroach/pull/148848 -[#147923]: https://github.com/cockroachdb/cockroach/pull/147923 -[#148863]: https://github.com/cockroachdb/cockroach/pull/148863 -[#149540]: https://github.com/cockroachdb/cockroach/pull/149540 -[#150253]: https://github.com/cockroachdb/cockroach/pull/150253 -[#150413]: https://github.com/cockroachdb/cockroach/pull/150413 -[#150598]: https://github.com/cockroachdb/cockroach/pull/150598 -[#150887]: https://github.com/cockroachdb/cockroach/pull/150887 -[#151423]: https://github.com/cockroachdb/cockroach/pull/151423 -[#142930]: https://github.com/cockroachdb/cockroach/pull/142930 -[#152509]: https://github.com/cockroachdb/cockroach/pull/152509 -[#149614]: https://github.com/cockroachdb/cockroach/pull/149614 -[#151433]: https://github.com/cockroachdb/cockroach/pull/151433 -[#151041]: https://github.com/cockroachdb/cockroach/pull/151041 -[#148200]: https://github.com/cockroachdb/cockroach/pull/148200 -[#149708]: https://github.com/cockroachdb/cockroach/pull/149708 -[#152557]: https://github.com/cockroachdb/cockroach/pull/152557 -[#151807]: https://github.com/cockroachdb/cockroach/pull/151807 -[#152353]: https://github.com/cockroachdb/cockroach/pull/152353 -[#148471]: https://github.com/cockroachdb/cockroach/pull/148471 -[#151186]: https://github.com/cockroachdb/cockroach/pull/151186 -[#150666]: https://github.com/cockroachdb/cockroach/pull/150666 -[#147864]: https://github.com/cockroachdb/cockroach/pull/147864 -[#148857]: https://github.com/cockroachdb/cockroach/pull/148857 -[#149602]: https://github.com/cockroachdb/cockroach/pull/149602 -[#150376]: https://github.com/cockroachdb/cockroach/pull/150376 -[#151959]: https://github.com/cockroachdb/cockroach/pull/151959 -[#150238]: 
https://github.com/cockroachdb/cockroach/pull/150238 -[#152084]: https://github.com/cockroachdb/cockroach/pull/152084 -[#150283]: https://github.com/cockroachdb/cockroach/pull/150283 -[#151546]: https://github.com/cockroachdb/cockroach/pull/151546 -[#151774]: https://github.com/cockroachdb/cockroach/pull/151774 -[#148821]: https://github.com/cockroachdb/cockroach/pull/148821 -[#149622]: https://github.com/cockroachdb/cockroach/pull/149622 -[#151827]: https://github.com/cockroachdb/cockroach/pull/151827 -[#148763]: https://github.com/cockroachdb/cockroach/pull/148763 -[#150771]: https://github.com/cockroachdb/cockroach/pull/150771 -[#151692]: https://github.com/cockroachdb/cockroach/pull/151692 -[#149093]: https://github.com/cockroachdb/cockroach/pull/149093 -[#149433]: https://github.com/cockroachdb/cockroach/pull/149433 -[#149812]: https://github.com/cockroachdb/cockroach/pull/149812 -[#149686]: https://github.com/cockroachdb/cockroach/pull/149686 -[#150375]: https://github.com/cockroachdb/cockroach/pull/150375 -[#148617]: https://github.com/cockroachdb/cockroach/pull/148617 -[#150291]: https://github.com/cockroachdb/cockroach/pull/150291 -[#151185]: https://github.com/cockroachdb/cockroach/pull/151185 -[#151947]: https://github.com/cockroachdb/cockroach/pull/151947 -[#152670]: https://github.com/cockroachdb/cockroach/pull/152670 -[#150476]: https://github.com/cockroachdb/cockroach/pull/150476 -[#151887]: https://github.com/cockroachdb/cockroach/pull/151887 -[#151563]: https://github.com/cockroachdb/cockroach/pull/151563 -[#148576]: https://github.com/cockroachdb/cockroach/pull/148576 -[#148616]: https://github.com/cockroachdb/cockroach/pull/148616 -[#151045]: https://github.com/cockroachdb/cockroach/pull/151045 -[#150696]: https://github.com/cockroachdb/cockroach/pull/150696 -[#148532]: https://github.com/cockroachdb/cockroach/pull/148532 -[#150836]: https://github.com/cockroachdb/cockroach/pull/150836 -[#148540]: 
https://github.com/cockroachdb/cockroach/pull/148540 -[#150255]: https://github.com/cockroachdb/cockroach/pull/150255 -[#152207]: https://github.com/cockroachdb/cockroach/pull/152207 -[#150777]: https://github.com/cockroachdb/cockroach/pull/150777 -[#149696]: https://github.com/cockroachdb/cockroach/pull/149696 -[#130547]: https://github.com/cockroachdb/cockroach/pull/130547 -[#152352]: https://github.com/cockroachdb/cockroach/pull/152352 -[#149848]: https://github.com/cockroachdb/cockroach/pull/149848 -[#151804]: https://github.com/cockroachdb/cockroach/pull/151804 diff --git a/src/current/_includes/releases/v25.4/v25.4.0-alpha.2.md b/src/current/_includes/releases/v25.4/v25.4.0-alpha.2.md index 32dcb5b8f26..4669bb67b64 100644 --- a/src/current/_includes/releases/v25.4/v25.4.0-alpha.2.md +++ b/src/current/_includes/releases/v25.4/v25.4.0-alpha.2.md @@ -6,8 +6,8 @@ Release Date: September 23, 2025

SQL language changes

-- `CREATE USER` and `GRANT` role operations now wait for full-cluster visibility of the new user table version rather than blocking on convergence. [#150747][#150747] -- Introduced the `inspect_errors` system table. [#151821][#151821] +- `CREATE USER` and `GRANT` role operations now wait for full-cluster visibility of the new user table version rather than blocking on convergence. #150747 +- Introduced the `inspect_errors` system table. #151821 - You now manually create single-column partial statistics on boolean predicate expressions that can become simple index scans. These statistics can be created by adding a constraining `WHERE` expression to `CREATE STATISTICS`. For example: @@ -17,25 +17,25 @@ Release Date: September 23, 2025 INSERT INTO t VALUES (1), (2), (3), (4), (5); CREATE STATISTICS constrained_stat ON a FROM t WHERE a > 2; ~~~ - [#152469][#152469] + #152469 -- Added the `bulkio.import.row_count_validation.unsafe.enabled` cluster setting (default: `false`), which triggers an asynchronous `INSPECT` job at the end of an `IMPORT` to validate row counts. [#153294][#153294] +- Added the `bulkio.import.row_count_validation.unsafe.enabled` cluster setting (default: `false`), which triggers an asynchronous `INSPECT` job at the end of an `IMPORT` to validate row counts. #153294

Operational changes

-- Added the cluster setting `kvadmission.use_range_tenant_id_for_non_admin.enabled`, which can be used to disable the behavior where Admission Control uses the range's tenant ID for non-admin requests. This behavior is enabled by default. [#152181][#152181] -- CockroachDB now logs access to internal system tables and schemas considered unsafe (e.g., `crdb_internal` and `system`). A message is emitted to the `SENSITIVE_ACCESS` log channel when a user overrides the `allow_unsafe_internals` setting or is denied access to these areas. [#152532][#152532] +- Added the cluster setting `kvadmission.use_range_tenant_id_for_non_admin.enabled`, which can be used to disable the behavior where Admission Control uses the range's tenant ID for non-admin requests. This behavior is enabled by default. #152181 +- CockroachDB now logs access to internal system tables and schemas considered unsafe (e.g., `crdb_internal` and `system`). A message is emitted to the `SENSITIVE_ACCESS` log channel when a user overrides the `allow_unsafe_internals` setting or is denied access to these areas. #152532 - A new `changefeed` file group that collects - changefeed logs has been added to the default logging configuration. [#153381][#153381] + changefeed logs has been added to the default logging configuration. #153381

Bug fixes

-- Fixed a bug where the `schema_locked` storage parameter was not being enforced on the `TRUNCATE` command, which could cause changefeed jobs to fail. [#152932][#152932] -- Fixed a bug introduced in v21.2 where `IMPORT` operations with multiple CSV files could incorrectly reset the bulk summary after processing the first file, causing the actual progress to be overwritten with a `nil` value. [#153111][#153111] +- Fixed a bug where the `schema_locked` storage parameter was not being enforced on the `TRUNCATE` command, which could cause changefeed jobs to fail. #152932 +- Fixed a bug introduced in v21.2 where `IMPORT` operations with multiple CSV files could incorrectly reset the bulk summary after processing the first file, causing the actual progress to be overwritten with a `nil` value. #153111 - Fixed a bug introduced in v25.1.0 that would cause a node panic if a `SIGINT` signal was sent during the execution of - a `CHECK EXTERNAL CONNECTION` command. [#153380][#153380] -- Fixed a bug where index creation could fail due to validation errors if the schema change was retried or paused/resumed during the backfill. [#153583][#153583] + a `CHECK EXTERNAL CONNECTION` command. #153380 +- Fixed a bug where index creation could fail due to validation errors if the schema change was retried or paused/resumed during the backfill. #153583

Performance improvements

@@ -44,23 +44,10 @@ Release Date: September 23, 2025 fewer duplicates will need to be emitted during restarts. The default persistence interval is 30s, but this can be configured with the `changefeed.progress.frontier_persistence.interval` - cluster setting. [#153491][#153491] + cluster setting. #153491

Miscellaneous

-- CockroachDB now prevents negative values from appearing in network and disk counters collected from the operating system. These values could previously drop below zero due to hardware changes. Affected counters now reset their baseline values automatically. [#153048][#153048] +- CockroachDB now prevents negative values from appearing in network and disk counters collected from the operating system. These values could previously drop below zero due to hardware changes. Affected counters now reset their baseline values automatically. #153048 -[#153111]: https://github.com/cockroachdb/cockroach/pull/153111 -[#153583]: https://github.com/cockroachdb/cockroach/pull/153583 -[#150747]: https://github.com/cockroachdb/cockroach/pull/150747 -[#151821]: https://github.com/cockroachdb/cockroach/pull/151821 -[#153294]: https://github.com/cockroachdb/cockroach/pull/153294 -[#152532]: https://github.com/cockroachdb/cockroach/pull/152532 -[#153380]: https://github.com/cockroachdb/cockroach/pull/153380 -[#153491]: https://github.com/cockroachdb/cockroach/pull/153491 -[#153048]: https://github.com/cockroachdb/cockroach/pull/153048 -[#152469]: https://github.com/cockroachdb/cockroach/pull/152469 -[#152181]: https://github.com/cockroachdb/cockroach/pull/152181 -[#153381]: https://github.com/cockroachdb/cockroach/pull/153381 -[#152932]: https://github.com/cockroachdb/cockroach/pull/152932 diff --git a/src/current/_includes/releases/v25.4/v25.4.0-beta.1.md b/src/current/_includes/releases/v25.4/v25.4.0-beta.1.md index 8e116a494e6..eae72ba995b 100644 --- a/src/current/_includes/releases/v25.4/v25.4.0-beta.1.md +++ b/src/current/_includes/releases/v25.4/v25.4.0-beta.1.md @@ -6,51 +6,34 @@ Release Date: October 1, 2025

SQL language changes

-- The logical cluster now uses an external connection and automatically updates its configuration when that connection changes. [#149261][#149261] -- Included `num_txn_retries` and `num_txn_auto_retries` into the `crdb_internal.{cluster,node}_queries` virtual tables as well as output of SHOW QUERIES. These columns, when not NULL, have the same information as `num_retries` and `num_auto_retries` columns of `crdb_internal.{cluster,node}_transactions` virtual tables for the same transaction in which the active query is executed. [#149503][#149503] -- Tables with vector indexes will no longer be taken offline while the vector index builds. [#151074][#151074] -- Introduced the unimplemented `SHOW INSPECT ERRORS` statement. [#151674][#151674] -- Added a built-in function, `crdb_internal.request_transaction_bundle`, that allows users to request a transaction diagnostics bundle for a specified transaction fingerprint ID. [#153608][#153608] -- Implemented the `pg_get_function_arg_default` builtin function. This also causes the `information_schema.parameters(parameter_default)` column to be populated correctly. [#153625][#153625] +- The logical cluster now uses an external connection and automatically updates its configuration when that connection changes. #149261 +- Included `num_txn_retries` and `num_txn_auto_retries` into the `crdb_internal.{cluster,node}_queries` virtual tables as well as output of SHOW QUERIES. These columns, when not NULL, have the same information as `num_retries` and `num_auto_retries` columns of `crdb_internal.{cluster,node}_transactions` virtual tables for the same transaction in which the active query is executed. #149503 +- Tables with vector indexes will no longer be taken offline while the vector index builds. #151074 +- Introduced the unimplemented `SHOW INSPECT ERRORS` statement. 
#151674 +- Added a built-in function, `crdb_internal.request_transaction_bundle`, that allows users to request a transaction diagnostics bundle for a specified transaction fingerprint ID. #153608 +- Implemented the `pg_get_function_arg_default` builtin function. This also causes the `information_schema.parameters(parameter_default)` column to be populated correctly. #153625

Operational changes

-- Removed the `bulkio.backup.deprecated_full_backup_with_subdir.enabled` cluster setting, since backups will now fail if this is set to true. [#153628][#153628] -- Raised the cache size for the storage engine's block cache to 256 MiB. Note that production systems should always configure this setting. [#153739][#153739] -- Deprecated the bespoke restore and import event logs. For any deployment that is reliant on those logs, use the status change event log which now plumbs the SQL user that owns the job. [#153889][#153889] -- The `incremental_location` option is now deprecated and will be removed in a future release. This feature was added so customers could define different TTL policies for incremental backups vs full backups. Users can still do this since incremental backups are by default stored in a distinct directory relative to full backups ({collection_root}/incrementals). [#153890][#153890] +- Removed the `bulkio.backup.deprecated_full_backup_with_subdir.enabled` cluster setting, since backups will now fail if this is set to true. #153628 +- Raised the cache size for the storage engine's block cache to 256 MiB. Note that production systems should always configure this setting. #153739 +- Deprecated the bespoke restore and import event logs. For any deployment that is reliant on those logs, use the status change event log which now plumbs the SQL user that owns the job. #153889 +- The `incremental_location` option is now deprecated and will be removed in a future release. This feature was added so customers could define different TTL policies for incremental backups vs full backups. Users can still do this since incremental backups are by default stored in a distinct directory relative to full backups ({collection_root}/incrementals). #153890

DB Console changes

-- In the DB Console, the **Active Executions** table on the Statements and Transactions pages now includes a new **Isolation Level** column. The Sessions page also includes a new **Default Isolation Level** column. [#153617][#153617] +- In the DB Console, the **Active Executions** table on the Statements and Transactions pages now includes a new **Isolation Level** column. The Sessions page also includes a new **Default Isolation Level** column. #153617

Bug fixes

-- Fixed a bug where a CockroachDB node could crash when executing DO statements that contain user-defined types (possibly non-existing) in non-default configuration. [#151849][#151849] -- Fixed a deadlock in `DROP COLUMN CASCADE` operations when dropping columns referenced by `STORED` computed columns. [#153683][#153683] -- Fixed a bug where `ALTER POLICY` was incorrectly dropping dependency tracking for functions, sequences, or types in policy expressions. [#153787][#153787] -- Fixed a bug where we would not show the pgwire `RowDescription` for `EXECUTE` statements that were themselves prepared using the pgwire `Parse` command. [#153905][#153905] -- Fixed a runtime error that could be hit if a new secondary index had a name collision with a primary index. [#153986][#153986] +- Fixed a bug where a CockroachDB node could crash when executing DO statements that contain user-defined types (possibly non-existing) in non-default configuration. #151849 +- Fixed a deadlock in `DROP COLUMN CASCADE` operations when dropping columns referenced by `STORED` computed columns. #153683 +- Fixed a bug where `ALTER POLICY` was incorrectly dropping dependency tracking for functions, sequences, or types in policy expressions. #153787 +- Fixed a bug where we would not show the pgwire `RowDescription` for `EXECUTE` statements that were themselves prepared using the pgwire `Parse` command. #153905 +- Fixed a runtime error that could be hit if a new secondary index had a name collision with a primary index. #153986

Miscellaneous

-- Fixed a bug where the presence of duplicate temporary tables in a backup caused the restore to fail with a `restoring table desc and namespace entries: table already exists` error. Informs: #153722 [#153724][#153724] - - -[#153739]: https://github.com/cockroachdb/cockroach/pull/153739 -[#153889]: https://github.com/cockroachdb/cockroach/pull/153889 -[#151849]: https://github.com/cockroachdb/cockroach/pull/151849 -[#153683]: https://github.com/cockroachdb/cockroach/pull/153683 -[#151674]: https://github.com/cockroachdb/cockroach/pull/151674 -[#149503]: https://github.com/cockroachdb/cockroach/pull/149503 -[#151074]: https://github.com/cockroachdb/cockroach/pull/151074 -[#153628]: https://github.com/cockroachdb/cockroach/pull/153628 -[#149261]: https://github.com/cockroachdb/cockroach/pull/149261 -[#153890]: https://github.com/cockroachdb/cockroach/pull/153890 -[#153905]: https://github.com/cockroachdb/cockroach/pull/153905 -[#153608]: https://github.com/cockroachdb/cockroach/pull/153608 -[#153617]: https://github.com/cockroachdb/cockroach/pull/153617 -[#153787]: https://github.com/cockroachdb/cockroach/pull/153787 -[#153986]: https://github.com/cockroachdb/cockroach/pull/153986 -[#153724]: https://github.com/cockroachdb/cockroach/pull/153724 -[#153625]: https://github.com/cockroachdb/cockroach/pull/153625 +- Fixed a bug where the presence of duplicate temporary tables in a backup caused the restore to fail with a `restoring table desc and namespace entries: table already exists` error. Informs: #153722 #153724 + + diff --git a/src/current/_includes/releases/v25.4/v25.4.0-beta.2.md b/src/current/_includes/releases/v25.4/v25.4.0-beta.2.md index 58336d8738a..84e0d230d99 100644 --- a/src/current/_includes/releases/v25.4/v25.4.0-beta.2.md +++ b/src/current/_includes/releases/v25.4/v25.4.0-beta.2.md @@ -7,12 +7,12 @@ Release Date: October 10, 2025

General changes

- The changefeed bulk - delivery setting was made optional. [#154953][#154953] + delivery setting was made optional. #154953

SQL language changes

-- Added the `SHOW INSPECT ERRORS` command. This command can be used to view issues that are identified by running the `INSPECT` command to validate tables and indexes. [#154337][#154337] -- Added the `sql.catalog.allow_leased_descriptors.enabled` cluster setting, which is false by default. When set to true, queries that access the `pg_catalog` or `information_schema` can use cached leased descriptors to populate the data in those tables, with the tradeoff that some of the data could be stale. [#154491][#154491] +- Added the `SHOW INSPECT ERRORS` command. This command can be used to view issues that are identified by running the `INSPECT` command to validate tables and indexes. #154337 +- Added the `sql.catalog.allow_leased_descriptors.enabled` cluster setting, which is false by default. When set to true, queries that access the `pg_catalog` or `information_schema` can use cached leased descriptors to populate the data in those tables, with the tradeoff that some of the data could be stale. #154491 - CockroachDB now supports index acceleration for certain `jsonb_path_exists` filters used in `WHERE` clauses. Given `jsonb_path_exists(json_obj, json_path_expression)`, an inverted index is supported only when `json_path_expression` matches one of the following patterns: - The `json_path_expression` must **not** be in `strict` mode. - Keychain mode: `$.[key|wildcard].[key|wildcard]...` @@ -24,42 +24,26 @@ Release Date: October 10, 2025 - `$[*]` - `$.a.b.c == 12`, `$.a.b.c > 12`, or `$.a.b.c < 12` (operation expressions) - `$.a.b ? (@.a > 10)` (filter with inequality check) - [#154631][#154631] -- The optimizer can now use table statistics that merge the latest full statistic with all newer partial statistics, including those over arbitrary constraints over a single span. 
[#154755][#154755] + #154631 +- The optimizer can now use table statistics that merge the latest full statistic with all newer partial statistics, including those over arbitrary constraints over a single span. #154755

Operational changes

-- Two new changefeed metrics for tracking the max skew between a changefeed's slowest and fastest span/table have been added. The metrics are gauge metrics with the names `changefeed.progress_skew.{span,table}`. [#154166][#154166] -- The metrics `sql.select.started.count`, `sql.insert.started.count`, `sql.update.started.count`, and `sql.delete.started.count` are now emitted with labels under the common metric name `sql.started.count`, using a `query_type` label to distinguish each operation. [#154388][#154388] -- Added the cluster setting `storage.unhealthy_write_duration` (defaults to 20s), which is used to indicate to the allocator that a store's disk is unhealthy. The cluster setting `kv.allocator.disk_unhealthy_io_overload_score` controls the overload score assigned to a store with an unhealthy disk, where a higher score results in preventing lease or replica transfers to the store, or shedding of leases by the store. The default value of that setting is 0, so the allocator behavior is unaffected. [#154459][#154459] -- Added cluster setting `sql.schema.approx_max_object_count` (default: 20,000) to prevent creation of new schema objects when the limit is exceeded. The check uses cached table statistics for performance and is approximate - it may not be immediately accurate until table statistics are updated by the background statistics refreshing job. Clusters that have been running stably with a larger object count should raise the limit or disable the limit by setting the value to 0. In future releases, the default value for this setting will be raised as more CockroachDB features support larger object counts. [#154576][#154576] +- Two new changefeed metrics for tracking the max skew between a changefeed's slowest and fastest span/table have been added. The metrics are gauge metrics with the names `changefeed.progress_skew.{span,table}`. 
#154166 +- The metrics `sql.select.started.count`, `sql.insert.started.count`, `sql.update.started.count`, and `sql.delete.started.count` are now emitted with labels under the common metric name `sql.started.count`, using a `query_type` label to distinguish each operation. #154388 +- Added the cluster setting `storage.unhealthy_write_duration` (defaults to 20s), which is used to indicate to the allocator that a store's disk is unhealthy. The cluster setting `kv.allocator.disk_unhealthy_io_overload_score` controls the overload score assigned to a store with an unhealthy disk, where a higher score results in preventing lease or replica transfers to the store, or shedding of leases by the store. The default value of that setting is 0, so the allocator behavior is unaffected. #154459 +- Added cluster setting `sql.schema.approx_max_object_count` (default: 20,000) to prevent creation of new schema objects when the limit is exceeded. The check uses cached table statistics for performance and is approximate - it may not be immediately accurate until table statistics are updated by the background statistics refreshing job. Clusters that have been running stably with a larger object count should raise the limit or disable the limit by setting the value to 0. In future releases, the default value for this setting will be raised as more CockroachDB features support larger object counts. #154576

Bug fixes

-- Vector index backfill will now properly track job progress in SHOW JOBS output. [#154261][#154261] -- A bug has been fixed that caused panics when executing `COPY` into a table with hidden columns and expression indexes. The panic only occurred when the `expect_and_ignore_not_visible_columns_in_copy` setting was enabled. This bug has been present since `expect_and_ignore_not_visible_columns_in_copy` was introduced in v22.1.0. [#154289][#154289] -- **Idle latency** on the **Transaction Details** page in the DB Console is now reported more accurately. Previously, transactions that used prepared statements (e.g., with placeholders) overcounted idle time, while those that included observer statements (common in the SQL CLI) undercounted it. [#154385][#154385] -- Fixed a bug where `RESTORE` of a database with a `SECONDARY REGION` did not apply the lease preferences for that region. [#154659][#154659] -- A bug where a changefeed could perform many unnecessary job progress saves during an initial scan has been fixed. [#154709][#154709] -- A bug where a changefeed targeting only a subset of a table's column families could become stuck has been fixed. [#154915][#154915] +- Vector index backfill will now properly track job progress in SHOW JOBS output. #154261 +- A bug has been fixed that caused panics when executing `COPY` into a table with hidden columns and expression indexes. The panic only occurred when the `expect_and_ignore_not_visible_columns_in_copy` setting was enabled. This bug has been present since `expect_and_ignore_not_visible_columns_in_copy` was introduced in v22.1.0. #154289 +- **Idle latency** on the **Transaction Details** page in the DB Console is now reported more accurately. Previously, transactions that used prepared statements (e.g., with placeholders) overcounted idle time, while those that included observer statements (common in the SQL CLI) undercounted it. 
#154385 +- Fixed a bug where `RESTORE` of a database with a `SECONDARY REGION` did not apply the lease preferences for that region. #154659 +- A bug where a changefeed could perform many unnecessary job progress saves during an initial scan has been fixed. #154709 +- A bug where a changefeed targeting only a subset of a table's column families could become stuck has been fixed. #154915

Performance improvements

-- The cost of generic query plans is now calculated based on worst-case selectivities for placeholder equalities (e.g., x = $1). This reduces the chance of suboptimal generic query plans being chosen when `plan_cache_mode=auto`. [#154899][#154899] +- The cost of generic query plans is now calculated based on worst-case selectivities for placeholder equalities (e.g., x = $1). This reduces the chance of suboptimal generic query plans being chosen when `plan_cache_mode=auto`. #154899 -[#154337]: https://github.com/cockroachdb/cockroach/pull/154337 -[#154491]: https://github.com/cockroachdb/cockroach/pull/154491 -[#154388]: https://github.com/cockroachdb/cockroach/pull/154388 -[#154459]: https://github.com/cockroachdb/cockroach/pull/154459 -[#154385]: https://github.com/cockroachdb/cockroach/pull/154385 -[#154755]: https://github.com/cockroachdb/cockroach/pull/154755 -[#154576]: https://github.com/cockroachdb/cockroach/pull/154576 -[#154915]: https://github.com/cockroachdb/cockroach/pull/154915 -[#154631]: https://github.com/cockroachdb/cockroach/pull/154631 -[#154261]: https://github.com/cockroachdb/cockroach/pull/154261 -[#154659]: https://github.com/cockroachdb/cockroach/pull/154659 -[#154953]: https://github.com/cockroachdb/cockroach/pull/154953 -[#154166]: https://github.com/cockroachdb/cockroach/pull/154166 -[#154289]: https://github.com/cockroachdb/cockroach/pull/154289 -[#154709]: https://github.com/cockroachdb/cockroach/pull/154709 -[#154899]: https://github.com/cockroachdb/cockroach/pull/154899 diff --git a/src/current/_includes/releases/v25.4/v25.4.0-beta.3.md b/src/current/_includes/releases/v25.4/v25.4.0-beta.3.md index 63ed3055b80..b86db0d76e7 100644 --- a/src/current/_includes/releases/v25.4/v25.4.0-beta.3.md +++ b/src/current/_includes/releases/v25.4/v25.4.0-beta.3.md @@ -6,7 +6,6 @@ Release Date: October 16, 2025

Bug fixes

-- Fixed a bug that caused internal errors for `INSERT .. ON CONFLICT .. DO UPDATE` statements when the target table had both a computed column and a `BEFORE` trigger. This bug was present since triggers were introduced in v24.3.0. [#155077][#155077] +- Fixed a bug that caused internal errors for `INSERT .. ON CONFLICT .. DO UPDATE` statements when the target table had both a computed column and a `BEFORE` trigger. This bug was present since triggers were introduced in v24.3.0. #155077 -[#155077]: https://github.com/cockroachdb/cockroach/pull/155077 diff --git a/src/current/_includes/releases/v25.4/v25.4.0-rc.1.md b/src/current/_includes/releases/v25.4/v25.4.0-rc.1.md index d091a8f21d3..6382714ec19 100644 --- a/src/current/_includes/releases/v25.4/v25.4.0-rc.1.md +++ b/src/current/_includes/releases/v25.4/v25.4.0-rc.1.md @@ -10,40 +10,31 @@ Release Date: October 22, 2025 `sql.log.scan_row_count_misestimate.enabled` to use structured logging including the table and index being scanned, the estimated and actual row counts, the time since the last table stats collection, and the - table's estimated staleness. [#155123][#155123] + table's estimated staleness. #155123 - Added a default-off cluster setting (`sql.log.scan_row_count_misestimate.enabled`) that enables logging a warning on the gateway node when optimizer estimates for scans are inaccurate. The log message includes the table and index being scanned, the estimated and actual row counts, the time since the last table stats - collection, and the table's estimated staleness. [#155123][#155123] -- Added the `INSPECT` command, which runs consistency validation check jobs against tables or databases and specified indexes. [#155441][#155441] + collection, and the table's estimated staleness. #155123 +- Added the `INSPECT` command, which runs consistency validation check jobs against tables or databases and specified indexes. 
#155441 - Added the `bulkio.index_backfill.vector_merge_batch_size cluster` setting to control how many vectors to merge into a vector index per transaction during - create operations. By default, this defaults to 3. [#155509][#155509] -- Vector indexing is now enabled by default. [#155561][#155561] + create operations. This defaults to 3. #155509 +- Vector indexing is now enabled by default. #155561

Bug fixes

-- Fixed a bug that caused internal errors for `INSERT .. ON CONFLICT .. DO UPDATE` statements when the target table had both a computed column and a `BEFORE` trigger. This bug was present since triggers were introduced in v24.3.0. [#155077][#155077] +- Fixed a bug that caused internal errors for `INSERT .. ON CONFLICT .. DO UPDATE` statements when the target table had both a computed column and a `BEFORE` trigger. This bug was present since triggers were introduced in v24.3.0. #155077 - Disable a feature (`kv.lock_table.unreplicated_lock_reliability.split.enabled`) that could - lead to a node crash. [#155366][#155366] + lead to a node crash. #155366 - Previously, we could corrupt the first bucket of table statistic histograms in certain cases, causing underestimates for - range counts near the lower end of the domain, which is now fixed. [#155415][#155415] + range counts near the lower end of the domain, which is now fixed. #155415 - A potential deadlock during vector index - creation has been corrected. [#155508][#155508] -- Added proper dependency handling when adding a constraint with `NOT VALID` that references a user-defined function (UDF). [#155528][#155528] - - -[#155123]: https://github.com/cockroachdb/cockroach/pull/155123 -[#155441]: https://github.com/cockroachdb/cockroach/pull/155441 -[#155508]: https://github.com/cockroachdb/cockroach/pull/155508 -[#155509]: https://github.com/cockroachdb/cockroach/pull/155509 -[#155561]: https://github.com/cockroachdb/cockroach/pull/155561 -[#155077]: https://github.com/cockroachdb/cockroach/pull/155077 -[#155366]: https://github.com/cockroachdb/cockroach/pull/155366 -[#155415]: https://github.com/cockroachdb/cockroach/pull/155415 -[#155528]: https://github.com/cockroachdb/cockroach/pull/155528 + creation has been corrected. #155508 +- Added proper dependency handling when adding a constraint with `NOT VALID` that references a user-defined function (UDF). 
#155528 + + diff --git a/src/current/_includes/releases/v25.4/v25.4.0.md b/src/current/_includes/releases/v25.4/v25.4.0.md index aac3d5d33c4..1eb14034d38 100644 --- a/src/current/_includes/releases/v25.4/v25.4.0.md +++ b/src/current/_includes/releases/v25.4/v25.4.0.md @@ -276,25 +276,25 @@ This section describes newly identified limitations in CockroachDB v25.4. ##### Views -- The `security_invoker` attribute for views is not supported. Views always use the view definer's privileges when checking permissions. [#138918](https://github.com/cockroachdb/cockroach/issues/138918) -- Casting subqueries to [user-defined types]({% link v25.4/create-type.md %}) (including `ENUM`s) in views is not supported. [#108184](https://github.com/cockroachdb/cockroach/issues/108184) +- The `security_invoker` attribute for views is not supported. Views always use the view definer's privileges when checking permissions. #138918 +- Casting subqueries to [user-defined types]({% link v25.4/create-type.md %}) (including `ENUM`s) in views is not supported. #108184 ##### User-defined functions -- User-defined functions are not supported in partial index predicates. [#155488](https://github.com/cockroachdb/cockroach/issues/155488) -- Views cannot reference a UDF that contains mutation statements (`INSERT`, `UPDATE`, `UPSERT`, `DELETE`). [#151686](https://github.com/cockroachdb/cockroach/issues/151686) +- User-defined functions are not supported in partial index predicates. #155488 +- Views cannot reference a UDF that contains mutation statements (`INSERT`, `UPDATE`, `UPSERT`, `DELETE`). #151686 ##### Stored procedures -- Pausable portals are not supported with `CALL` statements for stored procedures. [#151529](https://github.com/cockroachdb/cockroach/issues/151529) +- Pausable portals are not supported with `CALL` statements for stored procedures. 
#151529 ##### Mixed-isolation workloads -- Mixed-isolation-level workloads must enable foreign-key check locking for `SERIALIZABLE` transactions to avoid race conditions. [#151663](https://github.com/cockroachdb/cockroach/issues/151663#issuecomment-3222083180) +- Mixed-isolation-level workloads must enable foreign-key check locking for `SERIALIZABLE` transactions to avoid race conditions. #151663 ##### Data domiciling -- When using the `infer_rbr_region_col_using_constraint` option, inserting rows with `DEFAULT` for the region column uses the database's primary region instead of inferring the region from the parent table via foreign-key constraint. [#150783](https://github.com/cockroachdb/cockroach/issues/150783) +- When using the `infer_rbr_region_col_using_constraint` option, inserting rows with `DEFAULT` for the region column uses the database's primary region instead of inferring the region from the parent table via foreign-key constraint. #150783 {% comment %} {{site.data.alerts.callout_info}} diff --git a/src/current/_includes/releases/v25.4/v25.4.1.md b/src/current/_includes/releases/v25.4/v25.4.1.md index 55e02c71364..6d6ff38c5bd 100644 --- a/src/current/_includes/releases/v25.4/v25.4.1.md +++ b/src/current/_includes/releases/v25.4/v25.4.1.md @@ -6,57 +6,36 @@ Release Date: December 3, 2025

SQL language changes

-- Added a `sql.statements.rows_read.count` metric that counts the number of index rows read by SQL statements. [#156459][#156459] -- Added a `sql.statements.index_rows_written.count` metric that counts the number of primary and secondary index rows modified by SQL statements. [#156459][#156459] -- Added a `sql.statements.index_bytes_written.count` metric that counts the number of primary and secondary index bytes modified by SQL statements. [#156459][#156459] -- Added a `sql.statements.bytes_read.count` metric that counts the number of bytes scanned by SQL statements. [#156459][#156459] -- CockroachDB now supports index accelerating `jsonb_path_exists` filters with JSONpath expressions that end with an AnyKey (`*`). [#156508][#156508] +- Added a `sql.statements.rows_read.count` metric that counts the number of index rows read by SQL statements. #156459 +- Added a `sql.statements.index_rows_written.count` metric that counts the number of primary and secondary index rows modified by SQL statements. #156459 +- Added a `sql.statements.index_bytes_written.count` metric that counts the number of primary and secondary index bytes modified by SQL statements. #156459 +- Added a `sql.statements.bytes_read.count` metric that counts the number of bytes scanned by SQL statements. #156459 +- CockroachDB now supports index accelerating `jsonb_path_exists` filters with JSONpath expressions that end with an AnyKey (`*`). #156508

Bug fixes

-- Fixed a bug where CockroachDB would hit an internal error when performing an inverted join using an inverted index in which the first prefix column had `DESC` direction. The bug was present since v21.1. [#154970][#154970] -- Fixed a bug in the `cockroach node drain` command where draining a node using virtual clusters (such as clusters running Physical Cluster Replication (PCR)) could return before the drain was complete, possibly resulting in shutting down the node while it still had active SQL clients and range leases. [#155633][#155633] -- Fixed an internal error that could occur when replacing a user-defined function or stored procedure using `CREATE OR REPLACE`, if the existing signature included multiple `DEFAULT` expressions. This bug was introduced in v24.2, when support for `DEFAULT` expressions was added. [#155927][#155927] -- Fixed a bug where the job responsible for compacting stats for the SQL activity state could enter an unschedulable state. [#155963][#155963] -- Fixed a bug where DML statements on regional by row tables with unique indexes that do not reference the region could sometimes fail under `READ COMMITTED` isolation. [#156136][#156136] -- Fixed a bug that prevented the optimizer from recognizing correlated filters when one of the filtered columns had a single distinct value across all rows. This could lead to suboptimal query plans in some cases. [#156286][#156286] -- Fixed a bug where changefeeds using CDC queries could sometimes unexpectedly fail after a schema change with a descriptor retrieval error. [#156545][#156545] -- Fixed a bug where `DROP SCHEMA CASCADE` with complex references from triggers could run into an error. [#156564][#156564] -- Fixed a bug in the `ltree2text` built-in function where the returned `TEXT` value was incorrectly wrapped in single quotes. This bug had been present since the `ltree2text` function was introduced in v25.4.0. 
[#156667][#156667] -- Fixed a bug where the "atomic" `COPY` command (controlled via the `copy_from_atomic_enabled` session setting, `true` by default) could encounter `RETRY_COMMIT_DEADLINE_EXCEEDED` transaction errors if the whole command took 1 minute or more. This bug occurred only when the vectorized engine was used for `COPY`. [#156695][#156695] -- Fixed a bug that caused incorrect results for queries that filter indexed `LTREE` columns with the `<@` (contained-by) operator. This bug was present since v25.4.0. [#156779][#156779] -- Fixed a bug that caused incorrect `gossip.callbacks.pending_duration` metric values to be recorded. [#156947][#156947] -- Fixed a bug in JSONPath index acceleration where queries using `jsonb_path_exists` with a root key (e.g., `$.b`) incorrectly returned no results when the queried JSON was an array. This fix enables unwrapping a single array layer at the root, allowing the path to be evaluated against each element. This bug was present since v25.4.0. [#156968][#156968] -- Fixed a bug that could cause internal errors for queries using generic query plans with `NULL` placeholder values. [#156979][#156979] -- Fixed a bug where CockroachDB could encounter an internal error when evaluating a `COPY FROM` command in a transaction after it was rolled back to a savepoint. The bug was present since before v23.2. [#157037][#157037] +- Fixed a bug where CockroachDB would hit an internal error when performing an inverted join using an inverted index in which the first prefix column had `DESC` direction. The bug was present since v21.1. #154970 +- Fixed a bug in the `cockroach node drain` command where draining a node using virtual clusters (such as clusters running Physical Cluster Replication (PCR)) could return before the drain was complete, possibly resulting in shutting down the node while it still had active SQL clients and range leases. 
#155633 +- Fixed an internal error that could occur when replacing a user-defined function or stored procedure using `CREATE OR REPLACE`, if the existing signature included multiple `DEFAULT` expressions. This bug was introduced in v24.2, when support for `DEFAULT` expressions was added. #155927 +- Fixed a bug where the job responsible for compacting stats for the SQL activity state could enter an unschedulable state. #155963 +- Fixed a bug where DML statements on regional by row tables with unique indexes that do not reference the region could sometimes fail under `READ COMMITTED` isolation. #156136 +- Fixed a bug that prevented the optimizer from recognizing correlated filters when one of the filtered columns had a single distinct value across all rows. This could lead to suboptimal query plans in some cases. #156286 +- Fixed a bug where changefeeds using CDC queries could sometimes unexpectedly fail after a schema change with a descriptor retrieval error. #156545 +- Fixed a bug where `DROP SCHEMA CASCADE` with complex references from triggers could run into an error. #156564 +- Fixed a bug in the `ltree2text` built-in function where the returned `TEXT` value was incorrectly wrapped in single quotes. This bug had been present since the `ltree2text` function was introduced in v25.4.0. #156667 +- Fixed a bug where the "atomic" `COPY` command (controlled via the `copy_from_atomic_enabled` session setting, `true` by default) could encounter `RETRY_COMMIT_DEADLINE_EXCEEDED` transaction errors if the whole command took 1 minute or more. This bug occurred only when the vectorized engine was used for `COPY`. #156695 +- Fixed a bug that caused incorrect results for queries that filter indexed `LTREE` columns with the `<@` (contained-by) operator. This bug was present since v25.4.0. #156779 +- Fixed a bug that caused incorrect `gossip.callbacks.pending_duration` metric values to be recorded. 
#156947 +- Fixed a bug in JSONPath index acceleration where queries using `jsonb_path_exists` with a root key (e.g., `$.b`) incorrectly returned no results when the queried JSON was an array. This fix enables unwrapping a single array layer at the root, allowing the path to be evaluated against each element. This bug was present since v25.4.0. #156968 +- Fixed a bug that could cause internal errors for queries using generic query plans with `NULL` placeholder values. #156979 +- Fixed a bug where CockroachDB could encounter an internal error when evaluating a `COPY FROM` command in a transaction after it was rolled back to a savepoint. The bug was present since before v23.2. #157037

Performance improvements

-- The optimizer will no longer choose a generic query plan with unbounded cardinality over a custom query plan with bounded cardinality, regardless of `optimizer_prefer_bounded_cardinality`, better optimizing such queries. [#155460][#155460] +- The optimizer will no longer choose a generic query plan with unbounded cardinality over a custom query plan with bounded cardinality, regardless of `optimizer_prefer_bounded_cardinality`, better optimizing such queries. #155460

Miscellaneous

-- Logical Data Replication (LDR) no longer requires the database name to be specified in the external connection URI when setting up a bidirectional stream. [#155737][#155737] -- Span config reconciliation jobs no longer fail on the destination after failover from a Physical Cluster Replication (PCR) stream of a system virtual cluster. [#156812][#156812] - -[#156812]: https://github.com/cockroachdb/cockroach/pull/156812 -[#155927]: https://github.com/cockroachdb/cockroach/pull/155927 -[#157037]: https://github.com/cockroachdb/cockroach/pull/157037 -[#155460]: https://github.com/cockroachdb/cockroach/pull/155460 -[#156564]: https://github.com/cockroachdb/cockroach/pull/156564 -[#156667]: https://github.com/cockroachdb/cockroach/pull/156667 -[#156779]: https://github.com/cockroachdb/cockroach/pull/156779 -[#155737]: https://github.com/cockroachdb/cockroach/pull/155737 -[#155561]: https://github.com/cockroachdb/cockroach/pull/155561 -[#156459]: https://github.com/cockroachdb/cockroach/pull/156459 -[#156136]: https://github.com/cockroachdb/cockroach/pull/156136 -[#155963]: https://github.com/cockroachdb/cockroach/pull/155963 -[#156695]: https://github.com/cockroachdb/cockroach/pull/156695 -[#156968]: https://github.com/cockroachdb/cockroach/pull/156968 -[#156979]: https://github.com/cockroachdb/cockroach/pull/156979 -[#156508]: https://github.com/cockroachdb/cockroach/pull/156508 -[#154970]: https://github.com/cockroachdb/cockroach/pull/154970 -[#155633]: https://github.com/cockroachdb/cockroach/pull/155633 -[#156286]: https://github.com/cockroachdb/cockroach/pull/156286 -[#156545]: https://github.com/cockroachdb/cockroach/pull/156545 -[#156947]: https://github.com/cockroachdb/cockroach/pull/156947 +- Logical Data Replication (LDR) no longer requires the database name to be specified in the external connection URI when setting up a bidirectional stream. 
#155737 +- Span config reconciliation jobs no longer fail on the destination after failover from a Physical Cluster Replication (PCR) stream of a system virtual cluster. #156812 + diff --git a/src/current/_includes/releases/v25.4/v25.4.2.md b/src/current/_includes/releases/v25.4/v25.4.2.md index 77fc57233cc..b158c8cfb7d 100644 --- a/src/current/_includes/releases/v25.4/v25.4.2.md +++ b/src/current/_includes/releases/v25.4/v25.4.2.md @@ -6,25 +6,16 @@ Release Date: December 12, 2025

DB Console changes

-- The background (elastic) store graphs for exhausted duration, and the wait duration histogram, have been separated from the foreground (regular) graphs. [#156869][#156869] +- The background (elastic) store graphs for exhausted duration, and the wait duration histogram, have been separated from the foreground (regular) graphs. #156869

SQL language changes

-- Fixed a bug where creating a routine could create unnecessary column dependencies when the routine references columns through CHECK constraints (including those for RLS policies and hash-sharded indexes) or partial index predicates. These unnecessary dependencies prevented dropping the column without first dropping the routine. The fix is gated behind the session setting `use_improved_routine_deps_triggers_and_computed_cols`, which is off by default prior to v26.1. [#159469][#159469] +- Fixed a bug where creating a routine could create unnecessary column dependencies when the routine references columns through CHECK constraints (including those for RLS policies and hash-sharded indexes) or partial index predicates. These unnecessary dependencies prevented dropping the column without first dropping the routine. The fix is gated behind the session setting `use_improved_routine_deps_triggers_and_computed_cols`, which is off by default prior to v26.1. #159469

Bug fixes

-- Fixed a bug that caused newly-created routines to incorrectly prevent dropping columns that were not directly referenced, most notably columns referenced by computed column expressions. The fix is gated behind the session setting `use_improved_routine_deps_triggers_and_computed_cols`, which is off by default prior to v26.1. [#159469][#159469] -- A mechanism that prevents unsafe replication changes from causing loss of quorum now functions correctly. An internal function has been fixed to properly return errors, enhancing the reliability of replication safeguards. [#156523][#156523] -- Fixed a bug where CockroachDB could encounter a `vector encoder doesn't support ForcePut yet` error when executing `COPY` commands concurrently with certain schema changes. The bug had existed since before v23.2. [#157200][#157200] -- Fixed a bug that could cause a schema change to be stuck in the reverting state if the `infer_rbr_region_col_using_constraint` storage parameter was being set at the same time as adding a constraint that had a foreign key violation. [#157844][#157844] - -[#156523]: https://github.com/cockroachdb/cockroach/pull/156523 -[#156869]: https://github.com/cockroachdb/cockroach/pull/156869 -[#157011]: https://github.com/cockroachdb/cockroach/pull/157011 -[#157200]: https://github.com/cockroachdb/cockroach/pull/157200 -[#157228]: https://github.com/cockroachdb/cockroach/pull/157228 -[#157844]: https://github.com/cockroachdb/cockroach/pull/157844 -[#157853]: https://github.com/cockroachdb/cockroach/pull/157853 -[#159122]: https://github.com/cockroachdb/cockroach/pull/159122 -[#159469]: https://github.com/cockroachdb/cockroach/pull/159469 +- Fixed a bug that caused newly-created routines to incorrectly prevent dropping columns that were not directly referenced, most notably columns referenced by computed column expressions. The fix is gated behind the session setting `use_improved_routine_deps_triggers_and_computed_cols`, which is off by default prior to v26.1. 
#159469 +- A mechanism that prevents unsafe replication changes from causing loss of quorum now functions correctly. An internal function has been fixed to properly return errors, enhancing the reliability of replication safeguards. #156523 +- Fixed a bug where CockroachDB could encounter a `vector encoder doesn't support ForcePut yet` error when executing `COPY` commands concurrently with certain schema changes. The bug had existed since before v23.2. #157200 +- Fixed a bug that could cause a schema change to be stuck in the reverting state if the `infer_rbr_region_col_using_constraint` storage parameter was being set at the same time as adding a constraint that had a foreign key violation. #157844 + diff --git a/src/current/_includes/releases/v25.4/v25.4.3.md b/src/current/_includes/releases/v25.4/v25.4.3.md index 43e242f1298..eb6eef3a265 100644 --- a/src/current/_includes/releases/v25.4/v25.4.3.md +++ b/src/current/_includes/releases/v25.4/v25.4.3.md @@ -6,45 +6,30 @@ Release Date: January 9, 2026

Operational changes

-- Successfully completed automatic SQL stats collecton jobs are now automatically purged rather than being retained for the full default job retention period. [#159412][#159412] +- Successfully completed automatic SQL stats collection jobs are now automatically purged rather than being retained for the full default job retention period. #159412

Bug fixes

-- Fixed a bug where a SQL statement with side effects (e.g., `INSERT`) inside a PL/pgSQL routine could be dropped if it used an `INTO` clause and none of the target variables were referenced. This bug had been present since v23.2. [#158346][#158346] +- Fixed a bug where a SQL statement with side effects (e.g., `INSERT`) inside a PL/pgSQL routine could be dropped if it used an `INTO` clause and none of the target variables were referenced. This bug had been present since v23.2. #158346 - Fixed a bug that could cause incorrect query results when using prepared statements with _NULL_ placeholders. The bug has existed since v21.2 and violated SQL _NULL_-equality semantics by returning rows with _NULL_ values when the result set should have been empty. From v21.2 to v25.3, the bug occurred when all of the following were true: - The query was run with an explicit or implicit prepared statement - The query had an equality filter on a placeholder and a `UNIQUE` column - The column contained _NULL_ values - The placeholder was assigned to _NULL_ during execution - - Starting in v25.4, the requirements to trigger the bug were loosened: the column no longer needed to be `UNIQUE`, and the bug could be reproduced if the column was included in any index. [#159062][#159062] -- Fixed a bug where `TRUNCATE` did not behave correctly with respect to the `schema_locked` storage parameter, and was not being blocked when Logical Data Replication (LDR) was in use. This behavior was incorrect and has been fixed. [#159404][#159404] -- Fixed a bug that allowed columns to be dropped despite being referenced by a routine. This could occur when a column was only referenced as a target column in the `SET` clause of an `UPDATE` statement within the routine. This fix only applies to newly-created routines. In versions prior to v26.1, the fix must be enabled by setting the session variable `prevent_update_set_column_drop`. 
[#159441][#159441] -- Fixed a race condition that could occur during context cancellation of an incoming snapshot. [#159535][#159535] -- Fixed a bug that could cause a panic during changefeed startup if an error occurred while initializing the metrics controller. [#159548][#159548] -- Fixed a memory accounting issue that could occur when a lease expired due to a SQL liveness session-based timeout. [#159622][#159622] -- Fixed an issue where long-running transactions with many statements could cause unbounded memory growth in the SQL statistics subsystem. When a transaction includes a large number of statements, the SQL statistics ingester now automatically flushes buffered statistics before the transaction commits. As a side effect, the flushed statement statistics might not have an associated transaction fingerprint ID because the transaction has not yet completed. In such cases, the transaction fingerprint ID cannot be backfilled after the fact. [#159646][#159646] -- Fixed a bug causing a query predicate to be ignored when the predicate was on a column following one or more `ENUM` columns in an index, the predicate constrained the column to multiple values, and a lookup join to the index was chosen for the query plan. This bug was introduced in 24.3.0 and has been present in all versions since. [#159777][#159777] -- Fixed a race condition where queries run after revoking `BYPASSRLS` could return wrong results because cached plans failed to notice the change immediately. [#160086][#160086] -- Fixed a bug where renaming a column that participated in multiple hash-sharded indexes would fail. [#160087][#160087] -- Fixed a deadlock that could occur when a statistics creation task panicked. [#160584][#160584] + - Starting in v25.4, the requirements to trigger the bug were loosened: the column no longer needed to be `UNIQUE`, and the bug could be reproduced if the column was included in any index. 
#159062 +- Fixed a bug where `TRUNCATE` did not behave correctly with respect to the `schema_locked` storage parameter, and was not being blocked when Logical Data Replication (LDR) was in use. This behavior was incorrect and has been fixed. #159404 +- Fixed a bug that allowed columns to be dropped despite being referenced by a routine. This could occur when a column was only referenced as a target column in the `SET` clause of an `UPDATE` statement within the routine. This fix only applies to newly-created routines. In versions prior to v26.1, the fix must be enabled by setting the session variable `prevent_update_set_column_drop`. #159441 +- Fixed a race condition that could occur during context cancellation of an incoming snapshot. #159535 +- Fixed a bug that could cause a panic during changefeed startup if an error occurred while initializing the metrics controller. #159548 +- Fixed a memory accounting issue that could occur when a lease expired due to a SQL liveness session-based timeout. #159622 +- Fixed an issue where long-running transactions with many statements could cause unbounded memory growth in the SQL statistics subsystem. When a transaction includes a large number of statements, the SQL statistics ingester now automatically flushes buffered statistics before the transaction commits. As a side effect, the flushed statement statistics might not have an associated transaction fingerprint ID because the transaction has not yet completed. In such cases, the transaction fingerprint ID cannot be backfilled after the fact. #159646 +- Fixed a bug causing a query predicate to be ignored when the predicate was on a column following one or more `ENUM` columns in an index, the predicate constrained the column to multiple values, and a lookup join to the index was chosen for the query plan. This bug was introduced in 24.3.0 and has been present in all versions since. 
#159777 +- Fixed a race condition where queries run after revoking `BYPASSRLS` could return wrong results because cached plans failed to notice the change immediately. #160086 +- Fixed a bug where renaming a column that participated in multiple hash-sharded indexes would fail. #160087 +- Fixed a deadlock that could occur when a statistics creation task panicked. #160584

Performance improvements

-- `AFTER` triggers now use a cache for descriptor lookups of `TG_TABLE_SCHEMA`, which can significantly reduce trigger planning latency. [#158908][#158908] -- Added a new session variable, `distsql_prevent_partitioning_soft_limited_scans`, which, when true, prevents scans with soft limits from being planned as multiple TableReaders by the physical planner. This should decrease the initial setup costs of some fully-distributed query plans. [#160603][#160603] - -[#159646]: https://github.com/cockroachdb/cockroach/pull/159646 -[#160086]: https://github.com/cockroachdb/cockroach/pull/160086 -[#158908]: https://github.com/cockroachdb/cockroach/pull/158908 -[#158346]: https://github.com/cockroachdb/cockroach/pull/158346 -[#159535]: https://github.com/cockroachdb/cockroach/pull/159535 -[#159622]: https://github.com/cockroachdb/cockroach/pull/159622 -[#160087]: https://github.com/cockroachdb/cockroach/pull/160087 -[#159412]: https://github.com/cockroachdb/cockroach/pull/159412 -[#159777]: https://github.com/cockroachdb/cockroach/pull/159777 -[#159441]: https://github.com/cockroachdb/cockroach/pull/159441 -[#159062]: https://github.com/cockroachdb/cockroach/pull/159062 -[#159404]: https://github.com/cockroachdb/cockroach/pull/159404 -[#159548]: https://github.com/cockroachdb/cockroach/pull/159548 -[#160584]: https://github.com/cockroachdb/cockroach/pull/160584 -[#160603]: https://github.com/cockroachdb/cockroach/pull/160603 +- `AFTER` triggers now use a cache for descriptor lookups of `TG_TABLE_SCHEMA`, which can significantly reduce trigger planning latency. #158908 +- Added a new session variable, `distsql_prevent_partitioning_soft_limited_scans`, which, when true, prevents scans with soft limits from being planned as multiple TableReaders by the physical planner. This should decrease the initial setup costs of some fully-distributed query plans. 
#160603 + diff --git a/src/current/_includes/releases/v25.4/v25.4.4.md b/src/current/_includes/releases/v25.4/v25.4.4.md index 3b62a92ac08..0aa9ad43bf2 100644 --- a/src/current/_includes/releases/v25.4/v25.4.4.md +++ b/src/current/_includes/releases/v25.4/v25.4.4.md @@ -6,39 +6,28 @@ Release Date: February 11, 2026

General changes

-- Changefeeds now support the `partition_alg` option for specifying a kafka partitioning algorithm. Currently `fnv-1a` (default) and `murmur2` are supported. The option is only valid on kafka v2 sinks. This is protected by the cluster setting `changefeed.partition_alg.enabled`. An example usage: `SET CLUSTER SETTING changefeed.partition_alg.enabled=true; CREATE CHANGEFEED ... INTO 'kafka://...' WITH partition_alg='murmur2';` Note that if a changefeed is created using the `murmur2` algorithm, and then the cluster setting is disabled, the changefeed continues using the `murmur2` algorithm unless the changefeed is altered to use a differed `partition_alg`. [#161534][#161534] +- Changefeeds now support the `partition_alg` option for specifying a kafka partitioning algorithm. Currently `fnv-1a` (default) and `murmur2` are supported. The option is only valid on kafka v2 sinks. This is protected by the cluster setting `changefeed.partition_alg.enabled`. An example usage: `SET CLUSTER SETTING changefeed.partition_alg.enabled=true; CREATE CHANGEFEED ... INTO 'kafka://...' WITH partition_alg='murmur2';` Note that if a changefeed is created using the `murmur2` algorithm, and then the cluster setting is disabled, the changefeed continues using the `murmur2` algorithm unless the changefeed is altered to use a different `partition_alg`. #161534

DB Console changes

-- The **SQL Activity** > **Sessions** page now defaults the **Session Status** filter to **Active, Idle** to exclude closed sessions. [#160936][#160936] +- The **SQL Activity** > **Sessions** page now defaults the **Session Status** filter to **Active, Idle** to exclude closed sessions. #160936

Bug fixes

-- Fixed a bug where renaming a column that participated in multiple hash-sharded indexes would fail. [#160087][#160087] -- Fixed a bug that caused `SHOW CREATE FUNCTION` to error when the function body contained casts from columns to user-defined types. [#160125][#160125] -- Fixed a deadlock that could occur when a statistics creation task panicked. [#160421][#160421] -- Fixed a bug where rolling back a transaction that had just rolled back a savepoint would block other transactions accessing the same rows for five seconds. [#160476][#160476] -- Fixed a bug where `IMPORT` with Avro data using `OCF` format could silently lose data if the underlying storage (e.g., S3) returned an error during read. Such errors are now properly reported. Other formats (specified via `data_as_binary_records` and `data_as_json_records` options) are unaffected. The bug has been present since approximately v20.1. [#161324][#161324] -- Previously, if buffered writes were enabled (which is a public preview feature, off by default), multi-stmt explicit txns that use SAVEPOINTs to recover from certain errors (like duplicate key value violations) could lose the writes that were performed _before_ the savepoint was created in rare cases. The bug has been present since the buffered writes feature was added in 25.2 and is now fixed. [#162033][#162033] -- Fixed an error that occurred when using generic plan that generates a lookup join on indexes containing identity computed columns. [#162290][#162290] +- Fixed a bug where renaming a column that participated in multiple hash-sharded indexes would fail. #160087 +- Fixed a bug that caused `SHOW CREATE FUNCTION` to error when the function body contained casts from columns to user-defined types. #160125 +- Fixed a deadlock that could occur when a statistics creation task panicked. #160421 +- Fixed a bug where rolling back a transaction that had just rolled back a savepoint would block other transactions accessing the same rows for five seconds. 
#160476 +- Fixed a bug where `IMPORT` with Avro data using `OCF` format could silently lose data if the underlying storage (e.g., S3) returned an error during read. Such errors are now properly reported. Other formats (specified via `data_as_binary_records` and `data_as_json_records` options) are unaffected. The bug has been present since approximately v20.1. #161324 +- Previously, if buffered writes were enabled (which is a public preview feature, off by default), multi-stmt explicit txns that use SAVEPOINTs to recover from certain errors (like duplicate key value violations) could lose the writes that were performed _before_ the savepoint was created in rare cases. The bug has been present since the buffered writes feature was added in 25.2 and is now fixed. #162033 +- Fixed an error that occurred when using a generic plan that generates a lookup join on indexes containing identity computed columns. #162290

Performance improvements

-- Added a new session variable, `distsql_prevent_partitioning_soft_limited_scans`, which, when true, prevents scans with soft limits from being planned as multiple TableReaders by the physical planner. This should decrease the initial setup costs of some fully-distributed query plans. [#160628][#160628] +- Added a new session variable, `distsql_prevent_partitioning_soft_limited_scans`, which, when true, prevents scans with soft limits from being planned as multiple TableReaders by the physical planner. This should decrease the initial setup costs of some fully-distributed query plans. #160628

Build changes

-- Replaces bors with Trunk merge queue for better performance and reliability. Configuration-only change with no runtime impact - maintains same safety checks while improving CI workflow. [#161243][#161243] +- Replaces bors with Trunk merge queue for better performance and reliability. Configuration-only change with no runtime impact - maintains same safety checks while improving CI workflow. #161243 -[#160936]: https://github.com/cockroachdb/cockroach/pull/160936 -[#160087]: https://github.com/cockroachdb/cockroach/pull/160087 -[#160421]: https://github.com/cockroachdb/cockroach/pull/160421 -[#161324]: https://github.com/cockroachdb/cockroach/pull/161324 -[#162290]: https://github.com/cockroachdb/cockroach/pull/162290 -[#161534]: https://github.com/cockroachdb/cockroach/pull/161534 -[#160125]: https://github.com/cockroachdb/cockroach/pull/160125 -[#160476]: https://github.com/cockroachdb/cockroach/pull/160476 -[#162033]: https://github.com/cockroachdb/cockroach/pull/162033 -[#160628]: https://github.com/cockroachdb/cockroach/pull/160628 -[#161243]: https://github.com/cockroachdb/cockroach/pull/161243 diff --git a/src/current/_includes/releases/v25.4/v25.4.5.md b/src/current/_includes/releases/v25.4/v25.4.5.md index 3c11cfcf797..a57d1dfeb62 100644 --- a/src/current/_includes/releases/v25.4/v25.4.5.md +++ b/src/current/_includes/releases/v25.4/v25.4.5.md @@ -8,11 +8,8 @@ Release Date: February 19, 2026 - Fixed an error that occurred when using generic plan that generates a lookup join on indexes containing identity - computed columns. [#162290][#162290] -- Fixed a rare race condition between range splits and MVCC garbage collection where a GCRequest could target keys outside its declared span. In rare cases, this could result in data on the post-split right-hand side (RHS) being incorrectly garbage collected, potentially leading to lost writes. The system now detects and rejects such malformed GC requests. 
[#163772][#163772] -- Fixed a bug where generating a debug zip could trigger an out-of-memory (OOM) condition on a node if malformed log entries were present in logs using json or json-compact formatting. Debug zip generation now safely handles malformed log lines and prevents excessive memory consumption. [#163802][#163802] + computed columns. #162290 +- Fixed a rare race condition between range splits and MVCC garbage collection where a GCRequest could target keys outside its declared span. In rare cases, this could result in data on the post-split right-hand side (RHS) being incorrectly garbage collected, potentially leading to lost writes. The system now detects and rejects such malformed GC requests. #163772 +- Fixed a bug where generating a debug zip could trigger an out-of-memory (OOM) condition on a node if malformed log entries were present in logs using json or json-compact formatting. Debug zip generation now safely handles malformed log lines and prevents excessive memory consumption. #163802 -[#162290]: https://github.com/cockroachdb/cockroach/pull/162290 -[#163772]: https://github.com/cockroachdb/cockroach/pull/163772 -[#163802]: https://github.com/cockroachdb/cockroach/pull/163802 diff --git a/src/current/_includes/releases/v25.4/v25.4.6.md b/src/current/_includes/releases/v25.4/v25.4.6.md index 129fbee9fd6..e51b2f555af 100644 --- a/src/current/_includes/releases/v25.4/v25.4.6.md +++ b/src/current/_includes/releases/v25.4/v25.4.6.md @@ -6,16 +6,12 @@ Release Date: March 9, 2026

Bug fixes

-- Fixed a bug in which PL/pgSQL UDFs with many IF statements would cause a timeout and/or OOM when executed from a prepared statement. This bug was introduced in versions v23.2.22, v24.1.15, v24.3.9, v25.1.2, and v25.2.0. [#162559][#162559] -- Fixed a bug where generating a debug zip could trigger an out-of-memory (OOM) condition on a node if malformed log entries were present in logs using `json` or `json-compact` formatting. This bug was introduced in v24.1. [#163352][#163352] -- Fixed a rare data race during parallel constraint checks where a fresh descriptor collection could resolve a stale enum type version. This bug was introduced in v26.1.0. [#163960][#163960] +- Fixed a bug in which PL/pgSQL UDFs with many IF statements would cause a timeout and/or OOM when executed from a prepared statement. This bug was introduced in versions v23.2.22, v24.1.15, v24.3.9, v25.1.2, and v25.2.0. #162559 +- Fixed a bug where generating a debug zip could trigger an out-of-memory (OOM) condition on a node if malformed log entries were present in logs using `json` or `json-compact` formatting. This bug was introduced in v24.1. #163352 +- Fixed a rare data race during parallel constraint checks where a fresh descriptor collection could resolve a stale enum type version. This bug was introduced in v26.1.0. #163960

Performance improvements

-- Improved changefeed checkpointing performance when changefeeds are lagging. Previously, checkpoint updates could be redundantly applied multiple times per checkpoint operation. [#163282][#163282] +- Improved changefeed checkpointing performance when changefeeds are lagging. Previously, checkpoint updates could be redundantly applied multiple times per checkpoint operation. #163282 -[#162559]: https://github.com/cockroachdb/cockroach/pull/162559 -[#163352]: https://github.com/cockroachdb/cockroach/pull/163352 -[#163960]: https://github.com/cockroachdb/cockroach/pull/163960 -[#163282]: https://github.com/cockroachdb/cockroach/pull/163282 diff --git a/src/current/_includes/releases/v25.4/v25.4.7.md b/src/current/_includes/releases/v25.4/v25.4.7.md index c8905ae6a91..8a33ff588e3 100644 --- a/src/current/_includes/releases/v25.4/v25.4.7.md +++ b/src/current/_includes/releases/v25.4/v25.4.7.md @@ -6,24 +6,16 @@ Release Date: April 3, 2026

Command-line changes

-- The `cockroach debug zip` command's `--include-files` and `--exclude-files` flags now support full zip path patterns. Patterns containing `/` are matched against the full path within the zip archive (e.g., `--include-files='debug/nodes/1/*.json'`). Patterns without `/` continue to match the base file name as before. [#164149][#164149] +- The `cockroach debug zip` command's `--include-files` and `--exclude-files` flags now support full zip path patterns. Patterns containing `/` are matched against the full path within the zip archive (e.g., `--include-files='debug/nodes/1/*.json'`). Patterns without `/` continue to match the base file name as before. #164149

Bug fixes

-- Fixed a bug that prevented the `optimizer_min_row_count` setting from applying to anti-join expressions, which could lead to bad query plans. The fix is gated behind `optimizer_use_min_row_count_anti_join_fix`, which is on by default on v26.2 and later, and off by default in earlier versions. [#164799][#164799] -- Fixed a bug where CockroachDB did not always promptly respond to the statement timeout when performing a hash join with an `ON` filter that is mostly `false`. [#164890][#164890] -- Fixed a bug introduced in v25.4 where setting `min_checkpoint_frequency` to `0` prevented changefeeds from advancing their resolved timestamp (high-water mark) and emitting resolved messages. Note that setting `min_checkpoint_frequency` lower than `500ms` is not recommended as it may cause degraded changefeed performance. [#164894][#164894] -- Lowered the default value of the `changefeed.max_retry_backoff` cluster setting from `10m` to `30s` to reduce changefeed lag during rolling restarts. [#164935][#164935] -- Fixed a bug that could cause row sampling for table statistics to crash a node due to a data race when processing a collated string column with values larger than 400 bytes. This bug has existed since before v23.1. [#165566][#165566] -- Fixed a bug in the legacy schema changer where rolling back a `CREATE TABLE` with inline `FOREIGN KEY` constraints could leave orphaned foreign key back-references on the referenced table, causing descriptor validation errors. [#166197][#166197] -- Fixed a bug where rolling back a `CREATE TABLE` that referenced user-defined types or sequences would leave orphaned back-references on the type and sequence descriptors, causing them to appear in `crdb_internal.invalid_objects` after the table was GC'd. 
[#166225][#166225] - - -[#164935]: https://github.com/cockroachdb/cockroach/pull/164935 -[#165566]: https://github.com/cockroachdb/cockroach/pull/165566 -[#166197]: https://github.com/cockroachdb/cockroach/pull/166197 -[#166225]: https://github.com/cockroachdb/cockroach/pull/166225 -[#164149]: https://github.com/cockroachdb/cockroach/pull/164149 -[#164799]: https://github.com/cockroachdb/cockroach/pull/164799 -[#164890]: https://github.com/cockroachdb/cockroach/pull/164890 -[#164894]: https://github.com/cockroachdb/cockroach/pull/164894 +- Fixed a bug that prevented the `optimizer_min_row_count` setting from applying to anti-join expressions, which could lead to bad query plans. The fix is gated behind `optimizer_use_min_row_count_anti_join_fix`, which is on by default on v26.2 and later, and off by default in earlier versions. #164799 +- Fixed a bug where CockroachDB did not always promptly respond to the statement timeout when performing a hash join with an `ON` filter that is mostly `false`. #164890 +- Fixed a bug introduced in v25.4 where setting `min_checkpoint_frequency` to `0` prevented changefeeds from advancing their resolved timestamp (high-water mark) and emitting resolved messages. Note that setting `min_checkpoint_frequency` lower than `500ms` is not recommended as it may cause degraded changefeed performance. #164894 +- Lowered the default value of the `changefeed.max_retry_backoff` cluster setting from `10m` to `30s` to reduce changefeed lag during rolling restarts. #164935 +- Fixed a bug that could cause row sampling for table statistics to crash a node due to a data race when processing a collated string column with values larger than 400 bytes. This bug has existed since before v23.1. #165566 +- Fixed a bug in the legacy schema changer where rolling back a `CREATE TABLE` with inline `FOREIGN KEY` constraints could leave orphaned foreign key back-references on the referenced table, causing descriptor validation errors. 
#166197 +- Fixed a bug where rolling back a `CREATE TABLE` that referenced user-defined types or sequences would leave orphaned back-references on the type and sequence descriptors, causing them to appear in `crdb_internal.invalid_objects` after the table was GC'd. #166225 + + diff --git a/src/current/_includes/releases/v25.4/v25.4.8.md b/src/current/_includes/releases/v25.4/v25.4.8.md index 4896abe0b3e..3a128f9a5c0 100644 --- a/src/current/_includes/releases/v25.4/v25.4.8.md +++ b/src/current/_includes/releases/v25.4/v25.4.8.md @@ -6,7 +6,6 @@ Release Date: April 10, 2026

Bug fixes

-- Fixed a bug where `IMPORT` error messages could include unredacted cloud storage credentials from the source URI. Credentials are now stripped from URIs before they appear in error messages. [#168040][#168040] +- Fixed a bug where `IMPORT` error messages could include unredacted cloud storage credentials from the source URI. Credentials are now stripped from URIs before they appear in error messages. #168040 -[#168040]: https://github.com/cockroachdb/cockroach/pull/168040 diff --git a/src/current/_includes/releases/v25.4/v25.4.9.md b/src/current/_includes/releases/v25.4/v25.4.9.md index f8f268fd88f..c192470bb7a 100644 --- a/src/current/_includes/releases/v25.4/v25.4.9.md +++ b/src/current/_includes/releases/v25.4/v25.4.9.md @@ -6,9 +6,7 @@ Release Date: April 20, 2026

Bug fixes

-- Fixed a bug where `IMPORT` error messages could include unredacted cloud storage credentials from the source URI. Credentials are now stripped from URIs before they appear in error messages. [#168040][#168040] -- Fixed a bug where concurrent updates to a table using multiple column families during a partial index creation could result in data loss, incorrect `NULL` values, or validation failures in the resulting index. [#168472][#168472] +- Fixed a bug where `IMPORT` error messages could include unredacted cloud storage credentials from the source URI. Credentials are now stripped from URIs before they appear in error messages. #168040 +- Fixed a bug where concurrent updates to a table using multiple column families during a partial index creation could result in data loss, incorrect `NULL` values, or validation failures in the resulting index. #168472 -[#168040]: https://github.com/cockroachdb/cockroach/pull/168040 -[#168472]: https://github.com/cockroachdb/cockroach/pull/168472 diff --git a/src/current/_includes/releases/v26.1/backward-incompatible.md b/src/current/_includes/releases/v26.1/backward-incompatible.md index 8a2f51d71f5..f3a2e4a8f77 100644 --- a/src/current/_includes/releases/v26.1/backward-incompatible.md +++ b/src/current/_includes/releases/v26.1/backward-incompatible.md @@ -1,10 +1,10 @@ - Access to the `system` database and `crdb_internal` schema is now restricted by default. Queries to most of these namespaces will fail unless explicitly overridden via the `allow_unsafe_internals` session variable. This change prevents accidental misuse of internal-only objects that were not designed for stable external use and could lead to difficult recovery scenarios. 
As part of this effort, several commonly used introspection capabilities have been migrated to stable information_schema tables: - - `information_schema.crdb_datums_to_bytes` - previously only available as `crdb_internal.datums_to_bytes` [#156963](https://github.com/cockroachdb/cockroach/pull/156963) - - `information_schema.crdb_index_usage_stats` - previously only available as `crdb_internal.index_usage_stats` [#156963](https://github.com/cockroachdb/cockroach/pull/156963) - - `information_schema.crdb_rewrite_inline_hints` - replaces the function previously introduced as `crdb_internal.inject_hint` [#160946](https://github.com/cockroachdb/cockroach/pull/160946) + - `information_schema.crdb_datums_to_bytes` - previously only available as `crdb_internal.datums_to_bytes` #156963 + - `information_schema.crdb_index_usage_stats` - previously only available as `crdb_internal.index_usage_stats` #156963 + - `information_schema.crdb_rewrite_inline_hints` - replaces the function previously introduced as `crdb_internal.inject_hint` #160946 - [#158085](https://github.com/cockroachdb/cockroach/pull/158085) + #158085 - [FIPS-ready deployments]({% link v26.1/fips.md %}) in v26.1 are in Preview and are recommended for testing and evaluation only. CockroachDB v26.1 introduces a new FIPS architecture using Go's native FIPS 140-3 cryptographic module, replacing the OpenSSL-based approach from v25.4. 
This architectural change means: diff --git a/src/current/_includes/releases/v26.1/cluster-setting-changes.md b/src/current/_includes/releases/v26.1/cluster-setting-changes.md index 3f5a56f0188..205bc8d18ce 100644 --- a/src/current/_includes/releases/v26.1/cluster-setting-changes.md +++ b/src/current/_includes/releases/v26.1/cluster-setting-changes.md @@ -17,20 +17,6 @@ Changes to [cluster settings]({% link v26.1/cluster-settings.md %}) should be re - Events related to changefeed operations are now routed to the CHANGEFEED channel, while sampled queries and transactions, along with certain SQL performance events, are logged to SQL_EXEC. To continue using the previous logging channels, set `log.channel_compatibility_mode.enabled` to `true`. - `sql.catalog.allow_leased_descriptors.enabled` (default: `true`) - - Changed the default value of the `sql.catalog.allow_leased_descriptors.enabled` cluster setting to `true`. This setting allows introspection tables like `information_schema` and `pg_catalog` to use cached descriptors when building the table results, which improves the performance of introspection queries when there are many tables in the cluster. [#159566][#159566] + - Changed the default value of the `sql.catalog.allow_leased_descriptors.enabled` cluster setting to `true`. This setting allows introspection tables like `information_schema` and `pg_catalog` to use cached descriptors when building the table results, which improves the performance of introspection queries when there are many tables in the cluster. 
#159566 -[#153364]: https://github.com/cockroachdb/cockroach/pull/153364 -[#154051]: https://github.com/cockroachdb/cockroach/pull/154051 -[#154370]: https://github.com/cockroachdb/cockroach/pull/154370 -[#154412]: https://github.com/cockroachdb/cockroach/pull/154412 -[#154495]: https://github.com/cockroachdb/cockroach/pull/154495 -[#155284]: https://github.com/cockroachdb/cockroach/pull/155284 -[#155385]: https://github.com/cockroachdb/cockroach/pull/155385 -[#155454]: https://github.com/cockroachdb/cockroach/pull/155454 -[#155531]: https://github.com/cockroachdb/cockroach/pull/155531 -[#156303]: https://github.com/cockroachdb/cockroach/pull/156303 -[#158602]: https://github.com/cockroachdb/cockroach/pull/158602 -[#159566]: https://github.com/cockroachdb/cockroach/pull/159566 -[#159677]: https://github.com/cockroachdb/cockroach/pull/159677 -[#160016]: https://github.com/cockroachdb/cockroach/pull/160016 diff --git a/src/current/_includes/releases/v26.1/deprecations.md b/src/current/_includes/releases/v26.1/deprecations.md index ea0487b2fca..23898bde5fa 100644 --- a/src/current/_includes/releases/v26.1/deprecations.md +++ b/src/current/_includes/releases/v26.1/deprecations.md @@ -6,10 +6,8 @@ The following deprecations are announced in v26.1. - `INSPECT` supports a `DETACHED` option to run the operation without waiting for it - For more information, see the [`INSPECT`]({% link v26.1/inspect.md %}) documentation - [#155485][#155485] + #155485 -- **`enable_inspect_command` session variable**: The `enable_inspect_command` session variable has been deprecated and is now effectively always set to `true`. Since `INSPECT` is now a GA feature, this session variable is no longer needed. If you have this variable set in your application configurations, you can safely remove it. [#159750][#159750] +- **`enable_inspect_command` session variable**: The `enable_inspect_command` session variable has been deprecated and is now effectively always set to `true`. 
Since `INSPECT` is now a GA feature, this session variable is no longer needed. If you have this variable set in your application configurations, you can safely remove it. #159750 -[#155485]: https://github.com/cockroachdb/cockroach/pull/155485 -[#159750]: https://github.com/cockroachdb/cockroach/pull/159750 diff --git a/src/current/_includes/releases/v26.1/v26.1.0-alpha.1.md b/src/current/_includes/releases/v26.1/v26.1.0-alpha.1.md index bcad249ada7..ff3e2591daf 100644 --- a/src/current/_includes/releases/v26.1/v26.1.0-alpha.1.md +++ b/src/current/_includes/releases/v26.1/v26.1.0-alpha.1.md @@ -6,9 +6,9 @@ Release Date: December 4, 2025

General changes

-- Docker images now use UBI 10 as the base image. [#153990][#153990] +- Docker images now use UBI 10 as the base image. #153990 - The changefeed bulk - delivery setting was made optional. [#154870][#154870] + delivery setting was made optional. #154870 - To improve changefeed performance, the session variable `create_table_with_schema_locked` is enabled by default. This means all new tables are created with the `schema_locked` storage parameter. `schema_locked` must be explicitly unset for explicit transactions or for schema changes that do not support automatic disabling (e.g., `ALTER TABLE ... SET LOCALITY`).

SQL language changes

@@ -28,7 +28,7 @@ Release Date: December 4, 2025 - `$.a.b.c == 12`, `$.a.b.c > 12`, or `$.a.b.c < 12` (operation expressions) - `$.a.b ? (@.a > 10)` (filter with an inequality check) - [#150793][#150793] + #150793 - `SHOW CHANGEFEED JOBS` now includes a `database_name` field that displays the database name for database-level changefeeds. For table-level changefeeds, this field is `null`. For database-level changefeeds, the `full_table_names` field now returns an empty list by default and displays only the total count of watched tables. @@ -40,15 +40,15 @@ Release Date: December 4, 2025 This change improves performance when displaying database-level changefeeds that may track many tables. - [#151131][#151131] + #151131 -- Added a clamp for the estimated selectivity of inequality predicates that are unbounded on one or both sides (e.g., `x > 5`). This reduces the risk of a catastrophic underestimate that causes the optimizer to choose a poorly-constrained scan. The feature is disabled by default and can be enabled with the session setting `optimizer_clamp_inequality_selectivity`. [#153067][#153067] -- Added a clamp on row-count estimates for very large tables to ensure the optimizer assumes at least one distinct value will be scanned. This reduces the risk of a catastrophic underestimate. The feature is off by default and controlled by the `optimizer_clamp_low_histogram_selectivity` session setting. [#153067][#153067] -- The optimizer can now use table statistics that merge the latest full statistic with all newer partial statistics, including those over arbitrary constraints over a single span. [#153419][#153419] +- Added a clamp for the estimated selectivity of inequality predicates that are unbounded on one or both sides (e.g., `x > 5`). This reduces the risk of a catastrophic underestimate that causes the optimizer to choose a poorly-constrained scan. 
The feature is disabled by default and can be enabled with the session setting `optimizer_clamp_inequality_selectivity`. #153067 +- Added a clamp on row-count estimates for very large tables to ensure the optimizer assumes at least one distinct value will be scanned. This reduces the risk of a catastrophic underestimate. The feature is off by default and controlled by the `optimizer_clamp_low_histogram_selectivity` session setting. #153067 +- The optimizer can now use table statistics that merge the latest full statistic with all newer partial statistics, including those over arbitrary constraints over a single span. #153419 -- Added the `sql.catalog.allow_leased_descriptors.enabled` cluster setting, which is false by default. When set to true, queries that access the `pg_catalog` or `information_schema` can use cached leased descriptors to populate the data in those tables, with the tradeoff that some of the data could be stale. [#154051][#154051] -- Added a default-off cluster setting (`sql.log.scan_row_count_misestimate.enabled`) that enables logging a warning on the gateway node when optimizer estimates for scans are inaccurate. The log message includes the table and index being scanned, the estimated and actual row counts, the time since the last table stats collection, and the table's estimated staleness. [#154370][#154370] -- Fixed a bug where the results of `ALTER SEQUENCE`'s increment and `SELECT nextval()` operations were not as expected. The value of a sequence after an `ALTER SEQUENCE` statement has executed on it is now consistent with a sequence created with those values. [#154489][#154489] +- Added the `sql.catalog.allow_leased_descriptors.enabled` cluster setting, which is false by default. When set to true, queries that access the `pg_catalog` or `information_schema` can use cached leased descriptors to populate the data in those tables, with the tradeoff that some of the data could be stale. 
#154051 +- Added a default-off cluster setting (`sql.log.scan_row_count_misestimate.enabled`) that enables logging a warning on the gateway node when optimizer estimates for scans are inaccurate. The log message includes the table and index being scanned, the estimated and actual row counts, the time since the last table stats collection, and the table's estimated staleness. #154370 +- Fixed a bug where the results of `ALTER SEQUENCE`'s increment and `SELECT nextval()` operations were not as expected. The value of a sequence after an `ALTER SEQUENCE` statement has executed on it is now consistent with a sequence created with those values. #154489 - Added changefeed setting `range_distribution_strategy` with values `'default'` or `'balanced_simple'`. This new per-changefeed setting overrides the cluster setting `changefeed.default_range_distribution_strategy` where both exist. Example: @@ -57,58 +57,58 @@ Release Date: December 4, 2025 CREATE CHANGEFEED FOR x into 'null://' WITH range_distribution_strategy='balanced_simple'; ~~~ - [#154744][#154744] + #154744 -- Added the `INSPECT` command, which runs consistency validation check jobs against tables or databases and specified indexes. [#154873][#154873] +- Added the `INSPECT` command, which runs consistency validation check jobs against tables or databases and specified indexes. #154873 - Added support for collecting partial statistics when the given `WHERE` clause implies the predicate of a partial index with the requested column as the first key column. For example: ~~~ CREATE TABLE t (a INT, INDEX idx_partial (a) WHERE a > 5); CREATE STATISTICS pstat ON a FROM t WHERE a > 7; ~~~ - [#154892][#154892] - -- The `row_security` session variable now behaves as it does in PostgreSQL, allowing users to detect when RLS is applied. [#155110][#155110] -- Added the `bulkio.index_backfill.vector_merge_batch_size` cluster setting to control how many vectors to merge into a vector index per transaction during create operations. 
The setting defaults to `3`. [#155284][#155284] -- Updated the scan misestimate logging, which is controlled by the `sql.log.scan_row_count_misestimate.enabled` cluster setting, to use structured logging. The logs now include the scanned table and index, the estimated and actual row counts, the time since the last table statistics collection, and the table's estimated staleness. [#155454][#155454] -- The `EXPERIMENTAL SCRUB` command is deprecated. Use the `INSPECT` command for data consistency validation. [#155485][#155485] -- `INSPECT` supports a `DETACHED` option to run the operation without waiting for it. [#155774][#155774] -- `ALTER TABLE ... DROP STORED` statements are now executed internally by the declarative schema changer. [#155778][#155778] -- Added a `sql.statements.rows_read.count` metric that counts the number of index rows read by SQL statements. [#155820][#155820] -- Added the `EXPLAIN (FINGERPRINT)` statement, which returns normalized statement fingerprints with constants replaced by underscores. For example, `EXPLAIN (FINGERPRINT) SELECT * FROM t WHERE a = 123` returns `SELECT * FROM t WHERE a = _`. [#156152][#156152] + #154892 + +- The `row_security` session variable now behaves as it does in PostgreSQL, allowing users to detect when RLS is applied. #155110 +- Added the `bulkio.index_backfill.vector_merge_batch_size` cluster setting to control how many vectors to merge into a vector index per transaction during create operations. The setting defaults to `3`. #155284 +- Updated the scan misestimate logging, which is controlled by the `sql.log.scan_row_count_misestimate.enabled` cluster setting, to use structured logging. The logs now include the scanned table and index, the estimated and actual row counts, the time since the last table statistics collection, and the table's estimated staleness. #155454 +- The `EXPERIMENTAL SCRUB` command is deprecated. Use the `INSPECT` command for data consistency validation. 
#155485 +- `INSPECT` supports a `DETACHED` option to run the operation without waiting for it. #155774 +- `ALTER TABLE ... DROP STORED` statements are now executed internally by the declarative schema changer. #155778 +- Added a `sql.statements.rows_read.count` metric that counts the number of index rows read by SQL statements. #155820 +- Added the `EXPLAIN (FINGERPRINT)` statement, which returns normalized statement fingerprints with constants replaced by underscores. For example, `EXPLAIN (FINGERPRINT) SELECT * FROM t WHERE a = 123` returns `SELECT * FROM t WHERE a = _`. #156152 - Introduced two new settings to control the use of canary statistics in query planning: - Cluster setting `sql.stats.canary_fraction` (float, range [0, 1], default: 0): Controls what fraction of queries use "canary statistics" (newly collected stats within their canary window) versus "stable statistics" (previously proven stats). For example, a value of `0.2` means 20% of queries will use canary stats while 80% use stable stats. The selection is atomic per query: if a query is chosen for canary evaluation, it uses canary statistics for **all** tables it references (where available). A query never uses a mix of canary and stable statistics. - Session variable `canary_stats_mode` (enum: {auto, off, on}, default: auto): - `on`: All queries in the session use canary stats for planning. - `off`: All queries in the session use stable stats for planning. - - `auto`: The system decides based on `sql.stats.canary_fraction` for each query execution. [#156307][#156307] -- Introduced a new table storage parameter, `sql_stats_canary_window`, to enable gradual rollout of newly collected table statistics. It takes a duration string as the value. When set with a positive duration, the new statistics remain in a "canary" state for the specified duration before being promoted to stable. 
This allows for controlled exposure and intervention opportunities before statistics are fully deployed across all queries. [#156307][#156307] -- Introduced `SHOW FINGERPRINTS FOR TABLE`, which produces an FNV hash for each index in a table. FNV is used for performance reasons and is sensitive to changes in the underlying data, including `NULL`s. [#156600][#156600] -- The `optimizer_clamp_low_histogram_selectivity` and `optimizer_clamp_inequality_selectivity` settings are now on by default. This causes the optimizer to assume that at least one distinct value "passes" each filter in a query, and that open-ended inequality filters select at least 1/10000 rows from the table. This reduces the chances of a catastrophic row count underestimate when stats are inaccurate. [#156610][#156610] -- The `ALTER TABLE ... SET/ADD GENERATED AS IDENTITY` statement is supported by the declarative schema changer in v26.1 and later. [#157144][#157144] -- `EXPLAIN` and `EXPLAIN ANALYZE` will now display the number of hints from `system.statement_hints` applied to the executed statement. [#157160][#157160] -- The **Plan Details** in the **Statement Activity** page of the DB Console now show whether any hints from `system.statement_hints` were applied to the statement execution. [#157160][#157160] + - `auto`: The system decides based on `sql.stats.canary_fraction` for each query execution. #156307 +- Introduced a new table storage parameter, `sql_stats_canary_window`, to enable gradual rollout of newly collected table statistics. It takes a duration string as the value. When set with a positive duration, the new statistics remain in a "canary" state for the specified duration before being promoted to stable. This allows for controlled exposure and intervention opportunities before statistics are fully deployed across all queries. #156307 +- Introduced `SHOW FINGERPRINTS FOR TABLE`, which produces an FNV hash for each index in a table. 
FNV is used for performance reasons and is sensitive to changes in the underlying data, including `NULL`s. #156600 +- The `optimizer_clamp_low_histogram_selectivity` and `optimizer_clamp_inequality_selectivity` settings are now on by default. This causes the optimizer to assume that at least one distinct value "passes" each filter in a query, and that open-ended inequality filters select at least 1/10000 rows from the table. This reduces the chances of a catastrophic row count underestimate when stats are inaccurate. #156610 +- The `ALTER TABLE ... SET/ADD GENERATED AS IDENTITY` statement is supported by the declarative schema changer in v26.1 and later. #157144 +- `EXPLAIN` and `EXPLAIN ANALYZE` will now display the number of hints from `system.statement_hints` applied to the executed statement. #157160 +- The **Plan Details** in the **Statement Activity** page of the DB Console now show whether any hints from `system.statement_hints` were applied to the statement execution. #157160

Operational changes

-- The metrics `sql.select.started.count`, `sql.insert.started.count`, `sql.update.started.count`, and `sql.delete.started.count` are now emitted with labels under the common metric name `sql.started.count`, using a `query_type` label to distinguish each operation. [#151946][#151946] -- Added the cluster setting `storage.unhealthy_write_duration` (defaults to 20s), which is used to indicate to the allocator that a store's disk is unhealthy. The cluster setting `kv.allocator.disk_unhealthy_io_overload_score` controls the overload score assigned to a store with an unhealthy disk, where a higher score results in preventing lease or replica transfers to the store, or shedding of leases by the store. The default value of that setting is 0, so the allocator behavior is unaffected. [#153364][#153364] -- Added two new changefeed metrics for tracking the max skew between a changefeed's slowest and fastest span/table. The metrics are gauge metrics with the names `changefeed.progress_skew.{span}` and `changefeed.progress_skew.{table}`. [#153975][#153975] -- Added the cluster setting `storage.snapshot.recreate_iter_duration` (default 20s), which controls how frequently a long-lived storage engine iterator, backed by an engine snapshot, will be closed and recreated. Currently, it is only used for iterators used in rangefeed catchup scans. [#154412][#154412] -- Added cluster setting `sql.schema.approx_max_object_count` (default: 20,000) to prevent creation of new schema objects when the limit is exceeded. The check uses cached table statistics for performance and is approximate - it may not be immediately accurate until table statistics are updated by the background statistics refreshing job. Clusters that have been running stably with a larger object count should raise the limit or disable the limit by setting the value to 0. In future releases, the default value for this setting will be raised as more CockroachDB features support larger object counts. 
[#154495][#154495] -- Cleaned up redundant and misleading metrics. [#154545][#154545] -- Fixed the `changefeed.parallel_io_pending_rows` metric's y-axis label to match the metric's definition. [#154552][#154552] -- Added a metric called `changefeed.parallel_io_workers` to track the number of workers in ParallelIO. [#154552][#154552] -- Events related to changefeed operations are now routed to the `CHANGEFEED` channel, while sampled queries and transactions, along with certain SQL performance events, are logged to `SQL_EXEC`. To continue using the previous logging channels, set `log.channel_compatibility_mode.enabled` to `true`. [#154670][#154670] -- Successfully completed automatic SQL stats collecton jobs are now automatically purged rather than being retained for the full default job retention period. [#155848][#155848] -- The cluster setting `storage.snapshot.recreate_iter_duration` (default `20s`) controls how frequently a long-lived engine iterator, backed by an engine snapshot, will be closed and recreated. Currently, it is only used for iterators used in rangefeed catchup scans. [#156303][#156303] -- Add support for `CREATE LOGICAL REPLICATION STREAM` in situations where the source table has a column with a sequence expression. [#156975][#156975] +- The metrics `sql.select.started.count`, `sql.insert.started.count`, `sql.update.started.count`, and `sql.delete.started.count` are now emitted with labels under the common metric name `sql.started.count`, using a `query_type` label to distinguish each operation. #151946 +- Added the cluster setting `storage.unhealthy_write_duration` (defaults to 20s), which is used to indicate to the allocator that a store's disk is unhealthy. The cluster setting `kv.allocator.disk_unhealthy_io_overload_score` controls the overload score assigned to a store with an unhealthy disk, where a higher score results in preventing lease or replica transfers to the store, or shedding of leases by the store. 
The default value of that setting is 0, so the allocator behavior is unaffected. #153364 +- Added two new changefeed metrics for tracking the max skew between a changefeed's slowest and fastest span/table. The metrics are gauge metrics with the names `changefeed.progress_skew.{span}` and `changefeed.progress_skew.{table}`. #153975 +- Added the cluster setting `storage.snapshot.recreate_iter_duration` (default 20s), which controls how frequently a long-lived storage engine iterator, backed by an engine snapshot, will be closed and recreated. Currently, it is only used for iterators used in rangefeed catchup scans. #154412 +- Added cluster setting `sql.schema.approx_max_object_count` (default: 20,000) to prevent creation of new schema objects when the limit is exceeded. The check uses cached table statistics for performance and is approximate - it may not be immediately accurate until table statistics are updated by the background statistics refreshing job. Clusters that have been running stably with a larger object count should raise the limit or disable the limit by setting the value to 0. In future releases, the default value for this setting will be raised as more CockroachDB features support larger object counts. #154495 +- Cleaned up redundant and misleading metrics. #154545 +- Fixed the `changefeed.parallel_io_pending_rows` metric's y-axis label to match the metric's definition. #154552 +- Added a metric called `changefeed.parallel_io_workers` to track the number of workers in ParallelIO. #154552 +- Events related to changefeed operations are now routed to the `CHANGEFEED` channel, while sampled queries and transactions, along with certain SQL performance events, are logged to `SQL_EXEC`. To continue using the previous logging channels, set `log.channel_compatibility_mode.enabled` to `true`. #154670 +- Successfully completed automatic SQL stats collection jobs are now automatically purged rather than being retained for the full default job retention period. 
#155848 +- The cluster setting `storage.snapshot.recreate_iter_duration` (default `20s`) controls how frequently a long-lived engine iterator, backed by an engine snapshot, will be closed and recreated. Currently, it is only used for iterators used in rangefeed catchup scans. #156303 +- Added support for `CREATE LOGICAL REPLICATION STREAM` in situations where the source table has a column with a sequence expression. #156975

Command-line changes

- `cockroach workload run` commands now offer a `--with-changefeed` flag to additionally run a changefeed that watches for - writes to the workload's tables. [#155516][#155516] + writes to the workload's tables. #155516

DB Console changes

@@ -116,193 +116,91 @@ Release Date: December 4, 2025 - The **Transaction Restarts** dashboard now displays `txn.restarts.txnpush` and `txn.restarts.unknown` metrics. - A new **Failed SQL Connections** graph shows failed SQL connection attempts. A new **SQL Queries Within Routines Per Second** dashboard reports on `SELECT`, `UPDATE`, `INSERT`, and `DELETE` operations executed within routines. - A new **Table Statistics Collections** dashboard provides information on auto, auto partial, and manual statistics collections. - [#155203][#155203] + #155203 -- The background (elastic) store graphs for exhausted duration, and the wait duration histogram, have been separated from the foreground (regular) graphs. [#156801][#156801] +- The background (elastic) store graphs for exhausted duration, and the wait duration histogram, have been separated from the foreground (regular) graphs. #156801

Bug fixes

-- Previously, CockroachDB would omit execution statistics in `EXPLAIN ANALYZE` output for mutation nodes when a `RETURNING` clause was used. The bug was present since before v21.1 and is now fixed. [#145934][#145934] -- Fixed a bug where CockroachDB could encounter a `vector encoder doesn't support ForcePut yet` error when executing `COPY` commands concurrently with certain schema changes. The bug had existed since before v23.2. [#148549][#148549] -- Fixed a bug causing a `READ COMMITTED` or `SNAPSHOT` isolation transaction to be committed despite returning a non-ambiguous error. [#152010][#152010] -- Fixed a bug in type-checking placeholders with `UNKNOWN` types. It could cause incorrect results in some cases. [#152882][#152882] -- Fixed a bug where `EXPORT CSV` and `EXPORT PARQUET` could cause a node crash when their result rows were used as input to a mutation, such as an `INSERT`, within the same SQL statement. This bug had been present since before v22.1. [#153951][#153951] -- **Idle latency** on the **Transaction Details** page in the DB Console is now reported more accurately. Previously, transactions that used prepared statements (e.g., with placeholders) overcounted idle time, while those that included observer statements (common in the SQL CLI) undercounted it. [#154028][#154028] -- Fixed a bug that caused panics when executing `COPY` into a table with hidden columns and expression indexes. The panic only occurred when the session setting `expect_and_ignore_not_visible_columns_in_copy` was enabled. This bug was introduced with `expect_and_ignore_not_visible_columns_in_copy` in v22.1.0. [#154162][#154162] -- Vector index backfill jobs now correctly report progress in the `SHOW JOBS` output. [#154209][#154209] -- Fixed a bug where `RESTORE` of a database with a `SECONDARY REGION` did not apply the lease preferences for that region. 
[#154522][#154522] +- Previously, CockroachDB would omit execution statistics in `EXPLAIN ANALYZE` output for mutation nodes when a `RETURNING` clause was used. The bug was present since before v21.1 and is now fixed. #145934 +- Fixed a bug where CockroachDB could encounter a `vector encoder doesn't support ForcePut yet` error when executing `COPY` commands concurrently with certain schema changes. The bug had existed since before v23.2. #148549 +- Fixed a bug causing a `READ COMMITTED` or `SNAPSHOT` isolation transaction to be committed despite returning a non-ambiguous error. #152010 +- Fixed a bug in type-checking placeholders with `UNKNOWN` types. It could cause incorrect results in some cases. #152882 +- Fixed a bug where `EXPORT CSV` and `EXPORT PARQUET` could cause a node crash when their result rows were used as input to a mutation, such as an `INSERT`, within the same SQL statement. This bug had been present since before v22.1. #153951 +- **Idle latency** on the **Transaction Details** page in the DB Console is now reported more accurately. Previously, transactions that used prepared statements (e.g., with placeholders) overcounted idle time, while those that included observer statements (common in the SQL CLI) undercounted it. #154028 +- Fixed a bug that caused panics when executing `COPY` into a table with hidden columns and expression indexes. The panic only occurred when the session setting `expect_and_ignore_not_visible_columns_in_copy` was enabled. This bug was introduced with `expect_and_ignore_not_visible_columns_in_copy` in v22.1.0. #154162 +- Vector index backfill jobs now correctly report progress in the `SHOW JOBS` output. #154209 +- Fixed a bug where `RESTORE` of a database with a `SECONDARY REGION` did not apply the lease preferences for that region. #154522 - Fixed a bug where a changefeed could perform - many unnecessary job progress saves during an initial scan. 
[#154598][#154598] -- Fixed a bug where CockroachDB would not log events for `TxnRowsRead` and `TxnRowsWritten` guardrails for internal queries into the `SQL_INTERNAL_PERF` logging channel. The bug was present since v21.2. [#154670][#154670] -- Fixed a bug that caused internal errors for `INSERT .. ON CONFLICT .. DO UPDATE` statements when the target table had both a computed column and a `BEFORE` trigger. This bug was present since triggers were introduced in v24.3.0. [#154789][#154789] -- Fixed a bug where a changefeed targeting only a subset of a table's column families could become stuck. [#154802][#154802] -- Fixed a bug where CockroachDB would hit an internal error when performing an inverted join using an inverted index in which the first prefix column had `DESC` direction. The bug was present since v21.1. [#154914][#154914] -- Fixed a bug where the `kvflowcontrol.send_queue.scheduled.force_flush` metric was missing a decrement, resulting in a value of greater than `0` even when there was no ongoing force flush. [#154960][#154960] -- Fixed a bug that would cause `WITH READ VIRTUAL CLUSTER` to be ignored if any other options were passed when running `CREATE VIRTUAL CLUSTER FROM REPLICATION`. [#154963][#154963] -- Internal assertions that verify `NumRange = 0` in the first histogram bucket, used to catch malformed statistics, now run only in test builds to avoid crashing production queries. [#155035][#155035] -- Fixed a bug in which range counts in table statistics histograms were not handled correctly after a user-defined `ENUM` type was modified. [#155035][#155035] -- Fixed a bug in the `cockroach node drain` command where draining a node using virtual clusters (such as clusters running Physical Cluster Replication (PCR)) could return before the drain was complete, possibly resulting in shutting down the node while it still had active SQL clients and range leases. 
[#155063][#155063] -- Fixed a bug where a race condition in range splits could result in a regressed Raft state on a post-split range. This condition was extremely rare, and only observed during internal testing. [#155143][#155143] -- Corrected a potential deadlock during vector index creation. [#155192][#155192] + many unnecessary job progress saves during an initial scan. #154598 +- Fixed a bug where CockroachDB would not log events for `TxnRowsRead` and `TxnRowsWritten` guardrails for internal queries into the `SQL_INTERNAL_PERF` logging channel. The bug was present since v21.2. #154670 +- Fixed a bug that caused internal errors for `INSERT .. ON CONFLICT .. DO UPDATE` statements when the target table had both a computed column and a `BEFORE` trigger. This bug was present since triggers were introduced in v24.3.0. #154789 +- Fixed a bug where a changefeed targeting only a subset of a table's column families could become stuck. #154802 +- Fixed a bug where CockroachDB would hit an internal error when performing an inverted join using an inverted index in which the first prefix column had `DESC` direction. The bug was present since v21.1. #154914 +- Fixed a bug where the `kvflowcontrol.send_queue.scheduled.force_flush` metric was missing a decrement, resulting in a value of greater than `0` even when there was no ongoing force flush. #154960 +- Fixed a bug that would cause `WITH READ VIRTUAL CLUSTER` to be ignored if any other options were passed when running `CREATE VIRTUAL CLUSTER FROM REPLICATION`. #154963 +- Internal assertions that verify `NumRange = 0` in the first histogram bucket, used to catch malformed statistics, now run only in test builds to avoid crashing production queries. #155035 +- Fixed a bug in which range counts in table statistics histograms were not handled correctly after a user-defined `ENUM` type was modified. 
#155035 +- Fixed a bug in the `cockroach node drain` command where draining a node using virtual clusters (such as clusters running Physical Cluster Replication (PCR)) could return before the drain was complete, possibly resulting in shutting down the node while it still had active SQL clients and range leases. #155063 +- Fixed a bug where a race condition in range splits could result in a regressed Raft state on a post-split range. This condition was extremely rare, and only observed during internal testing. #155143 +- Corrected a potential deadlock during vector index creation. #155192 - Fixed a bug that would result in a node crash if a - PCR or LDR URI used `sslinline=true` with `sslmode=disable`. [#155232][#155232] -- Fixed a bug where CockroachDB could corrupt the first bucket of table statistic histograms in certain cases, causing underestimates for range counts near the lower end of the domain. [#155242][#155242] -- Added proper dependency handling when adding a constraint with `NOT VALID` that references a user-defined function (UDF). [#155404][#155404] -- Fixed a bug that prevented the optimizer from recognizing correlated filters when one of the filtered columns had a single distinct value across all rows. This could lead to suboptimal query plans in some cases. [#155407][#155407] -- The username remapping functionality specified by the `server.identity_map.configuration` cluster setting now matches identities and usernames with a case-insensitive comparison. [#155531][#155531] -- Previously, the forecasted statistics shown in `SHOW STATISTICS ... WITH FORECAST` could be inconsistent with those in the stats cache, depending on whether `WITH MERGE` was specified. Forecasted statistics are now displayed consistently, regardless of the `WITH MERGE` clause. [#155615][#155615] -- Fixed a bug where CockroachDB could crash when executing `EXPLAIN ANALYZE` statements using the pausable portal model. 
This would occur when the query was executed via the extended PGWire protocol (`Parse`, `Bind`, `Execute`) with the `multiple_active_portals_enabled` session variable set. The bug was present since v23.2. [#155655][#155655] -- `INSPECT` can now be run on tables with indexes that store `REFCURSOR`-typed columns. [#155772][#155772] -- Fixes a bug where `DROP SCHEMA CASCADE` could run into an error with complex references from triggers. [#155777][#155777] -- Fixed a bug where the job responsible for compacting stats for the SQL activity state could enter an unschedulable state. Fixes: #155165 [#155809][#155809] -- Fixed a bug where reads and writes performed by routines (user-defined functions and stored procedures) and apply joins were not included in `bytes read`, `rows read`, and `rows written` statement execution statistics. This bug had been present since before v23.2. [#155824][#155824] -- `INSPECT` now correctly checks index consistency at the historical timestamp when using `AS OF SYSTEM TIME`, even for spans with no current data. [#155837][#155837] -- The `INSPECT` statement now detects dangling secondary index entries even when the primary index spans contain no data. [#155844][#155844] -- Fixed an internal error that could occur when replacing a user-defined function or stored procedure using `CREATE OR REPLACE`, if the existing signature included multiple `DEFAULT` expressions. This bug was introduced in v24.2, when support for `DEFAULT` expressions was added. [#155867][#155867] -- `INSPECT` no longer fails when checking index consistency on indexes with virtual key columns. Such indexes will now be skipped. [#155956][#155956] -- Fixed a bug where DML statements on regional by row tables with unique indexes that do not reference the region could sometimes fail under `READ COMMITTED` isolation. [#156105][#156105] -- Fixed a bug where Zone Config Extensions incorrectly prevented users from removing non-voting read replicas from multi-region databases. 
Users can now set `num_replicas` equal to `num_voters` to remove read replicas while maintaining the required number of voting replicas for their database's survival goal. This allows reducing storage costs without compromising availability guarantees. [#156228][#156228] -- Fixed a bug in the `ltree2text` built-in function where the returned `TEXT` value was incorrectly wrapped in single quotes. This bug had been present since the `ltree2text` function was introduced in v25.4.0. [#156485][#156485] -- Fixed a bug that caused incorrect results for queries that filter indexed `LTREE` columns with the `<@` (contained-by) operator. This bug was present since v25.4.0. [#156573][#156573] -- Fixed a bug where the "atomic" `COPY` command (controlled via the `copy_from_atomic_enabled` session setting, `true` by default) could encounter `RETRY_COMMIT_DEADLINE_EXCEEDED` transaction errors if the whole command took 1 minute or more. This bug occurred only when the vectorized engine was used for `COPY`. [#156584][#156584] -- Fixed a bug that caused transactions to fail with the error message: `failed indeterminate commit recovery: programming error: timestamp change by implicitly committed transaction`. [#156722][#156722] -- Fixed a bug in JSONPath index acceleration where queries using `jsonb_path_exists` with a root key (e.g., `$.b`) incorrectly returned no results when the queried JSON was an array. This fix enables unwrapping a single array layer at the root, allowing the path to be evaluated against each element. Only v25.4 releases were affected. [#156828][#156828] -- Fixed a bug that prevents large `TRUNCATE` operations from completing due to `command is too large` errors. [#156867][#156867] -- Fixed a bug that caused incorrect `gossip.callbacks.pending_duration` metric values to be recorded. 
[#156939][#156939] -- Fixed a bug where transactions running concurrently with a `GRANT` or `REVOKE` on virtual tables or via external connections could observe modifications incorrectly. [#156949][#156949] -- Fixed a bug where CockroachDB could encounter an internal error when evaluating a `COPY FROM` command in a transaction after it was rolled back to a savepoint. The bug was present since before v23.2. [#156959][#156959] -- Fixed a bug that could cause internal errors for queries using generic query plans with `NULL` placeholder values. [#156962][#156962] -- Fixed a bug that could cause a schema change to be stuck in the reverting state if the `infer_rbr_region_col_using_constraint` storage parameter was being set at the same time as adding a constraint that had a foreign key violation. [#157834][#157834] + PCR or LDR URI used `sslinline=true` with `sslmode=disable`. #155232 +- Fixed a bug where CockroachDB could corrupt the first bucket of table statistic histograms in certain cases, causing underestimates for range counts near the lower end of the domain. #155242 +- Added proper dependency handling when adding a constraint with `NOT VALID` that references a user-defined function (UDF). #155404 +- Fixed a bug that prevented the optimizer from recognizing correlated filters when one of the filtered columns had a single distinct value across all rows. This could lead to suboptimal query plans in some cases. #155407 +- The username remapping functionality specified by the `server.identity_map.configuration` cluster setting now matches identities and usernames with a case-insensitive comparison. #155531 +- Previously, the forecasted statistics shown in `SHOW STATISTICS ... WITH FORECAST` could be inconsistent with those in the stats cache, depending on whether `WITH MERGE` was specified. Forecasted statistics are now displayed consistently, regardless of the `WITH MERGE` clause. 
#155615 +- Fixed a bug where CockroachDB could crash when executing `EXPLAIN ANALYZE` statements using the pausable portal model. This would occur when the query was executed via the extended PGWire protocol (`Parse`, `Bind`, `Execute`) with the `multiple_active_portals_enabled` session variable set. The bug was present since v23.2. #155655 +- `INSPECT` can now be run on tables with indexes that store `REFCURSOR`-typed columns. #155772 +- Fixes a bug where `DROP SCHEMA CASCADE` could run into an error with complex references from triggers. #155777 +- Fixed a bug where the job responsible for compacting stats for the SQL activity state could enter an unschedulable state. Fixes: #155165 #155809 +- Fixed a bug where reads and writes performed by routines (user-defined functions and stored procedures) and apply joins were not included in `bytes read`, `rows read`, and `rows written` statement execution statistics. This bug had been present since before v23.2. #155824 +- `INSPECT` now correctly checks index consistency at the historical timestamp when using `AS OF SYSTEM TIME`, even for spans with no current data. #155837 +- The `INSPECT` statement now detects dangling secondary index entries even when the primary index spans contain no data. #155844 +- Fixed an internal error that could occur when replacing a user-defined function or stored procedure using `CREATE OR REPLACE`, if the existing signature included multiple `DEFAULT` expressions. This bug was introduced in v24.2, when support for `DEFAULT` expressions was added. #155867 +- `INSPECT` no longer fails when checking index consistency on indexes with virtual key columns. Such indexes will now be skipped. #155956 +- Fixed a bug where DML statements on regional by row tables with unique indexes that do not reference the region could sometimes fail under `READ COMMITTED` isolation. 
#156105 +- Fixed a bug where Zone Config Extensions incorrectly prevented users from removing non-voting read replicas from multi-region databases. Users can now set `num_replicas` equal to `num_voters` to remove read replicas while maintaining the required number of voting replicas for their database's survival goal. This allows reducing storage costs without compromising availability guarantees. #156228 +- Fixed a bug in the `ltree2text` built-in function where the returned `TEXT` value was incorrectly wrapped in single quotes. This bug had been present since the `ltree2text` function was introduced in v25.4.0. #156485 +- Fixed a bug that caused incorrect results for queries that filter indexed `LTREE` columns with the `<@` (contained-by) operator. This bug was present since v25.4.0. #156573 +- Fixed a bug where the "atomic" `COPY` command (controlled via the `copy_from_atomic_enabled` session setting, `true` by default) could encounter `RETRY_COMMIT_DEADLINE_EXCEEDED` transaction errors if the whole command took 1 minute or more. This bug occurred only when the vectorized engine was used for `COPY`. #156584 +- Fixed a bug that caused transactions to fail with the error message: `failed indeterminate commit recovery: programming error: timestamp change by implicitly committed transaction`. #156722 +- Fixed a bug in JSONPath index acceleration where queries using `jsonb_path_exists` with a root key (e.g., `$.b`) incorrectly returned no results when the queried JSON was an array. This fix enables unwrapping a single array layer at the root, allowing the path to be evaluated against each element. Only v25.4 releases were affected. #156828 +- Fixed a bug that prevents large `TRUNCATE` operations from completing due to `command is too large` errors. #156867 +- Fixed a bug that caused incorrect `gossip.callbacks.pending_duration` metric values to be recorded. 
#156939 +- Fixed a bug where transactions running concurrently with a `GRANT` or `REVOKE` on virtual tables or via external connections could observe modifications incorrectly. #156949 +- Fixed a bug where CockroachDB could encounter an internal error when evaluating a `COPY FROM` command in a transaction after it was rolled back to a savepoint. The bug was present since before v23.2. #156959 +- Fixed a bug that could cause internal errors for queries using generic query plans with `NULL` placeholder values. #156962 +- Fixed a bug that could cause a schema change to be stuck in the reverting state if the `infer_rbr_region_col_using_constraint` storage parameter was being set at the same time as adding a constraint that had a foreign key violation. #157834

Performance improvements

-- The cost of generic query plans is now calculated based on worst-case selectivities for placeholder equalities (e.g., `x = $1`). This reduces the chance of suboptimal generic query plans being chosen when `plan_cache_mode=auto`. [#151409][#151409] -- TTL jobs now checkpoint their progress, allowing them to resume without reprocessing already completed spans after a restart. [#152618][#152618] -- Queries with filters in the form `a LIKE b ESCAPE '\'` are now index-accelerated in certain cases where they were not before. [#155064][#155064] -- The optimizer will no longer choose a generic query plan with unbounded cardinality over a custom query plan with bounded cardinality, regardless of `optimizer_prefer_bounded_cardinality`, better optimizing such queries. [#155163][#155163] -- Optimized validation queries during `ALTER PRIMARY KEY` to avoid counting the primary key multiple times. [#156889][#156889] -- The optimizer now splits disjunctions on the same column into unions when there are multiple partial indexes with different predicates referencing that column. [#157083][#157083] +- The cost of generic query plans is now calculated based on worst-case selectivities for placeholder equalities (e.g., `x = $1`). This reduces the chance of suboptimal generic query plans being chosen when `plan_cache_mode=auto`. #151409 +- TTL jobs now checkpoint their progress, allowing them to resume without reprocessing already completed spans after a restart. #152618 +- Queries with filters in the form `a LIKE b ESCAPE '\'` are now index-accelerated in certain cases where they were not before. #155064 +- The optimizer will no longer choose a generic query plan with unbounded cardinality over a custom query plan with bounded cardinality, regardless of `optimizer_prefer_bounded_cardinality`, better optimizing such queries. #155163 +- Optimized validation queries during `ALTER PRIMARY KEY` to avoid counting the primary key multiple times. 
#156889 +- The optimizer now splits disjunctions on the same column into unions when there are multiple partial indexes with different predicates referencing that column. #157083

Build changes

-- Upgraded to Go version 1.25.3 [#156000][#156000] +- Upgraded to Go version 1.25.3 #156000

Miscellaneous

- Added initial and catchup scan metrics to Physical Cluster Replication (PCR) under `physical_replication.scanning_ranges` and - `physical_replication.catchup_ranges`. [#153893][#153893] -- Added a retry policy for Azure Blob Storage with a default of 60 seconds to mitigate occasional stuck operations. The retry policy is configurable with the `cloudstorage.azure.try.timeout` setting. [#154149][#154149] + `physical_replication.catchup_ranges`. #153893 +- Added a retry policy for Azure Blob Storage with a default of 60 seconds to mitigate occasional stuck operations. The retry policy is configurable with the `cloudstorage.azure.try.timeout` setting. #154149 - Logical Data Replication (LDR) now updates the `logical_replication.scanning_ranges` - and `logical_replication.catchup_ranges` metrics during fast initial scan. [#155274][#155274] -- Added the `jobs.registry.max_adoptions_per_loop` cluster setting to configure the maximum number of jobs a node can adopt per adoption loop. [#155385][#155385] -- Fixed a bug that prevented admin users from having full access to external connections created by other users. [#155657][#155657] + and `logical_replication.catchup_ranges` metrics during fast initial scan. #155274 +- Added the `jobs.registry.max_adoptions_per_loop` cluster setting to configure the maximum number of jobs a node can adopt per adoption loop. #155385 +- Fixed a bug that prevented admin users from having full access to external connections created by other users. #155657 - LDR no longer requires the database name to be specified - in the external connection URI when setting up a bidirectional stream. [#155729][#155729] + in the external connection URI when setting up a bidirectional stream. #155729 - Span config reconciliation jobs no longer fail on the - destination after failover from a PCR stream of a system virtual cluster. 
[#156003][#156003] -- Added support of partial indexes to Logical Data Replication, tolerant of mismatched column IDs in the source and destination tables. [#156935][#156935] -- Display whether build is FIPS-enabled in `cockroach version` [#157223][#157223] - -[#155485]: https://github.com/cockroachdb/cockroach/pull/155485 -[#154892]: https://github.com/cockroachdb/cockroach/pull/154892 -[#151946]: https://github.com/cockroachdb/cockroach/pull/151946 -[#155192]: https://github.com/cockroachdb/cockroach/pull/155192 -[#151131]: https://github.com/cockroachdb/cockroach/pull/151131 -[#156105]: https://github.com/cockroachdb/cockroach/pull/156105 -[#155615]: https://github.com/cockroachdb/cockroach/pull/155615 -[#155837]: https://github.com/cockroachdb/cockroach/pull/155837 -[#156889]: https://github.com/cockroachdb/cockroach/pull/156889 -[#154744]: https://github.com/cockroachdb/cockroach/pull/154744 -[#155143]: https://github.com/cockroachdb/cockroach/pull/155143 -[#155774]: https://github.com/cockroachdb/cockroach/pull/155774 -[#154873]: https://github.com/cockroachdb/cockroach/pull/154873 -[#157160]: https://github.com/cockroachdb/cockroach/pull/157160 -[#154028]: https://github.com/cockroachdb/cockroach/pull/154028 -[#155035]: https://github.com/cockroachdb/cockroach/pull/155035 -[#155242]: https://github.com/cockroachdb/cockroach/pull/155242 -[#155407]: https://github.com/cockroachdb/cockroach/pull/155407 -[#155844]: https://github.com/cockroachdb/cockroach/pull/155844 -[#154963]: https://github.com/cockroachdb/cockroach/pull/154963 -[#155655]: https://github.com/cockroachdb/cockroach/pull/155655 -[#155064]: https://github.com/cockroachdb/cockroach/pull/155064 -[#153067]: https://github.com/cockroachdb/cockroach/pull/153067 -[#155063]: https://github.com/cockroachdb/cockroach/pull/155063 -[#155203]: https://github.com/cockroachdb/cockroach/pull/155203 -[#155163]: https://github.com/cockroachdb/cockroach/pull/155163 -[#153893]: 
https://github.com/cockroachdb/cockroach/pull/153893 -[#154522]: https://github.com/cockroachdb/cockroach/pull/154522 -[#155777]: https://github.com/cockroachdb/cockroach/pull/155777 -[#154051]: https://github.com/cockroachdb/cockroach/pull/154051 -[#154412]: https://github.com/cockroachdb/cockroach/pull/154412 -[#152882]: https://github.com/cockroachdb/cockroach/pull/152882 -[#155404]: https://github.com/cockroachdb/cockroach/pull/155404 -[#150793]: https://github.com/cockroachdb/cockroach/pull/150793 -[#155516]: https://github.com/cockroachdb/cockroach/pull/155516 -[#156722]: https://github.com/cockroachdb/cockroach/pull/156722 -[#154870]: https://github.com/cockroachdb/cockroach/pull/154870 -[#155385]: https://github.com/cockroachdb/cockroach/pull/155385 -[#155848]: https://github.com/cockroachdb/cockroach/pull/155848 -[#156962]: https://github.com/cockroachdb/cockroach/pull/156962 -[#157083]: https://github.com/cockroachdb/cockroach/pull/157083 -[#154370]: https://github.com/cockroachdb/cockroach/pull/154370 -[#155274]: https://github.com/cockroachdb/cockroach/pull/155274 -[#153990]: https://github.com/cockroachdb/cockroach/pull/153990 -[#156000]: https://github.com/cockroachdb/cockroach/pull/156000 -[#155778]: https://github.com/cockroachdb/cockroach/pull/155778 -[#154598]: https://github.com/cockroachdb/cockroach/pull/154598 -[#156573]: https://github.com/cockroachdb/cockroach/pull/156573 -[#155454]: https://github.com/cockroachdb/cockroach/pull/155454 -[#156307]: https://github.com/cockroachdb/cockroach/pull/156307 -[#145934]: https://github.com/cockroachdb/cockroach/pull/145934 -[#154162]: https://github.com/cockroachdb/cockroach/pull/154162 -[#155772]: https://github.com/cockroachdb/cockroach/pull/155772 -[#156584]: https://github.com/cockroachdb/cockroach/pull/156584 -[#157834]: https://github.com/cockroachdb/cockroach/pull/157834 -[#156949]: https://github.com/cockroachdb/cockroach/pull/156949 -[#157223]: 
https://github.com/cockroachdb/cockroach/pull/157223 -[#154545]: https://github.com/cockroachdb/cockroach/pull/154545 -[#155284]: https://github.com/cockroachdb/cockroach/pull/155284 -[#153975]: https://github.com/cockroachdb/cockroach/pull/153975 -[#153951]: https://github.com/cockroachdb/cockroach/pull/153951 -[#154489]: https://github.com/cockroachdb/cockroach/pull/154489 -[#155232]: https://github.com/cockroachdb/cockroach/pull/155232 -[#156828]: https://github.com/cockroachdb/cockroach/pull/156828 -[#157144]: https://github.com/cockroachdb/cockroach/pull/157144 -[#156303]: https://github.com/cockroachdb/cockroach/pull/156303 -[#148549]: https://github.com/cockroachdb/cockroach/pull/148549 -[#154960]: https://github.com/cockroachdb/cockroach/pull/154960 -[#155956]: https://github.com/cockroachdb/cockroach/pull/155956 -[#152618]: https://github.com/cockroachdb/cockroach/pull/152618 -[#155729]: https://github.com/cockroachdb/cockroach/pull/155729 -[#156003]: https://github.com/cockroachdb/cockroach/pull/156003 -[#155809]: https://github.com/cockroachdb/cockroach/pull/155809 -[#155867]: https://github.com/cockroachdb/cockroach/pull/155867 -[#156228]: https://github.com/cockroachdb/cockroach/pull/156228 -[#156939]: https://github.com/cockroachdb/cockroach/pull/156939 -[#154495]: https://github.com/cockroachdb/cockroach/pull/154495 -[#154209]: https://github.com/cockroachdb/cockroach/pull/154209 -[#156485]: https://github.com/cockroachdb/cockroach/pull/156485 -[#153419]: https://github.com/cockroachdb/cockroach/pull/153419 -[#155820]: https://github.com/cockroachdb/cockroach/pull/155820 -[#156152]: https://github.com/cockroachdb/cockroach/pull/156152 -[#152010]: https://github.com/cockroachdb/cockroach/pull/152010 -[#155531]: https://github.com/cockroachdb/cockroach/pull/155531 -[#151409]: https://github.com/cockroachdb/cockroach/pull/151409 -[#154552]: https://github.com/cockroachdb/cockroach/pull/154552 -[#156975]: 
https://github.com/cockroachdb/cockroach/pull/156975 -[#154914]: https://github.com/cockroachdb/cockroach/pull/154914 -[#156867]: https://github.com/cockroachdb/cockroach/pull/156867 -[#154149]: https://github.com/cockroachdb/cockroach/pull/154149 -[#155657]: https://github.com/cockroachdb/cockroach/pull/155657 -[#155110]: https://github.com/cockroachdb/cockroach/pull/155110 -[#153364]: https://github.com/cockroachdb/cockroach/pull/153364 -[#154670]: https://github.com/cockroachdb/cockroach/pull/154670 -[#155824]: https://github.com/cockroachdb/cockroach/pull/155824 -[#156600]: https://github.com/cockroachdb/cockroach/pull/156600 -[#156801]: https://github.com/cockroachdb/cockroach/pull/156801 -[#154789]: https://github.com/cockroachdb/cockroach/pull/154789 -[#154802]: https://github.com/cockroachdb/cockroach/pull/154802 -[#156959]: https://github.com/cockroachdb/cockroach/pull/156959 -[#156935]: https://github.com/cockroachdb/cockroach/pull/156935 -[#156610]: https://github.com/cockroachdb/cockroach/pull/156610 + destination after failover from a PCR stream of a system virtual cluster. #156003 +- Added support of partial indexes to Logical Data Replication, tolerant of mismatched column IDs in the source and destination tables. #156935 +- Display whether build is FIPS-enabled in `cockroach version` #157223 + diff --git a/src/current/_includes/releases/v26.1/v26.1.0-alpha.2.md b/src/current/_includes/releases/v26.1/v26.1.0-alpha.2.md index 2f6e40ff280..ce5832702e2 100644 --- a/src/current/_includes/releases/v26.1/v26.1.0-alpha.2.md +++ b/src/current/_includes/releases/v26.1/v26.1.0-alpha.2.md @@ -10,18 +10,18 @@ Release Date: December 11, 2025 - A new `debug_user` certificate has also been introduced for privileged RPC access to collect [debug zip]({% link v26.1/cockroach-debug-zip.md %}) information, which would otherwise be unavailable when `root` is restricted. 
`debug_user` must be created manually with the `CREATE USER` command and can be audited using `SHOW USERS`. It has privileged access to the `serverpb` admin and status endpoints required for debug zip collection. - Ensure that none of the certificates used by the cluster or SQL/RPC clients have "root" in the SAN (Subject Alternative Name) fields, as the flag will block access to those clients. - [#155216][#155216] + #155216

SQL language changes

-- Added a new session variable, `use_swap_mutations`, which controls whether the new update swap and delete swap operators are enabled for use by `UPDATE` and `DELETE` statements. [#145019][#145019] -- Fixed a bug where the results of `ALTER SEQUENCE`'s increment and `SELECT nextval()` operations were not as expected. The value of a sequence after an `ALTER SEQUENCE` statement has executed on it is now consistent with a sequence created with those values. [#154489][#154489] -- SQL statements executed in stored procedures and user-defined functions now record SQL statistics, including latencies and execution metrics. These statistics appear on the **SQL Activity** and **Insights** pages of the DB Console. Limitation: SQL statements within a stored procedure or user-defined function are not collected for active statement diagnostics requests. Statement diagnostics remain available for top-level statement executions. [#156905][#156905] +- Added a new session variable, `use_swap_mutations`, which controls whether the new update swap and delete swap operators are enabled for use by `UPDATE` and `DELETE` statements. #145019 +- Fixed a bug where the results of `ALTER SEQUENCE`'s increment and `SELECT nextval()` operations were not as expected. The value of a sequence after an `ALTER SEQUENCE` statement has executed on it is now consistent with a sequence created with those values. #154489 +- SQL statements executed in stored procedures and user-defined functions now record SQL statistics, including latencies and execution metrics. These statistics appear on the **SQL Activity** and **Insights** pages of the DB Console. Limitation: SQL statements within a stored procedure or user-defined function are not collected for active statement diagnostics requests. Statement diagnostics remain available for top-level statement executions. #156905 - The `ALTER COLUMN ...` sequence identity - commands are run by the declarative schema changer. 
[#157030][#157030] -- The `cumulative time spent waiting in admission control` is now displayed in `EXPLAIN ANALYZE` output when it is non-zero. This helps identify delays caused by admission control during query execution. [#158055][#158055] -- Restarting a sequence with an updated increment has the expected initial value. [#158065][#158065] -- The `cumulative time spent waiting in admission control` reported in `EXPLAIN ANALYZE` now includes the time spent in quorum replication flow control. This update enhances the precision of wait time analysis, offering a more accurate depiction of query execution time by considering additional wait durations within the quorum replication processes. [#158076][#158076] + commands are run by the declarative schema changer. #157030 +- The `cumulative time spent waiting in admission control` is now displayed in `EXPLAIN ANALYZE` output when it is non-zero. This helps identify delays caused by admission control during query execution. #158055 +- Restarting a sequence with an updated increment has the expected initial value. #158065 +- The `cumulative time spent waiting in admission control` reported in `EXPLAIN ANALYZE` now includes the time spent in quorum replication flow control. This update enhances the precision of wait time analysis, offering a more accurate depiction of query execution time by considering additional wait durations within the quorum replication processes. #158076 - Added a new "hint injection" ability that allows operators to dynamically inject inline hints into statements, without modifying the text of those statements. Hints can be injected using the built-in function `crdb_internal.inject_hint` with the target statement fingerprint to rewrite. 
For example, to add an index hint to the statement `SELECT * FROM my_table WHERE col = 3`, use: ~~~ @@ -31,15 +31,15 @@ Release Date: December 11, 2025 ); ~~~ - Whenever a statement is executed matching statement fingerprint `SELECT * FROM my_table WHERE col = _`, it will first be rewritten to include the injected index hint. [#158096][#158096] -- `ALTER TABLE ... SET SCHEMA` is supported by the declarative schema changer. [#158141][#158141] -- `kv cpu time` is now displayed in `EXPLAIN ANALYZE`, providing insights into the CPU resources used by KV operations during query execution. [#158499][#158499] -- CockroachDB now negotiates the pgwire protocol version with PostgreSQL 18+ clients that request protocol version 3.2. Previously, connections from these clients would fail with an "unknown protocol version" error. The server now sends a `NegotiateProtocolVersion` message to indicate it supports version 3.0, allowing the connection to proceed normally. [#158636][#158636] + Whenever a statement is executed matching statement fingerprint `SELECT * FROM my_table WHERE col = _`, it will first be rewritten to include the injected index hint. #158096 +- `ALTER TABLE ... SET SCHEMA` is supported by the declarative schema changer. #158141 +- `kv cpu time` is now displayed in `EXPLAIN ANALYZE`, providing insights into the CPU resources used by KV operations during query execution. #158499 +- CockroachDB now negotiates the pgwire protocol version with PostgreSQL 18+ clients that request protocol version 3.2. Previously, connections from these clients would fail with an "unknown protocol version" error. The server now sends a `NegotiateProtocolVersion` message to indicate it supports version 3.0, allowing the connection to proceed normally. #158636

Operational changes

-- The `allow_unsafe_internals` setting now defaults to `false`, restricting access to the `system` and `crdb_internal` namespaces. Queries to these namespaces will now fail unless access is manually enabled. Usage is also audited. [#158085][#158085] -- Jobs that are paused due to a specific reason, including jobs which pause themselves when encountering errors such as running out of disk space, now record that reason in their displayed status field of `SHOW JOBS`. [#158350][#158350] +- The `allow_unsafe_internals` setting now defaults to `false`, restricting access to the `system` and `crdb_internal` namespaces. Queries to these namespaces will now fail unless access is manually enabled. Usage is also audited. #158085 +- Jobs that are paused due to a specific reason, including jobs which pause themselves when encountering errors such as running out of disk space, now record that reason in their displayed status field of `SHOW JOBS`. #158350 - The following metrics are now marked as essential to support end-user troubleshooting of authentication latency issues: - `auth.jwt.conn.latency` - `auth.cert.conn.latency` @@ -47,26 +47,26 @@ Release Date: December 11, 2025 - `auth.ldap.conn.latency` - `auth.gss.conn.latency` - `auth.scram.conn.latency` - - `auth.ldap.conn.latency.internal` [#158424][#158424] + - `auth.ldap.conn.latency.internal` #158424

DB Console changes

-- The log of messages and events recorded by a job is now shown to non-admin users on the DB Console Jobs page. [#152853][#152853] -- The DB Console now accurately displays **vCPU** counts on the **Overview** page instead of operating system CPU counts. This update uses cgroups to provide a correct vCPU measurement, reflecting reserved compute resources in Kubernetes and other virtualized environments. [#158219][#158219] -- Jobs which are paused for a specific reason now show that reason, and are highlighted in the UI. [#158364][#158364] -- The `kvCPUTimeNanos` is now recorded in `crdb_internal.statement_statistics` and `crdb_internal.transaction_statistics`. In the DB Console, a **KV CPU Time** column is now displayed in the **SQL Activity** > **Statements** and **Transactions** pages, allowing you to monitor and analyze the CPU time consumed by KV operations during query execution. [#158398][#158398] -- The `admissionWaitTime` is now recorded in `crdb_internal.statement_statistics` and `crdb_internal.transaction_statistics`. In the DB Console, an **Admission Wait Time** column is now displayed in the **SQL Activity** > **Statements** and **Transactions** pages. [#158500][#158500] +- The log of messages and events recorded by a job is now shown to non-admin users on the DB Console Jobs page. #152853 +- The DB Console now accurately displays **vCPU** counts on the **Overview** page instead of operating system CPU counts. This update uses cgroups to provide a correct vCPU measurement, reflecting reserved compute resources in Kubernetes and other virtualized environments. #158219 +- Jobs which are paused for a specific reason now show that reason, and are highlighted in the UI. #158364 +- The `kvCPUTimeNanos` is now recorded in `crdb_internal.statement_statistics` and `crdb_internal.transaction_statistics`. 
In the DB Console, a **KV CPU Time** column is now displayed in the **SQL Activity** > **Statements** and **Transactions** pages, allowing you to monitor and analyze the CPU time consumed by KV operations during query execution. #158398 +- The `admissionWaitTime` is now recorded in `crdb_internal.statement_statistics` and `crdb_internal.transaction_statistics`. In the DB Console, an **Admission Wait Time** column is now displayed in the **SQL Activity** > **Statements** and **Transactions** pages. #158500

Bug fixes

-- Fixed a bug that could cause an internal error in some cases for PL/pgSQL routines that perform database reads within an exception block. [#156902][#156902] -- Fixed a bug where a SQL statement with side effects (e.g., `INSERT`) inside a PL/pgSQL routine could be dropped if it used an `INTO` clause and none of the target variables were referenced. This bug had been present since v23.2. [#156966][#156966] -- Fixed a bug where renaming a column that participated in multiple hash-sharded indexes would fail. [#158045][#158045] -- Fixed a bug where `ORDER BY` clauses in user-defined set-returning SQL functions with `OUT` parameters were ignored when the function was called directly in a `SELECT` list (e.g., `SELECT f()`). The ordering is now properly preserved and enforced. [#158162][#158162] -- The pgwire server now exits promptly on context cancellation. [#158269][#158269] -- Fixed a bounded memory leak that could occur during table statistics collection on tables that contain both very wide (10 KiB or more) and small (under 400B) `BYTES`-like values within the same row, along with virtual computed columns. This bug had been present since stats collection on virtual computed columns was introduced in v24.1. [#158370][#158370] -- Temporary schema cleanup no longer retries after poisoned transaction errors, reducing log noise. [#158396][#158396] -- When changing the time interval on the **Metrics** page, the DB Console previously sent duplicate requests for metrics data. This has been fixed, and the UI now issues a single, efficient request when updating the time interval. [#158595][#158595] +- Fixed a bug that could cause an internal error in some cases for PL/pgSQL routines that perform database reads within an exception block. #156902 +- Fixed a bug where a SQL statement with side effects (e.g., `INSERT`) inside a PL/pgSQL routine could be dropped if it used an `INTO` clause and none of the target variables were referenced. This bug had been present since v23.2. 
#156966 +- Fixed a bug where renaming a column that participated in multiple hash-sharded indexes would fail. #158045 +- Fixed a bug where `ORDER BY` clauses in user-defined set-returning SQL functions with `OUT` parameters were ignored when the function was called directly in a `SELECT` list (e.g., `SELECT f()`). The ordering is now properly preserved and enforced. #158162 +- The pgwire server now exits promptly on context cancellation. #158269 +- Fixed a bounded memory leak that could occur during table statistics collection on tables that contain both very wide (10 KiB or more) and small (under 400B) `BYTES`-like values within the same row, along with virtual computed columns. This bug had been present since stats collection on virtual computed columns was introduced in v24.1. #158370 +- Temporary schema cleanup no longer retries after poisoned transaction errors, reducing log noise. #158396 +- When changing the time interval on the **Metrics** page, the DB Console previously sent duplicate requests for metrics data. This has been fixed, and the UI now issues a single, efficient request when updating the time interval. #158595

Performance improvements

@@ -85,38 +85,8 @@ Release Date: December 11, 2025 - There are no mutation columns or mutation indexes (i.e., the table is not undergoing an `ALTER`); - There are no columns using composite encoding (e.g., `DECIMAL`, `FLOAT`, - `JSON`, etc.). [#145019][#145019] -- The optimizer now collapses repeated `%` wildcard characters in `LIKE` patterns. This may improve performance of queries using such patterns. [#158025][#158025] -- More of the CPU usage of LDR jobs is subject to background job admission control limits. [#158361][#158361] - - -[#158424]: https://github.com/cockroachdb/cockroach/pull/158424 -[#156966]: https://github.com/cockroachdb/cockroach/pull/156966 -[#158269]: https://github.com/cockroachdb/cockroach/pull/158269 -[#145019]: https://github.com/cockroachdb/cockroach/pull/145019 -[#158350]: https://github.com/cockroachdb/cockroach/pull/158350 -[#157030]: https://github.com/cockroachdb/cockroach/pull/157030 -[#158396]: https://github.com/cockroachdb/cockroach/pull/158396 -[#158085]: https://github.com/cockroachdb/cockroach/pull/158085 -[#152853]: https://github.com/cockroachdb/cockroach/pull/152853 -[#158219]: https://github.com/cockroachdb/cockroach/pull/158219 -[#158500]: https://github.com/cockroachdb/cockroach/pull/158500 -[#156902]: https://github.com/cockroachdb/cockroach/pull/156902 -[#154489]: https://github.com/cockroachdb/cockroach/pull/154489 -[#158636]: https://github.com/cockroachdb/cockroach/pull/158636 -[#158398]: https://github.com/cockroachdb/cockroach/pull/158398 -[#158595]: https://github.com/cockroachdb/cockroach/pull/158595 -[#155216]: https://github.com/cockroachdb/cockroach/pull/155216 -[#158364]: https://github.com/cockroachdb/cockroach/pull/158364 -[#158045]: https://github.com/cockroachdb/cockroach/pull/158045 -[#158162]: https://github.com/cockroachdb/cockroach/pull/158162 -[#158076]: https://github.com/cockroachdb/cockroach/pull/158076 -[#158025]: https://github.com/cockroachdb/cockroach/pull/158025 -[#156905]: 
https://github.com/cockroachdb/cockroach/pull/156905 -[#158055]: https://github.com/cockroachdb/cockroach/pull/158055 -[#158141]: https://github.com/cockroachdb/cockroach/pull/158141 -[#158499]: https://github.com/cockroachdb/cockroach/pull/158499 -[#158370]: https://github.com/cockroachdb/cockroach/pull/158370 -[#158361]: https://github.com/cockroachdb/cockroach/pull/158361 -[#158065]: https://github.com/cockroachdb/cockroach/pull/158065 -[#158096]: https://github.com/cockroachdb/cockroach/pull/158096 + `JSON`, etc.). #145019 +- The optimizer now collapses repeated `%` wildcard characters in `LIKE` patterns. This may improve performance of queries using such patterns. #158025 +- More of the CPU usage of LDR jobs is subject to background job admission control limits. #158361 + + diff --git a/src/current/_includes/releases/v26.1/v26.1.0-beta.1.md b/src/current/_includes/releases/v26.1/v26.1.0-beta.1.md index 4a8c03dc810..9b9cba10973 100644 --- a/src/current/_includes/releases/v26.1/v26.1.0-beta.1.md +++ b/src/current/_includes/releases/v26.1/v26.1.0-beta.1.md @@ -6,31 +6,31 @@ Release Date: December 17, 2025

Security updates

-- A new flag on the `cockroach start` command enables a `debug_user` with preconfigured access to authenticate for SQL and RPC connections. This is part of a Private Preview and disabled by default, preventing such authentication. It is intended only for debugging and troubleshooting when using the Private Preview feature that allows restricting `root` from login via SQL and RPC connections, while using `debug_user` to access debug details that would have required `root`. [#158963][#158963] +- A new flag on the `cockroach start` command enables a `debug_user` with preconfigured access to authenticate for SQL and RPC connections. This is part of a Private Preview and disabled by default, preventing such authentication. It is intended only for debugging and troubleshooting when using the Private Preview feature that allows restricting `root` from login via SQL and RPC connections, while using `debug_user` to access debug details that would have required `root`. #158963

{{ site.data.products.enterprise }} edition changes

-- Improved the description of the `changefeed.default_range_distribution_strategy` cluster setting to better explain the available options and their behavior. [#158602][#158602] +- Improved the description of the `changefeed.default_range_distribution_strategy` cluster setting to better explain the available options and their behavior. #158602

SQL language changes

-- `crdb_internal.datums_to_bytes` is now available in the `information_schema` system catalog as `information_schema.crdb_datums_to_bytes`. [#156963][#156963] +- `crdb_internal.datums_to_bytes` is now available in the `information_schema` system catalog as `information_schema.crdb_datums_to_bytes`. #156963 - The `ALTER COLUMN ...` sequence identity - commands are run by the declarative schema changer. [#157030][#157030] -- Added support for `EXECUTE SCHEDULE {schedule_id}` to allow immediate execution of a scheduled job. This does not apply to `ALTER BACKUP SCHEDULE`; attempting to execute a backup schedule will result in an error. [#158694][#158694] -- `CREATE TYPE` with composite type syntax now supports array types in field definitions. For example, `CREATE TYPE t AS (a INT[])` and `CREATE TYPE t AS (a INT ARRAY)` now work correctly, matching PostgreSQL behavior. [#158888][#158888] -- Added the `STRICT` option for locality-aware backups. When enabled, backups fail if data from a KV node with one locality tag would be backed up to a bucket with a different locality tag, ensuring data domiciling compliance. [#158999][#158999] -- The `WITH RESOLVED TIMESTAMP` option can be passed to `SHOW JOBS` or `SHOW JOB` to include the resolved timestamp, if any, for the jobs in the output columns. [#159068][#159068] + commands are run by the declarative schema changer. #157030 +- Added support for `EXECUTE SCHEDULE {schedule_id}` to allow immediate execution of a scheduled job. This does not apply to `ALTER BACKUP SCHEDULE`; attempting to execute a backup schedule will result in an error. #158694 +- `CREATE TYPE` with composite type syntax now supports array types in field definitions. For example, `CREATE TYPE t AS (a INT[])` and `CREATE TYPE t AS (a INT ARRAY)` now work correctly, matching PostgreSQL behavior. #158888 +- Added the `STRICT` option for locality-aware backups. 
When enabled, backups fail if data from a KV node with one locality tag would be backed up to a bucket with a different locality tag, ensuring data domiciling compliance. #158999 +- The `WITH RESOLVED TIMESTAMP` option can be passed to `SHOW JOBS` or `SHOW JOB` to include the resolved timestamp, if any, for the jobs in the output columns. #159068

Command-line changes

-- You can now specify a user-defined database user when generating `debug zip` and `debug tsdump` files. Use the `--user` and `--url` flags to set the username. Previously, these operations required the root user. This change provides backward compatibility by defaulting the username to `root`. This update is part of an ongoing effort to limit root user access. [#158961][#158961] +- You can now specify a user-defined database user when generating `debug zip` and `debug tsdump` files. Use the `--user` and `--url` flags to set the username. Previously, these operations required the root user. This change provides backward compatibility by defaulting the username to `root`. This update is part of an ongoing effort to limit root user access. #158961

Bug fixes

-- Fixed a bug where modifying a scheduled backup without changing the schedule could cause the next incremental backup to be skipped. [#158820][#158820] -- Fixed a bug that allowed columns to be dropped despite being referenced by a routine. This could occur when a column was only referenced as a target column in the SET clause of an UPDATE statement within the routine. This fix only applies to newly-created routines. In versions prior to v26.1, the fix must be enabled by setting the session variable `prevent_update_set_column_drop`. [#158935][#158935] -- Fixed a bug that caused newly-created routines to incorrectly prevent dropping columns that were not directly referenced, most notably columns referenced by computed column expressions. The fix is gated behind the session setting `use_improved_routine_deps_triggers_and_computed_cols`, which is off by default prior to v26.1. [#158935][#158935] +- Fixed a bug where modifying a scheduled backup without changing the schedule could cause the next incremental backup to be skipped. #158820 +- Fixed a bug that allowed columns to be dropped despite being referenced by a routine. This could occur when a column was only referenced as a target column in the SET clause of an UPDATE statement within the routine. This fix only applies to newly-created routines. In versions prior to v26.1, the fix must be enabled by setting the session variable `prevent_update_set_column_drop`. #158935 +- Fixed a bug that caused newly-created routines to incorrectly prevent dropping columns that were not directly referenced, most notably columns referenced by computed column expressions. The fix is gated behind the session setting `use_improved_routine_deps_triggers_and_computed_cols`, which is off by default prior to v26.1. #158935 - Fixed a bug that could cause incorrect query results when using prepared statements with NULL placeholders. 
The bug has existed since v21.2 and violated SQL NULL-equality semantics by returning rows with NULL values when the result set should have been empty. From v21.2 to v25.3, the bug occurred when all of the following were true: @@ -40,27 +40,12 @@ Release Date: December 17, 2025 - The column contained NULL values - The placeholder was assigned to NULL during execution - Starting in v25.4, the requirements were loosened: the column no longer needed to be UNIQUE, and the bug could reproduce if the column was included in any index. [#159001][#159001] -- Fixed a bug where the `schema_locked` table storage parameter could be bypassed by combining `SET (schema_locked=false)` with other schema changes in the same `ALTER TABLE` statement using comma syntax. Schema-locked tables now correctly reject such combined statements. [#159017][#159017] + Starting in v25.4, the requirements were loosened: the column no longer needed to be UNIQUE, and the bug could reproduce if the column was included in any index. #159001 +- Fixed a bug where the `schema_locked` table storage parameter could be bypassed by combining `SET (schema_locked=false)` with other schema changes in the same `ALTER TABLE` statement using comma syntax. Schema-locked tables now correctly reject such combined statements. #159017

Performance improvements

-- Triggers now perform the descriptor lookup for `TG_TABLE_SCHEMA` against a cache. This can significantly reduce trigger planning latency in multi-region databases. [#144217][#144217] -- `AFTER` triggers now use a cache for descriptor lookups of `TG_TABLE_SCHEMA`, which can significantly reduce trigger planning latency. [#158708][#158708] - - -[#158602]: https://github.com/cockroachdb/cockroach/pull/158602 -[#158961]: https://github.com/cockroachdb/cockroach/pull/158961 -[#159017]: https://github.com/cockroachdb/cockroach/pull/159017 -[#158708]: https://github.com/cockroachdb/cockroach/pull/158708 -[#158820]: https://github.com/cockroachdb/cockroach/pull/158820 -[#159001]: https://github.com/cockroachdb/cockroach/pull/159001 -[#144217]: https://github.com/cockroachdb/cockroach/pull/144217 -[#158963]: https://github.com/cockroachdb/cockroach/pull/158963 -[#156963]: https://github.com/cockroachdb/cockroach/pull/156963 -[#158999]: https://github.com/cockroachdb/cockroach/pull/158999 -[#159068]: https://github.com/cockroachdb/cockroach/pull/159068 -[#158694]: https://github.com/cockroachdb/cockroach/pull/158694 -[#158935]: https://github.com/cockroachdb/cockroach/pull/158935 -[#157030]: https://github.com/cockroachdb/cockroach/pull/157030 -[#158888]: https://github.com/cockroachdb/cockroach/pull/158888 +- Triggers now perform the descriptor lookup for `TG_TABLE_SCHEMA` against a cache. This can significantly reduce trigger planning latency in multi-region databases. #144217 +- `AFTER` triggers now use a cache for descriptor lookups of `TG_TABLE_SCHEMA`, which can significantly reduce trigger planning latency. #158708 + + diff --git a/src/current/_includes/releases/v26.1/v26.1.0-beta.2.md b/src/current/_includes/releases/v26.1/v26.1.0-beta.2.md index 67f5c069cee..f498ce8c251 100644 --- a/src/current/_includes/releases/v26.1/v26.1.0-beta.2.md +++ b/src/current/_includes/releases/v26.1/v26.1.0-beta.2.md @@ -6,35 +6,24 @@ Release Date: January 7, 2026

SQL language changes

-- Removed a `database_name` column from the output of `SHOW CHANGEFEED JOBS` that was added in v26.1 development, but will not be needed for v26.1 features. [#158995][#158995] -- Fixed a bug where creating a routine could create unnecessary column dependencies when the routine references columns through CHECK constraints (including those for RLS policies and hash-sharded indexes) or partial index predicates. These unnecessary dependencies prevented dropping the column without first dropping the routine. The fix is gated behind the session setting `use_improved_routine_deps_triggers_and_computed_cols`, which is off by default before v26.1. [#159439][#159439] -- `INSPECT` is now implemented as a generally available (GA) feature with the release of v26.1.0. The `enable_inspect_command` session variable has been deprecated, and is now effectively always set to `true`. [#159750][#159750] +- Removed a `database_name` column from the output of `SHOW CHANGEFEED JOBS` that was added in v26.1 development, but will not be needed for v26.1 features. #158995 +- Fixed a bug where creating a routine could create unnecessary column dependencies when the routine references columns through CHECK constraints (including those for RLS policies and hash-sharded indexes) or partial index predicates. These unnecessary dependencies prevented dropping the column without first dropping the routine. The fix is gated behind the session setting `use_improved_routine_deps_triggers_and_computed_cols`, which is off by default before v26.1. #159439 +- `INSPECT` is now implemented as a generally available (GA) feature with the release of v26.1.0. The `enable_inspect_command` session variable has been deprecated, and is now effectively always set to `true`. #159750

Bug fixes

- Fixed a bug that could cause prepared statements to fail with the error message "non-const expression" when they contained filters with stable - functions. This bug has been present since v25.4.0. [#159203][#159203] -- Fixed a bug where comments associated with constraints were left behind after the column and constraint were dropped. [#159288][#159288] -- Fixed a race condition where queries run after revoking `BYPASSRLS` could return wrong results because cached plans failed to notice the change immediately. [#159380][#159380] -- Fixed a bug where `TRUNCATE` did not behave correctly with respect to the `schema_locked` storage parameter, and was not being blocked when Logical Data Replication (LDR) was in use. This behavior was incorrect and has been fixed. [#159405][#159405] -- Fixed a race condition that could occur during context cancellation of an incoming snapshot. [#159435][#159435] -- Fixed a bug that could cause a panic during changefeed startup if an error occurred while initializing the metrics controller. [#159542][#159542] -- Fixed a memory accounting issue that could occur when a lease expired due to a SQL liveness session-based timeout. [#159625][#159625] + functions. This bug has been present since v25.4.0. #159203 +- Fixed a bug where comments associated with constraints were left behind after the column and constraint were dropped. #159288 +- Fixed a race condition where queries run after revoking `BYPASSRLS` could return wrong results because cached plans failed to notice the change immediately. #159380 +- Fixed a bug where `TRUNCATE` did not behave correctly with respect to the `schema_locked` storage parameter, and was not being blocked when Logical Data Replication (LDR) was in use. This behavior was incorrect and has been fixed. #159405 +- Fixed a race condition that could occur during context cancellation of an incoming snapshot. 
#159435 +- Fixed a bug that could cause a panic during changefeed startup if an error occurred while initializing the metrics controller. #159542 +- Fixed a memory accounting issue that could occur when a lease expired due to a SQL liveness session-based timeout. #159625

Performance improvements

-- Various background tasks and jobs now more actively yield to foreground work when that work is waiting to run. [#159258][#159258] +- Various background tasks and jobs now more actively yield to foreground work when that work is waiting to run. #159258 -[#159625]: https://github.com/cockroachdb/cockroach/pull/159625 -[#158995]: https://github.com/cockroachdb/cockroach/pull/158995 -[#159439]: https://github.com/cockroachdb/cockroach/pull/159439 -[#159203]: https://github.com/cockroachdb/cockroach/pull/159203 -[#159288]: https://github.com/cockroachdb/cockroach/pull/159288 -[#159542]: https://github.com/cockroachdb/cockroach/pull/159542 -[#159258]: https://github.com/cockroachdb/cockroach/pull/159258 -[#159750]: https://github.com/cockroachdb/cockroach/pull/159750 -[#159380]: https://github.com/cockroachdb/cockroach/pull/159380 -[#159405]: https://github.com/cockroachdb/cockroach/pull/159405 -[#159435]: https://github.com/cockroachdb/cockroach/pull/159435 diff --git a/src/current/_includes/releases/v26.1/v26.1.0-beta.3.md b/src/current/_includes/releases/v26.1/v26.1.0-beta.3.md index 55373119490..d46693043b5 100644 --- a/src/current/_includes/releases/v26.1/v26.1.0-beta.3.md +++ b/src/current/_includes/releases/v26.1/v26.1.0-beta.3.md @@ -6,53 +6,40 @@ Release Date: January 14, 2026

{{ site.data.products.enterprise }} edition changes

-- Added a new cluster setting, `security.provisioning.oidc.enabled`, to allow automatic provisioning of users when they log in for the first time via OIDC. When enabled, a new user will be created in CockroachDB upon their first successful OIDC authentication. This feature is disabled by default. [#160016][#160016] +- Added a new cluster setting, `security.provisioning.oidc.enabled`, to allow automatic provisioning of users when they log in for the first time via OIDC. When enabled, a new user will be created in CockroachDB upon their first successful OIDC authentication. This feature is disabled by default. #160016

SQL language changes

-- Changed the default value of the `sql.catalog.allow_leased_descriptors.enabled` cluster setting to `true`. This setting allows introspection tables like `information_schema` and `pg_catalog` to use cached descriptors when building the table results, which improves the performance of introspection queries when there are many tables in the cluster. [#159566][#159566] +- Changed the default value of the `sql.catalog.allow_leased_descriptors.enabled` cluster setting to `true`. This setting allows introspection tables like `information_schema` and `pg_catalog` to use cached descriptors when building the table results, which improves the performance of introspection queries when there are many tables in the cluster. #159566 - Added cluster settings to control the number of concurrent automatic statistics collection jobs: - `sql.stats.automatic_full_concurrency_limit` controls the maximum number of concurrent full statistics collections. The default is 1. - `sql.stats.automatic_extremes_concurrency_limit` controls the maximum number of concurrent partial statistics collections using extremes. The default is 128. - Note that at most one statistics collection job can run on a single table at a time. [#159870][#159870] + Note that at most one statistics collection job can run on a single table at a time. #159870

Operational changes

-- The `kv.range_split.load_sample_reset_duration` cluster setting now defaults to `30m`. This should improve load-based splitting in rare edge cases. [#159677][#159677] +- The `kv.range_split.load_sample_reset_duration` cluster setting now defaults to `30m`. This should improve load-based splitting in rare edge cases. #159677

Bug fixes

-- Fixed an issue where long-running transactions with many statements could cause unbounded memory growth in the SQL statistics subsystem. When a transaction includes a large number of statements, the SQL statistics ingester now automatically flushes buffered statistics before the transaction commits. As a side effect, the flushed statement statistics might not have an associated transaction fingerprint ID because the transaction has not yet completed. In such cases, the transaction fingerprint ID cannot be backfilled after the fact. [#159644][#159644] -- Fixed a bug where a query predicate could be ignored when all of the following conditions were met: the query used a lookup join to an index, the predicate constrained a column to multiple values (e.g., `column IN (1, 2)`), and the constrained column followed one or more columns with optional multi-value constraints in the index. This bug was introduced in v24.3.0. [#159778][#159778] -- Fixed a deadlock that could occur when a statistics creation task panicked. [#160422][#160422] -- Fixed a bug where rolling back a transaction that had just rolled back a savepoint would block other transactions accessing the same rows for five seconds. [#160477][#160477] -- Fixed a bug where CockroachDB could crash when handling decimals with negative scales via the extended PGWire protocol. An error is now returned instead, matching PostgreSQL behavior. [#160561][#160561] +- Fixed an issue where long-running transactions with many statements could cause unbounded memory growth in the SQL statistics subsystem. When a transaction includes a large number of statements, the SQL statistics ingester now automatically flushes buffered statistics before the transaction commits. As a side effect, the flushed statement statistics might not have an associated transaction fingerprint ID because the transaction has not yet completed. In such cases, the transaction fingerprint ID cannot be backfilled after the fact. 
#159644 +- Fixed a bug where a query predicate could be ignored when all of the following conditions were met: the query used a lookup join to an index, the predicate constrained a column to multiple values (e.g., `column IN (1, 2)`), and the constrained column followed one or more columns with optional multi-value constraints in the index. This bug was introduced in v24.3.0. #159778 +- Fixed a deadlock that could occur when a statistics creation task panicked. #160422 +- Fixed a bug where rolling back a transaction that had just rolled back a savepoint would block other transactions accessing the same rows for five seconds. #160477 +- Fixed a bug where CockroachDB could crash when handling decimals with negative scales via the extended PGWire protocol. An error is now returned instead, matching PostgreSQL behavior. #160561 - v26.1.0-beta.1 and v26.1.0-beta.2 versions of CockroachDB could encounter a rare process crash when running TTL jobs, - and this has now been fixed. [#160689][#160689] + and this has now been fixed. #160689

Performance improvements

-- Fixed a performance regression in `pg_catalog.pg_roles` and `pg_catalog.pg_authid` by avoiding privilege lookups for each row in the table. [#160228][#160228] -- Added a new session variable, `distsql_prevent_partitioning_soft_limited_scans`, which defaults to true. When enabled, this prevents scans with soft limits from being planned as multiple TableReaders, which decreases the initial setup costs of some fully-distributed query plans. [#160600][#160600] +- Fixed a performance regression in `pg_catalog.pg_roles` and `pg_catalog.pg_authid` by avoiding privilege lookups for each row in the table. #160228 +- Added a new session variable, `distsql_prevent_partitioning_soft_limited_scans`, which defaults to true. When enabled, this prevents scans with soft limits from being planned as multiple TableReaders, which decreases the initial setup costs of some fully-distributed query plans. #160600

Miscellaneous

-- Fixed a bug where import rollback could incorrectly revert data in a table that was already online. This could only occur if an import job was cancelled or failed after the import had already succeeded and the table was made available for use. [#159904][#159904] - - -[#160600]: https://github.com/cockroachdb/cockroach/pull/160600 -[#159904]: https://github.com/cockroachdb/cockroach/pull/159904 -[#160016]: https://github.com/cockroachdb/cockroach/pull/160016 -[#159870]: https://github.com/cockroachdb/cockroach/pull/159870 -[#159778]: https://github.com/cockroachdb/cockroach/pull/159778 -[#160422]: https://github.com/cockroachdb/cockroach/pull/160422 -[#160561]: https://github.com/cockroachdb/cockroach/pull/160561 -[#160228]: https://github.com/cockroachdb/cockroach/pull/160228 -[#159566]: https://github.com/cockroachdb/cockroach/pull/159566 -[#159677]: https://github.com/cockroachdb/cockroach/pull/159677 -[#159644]: https://github.com/cockroachdb/cockroach/pull/159644 -[#160477]: https://github.com/cockroachdb/cockroach/pull/160477 -[#160689]: https://github.com/cockroachdb/cockroach/pull/160689 +- Fixed a bug where import rollback could incorrectly revert data in a table that was already online. This could only occur if an import job was cancelled or failed after the import had already succeeded and the table was made available for use. #159904 + + diff --git a/src/current/_includes/releases/v26.1/v26.1.0-rc.1.md b/src/current/_includes/releases/v26.1/v26.1.0-rc.1.md index c84a6a06495..51dfaa7e4a4 100644 --- a/src/current/_includes/releases/v26.1/v26.1.0-rc.1.md +++ b/src/current/_includes/releases/v26.1/v26.1.0-rc.1.md @@ -6,39 +6,27 @@ Release Date: January 22, 2026

SQL language changes

-- Added support for `SHOW STATEMENT HINTS`, which displays information about the statement hints (if any) associated with the given statement fingerprint string. The fingerprint is normalized in the same way as `EXPLAIN (FINGERPRINT)` before hints are matched. Example usage: `SHOW STATEMENT HINTS FOR ' SELECT * FROM xy WHERE x = 10 '` or `SHOW STATEMENT HINTS FOR $$ SELECT * FROM xy WHERE x = 10 $$ WITH DETAILS`. [#160865][#160865] -- Calling `information_schema.crdb_rewrite_inline_hints` now requires the REPAIRCLUSTER privilege. [#160946][#160946] -- Renamed the builtin function `crdb_internal.inject_hint` (introduced in v26.1.0-alpha.2) to `information_schema.crdb_rewrite_inline_hints`. [#160946][#160946] +- Added support for `SHOW STATEMENT HINTS`, which displays information about the statement hints (if any) associated with the given statement fingerprint string. The fingerprint is normalized in the same way as `EXPLAIN (FINGERPRINT)` before hints are matched. Example usage: `SHOW STATEMENT HINTS FOR ' SELECT * FROM xy WHERE x = 10 '` or `SHOW STATEMENT HINTS FOR $$ SELECT * FROM xy WHERE x = 10 $$ WITH DETAILS`. #160865 +- Calling `information_schema.crdb_rewrite_inline_hints` now requires the REPAIRCLUSTER privilege. #160946 +- Renamed the builtin function `crdb_internal.inject_hint` (introduced in v26.1.0-alpha.2) to `information_schema.crdb_rewrite_inline_hints`. #160946

Operational changes

-- Changed goroutine profile dumps from human-readable `.txt.gz` files to binary proto `.pb.gz` files. This improves the performance of the goroutine dumper by eliminating brief in-process pauses that occurred when collecting goroutine stacks. [#160994][#160994] -- Added a new structured event of type `rewrite_inline_hints` that is emitted when an inline-hints rewrite rule is added using `information_schema.crdb_rewrite_inline_hints`. This event is written to both the event log and the OPS channel. [#161035][#161035] -- Added a new metric `sql.query.with_statement_hints.count` that is incremented whenever a statement is executed with one or more external statement hints applied. An example of an external statement hint is an inline-hints rewrite rule added by calling `information_schema.crdb_rewrite_inline_hints`. [#161085][#161085] +- Changed goroutine profile dumps from human-readable `.txt.gz` files to binary proto `.pb.gz` files. This improves the performance of the goroutine dumper by eliminating brief in-process pauses that occurred when collecting goroutine stacks. #160994 +- Added a new structured event of type `rewrite_inline_hints` that is emitted when an inline-hints rewrite rule is added using `information_schema.crdb_rewrite_inline_hints`. This event is written to both the event log and the OPS channel. #161035 +- Added a new metric `sql.query.with_statement_hints.count` that is incremented whenever a statement is executed with one or more external statement hints applied. An example of an external statement hint is an inline-hints rewrite rule added by calling `information_schema.crdb_rewrite_inline_hints`. #161085

DB Console changes

-- The **SQL Activity** > **Sessions** page now defaults the **Session Status** filter to **Active, Idle** to exclude closed sessions. [#160937][#160937] -- Changed the unit of measurement for admission control duration metrics from microseconds to nanoseconds. The following metrics are affected: `admission.granter.slots_exhausted_duration.kv`, `admission.granter.cpu_load_short_period_duration.kv`, `admission.granter.cpu_load_long_period_duration.kv`, `admission.granter.io_tokens_exhausted_duration.kv`, `admission.granter.elastic_io_tokens_exhausted_duration.kv`, and `admission.elastic_cpu.nanos_exhausted_duration`. Note that dashboards displaying these metrics will show a discontinuity at upgrade time, with pre-upgrade values appearing much lower due to the unit change. [#161076][#161076] +- The **SQL Activity** > **Sessions** page now defaults the **Session Status** filter to **Active, Idle** to exclude closed sessions. #160937 +- Changed the unit of measurement for admission control duration metrics from microseconds to nanoseconds. The following metrics are affected: `admission.granter.slots_exhausted_duration.kv`, `admission.granter.cpu_load_short_period_duration.kv`, `admission.granter.cpu_load_long_period_duration.kv`, `admission.granter.io_tokens_exhausted_duration.kv`, `admission.granter.elastic_io_tokens_exhausted_duration.kv`, and `admission.elastic_cpu.nanos_exhausted_duration`. Note that dashboards displaying these metrics will show a discontinuity at upgrade time, with pre-upgrade values appearing much lower due to the unit change. #161076

Bug fixes

-- Fixed a bug that caused `SHOW CREATE FUNCTION` to error when the function body contained casts from columns to user-defined types. [#160126][#160126] -- Fixed a bug where the `pprof` UI endpoints for allocs, heap, block, and mutex profiles ignored the seconds parameter and returned immediate snapshots instead of delta profiles. [#160804][#160804] -- Fixed a bug where schema changes adding a `NOT NULL` constraint could enter an infinite retry loop if a row violated the constraint and contained certain content (e.g., `"EOF"`). Such errors are now correctly classified and don't cause retries. [#160917][#160917] -- Fixed a bug in which inline-hints rewrite rules created with `information_schema.crdb_rewrite_inline_hints` were not correctly applied to statements run with `EXPLAIN ANALYZE`. This bug was introduced in v26.1.0-alpha.2. [#161312][#161312] -- Fixed a bug where IMPORT with AVRO data using OCF format could silently lose data if the underlying storage (e.g., S3) returned an error during read. Such errors are now properly reported. Other formats (specified via `data_as_binary_records` and `data_as_json_records` options) are unaffected. The bug has been present since about v20.1. 
[#161325][#161325] - - -[#161325]: https://github.com/cockroachdb/cockroach/pull/161325 -[#160946]: https://github.com/cockroachdb/cockroach/pull/160946 -[#161085]: https://github.com/cockroachdb/cockroach/pull/161085 -[#161076]: https://github.com/cockroachdb/cockroach/pull/161076 -[#160804]: https://github.com/cockroachdb/cockroach/pull/160804 -[#160917]: https://github.com/cockroachdb/cockroach/pull/160917 -[#161312]: https://github.com/cockroachdb/cockroach/pull/161312 -[#160865]: https://github.com/cockroachdb/cockroach/pull/160865 -[#160994]: https://github.com/cockroachdb/cockroach/pull/160994 -[#161035]: https://github.com/cockroachdb/cockroach/pull/161035 -[#160937]: https://github.com/cockroachdb/cockroach/pull/160937 -[#160126]: https://github.com/cockroachdb/cockroach/pull/160126 +- Fixed a bug that caused `SHOW CREATE FUNCTION` to error when the function body contained casts from columns to user-defined types. #160126 +- Fixed a bug where the `pprof` UI endpoints for allocs, heap, block, and mutex profiles ignored the seconds parameter and returned immediate snapshots instead of delta profiles. #160804 +- Fixed a bug where schema changes adding a `NOT NULL` constraint could enter an infinite retry loop if a row violated the constraint and contained certain content (e.g., `"EOF"`). Such errors are now correctly classified and don't cause retries. #160917 +- Fixed a bug in which inline-hints rewrite rules created with `information_schema.crdb_rewrite_inline_hints` were not correctly applied to statements run with `EXPLAIN ANALYZE`. This bug was introduced in v26.1.0-alpha.2. #161312 +- Fixed a bug where IMPORT with AVRO data using OCF format could silently lose data if the underlying storage (e.g., S3) returned an error during read. Such errors are now properly reported. Other formats (specified via `data_as_binary_records` and `data_as_json_records` options) are unaffected. The bug has been present since about v20.1. 
#161325 + + diff --git a/src/current/_includes/releases/v26.1/v26.1.1.md b/src/current/_includes/releases/v26.1/v26.1.1.md index 55c169f65ba..1b99ebbb34e 100644 --- a/src/current/_includes/releases/v26.1/v26.1.1.md +++ b/src/current/_includes/releases/v26.1/v26.1.1.md @@ -6,42 +6,28 @@ Release Date: March 9, 2026

General changes

-- Added support for the `partition_alg` changefeed option to specify a Kafka partitioning algorithm. Supported values are `fnv-1a` (default) and `murmur2`. For example: `CREATE CHANGEFEED ... INTO 'kafka://...' WITH partition_alg='murmur2';`. This option is only valid for Kafka v2 sinks and is controlled by the cluster setting `changefeed.partition_alg.enabled`. Once a changefeed is created with a specific algorithm, it continues to use that algorithm even if the cluster setting is later disabled, unless the changefeed is altered to use a different `partition_alg` value. [#161532][#161532] +- Added support for the `partition_alg` changefeed option to specify a Kafka partitioning algorithm. Supported values are `fnv-1a` (default) and `murmur2`. For example: `CREATE CHANGEFEED ... INTO 'kafka://...' WITH partition_alg='murmur2';`. This option is only valid for Kafka v2 sinks and is controlled by the cluster setting `changefeed.partition_alg.enabled`. Once a changefeed is created with a specific algorithm, it continues to use that algorithm even if the cluster setting is later disabled, unless the changefeed is altered to use a different `partition_alg` value. #161532

SQL language changes

-- The `information_schema.crdb_datums_to_bytes` built-in function is now documented. [#161196][#161196] +- The `information_schema.crdb_datums_to_bytes` built-in function is now documented. #161196

Bug fixes

-- Fixed a bug where AVRO file imports of data with JSON or binary records could hang indefinitely when encountering stream errors from cloud storage (such as `HTTP/2` `CANCEL` errors). Import jobs will now properly fail with an error instead of hanging. [#161446][#161446] -- Fixed a bug which prevented successfully injecting hints using `information_schema.crdb_rewrite_inline_hints` for `INSERT`, `UPSERT`, `UPDATE`, and `DELETE` statements. This bug was present since hint injection was first introduced in v26.1.0. [#161970][#161970] -- Fixed a bug introduced in v26.1.0-beta.1 in which row-level TTL jobs could encounter GC threshold errors if each node had a large number of spans to process. [#161990][#161990] -- Fixed a bug where multi-statement explicit transactions that use `SAVEPOINT` to recover from certain errors (like duplicate key value violations) could lose writes performed before the savepoint was created in rare cases when buffered writes were enabled. Buffered writes are a public preview feature that is off by default. This bug was introduced in v25.2. [#162031][#162031] -- Fixed a bug that could cause changefeeds using Kafka v1 sinks to hang when the changefeed was cancelled. [#162121][#162121] -- Fixed a bug where queries using a generic plan with a lookup join on an index containing identity computed columns would fail with the error "cannot map variable %d to an indexed var". [#162140][#162140] -- Fixed an internal error "could not find format code for column N" that occurred when executing `EXPLAIN ANALYZE EXECUTE` statements via JDBC or other clients using the PostgreSQL binary protocol. [#162284][#162284] -- Fixed a bug where CockroachDB would encounter an internal error when evaluating builtin functions with `'{}'` as an argument without explicit type casts (e.g., `SELECT cardinality('{}');`). A regular error is now returned instead, matching PostgreSQL behavior. 
[#162358][#162358] -- Fixed a bug in which PL/pgSQL UDFs with many IF statements would cause a timeout and/or OOM when executed from a prepared statement. This bug was introduced in versions v23.2.22, v24.1.15, v24.3.9, v25.1.2, and v25.2.0. [#162560][#162560] -- Fixed a bug where generating a debug zip could trigger an out-of-memory (OOM) condition on a node if malformed log entries were present in logs using `json` or `json-compact` formatting. This bug was introduced in v24.1. [#163353][#163353] -- Fixed a rare data race during parallel constraint checks where a fresh descriptor collection could resolve a stale enum type version. This bug was introduced in v26.1.0. [#163961][#163961] +- Fixed a bug where AVRO file imports of data with JSON or binary records could hang indefinitely when encountering stream errors from cloud storage (such as `HTTP/2` `CANCEL` errors). Import jobs will now properly fail with an error instead of hanging. #161446 +- Fixed a bug which prevented successfully injecting hints using `information_schema.crdb_rewrite_inline_hints` for `INSERT`, `UPSERT`, `UPDATE`, and `DELETE` statements. This bug was present since hint injection was first introduced in v26.1.0. #161970 +- Fixed a bug introduced in v26.1.0-beta.1 in which row-level TTL jobs could encounter GC threshold errors if each node had a large number of spans to process. #161990 +- Fixed a bug where multi-statement explicit transactions that use `SAVEPOINT` to recover from certain errors (like duplicate key value violations) could lose writes performed before the savepoint was created in rare cases when buffered writes were enabled. Buffered writes are a public preview feature that is off by default. This bug was introduced in v25.2. #162031 +- Fixed a bug that could cause changefeeds using Kafka v1 sinks to hang when the changefeed was cancelled. 
#162121 +- Fixed a bug where queries using a generic plan with a lookup join on an index containing identity computed columns would fail with the error "cannot map variable %d to an indexed var". #162140 +- Fixed an internal error "could not find format code for column N" that occurred when executing `EXPLAIN ANALYZE EXECUTE` statements via JDBC or other clients using the PostgreSQL binary protocol. #162284 +- Fixed a bug where CockroachDB would encounter an internal error when evaluating builtin functions with `'{}'` as an argument without explicit type casts (e.g., `SELECT cardinality('{}');`). A regular error is now returned instead, matching PostgreSQL behavior. #162358 +- Fixed a bug in which PL/pgSQL UDFs with many IF statements would cause a timeout and/or OOM when executed from a prepared statement. This bug was introduced in versions v23.2.22, v24.1.15, v24.3.9, v25.1.2, and v25.2.0. #162560 +- Fixed a bug where generating a debug zip could trigger an out-of-memory (OOM) condition on a node if malformed log entries were present in logs using `json` or `json-compact` formatting. This bug was introduced in v24.1. #163353 +- Fixed a rare data race during parallel constraint checks where a fresh descriptor collection could resolve a stale enum type version. This bug was introduced in v26.1.0. #163961

Performance improvements

-- Improved changefeed checkpointing performance when changefeeds are lagging. Previously, checkpoint updates could be redundantly applied multiple times per checkpoint operation. [#163283][#163283] - - -[#161970]: https://github.com/cockroachdb/cockroach/pull/161970 -[#162140]: https://github.com/cockroachdb/cockroach/pull/162140 -[#162358]: https://github.com/cockroachdb/cockroach/pull/162358 -[#163353]: https://github.com/cockroachdb/cockroach/pull/163353 -[#161446]: https://github.com/cockroachdb/cockroach/pull/161446 -[#162031]: https://github.com/cockroachdb/cockroach/pull/162031 -[#162121]: https://github.com/cockroachdb/cockroach/pull/162121 -[#162284]: https://github.com/cockroachdb/cockroach/pull/162284 -[#163961]: https://github.com/cockroachdb/cockroach/pull/163961 -[#161532]: https://github.com/cockroachdb/cockroach/pull/161532 -[#161990]: https://github.com/cockroachdb/cockroach/pull/161990 -[#162560]: https://github.com/cockroachdb/cockroach/pull/162560 -[#163283]: https://github.com/cockroachdb/cockroach/pull/163283 -[#161196]: https://github.com/cockroachdb/cockroach/pull/161196 +- Improved changefeed checkpointing performance when changefeeds are lagging. Previously, checkpoint updates could be redundantly applied multiple times per checkpoint operation. #163283 + + diff --git a/src/current/_includes/releases/v26.1/v26.1.2.md b/src/current/_includes/releases/v26.1/v26.1.2.md index 7d71de3e182..027ceaca4f8 100644 --- a/src/current/_includes/releases/v26.1/v26.1.2.md +++ b/src/current/_includes/releases/v26.1/v26.1.2.md @@ -6,28 +6,17 @@ Release Date: April 3, 2026

Command-line changes

-- The `cockroach debug zip` command's `--include-files` and `--exclude-files` flags now support full zip path patterns. Patterns containing `/` are matched against the full path within the zip archive (e.g., `--include-files='debug/nodes/1/*.json'`). Patterns without `/` continue to match the base file name as before. [#164146][#164146] +- The `cockroach debug zip` command's `--include-files` and `--exclude-files` flags now support full zip path patterns. Patterns containing `/` are matched against the full path within the zip archive (e.g., `--include-files='debug/nodes/1/*.json'`). Patterns without `/` continue to match the base file name as before. #164146

Bug fixes

-- Fixed a bug where contention events reported the wrong key. Previously, the key field in contention events showed the transaction's anchor key (used for record placement) rather than the actual key where the conflict occurred. This made it difficult to diagnose contention issues accurately. [#164157][#164157] -- Fixed a bug where `CREATE INDEX` on a table with `PARTITION ALL BY` would fail if the partition columns were explicitly included in the primary key definition. [#164754][#164754] -- Fixed a bug that prevented the `optimizer_min_row_count` setting from applying to anti-join expressions, which could lead to bad query plans. The fix is gated behind `optimizer_use_min_row_count_anti_join_fix`, which is on by default on v26.2 and later, and off by default in earlier versions. [#164798][#164798] -- Fixed a bug where CockroachDB did not always promptly respond to the statement timeout when performing a hash join with an `ON` filter that is mostly `false`. [#164891][#164891] -- Fixed a bug introduced in v25.4 where setting `min_checkpoint_frequency` to `0` prevented changefeeds from advancing their resolved timestamp (high-water mark) and emitting resolved messages. Note that setting `min_checkpoint_frequency` lower than `500ms` is not recommended as it may cause degraded changefeed performance. [#164892][#164892] -- Lowered the default value of the `changefeed.max_retry_backoff` cluster setting from `10m` to `30s` to reduce changefeed lag during rolling restarts. [#164936][#164936] -- Fixed a rare race condition where `SHOW CREATE TABLE` could fail with a `"relation does not exist"` error if a table referenced by a foreign key was being concurrently dropped. [#165274][#165274] -- Fixed a bug that could cause row sampling for table statistics to crash a node due to a data race when processing a collated string column with values larger than 400 bytes. This bug has existed since before v23.1. 
[#165562][#165562] -- Fixed a bug in the legacy schema changer where rolling back a `CREATE TABLE` with inline `FOREIGN KEY` constraints could leave orphaned foreign key back-references on the referenced table, causing descriptor validation errors. [#165994][#165994] -- Fixed a bug where restoring a database backup containing default privileges that referenced non-existent users would leave dangling user references in the restored database descriptor. [#166226][#166226] -[#164754]: https://github.com/cockroachdb/cockroach/pull/164754 -[#164892]: https://github.com/cockroachdb/cockroach/pull/164892 -[#165562]: https://github.com/cockroachdb/cockroach/pull/165562 -[#166226]: https://github.com/cockroachdb/cockroach/pull/166226 -[#164936]: https://github.com/cockroachdb/cockroach/pull/164936 -[#165274]: https://github.com/cockroachdb/cockroach/pull/165274 -[#165994]: https://github.com/cockroachdb/cockroach/pull/165994 -[#164146]: https://github.com/cockroachdb/cockroach/pull/164146 -[#164157]: https://github.com/cockroachdb/cockroach/pull/164157 -[#164798]: https://github.com/cockroachdb/cockroach/pull/164798 -[#164891]: https://github.com/cockroachdb/cockroach/pull/164891 +- Fixed a bug where contention events reported the wrong key. Previously, the key field in contention events showed the transaction's anchor key (used for record placement) rather than the actual key where the conflict occurred. This made it difficult to diagnose contention issues accurately. #164157 +- Fixed a bug where `CREATE INDEX` on a table with `PARTITION ALL BY` would fail if the partition columns were explicitly included in the primary key definition. #164754 +- Fixed a bug that prevented the `optimizer_min_row_count` setting from applying to anti-join expressions, which could lead to bad query plans. The fix is gated behind `optimizer_use_min_row_count_anti_join_fix`, which is on by default on v26.2 and later, and off by default in earlier versions. 
#164798 +- Fixed a bug where CockroachDB did not always promptly respond to the statement timeout when performing a hash join with an `ON` filter that is mostly `false`. #164891 +- Fixed a bug introduced in v25.4 where setting `min_checkpoint_frequency` to `0` prevented changefeeds from advancing their resolved timestamp (high-water mark) and emitting resolved messages. Note that setting `min_checkpoint_frequency` lower than `500ms` is not recommended as it may cause degraded changefeed performance. #164892 +- Lowered the default value of the `changefeed.max_retry_backoff` cluster setting from `10m` to `30s` to reduce changefeed lag during rolling restarts. #164936 +- Fixed a rare race condition where `SHOW CREATE TABLE` could fail with a `"relation does not exist"` error if a table referenced by a foreign key was being concurrently dropped. #165274 +- Fixed a bug that could cause row sampling for table statistics to crash a node due to a data race when processing a collated string column with values larger than 400 bytes. This bug has existed since before v23.1. #165562 +- Fixed a bug in the legacy schema changer where rolling back a `CREATE TABLE` with inline `FOREIGN KEY` constraints could leave orphaned foreign key back-references on the referenced table, causing descriptor validation errors. #165994 +- Fixed a bug where restoring a database backup containing default privileges that referenced non-existent users would leave dangling user references in the restored database descriptor. #166226 diff --git a/src/current/_includes/releases/v26.1/v26.1.3.md b/src/current/_includes/releases/v26.1/v26.1.3.md index a1aa820010e..5b8c071f0e3 100644 --- a/src/current/_includes/releases/v26.1/v26.1.3.md +++ b/src/current/_includes/releases/v26.1/v26.1.3.md @@ -6,7 +6,6 @@ Release Date: April 20, 2026

Bug fixes

-- Fixed a bug where concurrent updates to a table using multiple column families during a partial index creation could result in data loss, incorrect `NULL` values, or validation failures in the resulting index. [#168471][#168471] +- Fixed a bug where concurrent updates to a table using multiple column families during a partial index creation could result in data loss, incorrect `NULL` values, or validation failures in the resulting index. #168471 -[#168471]: https://github.com/cockroachdb/cockroach/pull/168471 diff --git a/src/current/_includes/releases/v26.2/backward-incompatible.md b/src/current/_includes/releases/v26.2/backward-incompatible.md index c5e92205162..75f512eeaf1 100644 --- a/src/current/_includes/releases/v26.2/backward-incompatible.md +++ b/src/current/_includes/releases/v26.2/backward-incompatible.md @@ -1,10 +1,8 @@ - Bullet - - `information_schema.crdb_datums_to_bytes` - previously only available as `crdb_internal.datums_to_bytes` [#](https://github.com/cockroachdb/cockroach/pull/) - - `information_schema.crdb_index_usage_stats` - previously only available as `crdb_internal.index_usage_stats` [#](https://github.com/cockroachdb/cockroach/pull/) - - `information_schema.crdb_rewrite_inline_hints` - replaces the function previously introduced as `crdb_internal.inject_hint` [#](https://github.com/cockroachdb/cockroach/pull/) - - [#](https://github.com/cockroachdb/cockroach/pull/) + - `information_schema.crdb_datums_to_bytes` - previously only available as `crdb_internal.datums_to_bytes` + - `information_schema.crdb_index_usage_stats` - previously only available as `crdb_internal.index_usage_stats` + - `information_schema.crdb_rewrite_inline_hints` - replaces the function previously introduced as `crdb_internal.inject_hint` - Bullet diff --git a/src/current/_includes/releases/v26.2/cluster-setting-changes.md b/src/current/_includes/releases/v26.2/cluster-setting-changes.md index 819b2aaf026..dee5490e127 100644 --- 
a/src/current/_includes/releases/v26.2/cluster-setting-changes.md +++ b/src/current/_includes/releases/v26.2/cluster-setting-changes.md @@ -17,20 +17,6 @@ Changes to [cluster settings]({% link {{ page.version.version }}/cluster-setting - Events related to changefeed operations are now routed to the CHANGEFEED channel, while sampled queries and transactions, along with certain SQL performance events, are logged to SQL_EXEC. To continue using the previous logging channels, set `log.channel_compatibility_mode.enabled` to `true`. - Bullet - - Changed the default value of the `sql.catalog.allow_leased_descriptors.enabled` cluster setting to `true`. This setting allows introspection tables like `information_schema` and `pg_catalog` to use cached descriptors when building the table results, which improves the performance of introspection queries when there are many tables in the cluster. [#][#] + - Changed the default value of the `sql.catalog.allow_leased_descriptors.enabled` cluster setting to `true`. This setting allows introspection tables like `information_schema` and `pg_catalog` to use cached descriptors when building the table results, which improves the performance of introspection queries when there are many tables in the cluster. 
# -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ diff --git a/src/current/_includes/releases/v26.2/deprecations.md b/src/current/_includes/releases/v26.2/deprecations.md index 30c39187d9b..007e0829d7b 100644 --- a/src/current/_includes/releases/v26.2/deprecations.md +++ b/src/current/_includes/releases/v26.2/deprecations.md @@ -6,10 +6,8 @@ The following deprecations are announced in v26.1. - `INSPECT` supports a `DETACHED` option to run the operation without waiting for it - For more information, see the [`INSPECT`]({% link {{ page.version.version }}/inspect.md %}) documentation - [#][#] + # - Bullet -[#]: https://github.com/cockroachdb/cockroach/pull/ -[#]: https://github.com/cockroachdb/cockroach/pull/ diff --git a/src/current/_includes/releases/v26.2/v26.2.0-alpha.1.md b/src/current/_includes/releases/v26.2/v26.2.0-alpha.1.md index 5b97c116714..0be060beeec 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0-alpha.1.md +++ b/src/current/_includes/releases/v26.2/v26.2.0-alpha.1.md @@ -6,326 +6,184 @@ Release Date: March 11, 2026

Backward-incompatible changes

-- Increased the default value of `sql.stats.automatic_full_concurrency_limit` (which controls the maximum number of concurrent full statistics collections) from `1` to number of vCPUs divided by 2 (e.g., 4 vCPU nodes will have the value of `2`). [#161806][#161806] -- The `TG_ARGV` trigger function parameter now uses 0-based indexing to match PostgreSQL behavior. Previously, `TG_ARGV[1]` returned the first argument; now `TG_ARGV[0]` returns the first argument and `TG_ARGV[1]` returns the second argument. Additionally, usage of `TG_ARGV` no longer requires setting the `allow_create_trigger_function_with_argv_references` session variable. [#161925][#161925] -- Lowered the default value of the `sql.guardrails.max_row_size_log` cluster setting from `64 MiB` to `16 MiB`, and the default value of `sql.guardrails.max_row_size_err` from `512 MiB` to `80 MiB`. These settings control the maximum size of a row (or column family) that SQL can write before logging a warning or returning an error, respectively. The previous defaults were high enough that large rows would hit other limits first (such as the Raft command size limit or the backup SST size limit), producing confusing errors. The new defaults align with existing system limits to provide clearer diagnostics. If your workload legitimately writes rows larger than these new defaults, you can restore the previous behavior by increasing these settings. [#164468][#164468] -- Changed the default value of the `sql.catalog.allow_leased_descriptors.enabled` cluster setting to `true`. This setting allows introspection tables like `information_schema` and `pg_catalog` to use cached descriptors when building the table results, which improves the performance of introspection queries when there are many tables in the cluster. 
[#159162][#159162] -- The `bulkio.import.elastic_control.enabled` cluster setting is now enabled by default, allowing import operations to integrate with elastic CPU control and automatically throttle based on available resources. [#163867][#163867] -- The `bulkio.ingest.sst_batcher_elastic_control.enabled` cluster setting is now enabled by default, allowing SST batcher operations to integrate with elastic CPU control and automatically throttle based on available resources. [#163868][#163868] -- The session variable `distsql_prevent_partitioning_soft_limited_scans` is now enabled by default. This prevents scans with soft limits from being planned as multiple TableReaders, which decreases the initial setup costs of some fully-distributed query plans. [#160051][#160051] -- Creating or altering a changefeed or Kafka/Pub/Sub external connection now returns an error when the `topic_name` query parameter is explicitly set to an empty string in the sink URI, rather than silently falling back to using the table name as the topic name. Existing changefeeds with an empty `topic_name` are not affected. [#164225][#164225] -- TTL jobs are now owned by the schedule owner instead of the `node` user. This allows users with `CONTROLJOB` privilege to cancel TTL jobs, provided the schedule owner is not an admin (`CONTROLJOB` does not grant control over admin-owned jobs). [#161226][#161226] -- Calling `information_schema.crdb_rewrite_inline_hints` now requires the `REPAIRCLUSTER` privilege. [#160716][#160716] -- The **Statement Details** page URL format has changed from `/statement/{implicitTxn}/{statementId}` to `/statement/{statementId}`. As a result, bookmarks using the old URL structure will no longer work. [#159558][#159558] -- Changed the unit of measurement for admission control duration metrics from microseconds to nanoseconds. 
The following metrics are affected: `admission.granter.slots_exhausted_duration.kv`, `admission.granter.cpu_load_short_period_duration.kv`, `admission.granter.cpu_load_long_period_duration.kv`, `admission.granter.io_tokens_exhausted_duration.kv`, `admission.granter.elastic_io_tokens_exhausted_duration.kv`, and `admission.elastic_cpu.nanos_exhausted_duration`. Note that dashboards displaying these metrics will show a discontinuity at upgrade time, with pre-upgrade values appearing much lower due to the unit change. [#160956][#160956] -- Renamed the builtin function `crdb_internal.inject_hint` (introduced in v26.1.0-alpha.2) to `information_schema.crdb_rewrite_inline_hints`. [#160716][#160716] -- Removed the `incremental_location` option from `BACKUP` and `CREATE SCHEDULE FOR BACKUP`. [#159189][#159189] -- Removed the `incremental_location` option from `SHOW BACKUP` and `RESTORE`. [#160416][#160416] +- Increased the default value of `sql.stats.automatic_full_concurrency_limit` (which controls the maximum number of concurrent full statistics collections) from `1` to number of vCPUs divided by 2 (e.g., 4 vCPU nodes will have the value of `2`). #161806 +- The `TG_ARGV` trigger function parameter now uses 0-based indexing to match PostgreSQL behavior. Previously, `TG_ARGV[1]` returned the first argument; now `TG_ARGV[0]` returns the first argument and `TG_ARGV[1]` returns the second argument. Additionally, usage of `TG_ARGV` no longer requires setting the `allow_create_trigger_function_with_argv_references` session variable. #161925 +- Lowered the default value of the `sql.guardrails.max_row_size_log` cluster setting from `64 MiB` to `16 MiB`, and the default value of `sql.guardrails.max_row_size_err` from `512 MiB` to `80 MiB`. These settings control the maximum size of a row (or column family) that SQL can write before logging a warning or returning an error, respectively. 
The previous defaults were high enough that large rows would hit other limits first (such as the Raft command size limit or the backup SST size limit), producing confusing errors. The new defaults align with existing system limits to provide clearer diagnostics. If your workload legitimately writes rows larger than these new defaults, you can restore the previous behavior by increasing these settings. #164468 +- Changed the default value of the `sql.catalog.allow_leased_descriptors.enabled` cluster setting to `true`. This setting allows introspection tables like `information_schema` and `pg_catalog` to use cached descriptors when building the table results, which improves the performance of introspection queries when there are many tables in the cluster. #159162 +- The `bulkio.import.elastic_control.enabled` cluster setting is now enabled by default, allowing import operations to integrate with elastic CPU control and automatically throttle based on available resources. #163867 +- The `bulkio.ingest.sst_batcher_elastic_control.enabled` cluster setting is now enabled by default, allowing SST batcher operations to integrate with elastic CPU control and automatically throttle based on available resources. #163868 +- The session variable `distsql_prevent_partitioning_soft_limited_scans` is now enabled by default. This prevents scans with soft limits from being planned as multiple TableReaders, which decreases the initial setup costs of some fully-distributed query plans. #160051 +- Creating or altering a changefeed or Kafka/Pub/Sub external connection now returns an error when the `topic_name` query parameter is explicitly set to an empty string in the sink URI, rather than silently falling back to using the table name as the topic name. Existing changefeeds with an empty `topic_name` are not affected. #164225 +- TTL jobs are now owned by the schedule owner instead of the `node` user. 
This allows users with `CONTROLJOB` privilege to cancel TTL jobs, provided the schedule owner is not an admin (`CONTROLJOB` does not grant control over admin-owned jobs). #161226 +- Calling `information_schema.crdb_rewrite_inline_hints` now requires the `REPAIRCLUSTER` privilege. #160716 +- The **Statement Details** page URL format has changed from `/statement/{implicitTxn}/{statementId}` to `/statement/{statementId}`. As a result, bookmarks using the old URL structure will no longer work. #159558 +- Changed the unit of measurement for admission control duration metrics from microseconds to nanoseconds. The following metrics are affected: `admission.granter.slots_exhausted_duration.kv`, `admission.granter.cpu_load_short_period_duration.kv`, `admission.granter.cpu_load_long_period_duration.kv`, `admission.granter.io_tokens_exhausted_duration.kv`, `admission.granter.elastic_io_tokens_exhausted_duration.kv`, and `admission.elastic_cpu.nanos_exhausted_duration`. Note that dashboards displaying these metrics will show a discontinuity at upgrade time, with pre-upgrade values appearing much lower due to the unit change. #160956 +- Renamed the builtin function `crdb_internal.inject_hint` (introduced in v26.1.0-alpha.2) to `information_schema.crdb_rewrite_inline_hints`. #160716 +- Removed the `incremental_location` option from `BACKUP` and `CREATE SCHEDULE FOR BACKUP`. #159189 +- Removed the `incremental_location` option from `SHOW BACKUP` and `RESTORE`. #160416

Security updates

-- LDAP authentication for the DB Console now supports automatic user provisioning. When the cluster setting `security.provisioning.ldap.enabled` is set to true, users who authenticate successfully via LDAP will be automatically created in CockroachDB if they do not already exist. [#163199][#163199] +- LDAP authentication for the DB Console now supports automatic user provisioning. When the cluster setting `security.provisioning.ldap.enabled` is set to true, users who authenticate successfully via LDAP will be automatically created in CockroachDB if they do not already exist. #163199

General changes

-- Changefeeds now support the `partition_alg` option for specifying a Kafka partitioning algorithm. Currently `fnv-1a` (default) and `murmur2` are supported. The option is only valid on Kafka v2 sinks. This is protected by the cluster setting `changefeed.partition_alg.enabled`. An example usage: `SET CLUSTER SETTING changefeed.partition_alg.enabled=true; CREATE CHANGEFEED ... INTO 'kafka://...' WITH partition_alg='murmur2';`. Note that if a changefeed is created using the `murmur2` algorithm, and then the cluster setting is disabled, the changefeed will continue using the `murmur2` algorithm unless the changefeed is altered to use a different `partition_alg`. [#161265][#161265] +- Changefeeds now support the `partition_alg` option for specifying a Kafka partitioning algorithm. Currently `fnv-1a` (default) and `murmur2` are supported. The option is only valid on Kafka v2 sinks. This is protected by the cluster setting `changefeed.partition_alg.enabled`. An example usage: `SET CLUSTER SETTING changefeed.partition_alg.enabled=true; CREATE CHANGEFEED ... INTO 'kafka://...' WITH partition_alg='murmur2';`. Note that if a changefeed is created using the `murmur2` algorithm, and then the cluster setting is disabled, the changefeed will continue using the `murmur2` algorithm unless the changefeed is altered to use a different `partition_alg`. #161265

{{ site.data.products.enterprise }} edition changes

-- Added a new cluster setting, `security.provisioning.oidc.enabled`, to allow automatic provisioning of users when they log in for the first time via OIDC. When enabled, a new user will be created in CockroachDB upon their first successful OIDC authentication. This feature is disabled by default. [#159787][#159787] -- LDAP authentication for the DB Console now additionally supports role-based access control (RBAC) through LDAP group membership. To use this feature, an administrator must first create roles in CockroachDB with names that match the Common Names (CN) of their LDAP groups. These roles should then be granted the desired privileges for DB Console access. When a user who is a member of a corresponding LDAP group logs into the DB Console, they will be automatically granted the role and its associated privileges, creating consistent behavior with SQL client connections. [#162302][#162302] +- Added a new cluster setting, `security.provisioning.oidc.enabled`, to allow automatic provisioning of users when they log in for the first time via OIDC. When enabled, a new user will be created in CockroachDB upon their first successful OIDC authentication. This feature is disabled by default. #159787 +- LDAP authentication for the DB Console now additionally supports role-based access control (RBAC) through LDAP group membership. To use this feature, an administrator must first create roles in CockroachDB with names that match the Common Names (CN) of their LDAP groups. These roles should then be granted the desired privileges for DB Console access. When a user who is a member of a corresponding LDAP group logs into the DB Console, they will be automatically granted the role and its associated privileges, creating consistent behavior with SQL client connections. #162302

SQL language changes

-- Added the `MAINTAIN` privilege, which can be granted on tables and materialized views. Users with the `MAINTAIN` privilege on a materialized view can execute `REFRESH MATERIALIZED VIEW` without being the owner. Users with the `MAINTAIN` privilege on a table can execute `ANALYZE` without needing `SELECT`. This aligns with PostgreSQL 17 behavior. [#164236][#164236] +- Added the `MAINTAIN` privilege, which can be granted on tables and materialized views. Users with the `MAINTAIN` privilege on a materialized view can execute `REFRESH MATERIALIZED VIEW` without being the owner. Users with the `MAINTAIN` privilege on a table can execute `ANALYZE` without needing `SELECT`. This aligns with PostgreSQL 17 behavior. #164236 - Added cluster settings to control the number of concurrent automatic statistics collection jobs: - `sql.stats.automatic_full_concurrency_limit` controls the maximum number of concurrent full statistics collections. The default is 1. - `sql.stats.automatic_extremes_concurrency_limit` controls the maximum number of concurrent partial statistics collections using extremes. The default is 128. - Note that at most one statistics collection job can run on a single table at a time. [#158835][#158835] -- Added a new cluster setting `bulkio.import.distributed_merge.mode` to enable distributed merge support for `IMPORT` operations. When enabled (default: false), `IMPORT` jobs will use a two-phase approach where import processors first write SST files to local storage, then a coordinator merges and ingests them. This can improve performance for large imports by reducing L0 file counts and enabling merge-time optimizations. This feature requires all nodes to be running v26.1 or later. [#159330][#159330] -- CockroachDB now supports the PostgreSQL session variables `tcp_keepalives_idle`, `tcp_keepalives_interval`, `tcp_keepalives_count`, and `tcp_user_timeout`. These allow per-session control over TCP keepalive behavior on each connection. 
A value of 0 (the default) uses the corresponding cluster setting. Non-zero values override the cluster setting for that session only. Units match PostgreSQL: seconds for keepalive settings, milliseconds for `tcp_user_timeout`. [#164369][#164369] -- Added the `optimizer_inline_any_unnest_subquery` session setting to enable/disable the optimizer rule `InlineAnyProjectSet`. The setting is on by default in v26.2 and later. [#161880][#161880] -- Users can now set the `use_backups_with_ids` session setting to enable a new `SHOW BACKUPS IN` experience. When enabled, `SHOW BACKUPS IN {collection}` displays all backups in the collection. Results can be filtered by backup end time using `OLDER THAN {timestamp}` or `NEWER THAN {timestamp}` clauses. Example usage: `SET use_backups_with_ids = true; SHOW BACKUPS IN '{collection}' OLDER THAN '2026-01-09 12:13:14' NEWER THAN '2026-01-04 15:16:17';` [#160137][#160137] -- If the new `SHOW BACKUP` experience is enabled by setting the `use_backups_with_ids` session variable to true, `SHOW BACKUP` will parse the IDs provided by `SHOW BACKUPS` and display contents for single backups. [#160812][#160812] -- If the new `RESTORE` experience is enabled by setting the `use_backups_with_ids` session variable to true, `RESTORE` will parse the IDs provided by `SHOW BACKUPS` and will restore the specified backup without the use of `AS OF SYSTEM TIME`. [#161294][#161294] -- `SHOW BACKUP` and `RESTORE` now allow backup IDs even if the `use_backups_with_ids` session variable is not set. Setting the variable only configures whether `LATEST` is resolved using the new or legacy path. [#162329][#162329] -- Added the `REVISION START TIME` option to the new `SHOW BACKUPS` experience enabled via the `use_backups_with_ids` session variable. Use the `REVISION START TIME` option to view the revision start times of revision history backups. 
[#161328][#161328] -- Added support for `SHOW STATEMENT HINTS`, which displays information about the statement hints (if any) associated with the given statement fingerprint string. The fingerprint is normalized in the same way as `EXPLAIN (FINGERPRINT)` before hints are matched. Example usage: `SHOW STATEMENT HINTS FOR ' SELECT * FROM xy WHERE x = 10 '` or `SHOW STATEMENT HINTS FOR $$ SELECT * FROM xy WHERE x = 10 $$ WITH DETAILS`. [#159231][#159231] -- `CREATE OR REPLACE TRIGGER` is now supported. If a trigger with the same name already exists on the same table, it is replaced with the new definition. If no trigger with that name exists, a new trigger is created. [#162633][#162633] -- Added support for `ALTER TABLE ENABLE TRIGGER` and `ALTER TABLE DISABLE TRIGGER` syntax. This allows users to temporarily disable triggers without dropping them, and later re-enable them. The syntax supports disabling/enabling individual triggers by name, or all triggers on a table using the `ALL` or `USER` keywords. [#161924][#161924] -- Updated `DROP TRIGGER` to accept the `CASCADE` option for PostgreSQL compatibility. Since triggers in CockroachDB cannot have dependents, `CASCADE` behaves the same as `RESTRICT` or omitting the option entirely. [#161915][#161915] -- `ALTER TABLE ... DROP CONSTRAINT` can now be used to drop `UNIQUE` constraints. The backing `UNIQUE` index will also be dropped, as CockroachDB treats the constraint and index as the same thing. [#162345][#162345] -- `DROP COLUMN` and `DROP INDEX` with `CASCADE` now properly drop dependent triggers. Previously, these operations would fail with an unimplemented error when a trigger depended on the column or index being dropped. [#163296][#163296] -- `CREATE OR REPLACE FUNCTION` now works on trigger functions that have active triggers. Previously, this was blocked with an unimplemented error, requiring users to drop and recreate triggers. 
The replacement now atomically updates all dependent triggers to execute the new function body. [#163348][#163348] -- Updated CockroachDB to allow a prefix of index key columns to be used for the shard column in a hash-sharded index. The `shard_columns` storage parameter may be used to override the default, which uses all index key columns in the shard column. [#161422][#161422] -- Added support for the `pg_trigger_depth()` builtin function, which returns the current nesting level of PostgreSQL triggers (0 if not called from inside a trigger). [#162286][#162286] -- A database-level changefeed with no tables will periodically poll to check for tables added to the database. The new option `hibernation_polling_frequency` sets the frequency at which the polling occurs, until a table is found, at which point polling ceases. [#156771][#156771] -- `INSPECT` is now a generally available (GA) feature. The `enable_inspect_command` session variable has been deprecated, and is now effectively always set to `true`. [#159659][#159659] -- Added the `STRICT` option for locality-aware backups. When enabled, backups fail if data from a KV node with one locality tag would be backed up to a bucket with a different locality tag, ensuring data domiciling compliance. [#158999][#158999] -- Added support for the `dmetaphone()`, `dmetaphone_alt()`, and `daitch_mokotoff()` built-in functions, completing CockroachDB's implementation of the PostgreSQL `fuzzystrmatch` extension. `dmetaphone` and `dmetaphone_alt` return Double Metaphone phonetic codes for a string, and `daitch_mokotoff` returns an array of Daitch-Mokotoff soundex codes. These functions are useful for fuzzy string matching based on phonetic similarity. [#163430][#163430] -- `crdb_internal.datums_to_bytes` is now available in the `information_schema` system catalog as `information_schema.crdb_datums_to_bytes`. [#156963][#156963] -- The `information_schema.crdb_datums_to_bytes` built-in function is now documented. 
[#160486][#160486] -- Row count validation after `IMPORT` is now enabled by default in async mode. After an `IMPORT` completes, a background `INSPECT` job validates that the imported row count matches expectations. The `IMPORT` result now includes an `inspect_job_id` column so the `INSPECT` job can be viewed separately. The `bulkio.import.row_count_validation.mode` cluster setting controls this behavior, with valid values of `off`, `async` (default), and `sync`. [#163543][#163543] -- Queries executed via the vectorized engine now display their progress in the `phase` column of `SHOW QUERIES`. Previously, this feature was only available in the row-by-row engine. [#158029][#158029] -- CockroachDB now shows execution statistics (like `execution time`) on `EXPLAIN ANALYZE` output for `render` nodes, which often handle built-in functions. [#161509][#161509] -- The output of `EXPLAIN [ANALYZE]` in non-`VERBOSE` mode is now more succinct. [#153361][#153361] + Note that at most one statistics collection job can run on a single table at a time. #158835 +- Added a new cluster setting `bulkio.import.distributed_merge.mode` to enable distributed merge support for `IMPORT` operations. When enabled (default: false), `IMPORT` jobs will use a two-phase approach where import processors first write SST files to local storage, then a coordinator merges and ingests them. This can improve performance for large imports by reducing L0 file counts and enabling merge-time optimizations. This feature requires all nodes to be running v26.1 or later. #159330 +- CockroachDB now supports the PostgreSQL session variables `tcp_keepalives_idle`, `tcp_keepalives_interval`, `tcp_keepalives_count`, and `tcp_user_timeout`. These allow per-session control over TCP keepalive behavior on each connection. A value of 0 (the default) uses the corresponding cluster setting. Non-zero values override the cluster setting for that session only. 
Units match PostgreSQL: seconds for keepalive settings, milliseconds for `tcp_user_timeout`. #164369 +- Added the `optimizer_inline_any_unnest_subquery` session setting to enable/disable the optimizer rule `InlineAnyProjectSet`. The setting is on by default in v26.2 and later. #161880 +- Users can now set the `use_backups_with_ids` session setting to enable a new `SHOW BACKUPS IN` experience. When enabled, `SHOW BACKUPS IN {collection}` displays all backups in the collection. Results can be filtered by backup end time using `OLDER THAN {timestamp}` or `NEWER THAN {timestamp}` clauses. Example usage: `SET use_backups_with_ids = true; SHOW BACKUPS IN '{collection}' OLDER THAN '2026-01-09 12:13:14' NEWER THAN '2026-01-04 15:16:17';` #160137 +- If the new `SHOW BACKUP` experience is enabled by setting the `use_backups_with_ids` session variable to true, `SHOW BACKUP` will parse the IDs provided by `SHOW BACKUPS` and display contents for single backups. #160812 +- If the new `RESTORE` experience is enabled by setting the `use_backups_with_ids` session variable to true, `RESTORE` will parse the IDs provided by `SHOW BACKUPS` and will restore the specified backup without the use of `AS OF SYSTEM TIME`. #161294 +- `SHOW BACKUP` and `RESTORE` now allow backup IDs even if the `use_backups_with_ids` session variable is not set. Setting the variable only configures whether `LATEST` is resolved using the new or legacy path. #162329 +- Added the `REVISION START TIME` option to the new `SHOW BACKUPS` experience enabled via the `use_backups_with_ids` session variable. Use the `REVISION START TIME` option to view the revision start times of revision history backups. #161328 +- Added support for `SHOW STATEMENT HINTS`, which displays information about the statement hints (if any) associated with the given statement fingerprint string. The fingerprint is normalized in the same way as `EXPLAIN (FINGERPRINT)` before hints are matched. 
Example usage: `SHOW STATEMENT HINTS FOR ' SELECT * FROM xy WHERE x = 10 '` or `SHOW STATEMENT HINTS FOR $$ SELECT * FROM xy WHERE x = 10 $$ WITH DETAILS`. #159231 +- `CREATE OR REPLACE TRIGGER` is now supported. If a trigger with the same name already exists on the same table, it is replaced with the new definition. If no trigger with that name exists, a new trigger is created. #162633 +- Added support for `ALTER TABLE ENABLE TRIGGER` and `ALTER TABLE DISABLE TRIGGER` syntax. This allows users to temporarily disable triggers without dropping them, and later re-enable them. The syntax supports disabling/enabling individual triggers by name, or all triggers on a table using the `ALL` or `USER` keywords. #161924 +- Updated `DROP TRIGGER` to accept the `CASCADE` option for PostgreSQL compatibility. Since triggers in CockroachDB cannot have dependents, `CASCADE` behaves the same as `RESTRICT` or omitting the option entirely. #161915 +- `ALTER TABLE ... DROP CONSTRAINT` can now be used to drop `UNIQUE` constraints. The backing `UNIQUE` index will also be dropped, as CockroachDB treats the constraint and index as the same thing. #162345 +- `DROP COLUMN` and `DROP INDEX` with `CASCADE` now properly drop dependent triggers. Previously, these operations would fail with an unimplemented error when a trigger depended on the column or index being dropped. #163296 +- `CREATE OR REPLACE FUNCTION` now works on trigger functions that have active triggers. Previously, this was blocked with an unimplemented error, requiring users to drop and recreate triggers. The replacement now atomically updates all dependent triggers to execute the new function body. #163348 +- Updated CockroachDB to allow a prefix of index key columns to be used for the shard column in a hash-sharded index. The `shard_columns` storage parameter may be used to override the default, which uses all index key columns in the shard column. 
#161422 +- Added support for the `pg_trigger_depth()` built-in function, which returns the current nesting level of PostgreSQL triggers (0 if not called from inside a trigger). #162286 +- A database-level changefeed with no tables will periodically poll to check for tables added to the database. The new option `hibernation_polling_frequency` sets the frequency at which the polling occurs, until a table is found, at which point polling ceases. #156771 +- `INSPECT` is now a generally available (GA) feature. The `enable_inspect_command` session variable has been deprecated, and is now effectively always set to `true`. #159659 +- Added the `STRICT` option for locality-aware backups. When enabled, backups fail if data from a KV node with one locality tag would be backed up to a bucket with a different locality tag, ensuring data domiciling compliance. #158999 +- Added support for the `dmetaphone()`, `dmetaphone_alt()`, and `daitch_mokotoff()` built-in functions, completing CockroachDB's implementation of the PostgreSQL `fuzzystrmatch` extension. `dmetaphone` and `dmetaphone_alt` return Double Metaphone phonetic codes for a string, and `daitch_mokotoff` returns an array of Daitch-Mokotoff soundex codes. These functions are useful for fuzzy string matching based on phonetic similarity. #163430 +- `crdb_internal.datums_to_bytes` is now available in the `information_schema` system catalog as `information_schema.crdb_datums_to_bytes`. #156963 +- The `information_schema.crdb_datums_to_bytes` built-in function is now documented. #160486 +- Row count validation after `IMPORT` is now enabled by default in async mode. After an `IMPORT` completes, a background `INSPECT` job validates that the imported row count matches expectations. The `IMPORT` result now includes an `inspect_job_id` column so the `INSPECT` job can be viewed separately. The `bulkio.import.row_count_validation.mode` cluster setting controls this behavior, with valid values of `off`, `async` (default), and `sync`. 
#163543 +- Queries executed via the vectorized engine now display their progress in the `phase` column of `SHOW QUERIES`. Previously, this feature was only available in the row-by-row engine. #158029 +- CockroachDB now shows execution statistics (like `execution time`) on `EXPLAIN ANALYZE` output for `render` nodes, which often handle built-in functions. #161509 +- The output of `EXPLAIN [ANALYZE]` in non-`VERBOSE` mode is now more succinct. #153361

Operational changes

-- The new `cockroach gen dashboard` command generates standardized monitoring dashboards from an embedded configuration file. It outputs a dashboard JSON file for either Datadog (`--tool=datadog`) or Grafana (`--tool=grafana`), with Grafana dashboards using Prometheus queries. The generated dashboards include metrics across Overview, Hardware, Runtime, Networking, SQL, and Storage categories. Use `--output` to set the output file path and `--rollup-interval` to control metric aggregation. [#161050][#161050] -- Added the `server.sql_tcp_user.timeout` cluster setting, which specifies the maximum amount of time transmitted data can remain unacknowledged before the underlying TCP connection is forcefully closed. This setting is enabled by default with a value of 30 seconds and is supported on Linux and macOS (Darwin). [#164037][#164037] -- Introduced a new cluster setting `kvadmission.store.snapshot_ingest_bandwidth_control.min_rate.enabled`. When this setting is enabled and disk bandwidth-based admission control is active, snapshot ingestion will be admitted at a minimum rate. This prevents snapshot ingestion from being starved by other elastic work. [#159436][#159436] -- The `kv.range_split.load_sample_reset_duration` cluster setting now defaults to `30m`. This should improve load-based splitting in rare edge cases. [#159499][#159499] -- Added the `kv.protectedts.protect`, `kv.protectedts.release`, `kv.protectedts.update_timestamp`, `kv.protectedts.get_record`, and `kv.protectedts.mark_verified` metrics to track protected timestamp storage operations. These metrics help diagnose issues with excessive protected timestamp churn and operational errors. Each operation tracks both successful completions (`.success`) and failures (`.failed`, such as `ErrExists` or `ErrNotExists`). Operators can monitor these metrics to understand PTS system behavior and identify performance issues related to backups, changefeeds, and other features that use protected timestamps. 
[#160129][#160129] -- Added a new metric `sql.rls.policies_applied.count` that tracks the number of SQL statements where row-level security (RLS) policies were applied during query planning. [#164405][#164405] -- RPC connection metrics now include a `protocol` label. The following metrics are affected: `rpc.connection.avg_round_trip_latency`, `rpc.connection.failures`, `rpc.connection.healthy`, `rpc.connection.healthy_nanos`, `rpc.connection.heartbeats`, `rpc.connection.tcp_rtt`, `rpc.connection.tcp_rtt_var`, `rpc.connection.unhealthy`, `rpc.connection.unhealthy_nanos`, and `rpc.connection.inactive`. In v26.2, the label value is always `grpc`. For example: `rpc_connection_healthy{node_id="1",remote_node_id="0",remote_addr="localhost:26258",class="system",protocol="grpc"} 1` [#162528][#162528] -- External connections can now be used with online restore. [#159090][#159090] -- Changed goroutine profile dumps from human-readable `.txt.gz` files to binary proto `.pb.gz` files. This improves the performance of the goroutine dumper by eliminating brief in-process pauses that occurred when collecting goroutine stacks. [#160798][#160798] -- Added a new structured event of type `rewrite_inline_hints` that is emitted when an inline-hints rewrite rule is added using `information_schema.crdb_rewrite_inline_hints`. This event is written to both the event log and the `OPS` channel. [#160901][#160901] -- Added a new metric `sql.query.with_statement_hints.count` that is incremented whenever a statement is executed with one or more external statement hints applied. An example of an external statement hint is an inline-hints rewrite rule added by calling `information_schema.crdb_rewrite_inline_hints`. [#161043][#161043] -- Logical Data Replication (LDR) now supports hash-sharded indexes and secondary indexes with virtual computed columns. Previously, tables with these index types could not be replicated using LDR. 
[#161062][#161062] -- Backup schedules that utilize the `revision_history` option now apply that option only to incremental backups triggered by that schedule, rather than duplicating the revision history in the full backups as well. [#162105][#162105] -- The `build.timestamp` Prometheus metric now carries `major` and `minor` labels identifying the release series of the running CockroachDB binary (e.g., `major="26", minor="1"` for any v26.1.x build). [#163834][#163834] -- Jobs now clear their running status messages upon successful completion. [#163765][#163765] -- Changefeed ranges are now more accurately reported as lagging. [#163427][#163427] +- The new `cockroach gen dashboard` command generates standardized monitoring dashboards from an embedded configuration file. It outputs a dashboard JSON file for either Datadog (`--tool=datadog`) or Grafana (`--tool=grafana`), with Grafana dashboards using Prometheus queries. The generated dashboards include metrics across Overview, Hardware, Runtime, Networking, SQL, and Storage categories. Use `--output` to set the output file path and `--rollup-interval` to control metric aggregation. #161050 +- Added the `server.sql_tcp_user.timeout` cluster setting, which specifies the maximum amount of time transmitted data can remain unacknowledged before the underlying TCP connection is forcefully closed. This setting is enabled by default with a value of 30 seconds and is supported on Linux and macOS (Darwin). #164037 +- Introduced a new cluster setting `kvadmission.store.snapshot_ingest_bandwidth_control.min_rate.enabled`. When this setting is enabled and disk bandwidth-based admission control is active, snapshot ingestion will be admitted at a minimum rate. This prevents snapshot ingestion from being starved by other elastic work. #159436 +- The `kv.range_split.load_sample_reset_duration` cluster setting now defaults to `30m`. This should improve load-based splitting in rare edge cases. 
#159499 +- Added the `kv.protectedts.protect`, `kv.protectedts.release`, `kv.protectedts.update_timestamp`, `kv.protectedts.get_record`, and `kv.protectedts.mark_verified` metrics to track protected timestamp storage operations. These metrics help diagnose issues with excessive protected timestamp churn and operational errors. Each operation tracks both successful completions (`.success`) and failures (`.failed`, such as `ErrExists` or `ErrNotExists`). Operators can monitor these metrics to understand PTS system behavior and identify performance issues related to backups, changefeeds, and other features that use protected timestamps. #160129 +- Added a new metric `sql.rls.policies_applied.count` that tracks the number of SQL statements where row-level security (RLS) policies were applied during query planning. #164405 +- RPC connection metrics now include a `protocol` label. The following metrics are affected: `rpc.connection.avg_round_trip_latency`, `rpc.connection.failures`, `rpc.connection.healthy`, `rpc.connection.healthy_nanos`, `rpc.connection.heartbeats`, `rpc.connection.tcp_rtt`, `rpc.connection.tcp_rtt_var`, `rpc.connection.unhealthy`, `rpc.connection.unhealthy_nanos`, and `rpc.connection.inactive`. In v26.2, the label value is always `grpc`. For example: `rpc_connection_healthy{node_id="1",remote_node_id="0",remote_addr="localhost:26258",class="system",protocol="grpc"} 1` #162528 +- External connections can now be used with online restore. #159090 +- Changed goroutine profile dumps from human-readable `.txt.gz` files to binary proto `.pb.gz` files. This improves the performance of the goroutine dumper by eliminating brief in-process pauses that occurred when collecting goroutine stacks. #160798 +- Added a new structured event of type `rewrite_inline_hints` that is emitted when an inline-hints rewrite rule is added using `information_schema.crdb_rewrite_inline_hints`. This event is written to both the event log and the `OPS` channel. 
#160901 +- Added a new metric `sql.query.with_statement_hints.count` that is incremented whenever a statement is executed with one or more external statement hints applied. An example of an external statement hint is an inline-hints rewrite rule added by calling `information_schema.crdb_rewrite_inline_hints`. #161043 +- Logical Data Replication (LDR) now supports hash-sharded indexes and secondary indexes with virtual computed columns. Previously, tables with these index types could not be replicated using LDR. #161062 +- Backup schedules that utilize the `revision_history` option now apply that option only to incremental backups triggered by that schedule, rather than duplicating the revision history in the full backups as well. #162105 +- The `build.timestamp` Prometheus metric now carries `major` and `minor` labels identifying the release series of the running CockroachDB binary (e.g., `major="26", minor="1"` for any v26.1.x build). #163834 +- Jobs now clear their running status messages upon successful completion. #163765 +- Changefeed ranges are now more accurately reported as lagging. #163427

Command-line changes

-- The `cockroach debug tsdump` command now defaults to `--format=raw` instead of `--format=text`. The `raw` (gob) format is optimized for Datadog ingestion. A new `--output` flag lets you write output directly to a file, avoiding potential file corruption that can occur with shell redirection. If `--output` is not specified, output is written to `stdout`. [#160538][#160538] -- The `cockroach debug tsdump` command now supports ZSTD encoding via `--format=raw --encoding=zstd`. This generates compressed tsdump files that are approximately 85% smaller than raw format. The `tsdump upload` command automatically detects and decompresses ZSTD files, allowing direct upload without manual decompression. [#161998][#161998] -- The `cockroach debug zip` command's `--include-files` and `--exclude-files` flags now support full zip path patterns. Patterns containing `/` are matched against the full path within the zip archive (e.g., `--include-files='debug/nodes/1/*.json'`). Patterns without `/` continue to match the base file name as before. [#163266][#163266] -- Added a `--list-dbs` flag to `workload init workload_generator` that lists all user databases found in debug logs without initializing tables. This helps users discover which databases are available in the debug zip before running the full init command. [#163930][#163930] +- The `cockroach debug tsdump` command now defaults to `--format=raw` instead of `--format=text`. The `raw` (gob) format is optimized for Datadog ingestion. A new `--output` flag lets you write output directly to a file, avoiding potential file corruption that can occur with shell redirection. If `--output` is not specified, output is written to `stdout`. #160538 +- The `cockroach debug tsdump` command now supports ZSTD encoding via `--format=raw --encoding=zstd`. This generates compressed tsdump files that are approximately 85% smaller than raw format. 
The `tsdump upload` command automatically detects and decompresses ZSTD files, allowing direct upload without manual decompression. #161998 +- The `cockroach debug zip` command's `--include-files` and `--exclude-files` flags now support full zip path patterns. Patterns containing `/` are matched against the full path within the zip archive (e.g., `--include-files='debug/nodes/1/*.json'`). Patterns without `/` continue to match the base file name as before. #163266 +- Added a `--list-dbs` flag to `workload init workload_generator` that lists all user databases found in debug logs without initializing tables. This helps users discover which databases are available in the debug zip before running the full init command. #163930

DB Console changes

-- Added a new time-series bar graph called **Plan Distribution Over Time** to the **Statement Fingerprint** page, on the **Explain Plans** tab. It shows which execution plans were used in each time interval, helping detect shifts in query plan distributions. [#161011][#161011] -- The **SQL Activity** > **Sessions** page now defaults the **Session Status** filter to **Active, Idle** to exclude closed sessions. [#160576][#160576] +- Added a new time-series bar graph called **Plan Distribution Over Time** to the **Statement Fingerprint** page, on the **Explain Plans** tab. It shows which execution plans were used in each time interval, helping detect shifts in query plan distributions. #161011 +- The **SQL Activity** > **Sessions** page now defaults the **Session Status** filter to **Active, Idle** to exclude closed sessions. #160576

Bug fixes

-- The fix for `node descriptor not found` errors for changefeeds with `execution_locality` filters in CockroachDB Basic and Standard clusters is now controlled by cluster setting `sql.instance_info.use_instance_resolver.enabled` (default: `true`). [#163947][#163947] -- Fixed a bug that caused a routine with an `INSERT` statement to unnecessarily block dropping a hash-sharded index or computed column on the target table. This fix applies only to newly created routines. In releases prior to v25.3, the fix must be enabled by setting the session variable `use_improved_routine_dependency_tracking` to `on`. [#146250][#146250] -- Fixed a bug where creating a routine could create unnecessary column dependencies when the routine references columns through CHECK constraints (including those for RLS policies and hash-sharded indexes) or partial index predicates. These unnecessary dependencies prevented dropping the column without first dropping the routine. The fix is gated behind the session setting `use_improved_routine_deps_triggers_and_computed_cols`, which is off by default prior to v26.1. [#159126][#159126] -- Fixed a bug that allowed a column to be dropped from a table even if it was referenced in the `RETURNING` clause of an `UPDATE` or `DELETE` statement in a routine. In releases prior to v25.3, the fix must be enabled by setting the session variable `use_improved_routine_dependency_tracking` to `on`. [#146250][#146250] -- CockroachDB could previously encounter internal errors like `column statistics cannot be determined for empty column set` and `invalid union` in some edge cases with `UNION`, `EXCEPT`, and `INTERCEPT`. This has now been fixed. [#150706][#150706] -- Fixed a bug that could cause a scan over a secondary index to read significantly more KVs than necessary in order to satisfy a limit when the scanned index had more than one column family. 
[#156672][#156672] -- Fixed an issue where long-running transactions with many statements could cause unbounded memory growth in the SQL statistics subsystem. When a transaction includes a large number of statements, the SQL statistics ingester now automatically flushes buffered statistics before the transaction commits. As a side effect, the flushed statement statistics might not have an associated transaction fingerprint ID because the transaction has not yet completed. In such cases, the transaction fingerprint ID cannot be backfilled after the fact. [#158527][#158527] -- Fixed a bug that allowed columns to be dropped despite being referenced by a routine. This could occur when a column was only referenced as a target column in the `SET` clause of an `UPDATE` statement within the routine. This fix only applies to newly-created routines. In versions prior to v26.1, the fix must be enabled by setting the session variable `prevent_update_set_column_drop`. [#158935][#158935] -- Fixed a bug that caused newly-created routines to incorrectly prevent dropping columns that were not directly referenced, most notably columns referenced by computed column expressions. The fix is gated behind the session setting `use_improved_routine_deps_triggers_and_computed_cols`, which is off by default prior to v26.1. [#158935][#158935] -- Fixed a bug where schema changes could fail after a `RESTORE` due to missing session data. [#159176][#159176] -- The `ascii` built-in function now returns `0` when the input is the empty string instead of an error. [#159178][#159178] -- Fixed a bug where comments associated with constraints were left behind after the column and constraint were dropped. [#159180][#159180] -- Fixed a bug which could cause prepared statements to fail with the error message `non-const expression` when they contained filters with stable functions. This bug has been present since 25.4.0. 
[#159201][#159201] -- Fixed a bug in the TPC-C workload where long-duration runs (>= 4 days or indefinite) would experience periodic performance degradation every 24 hours due to excessive concurrent `UPDATE` statements resetting warehouse and district year-to-date values. [#159286][#159286] -- Fixed a race condition where queries run after revoking `BYPASSRLS` could return wrong results because cached plans failed to notice the change immediately. [#159354][#159354] -- Fixed a bug where `TRUNCATE` did not behave correctly with respect to the `schema_locked` storage parameter, and was not being blocked when Logical Data Replication (LDR) was in use. This behavior was incorrect and has been fixed. [#159378][#159378] -- Fixed a race condition that could occur during context cancellation of an incoming snapshot. [#159403][#159403] -- Fixed a bug that could cause a panic during changefeed startup if an error occurred while initializing the metrics controller. [#159431][#159431] -- Fixed a memory accounting issue that could occur when a lease expired due to a SQL liveness session-based timeout. [#159527][#159527] -- Fixed a bug that caused `SHOW CREATE FUNCTION` to error when the function body contained casts from columns to user-defined types. [#159642][#159642] -- Fixed a bug where a query predicate could be ignored when all of the following conditions were met: the query used a lookup join to an index, the predicate constrained a column to multiple values (e.g., `column IN (1, 2)`), and the constrained column followed one or more columns with optional multi-value constraints in the index. This bug was introduced in v24.3.0. [#159722][#159722] -- Fixed a bug where rolling back a transaction that had just rolled back a savepoint would block other transactions accessing the same rows for five seconds. [#160346][#160346] -- Fixed a deadlock that could occur when a statistics creation task panicked. 
[#160348][#160348] -- Fixed a bug where CockroachDB could crash when handling decimals with negative scales via the extended PGWire protocol. An error is now returned instead, matching PostgreSQL behavior. [#160499][#160499] -- Fixed a bug where the `pprof` UI endpoints for allocs, heap, block, and mutex profiles ignored the seconds parameter and returned immediate snapshots instead of delta profiles. [#160608][#160608] -- Previously, v26.1.0-beta.1 and v26.1.0-beta.2 could encounter a rare process crash when running TTL jobs. This has been fixed. [#160674][#160674] -- Fixed a bug where schema changes adding a `NOT NULL` constraint could enter an infinite retry loop if a row violated the constraint and contained certain content (e.g., `"EOF"`). Such errors are now correctly classified and don't cause retries. [#160780][#160780] -- An error will now be reported when the database provided as the argument to a `SHOW REGIONS` or `SHOW SUPER REGIONS` statement does not exist. This bug had been present since version v21.1. [#161014][#161014] -- Fixed a bug where `CREATE INDEX` on a table with `PARTITION ALL BY` would fail if the partition columns were explicitly included in the primary key definition. [#161083][#161083] -- Fixed a bug in which inline-hints rewrite rules created with `information_schema.crdb_rewrite_inline_hints` were not correctly applied to statements run with `EXPLAIN ANALYZE`. This bug was introduced in v26.1.0-alpha.2. [#161273][#161273] -- Fixed a bug where AVRO file imports of data with JSON or binary records could hang indefinitely when encountering stream errors from cloud storage (such as `HTTP/2` `CANCEL` errors). Import jobs will now properly fail with an error instead of hanging. [#161290][#161290] -- Fixed a bug where IMPORT with AVRO data using OCF format could silently lose data if the underlying storage (e.g., S3) returned an error during read. Such errors are now properly reported. 
Other formats (specified via `data_as_binary_records` and `data_as_json_records` options) are unaffected. The bug has been present since about v20.1. [#161318][#161318] -- Fixed a bug that prevented successfully injecting hints using `information_schema.crdb_rewrite_inline_hints` for `INSERT`, `UPSERT`, `UPDATE`, and `DELETE` statements. This bug had existed since hint injection was introduced in v26.1.0-alpha.2. [#161773][#161773] -- Fixed prepared statements failing with `version mismatch` errors when user-defined types are modified between preparation and execution. Prepared statements now automatically detect UDT changes and re-parse to use current type definitions. [#161827][#161827] -- Previously, CockroachDB could hit an internal error when evaluating built-in functions with `'{}'` as an argument (without explicit type casts, such as on a query like `SELECT cardinality('{}');`). This is now fixed and a regular error is returned instead (matching PostgreSQL behavior). [#161835][#161835] -- Fixed a bug where the index definition shown in `pg_indexes` for hash sharded indexes with `STORING` columns was not valid SQL. The `STORING` clause now appears in the correct position. [#161882][#161882] -- Fixed a bug where `DROP TABLE ... CASCADE` would incorrectly drop tables that had triggers or row-level security (RLS) policies referencing the dropped table. Now only the triggers/policies are dropped, and the tables owning them remain intact. [#161914][#161914] -- Reduced contention when dropping descriptors or running concurrent imports. [#161941][#161941] -- Fixed a bug where multi-statement explicit transactions using `SAVEPOINT` to recover from certain errors (like duplicate key-value violations) could lose writes performed before the savepoint was created, in rare cases when buffered writes were enabled (off by default). This bug was introduced in v25.2. 
[#161972][#161972] -- Fixed a bug introduced in v26.1.0-beta.1 in which row-level TTL jobs could encounter GC threshold errors if each node had a large number of spans to process. [#161979][#161979] -- Fixed an error that occurred when using generic query plans that generates a lookup join on indexes containing identity computed columns. [#162036][#162036] -- Fixed a bug that could cause changefeeds using Kafka v1 sinks to hang when the changefeed was cancelled. [#162058][#162058] -- Fixed an internal error `could not find format code for column N` that occurred when executing `EXPLAIN ANALYZE EXECUTE` statements via JDBC or other clients using the PostgreSQL binary protocol. [#162115][#162115] -- Fixed a bug where statement bundles were missing `CREATE TYPE` statements for user-defined types used as array column types. [#162357][#162357] -- Fixed a bug in which PL/pgSQL UDFs with many `IF` statements would cause a timeout and/or OOM when executed from a prepared statement. This bug was introduced in v23.2.22, v24.1.15, v24.3.9, v25.1.2, and v25.2.0. [#162512][#162512] -- Fixed a bug where an error would occur when defining a foreign key on a hash-sharded primary key without explicitly providing the primary key columns. [#162608][#162608] -- Fixed a bug where generating a debug zip could trigger an out-of-memory (OOM) condition on a node if malformed log entries were present in logs using `json` or `json-compact` formatting. This bug was introduced in v24.1. [#163224][#163224] -- Fixed a bug that prevented the `optimizer_min_row_count` setting from applying to anti-join expressions, which could lead to bad query plans. The fix is gated behind `optimizer_use_min_row_count_anti_join_fix`, which is on by default on v26.2 and later, and off by default in earlier versions. 
[#163244][#163244] -- Fixed an optimizer limitation that prevented index usage on computed columns when querying through views or subqueries containing JSON fetch expressions (such as `->`, `->>`, `#>`, or `#>>`). Queries that project JSON expressions matching indexed computed column definitions now correctly use indexes instead of performing full table scans, significantly improving performance for JSON workloads. [#163395][#163395] +- The fix for `node descriptor not found` errors for changefeeds with `execution_locality` filters in CockroachDB Basic and Standard clusters is now controlled by cluster setting `sql.instance_info.use_instance_resolver.enabled` (default: `true`). #163947 +- Fixed a bug that caused a routine with an `INSERT` statement to unnecessarily block dropping a hash-sharded index or computed column on the target table. This fix applies only to newly created routines. In releases prior to v25.3, the fix must be enabled by setting the session variable `use_improved_routine_dependency_tracking` to `on`. #146250 +- Fixed a bug where creating a routine could create unnecessary column dependencies when the routine references columns through CHECK constraints (including those for RLS policies and hash-sharded indexes) or partial index predicates. These unnecessary dependencies prevented dropping the column without first dropping the routine. The fix is gated behind the session setting `use_improved_routine_deps_triggers_and_computed_cols`, which is off by default prior to v26.1. #159126 +- Fixed a bug that allowed a column to be dropped from a table even if it was referenced in the `RETURNING` clause of an `UPDATE` or `DELETE` statement in a routine. In releases prior to v25.3, the fix must be enabled by setting the session variable `use_improved_routine_dependency_tracking` to `on`. 
#146250 +- CockroachDB could previously encounter internal errors like `column statistics cannot be determined for empty column set` and `invalid union` in some edge cases with `UNION`, `EXCEPT`, and `INTERSECT`. This has now been fixed. #150706 +- Fixed a bug that could cause a scan over a secondary index to read significantly more KVs than necessary in order to satisfy a limit when the scanned index had more than one column family. #156672 +- Fixed an issue where long-running transactions with many statements could cause unbounded memory growth in the SQL statistics subsystem. When a transaction includes a large number of statements, the SQL statistics ingester now automatically flushes buffered statistics before the transaction commits. As a side effect, the flushed statement statistics might not have an associated transaction fingerprint ID because the transaction has not yet completed. In such cases, the transaction fingerprint ID cannot be backfilled after the fact. #158527 +- Fixed a bug that allowed columns to be dropped despite being referenced by a routine. This could occur when a column was only referenced as a target column in the `SET` clause of an `UPDATE` statement within the routine. This fix only applies to newly-created routines. In versions prior to v26.1, the fix must be enabled by setting the session variable `prevent_update_set_column_drop`. #158935 +- Fixed a bug that caused newly-created routines to incorrectly prevent dropping columns that were not directly referenced, most notably columns referenced by computed column expressions. The fix is gated behind the session setting `use_improved_routine_deps_triggers_and_computed_cols`, which is off by default prior to v26.1. #158935 +- Fixed a bug where schema changes could fail after a `RESTORE` due to missing session data. #159176 +- The `ascii` built-in function now returns `0` when the input is the empty string instead of an error. 
#159178 +- Fixed a bug where comments associated with constraints were left behind after the column and constraint were dropped. #159180 +- Fixed a bug which could cause prepared statements to fail with the error message `non-const expression` when they contained filters with stable functions. This bug has been present since 25.4.0. #159201 +- Fixed a bug in the TPC-C workload where long-duration runs (>= 4 days or indefinite) would experience periodic performance degradation every 24 hours due to excessive concurrent `UPDATE` statements resetting warehouse and district year-to-date values. #159286 +- Fixed a race condition where queries run after revoking `BYPASSRLS` could return wrong results because cached plans failed to notice the change immediately. #159354 +- Fixed a bug where `TRUNCATE` did not behave correctly with respect to the `schema_locked` storage parameter, and was not being blocked when Logical Data Replication (LDR) was in use. This behavior was incorrect and has been fixed. #159378 +- Fixed a race condition that could occur during context cancellation of an incoming snapshot. #159403 +- Fixed a bug that could cause a panic during changefeed startup if an error occurred while initializing the metrics controller. #159431 +- Fixed a memory accounting issue that could occur when a lease expired due to a SQL liveness session-based timeout. #159527 +- Fixed a bug that caused `SHOW CREATE FUNCTION` to error when the function body contained casts from columns to user-defined types. #159642 +- Fixed a bug where a query predicate could be ignored when all of the following conditions were met: the query used a lookup join to an index, the predicate constrained a column to multiple values (e.g., `column IN (1, 2)`), and the constrained column followed one or more columns with optional multi-value constraints in the index. This bug was introduced in v24.3.0. 
#159722 +- Fixed a bug where rolling back a transaction that had just rolled back a savepoint would block other transactions accessing the same rows for five seconds. #160346 +- Fixed a deadlock that could occur when a statistics creation task panicked. #160348 +- Fixed a bug where CockroachDB could crash when handling decimals with negative scales via the extended PGWire protocol. An error is now returned instead, matching PostgreSQL behavior. #160499 +- Fixed a bug where the `pprof` UI endpoints for allocs, heap, block, and mutex profiles ignored the seconds parameter and returned immediate snapshots instead of delta profiles. #160608 +- Previously, v26.1.0-beta.1 and v26.1.0-beta.2 could encounter a rare process crash when running TTL jobs. This has been fixed. #160674 +- Fixed a bug where schema changes adding a `NOT NULL` constraint could enter an infinite retry loop if a row violated the constraint and contained certain content (e.g., `"EOF"`). Such errors are now correctly classified and don't cause retries. #160780 +- An error will now be reported when the database provided as the argument to a `SHOW REGIONS` or `SHOW SUPER REGIONS` statement does not exist. This bug had been present since version v21.1. #161014 +- Fixed a bug where `CREATE INDEX` on a table with `PARTITION ALL BY` would fail if the partition columns were explicitly included in the primary key definition. #161083 +- Fixed a bug in which inline-hints rewrite rules created with `information_schema.crdb_rewrite_inline_hints` were not correctly applied to statements run with `EXPLAIN ANALYZE`. This bug was introduced in v26.1.0-alpha.2. #161273 +- Fixed a bug where AVRO file imports of data with JSON or binary records could hang indefinitely when encountering stream errors from cloud storage (such as `HTTP/2` `CANCEL` errors). Import jobs will now properly fail with an error instead of hanging. 
#161290 +- Fixed a bug where IMPORT with AVRO data using OCF format could silently lose data if the underlying storage (e.g., S3) returned an error during read. Such errors are now properly reported. Other formats (specified via `data_as_binary_records` and `data_as_json_records` options) are unaffected. The bug has been present since about v20.1. #161318 +- Fixed a bug that prevented successfully injecting hints using `information_schema.crdb_rewrite_inline_hints` for `INSERT`, `UPSERT`, `UPDATE`, and `DELETE` statements. This bug had existed since hint injection was introduced in v26.1.0-alpha.2. #161773 +- Fixed prepared statements failing with `version mismatch` errors when user-defined types are modified between preparation and execution. Prepared statements now automatically detect UDT changes and re-parse to use current type definitions. #161827 +- Previously, CockroachDB could hit an internal error when evaluating built-in functions with `'{}'` as an argument (without explicit type casts, such as on a query like `SELECT cardinality('{}');`). This is now fixed and a regular error is returned instead (matching PostgreSQL behavior). #161835 +- Fixed a bug where the index definition shown in `pg_indexes` for hash sharded indexes with `STORING` columns was not valid SQL. The `STORING` clause now appears in the correct position. #161882 +- Fixed a bug where `DROP TABLE ... CASCADE` would incorrectly drop tables that had triggers or row-level security (RLS) policies referencing the dropped table. Now only the triggers/policies are dropped, and the tables owning them remain intact. #161914 +- Reduced contention when dropping descriptors or running concurrent imports. #161941 +- Fixed a bug where multi-statement explicit transactions using `SAVEPOINT` to recover from certain errors (like duplicate key-value violations) could lose writes performed before the savepoint was created, in rare cases when buffered writes were enabled (off by default). 
This bug was introduced in v25.2. #161972 +- Fixed a bug introduced in v26.1.0-beta.1 in which row-level TTL jobs could encounter GC threshold errors if each node had a large number of spans to process. #161979 +- Fixed an error that occurred when using generic query plans that generate a lookup join on indexes containing identity computed columns. #162036 +- Fixed a bug that could cause changefeeds using Kafka v1 sinks to hang when the changefeed was cancelled. #162058 +- Fixed an internal error `could not find format code for column N` that occurred when executing `EXPLAIN ANALYZE EXECUTE` statements via JDBC or other clients using the PostgreSQL binary protocol. #162115 +- Fixed a bug where statement bundles were missing `CREATE TYPE` statements for user-defined types used as array column types. #162357 +- Fixed a bug in which PL/pgSQL UDFs with many `IF` statements would cause a timeout and/or OOM when executed from a prepared statement. This bug was introduced in v23.2.22, v24.1.15, v24.3.9, v25.1.2, and v25.2.0. #162512 +- Fixed a bug where an error would occur when defining a foreign key on a hash-sharded primary key without explicitly providing the primary key columns. #162608 +- Fixed a bug where generating a debug zip could trigger an out-of-memory (OOM) condition on a node if malformed log entries were present in logs using `json` or `json-compact` formatting. This bug was introduced in v24.1. #163224 +- Fixed a bug that prevented the `optimizer_min_row_count` setting from applying to anti-join expressions, which could lead to bad query plans. The fix is gated behind `optimizer_use_min_row_count_anti_join_fix`, which is on by default on v26.2 and later, and off by default in earlier versions. #163244 +- Fixed an optimizer limitation that prevented index usage on computed columns when querying through views or subqueries containing JSON fetch expressions (such as `->`, `->>`, `#>`, or `#>>`). 
Queries that project JSON expressions matching indexed computed column definitions now correctly use indexes instead of performing full table scans, significantly improving performance for JSON workloads. #163395 - Fixed a bug that could cause incorrect results for any of the following types of statements: - Prepared statements with `LIMIT` expressions where the limit is a placeholder and the given placeholder value is negative. This could result in a successful query when the correct result is an error. - Prepared statements with `OFFSET` expressions where the limit is a placeholder. In some cases this could produce incorrect results. - - Statements within a UDF or stored procedure similar to (1) and (2) where the limit/offset is a reference to an argument of the UDF/SP. [#163500][#163500] -- Dropping a region from the system database no longer leaves `REGIONAL BY TABLE` system tables referencing the removed region, preventing descriptor validation errors. [#163503][#163503] -- Fixed an issue where changefeeds with `execution_locality` filters could fail in multi-tenant clusters with `node descriptor not found` errors. [#163507][#163507] -- Fixed a bug where `EXPLAIN ANALYZE (DEBUG)` statement bundles did not include triggers, their functions, or tables modified by those triggers. The bundle's `schema.sql` file now contains the `CREATE TRIGGER`, `CREATE FUNCTION`, and `CREATE TABLE` statements needed to fully reproduce the query environment when triggers are involved. [#163584][#163584] -- Fixed a rare data race during parallel constraint checks where a fresh descriptor collection could resolve a stale enum type version. This bug was introduced in v26.1.0. [#163883][#163883] -- Fixed a bug where running **changefeeds** with `envelope=enriched` and `enriched_properties` containing `source` would cause failures during a **cluster upgrade**. 
[#163885][#163885] -- Fixed a bug where dropped columns appeared in `pg_catalog.pg_attribute` with the `atttypid` column equal to 2283 (`anyelement`). Now this column will be 0 for dropped columns. This matches PostgreSQL behavior, where `atttypid=0` is used for dropped columns. [#163950][#163950] -- Fixed a race condition/conflict between concurrent `ALTER FUNCTION ... SET SCHEMA` and `DROP SCHEMA` operations. [#164043][#164043] -- Fixed a bug where super region zone configurations did not constrain all replicas to regions within the super region. [#164285][#164285] -- Fixed a bug where CockroachDB returned "cached plan must not change result type" errors during the `Execute` phase instead of the `Bind` phase of the extended pgwire protocol. This caused compatibility issues with drivers like pgx that expect the error before `BindComplete` is sent, particularly when using batch operations with prepared statements after schema changes. [#164406][#164406] -- Statistics histogram collection is now skipped for JSON columns referenced in partial index predicates, except when `sql.stats.non_indexed_json_histograms.enabled` is true (default: false). [#164477][#164477] -- Fixed a bug where import rollback could incorrectly revert data in a table that was already online. This could only occur if an import job was cancelled or failed after the import had already succeeded and the table was made available for use. [#159627][#159627] -- Invalid `avro_schema_prefix` is now caught during statement time. The prefix must start with `[A-Za-z_]` and subsequently contain only `[A-Za-z0-9_]`, as specified in the [Avro specification](https://avro.apache.org/docs/1.8.1/spec.html). [#159869][#159869] + - Statements within a UDF or stored procedure similar to (1) and (2) where the limit/offset is a reference to an argument of the UDF/SP. 
#163500 +- Dropping a region from the system database no longer leaves `REGIONAL BY TABLE` system tables referencing the removed region, preventing descriptor validation errors. #163503 +- Fixed an issue where changefeeds with `execution_locality` filters could fail in multi-tenant clusters with `node descriptor not found` errors. #163507 +- Fixed a bug where `EXPLAIN ANALYZE (DEBUG)` statement bundles did not include triggers, their functions, or tables modified by those triggers. The bundle's `schema.sql` file now contains the `CREATE TRIGGER`, `CREATE FUNCTION`, and `CREATE TABLE` statements needed to fully reproduce the query environment when triggers are involved. #163584 +- Fixed a rare data race during parallel constraint checks where a fresh descriptor collection could resolve a stale enum type version. This bug was introduced in v26.1.0. #163883 +- Fixed a bug where running **changefeeds** with `envelope=enriched` and `enriched_properties` containing `source` would cause failures during a **cluster upgrade**. #163885 +- Fixed a bug where dropped columns appeared in `pg_catalog.pg_attribute` with the `atttypid` column equal to 2283 (`anyelement`). Now this column will be 0 for dropped columns. This matches PostgreSQL behavior, where `atttypid=0` is used for dropped columns. #163950 +- Fixed a race condition/conflict between concurrent `ALTER FUNCTION ... SET SCHEMA` and `DROP SCHEMA` operations. #164043 +- Fixed a bug where super region zone configurations did not constrain all replicas to regions within the super region. #164285 +- Fixed a bug where CockroachDB returned "cached plan must not change result type" errors during the `Execute` phase instead of the `Bind` phase of the extended pgwire protocol. This caused compatibility issues with drivers like pgx that expect the error before `BindComplete` is sent, particularly when using batch operations with prepared statements after schema changes. 
#164406 +- Statistics histogram collection is now skipped for JSON columns referenced in partial index predicates, except when `sql.stats.non_indexed_json_histograms.enabled` is true (default: false). #164477 +- Fixed a bug where import rollback could incorrectly revert data in a table that was already online. This could only occur if an import job was cancelled or failed after the import had already succeeded and the table was made available for use. #159627 +- Invalid `avro_schema_prefix` is now caught at statement time. The prefix must start with `[A-Za-z_]` and subsequently contain only `[A-Za-z0-9_]`, as specified in the [Avro specification](https://avro.apache.org/docs/1.8.1/spec.html). #159869

Performance improvements

-- Added a new session variable, `distsql_prevent_partitioning_soft_limited_scans`, which, when true, prevents scans with soft limits from being planned as multiple TableReaders by the physical planner. This should decrease the initial setup costs of some fully-distributed query plans. [#160051][#160051] -- Database- and table-level backups no longer fetch all object descriptors from disk in order to resolve the backup targets. Now only the objects that are referenced by the targeted objects will be fetched. This improves performance when there are many tables in the cluster. [#157790][#157790] -- Various background tasks and jobs now more actively yield to foreground work when that work is waiting to run. [#159205][#159205] -- Improved changefeed performance when filtering unwatched column families and offline tables by replacing expensive error chain traversal with direct status enum comparisons. [#159745][#159745] -- Fixed a performance regression in `pg_catalog.pg_roles` and `pg_catalog.pg_authid` by avoiding privilege lookups for each row in the table. [#160121][#160121] -- Queries that have comparison expressions with the `levenshtein` built-in are now up to 30% faster. [#160394][#160394] -- The optimizer now better optimizes query plans of statements within UDFs and stored procedures that have `IN` subqueries. [#160503][#160503] -- Significantly reduced WAL write latency when using encryption at rest by properly recycling WAL files instead of deleting and recreating them. [#160784][#160784] -- Optimized the logic that applies zone config constraints so it no longer fetches all descriptors in the cluster during background constraint reconciliation. [#160966][#160966] -- The optimizer can now better handle filters that redundantly `unnest()` an array placeholder argument within an `IN` or `ANY` filter. Previously, this pattern could prevent the filters from being used to constrain a table scan. 
Example: `SELECT k FROM a WHERE k = ANY(SELECT * FROM unnest($1:::INT[]))` [#161816][#161816] -- Improved changefeed checkpointing performance when changefeeds are lagging. Previously, checkpoint updates could be redundantly applied multiple times per checkpoint operation. [#162546][#162546] -- The query optimizer now eliminates redundant filter and projection operators over inputs with zero cardinality, even when the filter or projection expressions are not leakproof. This produces simpler, more efficient query plans in cases where joins or other operations fold to zero rows. [#164212][#164212] +- Added a new session variable, `distsql_prevent_partitioning_soft_limited_scans`, which, when true, prevents scans with soft limits from being planned as multiple TableReaders by the physical planner. This should decrease the initial setup costs of some fully-distributed query plans. #160051 +- Database- and table-level backups no longer fetch all object descriptors from disk in order to resolve the backup targets. Now only the objects that are referenced by the targeted objects will be fetched. This improves performance when there are many tables in the cluster. #157790 +- Various background tasks and jobs now more actively yield to foreground work when that work is waiting to run. #159205 +- Improved changefeed performance when filtering unwatched column families and offline tables by replacing expensive error chain traversal with direct status enum comparisons. #159745 +- Fixed a performance regression in `pg_catalog.pg_roles` and `pg_catalog.pg_authid` by avoiding privilege lookups for each row in the table. #160121 +- Queries that have comparison expressions with the `levenshtein` built-in are now up to 30% faster. #160394 +- The optimizer now better optimizes query plans of statements within UDFs and stored procedures that have `IN` subqueries. 
#160503 +- Significantly reduced WAL write latency when using encryption at rest by properly recycling WAL files instead of deleting and recreating them. #160784 +- Optimized the logic that applies zone config constraints so it no longer fetches all descriptors in the cluster during background constraint reconciliation. #160966 +- The optimizer can now better handle filters that redundantly `unnest()` an array placeholder argument within an `IN` or `ANY` filter. Previously, this pattern could prevent the filters from being used to constrain a table scan. Example: `SELECT k FROM a WHERE k = ANY(SELECT * FROM unnest($1:::INT[]))` #161816 +- Improved changefeed checkpointing performance when changefeeds are lagging. Previously, checkpoint updates could be redundantly applied multiple times per checkpoint operation. #162546 +- The query optimizer now eliminates redundant filter and projection operators over inputs with zero cardinality, even when the filter or projection expressions are not leakproof. This produces simpler, more efficient query plans in cases where joins or other operations fold to zero rows. 
#164212 -[#161806]: https://github.com/cockroachdb/cockroach/pull/161806 -[#164236]: https://github.com/cockroachdb/cockroach/pull/164236 -[#158527]: https://github.com/cockroachdb/cockroach/pull/158527 -[#159176]: https://github.com/cockroachdb/cockroach/pull/159176 -[#159205]: https://github.com/cockroachdb/cockroach/pull/159205 -[#163296]: https://github.com/cockroachdb/cockroach/pull/163296 -[#156672]: https://github.com/cockroachdb/cockroach/pull/156672 -[#162036]: https://github.com/cockroachdb/cockroach/pull/162036 -[#163500]: https://github.com/cockroachdb/cockroach/pull/163500 -[#161816]: https://github.com/cockroachdb/cockroach/pull/161816 -[#161915]: https://github.com/cockroachdb/cockroach/pull/161915 -[#160503]: https://github.com/cockroachdb/cockroach/pull/160503 -[#163765]: https://github.com/cockroachdb/cockroach/pull/163765 -[#160416]: https://github.com/cockroachdb/cockroach/pull/160416 -[#161265]: https://github.com/cockroachdb/cockroach/pull/161265 -[#158029]: https://github.com/cockroachdb/cockroach/pull/158029 -[#160346]: https://github.com/cockroachdb/cockroach/pull/160346 -[#160674]: https://github.com/cockroachdb/cockroach/pull/160674 -[#161941]: https://github.com/cockroachdb/cockroach/pull/161941 -[#164468]: https://github.com/cockroachdb/cockroach/pull/164468 -[#161422]: https://github.com/cockroachdb/cockroach/pull/161422 -[#161509]: https://github.com/cockroachdb/cockroach/pull/161509 -[#161011]: https://github.com/cockroachdb/cockroach/pull/161011 -[#164285]: https://github.com/cockroachdb/cockroach/pull/164285 -[#160901]: https://github.com/cockroachdb/cockroach/pull/160901 -[#161226]: https://github.com/cockroachdb/cockroach/pull/161226 -[#159354]: https://github.com/cockroachdb/cockroach/pull/159354 -[#161273]: https://github.com/cockroachdb/cockroach/pull/161273 -[#163883]: https://github.com/cockroachdb/cockroach/pull/163883 -[#160051]: https://github.com/cockroachdb/cockroach/pull/160051 -[#159659]: 
https://github.com/cockroachdb/cockroach/pull/159659 -[#161880]: https://github.com/cockroachdb/cockroach/pull/161880 -[#159499]: https://github.com/cockroachdb/cockroach/pull/159499 -[#160499]: https://github.com/cockroachdb/cockroach/pull/160499 -[#160608]: https://github.com/cockroachdb/cockroach/pull/160608 -[#159178]: https://github.com/cockroachdb/cockroach/pull/159178 -[#163947]: https://github.com/cockroachdb/cockroach/pull/163947 -[#164406]: https://github.com/cockroachdb/cockroach/pull/164406 -[#159126]: https://github.com/cockroachdb/cockroach/pull/159126 -[#162115]: https://github.com/cockroachdb/cockroach/pull/162115 -[#162608]: https://github.com/cockroachdb/cockroach/pull/162608 -[#162546]: https://github.com/cockroachdb/cockroach/pull/162546 -[#159189]: https://github.com/cockroachdb/cockroach/pull/159189 -[#163199]: https://github.com/cockroachdb/cockroach/pull/163199 -[#163427]: https://github.com/cockroachdb/cockroach/pull/163427 -[#160538]: https://github.com/cockroachdb/cockroach/pull/160538 -[#164212]: https://github.com/cockroachdb/cockroach/pull/164212 -[#159627]: https://github.com/cockroachdb/cockroach/pull/159627 -[#159231]: https://github.com/cockroachdb/cockroach/pull/159231 -[#160780]: https://github.com/cockroachdb/cockroach/pull/160780 -[#163224]: https://github.com/cockroachdb/cockroach/pull/163224 -[#163507]: https://github.com/cockroachdb/cockroach/pull/163507 -[#159090]: https://github.com/cockroachdb/cockroach/pull/159090 -[#159162]: https://github.com/cockroachdb/cockroach/pull/159162 -[#161014]: https://github.com/cockroachdb/cockroach/pull/161014 -[#161979]: https://github.com/cockroachdb/cockroach/pull/161979 -[#160121]: https://github.com/cockroachdb/cockroach/pull/160121 -[#162329]: https://github.com/cockroachdb/cockroach/pull/162329 -[#159787]: https://github.com/cockroachdb/cockroach/pull/159787 -[#161294]: https://github.com/cockroachdb/cockroach/pull/161294 -[#162633]: 
https://github.com/cockroachdb/cockroach/pull/162633 -[#164369]: https://github.com/cockroachdb/cockroach/pull/164369 -[#160798]: https://github.com/cockroachdb/cockroach/pull/160798 -[#159378]: https://github.com/cockroachdb/cockroach/pull/159378 -[#159431]: https://github.com/cockroachdb/cockroach/pull/159431 -[#161882]: https://github.com/cockroachdb/cockroach/pull/161882 -[#160784]: https://github.com/cockroachdb/cockroach/pull/160784 -[#153361]: https://github.com/cockroachdb/cockroach/pull/153361 -[#156963]: https://github.com/cockroachdb/cockroach/pull/156963 -[#162286]: https://github.com/cockroachdb/cockroach/pull/162286 -[#161043]: https://github.com/cockroachdb/cockroach/pull/161043 -[#163395]: https://github.com/cockroachdb/cockroach/pull/163395 -[#161211]: https://github.com/cockroachdb/cockroach/pull/161211 -[#164405]: https://github.com/cockroachdb/cockroach/pull/164405 -[#158935]: https://github.com/cockroachdb/cockroach/pull/158935 -[#160348]: https://github.com/cockroachdb/cockroach/pull/160348 -[#161083]: https://github.com/cockroachdb/cockroach/pull/161083 -[#159745]: https://github.com/cockroachdb/cockroach/pull/159745 -[#161318]: https://github.com/cockroachdb/cockroach/pull/161318 -[#164225]: https://github.com/cockroachdb/cockroach/pull/164225 -[#158835]: https://github.com/cockroachdb/cockroach/pull/158835 -[#158999]: https://github.com/cockroachdb/cockroach/pull/158999 -[#160812]: https://github.com/cockroachdb/cockroach/pull/160812 -[#163348]: https://github.com/cockroachdb/cockroach/pull/163348 -[#162105]: https://github.com/cockroachdb/cockroach/pull/162105 -[#163834]: https://github.com/cockroachdb/cockroach/pull/163834 -[#163244]: https://github.com/cockroachdb/cockroach/pull/163244 -[#163584]: https://github.com/cockroachdb/cockroach/pull/163584 -[#163867]: https://github.com/cockroachdb/cockroach/pull/163867 -[#161998]: https://github.com/cockroachdb/cockroach/pull/161998 -[#161835]: 
https://github.com/cockroachdb/cockroach/pull/161835 -[#160956]: https://github.com/cockroachdb/cockroach/pull/160956 -[#159286]: https://github.com/cockroachdb/cockroach/pull/159286 -[#161914]: https://github.com/cockroachdb/cockroach/pull/161914 -[#162512]: https://github.com/cockroachdb/cockroach/pull/162512 -[#162528]: https://github.com/cockroachdb/cockroach/pull/162528 -[#161230]: https://github.com/cockroachdb/cockroach/pull/161230 -[#163266]: https://github.com/cockroachdb/cockroach/pull/163266 -[#163503]: https://github.com/cockroachdb/cockroach/pull/163503 -[#156771]: https://github.com/cockroachdb/cockroach/pull/156771 -[#161924]: https://github.com/cockroachdb/cockroach/pull/161924 -[#161972]: https://github.com/cockroachdb/cockroach/pull/161972 -[#160129]: https://github.com/cockroachdb/cockroach/pull/160129 -[#163868]: https://github.com/cockroachdb/cockroach/pull/163868 -[#159527]: https://github.com/cockroachdb/cockroach/pull/159527 -[#159436]: https://github.com/cockroachdb/cockroach/pull/159436 -[#161062]: https://github.com/cockroachdb/cockroach/pull/161062 -[#161773]: https://github.com/cockroachdb/cockroach/pull/161773 -[#164043]: https://github.com/cockroachdb/cockroach/pull/164043 -[#161925]: https://github.com/cockroachdb/cockroach/pull/161925 -[#162302]: https://github.com/cockroachdb/cockroach/pull/162302 -[#160137]: https://github.com/cockroachdb/cockroach/pull/160137 -[#160716]: https://github.com/cockroachdb/cockroach/pull/160716 -[#159642]: https://github.com/cockroachdb/cockroach/pull/159642 -[#160394]: https://github.com/cockroachdb/cockroach/pull/160394 -[#159869]: https://github.com/cockroachdb/cockroach/pull/159869 -[#161050]: https://github.com/cockroachdb/cockroach/pull/161050 -[#162058]: https://github.com/cockroachdb/cockroach/pull/162058 -[#164477]: https://github.com/cockroachdb/cockroach/pull/164477 -[#164037]: https://github.com/cockroachdb/cockroach/pull/164037 -[#150706]: 
https://github.com/cockroachdb/cockroach/pull/150706 -[#161827]: https://github.com/cockroachdb/cockroach/pull/161827 -[#160576]: https://github.com/cockroachdb/cockroach/pull/160576 -[#146250]: https://github.com/cockroachdb/cockroach/pull/146250 -[#159558]: https://github.com/cockroachdb/cockroach/pull/159558 -[#159722]: https://github.com/cockroachdb/cockroach/pull/159722 -[#161328]: https://github.com/cockroachdb/cockroach/pull/161328 -[#160486]: https://github.com/cockroachdb/cockroach/pull/160486 -[#163430]: https://github.com/cockroachdb/cockroach/pull/163430 -[#159201]: https://github.com/cockroachdb/cockroach/pull/159201 -[#162357]: https://github.com/cockroachdb/cockroach/pull/162357 -[#157790]: https://github.com/cockroachdb/cockroach/pull/157790 -[#162345]: https://github.com/cockroachdb/cockroach/pull/162345 -[#159180]: https://github.com/cockroachdb/cockroach/pull/159180 -[#163885]: https://github.com/cockroachdb/cockroach/pull/163885 -[#163950]: https://github.com/cockroachdb/cockroach/pull/163950 -[#159330]: https://github.com/cockroachdb/cockroach/pull/159330 -[#163543]: https://github.com/cockroachdb/cockroach/pull/163543 -[#163930]: https://github.com/cockroachdb/cockroach/pull/163930 -[#159403]: https://github.com/cockroachdb/cockroach/pull/159403 -[#161290]: https://github.com/cockroachdb/cockroach/pull/161290 -[#160966]: https://github.com/cockroachdb/cockroach/pull/160966 diff --git a/src/current/_includes/releases/v26.2/v26.2.0-alpha.2.md b/src/current/_includes/releases/v26.2/v26.2.0-alpha.2.md index c0063430223..8b58e95a59e 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0-alpha.2.md +++ b/src/current/_includes/releases/v26.2/v26.2.0-alpha.2.md @@ -6,103 +6,63 @@ Release Date: March 18, 2026

Backward-incompatible changes

-- When selecting from a view, the view owner's privileges on the underlying tables are now checked. Previously, no privilege checks were performed on the underlying tables, so a view would continue to work even after the owner lost access to the underlying tables. This also affects row-level security (RLS): the view owner's RLS policies are now enforced instead of the invoker's. If this causes issues, you can restore the previous behavior by setting the cluster setting `sql.auth.skip_underlying_view_privilege_checks.enabled` to `true`. [#164664][#164664] -- The `bulkio.index_backfill.elastic_control.enabled` cluster setting is now enabled by default, allowing index backfill operations to integrate with elastic CPU control and automatically throttle based on available resources. [#163866][#163866] -- Lowered the default value of the `sql.guardrails.max_row_size_log` cluster setting from `64 MiB` to `16 MiB`, and the default value of `sql.guardrails.max_row_size_err` from `512 MiB` to `80 MiB`. These settings control the maximum size of a row (or column family) that SQL can write before logging a warning or returning an error, respectively. The previous defaults were high enough that large rows would hit other limits first (such as the Raft command size limit or the backup SST size limit), producing confusing errors. The new defaults align with existing system limits to provide clearer diagnostics. If your workload legitimately writes rows larger than these new defaults, you can restore the previous behavior by increasing these settings. [#164468][#164468] -- Lowered the default value of the `changefeed.max_retry_backoff` cluster setting from `10m` to `30s` to reduce changefeed lag during rolling restarts. [#164874][#164874] -- Using `ALTER CHANGEFEED ADD ...` for a table that is already watched will now return an error: `target already watched by changefeed`. 
[#164433][#164433] -- Explicit `AS OF SYSTEM TIME` queries are no longer allowed on a Physical Cluster Replication (PCR) reader virtual cluster, unless the `bypass_pcr_reader_catalog_aost` session variable is set to `true`. This session variable should only be used during investigation or for changing cluster settings specific to the reader virtual cluster. [#165382][#165382] -- The `enable_super_regions` session variable and the `sql.defaults.super_regions.enabled` cluster setting are no longer required to use super regions. Super region DDL operations (`ADD`, `DROP`, and `ALTER SUPER REGION`) now work without any experimental flag. The session variable and cluster setting are deprecated, and existing scripts that set them will continue to work without error. [#165227][#165227] +- When selecting from a view, the view owner's privileges on the underlying tables are now checked. Previously, no privilege checks were performed on the underlying tables, so a view would continue to work even after the owner lost access to the underlying tables. This also affects row-level security (RLS): the view owner's RLS policies are now enforced instead of the invoker's. If this causes issues, you can restore the previous behavior by setting the cluster setting `sql.auth.skip_underlying_view_privilege_checks.enabled` to `true`. #164664 +- The `bulkio.index_backfill.elastic_control.enabled` cluster setting is now enabled by default, allowing index backfill operations to integrate with elastic CPU control and automatically throttle based on available resources. #163866 +- Lowered the default value of the `sql.guardrails.max_row_size_log` cluster setting from `64 MiB` to `16 MiB`, and the default value of `sql.guardrails.max_row_size_err` from `512 MiB` to `80 MiB`. These settings control the maximum size of a row (or column family) that SQL can write before logging a warning or returning an error, respectively. 
The previous defaults were high enough that large rows would hit other limits first (such as the Raft command size limit or the backup SST size limit), producing confusing errors. The new defaults align with existing system limits to provide clearer diagnostics. If your workload legitimately writes rows larger than these new defaults, you can restore the previous behavior by increasing these settings. #164468 +- Lowered the default value of the `changefeed.max_retry_backoff` cluster setting from `10m` to `30s` to reduce changefeed lag during rolling restarts. #164874 +- Using `ALTER CHANGEFEED ADD ...` for a table that is already watched will now return an error: `target already watched by changefeed`. #164433 +- Explicit `AS OF SYSTEM TIME` queries are no longer allowed on a Physical Cluster Replication (PCR) reader virtual cluster, unless the `bypass_pcr_reader_catalog_aost` session variable is set to `true`. This session variable should only be used during investigation or for changing cluster settings specific to the reader virtual cluster. #165382 +- The `enable_super_regions` session variable and the `sql.defaults.super_regions.enabled` cluster setting are no longer required to use super regions. Super region DDL operations (`ADD`, `DROP`, and `ALTER SUPER REGION`) now work without any experimental flag. The session variable and cluster setting are deprecated, and existing scripts that set them will continue to work without error. #165227

Security updates

-- When the `security.provisioning.ldap.enabled` cluster setting is enabled, LDAP-authenticated DB Console logins now update the `estimated_last_login_time` column in the `system.users` table. [#163400][#163400] -- When the `security.provisioning.oidc.enabled` cluster setting is enabled, OIDC-authenticated DB Console logins now populate the `estimated_last_login_time` column in `system.users`, allowing administrators to track when OIDC users last accessed the DB Console. [#164129][#164129] +- When the `security.provisioning.ldap.enabled` cluster setting is enabled, LDAP-authenticated DB Console logins now update the `estimated_last_login_time` column in the `system.users` table. #163400 +- When the `security.provisioning.oidc.enabled` cluster setting is enabled, OIDC-authenticated DB Console logins now populate the `estimated_last_login_time` column in `system.users`, allowing administrators to track when OIDC users last accessed the DB Console. #164129

SQL language changes

-- CockroachDB now supports `COMMIT AND CHAIN` and `ROLLBACK AND CHAIN` (as well as `END AND CHAIN` and `ABORT AND CHAIN`). These statements finish the current transaction and immediately start a new explicit transaction with the same isolation level, priority, and read/write mode as the previous transaction. `AND NO CHAIN` is also accepted for PostgreSQL compatibility but behaves identically to a plain `COMMIT` or `ROLLBACK`. [#164403][#164403] -- `RESTORE TABLE/DATABASE` now supports the `WITH GRANTS` option, which restores grants on restore targets for users in the restoring cluster. Note that using this option with `new_db_name` will cause the new database to inherit the privileges in the backed-up database. [#164444][#164444] -- Added support for a new statement hint used to change session variable values for the duration of a single statement without application changes. The new hint type can be created using the `information_schema.crdb_set_session_variable_hint` built-in function. The override applies only when executing a statement matching the given fingerprint and does not persist on the session or surrounding transaction. [#164909][#164909] -- Added an index storage parameter `skip_unique_checks` that can be used to disable unique constraint checks for indexes with implicit partition columns, including indexes in `REGIONAL BY ROW` tables. This should **only** be used if the application can guarantee uniqueness, for example, by using external UUID values or relying on a `unique_rowid()` default value. Incorrectly applying this setting when uniqueness is not guaranteed by the application could result in logically duplicate keys in different partitions of a unique index. [#163378][#163378] -- Views now support the PostgreSQL-compatible `security_invoker` option. When set via `CREATE VIEW ... 
WITH (security_invoker)` or `ALTER VIEW SET (security_invoker = true)`, privilege checks on the underlying tables are performed as the querying user rather than the view owner. The `security_invoker` option can be reset with `ALTER VIEW ... RESET (security_invoker)`. [#164184][#164184] -- Added the `ST_AsMVT` aggregate function to generate Mapbox Vector Tile (MVT) binary format from geospatial data, providing PostgreSQL/PostGIS compatibility for web mapping applications. [#150663][#150663] -- Introduced the `information_schema.crdb_delete_statement_hints` built-in function, which accepts 2 kinds of payload: `row_id` (int): the primary key of `system.statement_hints`; `fingerprint` (string). The function returns the number of rows deleted. [#163891][#163891] -- Added `to_date(text, text)` and `to_timestamp(text, text)` SQL functions that parse dates and timestamps from formatted strings using PostgreSQL-compatible format patterns. For example, `to_date('2023-03-15', 'YYYY-MM-DD')` returns a date, and `to_timestamp('2023-03-15 14:30:45', 'YYYY-MM-DD HH24:MI:SS')` returns a `timestamptz`. [#164672][#164672] -- Added PostgreSQL-compatible numeric formatting functions `to_char(int, text)`, `to_char(float, text)`, `to_char(numeric, text)`, and `to_number(text, text)`. These functions format numbers as strings and parse formatted strings back to numbers using the PostgreSQL formatting syntax. [#164672][#164672] -- Added support for importing Parquet files using the `IMPORT` statement. Parquet files can be imported from cloud storage URLs (`s3://`, `gs://`, `azure://`) or HTTP servers that support range requests (`Accept-Ranges: bytes`). This feature supports column-level compression formats (Snappy, GZIP, ZSTD, Brotli, etc.) as specified in the Parquet file format, but does not support additional file-level compression (e.g., `.parquet.gz` files). 
Nested Parquet types (lists, maps, structs) are not supported; only flat schemas with primitive types are supported at this time. [#163991][#163991] -- CockroachDB now includes `information_schema.crdb_rewrite_inline_hints` statements in the `schema.sql` file of a statement diagnostics bundle for re-creating all the statement hints bound to the statement. The hint recreation statements are sorted in ascending order of the original hint creation time. [#164164][#164164] -- `ALTER TABLE ... SET LOCALITY` is now fully executed using the declarative schema changer, improving reliability and consistency with other schema change operations. [#161763][#161763] -- During an `INSPECT` run, a new check validates unique column values in `REGIONAL BY ROW` tables. [#164449][#164449] -- Active Session History tables are now accessible via `information_schema.crdb_node_active_session_history` and `information_schema.crdb_cluster_active_session_history`, in addition to the existing `crdb_internal` tables. This improves discoverability when browsing `information_schema` for available metadata. [#164969][#164969] +- CockroachDB now supports `COMMIT AND CHAIN` and `ROLLBACK AND CHAIN` (as well as `END AND CHAIN` and `ABORT AND CHAIN`). These statements finish the current transaction and immediately start a new explicit transaction with the same isolation level, priority, and read/write mode as the previous transaction. `AND NO CHAIN` is also accepted for PostgreSQL compatibility but behaves identically to a plain `COMMIT` or `ROLLBACK`. #164403 +- `RESTORE TABLE/DATABASE` now supports the `WITH GRANTS` option, which restores grants on restore targets for users in the restoring cluster. Note that using this option with `new_db_name` will cause the new database to inherit the privileges in the backed-up database. #164444 +- Added support for a new statement hint used to change session variable values for the duration of a single statement without application changes. 
The new hint type can be created using the `information_schema.crdb_set_session_variable_hint` built-in function. The override applies only when executing a statement matching the given fingerprint and does not persist on the session or surrounding transaction. #164909 +- Added an index storage parameter `skip_unique_checks` that can be used to disable unique constraint checks for indexes with implicit partition columns, including indexes in `REGIONAL BY ROW` tables. This should **only** be used if the application can guarantee uniqueness, for example, by using external UUID values or relying on a `unique_rowid()` default value. Incorrectly applying this setting when uniqueness is not guaranteed by the application could result in logically duplicate keys in different partitions of a unique index. #163378 +- Views now support the PostgreSQL-compatible `security_invoker` option. When set via `CREATE VIEW ... WITH (security_invoker)` or `ALTER VIEW SET (security_invoker = true)`, privilege checks on the underlying tables are performed as the querying user rather than the view owner. The `security_invoker` option can be reset with `ALTER VIEW ... RESET (security_invoker)`. #164184 +- Added the `ST_AsMVT` aggregate function to generate Mapbox Vector Tile (MVT) binary format from geospatial data, providing PostgreSQL/PostGIS compatibility for web mapping applications. #150663 +- Introduced the `information_schema.crdb_delete_statement_hints` built-in function, which accepts 2 kinds of payload: `row_id` (int): the primary key of `system.statement_hints`; `fingerprint` (string). The function returns the number of rows deleted. #163891 +- Added `to_date(text, text)` and `to_timestamp(text, text)` SQL functions that parse dates and timestamps from formatted strings using PostgreSQL-compatible format patterns. For example, `to_date('2023-03-15', 'YYYY-MM-DD')` returns a date, and `to_timestamp('2023-03-15 14:30:45', 'YYYY-MM-DD HH24:MI:SS')` returns a `timestamptz`. 
#164672 +- Added PostgreSQL-compatible numeric formatting functions `to_char(int, text)`, `to_char(float, text)`, `to_char(numeric, text)`, and `to_number(text, text)`. These functions format numbers as strings and parse formatted strings back to numbers using the PostgreSQL formatting syntax. #164672 +- Added support for importing Parquet files using the `IMPORT` statement. Parquet files can be imported from cloud storage URLs (`s3://`, `gs://`, `azure://`) or HTTP servers that support range requests (`Accept-Ranges: bytes`). This feature supports column-level compression formats (Snappy, GZIP, ZSTD, Brotli, etc.) as specified in the Parquet file format, but does not support additional file-level compression (e.g., `.parquet.gz` files). Nested Parquet types (lists, maps, structs) are not supported; only flat schemas with primitive types are supported at this time. #163991 +- CockroachDB now includes `information_schema.crdb_rewrite_inline_hints` statements in the `schema.sql` file of a statement diagnostics bundle for re-creating all the statement hints bound to the statement. The hint recreation statements are sorted in ascending order of the original hint creation time. #164164 +- `ALTER TABLE ... SET LOCALITY` is now fully executed using the declarative schema changer, improving reliability and consistency with other schema change operations. #161763 +- During an `INSPECT` run, a new check validates unique column values in `REGIONAL BY ROW` tables. #164449 +- Active Session History tables are now accessible via `information_schema.crdb_node_active_session_history` and `information_schema.crdb_cluster_active_session_history`, in addition to the existing `crdb_internal` tables. This improves discoverability when browsing `information_schema` for available metadata. #164969

Operational changes

-- Promoted the following admission control metrics to `ESSENTIAL` status, making them more discoverable in monitoring dashboards and troubleshooting workflows: `admission.wait_durations.*` (`sql-kv-response`, `sql-sql-response`, `elastic-stores`, `elastic-cpu`), `admission.granter.*_exhausted_duration.kv` (`slots`, `io_tokens`, `elastic_io_tokens`), `admission.elastic_cpu.nanos_exhausted_duration`, `kvflowcontrol.eval_wait.*.duration` (`elastic`, `regular`), and `kvflowcontrol.send_queue.bytes`. These metrics track admission control wait times, resource exhaustion, and replication flow control, providing visibility into cluster health and performance throttling. [#164827][#164827] -- Added periodic ASH workload summary logging to the `OPS` channel. Two new cluster settings, `obs.ash.log_interval` (default: `10m`) and `obs.ash.log_top_n` (default: `10`), control how often and how many entries are emitted. Each summary reports the most frequently sampled workloads grouped by event type, event name, and workload ID, providing visibility into workload patterns that previously existed only in memory. [#165093][#165093] -- Previously, altering a changefeed to add a table with an initial scan during a schema change backfill, or while the changefeed had lagging ranges, would sometimes be rejected. This is no longer the case. [#164433][#164433] +- Promoted the following admission control metrics to `ESSENTIAL` status, making them more discoverable in monitoring dashboards and troubleshooting workflows: `admission.wait_durations.*` (`sql-kv-response`, `sql-sql-response`, `elastic-stores`, `elastic-cpu`), `admission.granter.*_exhausted_duration.kv` (`slots`, `io_tokens`, `elastic_io_tokens`), `admission.elastic_cpu.nanos_exhausted_duration`, `kvflowcontrol.eval_wait.*.duration` (`elastic`, `regular`), and `kvflowcontrol.send_queue.bytes`. 
These metrics track admission control wait times, resource exhaustion, and replication flow control, providing visibility into cluster health and performance throttling. #164827 +- Added periodic ASH workload summary logging to the `OPS` channel. Two new cluster settings, `obs.ash.log_interval` (default: `10m`) and `obs.ash.log_top_n` (default: `10`), control how often and how many entries are emitted. Each summary reports the most frequently sampled workloads grouped by event type, event name, and workload ID, providing visibility into workload patterns that previously existed only in memory. #165093 +- Previously, altering a changefeed to add a table with an initial scan during a schema change backfill, or while the changefeed had lagging ranges, would sometimes be rejected. This is no longer the case. #164433

Bug fixes

-- JWT authentication now returns a clear error when HTTP requests to fetch JWKS or OpenID configuration return non-`2xx` status codes, instead of silently passing the response body to the JSON parser. [#158294][#158294] -- Fixed an issue where `ORDER BY` expressions containing subqueries with non-default `NULLS` ordering (e.g., `NULLS LAST` for `ASC`, `NULLS FIRST` for `DESC`) could cause an error during query planning. [#163230][#163230] -- Fixed a bug that caused `ALTER INDEX ... PARTITION BY` statements to fail on a nonexistent index even if `IF EXISTS` was used. [#163378][#163378] -- Fixed a bug where incremental backups taken after downgrading a mixed-version cluster to v25.4 could result in inconsistent backup indexes. [#164301][#164301] -- Altering a non-scan-only changefeed to add a target with `initial_scan='only'` now returns an error instead of not doing a scan and adding the target to the watched targets list. [#164433][#164433] -- Fixed a bug where adding a target without an initial scan, dropping that same target, and then adding it again with an initial scan would result in the target being added without an initial scan. [#164433][#164433] -- Fixed a bug where altering a changefeed to add a table with an initial scan during a schema change backfill or while the changefeed had lagging ranges would sometimes be rejected. [#164433][#164433] -- Fixed a bug where creating a table with a user-defined type column failed when the user had `USAGE` privilege on the base type but not on its implicit array type. The array type now inherits privileges from the base type, matching PostgreSQL behavior. [#164471][#164471] -- `ALTER TABLE ... ALTER PRIMARY KEY USING COLUMNS (col) USING HASH` is now correctly treated as a no-op when the table already has a matching hash-sharded primary key, instead of attempting an unnecessary schema change. 
[#164557][#164557] -- Fixed a bug in `appBatchStats.merge` where the `numEmptyEntries` field was not being properly accumulated when merging statistics. This could result in incorrect statistics tracking for empty Raft log entries. [#164671][#164671] -- Fixed a bug where `ALTER TABLE ... ALTER COLUMN ... SET DATA TYPE` from an unbounded string or bit type to a bounded type with a length `>= 64` (for example, `STRING` to `STRING(100)`) would skip validating existing data against the new length constraint. This could leave rows in the table that violate the column's type, with values longer than the specified limit. [#164739][#164739] -- Fixed a bug where `RESTORE` with `skip_missing_foreign_keys` could fail with an internal error if the restored table had an in-progress schema change that added a foreign key constraint whose referenced table was not included in the restore. [#164757][#164757] -- Fixed a bug introduced in v25.4+ where setting `min_checkpoint_frequency` to `0` prevented changefeeds from advancing their resolved timestamp (high-water mark) and emitting resolved messages. Note that setting `min_checkpoint_frequency` to lower than `500ms` is **not** recommended as it may cause degraded changefeed performance. [#164765][#164765] -- Fixed a bug where CockroachDB did not always promptly respond to the statement timeout when performing a hash join with `ON` filter that is mostly `false`. [#164879][#164879] -- Fixed a bug where `IMPORT` error messages could include unredacted cloud storage credentials from the source URI. Credentials are now stripped from URIs before they appear in error messages. [#164881][#164881] -- Changefeed retry backoff now resets when the changefeed's resolved timestamp (high-water mark) advances between retries, in addition to the existing time-based reset (configured by `changefeed.retry_backoff_reset`). This prevents transient rolling restarts from causing changefeeds to fall behind because of excessive backoff. 
[#164933][#164933] -- Fixed a rare race condition where `SHOW CREATE TABLE` could fail with a `"relation does not exist"` error if a table referenced by a foreign key was being concurrently dropped. [#164942][#164942] -- Fixed a bug that had previously allowed the primary and secondary to be in separate super regions. [#164943][#164943] -- Fixed a bug that could cause row sampling for table statistics to crash a node due to a data race when processing a collated string column with values larger than 400 bytes. This bug has existed since before v23.1. [#165260][#165260] -- The `information_schema.crdb_node_active_session_history` and `information_schema.crdb_cluster_active_session_history` views now include the `app_name` column, matching the underlying `crdb_internal` tables. [#165367][#165367] +- JWT authentication now returns a clear error when HTTP requests to fetch JWKS or OpenID configuration return non-`2xx` status codes, instead of silently passing the response body to the JSON parser. #158294 +- Fixed an issue where `ORDER BY` expressions containing subqueries with non-default `NULLS` ordering (e.g., `NULLS LAST` for `ASC`, `NULLS FIRST` for `DESC`) could cause an error during query planning. #163230 +- Fixed a bug that caused `ALTER INDEX ... PARTITION BY` statements to fail on a nonexistent index even if `IF EXISTS` was used. #163378 +- Fixed a bug where incremental backups taken after downgrading a mixed-version cluster to v25.4 could result in inconsistent backup indexes. #164301 +- Altering a non-scan-only changefeed to add a target with `initial_scan='only'` now returns an error instead of not doing a scan and adding the target to the watched targets list. #164433 +- Fixed a bug where adding a target without an initial scan, dropping that same target, and then adding it again with an initial scan would result in the target being added without an initial scan. 
#164433 +- Fixed a bug where altering a changefeed to add a table with an initial scan during a schema change backfill or while the changefeed had lagging ranges would sometimes be rejected. #164433 +- Fixed a bug where creating a table with a user-defined type column failed when the user had `USAGE` privilege on the base type but not on its implicit array type. The array type now inherits privileges from the base type, matching PostgreSQL behavior. #164471 +- `ALTER TABLE ... ALTER PRIMARY KEY USING COLUMNS (col) USING HASH` is now correctly treated as a no-op when the table already has a matching hash-sharded primary key, instead of attempting an unnecessary schema change. #164557 +- Fixed a bug in `appBatchStats.merge` where the `numEmptyEntries` field was not being properly accumulated when merging statistics. This could result in incorrect statistics tracking for empty Raft log entries. #164671 +- Fixed a bug where `ALTER TABLE ... ALTER COLUMN ... SET DATA TYPE` from an unbounded string or bit type to a bounded type with a length `>= 64` (for example, `STRING` to `STRING(100)`) would skip validating existing data against the new length constraint. This could leave rows in the table that violate the column's type, with values longer than the specified limit. #164739 +- Fixed a bug where `RESTORE` with `skip_missing_foreign_keys` could fail with an internal error if the restored table had an in-progress schema change that added a foreign key constraint whose referenced table was not included in the restore. #164757 +- Fixed a bug introduced in v25.4+ where setting `min_checkpoint_frequency` to `0` prevented changefeeds from advancing their resolved timestamp (high-water mark) and emitting resolved messages. Note that setting `min_checkpoint_frequency` to lower than `500ms` is **not** recommended as it may cause degraded changefeed performance. 
#164765 +- Fixed a bug where CockroachDB did not always promptly respond to the statement timeout when performing a hash join with `ON` filter that is mostly `false`. #164879 +- Fixed a bug where `IMPORT` error messages could include unredacted cloud storage credentials from the source URI. Credentials are now stripped from URIs before they appear in error messages. #164881 +- Changefeed retry backoff now resets when the changefeed's resolved timestamp (high-water mark) advances between retries, in addition to the existing time-based reset (configured by `changefeed.retry_backoff_reset`). This prevents transient rolling restarts from causing changefeeds to fall behind because of excessive backoff. #164933 +- Fixed a rare race condition where `SHOW CREATE TABLE` could fail with a `"relation does not exist"` error if a table referenced by a foreign key was being concurrently dropped. #164942 +- Fixed a bug that had previously allowed the primary and secondary to be in separate super regions. #164943 +- Fixed a bug that could cause row sampling for table statistics to crash a node due to a data race when processing a collated string column with values larger than 400 bytes. This bug has existed since before v23.1. #165260 +- The `information_schema.crdb_node_active_session_history` and `information_schema.crdb_cluster_active_session_history` views now include the `app_name` column, matching the underlying `crdb_internal` tables. 
#165367 -[#163378]: https://github.com/cockroachdb/cockroach/pull/163378 -[#164403]: https://github.com/cockroachdb/cockroach/pull/164403 -[#164909]: https://github.com/cockroachdb/cockroach/pull/164909 -[#164827]: https://github.com/cockroachdb/cockroach/pull/164827 -[#163891]: https://github.com/cockroachdb/cockroach/pull/163891 -[#163866]: https://github.com/cockroachdb/cockroach/pull/163866 -[#164301]: https://github.com/cockroachdb/cockroach/pull/164301 -[#164433]: https://github.com/cockroachdb/cockroach/pull/164433 -[#164739]: https://github.com/cockroachdb/cockroach/pull/164739 -[#164671]: https://github.com/cockroachdb/cockroach/pull/164671 -[#163400]: https://github.com/cockroachdb/cockroach/pull/163400 -[#163991]: https://github.com/cockroachdb/cockroach/pull/163991 -[#164164]: https://github.com/cockroachdb/cockroach/pull/164164 -[#164444]: https://github.com/cockroachdb/cockroach/pull/164444 -[#164672]: https://github.com/cockroachdb/cockroach/pull/164672 -[#165382]: https://github.com/cockroachdb/cockroach/pull/165382 -[#164557]: https://github.com/cockroachdb/cockroach/pull/164557 -[#164765]: https://github.com/cockroachdb/cockroach/pull/164765 -[#164874]: https://github.com/cockroachdb/cockroach/pull/164874 -[#164879]: https://github.com/cockroachdb/cockroach/pull/164879 -[#165260]: https://github.com/cockroachdb/cockroach/pull/165260 -[#164664]: https://github.com/cockroachdb/cockroach/pull/164664 -[#164184]: https://github.com/cockroachdb/cockroach/pull/164184 -[#164969]: https://github.com/cockroachdb/cockroach/pull/164969 -[#163230]: https://github.com/cockroachdb/cockroach/pull/163230 -[#164468]: https://github.com/cockroachdb/cockroach/pull/164468 -[#164129]: https://github.com/cockroachdb/cockroach/pull/164129 -[#164881]: https://github.com/cockroachdb/cockroach/pull/164881 -[#164933]: https://github.com/cockroachdb/cockroach/pull/164933 -[#164942]: https://github.com/cockroachdb/cockroach/pull/164942 -[#150663]: 
https://github.com/cockroachdb/cockroach/pull/150663 -[#161763]: https://github.com/cockroachdb/cockroach/pull/161763 -[#165367]: https://github.com/cockroachdb/cockroach/pull/165367 -[#164449]: https://github.com/cockroachdb/cockroach/pull/164449 -[#165227]: https://github.com/cockroachdb/cockroach/pull/165227 -[#165093]: https://github.com/cockroachdb/cockroach/pull/165093 -[#158294]: https://github.com/cockroachdb/cockroach/pull/158294 -[#164471]: https://github.com/cockroachdb/cockroach/pull/164471 -[#164757]: https://github.com/cockroachdb/cockroach/pull/164757 -[#164943]: https://github.com/cockroachdb/cockroach/pull/164943 diff --git a/src/current/_includes/releases/v26.2/v26.2.0-beta.1.md b/src/current/_includes/releases/v26.2/v26.2.0-beta.1.md index e947820000a..c20bcb8c1b2 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0-beta.1.md +++ b/src/current/_includes/releases/v26.2/v26.2.0-beta.1.md @@ -6,9 +6,9 @@ Release Date: March 25, 2026

Backward-incompatible changes

-- Added the `TEMPORARY` database privilege, which controls whether users can create temporary tables and views. On new databases, this privilege is granted to the `public` role by default, matching PostgreSQL behavior. [#165992][#165992] -- The `cockroach encode-uri` command has been merged into the `cockroach convert-url` command and `encode-uri` has been deprecated. As a result, the flags `--inline`, `--database`, `--user`, `--password`, `--cluster`, `--certs-dir`, `--ca-cert`, `--cert`, and `--key` have been added to `convert-url`. [#164561][#164561] -- Statement diagnostics requests with `sampling_probability` and `expires_at` now collect up to 10 bundles (configurable via `sql.stmt_diagnostics.max_bundles_per_request`) instead of a single bundle. Set the cluster setting to `1` to restore single-bundle behavior. [#166159][#166159] +- Added the `TEMPORARY` database privilege, which controls whether users can create temporary tables and views. On new databases, this privilege is granted to the `public` role by default, matching PostgreSQL behavior. #165992 +- The `cockroach encode-uri` command has been merged into the `cockroach convert-url` command and `encode-uri` has been deprecated. As a result, the flags `--inline`, `--database`, `--user`, `--password`, `--cluster`, `--certs-dir`, `--ca-cert`, `--cert`, and `--key` have been added to `convert-url`. #164561 +- Statement diagnostics requests with `sampling_probability` and `expires_at` now collect up to 10 bundles (configurable via `sql.stmt_diagnostics.max_bundles_per_request`) instead of a single bundle. Set the cluster setting to `1` to restore single-bundle behavior. #166159

Security updates

@@ -22,50 +22,33 @@ Release Date: March 25, 2026 - Multiple identity attributes: A single certificate can contain multiple SAN entries (e.g., URI for service identity, DNS for hostname, IP for network location), providing flexible authentication options. - This authentication method works across both SQL client connections and internal RPC communication between cluster nodes, ensuring consistent identity verification throughout the system. Organizations using modern certificate management systems and service identity frameworks can now leverage their existing infrastructure for database authentication without requiring certificate reissuance or CN-based naming conventions. [#162583][#162583] + This authentication method works across both SQL client connections and internal RPC communication between cluster nodes, ensuring consistent identity verification throughout the system. Organizations using modern certificate management systems and service identity frameworks can now leverage their existing infrastructure for database authentication without requiring certificate reissuance or CN-based naming conventions. #162583

SQL language changes

-- Rewrite-inline-hints rules can now be scoped to a specific database, and will only apply to matching statements when the current database also matches. This database can be specified with an optional third argument to `information_schema.crdb_rewrite_inline_hints`. [#165457][#165457] -- Added support for the `aclitem` type and the `makeaclitem` and `acldefault` built-in functions for PostgreSQL compatibility. The existing `aclexplode` function, which previously always returned no rows, now correctly parses ACL strings and returns the individual privilege grants they contain. [#165744][#165744] -- Introduced a new built-in function `information_schema.crdb_enable_statement_hints`, which can be used to enable or disable statement hints by hint ID or by statement fingerprint. [#165457][#165457] -- Added the `pg_get_triggerdef` builtin function, which returns the `CREATE TRIGGER` statement for a given trigger OID. This improves PostgreSQL compatibility for databases that contain triggers. [#165849][#165849] -- `SHOW ALL` now returns a third column, `description`, containing a human-readable description of each session variable. This matches the PostgreSQL behavior of `SHOW ALL`. [#165397][#165397] -- `SHOW STATEMENT HINTS` now includes `database` and `enabled` columns in its output. The `database` column indicates which database the hint applies to, and the `enabled` column indicates whether the hint is active. [#165712][#165712] -- The `tableoid` system column is now supported on virtual tables such as those in `pg_catalog` and `information_schema`. This improves compatibility with PostgreSQL tools like `pg_dump` that reference `tableoid` in their introspection queries. [#165727][#165727] -- Added a `workload_type` column to the `crdb_internal.node_active_session_history` and `crdb_internal.cluster_active_session_history` virtual tables, as well as the corresponding `information_schema` views. 
The column exposes the type of workload being sampled, with possible values `STATEMENT`, `JOB`, `SYSTEM`, or `UNKNOWN`. [#165866][#165866] +- Rewrite-inline-hints rules can now be scoped to a specific database, and will only apply to matching statements when the current database also matches. This database can be specified with an optional third argument to `information_schema.crdb_rewrite_inline_hints`. #165457 +- Added support for the `aclitem` type and the `makeaclitem` and `acldefault` built-in functions for PostgreSQL compatibility. The existing `aclexplode` function, which previously always returned no rows, now correctly parses ACL strings and returns the individual privilege grants they contain. #165744 +- Introduced a new built-in function `information_schema.crdb_enable_statement_hints`, which can be used to enable or disable statement hints by hint ID or by statement fingerprint. #165457 +- Added the `pg_get_triggerdef` builtin function, which returns the `CREATE TRIGGER` statement for a given trigger OID. This improves PostgreSQL compatibility for databases that contain triggers. #165849 +- `SHOW ALL` now returns a third column, `description`, containing a human-readable description of each session variable. This matches the PostgreSQL behavior of `SHOW ALL`. #165397 +- `SHOW STATEMENT HINTS` now includes `database` and `enabled` columns in its output. The `database` column indicates which database the hint applies to, and the `enabled` column indicates whether the hint is active. #165712 +- The `tableoid` system column is now supported on virtual tables such as those in `pg_catalog` and `information_schema`. This improves compatibility with PostgreSQL tools like `pg_dump` that reference `tableoid` in their introspection queries. #165727 +- Added a `workload_type` column to the `crdb_internal.node_active_session_history` and `crdb_internal.cluster_active_session_history` virtual tables, as well as the corresponding `information_schema` views. 
The column exposes the type of workload being sampled, with possible values `STATEMENT`, `JOB`, `SYSTEM`, or `UNKNOWN`. #165866

Operational changes

-- Added the opt-in cluster setting `server.oidc_authentication.tls_insecure_skip_verify.enabled` to skip TLS certificate verification for OIDC provider connections. [#164514][#164514] -- Red Hat certified CockroachDB container images are now published as multi-arch manifests supporting `linux/amd64`, `linux/arm64`, and `linux/s390x`. Previously only `linux/amd64` was published to the Red Hat registry. [#165725][#165725] -- When hash-based redaction is enabled in the logging configuration, usernames in authentication logs now produce deterministic hashes instead of being fully redacted. This lets support engineers correlate the same user across multiple log entries without revealing the actual values. [#165804][#165804] +- Added the opt-in cluster setting `server.oidc_authentication.tls_insecure_skip_verify.enabled` to skip TLS certificate verification for OIDC provider connections. #164514 +- Red Hat certified CockroachDB container images are now published as multi-arch manifests supporting `linux/amd64`, `linux/arm64`, and `linux/s390x`. Previously only `linux/amd64` was published to the Red Hat registry. #165725 +- When hash-based redaction is enabled in the logging configuration, usernames in authentication logs now produce deterministic hashes instead of being fully redacted. This lets support engineers correlate the same user across multiple log entries without revealing the actual values. #165804

Command-line changes

-- Added the `--exclude-log-severities` flag to `cockroach debug zip` that filters log entries by severity server-side. For example, `--exclude-log-severities=INFO` excludes all `INFO`-level log entries from the collected log files, which can significantly reduce zip file size for large clusters. Valid severity names are `INFO`, `WARNING`, `ERROR`, and `FATAL`. The flag accepts a comma-delimited list or can be specified multiple times. [#165802][#165802] -- Added the `--format` option to `cockroach convert-url`, which allows users to specify the type of connection URL to output. Supported formats are `pq`, `dsn`, `jdbc`, and `crdb`. [#164561][#164561] +- Added the `--exclude-log-severities` flag to `cockroach debug zip` that filters log entries by severity server-side. For example, `--exclude-log-severities=INFO` excludes all `INFO`-level log entries from the collected log files, which can significantly reduce zip file size for large clusters. Valid severity names are `INFO`, `WARNING`, `ERROR`, and `FATAL`. The flag accepts a comma-delimited list or can be specified multiple times. #165802 +- Added the `--format` option to `cockroach convert-url`, which allows users to specify the type of connection URL to output. Supported formats are `pq`, `dsn`, `jdbc`, and `crdb`. #164561

Bug fixes

-- Fixed a bug where temporary tables created in one session could fail to appear in `pg_catalog` queries from another session because the parent temporary schema could not be resolved by ID. [#165395][#165395] -- Fixed a bug in the legacy schema changer where rolling back a `CREATE TABLE` with inline `FOREIGN KEY` constraints could leave orphaned foreign key back-references on the referenced table, causing descriptor validation errors. [#165551][#165551] - - -[#165727]: https://github.com/cockroachdb/cockroach/pull/165727 -[#165866]: https://github.com/cockroachdb/cockroach/pull/165866 -[#166159]: https://github.com/cockroachdb/cockroach/pull/166159 -[#164561]: https://github.com/cockroachdb/cockroach/pull/164561 -[#165551]: https://github.com/cockroachdb/cockroach/pull/165551 -[#165457]: https://github.com/cockroachdb/cockroach/pull/165457 -[#165712]: https://github.com/cockroachdb/cockroach/pull/165712 -[#165992]: https://github.com/cockroachdb/cockroach/pull/165992 -[#165725]: https://github.com/cockroachdb/cockroach/pull/165725 -[#165804]: https://github.com/cockroachdb/cockroach/pull/165804 -[#165395]: https://github.com/cockroachdb/cockroach/pull/165395 -[#165397]: https://github.com/cockroachdb/cockroach/pull/165397 -[#165849]: https://github.com/cockroachdb/cockroach/pull/165849 -[#165744]: https://github.com/cockroachdb/cockroach/pull/165744 -[#164514]: https://github.com/cockroachdb/cockroach/pull/164514 -[#162583]: https://github.com/cockroachdb/cockroach/pull/162583 -[#165802]: https://github.com/cockroachdb/cockroach/pull/165802 +- Fixed a bug where temporary tables created in one session could fail to appear in `pg_catalog` queries from another session because the parent temporary schema could not be resolved by ID. #165395 +- Fixed a bug in the legacy schema changer where rolling back a `CREATE TABLE` with inline `FOREIGN KEY` constraints could leave orphaned foreign key back-references on the referenced table, causing descriptor validation errors. 
#165551 + + diff --git a/src/current/_includes/releases/v26.2/v26.2.0-beta.2.md b/src/current/_includes/releases/v26.2/v26.2.0-beta.2.md index 4f97968e0d1..7fc38263e41 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0-beta.2.md +++ b/src/current/_includes/releases/v26.2/v26.2.0-beta.2.md @@ -6,25 +6,18 @@ Release Date: April 1, 2026

SQL language changes

-- `EXPLAIN` and `EXPLAIN ANALYZE` now display a `table stats mode` field (`canary` or `stable`) when the `sql.stats.canary_fraction` cluster setting is greater than 0, indicating which table statistics were used for query planning. Scan nodes for tables with active canary stats also show the configured canary window duration. [#166129][#166129] +- `EXPLAIN` and `EXPLAIN ANALYZE` now display a `table stats mode` field (`canary` or `stable`) when the `sql.stats.canary_fraction` cluster setting is greater than 0, indicating which table statistics were used for query planning. Scan nodes for tables with active canary stats also show the configured canary window duration. #166129

Operational changes

-- A new cluster setting, `server.gc_assist.enabled`, allows operators to dynamically disable GC assist in CockroachDB's forked Go runtime. By default, it follows the `GODEBUG=gcnoassist` flag. A new metric, `sys.gc.assist.enabled`, reports the current state (`1` = enabled, `0` = disabled). [#166555][#166555] +- A new cluster setting, `server.gc_assist.enabled`, allows operators to dynamically disable GC assist in CockroachDB's forked Go runtime. By default, it follows the `GODEBUG=gcnoassist` flag. A new metric, `sys.gc.assist.enabled`, reports the current state (`1` = enabled, `0` = disabled). #166555

Bug fixes

-- Fixed a bug where running `EXPLAIN ANALYZE (DEBUG)` on a query that invokes a UDF with many blocks could cause out-of-memory errors (OOMs). [#166132][#166132] -- Fixed a bug where restoring a database backup containing default privileges that referenced non-existent users would leave dangling user references in the restored database descriptor. [#166183][#166183] -- Fixed a bug where rolling back a `CREATE TABLE` that referenced user-defined types or sequences would leave orphaned back-references on the type and sequence descriptors, causing them to appear in `crdb_internal.invalid_objects` after the table was GC'd. [#166223][#166223] -- Fixed a bug where concurrent updates to a table using multiple column families during a partial index creation could result in data loss, incorrect `NULL` values, or validation failures in the resulting index. [#166325][#166325] -- Fixed a crash (`traceRegion: alloc too large`) that could occur when Go's execution tracer was enabled and a range cache lookup used a key longer than about 64 KB. [#166705][#166705] +- Fixed a bug where running `EXPLAIN ANALYZE (DEBUG)` on a query that invokes a UDF with many blocks could cause out-of-memory errors (OOMs). #166132 +- Fixed a bug where restoring a database backup containing default privileges that referenced non-existent users would leave dangling user references in the restored database descriptor. #166183 +- Fixed a bug where rolling back a `CREATE TABLE` that referenced user-defined types or sequences would leave orphaned back-references on the type and sequence descriptors, causing them to appear in `crdb_internal.invalid_objects` after the table was GC'd. #166223 +- Fixed a bug where concurrent updates to a table using multiple column families during a partial index creation could result in data loss, incorrect `NULL` values, or validation failures in the resulting index. 
#166325 +- Fixed a crash (`traceRegion: alloc too large`) that could occur when Go's execution tracer was enabled and a range cache lookup used a key longer than about 64 KB. #166705 -[#166132]: https://github.com/cockroachdb/cockroach/pull/166132 -[#166183]: https://github.com/cockroachdb/cockroach/pull/166183 -[#166223]: https://github.com/cockroachdb/cockroach/pull/166223 -[#166325]: https://github.com/cockroachdb/cockroach/pull/166325 -[#166705]: https://github.com/cockroachdb/cockroach/pull/166705 -[#166129]: https://github.com/cockroachdb/cockroach/pull/166129 -[#166555]: https://github.com/cockroachdb/cockroach/pull/166555 diff --git a/src/current/_includes/releases/v26.2/v26.2.0-beta.3.md b/src/current/_includes/releases/v26.2/v26.2.0-beta.3.md index a25df87a777..952d3900dac 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0-beta.3.md +++ b/src/current/_includes/releases/v26.2/v26.2.0-beta.3.md @@ -6,58 +6,38 @@ Release Date: April 8, 2026

Backward-incompatible changes

-- User-defined views that reference `crdb_internal` virtual tables now enforce unsafe access checks. To restore the previous behavior, set the session variable `allow_unsafe_internals` or the cluster setting `sql.override.allow_unsafe_internals.enabled` to `true`. [#167023][#167023] -- `REFRESH MATERIALIZED VIEW` now evaluates row-level security (RLS) policies using the view owner's identity instead of the invoker's, matching PostgreSQL's definer semantics. [#167419][#167419] +- User-defined views that reference `crdb_internal` virtual tables now enforce unsafe access checks. To restore the previous behavior, set the session variable `allow_unsafe_internals` or the cluster setting `sql.override.allow_unsafe_internals.enabled` to `true`. #167023 +- `REFRESH MATERIALIZED VIEW` now evaluates row-level security (RLS) policies using the view owner's identity instead of the invoker's, matching PostgreSQL's definer semantics. #167419

Security updates

-- Removed an overly restrictive TLS curve preference that limited FIPS mode to P-256. CockroachDB now uses Go's native FIPS curve selection, improving interoperability with clients that prefer other FIPS curves. [#166793][#166793] +- Removed an overly restrictive TLS curve preference that limited FIPS mode to P-256. CockroachDB now uses Go's native FIPS curve selection, improving interoperability with clients that prefer other FIPS curves. #166793

SQL language changes

-- Added a new cluster setting, `sql.schema.auto_unlock.enabled`, that controls whether DDL operations automatically unlock `schema_locked` tables. When set to `false`, DDL on schema-locked tables is blocked unless the user manually unlocks the table first. This allows customers using LDR to enforce `schema_locked` as a hard lock that prevents user-initiated DDL. The default is `true`, preserving existing behavior. [#166471][#166471] -- Added a new cluster setting `sql.prepared_transactions.unsafe.enabled` (default: `false`) that controls whether `PREPARE TRANSACTION` statements are accepted. This setting is marked unsafe and requires the unsafe setting interlock to change. When disabled, attempting to prepare a transaction returns an error. `COMMIT PREPARED` and `ROLLBACK PREPARED` remain available regardless of this setting to allow cleanup of existing prepared transactions. [#166855][#166855] -- Aggregation function `ST_AsMVT` can now also be used as a window function. [#166860][#166860] -- `CREATE CHANGEFEED FOR DATABASE` now returns an error stating that the feature is not implemented. [#166920][#166920] -- The `information_schema.crdb_delete_statement_hints` built-in function now accepts an optional second `database` argument to delete only hints scoped to a specific database. [#167192][#167192] -- The `information_schema.crdb_enable_statement_hints` built-in function now accepts an optional third `database` argument to enable or disable only hints scoped to a specific database. [#167192][#167192] -- Setting `skip_unique_checks = true` on an index now emits a notice warning that unique constraint enforcement is bypassed, with a pointer to the `INSPECT` documentation. [#167405][#167405] +- Added a new cluster setting, `sql.schema.auto_unlock.enabled`, that controls whether DDL operations automatically unlock `schema_locked` tables. When set to `false`, DDL on schema-locked tables is blocked unless the user manually unlocks the table first. 
This allows customers using LDR to enforce `schema_locked` as a hard lock that prevents user-initiated DDL. The default is `true`, preserving existing behavior. #166471 +- Added a new cluster setting `sql.prepared_transactions.unsafe.enabled` (default: `false`) that controls whether `PREPARE TRANSACTION` statements are accepted. This setting is marked unsafe and requires the unsafe setting interlock to change. When disabled, attempting to prepare a transaction returns an error. `COMMIT PREPARED` and `ROLLBACK PREPARED` remain available regardless of this setting to allow cleanup of existing prepared transactions. #166855 +- Aggregation function `ST_AsMVT` can now also be used as a window function. #166860 +- `CREATE CHANGEFEED FOR DATABASE` now returns an error stating that the feature is not implemented. #166920 +- The `information_schema.crdb_delete_statement_hints` built-in function now accepts an optional second `database` argument to delete only hints scoped to a specific database. #167192 +- The `information_schema.crdb_enable_statement_hints` built-in function now accepts an optional third `database` argument to enable or disable only hints scoped to a specific database. #167192 +- Setting `skip_unique_checks = true` on an index now emits a notice warning that unique constraint enforcement is bypassed, with a pointer to the `INSPECT` documentation. #167405

Operational changes

-- Added two new metrics, `auth.cert.san.conn.total` and `auth.cert.san.conn.success`, to track SAN-based certificate authentication attempts and successes. [#166829][#166829] -- Added a new cluster setting `changefeed.kafka.max_request_size` and a per-changefeed `Flush.MaxBytes` option in the Kafka sink config to control the maximum size of record batches sent to Kafka by the v2 sink. Lowering this from the default of 256 MiB can prevent spurious message-too-large errors when multiple batches are coalesced into a single broker request. [#166740][#166740] +- Added two new metrics, `auth.cert.san.conn.total` and `auth.cert.san.conn.success`, to track SAN-based certificate authentication attempts and successes. #166829 +- Added a new cluster setting `changefeed.kafka.max_request_size` and a per-changefeed `Flush.MaxBytes` option in the Kafka sink config to control the maximum size of record batches sent to Kafka by the v2 sink. Lowering this from the default of 256 MiB can prevent spurious message-too-large errors when multiple batches are coalesced into a single broker request. #166740

Bug fixes

-- Fixed a data race that could cause certificate expiration metrics (`security.certificate.expiration.node-client`, `security.certificate.expiration.client-tenant`, `security.certificate.expiration.ca-client-tenant` and their TTL counterparts) to not update after certificate rotation via `SIGHUP`. [#166664][#166664] -- Fixed a bug where `ALTER FUNCTION ... RENAME TO` and `ALTER PROCEDURE ... RENAME TO` could create duplicate functions in non-public schemas. [#166681][#166681] -- The PCR job now switches into the cutover phase more promptly after a failover is requested, terminating the replication phase more quickly and more reliably when components of the ingestion process are hung due to network errors. [#166778][#166778] -- Fixed a bug where descriptor version fetching could be incorrectly throttled by the elastic CPU limiter, potentially leading to increased query latency or timeouts under high CPU load. [#166810][#166810] -- Context cancellation is now surfaced if a `statement_timeout` occurs while waiting for a schema change. [#167112][#167112] -- Fixed a bug where transient I/O errors (such as cloud storage network timeouts) during split or merge trigger evaluation were misidentified as replica corruption, causing the node to crash. These errors now correctly fail the operation, which is retried automatically. [#167377][#167377] -- Fixed a bug where executing a mutation in a subquery (e.g., as a CTE) could cause the "rows written" metrics like `sql.statements.index_rows_written.count` and `sql.statements.index_bytes_written.count` to not be incremented correctly. [#167432][#167432] -- Fixed a bug where converting a table from `REGIONAL BY ROW` to `GLOBAL` would not clear the `skip_unique_checks` storage parameter on the primary key, even though implicit partitioning was removed. 
[#167484][#167484] -- Fixed a bug where the `lock_timeout` and `deadlock_timeout` session settings were not honored by FK existence checks performed during insert fast path execution. This could cause inserts to block indefinitely on conflicting locks instead of returning a timeout error. [#167532][#167532] - - -[#166471]: https://github.com/cockroachdb/cockroach/pull/166471 -[#167377]: https://github.com/cockroachdb/cockroach/pull/167377 -[#167484]: https://github.com/cockroachdb/cockroach/pull/167484 -[#166681]: https://github.com/cockroachdb/cockroach/pull/166681 -[#167112]: https://github.com/cockroachdb/cockroach/pull/167112 -[#166855]: https://github.com/cockroachdb/cockroach/pull/166855 -[#167192]: https://github.com/cockroachdb/cockroach/pull/167192 -[#167405]: https://github.com/cockroachdb/cockroach/pull/167405 -[#166664]: https://github.com/cockroachdb/cockroach/pull/166664 -[#166810]: https://github.com/cockroachdb/cockroach/pull/166810 -[#167432]: https://github.com/cockroachdb/cockroach/pull/167432 -[#167023]: https://github.com/cockroachdb/cockroach/pull/167023 -[#166860]: https://github.com/cockroachdb/cockroach/pull/166860 -[#166829]: https://github.com/cockroachdb/cockroach/pull/166829 -[#166740]: https://github.com/cockroachdb/cockroach/pull/166740 -[#167532]: https://github.com/cockroachdb/cockroach/pull/167532 -[#166793]: https://github.com/cockroachdb/cockroach/pull/166793 -[#166920]: https://github.com/cockroachdb/cockroach/pull/166920 -[#166778]: https://github.com/cockroachdb/cockroach/pull/166778 -[#167419]: https://github.com/cockroachdb/cockroach/pull/167419 +- Fixed a data race that could cause certificate expiration metrics (`security.certificate.expiration.node-client`, `security.certificate.expiration.client-tenant`, `security.certificate.expiration.ca-client-tenant` and their TTL counterparts) to not update after certificate rotation via `SIGHUP`. #166664 +- Fixed a bug where `ALTER FUNCTION ... RENAME TO` and `ALTER PROCEDURE ... 
RENAME TO` could create duplicate functions in non-public schemas. #166681 +- The PCR job now switches into the cutover phase more promptly after a failover is requested, terminating the replication phase more quickly and more reliably when components of the ingestion process are hung due to network errors. #166778 +- Fixed a bug where descriptor version fetching could be incorrectly throttled by the elastic CPU limiter, potentially leading to increased query latency or timeouts under high CPU load. #166810 +- Context cancellation is now surfaced if a `statement_timeout` occurs while waiting for a schema change. #167112 +- Fixed a bug where transient I/O errors (such as cloud storage network timeouts) during split or merge trigger evaluation were misidentified as replica corruption, causing the node to crash. These errors now correctly fail the operation, which is retried automatically. #167377 +- Fixed a bug where executing a mutation in a subquery (e.g., as a CTE) could cause the "rows written" metrics like `sql.statements.index_rows_written.count` and `sql.statements.index_bytes_written.count` to not be incremented correctly. #167432 +- Fixed a bug where converting a table from `REGIONAL BY ROW` to `GLOBAL` would not clear the `skip_unique_checks` storage parameter on the primary key, even though implicit partitioning was removed. #167484 +- Fixed a bug where the `lock_timeout` and `deadlock_timeout` session settings were not honored by FK existence checks performed during insert fast path execution. This could cause inserts to block indefinitely on conflicting locks instead of returning a timeout error. 
#167532 + + diff --git a/src/current/_includes/releases/v26.2/v26.2.0-rc.1.md b/src/current/_includes/releases/v26.2/v26.2.0-rc.1.md index 27f65def9e8..ecfe6bc92b4 100644 --- a/src/current/_includes/releases/v26.2/v26.2.0-rc.1.md +++ b/src/current/_includes/releases/v26.2/v26.2.0-rc.1.md @@ -8,17 +8,14 @@ Release Date: April 15, 2026 - Exposed the following settings for canary table statistics: - Cluster setting `sql.stats.canary_fraction`: probability that table statistics will use canary mode (i.e., always use the freshest stats) instead of stable mode (i.e., use the second-freshest stats) for query planning [0.0-1.0]. - - Session variable `canary_stats_mode`: When `sql.stats.canary_fraction` is greater than `0`, controls which table statistics are used for query planning on the current session: `on` always uses the newest (canary) stats immediately when they are collected, `off` delays using new stats until they outlive the canary window, and `auto` selects probabilistically based on the canary fraction. Has no effect when `sql.stats.canary_fraction` is `0`. [#167944][#167944] + - Session variable `canary_stats_mode`: When `sql.stats.canary_fraction` is greater than `0`, controls which table statistics are used for query planning on the current session: `on` always uses the newest (canary) stats immediately when they are collected, `off` delays using new stats until they outlive the canary window, and `auto` selects probabilistically based on the canary fraction. Has no effect when `sql.stats.canary_fraction` is `0`. #167944

Bug fixes

-- Fixed a bug where CockroachDB might not have respected the table-level parameters `sql_stats_automatic_full_collection_enabled` and `sql_stats_automatic_partial_collection_enabled` and defaulted to using the corresponding cluster settings when deciding whether to perform automatic statistics collection on a table. [#167681][#167681] +- Fixed a bug where CockroachDB might not have respected the table-level parameters `sql_stats_automatic_full_collection_enabled` and `sql_stats_automatic_partial_collection_enabled` and defaulted to using the corresponding cluster settings when deciding whether to perform automatic statistics collection on a table. #167681

Performance improvements

-- Statement executions using canary stats will no longer use cached plans, which prevents cache thrashing but causes a slight increase in planning time over statement executions using stable stats. [#167503][#167503] +- Statement executions using canary stats will no longer use cached plans, which prevents cache thrashing but causes a slight increase in planning time over statement executions using stable stats. #167503 -[#167944]: https://github.com/cockroachdb/cockroach/pull/167944 -[#167681]: https://github.com/cockroachdb/cockroach/pull/167681 -[#167503]: https://github.com/cockroachdb/cockroach/pull/167503 diff --git a/src/current/_includes/v20.2/known-limitations/copy-from-clients.md b/src/current/_includes/v20.2/known-limitations/copy-from-clients.md index 4428aaf74f7..5b7ff8f5502 100644 --- a/src/current/_includes/v20.2/known-limitations/copy-from-clients.md +++ b/src/current/_includes/v20.2/known-limitations/copy-from-clients.md @@ -2,4 +2,4 @@ The built-in SQL shell provided with CockroachDB ([`cockroach sql`](cockroach-sq To load data into CockroachDB, we recommend that you use an [`IMPORT`](import.html). If you must use a `COPY` statement, you can issue the statement from the [`psql` client](https://www.postgresql.org/docs/current/app-psql.html) command provided with PostgreSQL, or from another third-party client. -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/16392) \ No newline at end of file +Tracking GitHub Issue \ No newline at end of file diff --git a/src/current/_includes/v20.2/known-limitations/copy-syntax.md b/src/current/_includes/v20.2/known-limitations/copy-syntax.md index 2150a7ba642..b64f3b02872 100644 --- a/src/current/_includes/v20.2/known-limitations/copy-syntax.md +++ b/src/current/_includes/v20.2/known-limitations/copy-syntax.md @@ -2,16 +2,16 @@ CockroachDB does not yet support the following `COPY` syntax: - `COPY ... TO`. 
To copy data from a CockroachDB cluster to a file, use an [`EXPORT`](export.html) statement. - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/41608) + Tracking GitHub Issue - `COPY ... FROM CSV` - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/51891) + Tracking GitHub Issue - `COPY ... FROM STDIN` with a delimiter other than the default tab delimiter. - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/16407) + Tracking GitHub Issue - `COPY ... FROM ... WHERE ` - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/54580) \ No newline at end of file + Tracking GitHub Issue \ No newline at end of file diff --git a/src/current/_includes/v20.2/known-limitations/old-multi-col-stats.md b/src/current/_includes/v20.2/known-limitations/old-multi-col-stats.md index 595be9c7209..4047ba82ff4 100644 --- a/src/current/_includes/v20.2/known-limitations/old-multi-col-stats.md +++ b/src/current/_includes/v20.2/known-limitations/old-multi-col-stats.md @@ -1,3 +1,3 @@ When a column is dropped from a multi-column index, the {% if page.name == "cost-based-optimizer.md" %} optimizer {% else %} [optimizer](cost-based-optimizer.html) {% endif %} will not collect new statistics for the deleted column. However, the optimizer never deletes the old [multi-column statistics](create-statistics.html#create-statistics-on-multiple-columns). This can cause a buildup of statistics in `system.table_statistics` leading the optimizer to use stale statistics, which could result in sub-optimal plans. To workaround this issue and avoid these scenarios, explicitly [delete those statistics](create-statistics.html#delete-statistics) from the `system.table_statistics` table. 
- [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/67407) + Tracking GitHub Issue diff --git a/src/current/_includes/v20.2/known-limitations/set-transaction-no-rollback.md b/src/current/_includes/v20.2/known-limitations/set-transaction-no-rollback.md index 4ab3661f4f7..1c45a935448 100644 --- a/src/current/_includes/v20.2/known-limitations/set-transaction-no-rollback.md +++ b/src/current/_includes/v20.2/known-limitations/set-transaction-no-rollback.md @@ -14,4 +14,4 @@ timezone 3 ~~~ -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/69396) +Tracking GitHub Issue diff --git a/src/current/_includes/v20.2/known-limitations/single-col-stats-deletion.md b/src/current/_includes/v20.2/known-limitations/single-col-stats-deletion.md index b8baa46c5d2..3382eec7b9e 100644 --- a/src/current/_includes/v20.2/known-limitations/single-col-stats-deletion.md +++ b/src/current/_includes/v20.2/known-limitations/single-col-stats-deletion.md @@ -1,3 +1,3 @@ [Single-column statistics](create-statistics.html#create-statistics-on-a-single-column) are not deleted when columns are dropped, which could cause minor performance issues. - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/67407) + Tracking GitHub Issue diff --git a/src/current/_includes/v20.2/known-limitations/stats-refresh-upgrade.md b/src/current/_includes/v20.2/known-limitations/stats-refresh-upgrade.md index f54a08b3754..4df7d83e986 100644 --- a/src/current/_includes/v20.2/known-limitations/stats-refresh-upgrade.md +++ b/src/current/_includes/v20.2/known-limitations/stats-refresh-upgrade.md @@ -1,3 +1,3 @@ The [automatic statistics refresher](cost-based-optimizer.html#control-statistics-refresh-rate) automatically checks whether it needs to refresh statistics for every table in the database upon startup of each node in the cluster. If statistics for a table have not been refreshed in a while, this will trigger collection of statistics for that table. 
If statistics have been refreshed recently, it will not force a refresh. As a result, the automatic statistics refresher does not necessarily perform a refresh of statistics after an [upgrade](upgrade-cockroach-version.html). This could cause a problem, for example, if the upgrade moves from a version without [histograms](cost-based-optimizer.html#control-histogram-collection) to a version with histograms. To refresh statistics manually, use [`CREATE STATISTICS`](create-statistics.html). - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/54816) + Tracking GitHub Issue diff --git a/src/current/_includes/v20.2/known-limitations/unordered-operations.md b/src/current/_includes/v20.2/known-limitations/unordered-operations.md index 8e54fc6848a..f91839359ca 100644 --- a/src/current/_includes/v20.2/known-limitations/unordered-operations.md +++ b/src/current/_includes/v20.2/known-limitations/unordered-operations.md @@ -4,4 +4,4 @@ Unordered aggregation operations do not support disk spilling, and are limited b Setting `--max-sql-memory` too high could result in performance problems due to increased memory consumption. {{site.data.alerts.end}} -See the [GitHub tracking issue](https://github.com/cockroachdb/cockroach/issues/42485) for details. +See the GitHub tracking issue for details. diff --git a/src/current/_includes/v20.2/known-limitations/userfile-upload-non-recursive.md b/src/current/_includes/v20.2/known-limitations/userfile-upload-non-recursive.md index 19db5fde6a4..c20fc85b310 100644 --- a/src/current/_includes/v20.2/known-limitations/userfile-upload-non-recursive.md +++ b/src/current/_includes/v20.2/known-limitations/userfile-upload-non-recursive.md @@ -1 +1 @@ -- `cockroach userfile upload` does not not currently allow for recursive uploads from a directory. This feature will be present with the `--recursive` flag in future versions. 
[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/pull/65307) +- `cockroach userfile upload` does not currently allow for recursive uploads from a directory. This feature will be present with the `--recursive` flag in future versions. Tracking GitHub Issue diff --git a/src/current/_includes/v20.2/misc/tooling.md b/src/current/_includes/v20.2/misc/tooling.md index a6d0a64b30b..3cb34b7d2e2 100644 --- a/src/current/_includes/v20.2/misc/tooling.md +++ b/src/current/_includes/v20.2/misc/tooling.md @@ -9,7 +9,7 @@ Cockroach Labs has partnered with open-source projects, vendors, and individuals Unless explicitly stated, support for a [driver](#drivers) or [data access framework](#data-access-frameworks-e-g-orms) does not include [automatic, client-side transaction retry handling](transactions.html#client-side-intervention). For client-side transaction retry handling samples, see [Example Apps](example-apps.html). {{site.data.alerts.end}} -If you encounter problems using CockroachDB with any of the tools listed on this page, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward better support. +If you encounter problems using CockroachDB with any of the tools listed on this page, please open an issue with details to help us make progress toward better support. For a list of tools supported by the CockroachDB community, see [Third-Party Tools Supported by the Community](community-tooling.html). 
@@ -19,21 +19,21 @@ For a list of tools supported by the CockroachDB community, see [Third-Party Too |----------+--------+-----------------------+---------------------+---------------------+----------| | C | [libpq](http://www.postgresql.org/docs/13/static/libpq.html)| PostgreSQL 13 | Beta | N/A | N/A | | C# (.NET) | [Npgsql](https://www.nuget.org/packages/Npgsql/) | 4.1.3.1 | Beta | N/A | [Build a C# App with CockroachDB (Npgsql)](build-a-csharp-app-with-cockroachdb.html) | -| Go | [pgx](https://github.com/jackc/pgx/releases)


[pq](https://github.com/lib/pq) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgx.go ||var supportedPGXTag = "||"\n\n %}
(use latest version of CockroachDB adapter)
{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/libpq.go ||var libPQSupportedTag = "||"\n\n %} | Full


Full | [`crdbpgx`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbpgx)
(includes client-side transaction retry handling)
N/A | [Build a Go App with CockroachDB (pgx)](build-a-go-app-with-cockroachdb.html)


[Build a Go App with CockroachDB (pq)](build-a-go-app-with-cockroachdb-pq.html) || -|| Java | [JDBC](https://jdbc.postgresql.org/download/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgjdbc.go ||var supportedPGJDBCTag = "||"\n\n %} | Full | N/A | [Build a Java App with CockroachDB (JDBC)](build-a-java-app-with-cockroachdb.html) | +| Go | [pgx](https://github.com/jackc/pgx/releases)


[pq](https://github.com/lib/pq) | v5.3.1
(use latest version of CockroachDB adapter)
v1.10.5 | Full


Full | [`crdbpgx`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbpgx)
(includes client-side transaction retry handling)
N/A | [Build a Go App with CockroachDB (pgx)](build-a-go-app-with-cockroachdb.html)


[Build a Go App with CockroachDB (pq)](build-a-go-app-with-cockroachdb-pq.html) || +|| Java | [JDBC](https://jdbc.postgresql.org/download/) | REL42.7.3 | Full | N/A | [Build a Java App with CockroachDB (JDBC)](build-a-java-app-with-cockroachdb.html) | | JavaScript | [pg](https://www.npmjs.com/package/pg) | 8.2.1 | Full | N/A | [Build a Node.js App with CockroachDB (pg)](build-a-nodejs-app-with-cockroachdb.html) | | Python | [psycopg2](https://www.psycopg.org/docs/install.html) | 2.8.6 | Full | N/A | [Build a Python App with CockroachDB (psycopg2)](build-a-python-app-with-cockroachdb.html) | -| Ruby | [pg](https://rubygems.org/gems/pg) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/ruby_pg.go ||var rubyPGVersion = "||"\n\n %} | Full | N/A | [Build a Ruby App with CockroachDB (pg)](build-a-ruby-app-with-cockroachdb.html) | +| Ruby | [pg](https://rubygems.org/gems/pg) | v1.4.6 | Full | N/A | [Build a Ruby App with CockroachDB (pg)](build-a-ruby-app-with-cockroachdb.html) | ## Data access frameworks (e.g., ORMs) | Language | Framework | Latest tested version | Support level | CockroachDB adapter | Tutorial | |----------+-----------+-----------------------+---------------+---------------------+----------| -| Go | [GORM](https://github.com/jinzhu/gorm/releases)


[go-pg](https://github.com/go-pg/pg)
[upper/db](https://github.com/upper/db) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gorm.go ||var gormSupportedTag = "||"\n\n %}


{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gopg.go ||var gopgSupportedTag = "||"\n\n %}
v4 | Full


Full
Full | [`crdbgorm`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbgorm)
(includes client-side transaction retry handling)
N/A
N/A | [Build a Go App with CockroachDB (GORM)](build-a-go-app-with-cockroachdb-gorm.html)


N/A
[Build a Go App with CockroachDB (upper/db)](build-a-go-app-with-cockroachdb-upperdb.html) || -|| Java | [Hibernate](https://hibernate.org/orm/)
(including [Hibernate Spatial](https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html#spatial))
[jOOQ](https://www.jooq.org/)
[MyBatis](https://mybatis.org/mybatis-3/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/hibernate.go ||var supportedHibernateTag = "||"\n\n %} (must be 5.4.19)


3.13.2 (must be 3.13.0)
3.5.5| Full


Full
Full | N/A


N/A
N/A | [Build a Java App with CockroachDB (Hibernate)](build-a-java-app-with-cockroachdb-hibernate.html)


[Build a Java App with CockroachDB (jOOQ)](build-a-java-app-with-cockroachdb-jooq.html)
[Build a Spring App with CockroachDB (MyBatis)](build-a-spring-app-with-cockroachdb-mybatis.html) || -|| JavaScript/TypeScript | [Sequelize](https://www.npmjs.com/package/sequelize)


[TypeORM](https://www.npmjs.com/package/typeorm) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/sequelize.go ||var supportedSequelizeCockroachDBRelease = "||"\n\n %}
(use latest version of CockroachDB adapter)
0.3.17 {% comment %}{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/typeorm.go ||const supportedTypeORMRelease = "||"\n %}{% endcomment %} | Full


Full | [`sequelize-cockroachdb`](https://www.npmjs.com/package/sequelize-cockroachdb)


N/A | [Build a Node.js App with CockroachDB (Sequelize)](build-a-nodejs-app-with-cockroachdb-sequelize.html)


[Build a TypeScript App with CockroachDB (TypeORM)](build-a-typescript-app-with-cockroachdb.html) || -|| Ruby | [ActiveRecord](https://rubygems.org/gems/activerecord)
[RGeo/RGeo-ActiveRecord](https://github.com/cockroachdb/activerecord-cockroachdb-adapter#working-with-spatial-data) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/activerecord.go ||var supportedRailsVersion = "||"\nvar %}
(use latest version of CockroachDB adapter) | Full | [`activerecord-cockroachdb-adapter`](https://rubygems.org/gems/activerecord-cockroachdb-adapter)
(includes client-side transaction retry handling) | [Build a Ruby App with CockroachDB (ActiveRecord)](build-a-ruby-app-with-cockroachdb-activerecord.html) || -|| Python | [Django](https://pypi.org/project/Django/)
(including [GeoDjango](https://docs.djangoproject.com/en/3.1/ref/contrib/gis/))
[peewee](https://github.com/coleifer/peewee/)
[SQLAlchemy](https://www.sqlalchemy.org/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/django.go ||var djangoSupportedTag = "cockroach-||"\nvar %}
(use latest version of CockroachDB adapter)

3.13.3
0.7.13
1.4.17
(use latest version of CockroachDB adapter) | Full


Full
Full
Full | [`django-cockroachdb`](https://pypi.org/project/django-cockroachdb/)


N/A
N/A
[`sqlalchemy-cockroachdb`](https://pypi.org/project/sqlalchemy-cockroachdb)
(includes client-side transaction retry handling) | [Build a Python App with CockroachDB (Django)](build-a-python-app-with-cockroachdb-django.html)


N/A (See [peewee docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cockroach-database).)
[Build a Python App with CockroachDB (SQLAlchemy)](build-a-python-app-with-cockroachdb-sqlalchemy.html) | +| Go | [GORM](https://github.com/jinzhu/gorm/releases)


[go-pg](https://github.com/go-pg/pg)
[upper/db](https://github.com/upper/db) | v1.24.1


v10.9.0
v4 | Full


Full
Full | [`crdbgorm`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbgorm)
(includes client-side transaction retry handling)
N/A
N/A | [Build a Go App with CockroachDB (GORM)](build-a-go-app-with-cockroachdb-gorm.html)


N/A
[Build a Go App with CockroachDB (upper/db)](build-a-go-app-with-cockroachdb-upperdb.html) || +|| Java | [Hibernate](https://hibernate.org/orm/)
(including [Hibernate Spatial](https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html#spatial))
[jOOQ](https://www.jooq.org/)
[MyBatis](https://mybatis.org/mybatis-3/) | 6.6.20 (must be 5.4.19)


3.13.2 (must be 3.13.0)
3.5.5| Full


Full
Full | N/A


N/A
N/A | [Build a Java App with CockroachDB (Hibernate)](build-a-java-app-with-cockroachdb-hibernate.html)


[Build a Java App with CockroachDB (jOOQ)](build-a-java-app-with-cockroachdb-jooq.html)
[Build a Spring App with CockroachDB (MyBatis)](build-a-spring-app-with-cockroachdb-mybatis.html) || +|| JavaScript/TypeScript | [Sequelize](https://www.npmjs.com/package/sequelize)


[TypeORM](https://www.npmjs.com/package/typeorm) | v6.0.5
(use latest version of CockroachDB adapter)
0.3.17 {% comment %}remove-unsafe-crdb-setting{% endcomment %} | Full


Full | [`sequelize-cockroachdb`](https://www.npmjs.com/package/sequelize-cockroachdb)


N/A | [Build a Node.js App with CockroachDB (Sequelize)](build-a-nodejs-app-with-cockroachdb-sequelize.html)


[Build a TypeScript App with CockroachDB (TypeORM)](build-a-typescript-app-with-cockroachdb.html) || +|| Ruby | [ActiveRecord](https://rubygems.org/gems/activerecord)
[RGeo/RGeo-ActiveRecord](https://github.com/cockroachdb/activerecord-cockroachdb-adapter#working-with-spatial-data) | 8.1.0
(use latest version of CockroachDB adapter) | Full | [`activerecord-cockroachdb-adapter`](https://rubygems.org/gems/activerecord-cockroachdb-adapter)
(includes client-side transaction retry handling) | [Build a Ruby App with CockroachDB (ActiveRecord)](build-a-ruby-app-with-cockroachdb-activerecord.html) || +|| Python | [Django](https://pypi.org/project/Django/)
(including [GeoDjango](https://docs.djangoproject.com/en/3.1/ref/contrib/gis/))
[peewee](https://github.com/coleifer/peewee/)
[SQLAlchemy](https://www.sqlalchemy.org/) | 4.1.x
(use latest version of CockroachDB adapter)

3.13.3
0.7.13
1.4.17
(use latest version of CockroachDB adapter) | Full


Full
Full
Full | [`django-cockroachdb`](https://pypi.org/project/django-cockroachdb/)


N/A
N/A
[`sqlalchemy-cockroachdb`](https://pypi.org/project/sqlalchemy-cockroachdb)
(includes client-side transaction retry handling) | [Build a Python App with CockroachDB (Django)](build-a-python-app-with-cockroachdb-django.html)


N/A (See [peewee docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cockroach-database).)
[Build a Python App with CockroachDB (SQLAlchemy)](build-a-python-app-with-cockroachdb-sqlalchemy.html) | ## Application frameworks diff --git a/src/current/_includes/v20.2/orchestration/kubernetes-prometheus-alertmanager.md b/src/current/_includes/v20.2/orchestration/kubernetes-prometheus-alertmanager.md index 9f7fbd4136e..707dfdb52c6 100644 --- a/src/current/_includes/v20.2/orchestration/kubernetes-prometheus-alertmanager.md +++ b/src/current/_includes/v20.2/orchestration/kubernetes-prometheus-alertmanager.md @@ -90,7 +90,7 @@ If you're on Hosted GKE, before starting, make sure the email address associated prometheus-operator 1/1 1 1 27s ~~~ -4. Use our [`prometheus.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/prometheus.yaml) file to create the various objects necessary to run a Prometheus instance: +4. Use our [`prometheus.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/prometheus.yaml) file to create the various objects necessary to run a Prometheus instance: {{site.data.alerts.callout_info}} By default, this manifest uses the secret name generated by the CockroachDB Kubernetes Operator. If you generated your own certificates and keys when starting CockroachDB, be sure that `ca.secret.name` matches the name of the node secret you created. 
@@ -99,7 +99,7 @@ If you're on Hosted GKE, before starting, make sure the email address associated {% include copy-clipboard.html %} ~~~ shell $ kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/prometheus.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/prometheus.yaml ~~~ ~~~ @@ -137,14 +137,14 @@ If you're on Hosted GKE, before starting, make sure the email address associated ### Configure Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. +Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. -1. Download our alertmanager-config.yaml configuration file: +1. Download our alertmanager-config.yaml configuration file: {% include copy-clipboard.html %} ~~~ shell $ curl -OOOOOOOOO \ - https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager-config.yaml + https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager-config.yaml ~~~ 2. 
Edit the `alertmanager-config.yaml` file to [specify the desired receivers for notifications](https://prometheus.io/docs/alerting/configuration/#receiver). Initially, the file contains a placeholder web hook. @@ -174,12 +174,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen The name of the secret, `alertmanager-cockroachdb`, must match the name used in the `alertmanager.yaml` file. If they differ, the Alertmanager instance will start without configuration, and nothing will happen. {{site.data.alerts.end}} -4. Use our [`alertmanager.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: +4. Use our [`alertmanager.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: {% include copy-clipboard.html %} ~~~ shell $ kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml ~~~ ~~~ @@ -204,12 +204,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen Alertmanager -7. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml): +7. 
Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml): {% include copy-clipboard.html %} ~~~ shell $ kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alert-rules.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml ~~~ ~~~ diff --git a/src/current/_includes/v20.2/orchestration/start-cockroachdb-helm-secure.md b/src/current/_includes/v20.2/orchestration/start-cockroachdb-helm-secure.md index 826e5ae33f2..203582ba82b 100644 --- a/src/current/_includes/v20.2/orchestration/start-cockroachdb-helm-secure.md +++ b/src/current/_includes/v20.2/orchestration/start-cockroachdb-helm-secure.md @@ -3,7 +3,7 @@ The CockroachDB Helm chart is undergoing maintenance for compatibility with Kube {{site.data.alerts.end}} {{site.data.alerts.callout_info}} -Secure CockroachDB deployments on Amazon EKS via Helm are [not yet supported](https://github.com/cockroachdb/cockroach/issues/38847). +Secure CockroachDB deployments on Amazon EKS via Helm are not yet supported. {{site.data.alerts.end}} 1. [Install the Helm client](https://helm.sh/docs/intro/install) (version 3.0 or higher) and add the `cockroachdb` chart repository: diff --git a/src/current/_includes/v20.2/orchestration/start-cockroachdb-insecure.md b/src/current/_includes/v20.2/orchestration/start-cockroachdb-insecure.md index 695d0832fce..ba581071373 100644 --- a/src/current/_includes/v20.2/orchestration/start-cockroachdb-insecure.md +++ b/src/current/_includes/v20.2/orchestration/start-cockroachdb-insecure.md @@ -1,10 +1,10 @@ -1. 
From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it. +1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it. - Download [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml): + Download [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml): {% include copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml ~~~ {{site.data.alerts.callout_danger}} @@ -40,11 +40,11 @@ Alternatively, if you'd rather start with a configuration file that has been customized for performance: - 1. Download our [performance version of `cockroachdb-statefulset-insecure.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml): + 1. 
Download our [performance version of `cockroachdb-statefulset-insecure.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml): {% include copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml ~~~ 2. Modify the file wherever there is a `TODO` comment. @@ -85,12 +85,12 @@ pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s ~~~ -4. Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: +4. Use our [`cluster-init.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: {% include copy-clipboard.html %} ~~~ shell $ kubectl create \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml ~~~ ~~~ diff --git a/src/current/_includes/v20.2/orchestration/start-cockroachdb-local-insecure.md b/src/current/_includes/v20.2/orchestration/start-cockroachdb-local-insecure.md index bebb6eb3062..c22f405666e 100644 --- a/src/current/_includes/v20.2/orchestration/start-cockroachdb-local-insecure.md +++ b/src/current/_includes/v20.2/orchestration/start-cockroachdb-local-insecure.md @@ -1,8 +1,8 @@ -1. 
From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it: +1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it: {% include copy-clipboard.html %} ~~~ shell - $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml + $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml ~~~ ~~~ @@ -41,12 +41,12 @@ pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s ~~~ -4. Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: +4. 
Use our [`cluster-init.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: {% include copy-clipboard.html %} ~~~ shell $ kubectl create \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml ~~~ ~~~ diff --git a/src/current/_includes/v20.2/orchestration/start-cockroachdb-secure.md b/src/current/_includes/v20.2/orchestration/start-cockroachdb-secure.md index 415e16323fc..00f4f929d25 100644 --- a/src/current/_includes/v20.2/orchestration/start-cockroachdb-secure.md +++ b/src/current/_includes/v20.2/orchestration/start-cockroachdb-secure.md @@ -1,10 +1,10 @@ #### Set up configuration file -1. Download and modify our [StatefulSet configuration](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml): +1. Download and modify our [StatefulSet configuration](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml ~~~ 1. Allocate CPU and memory resources to CockroachDB on each pod. These settings should be appropriate for your workload. For more context on provisioning CPU and memory, see the [Production Checklist](recommended-production-settings.html#hardware). 
diff --git a/src/current/_includes/v20.2/orchestration/test-cluster-secure.md b/src/current/_includes/v20.2/orchestration/test-cluster-secure.md index b162949a7b5..b745ff3b529 100644 --- a/src/current/_includes/v20.2/orchestration/test-cluster-secure.md +++ b/src/current/_includes/v20.2/orchestration/test-cluster-secure.md @@ -30,7 +30,7 @@ To use the built-in SQL client, you need to launch a pod that runs indefinitely {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/client.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/client.yaml ~~~ ~~~ @@ -72,14 +72,14 @@ To use the built-in SQL client, you need to launch a pod that runs indefinitely
To use the built-in SQL client, you need to launch a pod that runs indefinitely with the `cockroach` binary inside it, get a shell into the pod, and then start the built-in SQL client. -1. From your local workstation, use our [`client-secure.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/client-secure.yaml) file to launch a pod and keep it running indefinitely. +1. From your local workstation, use our [`client-secure.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/client-secure.yaml) file to launch a pod and keep it running indefinitely. 1. Download the file: {% include_cached copy-clipboard.html %} ~~~ shell $ curl -OOOOOOOOO \ - https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/client-secure.yaml + https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/client-secure.yaml ~~~ 1. In the file, change `serviceAccountName: cockroachdb` to `serviceAccountName: my-release-cockroachdb`. diff --git a/src/current/_includes/v20.2/sql/savepoints-and-high-priority-transactions.md b/src/current/_includes/v20.2/sql/savepoints-and-high-priority-transactions.md index 4b77f2dd561..d0085eedc76 100644 --- a/src/current/_includes/v20.2/sql/savepoints-and-high-priority-transactions.md +++ b/src/current/_includes/v20.2/sql/savepoints-and-high-priority-transactions.md @@ -1 +1 @@ -[`ROLLBACK TO SAVEPOINT`](rollback-transaction.html#rollback-a-nested-transaction) (for either regular savepoints or "restart savepoints" defined with `cockroach_restart`) causes a "feature not supported" error after a DDL statement in a [`HIGH PRIORITY` transaction](transactions.html#transaction-priorities), in order to avoid a transaction deadlock. For more information, see GitHub issue [#46414](https://www.github.com/cockroachdb/cockroach/issues/46414). 
+[`ROLLBACK TO SAVEPOINT`](rollback-transaction.html#rollback-a-nested-transaction) (for either regular savepoints or "restart savepoints" defined with `cockroach_restart`) causes a "feature not supported" error after a DDL statement in a [`HIGH PRIORITY` transaction](transactions.html#transaction-priorities), in order to avoid a transaction deadlock. For more information, see GitHub issue #46414. diff --git a/src/current/_includes/v23.1/backward-incompatible/alpha.1.md b/src/current/_includes/v23.1/backward-incompatible/alpha.1.md index d42f8070fb9..2b1d30e0e28 100644 --- a/src/current/_includes/v23.1/backward-incompatible/alpha.1.md +++ b/src/current/_includes/v23.1/backward-incompatible/alpha.1.md @@ -1,15 +1,15 @@ -- CockroachDB no longer performs environment variable expansion in the parameter `--certs-dir`. Uses like `--certs-dir='$HOME/path'` (expansion by CockroachDB) can be replaced by `--certs-dir="$HOME/path"` (expansion by the Unix shell). [#81298][#81298] -- In the Cockroach CLI, [`BOOL` values](../v23.1/bool.html) are now formatted as `t` or `f` instead of `True` or `False`. [#81943][#81943] -- Removed the `cockroach quit` command. It has been deprecated since v20.1. To [shut down a node](../v23.1/node-shutdown.html) gracefully, send a `SIGTERM` signal to it. [#82988][#82988] -- Added a cluster version to allow the [Pebble storage engine](../v23.1/architecture/storage-layer.html#pebble) to recombine certain SSTables (specifically, user keys that are split across multiple files in a level of the [log-structured merge-tree](../v23.1/architecture/storage-layer.html#log-structured-merge-trees)). Recombining the split user keys is required for supporting the range keys feature. The migration to recombine the SSTables is expected to be short (split user keys are rare in practice), but will block subsequent migrations until all tables have been recombined. The `storage.marked-for-compaction-files` time series metric can show the progress of the migration. 
[#84887][#84887] +- CockroachDB no longer performs environment variable expansion in the parameter `--certs-dir`. Uses like `--certs-dir='$HOME/path'` (expansion by CockroachDB) can be replaced by `--certs-dir="$HOME/path"` (expansion by the Unix shell). #81298 +- In the Cockroach CLI, [`BOOL` values](../v23.1/bool.html) are now formatted as `t` or `f` instead of `True` or `False`. #81943 +- Removed the `cockroach quit` command. It has been deprecated since v20.1. To [shut down a node](../v23.1/node-shutdown.html) gracefully, send a `SIGTERM` signal to it. #82988 +- Added a cluster version to allow the [Pebble storage engine](../v23.1/architecture/storage-layer.html#pebble) to recombine certain SSTables (specifically, user keys that are split across multiple files in a level of the [log-structured merge-tree](../v23.1/architecture/storage-layer.html#log-structured-merge-trees)). Recombining the split user keys is required for supporting the range keys feature. The migration to recombine the SSTables is expected to be short (split user keys are rare in practice), but will block subsequent migrations until all tables have been recombined. The `storage.marked-for-compaction-files` time series metric can show the progress of the migration. #84887 - Using a single TCP port listener for both RPC (node-node) and SQL client connections is now deprecated. This capability **will be removed** in the next version of CockroachDB. Instead, make one of the following configuration changes to your CockroachDB deployment: - Preferred: keep port `26257` for SQL, and allocate a new port, e.g., `26357`, for node-node RPC connections. For example, you might configure a node with the flags `--listen-addr=:26357 --sql-addr=:26257`, where subsequent nodes seeking to join would then use the flag `--join=othernode:26357,othernode:26257`. This will become the default configuration in the next version of CockroachDB. 
When using this mode of operation, care should be taken to use a `--join` flag that includes both the previous and new port numbers for other nodes, so that no network partition occurs during the upgrade. - - Optional: keep port `26257` for RPC, and allocate a new port, e.g., `26357`, for SQL connections. For example, you might configure a node with the flags `--listen-addr=:26257 --sql-addr=:26357`. When using this mode of operation, the `--join` flags do not need to be modified. However, SQL client apps or the SQL load balancer configuration (when in use) must be updated to use the new SQL port number. [#85671][#85671] -- If no `nullif` option is specified while using [`IMPORT CSV`](../v23.1/import.html), then a zero-length string in the input is now treated as `NULL`. The quoted empty string in the input is treated as an empty string. Similarly, if `nullif` is specified, then an unquoted value is treated as `NULL`, and a quoted value is treated as that string. These changes were made to make `IMPORT CSV` behave more similarly to `COPY CSV`. If the previous behavior (i.e., treating either quoted or unquoted values that match the `nullif` setting as `NULL`) is desired, you can use the new `allow_quoted_null` option in the `IMPORT` statement. [#84487][#84487] -- [`COPY FROM`](../v23.1/copy.html) operations are now atomic by default instead of being segmented into 100 row transactions. Set the `copy_from_atomic_enabled` session setting to `false` for the previous behavior. [#85986][#85986] -- The `GRANT` privilege has been removed and replaced by the more granular [`WITH GRANT OPTION`]({% link v23.1/grant.md %}#grant-privileges-with-the-option-to-grant-to-others), which provides control over which privileges are allowed to be granted. [#81310][#81310] -- Removed the ability to cast `int`, `int2`, and `int8` to a `0` length `BIT` or `VARBIT`. [#81266][#81266] -- Removed the deprecated `GRANT` privilege. 
[#81310][#81310] -- Removed the `ttl_automatic_column` storage parameter. The `crdb_internal_expiration` column is created when `ttl_expire_after` is set and removed when `ttl_expire_after` is reset. [#83134][#83134] -- Removed the byte string parameter in the `crdb_internal.schedule_sql_stats_compaction` function. [#82560][#82560] -- Changed the default value of the `enable_implicit_transaction_for_batch_statements` to `true`. This means that a [batch of statements]({% link v23.1/transactions.md %}#batched-statements) sent in one string separated by semicolons is treated as an implicit transaction. [#76834][#76834] + - Optional: keep port `26257` for RPC, and allocate a new port, e.g., `26357`, for SQL connections. For example, you might configure a node with the flags `--listen-addr=:26257 --sql-addr=:26357`. When using this mode of operation, the `--join` flags do not need to be modified. However, SQL client apps or the SQL load balancer configuration (when in use) must be updated to use the new SQL port number. #85671 +- If no `nullif` option is specified while using [`IMPORT CSV`](../v23.1/import.html), then a zero-length string in the input is now treated as `NULL`. The quoted empty string in the input is treated as an empty string. Similarly, if `nullif` is specified, then an unquoted value is treated as `NULL`, and a quoted value is treated as that string. These changes were made to make `IMPORT CSV` behave more similarly to `COPY CSV`. If the previous behavior (i.e., treating either quoted or unquoted values that match the `nullif` setting as `NULL`) is desired, you can use the new `allow_quoted_null` option in the `IMPORT` statement. #84487 +- [`COPY FROM`](../v23.1/copy.html) operations are now atomic by default instead of being segmented into 100 row transactions. Set the `copy_from_atomic_enabled` session setting to `false` for the previous behavior. 
#85986 +- The `GRANT` privilege has been removed and replaced by the more granular [`WITH GRANT OPTION`]({% link v23.1/grant.md %}#grant-privileges-with-the-option-to-grant-to-others), which provides control over which privileges are allowed to be granted. #81310 +- Removed the ability to cast `int`, `int2`, and `int8` to a `0` length `BIT` or `VARBIT`. #81266 +- Removed the deprecated `GRANT` privilege. #81310 +- Removed the `ttl_automatic_column` storage parameter. The `crdb_internal_expiration` column is created when `ttl_expire_after` is set and removed when `ttl_expire_after` is reset. #83134 +- Removed the byte string parameter in the `crdb_internal.schedule_sql_stats_compaction` function. #82560 +- Changed the default value of the `enable_implicit_transaction_for_batch_statements` to `true`. This means that a [batch of statements]({% link v23.1/transactions.md %}#batched-statements) sent in one string separated by semicolons is treated as an implicit transaction. #76834 diff --git a/src/current/_includes/v23.1/cdc/avro-udt-composite.md b/src/current/_includes/v23.1/cdc/avro-udt-composite.md index 33e621169a2..3c5caaf3ec1 100644 --- a/src/current/_includes/v23.1/cdc/avro-udt-composite.md +++ b/src/current/_includes/v23.1/cdc/avro-udt-composite.md @@ -1 +1 @@ -A changefeed in [Avro format]({% link {{ page.version.version }}/changefeed-messages.md %}#avro) will not be able to serialize [user-defined composite (tuple) types](create-type.html). [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/102903) \ No newline at end of file +A changefeed in [Avro format]({% link {{ page.version.version }}/changefeed-messages.md %}#avro) will not be able to serialize [user-defined composite (tuple) types](create-type.html). 
Tracking GitHub Issue \ No newline at end of file diff --git a/src/current/_includes/v23.1/cdc/csv-udt-composite.md b/src/current/_includes/v23.1/cdc/csv-udt-composite.md index 1cf920220d0..acd84838417 100644 --- a/src/current/_includes/v23.1/cdc/csv-udt-composite.md +++ b/src/current/_includes/v23.1/cdc/csv-udt-composite.md @@ -1 +1 @@ -A changefeed emitting [CSV]({% link {{ page.version.version }}/changefeed-messages.md %}#csv) will include `AS` labels in the message format when the changefeed serializes a [user-defined composite type]({% link {{ page.version.version }}/create-type.md %}). [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/102905) \ No newline at end of file +A changefeed emitting [CSV]({% link {{ page.version.version }}/changefeed-messages.md %}#csv) will include `AS` labels in the message format when the changefeed serializes a [user-defined composite type]({% link {{ page.version.version }}/create-type.md %}). Tracking GitHub Issue \ No newline at end of file diff --git a/src/current/_includes/v23.1/essential-metrics.md b/src/current/_includes/v23.1/essential-metrics.md index 83bbc469c57..7a05c06e1dc 100644 --- a/src/current/_includes/v23.1/essential-metrics.md +++ b/src/current/_includes/v23.1/essential-metrics.md @@ -181,4 +181,4 @@ If [Row-Level TTL]({% link {{ page.version.version }}/row-level-ttl.md %}) is co - [Custom Chart Debug Page]({% link {{ page.version.version }}/ui-custom-chart-debug-page.md %}) - [Cluster API]({% link {{ page.version.version }}/cluster-api.md %}) - [Events to alert on]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#events-to-alert-on) -- [CockroachDB Source Code - DB Console metrics to graphs mappings (in *.tsx files)](https://github.com/cockroachdb/cockroach/tree/master/pkg/ui/workspaces/db-console/src/views/cluster/containers/nodeGraphs/dashboards) \ No newline at end of file +- CockroachDB Source Code - DB Console metrics to graphs mappings (in *.tsx files) \ No newline 
at end of file diff --git a/src/current/_includes/v23.1/faq/what-is-crdb.md b/src/current/_includes/v23.1/faq/what-is-crdb.md index 28857ed61fa..ee04ca10506 100644 --- a/src/current/_includes/v23.1/faq/what-is-crdb.md +++ b/src/current/_includes/v23.1/faq/what-is-crdb.md @@ -1,6 +1,6 @@ CockroachDB is a [distributed SQL](https://www.cockroachlabs.com/blog/what-is-distributed-sql/) database built on a transactional and strongly-consistent key-value store. It **scales** horizontally; **survives** disk, machine, rack, and even datacenter failures with minimal latency disruption and no manual intervention; supports **strongly-consistent** ACID transactions; and provides a familiar **SQL** API for structuring, manipulating, and querying data. -CockroachDB is inspired by Google's [Spanner](http://research.google.com/archive/spanner.html) and [F1](http://research.google.com/pubs/pub38125.html) technologies, and the [source code](https://github.com/cockroachdb/cockroach) is freely available. +CockroachDB is inspired by Google's [Spanner](http://research.google.com/archive/spanner.html) and [F1](http://research.google.com/pubs/pub38125.html) technologies, and the source code is freely available. {{site.data.alerts.callout_success}} For a deeper dive into CockroachDB's capabilities and how it fits into the database landscape, take the free [**Intro to Distributed SQL and CockroachDB**](https://university.cockroachlabs.com/courses/course-v1:crl+intro-to-distributed-sql-and-cockroachdb+self-paced/about) course on Cockroach University. 
diff --git a/src/current/_includes/v23.1/known-limitations/alter-changefeed-cdc-queries.md b/src/current/_includes/v23.1/known-limitations/alter-changefeed-cdc-queries.md index cbfa5a818f3..6c02bffd16b 100644 --- a/src/current/_includes/v23.1/known-limitations/alter-changefeed-cdc-queries.md +++ b/src/current/_includes/v23.1/known-limitations/alter-changefeed-cdc-queries.md @@ -1 +1 @@ -{% if page.name == "alter-changefeed.md" %} `ALTER CHANGEFEED` {% else %} [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) {% endif %} is not fully supported with changefeeds that use {% if page.name == "cdc-queries.md" %} CDC queries. {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}). {% endif %} You can alter the options that a changefeed uses, but you cannot alter the changefeed target tables. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/83033) \ No newline at end of file +{% if page.name == "alter-changefeed.md" %} `ALTER CHANGEFEED` {% else %} [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) {% endif %} is not fully supported with changefeeds that use {% if page.name == "cdc-queries.md" %} CDC queries. {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}). {% endif %} You can alter the options that a changefeed uses, but you cannot alter the changefeed target tables. 
Tracking GitHub issue \ No newline at end of file diff --git a/src/current/_includes/v23.1/known-limitations/cdc-execution-locality.md b/src/current/_includes/v23.1/known-limitations/cdc-execution-locality.md index 67f80c8e31d..45b96995cb2 100644 --- a/src/current/_includes/v23.1/known-limitations/cdc-execution-locality.md +++ b/src/current/_includes/v23.1/known-limitations/cdc-execution-locality.md @@ -1 +1 @@ -Changefeeds that use the [`execution_locality` option]({% link {{ page.version.version }}/changefeeds-in-multi-region-deployments.md %}#run-a-changefeed-job-by-locality) set to a [secondary region]({% link {{ page.version.version }}/multiregion-overview.md %}#secondary-regions) could create a plan that assigns most of the ranges to an [aggregator]({% link {{ page.version.version }}/how-does-an-enterprise-changefeed-work.md %}) on the coordinator node. This leads to an unbalanced plan and slow changefeed progress, particularly when the table is large and has many ranges. [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/124822) \ No newline at end of file +Changefeeds that use the [`execution_locality` option]({% link {{ page.version.version }}/changefeeds-in-multi-region-deployments.md %}#run-a-changefeed-job-by-locality) set to a [secondary region]({% link {{ page.version.version }}/multiregion-overview.md %}#secondary-regions) could create a plan that assigns most of the ranges to an [aggregator]({% link {{ page.version.version }}/how-does-an-enterprise-changefeed-work.md %}) on the coordinator node. This leads to an unbalanced plan and slow changefeed progress, particularly when the table is large and has many ranges. 
Tracking GitHub Issue \ No newline at end of file diff --git a/src/current/_includes/v23.1/known-limitations/cdc-queries-column-families.md b/src/current/_includes/v23.1/known-limitations/cdc-queries-column-families.md index a68a7949824..fd6a60b41c7 100644 --- a/src/current/_includes/v23.1/known-limitations/cdc-queries-column-families.md +++ b/src/current/_includes/v23.1/known-limitations/cdc-queries-column-families.md @@ -1 +1 @@ -Creating a changefeed with {% if page.name == "cdc-queries.md" %} CDC queries {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}) {% endif %} on tables with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %} is not supported. [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/127761) \ No newline at end of file +Creating a changefeed with {% if page.name == "cdc-queries.md" %} CDC queries {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}) {% endif %} on tables with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %} is not supported. Tracking GitHub Issue \ No newline at end of file diff --git a/src/current/_includes/v23.1/known-limitations/cdc-queries.md b/src/current/_includes/v23.1/known-limitations/cdc-queries.md index 1fcc6988f1a..5ca18d6696d 100644 --- a/src/current/_includes/v23.1/known-limitations/cdc-queries.md +++ b/src/current/_includes/v23.1/known-limitations/cdc-queries.md @@ -3,5 +3,5 @@ - The following are not permitted in CDC queries: - [Volatile functions]({% link {{ page.version.version }}/functions-and-operators.md %}#function-volatility). - Sub-select queries. 
- - [Aggregate]({% link {{ page.version.version }}/functions-and-operators.md %}#aggregate-functions) and [window functions]({% link {{ page.version.version }}/window-functions.md %}) (i.e., functions operating over many rows). [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/98237) -- `delete` changefeed events will only contain the [primary key]({% link {{ page.version.version }}/primary-key.md %}). All other columns will emit as `NULL`. See [Capture delete messages]({% link {{ page.version.version }}/cdc-queries.md %}#capture-delete-messages) for detail on running a CDC query that emits the deleted values. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/83835) + - [Aggregate]({% link {{ page.version.version }}/functions-and-operators.md %}#aggregate-functions) and [window functions]({% link {{ page.version.version }}/window-functions.md %}) (i.e., functions operating over many rows). Tracking GitHub issue +- `delete` changefeed events will only contain the [primary key]({% link {{ page.version.version }}/primary-key.md %}). All other columns will emit as `NULL`. See [Capture delete messages]({% link {{ page.version.version }}/cdc-queries.md %}#capture-delete-messages) for detail on running a CDC query that emits the deleted values. Tracking GitHub issue diff --git a/src/current/_includes/v23.1/known-limitations/cdc.md b/src/current/_includes/v23.1/known-limitations/cdc.md index 008fdb46867..89a1aa6165f 100644 --- a/src/current/_includes/v23.1/known-limitations/cdc.md +++ b/src/current/_includes/v23.1/known-limitations/cdc.md @@ -1,8 +1,8 @@ -- Changefeed target options are limited to tables and [column families]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}). 
[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/73435) +- Changefeed target options are limited to tables and [column families]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}). Tracking GitHub Issue - {% include {{page.version.version}}/cdc/kafka-vpc-limitation.md %} -- Webhook sinks only support HTTPS. Use the [`insecure_tls_skip_verify`]({% link {{ page.version.version }}/create-changefeed.md %}#tls-skip-verify) parameter when testing to disable certificate verification; however, this still requires HTTPS and certificates. [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/73431) -- Formats for changefeed messages are not supported by all changefeed sinks. Refer to the [Changefeed Sinks]({% link {{ page.version.version }}/changefeed-sinks.md %}) page for details on compatible formats with each sink and the [`format`]({% link {{ page.version.version }}/create-changefeed.md %}) option to specify a changefeed message format. [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/73432) -- Using the [`split_column_families`]({% link {{ page.version.version }}/create-changefeed.md %}#split-column-families) and [`resolved`]({% link {{ page.version.version }}/create-changefeed.md %}#resolved-option) options on the same changefeed will cause an error when using the following [sinks](changefeed-sinks.html): Kafka and Google Cloud Pub/Sub. Instead, use the individual `FAMILY` keyword to specify column families when creating a changefeed. [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/79452) +- Webhook sinks only support HTTPS. Use the [`insecure_tls_skip_verify`]({% link {{ page.version.version }}/create-changefeed.md %}#tls-skip-verify) parameter when testing to disable certificate verification; however, this still requires HTTPS and certificates. Tracking GitHub Issue +- Formats for changefeed messages are not supported by all changefeed sinks. 
Refer to the [Changefeed Sinks]({% link {{ page.version.version }}/changefeed-sinks.md %}) page for details on compatible formats with each sink and the [`format`]({% link {{ page.version.version }}/create-changefeed.md %}) option to specify a changefeed message format. Tracking GitHub Issue +- Using the [`split_column_families`]({% link {{ page.version.version }}/create-changefeed.md %}#split-column-families) and [`resolved`]({% link {{ page.version.version }}/create-changefeed.md %}#resolved-option) options on the same changefeed will cause an error when using the following [sinks](changefeed-sinks.html): Kafka and Google Cloud Pub/Sub. Instead, use the individual `FAMILY` keyword to specify column families when creating a changefeed. Tracking GitHub Issue - {% include {{page.version.version}}/cdc/types-udt-composite-general.md %} The following limitations apply: - {% include {{page.version.version}}/cdc/avro-udt-composite.md %} - {% include {{page.version.version}}/cdc/csv-udt-composite.md %} \ No newline at end of file diff --git a/src/current/_includes/v23.1/known-limitations/changefeed-column-family-message.md b/src/current/_includes/v23.1/known-limitations/changefeed-column-family-message.md index b8aafbe11dc..0c61edd8d82 100644 --- a/src/current/_includes/v23.1/known-limitations/changefeed-column-family-message.md +++ b/src/current/_includes/v23.1/known-limitations/changefeed-column-family-message.md @@ -1 +1 @@ -When you create a changefeed on a table with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %}, the changefeed will emit messages per column family in separate streams. 
As a result, [changefeed messages]({% link {{ page.version.version }}/changefeed-messages.md %}) for different column families will arrive at the [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) under separate topics. [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/127736) \ No newline at end of file +When you create a changefeed on a table with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %}, the changefeed will emit messages per column family in separate streams. As a result, [changefeed messages]({% link {{ page.version.version }}/changefeed-messages.md %}) for different column families will arrive at the [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) under separate topics. Tracking GitHub Issue \ No newline at end of file diff --git a/src/current/_includes/v23.1/known-limitations/copy-syntax.md b/src/current/_includes/v23.1/known-limitations/copy-syntax.md index 0c6c89299df..2bff2dca9b3 100644 --- a/src/current/_includes/v23.1/known-limitations/copy-syntax.md +++ b/src/current/_includes/v23.1/known-limitations/copy-syntax.md @@ -2,10 +2,10 @@ CockroachDB does not yet support the following `COPY` syntax: - Various `COPY` options (`FORMAT`, `FREEZE`, `QUOTE`, etc.). - - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/85572) - - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/85573) - - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/85574) + - Tracking GitHub Issue + - Tracking GitHub Issue + - Tracking GitHub Issue - `COPY ... FROM ... WHERE `. 
- - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/54580) + - Tracking GitHub Issue diff --git a/src/current/_includes/v23.1/known-limitations/drop-owned-by-role-limitations.md b/src/current/_includes/v23.1/known-limitations/drop-owned-by-role-limitations.md index cfa718d4e2d..c2cef607345 100644 --- a/src/current/_includes/v23.1/known-limitations/drop-owned-by-role-limitations.md +++ b/src/current/_includes/v23.1/known-limitations/drop-owned-by-role-limitations.md @@ -1,4 +1,4 @@ -- If the [role]({% link {{ page.version.version }}/security-reference/authorization.md %}#roles) for which you are trying to `DROP OWNED BY` was granted a [system-level privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges) (i.e., using the [`GRANT SYSTEM ...`]({% link {{ page.version.version }}/grant.md %}#grant-system-level-privileges-on-the-entire-cluster) statement), the error shown below will be signalled. The workaround is to use [`SHOW SYSTEM GRANTS FOR {role}`](show-system-grants.html) and then use [`REVOKE SYSTEM ...`](revoke.html#revoke-system-level-privileges-on-the-entire-cluster) for each privilege in the result. For more information about this known limitation, see [cockroachdb/cockroach#88149](https://github.com/cockroachdb/cockroach/issues/88149). +- If the [role]({% link {{ page.version.version }}/security-reference/authorization.md %}#roles) for which you are trying to `DROP OWNED BY` was granted a [system-level privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges) (i.e., using the [`GRANT SYSTEM ...`]({% link {{ page.version.version }}/grant.md %}#grant-system-level-privileges-on-the-entire-cluster) statement), the error shown below will be signalled. 
The workaround is to use [`SHOW SYSTEM GRANTS FOR {role}`](show-system-grants.html) and then use [`REVOKE SYSTEM ...`](revoke.html#revoke-system-level-privileges-on-the-entire-cluster) for each privilege in the result. For more information about this known limitation, see cockroachdb/cockroach#88149. ~~~ ERROR: cannot perform drop owned by if role has synthetic privileges; foo has entries in system.privileges diff --git a/src/current/_includes/v23.1/known-limitations/forecasted-stats-limitations.md b/src/current/_includes/v23.1/known-limitations/forecasted-stats-limitations.md index c8753124a96..b5d03e8ffc5 100644 --- a/src/current/_includes/v23.1/known-limitations/forecasted-stats-limitations.md +++ b/src/current/_includes/v23.1/known-limitations/forecasted-stats-limitations.md @@ -6,4 +6,4 @@ Although [`SHOW STATISTICS WITH FORECAST`]({% link {{ page.version.version }}/show-statistics.md %}#display-forecasted-statistics) shows the settings taking effect immediately, they do not actually take effect until new statistics are collected (as can be verified with [`EXPLAIN`]({% link {{ page.version.version }}/explain.md %})). - As a workaround, disable and enable forecasting at the [cluster]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-clusters) or [table]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-tables) level. This will invalidate the statistics cache and cause these settings to take effect immediately. 
[#123852](https://github.com/cockroachdb/cockroach/issues/123852) \ No newline at end of file + As a workaround, disable and enable forecasting at the [cluster]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-clusters) or [table]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-tables) level. This will invalidate the statistics cache and cause these settings to take effect immediately. #123852 \ No newline at end of file diff --git a/src/current/_includes/v23.1/known-limitations/restore-tables-non-multi-reg.md b/src/current/_includes/v23.1/known-limitations/restore-tables-non-multi-reg.md index 360f93f796f..89b723fdb02 100644 --- a/src/current/_includes/v23.1/known-limitations/restore-tables-non-multi-reg.md +++ b/src/current/_includes/v23.1/known-limitations/restore-tables-non-multi-reg.md @@ -1 +1 @@ -Restoring [`GLOBAL`]({% link {{ page.version.version }}/table-localities.md %}#global-tables) and [`REGIONAL BY TABLE`]({% link {{ page.version.version }}/table-localities.md %}#regional-tables) tables into a **non**-multi-region database is not supported. [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/71502) +Restoring [`GLOBAL`]({% link {{ page.version.version }}/table-localities.md %}#global-tables) and [`REGIONAL BY TABLE`]({% link {{ page.version.version }}/table-localities.md %}#regional-tables) tables into a **non**-multi-region database is not supported. 
Tracking GitHub Issue diff --git a/src/current/_includes/v23.1/known-limitations/restore-udf.md b/src/current/_includes/v23.1/known-limitations/restore-udf.md index 053a3b90df7..ce70aea2383 100644 --- a/src/current/_includes/v23.1/known-limitations/restore-udf.md +++ b/src/current/_includes/v23.1/known-limitations/restore-udf.md @@ -1 +1 @@ -`RESTORE` will not restore a table that references a [UDF]({% link {{ page.version.version }}/user-defined-functions.md %}), unless you skip restoring the function with the {% if page.name == "restore.md" %} [`skip_missing_udfs`](#skip-missing-udfs) {% else %} [`skip_missing_udfs`]({% link {{ page.version.version }}/restore.md %}#skip-missing-udfs) {% endif %} option. Alternatively, take a [database-level backup]({% link {{ page.version.version }}/backup.md %}#back-up-a-database) to include everything needed to restore the table. [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/118195) \ No newline at end of file +`RESTORE` will not restore a table that references a [UDF]({% link {{ page.version.version }}/user-defined-functions.md %}), unless you skip restoring the function with the {% if page.name == "restore.md" %} [`skip_missing_udfs`](#skip-missing-udfs) {% else %} [`skip_missing_udfs`]({% link {{ page.version.version }}/restore.md %}#skip-missing-udfs) {% endif %} option. Alternatively, take a [database-level backup]({% link {{ page.version.version }}/backup.md %}#back-up-a-database) to include everything needed to restore the table. 
Tracking GitHub Issue \ No newline at end of file diff --git a/src/current/_includes/v23.1/known-limitations/set-transaction-no-rollback.md b/src/current/_includes/v23.1/known-limitations/set-transaction-no-rollback.md index c2e78c7d78a..22de1824dc1 100644 --- a/src/current/_includes/v23.1/known-limitations/set-transaction-no-rollback.md +++ b/src/current/_includes/v23.1/known-limitations/set-transaction-no-rollback.md @@ -14,4 +14,4 @@ timezone 3 ~~~ -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/69396) +Tracking GitHub Issue diff --git a/src/current/_includes/v23.1/known-limitations/show-backup-locality-incremental-location.md b/src/current/_includes/v23.1/known-limitations/show-backup-locality-incremental-location.md index 428a78c0da0..db6cb6a6f9e 100644 --- a/src/current/_includes/v23.1/known-limitations/show-backup-locality-incremental-location.md +++ b/src/current/_includes/v23.1/known-limitations/show-backup-locality-incremental-location.md @@ -1 +1 @@ -{% if page.name == "show-backup.md" %}`SHOW BACKUP`{% else %}[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}){% endif %} can display backups taken with the `incremental_location` option **or** for [locality-aware backups]({% link {{ page.version.version }}/take-and-restore-locality-aware-backups.md %}). It will not display backups for locality-aware backups taken with the `incremental_location` option. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/82912). \ No newline at end of file +{% if page.name == "show-backup.md" %}`SHOW BACKUP`{% else %}[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}){% endif %} can display backups taken with the `incremental_location` option **or** for [locality-aware backups]({% link {{ page.version.version }}/take-and-restore-locality-aware-backups.md %}). It will not display backups for locality-aware backups taken with the `incremental_location` option. Tracking GitHub issue. 
\ No newline at end of file diff --git a/src/current/_includes/v23.1/known-limitations/show-backup-symlink.md b/src/current/_includes/v23.1/known-limitations/show-backup-symlink.md index 1316e7d7667..d6d2c0c96a3 100644 --- a/src/current/_includes/v23.1/known-limitations/show-backup-symlink.md +++ b/src/current/_includes/v23.1/known-limitations/show-backup-symlink.md @@ -1 +1 @@ -[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}) does not support listing backups if the [`nodelocal`]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}) storage location is a symlink. [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/70260) \ No newline at end of file +[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}) does not support listing backups if the [`nodelocal`]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}) storage location is a symlink. Tracking GitHub Issue \ No newline at end of file diff --git a/src/current/_includes/v23.1/known-limitations/sql-cursors.md b/src/current/_includes/v23.1/known-limitations/sql-cursors.md index d1c42749661..802f577cfdc 100644 --- a/src/current/_includes/v23.1/known-limitations/sql-cursors.md +++ b/src/current/_includes/v23.1/known-limitations/sql-cursors.md @@ -1,9 +1,9 @@ CockroachDB implements SQL {% if page.name == "known-limitations.md" %} [cursor]({% link {{ page.version.version }}/cursors.md %}) {% else %} cursor {% endif %} support with the following limitations: -- `DECLARE` only supports forward cursors. Reverse cursors created with `DECLARE SCROLL` are not supported. [cockroachdb/cockroach#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- `FETCH` supports forward, relative, and absolute variants, but only for forward cursors. [cockroachdb/cockroach#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- `BINARY CURSOR`, which returns data in the Postgres binary format, is not supported. 
[cockroachdb/cockroach#77099](https://github.com/cockroachdb/cockroach/issues/77099) -- `WITH HOLD`, which allows keeping a cursor open for longer than a transaction by writing its results into a buffer, is accepted as valid syntax within a single transaction but is not supported. It acts as a no-op and does not actually perform the function of `WITH HOLD`, which is to make the cursor live outside its parent transaction. Instead, if you are using `WITH HOLD`, you will be forced to close that cursor within the transaction it was created in. [cockroachdb/cockroach#77101](https://github.com/cockroachdb/cockroach/issues/77101) +- `DECLARE` only supports forward cursors. Reverse cursors created with `DECLARE SCROLL` are not supported. cockroachdb/cockroach#77102 +- `FETCH` supports forward, relative, and absolute variants, but only for forward cursors. cockroachdb/cockroach#77102 +- `BINARY CURSOR`, which returns data in the Postgres binary format, is not supported. cockroachdb/cockroach#77099 +- `WITH HOLD`, which allows keeping a cursor open for longer than a transaction by writing its results into a buffer, is accepted as valid syntax within a single transaction but is not supported. It acts as a no-op and does not actually perform the function of `WITH HOLD`, which is to make the cursor live outside its parent transaction. Instead, if you are using `WITH HOLD`, you will be forced to close that cursor within the transaction it was created in. cockroachdb/cockroach#77101 - This syntax is accepted (but does not have any effect): {% include_cached copy-clipboard.html %} ~~~ sql @@ -19,6 +19,6 @@ CockroachDB implements SQL {% if page.name == "known-limitations.md" %} [cursor] DECLARE test_cur CURSOR WITH HOLD FOR SELECT * FROM foo ORDER BY bar; COMMIT; -- This will fail with an error because CLOSE test_cur was not called inside the transaction. ~~~ -- Scrollable cursor (also known as reverse `FETCH`) is not supported. 
[cockroachdb/cockroach#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- [`SELECT ... FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) with a cursor is not supported. [cockroachdb/cockroach#77103](https://github.com/cockroachdb/cockroach/issues/77103) -- Respect for [`SAVEPOINT`s]({% link {{ page.version.version }}/savepoint.md %}) is not supported. Cursor definitions do not disappear properly if rolled back to a `SAVEPOINT` from before they were created. [cockroachdb/cockroach#77104](https://github.com/cockroachdb/cockroach/issues/77104) +- Scrollable cursor (also known as reverse `FETCH`) is not supported. cockroachdb/cockroach#77102 +- [`SELECT ... FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) with a cursor is not supported. cockroachdb/cockroach#77103 +- Respect for [`SAVEPOINT`s]({% link {{ page.version.version }}/savepoint.md %}) is not supported. Cursor definitions do not disappear properly if rolled back to a `SAVEPOINT` from before they were created. cockroachdb/cockroach#77104 diff --git a/src/current/_includes/v23.1/known-limitations/stats-refresh-upgrade.md b/src/current/_includes/v23.1/known-limitations/stats-refresh-upgrade.md index 3d5a8d26325..3325dfaa53b 100644 --- a/src/current/_includes/v23.1/known-limitations/stats-refresh-upgrade.md +++ b/src/current/_includes/v23.1/known-limitations/stats-refresh-upgrade.md @@ -1 +1 @@ -- The [automatic statistics refresher]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-statistics-refresh-rate) automatically checks whether it needs to refresh statistics for every table in the database upon startup of each node in the cluster. If statistics for a table have not been refreshed in a while, this will trigger collection of statistics for that table. If statistics have been refreshed recently, it will not force a refresh. 
As a result, the automatic statistics refresher does not necessarily perform a refresh of statistics after an [upgrade]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}). This could cause a problem, for example, if the upgrade moves from a version without [histograms]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-histogram-collection) to a version with histograms. To refresh statistics manually, use [`CREATE STATISTICS`](create-statistics.html). [#54816](https://github.com/cockroachdb/cockroach/issues/54816) +- The [automatic statistics refresher]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-statistics-refresh-rate) automatically checks whether it needs to refresh statistics for every table in the database upon startup of each node in the cluster. If statistics for a table have not been refreshed in a while, this will trigger collection of statistics for that table. If statistics have been refreshed recently, it will not force a refresh. As a result, the automatic statistics refresher does not necessarily perform a refresh of statistics after an [upgrade]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}). This could cause a problem, for example, if the upgrade moves from a version without [histograms]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-histogram-collection) to a version with histograms. To refresh statistics manually, use [`CREATE STATISTICS`](create-statistics.html). 
#54816 diff --git a/src/current/_includes/v23.1/known-limitations/userfile-upload-non-recursive.md b/src/current/_includes/v23.1/known-limitations/userfile-upload-non-recursive.md index 19db5fde6a4..c20fc85b310 100644 --- a/src/current/_includes/v23.1/known-limitations/userfile-upload-non-recursive.md +++ b/src/current/_includes/v23.1/known-limitations/userfile-upload-non-recursive.md @@ -1 +1 @@ -- `cockroach userfile upload` does not not currently allow for recursive uploads from a directory. This feature will be present with the `--recursive` flag in future versions. [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/pull/65307) +- `cockroach userfile upload` does not currently allow for recursive uploads from a directory. This feature will be present with the `--recursive` flag in future versions. Tracking GitHub Issue diff --git a/src/current/_includes/v23.1/misc/tooling.md b/src/current/_includes/v23.1/misc/tooling.md index 4ade5aaeb60..a8e414e92f0 100644 --- a/src/current/_includes/v23.1/misc/tooling.md +++ b/src/current/_includes/v23.1/misc/tooling.md @@ -23,7 +23,7 @@ Customers should contact their account team before moving production workloads t Unless explicitly stated, support for a [driver](#drivers) or [data access framework](#data-access-frameworks-e-g-orms) does not include [automatic, client-side transaction retry handling]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#client-side-retry-handling). For client-side transaction retry handling samples, see [Example Apps]({% link {{ page.version.version }}/example-apps.md %}). {{site.data.alerts.end}} -If you encounter problems using CockroachDB with any of the tools listed on this page, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward better support.
+If you encounter problems using CockroachDB with any of the tools listed on this page, please open an issue with details to help us make progress toward better support. For a list of tools supported by the CockroachDB community, see [Third-Party Tools Supported by the Community]({% link {{ page.version.version }}/community-tooling.md %}). @@ -32,23 +32,23 @@ For a list of tools supported by the CockroachDB community, see [Third-Party Too | Language | Driver | Latest tested version | Support level | CockroachDB adapter | Tutorial | |----------+--------+-----------------------+---------------------+---------------------+----------| | C | [libpq](http://www.postgresql.org/docs/13/static/libpq.html)| PostgreSQL 13 | Partial | N/A | N/A | -| C# (.NET) | [Npgsql](https://www.nuget.org/packages/Npgsql/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/npgsql.go ||var npgsqlSupportedTag = "v||"\n\n %} | Full | N/A | [Build a C# App with CockroachDB (Npgsql)](build-a-csharp-app-with-cockroachdb.html) | -| Go | [pgx](https://github.com/jackc/pgx/releases)


[pq](https://github.com/lib/pq) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgx.go ||var supportedPGXTag = "||"\n\n %}
(use latest version of CockroachDB adapter)
{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/libpq.go ||var libPQSupportedTag = "||"\n\n %} | Full


Full | [`crdbpgx`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbpgx)
(includes client-side transaction retry handling)
N/A | [Build a Go App with CockroachDB (pgx)](build-a-go-app-with-cockroachdb.html)


[Build a Go App with CockroachDB (pq)](build-a-go-app-with-cockroachdb-pq.html) | -| Java | [JDBC](https://jdbc.postgresql.org/download/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgjdbc.go ||var supportedPGJDBCTag = "||"\n\n %} | Full | N/A | [Build a Java App with CockroachDB (JDBC)](build-a-java-app-with-cockroachdb.html) | +| C# (.NET) | [Npgsql](https://www.nuget.org/packages/Npgsql/) | 7.0.2 | Full | N/A | [Build a C# App with CockroachDB (Npgsql)](build-a-csharp-app-with-cockroachdb.html) | +| Go | [pgx](https://github.com/jackc/pgx/releases)


[pq](https://github.com/lib/pq) | v5.3.1
(use latest version of CockroachDB adapter)
v1.10.5 | Full


Full | [`crdbpgx`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbpgx)
(includes client-side transaction retry handling)
N/A | [Build a Go App with CockroachDB (pgx)](build-a-go-app-with-cockroachdb.html)


[Build a Go App with CockroachDB (pq)](build-a-go-app-with-cockroachdb-pq.html) | +| Java | [JDBC](https://jdbc.postgresql.org/download/) | REL42.7.3 | Full | N/A | [Build a Java App with CockroachDB (JDBC)](build-a-java-app-with-cockroachdb.html) | | JavaScript | [pg](https://www.npmjs.com/package/pg) | 8.2.1 | Full | N/A | [Build a Node.js App with CockroachDB (pg)](build-a-nodejs-app-with-cockroachdb.html) | -| Python | [psycopg3](https://www.psycopg.org/psycopg3/docs/)


[psycopg2](https://www.psycopg.org/docs/install.html)


[asyncpg](https://magicstack.github.io/asyncpg/current/index.html) | 3.0.16


2.8.6


{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/asyncpg.go || var asyncpgSupportedTag = "||"\n\n %} | Full


Full


Partial | N/A


N/A


N/A | [Build a Python App with CockroachDB (psycopg3)](build-a-python-app-with-cockroachdb-psycopg3.html)


[Build a Python App with CockroachDB (psycopg2)](build-a-python-app-with-cockroachdb.html)


[Build a Python App with CockroachDB (asyncpg)](build-a-python-app-with-cockroachdb-asyncpg.html) | -| Ruby | [pg](https://rubygems.org/gems/pg) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/ruby_pg.go ||var rubyPGVersion = "||"\n\n %} | Full | N/A | [Build a Ruby App with CockroachDB (pg)](build-a-ruby-app-with-cockroachdb.html) | +| Python | [psycopg3](https://www.psycopg.org/psycopg3/docs/)


[psycopg2](https://www.psycopg.org/docs/install.html)


[asyncpg](https://magicstack.github.io/asyncpg/current/index.html) | 3.0.16


2.8.6


v0.24.0 | Full


Full


Partial | N/A


N/A


N/A | [Build a Python App with CockroachDB (psycopg3)](build-a-python-app-with-cockroachdb-psycopg3.html)


[Build a Python App with CockroachDB (psycopg2)](build-a-python-app-with-cockroachdb.html)


[Build a Python App with CockroachDB (asyncpg)](build-a-python-app-with-cockroachdb-asyncpg.html) | +| Ruby | [pg](https://rubygems.org/gems/pg) | v1.4.6 | Full | N/A | [Build a Ruby App with CockroachDB (pg)](build-a-ruby-app-with-cockroachdb.html) | | Rust | [rust-postgres](https://github.com/sfackler/rust-postgres) | 0.19.2 | Partial | N/A | [Build a Rust App with CockroachDB]({% link {{ page.version.version }}/build-a-rust-app-with-cockroachdb.md %}) | ## Data access frameworks (e.g., ORMs) | Language | Framework | Latest tested version | Support level | CockroachDB adapter | Tutorial | |----------+-----------+-----------------------+---------------+---------------------+----------| -| Go | [GORM](https://github.com/jinzhu/gorm/releases)


[go-pg](https://github.com/go-pg/pg)
[upper/db](https://github.com/upper/db) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gorm.go ||var gormSupportedTag = "||"\n\n %}


{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gopg.go ||var gopgSupportedTag = "||"\n\n %}
v4 | Full


Full
Full | [`crdbgorm`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbgorm)
(includes client-side transaction retry handling)
N/A
N/A | [Build a Go App with CockroachDB (GORM)](build-a-go-app-with-cockroachdb-gorm.html)


N/A
[Build a Go App with CockroachDB (upper/db)](build-a-go-app-with-cockroachdb-upperdb.html) | -| Java | [Hibernate](https://hibernate.org/orm/)
(including [Hibernate Spatial](https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html#spatial))
[jOOQ](https://www.jooq.org/)
[MyBatis](https://mybatis.org/mybatis-3/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/release-23.1/pkg/cmd/roachtest/tests/hibernate.go ||var supportedHibernateTag = "||"\n\n %} (must be at least 5.4.19)


3.13.2 (must be at least 3.13.0)
3.5.5| Full


Full
Full | N/A


N/A
N/A | [Build a Java App with CockroachDB (Hibernate)](build-a-java-app-with-cockroachdb-hibernate.html)


[Build a Java App with CockroachDB (jOOQ)](build-a-java-app-with-cockroachdb-jooq.html)
[Build a Spring App with CockroachDB (MyBatis)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) | -| JavaScript/TypeScript | [Sequelize](https://www.npmjs.com/package/sequelize)


[Knex.js](https://knexjs.org/)
[Prisma](https://prisma.io)
[TypeORM](https://www.npmjs.com/package/typeorm) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/sequelize.go ||var supportedSequelizeCockroachDBRelease = "||"\n\n %}
(use latest version of CockroachDB adapter)
{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/knex.go ||const supportedKnexTag = "||"\n\n %}
3.14.0
0.3.17 {% comment %}{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/typeorm.go ||const supportedTypeORMRelease = "||"\n %}{% endcomment %} | Full


Full
Full
Full | [`sequelize-cockroachdb`](https://www.npmjs.com/package/sequelize-cockroachdb)


N/A
N/A
N/A | [Build a Node.js App with CockroachDB (Sequelize)](build-a-nodejs-app-with-cockroachdb-sequelize.html)


[Build a Node.js App with CockroachDB (Knex.js)](build-a-nodejs-app-with-cockroachdb-knexjs.html)
[Build a Node.js App with CockroachDB (Prisma)](build-a-nodejs-app-with-cockroachdb-prisma.html)
[Build a TypeScript App with CockroachDB (TypeORM)](build-a-typescript-app-with-cockroachdb.html) | -| Ruby | [ActiveRecord](https://rubygems.org/gems/activerecord)
[RGeo/RGeo-ActiveRecord](https://github.com/cockroachdb/activerecord-cockroachdb-adapter#working-with-spatial-data) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/activerecord.go ||var supportedRailsVersion = "||"\nvar %}
(use latest version of CockroachDB adapter) | Full | [`activerecord-cockroachdb-adapter`](https://rubygems.org/gems/activerecord-cockroachdb-adapter)
(includes client-side transaction retry handling) | [Build a Ruby App with CockroachDB (ActiveRecord)](build-a-ruby-app-with-cockroachdb-activerecord.html) | -| Python | [Django](https://pypi.org/project/Django/)
(including [GeoDjango](https://docs.djangoproject.com/en/3.1/ref/contrib/gis/))
[peewee](https://github.com/coleifer/peewee/)
[SQLAlchemy](https://www.sqlalchemy.org/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/django.go ||var djangoSupportedTag = "cockroach-||"\nvar %}
(use latest version of CockroachDB adapter)

3.13.3
0.7.13
1.4.17
(use latest version of CockroachDB adapter) | Full


Full
Full
Full | [`django-cockroachdb`](https://pypi.org/project/django-cockroachdb/)


N/A
N/A
[`sqlalchemy-cockroachdb`](https://pypi.org/project/sqlalchemy-cockroachdb)
(includes client-side transaction retry handling) | [Build a Python App with CockroachDB (Django)](build-a-python-app-with-cockroachdb-django.html)


N/A (See [peewee docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cockroach-database).)
[Build a Python App with CockroachDB (SQLAlchemy)](build-a-python-app-with-cockroachdb-sqlalchemy.html) | +| Go | [GORM](https://github.com/jinzhu/gorm/releases)


[go-pg](https://github.com/go-pg/pg)
[upper/db](https://github.com/upper/db) | v1.24.1


v10.9.0
v4 | Full


Full
Full | [`crdbgorm`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbgorm)
(includes client-side transaction retry handling)
N/A
N/A | [Build a Go App with CockroachDB (GORM)](build-a-go-app-with-cockroachdb-gorm.html)


N/A
[Build a Go App with CockroachDB (upper/db)](build-a-go-app-with-cockroachdb-upperdb.html) | +| Java | [Hibernate](https://hibernate.org/orm/)
(including [Hibernate Spatial](https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html#spatial))
[jOOQ](https://www.jooq.org/)
[MyBatis](https://mybatis.org/mybatis-3/) | 6.3.1 (must be at least 5.4.19)


3.13.2 (must be at least 3.13.0)
3.5.5| Full


Full
Full | N/A


N/A
N/A | [Build a Java App with CockroachDB (Hibernate)](build-a-java-app-with-cockroachdb-hibernate.html)


[Build a Java App with CockroachDB (jOOQ)](build-a-java-app-with-cockroachdb-jooq.html)
[Build a Spring App with CockroachDB (MyBatis)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) | +| JavaScript/TypeScript | [Sequelize](https://www.npmjs.com/package/sequelize)


[Knex.js](https://knexjs.org/)
[Prisma](https://prisma.io)
[TypeORM](https://www.npmjs.com/package/typeorm) | v6.0.5
(use latest version of CockroachDB adapter)
2.5.1
3.14.0
0.3.17 {% comment %}remove-unsafe-crdb-setting{% endcomment %} | Full


Full
Full
Full | [`sequelize-cockroachdb`](https://www.npmjs.com/package/sequelize-cockroachdb)


N/A
N/A
N/A | [Build a Node.js App with CockroachDB (Sequelize)](build-a-nodejs-app-with-cockroachdb-sequelize.html)


[Build a Node.js App with CockroachDB (Knex.js)](build-a-nodejs-app-with-cockroachdb-knexjs.html)
[Build a Node.js App with CockroachDB (Prisma)](build-a-nodejs-app-with-cockroachdb-prisma.html)
[Build a TypeScript App with CockroachDB (TypeORM)](build-a-typescript-app-with-cockroachdb.html) | +| Ruby | [ActiveRecord](https://rubygems.org/gems/activerecord)
[RGeo/RGeo-ActiveRecord](https://github.com/cockroachdb/activerecord-cockroachdb-adapter#working-with-spatial-data) | 8.1.0
(use latest version of CockroachDB adapter) | Full | [`activerecord-cockroachdb-adapter`](https://rubygems.org/gems/activerecord-cockroachdb-adapter)
(includes client-side transaction retry handling) | [Build a Ruby App with CockroachDB (ActiveRecord)](build-a-ruby-app-with-cockroachdb-activerecord.html) | +| Python | [Django](https://pypi.org/project/Django/)
(including [GeoDjango](https://docs.djangoproject.com/en/3.1/ref/contrib/gis/))
[peewee](https://github.com/coleifer/peewee/)
[SQLAlchemy](https://www.sqlalchemy.org/) | 4.1.x
(use latest version of CockroachDB adapter)

3.13.3
0.7.13
1.4.17
(use latest version of CockroachDB adapter) | Full


Full
Full
Full | [`django-cockroachdb`](https://pypi.org/project/django-cockroachdb/)


N/A
N/A
[`sqlalchemy-cockroachdb`](https://pypi.org/project/sqlalchemy-cockroachdb)
(includes client-side transaction retry handling) | [Build a Python App with CockroachDB (Django)](build-a-python-app-with-cockroachdb-django.html)


N/A (See [peewee docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cockroach-database).)
[Build a Python App with CockroachDB (SQLAlchemy)](build-a-python-app-with-cockroachdb-sqlalchemy.html) | ## Application frameworks diff --git a/src/current/_includes/v23.1/orchestration/start-cockroachdb-insecure.md b/src/current/_includes/v23.1/orchestration/start-cockroachdb-insecure.md index 3406d48edbb..c9af5cc0267 100644 --- a/src/current/_includes/v23.1/orchestration/start-cockroachdb-insecure.md +++ b/src/current/_includes/v23.1/orchestration/start-cockroachdb-insecure.md @@ -1,10 +1,10 @@ -1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it. +1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it. - Download [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml): + Download [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml ~~~ {{site.data.alerts.callout_info}} @@ -27,11 +27,11 @@ Alternatively, if you'd rather start with a configuration file that has been customized for performance: - 1. 
Download our [performance version of `cockroachdb-statefulset-insecure.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml): + 1. Download our [performance version of `cockroachdb-statefulset-insecure.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml ~~~ 1. Modify the file wherever there is a `TODO` comment. @@ -72,12 +72,12 @@ pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s ~~~ -1. Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: +1. 
Use our [`cluster-init.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml ~~~ ~~~ diff --git a/src/current/_includes/v23.1/orchestration/start-cockroachdb-local-insecure.md b/src/current/_includes/v23.1/orchestration/start-cockroachdb-local-insecure.md index 552cb3cd25f..196a53bce4c 100644 --- a/src/current/_includes/v23.1/orchestration/start-cockroachdb-local-insecure.md +++ b/src/current/_includes/v23.1/orchestration/start-cockroachdb-local-insecure.md @@ -1,8 +1,8 @@ -1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it: +1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it: {% include_cached copy-clipboard.html %} ~~~ shell - $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml + $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml ~~~ ~~~ @@ -41,12 +41,12 @@ pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s ~~~ -1. 
Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: +1. Use our [`cluster-init.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml ~~~ ~~~ diff --git a/src/current/_includes/v23.1/orchestration/start-cockroachdb-secure.md b/src/current/_includes/v23.1/orchestration/start-cockroachdb-secure.md index 972cabc2d8e..a7299e1aa25 100644 --- a/src/current/_includes/v23.1/orchestration/start-cockroachdb-secure.md +++ b/src/current/_includes/v23.1/orchestration/start-cockroachdb-secure.md @@ -1,10 +1,10 @@ ### Configure the cluster -1. Download and modify our [StatefulSet configuration](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml): +1. Download and modify our [StatefulSet configuration](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml ~~~ 1. Update `secretName` with the name of the corresponding node secret. 
diff --git a/src/current/_includes/v23.1/orchestration/test-cluster-secure.md b/src/current/_includes/v23.1/orchestration/test-cluster-secure.md index f96d779fd0a..68542cd3e3f 100644 --- a/src/current/_includes/v23.1/orchestration/test-cluster-secure.md +++ b/src/current/_includes/v23.1/orchestration/test-cluster-secure.md @@ -44,7 +44,7 @@ {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/client.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/client.yaml ~~~ ~~~ diff --git a/src/current/_includes/v23.1/prod-deployment/decommission-pre-flight-checks.md b/src/current/_includes/v23.1/prod-deployment/decommission-pre-flight-checks.md index b267379384b..70e35e4b72e 100644 --- a/src/current/_includes/v23.1/prod-deployment/decommission-pre-flight-checks.md +++ b/src/current/_includes/v23.1/prod-deployment/decommission-pre-flight-checks.md @@ -14,5 +14,5 @@ Failed running "node decommission" These checks can be skipped by [passing the flag `--checks=skip` to `cockroach node decommission`]({% link {{ page.version.version }}/cockroach-node.md %}#decommission-checks). {{site.data.alerts.callout_info}} -The amount of remaining disk space on other nodes in the cluster is not yet considered as part of the decommissioning pre-flight checks. For more information, see [cockroachdb/cockroach#71757](https://github.com/cockroachdb/cockroach/issues/71757) +The amount of remaining disk space on other nodes in the cluster is not yet considered as part of the decommissioning pre-flight checks. 
For more information, see cockroachdb/cockroach#71757 {{site.data.alerts.end}} diff --git a/src/current/_includes/v23.1/sql/cannot-refresh-materialized-views-inside-transactions.md b/src/current/_includes/v23.1/sql/cannot-refresh-materialized-views-inside-transactions.md index c78308152f6..f3407abc8d3 100644 --- a/src/current/_includes/v23.1/sql/cannot-refresh-materialized-views-inside-transactions.md +++ b/src/current/_includes/v23.1/sql/cannot-refresh-materialized-views-inside-transactions.md @@ -24,4 +24,4 @@ SQLSTATE: 25000 ~~~ - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/66008) + Tracking GitHub Issue diff --git a/src/current/_includes/v23.1/sql/expression-indexes-cannot-reference-computed-columns.md b/src/current/_includes/v23.1/sql/expression-indexes-cannot-reference-computed-columns.md index 64d80e31fa3..bef1da01ea5 100644 --- a/src/current/_includes/v23.1/sql/expression-indexes-cannot-reference-computed-columns.md +++ b/src/current/_includes/v23.1/sql/expression-indexes-cannot-reference-computed-columns.md @@ -1,3 +1,3 @@ CockroachDB does not allow {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} to reference [computed columns]({% link {{ page.version.version }}/computed-columns.md %}). 
- [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/67900) + Tracking GitHub Issue diff --git a/src/current/_includes/v23.1/sql/expressions-as-on-conflict-targets.md b/src/current/_includes/v23.1/sql/expressions-as-on-conflict-targets.md index 882dd905b1d..e9cc6a63e8a 100644 --- a/src/current/_includes/v23.1/sql/expressions-as-on-conflict-targets.md +++ b/src/current/_includes/v23.1/sql/expressions-as-on-conflict-targets.md @@ -37,4 +37,4 @@ INSERT INTO t VALUES (1, 2) ON CONFLICT ((a + b)) DO UPDATE SET a = 10 HINT: try \h INSERT ~~~ -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/67893) +Tracking GitHub Issue diff --git a/src/current/_includes/v23.1/sql/jsonb-comparison.md b/src/current/_includes/v23.1/sql/jsonb-comparison.md index 7b4435d6fcc..d1b5fff746a 100644 --- a/src/current/_includes/v23.1/sql/jsonb-comparison.md +++ b/src/current/_includes/v23.1/sql/jsonb-comparison.md @@ -10,4 +10,4 @@ You cannot use comparison operators (such as `<` or `>`) on [`JSONB`]({% link {{ SQLSTATE: 22023 ~~~ - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/49144) + Tracking GitHub issue diff --git a/src/current/_includes/v23.1/sql/locality-optimized-search-virtual-computed-columns.md b/src/current/_includes/v23.1/sql/locality-optimized-search-virtual-computed-columns.md index c016103b1e1..0d948a3dafe 100644 --- a/src/current/_includes/v23.1/sql/locality-optimized-search-virtual-computed-columns.md +++ b/src/current/_includes/v23.1/sql/locality-optimized-search-virtual-computed-columns.md @@ -1 +1 @@ -- {% if page.name == "cost-based-optimizer.md" %} Locality optimized search {% else %} [Locality optimized search]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) {% endif %} does not work for queries that use [partitioned unique indexes]({% link {{ page.version.version }}/partitioning.md %}#partition-using-a-secondary-index) on [virtual computed 
columns](computed-columns.html#virtual-computed-columns). A workaround for computed columns is to make the virtual computed column a [stored computed column](computed-columns.html#stored-computed-columns). Locality optimized search does not work for queries that use partitioned unique [expression indexes](expression-indexes.html). [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/68129) +- {% if page.name == "cost-based-optimizer.md" %} Locality optimized search {% else %} [Locality optimized search]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) {% endif %} does not work for queries that use [partitioned unique indexes]({% link {{ page.version.version }}/partitioning.md %}#partition-using-a-secondary-index) on [virtual computed columns](computed-columns.html#virtual-computed-columns). A workaround for computed columns is to make the virtual computed column a [stored computed column](computed-columns.html#stored-computed-columns). Locality optimized search does not work for queries that use partitioned unique [expression indexes](expression-indexes.html). Tracking GitHub Issue diff --git a/src/current/_includes/v23.1/sql/materialized-views-no-stats.md b/src/current/_includes/v23.1/sql/materialized-views-no-stats.md index e4d262b56ea..e6fa3103388 100644 --- a/src/current/_includes/v23.1/sql/materialized-views-no-stats.md +++ b/src/current/_includes/v23.1/sql/materialized-views-no-stats.md @@ -1,3 +1,3 @@ - The optimizer may not select the most optimal query plan when querying materialized views because CockroachDB does not [collect statistics]({% link {{ page.version.version }}/cost-based-optimizer.md %}#table-statistics) on materialized views. - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/78181). + Tracking GitHub Issue. 
diff --git a/src/current/_includes/v23.1/sql/savepoints-and-high-priority-transactions.md b/src/current/_includes/v23.1/sql/savepoints-and-high-priority-transactions.md index c6de489e641..43f09ea415f 100644 --- a/src/current/_includes/v23.1/sql/savepoints-and-high-priority-transactions.md +++ b/src/current/_includes/v23.1/sql/savepoints-and-high-priority-transactions.md @@ -1 +1 @@ -[`ROLLBACK TO SAVEPOINT`]({% link {{ page.version.version }}/rollback-transaction.md %}#rollback-a-nested-transaction) (for either regular savepoints or "restart savepoints" defined with `cockroach_restart`) causes a "feature not supported" error after a DDL statement in a [`HIGH PRIORITY` transaction]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities), in order to avoid a transaction deadlock. For more information, see GitHub issue [#46414](https://www.github.com/cockroachdb/cockroach/issues/46414). +[`ROLLBACK TO SAVEPOINT`]({% link {{ page.version.version }}/rollback-transaction.md %}#rollback-a-nested-transaction) (for either regular savepoints or "restart savepoints" defined with `cockroach_restart`) causes a "feature not supported" error after a DDL statement in a [`HIGH PRIORITY` transaction]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities), in order to avoid a transaction deadlock. For more information, see GitHub issue #46414. diff --git a/src/current/_includes/v23.1/sql/select-for-update-limitations.md b/src/current/_includes/v23.1/sql/select-for-update-limitations.md index 3b467ba8f18..3268104b080 100644 --- a/src/current/_includes/v23.1/sql/select-for-update-limitations.md +++ b/src/current/_includes/v23.1/sql/select-for-update-limitations.md @@ -5,6 +5,6 @@ If a lease transfer or range split/merge occurs on a range held by an unreplicat - The desired ordering of concurrent accesses to one or more rows of a table expressed by your use of `SELECT ... 
FOR UPDATE` may not be preserved (that is, a transaction _B_ against some table _T_ that was supposed to wait behind another transaction _A_ operating on _T_ may not wait for transaction _A_). - The transaction that acquired the (now dropped) unreplicated lock may fail to commit, leading to [transaction retry errors with code `40001` and the `restart transaction` error message]({% link {{ page.version.version }}/common-errors.md %}#restart-transaction). -We intend to improve the reliability of these locks. For details, see [cockroachdb/cockroach#75456](https://github.com/cockroachdb/cockroach/issues/75456). +We intend to improve the reliability of these locks. For details, see cockroachdb/cockroach#75456. Note that [serializable isolation]({% link {{ page.version.version }}/transactions.md %}#serializable-isolation) is preserved despite this limitation. diff --git a/src/current/_includes/v23.2/backward-incompatible/alpha.1.md b/src/current/_includes/v23.2/backward-incompatible/alpha.1.md index a93bed49fbe..5bef6ceedaa 100644 --- a/src/current/_includes/v23.2/backward-incompatible/alpha.1.md +++ b/src/current/_includes/v23.2/backward-incompatible/alpha.1.md @@ -1,15 +1,15 @@ -- CockroachDB no longer performs environment variable expansion in the parameter `--certs-dir`. Uses like `--certs-dir='$HOME/path'` (expansion by CockroachDB) can be replaced by `--certs-dir="$HOME/path"` (expansion by the Unix shell). [#81298][#81298] -- In the Cockroach CLI, [`BOOL` values](../v23.1/bool.html) are now formatted as `t` or `f` instead of `True` or `False`. [#81943][#81943] -- Removed the `cockroach quit` command. It has been deprecated since v20.1. To [shut down a node](../v23.1/node-shutdown.html) gracefully, send a `SIGTERM` signal to it. 
[#82988][#82988] -- Added a cluster version to allow the [Pebble storage engine](../v23.1/architecture/storage-layer.html#pebble) to recombine certain SSTables (specifically, user keys that are split across multiple files in a level of the [log-structured merge-tree](../v23.1/architecture/storage-layer.html#log-structured-merge-trees)). Recombining the split user keys is required for supporting the range keys feature. The migration to recombine the SSTables is expected to be short (split user keys are rare in practice), but will block subsequent migrations until all tables have been recombined. The `storage.marked-for-compaction-files` time series metric can show the progress of the migration. [#84887][#84887] +- CockroachDB no longer performs environment variable expansion in the parameter `--certs-dir`. Uses like `--certs-dir='$HOME/path'` (expansion by CockroachDB) can be replaced by `--certs-dir="$HOME/path"` (expansion by the Unix shell). #81298 +- In the Cockroach CLI, [`BOOL` values](../v23.1/bool.html) are now formatted as `t` or `f` instead of `True` or `False`. #81943 +- Removed the `cockroach quit` command. It has been deprecated since v20.1. To [shut down a node](../v23.1/node-shutdown.html) gracefully, send a `SIGTERM` signal to it. #82988 +- Added a cluster version to allow the [Pebble storage engine](../v23.1/architecture/storage-layer.html#pebble) to recombine certain SSTables (specifically, user keys that are split across multiple files in a level of the [log-structured merge-tree](../v23.1/architecture/storage-layer.html#log-structured-merge-trees)). Recombining the split user keys is required for supporting the range keys feature. The migration to recombine the SSTables is expected to be short (split user keys are rare in practice), but will block subsequent migrations until all tables have been recombined. The `storage.marked-for-compaction-files` time series metric can show the progress of the migration. 
#84887 - Using a single TCP port listener for both RPC (node-node) and SQL client connections is now deprecated. This capability **will be removed** in the next version of CockroachDB. Instead, make one of the following configuration changes to your CockroachDB deployment: - Preferred: keep port `26257` for SQL, and allocate a new port, e.g., `26357`, for node-node RPC connections. For example, you might configure a node with the flags `--listen-addr=:26357 --sql-addr=:26257`, where subsequent nodes seeking to join would then use the flag `--join=othernode:26357,othernode:26257`. This will become the default configuration in the next version of CockroachDB. When using this mode of operation, care should be taken to use a `--join` flag that includes both the previous and new port numbers for other nodes, so that no network partition occurs during the upgrade. - - Optional: keep port `26257` for RPC, and allocate a new port, e.g., `26357`, for SQL connections. For example, you might configure a node with the flags `--listen-addr=:26257 --sql-addr=:26357`. When using this mode of operation, the `--join` flags do not need to be modified. However, SQL client apps or the SQL load balancer configuration (when in use) must be updated to use the new SQL port number. [#85671][#85671] -- If no `nullif` option is specified while using [`IMPORT CSV`](../v23.1/import.html), then a zero-length string in the input is now treated as `NULL`. The quoted empty string in the input is treated as an empty string. Similarly, if `nullif` is specified, then an unquoted value is treated as `NULL`, and a quoted value is treated as that string. These changes were made to make `IMPORT CSV` behave more similarly to `COPY CSV`. If the previous behavior (i.e., treating either quoted or unquoted values that match the `nullif` setting as `NULL`) is desired, you can use the new `allow_quoted_null` option in the `IMPORT` statement. 
[#84487][#84487] -- [`COPY FROM`](../v23.1/copy.html) operations are now atomic by default instead of being segmented into 100 row transactions. Set the `copy_from_atomic_enabled` session setting to `false` for the previous behavior. [#85986][#85986] -- The `GRANT` privilege has been removed and replaced by the more granular [`WITH GRANT OPTION`]({% link v23.2/grant.md %}#grant-privileges-with-the-option-to-grant-to-others), which provides control over which privileges are allowed to be granted. [#81310][#81310] -- Removed the ability to cast `int`, `int2`, and `int8` to a `0` length `BIT` or `VARBIT`. [#81266][#81266] -- Removed the deprecated `GRANT` privilege. [#81310][#81310] -- Removed the `ttl_automatic_column` storage parameter. The `crdb_internal_expiration` column is created when `ttl_expire_after` is set and removed when `ttl_expire_after` is reset. [#83134][#83134] -- Removed the byte string parameter in the `crdb_internal.schedule_sql_stats_compaction` function. [#82560][#82560] -- Changed the default value of the `enable_implicit_transaction_for_batch_statements` to `true`. This means that a [batch of statements]({% link v23.2/transactions.md %}#batched-statements) sent in one string separated by semicolons is treated as an implicit transaction. [#76834][#76834] + - Optional: keep port `26257` for RPC, and allocate a new port, e.g., `26357`, for SQL connections. For example, you might configure a node with the flags `--listen-addr=:26257 --sql-addr=:26357`. When using this mode of operation, the `--join` flags do not need to be modified. However, SQL client apps or the SQL load balancer configuration (when in use) must be updated to use the new SQL port number. #85671 +- If no `nullif` option is specified while using [`IMPORT CSV`](../v23.1/import.html), then a zero-length string in the input is now treated as `NULL`. The quoted empty string in the input is treated as an empty string. 
Similarly, if `nullif` is specified, then an unquoted value is treated as `NULL`, and a quoted value is treated as that string. These changes were made to make `IMPORT CSV` behave more similarly to `COPY CSV`. If the previous behavior (i.e., treating either quoted or unquoted values that match the `nullif` setting as `NULL`) is desired, you can use the new `allow_quoted_null` option in the `IMPORT` statement. #84487 +- [`COPY FROM`](../v23.1/copy.html) operations are now atomic by default instead of being segmented into 100 row transactions. Set the `copy_from_atomic_enabled` session setting to `false` for the previous behavior. #85986 +- The `GRANT` privilege has been removed and replaced by the more granular [`WITH GRANT OPTION`]({% link v23.2/grant.md %}#grant-privileges-with-the-option-to-grant-to-others), which provides control over which privileges are allowed to be granted. #81310 +- Removed the ability to cast `int`, `int2`, and `int8` to a `0` length `BIT` or `VARBIT`. #81266 +- Removed the deprecated `GRANT` privilege. #81310 +- Removed the `ttl_automatic_column` storage parameter. The `crdb_internal_expiration` column is created when `ttl_expire_after` is set and removed when `ttl_expire_after` is reset. #83134 +- Removed the byte string parameter in the `crdb_internal.schedule_sql_stats_compaction` function. #82560 +- Changed the default value of the `enable_implicit_transaction_for_batch_statements` to `true`. This means that a [batch of statements]({% link v23.2/transactions.md %}#batched-statements) sent in one string separated by semicolons is treated as an implicit transaction. 
#76834 diff --git a/src/current/_includes/v23.2/cdc/avro-udt-composite.md b/src/current/_includes/v23.2/cdc/avro-udt-composite.md index 33e621169a2..3c5caaf3ec1 100644 --- a/src/current/_includes/v23.2/cdc/avro-udt-composite.md +++ b/src/current/_includes/v23.2/cdc/avro-udt-composite.md @@ -1 +1 @@ -A changefeed in [Avro format]({% link {{ page.version.version }}/changefeed-messages.md %}#avro) will not be able to serialize [user-defined composite (tuple) types](create-type.html). [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/102903) \ No newline at end of file +A changefeed in [Avro format]({% link {{ page.version.version }}/changefeed-messages.md %}#avro) will not be able to serialize [user-defined composite (tuple) types](create-type.html). Tracking GitHub Issue \ No newline at end of file diff --git a/src/current/_includes/v23.2/cdc/csv-udt-composite.md b/src/current/_includes/v23.2/cdc/csv-udt-composite.md index 1cf920220d0..acd84838417 100644 --- a/src/current/_includes/v23.2/cdc/csv-udt-composite.md +++ b/src/current/_includes/v23.2/cdc/csv-udt-composite.md @@ -1 +1 @@ -A changefeed emitting [CSV]({% link {{ page.version.version }}/changefeed-messages.md %}#csv) will include `AS` labels in the message format when the changefeed serializes a [user-defined composite type]({% link {{ page.version.version }}/create-type.md %}). [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/102905) \ No newline at end of file +A changefeed emitting [CSV]({% link {{ page.version.version }}/changefeed-messages.md %}#csv) will include `AS` labels in the message format when the changefeed serializes a [user-defined composite type]({% link {{ page.version.version }}/create-type.md %}). 
Tracking GitHub Issue \ No newline at end of file diff --git a/src/current/_includes/v23.2/essential-metrics.md b/src/current/_includes/v23.2/essential-metrics.md index 83bbc469c57..7a05c06e1dc 100644 --- a/src/current/_includes/v23.2/essential-metrics.md +++ b/src/current/_includes/v23.2/essential-metrics.md @@ -181,4 +181,4 @@ If [Row-Level TTL]({% link {{ page.version.version }}/row-level-ttl.md %}) is co - [Custom Chart Debug Page]({% link {{ page.version.version }}/ui-custom-chart-debug-page.md %}) - [Cluster API]({% link {{ page.version.version }}/cluster-api.md %}) - [Events to alert on]({% link {{ page.version.version }}/monitoring-and-alerting.md %}#events-to-alert-on) -- [CockroachDB Source Code - DB Console metrics to graphs mappings (in *.tsx files)](https://github.com/cockroachdb/cockroach/tree/master/pkg/ui/workspaces/db-console/src/views/cluster/containers/nodeGraphs/dashboards) \ No newline at end of file +- CockroachDB Source Code - DB Console metrics to graphs mappings (in *.tsx files) \ No newline at end of file diff --git a/src/current/_includes/v23.2/faq/what-is-crdb.md b/src/current/_includes/v23.2/faq/what-is-crdb.md index 28857ed61fa..ee04ca10506 100644 --- a/src/current/_includes/v23.2/faq/what-is-crdb.md +++ b/src/current/_includes/v23.2/faq/what-is-crdb.md @@ -1,6 +1,6 @@ CockroachDB is a [distributed SQL](https://www.cockroachlabs.com/blog/what-is-distributed-sql/) database built on a transactional and strongly-consistent key-value store. It **scales** horizontally; **survives** disk, machine, rack, and even datacenter failures with minimal latency disruption and no manual intervention; supports **strongly-consistent** ACID transactions; and provides a familiar **SQL** API for structuring, manipulating, and querying data. 
-CockroachDB is inspired by Google's [Spanner](http://research.google.com/archive/spanner.html) and [F1](http://research.google.com/pubs/pub38125.html) technologies, and the [source code](https://github.com/cockroachdb/cockroach) is freely available. +CockroachDB is inspired by Google's [Spanner](http://research.google.com/archive/spanner.html) and [F1](http://research.google.com/pubs/pub38125.html) technologies, and the source code is freely available. {{site.data.alerts.callout_success}} For a deeper dive into CockroachDB's capabilities and how it fits into the database landscape, take the free [**Intro to Distributed SQL and CockroachDB**](https://university.cockroachlabs.com/courses/course-v1:crl+intro-to-distributed-sql-and-cockroachdb+self-paced/about) course on Cockroach University. diff --git a/src/current/_includes/v23.2/known-limitations/alter-changefeed-cdc-queries.md b/src/current/_includes/v23.2/known-limitations/alter-changefeed-cdc-queries.md index cbfa5a818f3..6c02bffd16b 100644 --- a/src/current/_includes/v23.2/known-limitations/alter-changefeed-cdc-queries.md +++ b/src/current/_includes/v23.2/known-limitations/alter-changefeed-cdc-queries.md @@ -1 +1 @@ -{% if page.name == "alter-changefeed.md" %} `ALTER CHANGEFEED` {% else %} [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) {% endif %} is not fully supported with changefeeds that use {% if page.name == "cdc-queries.md" %} CDC queries. {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}). {% endif %} You can alter the options that a changefeed uses, but you cannot alter the changefeed target tables. 
[Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/83033) \ No newline at end of file +{% if page.name == "alter-changefeed.md" %} `ALTER CHANGEFEED` {% else %} [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) {% endif %} is not fully supported with changefeeds that use {% if page.name == "cdc-queries.md" %} CDC queries. {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}). {% endif %} You can alter the options that a changefeed uses, but you cannot alter the changefeed target tables. Tracking GitHub issue \ No newline at end of file diff --git a/src/current/_includes/v23.2/known-limitations/cdc-execution-locality.md b/src/current/_includes/v23.2/known-limitations/cdc-execution-locality.md index 722ccabedbd..082889b672a 100644 --- a/src/current/_includes/v23.2/known-limitations/cdc-execution-locality.md +++ b/src/current/_includes/v23.2/known-limitations/cdc-execution-locality.md @@ -1 +1 @@ -In v23.2.0, changefeeds that use the [`execution_locality` option]({% link {{ page.version.version }}/changefeeds-in-multi-region-deployments.md %}#run-a-changefeed-job-by-locality) set to a [secondary region]({% link {{ page.version.version }}/multiregion-overview.md %}#secondary-regions) could create a plan that assigns most of the ranges to an [aggregator]({% link {{ page.version.version }}/how-does-an-enterprise-changefeed-work.md %}) on the coordinator node. This leads to an unbalanced plan and slow changefeed progress, particularly when the table is large and has many ranges. This issue is [partially mitigated](https://github.com/cockroachdb/cockroach/commit/5d7714a03a891c9fd5746fb876c39dced4f47205) in v23.2.1 and later. 
[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/124822) \ No newline at end of file +In v23.2.0, changefeeds that use the [`execution_locality` option]({% link {{ page.version.version }}/changefeeds-in-multi-region-deployments.md %}#run-a-changefeed-job-by-locality) set to a [secondary region]({% link {{ page.version.version }}/multiregion-overview.md %}#secondary-regions) could create a plan that assigns most of the ranges to an [aggregator]({% link {{ page.version.version }}/how-does-an-enterprise-changefeed-work.md %}) on the coordinator node. This leads to an unbalanced plan and slow changefeed progress, particularly when the table is large and has many ranges. This issue is partially mitigated in v23.2.1 and later. Tracking GitHub Issue \ No newline at end of file diff --git a/src/current/_includes/v23.2/known-limitations/cdc-queries-column-families.md b/src/current/_includes/v23.2/known-limitations/cdc-queries-column-families.md index a68a7949824..fd6a60b41c7 100644 --- a/src/current/_includes/v23.2/known-limitations/cdc-queries-column-families.md +++ b/src/current/_includes/v23.2/known-limitations/cdc-queries-column-families.md @@ -1 +1 @@ -Creating a changefeed with {% if page.name == "cdc-queries.md" %} CDC queries {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}) {% endif %} on tables with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %} is not supported. 
[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/127761) \ No newline at end of file +Creating a changefeed with {% if page.name == "cdc-queries.md" %} CDC queries {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}) {% endif %} on tables with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %} is not supported. Tracking GitHub Issue \ No newline at end of file diff --git a/src/current/_includes/v23.2/known-limitations/cdc-queries.md b/src/current/_includes/v23.2/known-limitations/cdc-queries.md index 1fcc6988f1a..5ca18d6696d 100644 --- a/src/current/_includes/v23.2/known-limitations/cdc-queries.md +++ b/src/current/_includes/v23.2/known-limitations/cdc-queries.md @@ -3,5 +3,5 @@ - The following are not permitted in CDC queries: - [Volatile functions]({% link {{ page.version.version }}/functions-and-operators.md %}#function-volatility). - Sub-select queries. - - [Aggregate]({% link {{ page.version.version }}/functions-and-operators.md %}#aggregate-functions) and [window functions]({% link {{ page.version.version }}/window-functions.md %}) (i.e., functions operating over many rows). [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/98237) -- `delete` changefeed events will only contain the [primary key]({% link {{ page.version.version }}/primary-key.md %}). All other columns will emit as `NULL`. See [Capture delete messages]({% link {{ page.version.version }}/cdc-queries.md %}#capture-delete-messages) for detail on running a CDC query that emits the deleted values. 
[Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/83835) + - [Aggregate]({% link {{ page.version.version }}/functions-and-operators.md %}#aggregate-functions) and [window functions]({% link {{ page.version.version }}/window-functions.md %}) (i.e., functions operating over many rows). Tracking GitHub issue +- `delete` changefeed events will only contain the [primary key]({% link {{ page.version.version }}/primary-key.md %}). All other columns will emit as `NULL`. See [Capture delete messages]({% link {{ page.version.version }}/cdc-queries.md %}#capture-delete-messages) for detail on running a CDC query that emits the deleted values. Tracking GitHub issue diff --git a/src/current/_includes/v23.2/known-limitations/cdc.md b/src/current/_includes/v23.2/known-limitations/cdc.md index 008fdb46867..89a1aa6165f 100644 --- a/src/current/_includes/v23.2/known-limitations/cdc.md +++ b/src/current/_includes/v23.2/known-limitations/cdc.md @@ -1,8 +1,8 @@ -- Changefeed target options are limited to tables and [column families]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}). [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/73435) +- Changefeed target options are limited to tables and [column families]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}). Tracking GitHub Issue - {% include {{page.version.version}}/cdc/kafka-vpc-limitation.md %} -- Webhook sinks only support HTTPS. Use the [`insecure_tls_skip_verify`]({% link {{ page.version.version }}/create-changefeed.md %}#tls-skip-verify) parameter when testing to disable certificate verification; however, this still requires HTTPS and certificates. [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/73431) -- Formats for changefeed messages are not supported by all changefeed sinks. 
Refer to the [Changefeed Sinks]({% link {{ page.version.version }}/changefeed-sinks.md %}) page for details on compatible formats with each sink and the [`format`]({% link {{ page.version.version }}/create-changefeed.md %}) option to specify a changefeed message format. [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/73432) -- Using the [`split_column_families`]({% link {{ page.version.version }}/create-changefeed.md %}#split-column-families) and [`resolved`]({% link {{ page.version.version }}/create-changefeed.md %}#resolved-option) options on the same changefeed will cause an error when using the following [sinks](changefeed-sinks.html): Kafka and Google Cloud Pub/Sub. Instead, use the individual `FAMILY` keyword to specify column families when creating a changefeed. [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/79452) +- Webhook sinks only support HTTPS. Use the [`insecure_tls_skip_verify`]({% link {{ page.version.version }}/create-changefeed.md %}#tls-skip-verify) parameter when testing to disable certificate verification; however, this still requires HTTPS and certificates. Tracking GitHub Issue +- Formats for changefeed messages are not supported by all changefeed sinks. Refer to the [Changefeed Sinks]({% link {{ page.version.version }}/changefeed-sinks.md %}) page for details on compatible formats with each sink and the [`format`]({% link {{ page.version.version }}/create-changefeed.md %}) option to specify a changefeed message format. Tracking GitHub Issue +- Using the [`split_column_families`]({% link {{ page.version.version }}/create-changefeed.md %}#split-column-families) and [`resolved`]({% link {{ page.version.version }}/create-changefeed.md %}#resolved-option) options on the same changefeed will cause an error when using the following [sinks](changefeed-sinks.html): Kafka and Google Cloud Pub/Sub. Instead, use the individual `FAMILY` keyword to specify column families when creating a changefeed. 
Tracking GitHub Issue - {% include {{page.version.version}}/cdc/types-udt-composite-general.md %} The following limitations apply: - {% include {{page.version.version}}/cdc/avro-udt-composite.md %} - {% include {{page.version.version}}/cdc/csv-udt-composite.md %} \ No newline at end of file diff --git a/src/current/_includes/v23.2/known-limitations/changefeed-column-family-message.md b/src/current/_includes/v23.2/known-limitations/changefeed-column-family-message.md index b8aafbe11dc..0c61edd8d82 100644 --- a/src/current/_includes/v23.2/known-limitations/changefeed-column-family-message.md +++ b/src/current/_includes/v23.2/known-limitations/changefeed-column-family-message.md @@ -1 +1 @@ -When you create a changefeed on a table with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %}, the changefeed will emit messages per column family in separate streams. As a result, [changefeed messages]({% link {{ page.version.version }}/changefeed-messages.md %}) for different column families will arrive at the [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) under separate topics. [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/127736) \ No newline at end of file +When you create a changefeed on a table with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %}, the changefeed will emit messages per column family in separate streams. As a result, [changefeed messages]({% link {{ page.version.version }}/changefeed-messages.md %}) for different column families will arrive at the [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) under separate topics. 
Tracking GitHub Issue \ No newline at end of file diff --git a/src/current/_includes/v23.2/known-limitations/copy-syntax.md b/src/current/_includes/v23.2/known-limitations/copy-syntax.md index 0c6c89299df..2bff2dca9b3 100644 --- a/src/current/_includes/v23.2/known-limitations/copy-syntax.md +++ b/src/current/_includes/v23.2/known-limitations/copy-syntax.md @@ -2,10 +2,10 @@ CockroachDB does not yet support the following `COPY` syntax: - Various `COPY` options (`FORMAT`, `FREEZE`, `QUOTE`, etc.). - - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/85572) - - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/85573) - - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/85574) + - Tracking GitHub Issue + - Tracking GitHub Issue + - Tracking GitHub Issue - `COPY ... FROM ... WHERE `. - - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/54580) + - Tracking GitHub Issue diff --git a/src/current/_includes/v23.2/known-limitations/drop-owned-by-role-limitations.md b/src/current/_includes/v23.2/known-limitations/drop-owned-by-role-limitations.md index cfa718d4e2d..c2cef607345 100644 --- a/src/current/_includes/v23.2/known-limitations/drop-owned-by-role-limitations.md +++ b/src/current/_includes/v23.2/known-limitations/drop-owned-by-role-limitations.md @@ -1,4 +1,4 @@ -- If the [role]({% link {{ page.version.version }}/security-reference/authorization.md %}#roles) for which you are trying to `DROP OWNED BY` was granted a [system-level privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges) (i.e., using the [`GRANT SYSTEM ...`]({% link {{ page.version.version }}/grant.md %}#grant-system-level-privileges-on-the-entire-cluster) statement), the error shown below will be signalled. 
The workaround is to use [`SHOW SYSTEM GRANTS FOR {role}`](show-system-grants.html) and then use [`REVOKE SYSTEM ...`](revoke.html#revoke-system-level-privileges-on-the-entire-cluster) for each privilege in the result. For more information about this known limitation, see [cockroachdb/cockroach#88149](https://github.com/cockroachdb/cockroach/issues/88149). +- If the [role]({% link {{ page.version.version }}/security-reference/authorization.md %}#roles) for which you are trying to `DROP OWNED BY` was granted a [system-level privilege]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges) (i.e., using the [`GRANT SYSTEM ...`]({% link {{ page.version.version }}/grant.md %}#grant-system-level-privileges-on-the-entire-cluster) statement), the error shown below will be signalled. The workaround is to use [`SHOW SYSTEM GRANTS FOR {role}`](show-system-grants.html) and then use [`REVOKE SYSTEM ...`](revoke.html#revoke-system-level-privileges-on-the-entire-cluster) for each privilege in the result. For more information about this known limitation, see cockroachdb/cockroach#88149. ~~~ ERROR: cannot perform drop owned by if role has synthetic privileges; foo has entries in system.privileges diff --git a/src/current/_includes/v23.2/known-limitations/forecasted-stats-limitations.md b/src/current/_includes/v23.2/known-limitations/forecasted-stats-limitations.md index c8753124a96..b5d03e8ffc5 100644 --- a/src/current/_includes/v23.2/known-limitations/forecasted-stats-limitations.md +++ b/src/current/_includes/v23.2/known-limitations/forecasted-stats-limitations.md @@ -6,4 +6,4 @@ Although [`SHOW STATISTICS WITH FORECAST`]({% link {{ page.version.version }}/show-statistics.md %}#display-forecasted-statistics) shows the settings taking effect immediately, they do not actually take effect until new statistics are collected (as can be verified with [`EXPLAIN`]({% link {{ page.version.version }}/explain.md %})). 
- As a workaround, disable and enable forecasting at the [cluster]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-clusters) or [table]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-tables) level. This will invalidate the statistics cache and cause these settings to take effect immediately. [#123852](https://github.com/cockroachdb/cockroach/issues/123852) \ No newline at end of file + As a workaround, disable and enable forecasting at the [cluster]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-clusters) or [table]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-tables) level. This will invalidate the statistics cache and cause these settings to take effect immediately. #123852 \ No newline at end of file diff --git a/src/current/_includes/v23.2/known-limitations/generic-query-plan-limitations.md b/src/current/_includes/v23.2/known-limitations/generic-query-plan-limitations.md index e28e66d5f32..03d72195334 100644 --- a/src/current/_includes/v23.2/known-limitations/generic-query-plan-limitations.md +++ b/src/current/_includes/v23.2/known-limitations/generic-query-plan-limitations.md @@ -1,2 +1,2 @@ -- Because [generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache) use lookup joins instead of the scans and revscans used by custom query plans, generic query plans do not perform as well as custom query plans in some cases. [#128916](https://github.com/cockroachdb/cockroach/issues/128916) -- [Generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-type) are not included in the [plan cache]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache). 
This means a generic query plan built and optimized for a prepared statement in one session cannot be used by another session. To reuse generic query plans for maximum performance, a prepared statement should be executed multiple times instead of prepared and executed once. [#128911](https://github.com/cockroachdb/cockroach/issues/128911) \ No newline at end of file +- Because [generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache) use lookup joins instead of the scans and revscans used by custom query plans, generic query plans do not perform as well as custom query plans in some cases. #128916 +- [Generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-type) are not included in the [plan cache]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache). This means a generic query plan built and optimized for a prepared statement in one session cannot be used by another session. To reuse generic query plans for maximum performance, a prepared statement should be executed multiple times instead of prepared and executed once. #128911 \ No newline at end of file diff --git a/src/current/_includes/v23.2/known-limitations/pcr-scheduled-changefeeds.md b/src/current/_includes/v23.2/known-limitations/pcr-scheduled-changefeeds.md index 58d01be65aa..fccca74f637 100644 --- a/src/current/_includes/v23.2/known-limitations/pcr-scheduled-changefeeds.md +++ b/src/current/_includes/v23.2/known-limitations/pcr-scheduled-changefeeds.md @@ -1 +1 @@ -After the [cutover process]({% link {{ page.version.version }}/cutover-replication.md %}) for [physical cluster replication]({% link {{ page.version.version }}/physical-cluster-replication-overview.md %}), [scheduled changefeeds]({% link {{ page.version.version }}/create-schedule-for-changefeed.md %}) will continue on the promoted cluster. 
You will need to manage [pausing]({% link {{ page.version.version }}/pause-schedules.md %}) or [canceling]({% link {{ page.version.version }}/drop-schedules.md %}) the schedule on the promoted standby cluster to avoid two clusters running the same changefeed to one sink. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/123776) \ No newline at end of file +After the [cutover process]({% link {{ page.version.version }}/cutover-replication.md %}) for [physical cluster replication]({% link {{ page.version.version }}/physical-cluster-replication-overview.md %}), [scheduled changefeeds]({% link {{ page.version.version }}/create-schedule-for-changefeed.md %}) will continue on the promoted cluster. You will need to manage [pausing]({% link {{ page.version.version }}/pause-schedules.md %}) or [canceling]({% link {{ page.version.version }}/drop-schedules.md %}) the schedule on the promoted standby cluster to avoid two clusters running the same changefeed to one sink. Tracking GitHub issue \ No newline at end of file diff --git a/src/current/_includes/v23.2/known-limitations/restore-tables-non-multi-reg.md b/src/current/_includes/v23.2/known-limitations/restore-tables-non-multi-reg.md index 360f93f796f..89b723fdb02 100644 --- a/src/current/_includes/v23.2/known-limitations/restore-tables-non-multi-reg.md +++ b/src/current/_includes/v23.2/known-limitations/restore-tables-non-multi-reg.md @@ -1 +1 @@ -Restoring [`GLOBAL`]({% link {{ page.version.version }}/table-localities.md %}#global-tables) and [`REGIONAL BY TABLE`]({% link {{ page.version.version }}/table-localities.md %}#regional-tables) tables into a **non**-multi-region database is not supported. 
[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/71502) +Restoring [`GLOBAL`]({% link {{ page.version.version }}/table-localities.md %}#global-tables) and [`REGIONAL BY TABLE`]({% link {{ page.version.version }}/table-localities.md %}#regional-tables) tables into a **non**-multi-region database is not supported. Tracking GitHub Issue diff --git a/src/current/_includes/v23.2/known-limitations/restore-udf.md b/src/current/_includes/v23.2/known-limitations/restore-udf.md index 053a3b90df7..ce70aea2383 100644 --- a/src/current/_includes/v23.2/known-limitations/restore-udf.md +++ b/src/current/_includes/v23.2/known-limitations/restore-udf.md @@ -1 +1 @@ -`RESTORE` will not restore a table that references a [UDF]({% link {{ page.version.version }}/user-defined-functions.md %}), unless you skip restoring the function with the {% if page.name == "restore.md" %} [`skip_missing_udfs`](#skip-missing-udfs) {% else %} [`skip_missing_udfs`]({% link {{ page.version.version }}/restore.md %}#skip-missing-udfs) {% endif %} option. Alternatively, take a [database-level backup]({% link {{ page.version.version }}/backup.md %}#back-up-a-database) to include everything needed to restore the table. [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/118195) \ No newline at end of file +`RESTORE` will not restore a table that references a [UDF]({% link {{ page.version.version }}/user-defined-functions.md %}), unless you skip restoring the function with the {% if page.name == "restore.md" %} [`skip_missing_udfs`](#skip-missing-udfs) {% else %} [`skip_missing_udfs`]({% link {{ page.version.version }}/restore.md %}#skip-missing-udfs) {% endif %} option. Alternatively, take a [database-level backup]({% link {{ page.version.version }}/backup.md %}#back-up-a-database) to include everything needed to restore the table. 
Tracking GitHub Issue \ No newline at end of file diff --git a/src/current/_includes/v23.2/known-limitations/row-level-ttl-limitations.md b/src/current/_includes/v23.2/known-limitations/row-level-ttl-limitations.md index ea61345f901..2ad04456283 100644 --- a/src/current/_includes/v23.2/known-limitations/row-level-ttl-limitations.md +++ b/src/current/_includes/v23.2/known-limitations/row-level-ttl-limitations.md @@ -1,5 +1,5 @@ - Any queries you run against tables with Row-Level TTL enabled do not filter out expired rows from the result set (this includes [`UPDATE`s]({% link {{ page.version.version }}/update.md %}) and [`DELETE`s]({% link {{ page.version.version }}/delete.md %})). This feature may be added in a future release. For now, follow the instructions in [Filter out expired rows from a selection query]({% link {{ page.version.version }}/row-level-ttl.md %}#filter-out-expired-rows-from-a-selection-query). -- Tables with Row-Level TTL can be referenced by [foreign keys]({% link {{page.version.version}}/foreign-key.md %}). TTL deletes are issued as regular [`DELETE`]({% link {{ page.version.version }}/delete.md %}) statements, so inbound foreign keys apply. If an inbound foreign key uses `ON DELETE RESTRICT` and referencing rows exist, the TTL job fails with a foreign key violation. [#101372](https://github.com/cockroachdb/cockroach/issues/101372) +- Tables with Row-Level TTL can be referenced by [foreign keys]({% link {{page.version.version}}/foreign-key.md %}). TTL deletes are issued as regular [`DELETE`]({% link {{ page.version.version }}/delete.md %}) statements, so inbound foreign keys apply. If an inbound foreign key uses `ON DELETE RESTRICT` and referencing rows exist, the TTL job fails with a foreign key violation. 
#101372 - Enabling Row-Level TTL on a table with multiple [secondary indexes]({% link {{ page.version.version }}/indexes.md %}) can have negative performance impacts on a cluster, including increased [latency]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#service-latency) and [contention]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention). This is particularly true for large tables with terabytes of data and billions of rows that are split up into multiple ranges across separate nodes. - Increased latency may occur because secondary indexes aren't necessarily stored on the same underlying [ranges]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-range) as a table's [primary indexes]({% link {{ page.version.version }}/indexes.md %}). Further, the secondary indexes' ranges may have [leaseholders]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-leaseholder) located on different nodes than the primary index. - Increased contention may occur because [intents]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#write-intents) must be written as part of performing the deletions. 
diff --git a/src/current/_includes/v23.2/known-limitations/set-transaction-no-rollback.md b/src/current/_includes/v23.2/known-limitations/set-transaction-no-rollback.md index c2e78c7d78a..22de1824dc1 100644 --- a/src/current/_includes/v23.2/known-limitations/set-transaction-no-rollback.md +++ b/src/current/_includes/v23.2/known-limitations/set-transaction-no-rollback.md @@ -14,4 +14,4 @@ timezone 3 ~~~ -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/69396) +Tracking GitHub Issue diff --git a/src/current/_includes/v23.2/known-limitations/show-backup-locality-incremental-location.md b/src/current/_includes/v23.2/known-limitations/show-backup-locality-incremental-location.md index 428a78c0da0..db6cb6a6f9e 100644 --- a/src/current/_includes/v23.2/known-limitations/show-backup-locality-incremental-location.md +++ b/src/current/_includes/v23.2/known-limitations/show-backup-locality-incremental-location.md @@ -1 +1 @@ -{% if page.name == "show-backup.md" %}`SHOW BACKUP`{% else %}[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}){% endif %} can display backups taken with the `incremental_location` option **or** for [locality-aware backups]({% link {{ page.version.version }}/take-and-restore-locality-aware-backups.md %}). It will not display backups for locality-aware backups taken with the `incremental_location` option. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/82912). \ No newline at end of file +{% if page.name == "show-backup.md" %}`SHOW BACKUP`{% else %}[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}){% endif %} can display backups taken with the `incremental_location` option **or** for [locality-aware backups]({% link {{ page.version.version }}/take-and-restore-locality-aware-backups.md %}). It will not display backups for locality-aware backups taken with the `incremental_location` option. Tracking GitHub issue. 
\ No newline at end of file diff --git a/src/current/_includes/v23.2/known-limitations/show-backup-symlink.md b/src/current/_includes/v23.2/known-limitations/show-backup-symlink.md index 1316e7d7667..d6d2c0c96a3 100644 --- a/src/current/_includes/v23.2/known-limitations/show-backup-symlink.md +++ b/src/current/_includes/v23.2/known-limitations/show-backup-symlink.md @@ -1 +1 @@ -[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}) does not support listing backups if the [`nodelocal`]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}) storage location is a symlink. [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/70260) \ No newline at end of file +[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}) does not support listing backups if the [`nodelocal`]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}) storage location is a symlink. Tracking GitHub Issue \ No newline at end of file diff --git a/src/current/_includes/v23.2/known-limitations/sql-cursors.md b/src/current/_includes/v23.2/known-limitations/sql-cursors.md index d1c42749661..802f577cfdc 100644 --- a/src/current/_includes/v23.2/known-limitations/sql-cursors.md +++ b/src/current/_includes/v23.2/known-limitations/sql-cursors.md @@ -1,9 +1,9 @@ CockroachDB implements SQL {% if page.name == "known-limitations.md" %} [cursor]({% link {{ page.version.version }}/cursors.md %}) {% else %} cursor {% endif %} support with the following limitations: -- `DECLARE` only supports forward cursors. Reverse cursors created with `DECLARE SCROLL` are not supported. [cockroachdb/cockroach#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- `FETCH` supports forward, relative, and absolute variants, but only for forward cursors. [cockroachdb/cockroach#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- `BINARY CURSOR`, which returns data in the Postgres binary format, is not supported. 
[cockroachdb/cockroach#77099](https://github.com/cockroachdb/cockroach/issues/77099) -- `WITH HOLD`, which allows keeping a cursor open for longer than a transaction by writing its results into a buffer, is accepted as valid syntax within a single transaction but is not supported. It acts as a no-op and does not actually perform the function of `WITH HOLD`, which is to make the cursor live outside its parent transaction. Instead, if you are using `WITH HOLD`, you will be forced to close that cursor within the transaction it was created in. [cockroachdb/cockroach#77101](https://github.com/cockroachdb/cockroach/issues/77101) +- `DECLARE` only supports forward cursors. Reverse cursors created with `DECLARE SCROLL` are not supported. cockroachdb/cockroach#77102 +- `FETCH` supports forward, relative, and absolute variants, but only for forward cursors. cockroachdb/cockroach#77102 +- `BINARY CURSOR`, which returns data in the Postgres binary format, is not supported. cockroachdb/cockroach#77099 +- `WITH HOLD`, which allows keeping a cursor open for longer than a transaction by writing its results into a buffer, is accepted as valid syntax within a single transaction but is not supported. It acts as a no-op and does not actually perform the function of `WITH HOLD`, which is to make the cursor live outside its parent transaction. Instead, if you are using `WITH HOLD`, you will be forced to close that cursor within the transaction it was created in. cockroachdb/cockroach#77101 - This syntax is accepted (but does not have any effect): {% include_cached copy-clipboard.html %} ~~~ sql @@ -19,6 +19,6 @@ CockroachDB implements SQL {% if page.name == "known-limitations.md" %} [cursor] DECLARE test_cur CURSOR WITH HOLD FOR SELECT * FROM foo ORDER BY bar; COMMIT; -- This will fail with an error because CLOSE test_cur was not called inside the transaction. ~~~ -- Scrollable cursor (also known as reverse `FETCH`) is not supported. 
[cockroachdb/cockroach#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- [`SELECT ... FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) with a cursor is not supported. [cockroachdb/cockroach#77103](https://github.com/cockroachdb/cockroach/issues/77103) -- Respect for [`SAVEPOINT`s]({% link {{ page.version.version }}/savepoint.md %}) is not supported. Cursor definitions do not disappear properly if rolled back to a `SAVEPOINT` from before they were created. [cockroachdb/cockroach#77104](https://github.com/cockroachdb/cockroach/issues/77104) +- Scrollable cursor (also known as reverse `FETCH`) is not supported. cockroachdb/cockroach#77102 +- [`SELECT ... FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) with a cursor is not supported. cockroachdb/cockroach#77103 +- Respect for [`SAVEPOINT`s]({% link {{ page.version.version }}/savepoint.md %}) is not supported. Cursor definitions do not disappear properly if rolled back to a `SAVEPOINT` from before they were created. cockroachdb/cockroach#77104 diff --git a/src/current/_includes/v23.2/known-limitations/stats-refresh-upgrade.md b/src/current/_includes/v23.2/known-limitations/stats-refresh-upgrade.md index 3d5a8d26325..3325dfaa53b 100644 --- a/src/current/_includes/v23.2/known-limitations/stats-refresh-upgrade.md +++ b/src/current/_includes/v23.2/known-limitations/stats-refresh-upgrade.md @@ -1 +1 @@ -- The [automatic statistics refresher]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-statistics-refresh-rate) automatically checks whether it needs to refresh statistics for every table in the database upon startup of each node in the cluster. If statistics for a table have not been refreshed in a while, this will trigger collection of statistics for that table. If statistics have been refreshed recently, it will not force a refresh. 
As a result, the automatic statistics refresher does not necessarily perform a refresh of statistics after an [upgrade]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}). This could cause a problem, for example, if the upgrade moves from a version without [histograms]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-histogram-collection) to a version with histograms. To refresh statistics manually, use [`CREATE STATISTICS`](create-statistics.html). [#54816](https://github.com/cockroachdb/cockroach/issues/54816) +- The [automatic statistics refresher]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-statistics-refresh-rate) automatically checks whether it needs to refresh statistics for every table in the database upon startup of each node in the cluster. If statistics for a table have not been refreshed in a while, this will trigger collection of statistics for that table. If statistics have been refreshed recently, it will not force a refresh. As a result, the automatic statistics refresher does not necessarily perform a refresh of statistics after an [upgrade]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}). This could cause a problem, for example, if the upgrade moves from a version without [histograms]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-histogram-collection) to a version with histograms. To refresh statistics manually, use [`CREATE STATISTICS`](create-statistics.html). 
#54816 diff --git a/src/current/_includes/v23.2/known-limitations/userfile-upload-non-recursive.md b/src/current/_includes/v23.2/known-limitations/userfile-upload-non-recursive.md index 19db5fde6a4..c20fc85b310 100644 --- a/src/current/_includes/v23.2/known-limitations/userfile-upload-non-recursive.md +++ b/src/current/_includes/v23.2/known-limitations/userfile-upload-non-recursive.md @@ -1 +1 @@ -- `cockroach userfile upload` does not not currently allow for recursive uploads from a directory. This feature will be present with the `--recursive` flag in future versions. [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/pull/65307) +- `cockroach userfile upload` does not currently allow for recursive uploads from a directory. This feature will be present with the `--recursive` flag in future versions. Tracking GitHub Issue diff --git a/src/current/_includes/v23.2/misc/tooling.md b/src/current/_includes/v23.2/misc/tooling.md index dcd24363435..01dc63c840f 100644 --- a/src/current/_includes/v23.2/misc/tooling.md +++ b/src/current/_includes/v23.2/misc/tooling.md @@ -23,7 +23,7 @@ Customers should contact their account team before moving production workloads t Unless explicitly stated, support for a [driver](#drivers) or [data access framework](#data-access-frameworks-e-g-orms) does not include [automatic, client-side transaction retry handling]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#client-side-retry-handling). For client-side transaction retry handling samples, see [Example Apps]({% link {{ page.version.version }}/example-apps.md %}). {{site.data.alerts.end}} -If you encounter problems using CockroachDB with any of the tools listed on this page, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward better support. 
+If you encounter problems using CockroachDB with any of the tools listed on this page, please open an issue with details to help us make progress toward better support. For a list of tools supported by the CockroachDB community, see [Third-Party Tools Supported by the Community]({% link {{ page.version.version }}/community-tooling.md %}). @@ -32,23 +32,23 @@ For a list of tools supported by the CockroachDB community, see [Third-Party Too | Language | Driver | Latest tested version | Support level | CockroachDB adapter | Tutorial | |----------+--------+-----------------------+---------------------+---------------------+----------| | C | [libpq](http://www.postgresql.org/docs/13/static/libpq.html)| PostgreSQL 13 | Partial | N/A | N/A | -| C# (.NET) | [Npgsql](https://www.nuget.org/packages/Npgsql/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/npgsql.go ||var npgsqlSupportedTag = "v||"\n\n %} | Full | N/A | [Build a C# App with CockroachDB (Npgsql)](build-a-csharp-app-with-cockroachdb.html) | -| Go | [pgx](https://github.com/jackc/pgx/releases)


[pq](https://github.com/lib/pq) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgx.go ||var supportedPGXTag = "||"\n\n %}
(use latest version of CockroachDB adapter)
{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/libpq.go ||var libPQSupportedTag = "||"\n\n %} | Full


Full | [`crdbpgx`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbpgx)
(includes client-side transaction retry handling)
N/A | [Build a Go App with CockroachDB (pgx)](build-a-go-app-with-cockroachdb.html)


[Build a Go App with CockroachDB (pq)](build-a-go-app-with-cockroachdb-pq.html) | -| Java | [JDBC](https://jdbc.postgresql.org/download/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgjdbc.go ||var supportedPGJDBCTag = "||"\n\n %} | Full | N/A | [Build a Java App with CockroachDB (JDBC)](build-a-java-app-with-cockroachdb.html) | +| C# (.NET) | [Npgsql](https://www.nuget.org/packages/Npgsql/) | 7.0.2 | Full | N/A | [Build a C# App with CockroachDB (Npgsql)](build-a-csharp-app-with-cockroachdb.html) | +| Go | [pgx](https://github.com/jackc/pgx/releases)


[pq](https://github.com/lib/pq) | v5.3.1
(use latest version of CockroachDB adapter)
v1.10.5 | Full


Full | [`crdbpgx`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbpgx)
(includes client-side transaction retry handling)
N/A | [Build a Go App with CockroachDB (pgx)](build-a-go-app-with-cockroachdb.html)


[Build a Go App with CockroachDB (pq)](build-a-go-app-with-cockroachdb-pq.html) | +| Java | [JDBC](https://jdbc.postgresql.org/download/) | REL42.7.3 | Full | N/A | [Build a Java App with CockroachDB (JDBC)](build-a-java-app-with-cockroachdb.html) | | JavaScript | [pg](https://www.npmjs.com/package/pg) | 8.2.1 | Full | N/A | [Build a Node.js App with CockroachDB (pg)](build-a-nodejs-app-with-cockroachdb.html) | -| Python | [psycopg3](https://www.psycopg.org/psycopg3/docs/)


[psycopg2](https://www.psycopg.org/docs/install.html)


[asyncpg](https://magicstack.github.io/asyncpg/current/index.html) | 3.0.16


2.8.6


{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/asyncpg.go || var asyncpgSupportedTag = "||"\n\n %} | Full


Full


Partial | N/A


N/A


N/A | [Build a Python App with CockroachDB (psycopg3)](build-a-python-app-with-cockroachdb-psycopg3.html)


[Build a Python App with CockroachDB (psycopg2)](build-a-python-app-with-cockroachdb.html)


[Build a Python App with CockroachDB (asyncpg)](build-a-python-app-with-cockroachdb-asyncpg.html) | -| Ruby | [pg](https://rubygems.org/gems/pg) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/ruby_pg.go ||var rubyPGVersion = "||"\n\n %} | Full | N/A | [Build a Ruby App with CockroachDB (pg)](build-a-ruby-app-with-cockroachdb.html) | +| Python | [psycopg3](https://www.psycopg.org/psycopg3/docs/)


[psycopg2](https://www.psycopg.org/docs/install.html)


[asyncpg](https://magicstack.github.io/asyncpg/current/index.html) | 3.0.16


2.8.6


v0.24.0 | Full


Full


Partial | N/A


N/A


N/A | [Build a Python App with CockroachDB (psycopg3)](build-a-python-app-with-cockroachdb-psycopg3.html)


[Build a Python App with CockroachDB (psycopg2)](build-a-python-app-with-cockroachdb.html)


[Build a Python App with CockroachDB (asyncpg)](build-a-python-app-with-cockroachdb-asyncpg.html) | +| Ruby | [pg](https://rubygems.org/gems/pg) | v1.4.6 | Full | N/A | [Build a Ruby App with CockroachDB (pg)](build-a-ruby-app-with-cockroachdb.html) | | Rust | [rust-postgres](https://github.com/sfackler/rust-postgres) | 0.19.2 | Partial | N/A | [Build a Rust App with CockroachDB]({% link {{ page.version.version }}/build-a-rust-app-with-cockroachdb.md %}) | ## Data access frameworks (e.g., ORMs) | Language | Framework | Latest tested version | Support level | CockroachDB adapter | Tutorial | |----------+-----------+-----------------------+---------------+---------------------+----------| -| Go | [GORM](https://github.com/jinzhu/gorm/releases)


[go-pg](https://github.com/go-pg/pg)
[upper/db](https://github.com/upper/db) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gorm.go ||var gormSupportedTag = "||"\n\n %}


{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gopg.go ||var gopgSupportedTag = "||"\n\n %}
v4 | Full


Full
Full | [`crdbgorm`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbgorm)
(includes client-side transaction retry handling)
N/A
N/A | [Build a Go App with CockroachDB (GORM)](build-a-go-app-with-cockroachdb-gorm.html)


N/A
[Build a Go App with CockroachDB (upper/db)](build-a-go-app-with-cockroachdb-upperdb.html) | -| Java | [Hibernate](https://hibernate.org/orm/)
(including [Hibernate Spatial](https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html#spatial))
[jOOQ](https://www.jooq.org/)
[MyBatis](https://mybatis.org/mybatis-3/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/hibernate.go ||var supportedHibernateTag = "||"\n\n %} (must be at least 5.4.19)


3.13.2 (must be at least 3.13.0)
3.5.5| Full


Full
Full | N/A


N/A
N/A | [Build a Java App with CockroachDB (Hibernate)](build-a-java-app-with-cockroachdb-hibernate.html)


[Build a Java App with CockroachDB (jOOQ)](build-a-java-app-with-cockroachdb-jooq.html)
[Build a Spring App with CockroachDB (MyBatis)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) | -| JavaScript/TypeScript | [Sequelize](https://www.npmjs.com/package/sequelize)


[Knex.js](https://knexjs.org/)
[Prisma](https://prisma.io)
[TypeORM](https://www.npmjs.com/package/typeorm) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/sequelize.go ||var supportedSequelizeCockroachDBRelease = "||"\n\n %}
(use latest version of CockroachDB adapter)
{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/knex.go ||const supportedKnexTag = "||"\n\n %}
3.14.0
0.3.17 {% comment %}{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/typeorm.go ||const supportedTypeORMRelease = "||"\n %}{% endcomment %} | Full


Full
Full
Full | [`sequelize-cockroachdb`](https://www.npmjs.com/package/sequelize-cockroachdb)


N/A
N/A
N/A | [Build a Node.js App with CockroachDB (Sequelize)](build-a-nodejs-app-with-cockroachdb-sequelize.html)


[Build a Node.js App with CockroachDB (Knex.js)](build-a-nodejs-app-with-cockroachdb-knexjs.html)
[Build a Node.js App with CockroachDB (Prisma)](build-a-nodejs-app-with-cockroachdb-prisma.html)
[Build a TypeScript App with CockroachDB (TypeORM)](build-a-typescript-app-with-cockroachdb.html) | -| Ruby | [ActiveRecord](https://rubygems.org/gems/activerecord)
[RGeo/RGeo-ActiveRecord](https://github.com/cockroachdb/activerecord-cockroachdb-adapter#working-with-spatial-data) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/activerecord.go ||var supportedRailsVersion = "||"\nvar %}
(use latest version of CockroachDB adapter) | Full | [`activerecord-cockroachdb-adapter`](https://rubygems.org/gems/activerecord-cockroachdb-adapter)
(includes client-side transaction retry handling) | [Build a Ruby App with CockroachDB (ActiveRecord)](build-a-ruby-app-with-cockroachdb-activerecord.html) | -| Python | [Django](https://pypi.org/project/Django/)
(including [GeoDjango](https://docs.djangoproject.com/en/3.1/ref/contrib/gis/))
[peewee](https://github.com/coleifer/peewee/)
[SQLAlchemy](https://www.sqlalchemy.org/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/django.go ||var djangoSupportedTag = "cockroach-||"\nvar %}
(use latest version of CockroachDB adapter)

3.13.3
0.7.13
1.4.17
(use latest version of CockroachDB adapter) | Full


Full
Full
Full | [`django-cockroachdb`](https://pypi.org/project/django-cockroachdb/)


N/A
N/A
[`sqlalchemy-cockroachdb`](https://pypi.org/project/sqlalchemy-cockroachdb)
(includes client-side transaction retry handling) | [Build a Python App with CockroachDB (Django)](build-a-python-app-with-cockroachdb-django.html)


N/A (See [peewee docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cockroach-database).)
[Build a Python App with CockroachDB (SQLAlchemy)](build-a-python-app-with-cockroachdb-sqlalchemy.html) | +| Go | [GORM](https://github.com/jinzhu/gorm/releases)


[go-pg](https://github.com/go-pg/pg)
[upper/db](https://github.com/upper/db) | v1.24.1


v10.9.0
v4 | Full


Full
Full | [`crdbgorm`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbgorm)
(includes client-side transaction retry handling)
N/A
N/A | [Build a Go App with CockroachDB (GORM)](build-a-go-app-with-cockroachdb-gorm.html)


N/A
[Build a Go App with CockroachDB (upper/db)](build-a-go-app-with-cockroachdb-upperdb.html) | +| Java | [Hibernate](https://hibernate.org/orm/)
(including [Hibernate Spatial](https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html#spatial))
[jOOQ](https://www.jooq.org/)
[MyBatis](https://mybatis.org/mybatis-3/) | 6.6.20 (must be at least 5.4.19)


3.13.2 (must be at least 3.13.0)
3.5.5| Full


Full
Full | N/A


N/A
N/A | [Build a Java App with CockroachDB (Hibernate)](build-a-java-app-with-cockroachdb-hibernate.html)


[Build a Java App with CockroachDB (jOOQ)](build-a-java-app-with-cockroachdb-jooq.html)
[Build a Spring App with CockroachDB (MyBatis)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) | +| JavaScript/TypeScript | [Sequelize](https://www.npmjs.com/package/sequelize)


[Knex.js](https://knexjs.org/)
[Prisma](https://prisma.io)
[TypeORM](https://www.npmjs.com/package/typeorm) | v6.0.5
(use latest version of CockroachDB adapter)
2.5.1
3.14.0
0.3.17 {% comment %}remove-unsafe-crdb-setting{% endcomment %} | Full


Full
Full
Full | [`sequelize-cockroachdb`](https://www.npmjs.com/package/sequelize-cockroachdb)


N/A
N/A
N/A | [Build a Node.js App with CockroachDB (Sequelize)](build-a-nodejs-app-with-cockroachdb-sequelize.html)


[Build a Node.js App with CockroachDB (Knex.js)](build-a-nodejs-app-with-cockroachdb-knexjs.html)
[Build a Node.js App with CockroachDB (Prisma)](build-a-nodejs-app-with-cockroachdb-prisma.html)
[Build a TypeScript App with CockroachDB (TypeORM)](build-a-typescript-app-with-cockroachdb.html) | +| Ruby | [ActiveRecord](https://rubygems.org/gems/activerecord)
[RGeo/RGeo-ActiveRecord](https://github.com/cockroachdb/activerecord-cockroachdb-adapter#working-with-spatial-data) | 8.1.0
(use latest version of CockroachDB adapter) | Full | [`activerecord-cockroachdb-adapter`](https://rubygems.org/gems/activerecord-cockroachdb-adapter)
(includes client-side transaction retry handling) | [Build a Ruby App with CockroachDB (ActiveRecord)](build-a-ruby-app-with-cockroachdb-activerecord.html) | +| Python | [Django](https://pypi.org/project/Django/)
(including [GeoDjango](https://docs.djangoproject.com/en/3.1/ref/contrib/gis/))
[peewee](https://github.com/coleifer/peewee/)
[SQLAlchemy](https://www.sqlalchemy.org/) | 4.1.x
(use latest version of CockroachDB adapter)

3.13.3
0.7.13
1.4.17
(use latest version of CockroachDB adapter) | Full


Full
Full
Full | [`django-cockroachdb`](https://pypi.org/project/django-cockroachdb/)


N/A
N/A
[`sqlalchemy-cockroachdb`](https://pypi.org/project/sqlalchemy-cockroachdb)
(includes client-side transaction retry handling) | [Build a Python App with CockroachDB (Django)](build-a-python-app-with-cockroachdb-django.html)


N/A (See [peewee docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cockroach-database).)
[Build a Python App with CockroachDB (SQLAlchemy)](build-a-python-app-with-cockroachdb-sqlalchemy.html) | ## Application frameworks diff --git a/src/current/_includes/v23.2/orchestration/start-cockroachdb-insecure.md b/src/current/_includes/v23.2/orchestration/start-cockroachdb-insecure.md index 3406d48edbb..c9af5cc0267 100644 --- a/src/current/_includes/v23.2/orchestration/start-cockroachdb-insecure.md +++ b/src/current/_includes/v23.2/orchestration/start-cockroachdb-insecure.md @@ -1,10 +1,10 @@ -1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it. +1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it. - Download [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml): + Download [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml ~~~ {{site.data.alerts.callout_info}} @@ -27,11 +27,11 @@ Alternatively, if you'd rather start with a configuration file that has been customized for performance: - 1. 
Download our [performance version of `cockroachdb-statefulset-insecure.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml): + 1. Download our [performance version of `cockroachdb-statefulset-insecure.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml ~~~ 1. Modify the file wherever there is a `TODO` comment. @@ -72,12 +72,12 @@ pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s ~~~ -1. Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: +1. 
Use our [`cluster-init.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml ~~~ ~~~ diff --git a/src/current/_includes/v23.2/orchestration/start-cockroachdb-local-insecure.md b/src/current/_includes/v23.2/orchestration/start-cockroachdb-local-insecure.md index 552cb3cd25f..196a53bce4c 100644 --- a/src/current/_includes/v23.2/orchestration/start-cockroachdb-local-insecure.md +++ b/src/current/_includes/v23.2/orchestration/start-cockroachdb-local-insecure.md @@ -1,8 +1,8 @@ -1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it: +1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it: {% include_cached copy-clipboard.html %} ~~~ shell - $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml + $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml ~~~ ~~~ @@ -41,12 +41,12 @@ pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s ~~~ -1. 
Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: +1. Use our [`cluster-init.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml ~~~ ~~~ diff --git a/src/current/_includes/v23.2/orchestration/start-cockroachdb-secure.md b/src/current/_includes/v23.2/orchestration/start-cockroachdb-secure.md index 972cabc2d8e..a7299e1aa25 100644 --- a/src/current/_includes/v23.2/orchestration/start-cockroachdb-secure.md +++ b/src/current/_includes/v23.2/orchestration/start-cockroachdb-secure.md @@ -1,10 +1,10 @@ ### Configure the cluster -1. Download and modify our [StatefulSet configuration](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml): +1. Download and modify our [StatefulSet configuration](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml ~~~ 1. Update `secretName` with the name of the corresponding node secret. 
diff --git a/src/current/_includes/v23.2/orchestration/test-cluster-secure.md b/src/current/_includes/v23.2/orchestration/test-cluster-secure.md index f255d8d62fc..c146d634626 100644 --- a/src/current/_includes/v23.2/orchestration/test-cluster-secure.md +++ b/src/current/_includes/v23.2/orchestration/test-cluster-secure.md @@ -42,7 +42,7 @@ $ kubectl create \ {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ --f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/client.yaml +-f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/client.yaml ~~~ ~~~ diff --git a/src/current/_includes/v23.2/prod-deployment/decommission-pre-flight-checks.md b/src/current/_includes/v23.2/prod-deployment/decommission-pre-flight-checks.md index b267379384b..70e35e4b72e 100644 --- a/src/current/_includes/v23.2/prod-deployment/decommission-pre-flight-checks.md +++ b/src/current/_includes/v23.2/prod-deployment/decommission-pre-flight-checks.md @@ -14,5 +14,5 @@ Failed running "node decommission" These checks can be skipped by [passing the flag `--checks=skip` to `cockroach node decommission`]({% link {{ page.version.version }}/cockroach-node.md %}#decommission-checks). {{site.data.alerts.callout_info}} -The amount of remaining disk space on other nodes in the cluster is not yet considered as part of the decommissioning pre-flight checks. For more information, see [cockroachdb/cockroach#71757](https://github.com/cockroachdb/cockroach/issues/71757) +The amount of remaining disk space on other nodes in the cluster is not yet considered as part of the decommissioning pre-flight checks. 
For more information, see cockroachdb/cockroach#71757 {{site.data.alerts.end}} diff --git a/src/current/_includes/v23.2/sql/cannot-refresh-materialized-views-inside-transactions.md b/src/current/_includes/v23.2/sql/cannot-refresh-materialized-views-inside-transactions.md index c78308152f6..f3407abc8d3 100644 --- a/src/current/_includes/v23.2/sql/cannot-refresh-materialized-views-inside-transactions.md +++ b/src/current/_includes/v23.2/sql/cannot-refresh-materialized-views-inside-transactions.md @@ -24,4 +24,4 @@ SQLSTATE: 25000 ~~~ - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/66008) + Tracking GitHub Issue diff --git a/src/current/_includes/v23.2/sql/expression-indexes-cannot-reference-computed-columns.md b/src/current/_includes/v23.2/sql/expression-indexes-cannot-reference-computed-columns.md index 64d80e31fa3..bef1da01ea5 100644 --- a/src/current/_includes/v23.2/sql/expression-indexes-cannot-reference-computed-columns.md +++ b/src/current/_includes/v23.2/sql/expression-indexes-cannot-reference-computed-columns.md @@ -1,3 +1,3 @@ CockroachDB does not allow {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} to reference [computed columns]({% link {{ page.version.version }}/computed-columns.md %}). 
- [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/67900) + Tracking GitHub Issue diff --git a/src/current/_includes/v23.2/sql/expressions-as-on-conflict-targets.md b/src/current/_includes/v23.2/sql/expressions-as-on-conflict-targets.md index 882dd905b1d..e9cc6a63e8a 100644 --- a/src/current/_includes/v23.2/sql/expressions-as-on-conflict-targets.md +++ b/src/current/_includes/v23.2/sql/expressions-as-on-conflict-targets.md @@ -37,4 +37,4 @@ INSERT INTO t VALUES (1, 2) ON CONFLICT ((a + b)) DO UPDATE SET a = 10 HINT: try \h INSERT ~~~ -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/67893) +Tracking GitHub Issue diff --git a/src/current/_includes/v23.2/sql/jsonb-comparison.md b/src/current/_includes/v23.2/sql/jsonb-comparison.md index 7b4435d6fcc..d1b5fff746a 100644 --- a/src/current/_includes/v23.2/sql/jsonb-comparison.md +++ b/src/current/_includes/v23.2/sql/jsonb-comparison.md @@ -10,4 +10,4 @@ You cannot use comparison operators (such as `<` or `>`) on [`JSONB`]({% link {{ SQLSTATE: 22023 ~~~ - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/49144) + Tracking GitHub issue diff --git a/src/current/_includes/v23.2/sql/locality-optimized-search-virtual-computed-columns.md b/src/current/_includes/v23.2/sql/locality-optimized-search-virtual-computed-columns.md index c016103b1e1..0d948a3dafe 100644 --- a/src/current/_includes/v23.2/sql/locality-optimized-search-virtual-computed-columns.md +++ b/src/current/_includes/v23.2/sql/locality-optimized-search-virtual-computed-columns.md @@ -1 +1 @@ -- {% if page.name == "cost-based-optimizer.md" %} Locality optimized search {% else %} [Locality optimized search]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) {% endif %} does not work for queries that use [partitioned unique indexes]({% link {{ page.version.version }}/partitioning.md %}#partition-using-a-secondary-index) on [virtual computed 
columns](computed-columns.html#virtual-computed-columns). A workaround for computed columns is to make the virtual computed column a [stored computed column](computed-columns.html#stored-computed-columns). Locality optimized search does not work for queries that use partitioned unique [expression indexes](expression-indexes.html). [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/68129) +- {% if page.name == "cost-based-optimizer.md" %} Locality optimized search {% else %} [Locality optimized search]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) {% endif %} does not work for queries that use [partitioned unique indexes]({% link {{ page.version.version }}/partitioning.md %}#partition-using-a-secondary-index) on [virtual computed columns](computed-columns.html#virtual-computed-columns). A workaround for computed columns is to make the virtual computed column a [stored computed column](computed-columns.html#stored-computed-columns). Locality optimized search does not work for queries that use partitioned unique [expression indexes](expression-indexes.html). Tracking GitHub Issue diff --git a/src/current/_includes/v23.2/sql/materialized-views-no-stats.md b/src/current/_includes/v23.2/sql/materialized-views-no-stats.md index e4d262b56ea..e6fa3103388 100644 --- a/src/current/_includes/v23.2/sql/materialized-views-no-stats.md +++ b/src/current/_includes/v23.2/sql/materialized-views-no-stats.md @@ -1,3 +1,3 @@ - The optimizer may not select the most optimal query plan when querying materialized views because CockroachDB does not [collect statistics]({% link {{ page.version.version }}/cost-based-optimizer.md %}#table-statistics) on materialized views. - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/78181). + Tracking GitHub Issue. 
diff --git a/src/current/_includes/v23.2/sql/savepoints-and-high-priority-transactions.md b/src/current/_includes/v23.2/sql/savepoints-and-high-priority-transactions.md index c6de489e641..43f09ea415f 100644 --- a/src/current/_includes/v23.2/sql/savepoints-and-high-priority-transactions.md +++ b/src/current/_includes/v23.2/sql/savepoints-and-high-priority-transactions.md @@ -1 +1 @@ -[`ROLLBACK TO SAVEPOINT`]({% link {{ page.version.version }}/rollback-transaction.md %}#rollback-a-nested-transaction) (for either regular savepoints or "restart savepoints" defined with `cockroach_restart`) causes a "feature not supported" error after a DDL statement in a [`HIGH PRIORITY` transaction]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities), in order to avoid a transaction deadlock. For more information, see GitHub issue [#46414](https://www.github.com/cockroachdb/cockroach/issues/46414). +[`ROLLBACK TO SAVEPOINT`]({% link {{ page.version.version }}/rollback-transaction.md %}#rollback-a-nested-transaction) (for either regular savepoints or "restart savepoints" defined with `cockroach_restart`) causes a "feature not supported" error after a DDL statement in a [`HIGH PRIORITY` transaction]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities), in order to avoid a transaction deadlock. For more information, see GitHub issue #46414. diff --git a/src/current/_includes/v23.2/sql/unsupported-postgres-features.md b/src/current/_includes/v23.2/sql/unsupported-postgres-features.md index 999c4e2e001..57bddc9c388 100644 --- a/src/current/_includes/v23.2/sql/unsupported-postgres-features.md +++ b/src/current/_includes/v23.2/sql/unsupported-postgres-features.md @@ -1,10 +1,10 @@ ### `CREATE DOMAIN` -CockroachDB does not support `CREATE DOMAIN`. Tracking issue: [cockroachdb/cockroach#108659](https://github.com/cockroachdb/cockroach/issues/108659). +CockroachDB does not support `CREATE DOMAIN`. Tracking issue: cockroachdb/cockroach#108659. 
### PostgreSQL range types -CockroachDB does not support PostgreSQL range types. Tracking issue: [cockroachdb/cockroach#128638](https://github.com/cockroachdb/cockroach/issues/128638). +CockroachDB does not support PostgreSQL range types. Tracking issue: cockroachdb/cockroach#128638. ### Other unsupported features diff --git a/src/current/_includes/v24.1/backward-incompatible/alpha.1.md b/src/current/_includes/v24.1/backward-incompatible/alpha.1.md index 524b22b0faa..b505caef168 100644 --- a/src/current/_includes/v24.1/backward-incompatible/alpha.1.md +++ b/src/current/_includes/v24.1/backward-incompatible/alpha.1.md @@ -1,15 +1,15 @@ -- CockroachDB no longer performs environment variable expansion in the parameter `--certs-dir`. Uses like `--certs-dir='$HOME/path'` (expansion by CockroachDB) can be replaced by `--certs-dir="$HOME/path"` (expansion by the Unix shell). [#81298][#81298] -- In the Cockroach CLI, [`BOOL` values](../v23.1/bool.html) are now formatted as `t` or `f` instead of `True` or `False`. [#81943][#81943] -- Removed the `cockroach quit` command. It has been deprecated since v20.1. To [shut down a node](../v23.1/node-shutdown.html) gracefully, send a `SIGTERM` signal to it. [#82988][#82988] -- Added a cluster version to allow the [Pebble storage engine](../v23.1/architecture/storage-layer.html#pebble) to recombine certain SSTables (specifically, user keys that are split across multiple files in a level of the [log-structured merge-tree](../v23.1/architecture/storage-layer.html#log-structured-merge-trees)). Recombining the split user keys is required for supporting the range keys feature. The migration to recombine the SSTables is expected to be short (split user keys are rare in practice), but will block subsequent migrations until all tables have been recombined. The `storage.marked-for-compaction-files` time series metric can show the progress of the migration. 
[#84887][#84887] +- CockroachDB no longer performs environment variable expansion in the parameter `--certs-dir`. Uses like `--certs-dir='$HOME/path'` (expansion by CockroachDB) can be replaced by `--certs-dir="$HOME/path"` (expansion by the Unix shell). #81298 +- In the Cockroach CLI, [`BOOL` values](../v23.1/bool.html) are now formatted as `t` or `f` instead of `True` or `False`. #81943 +- Removed the `cockroach quit` command. It has been deprecated since v20.1. To [shut down a node](../v23.1/node-shutdown.html) gracefully, send a `SIGTERM` signal to it. #82988 +- Added a cluster version to allow the [Pebble storage engine](../v23.1/architecture/storage-layer.html#pebble) to recombine certain SSTables (specifically, user keys that are split across multiple files in a level of the [log-structured merge-tree](../v23.1/architecture/storage-layer.html#log-structured-merge-trees)). Recombining the split user keys is required for supporting the range keys feature. The migration to recombine the SSTables is expected to be short (split user keys are rare in practice), but will block subsequent migrations until all tables have been recombined. The `storage.marked-for-compaction-files` time series metric can show the progress of the migration. #84887 - Using a single TCP port listener for both RPC (node-node) and SQL client connections is now deprecated. This capability **will be removed** in the next version of CockroachDB. Instead, make one of the following configuration changes to your CockroachDB deployment: - Preferred: keep port `26257` for SQL, and allocate a new port, e.g., `26357`, for node-node RPC connections. For example, you might configure a node with the flags `--listen-addr=:26357 --sql-addr=:26257`, where subsequent nodes seeking to join would then use the flag `--join=othernode:26357,othernode:26257`. This will become the default configuration in the next version of CockroachDB. 
When using this mode of operation, care should be taken to use a `--join` flag that includes both the previous and new port numbers for other nodes, so that no network partition occurs during the upgrade. - - Optional: keep port `26257` for RPC, and allocate a new port, e.g., `26357`, for SQL connections. For example, you might configure a node with the flags `--listen-addr=:26257 --sql-addr=:26357`. When using this mode of operation, the `--join` flags do not need to be modified. However, SQL client apps or the SQL load balancer configuration (when in use) must be updated to use the new SQL port number. [#85671][#85671] -- If no `nullif` option is specified while using [`IMPORT CSV`](../v23.1/import.html), then a zero-length string in the input is now treated as `NULL`. The quoted empty string in the input is treated as an empty string. Similarly, if `nullif` is specified, then an unquoted value is treated as `NULL`, and a quoted value is treated as that string. These changes were made to make `IMPORT CSV` behave more similarly to `COPY CSV`. If the previous behavior (i.e., treating either quoted or unquoted values that match the `nullif` setting as `NULL`) is desired, you can use the new `allow_quoted_null` option in the `IMPORT` statement. [#84487][#84487] -- [`COPY FROM`](../v23.1/copy.html) operations are now atomic by default instead of being segmented into 100 row transactions. Set the `copy_from_atomic_enabled` session setting to `false` for the previous behavior. [#85986][#85986] -- The `GRANT` privilege has been removed and replaced by the more granular [`WITH GRANT OPTION`]({% link v24.1/grant.md %}#grant-privileges-with-the-option-to-grant-to-others), which provides control over which privileges are allowed to be granted. [#81310][#81310] -- Removed the ability to cast `int`, `int2`, and `int8` to a `0` length `BIT` or `VARBIT`. [#81266][#81266] -- Removed the deprecated `GRANT` privilege. 
[#81310][#81310] -- Removed the `ttl_automatic_column` storage parameter. The `crdb_internal_expiration` column is created when `ttl_expire_after` is set and removed when `ttl_expire_after` is reset. [#83134][#83134] -- Removed the byte string parameter in the `crdb_internal.schedule_sql_stats_compaction` function. [#82560][#82560] -- Changed the default value of the `enable_implicit_transaction_for_batch_statements` to `true`. This means that a [batch of statements]({% link v24.1/transactions.md %}#batched-statements) sent in one string separated by semicolons is treated as an implicit transaction. [#76834][#76834] + - Optional: keep port `26257` for RPC, and allocate a new port, e.g., `26357`, for SQL connections. For example, you might configure a node with the flags `--listen-addr=:26257 --sql-addr=:26357`. When using this mode of operation, the `--join` flags do not need to be modified. However, SQL client apps or the SQL load balancer configuration (when in use) must be updated to use the new SQL port number. #85671 +- If no `nullif` option is specified while using [`IMPORT CSV`](../v23.1/import.html), then a zero-length string in the input is now treated as `NULL`. The quoted empty string in the input is treated as an empty string. Similarly, if `nullif` is specified, then an unquoted value is treated as `NULL`, and a quoted value is treated as that string. These changes were made to make `IMPORT CSV` behave more similarly to `COPY CSV`. If the previous behavior (i.e., treating either quoted or unquoted values that match the `nullif` setting as `NULL`) is desired, you can use the new `allow_quoted_null` option in the `IMPORT` statement. #84487 +- [`COPY FROM`](../v23.1/copy.html) operations are now atomic by default instead of being segmented into 100 row transactions. Set the `copy_from_atomic_enabled` session setting to `false` for the previous behavior. 
#85986 +- The `GRANT` privilege has been removed and replaced by the more granular [`WITH GRANT OPTION`]({% link v24.1/grant.md %}#grant-privileges-with-the-option-to-grant-to-others), which provides control over which privileges are allowed to be granted. #81310 +- Removed the ability to cast `int`, `int2`, and `int8` to a `0` length `BIT` or `VARBIT`. #81266 +- Removed the deprecated `GRANT` privilege. #81310 +- Removed the `ttl_automatic_column` storage parameter. The `crdb_internal_expiration` column is created when `ttl_expire_after` is set and removed when `ttl_expire_after` is reset. #83134 +- Removed the byte string parameter in the `crdb_internal.schedule_sql_stats_compaction` function. #82560 +- Changed the default value of the `enable_implicit_transaction_for_batch_statements` to `true`. This means that a [batch of statements]({% link v24.1/transactions.md %}#batched-statements) sent in one string separated by semicolons is treated as an implicit transaction. #76834 diff --git a/src/current/_includes/v24.1/cdc/avro-udt-composite.md b/src/current/_includes/v24.1/cdc/avro-udt-composite.md index 7a34fbd3253..8374c4d2baf 100644 --- a/src/current/_includes/v24.1/cdc/avro-udt-composite.md +++ b/src/current/_includes/v24.1/cdc/avro-udt-composite.md @@ -1 +1 @@ -A changefeed in [Avro format]({% link {{ page.version.version }}/changefeed-messages.md %}#avro) will not be able to serialize [user-defined composite (tuple) types](create-type.html). [#102903](https://github.com/cockroachdb/cockroach/issues/102903) \ No newline at end of file +A changefeed in [Avro format]({% link {{ page.version.version }}/changefeed-messages.md %}#avro) will not be able to serialize [user-defined composite (tuple) types](create-type.html). 
#102903 \ No newline at end of file diff --git a/src/current/_includes/v24.1/cdc/csv-udt-composite.md b/src/current/_includes/v24.1/cdc/csv-udt-composite.md index 834bddd8366..39c88f3e06a 100644 --- a/src/current/_includes/v24.1/cdc/csv-udt-composite.md +++ b/src/current/_includes/v24.1/cdc/csv-udt-composite.md @@ -1 +1 @@ -A changefeed emitting [CSV]({% link {{ page.version.version }}/changefeed-messages.md %}#csv) will include `AS` labels in the message format when the changefeed serializes a [user-defined composite type]({% link {{ page.version.version }}/create-type.md %}). [#102905](https://github.com/cockroachdb/cockroach/issues/102905) \ No newline at end of file +A changefeed emitting [CSV]({% link {{ page.version.version }}/changefeed-messages.md %}#csv) will include `AS` labels in the message format when the changefeed serializes a [user-defined composite type]({% link {{ page.version.version }}/create-type.md %}). #102905 \ No newline at end of file diff --git a/src/current/_includes/v24.1/essential-metrics.md b/src/current/_includes/v24.1/essential-metrics.md index 5f5754d17a9..011f8c8d37e 100644 --- a/src/current/_includes/v24.1/essential-metrics.md +++ b/src/current/_includes/v24.1/essential-metrics.md @@ -201,4 +201,4 @@ If [Row-Level TTL]({% link {{ page.version.version }}/row-level-ttl.md %}) is co - [Custom Chart Debug Page]({% link {{ page.version.version }}/ui-custom-chart-debug-page.md %}) - [Cluster API]({% link {{ page.version.version }}/cluster-api.md %}) - [Essential Alerts]({% link {{ page.version.version }}/essential-alerts-{{ include.deployment}}.md %}) -- [CockroachDB Source Code - DB Console metrics to graphs mappings (in *.tsx files)](https://github.com/cockroachdb/cockroach/tree/master/pkg/ui/workspaces/db-console/src/views/cluster/containers/nodeGraphs/dashboards) +- CockroachDB Source Code - DB Console metrics to graphs mappings (in *.tsx files) diff --git a/src/current/_includes/v24.1/faq/what-is-crdb.md 
b/src/current/_includes/v24.1/faq/what-is-crdb.md index 28857ed61fa..ee04ca10506 100644 --- a/src/current/_includes/v24.1/faq/what-is-crdb.md +++ b/src/current/_includes/v24.1/faq/what-is-crdb.md @@ -1,6 +1,6 @@ CockroachDB is a [distributed SQL](https://www.cockroachlabs.com/blog/what-is-distributed-sql/) database built on a transactional and strongly-consistent key-value store. It **scales** horizontally; **survives** disk, machine, rack, and even datacenter failures with minimal latency disruption and no manual intervention; supports **strongly-consistent** ACID transactions; and provides a familiar **SQL** API for structuring, manipulating, and querying data. -CockroachDB is inspired by Google's [Spanner](http://research.google.com/archive/spanner.html) and [F1](http://research.google.com/pubs/pub38125.html) technologies, and the [source code](https://github.com/cockroachdb/cockroach) is freely available. +CockroachDB is inspired by Google's [Spanner](http://research.google.com/archive/spanner.html) and [F1](http://research.google.com/pubs/pub38125.html) technologies, and the source code is freely available. {{site.data.alerts.callout_success}} For a deeper dive into CockroachDB's capabilities and how it fits into the database landscape, take the free [**Intro to Distributed SQL and CockroachDB**](https://university.cockroachlabs.com/courses/course-v1:crl+intro-to-distributed-sql-and-cockroachdb+self-paced/about) course on Cockroach University. 
diff --git a/src/current/_includes/v24.1/finalization-required/119894.md b/src/current/_includes/v24.1/finalization-required/119894.md index f2b393c3c0e..f5177dcd523 100644 --- a/src/current/_includes/v24.1/finalization-required/119894.md +++ b/src/current/_includes/v24.1/finalization-required/119894.md @@ -1 +1 @@ -[Splits](https://cockroachlabs.com/docs/{{ include.version }}/architecture/distribution-layer#range-splits) no longer hold [latches](https://cockroachlabs.com/docs/architecture/distribution-layer.#latch-manager) for time proportional to the range size while computing [MVCC](https://cockroachlabs.com/docs/{{ include.version }}/architecture/storage-layer#mvcc) statistics. Instead, MVCC statistics are pre-computed before the critical section of the split. As a side effect, the resulting statistics are no longer 100% accurate because they may correctly distribute writes concurrent with the split. To mitigate against this potential inaccuracy, and to prevent the statistics from drifting after successive splits, the existing stored statistics are re-computed and corrected if needed during the non-critical section of the split. [#119894](https://github.com/cockroachdb/cockroach/pull/119894) +[Splits](https://cockroachlabs.com/docs/{{ include.version }}/architecture/distribution-layer#range-splits) no longer hold [latches](https://cockroachlabs.com/docs/{{ include.version }}/architecture/distribution-layer#latch-manager) for time proportional to the range size while computing [MVCC](https://cockroachlabs.com/docs/{{ include.version }}/architecture/storage-layer#mvcc) statistics. Instead, MVCC statistics are pre-computed before the critical section of the split. As a side effect, the resulting statistics are no longer 100% accurate because they may not correctly distribute writes concurrent with the split.
To mitigate against this potential inaccuracy, and to prevent the statistics from drifting after successive splits, the existing stored statistics are re-computed and corrected if needed during the non-critical section of the split. #119894 diff --git a/src/current/_includes/v24.1/known-limitations/alter-changefeed-cdc-queries.md b/src/current/_includes/v24.1/known-limitations/alter-changefeed-cdc-queries.md index 56dd7eeaacd..4134b3c9f86 100644 --- a/src/current/_includes/v24.1/known-limitations/alter-changefeed-cdc-queries.md +++ b/src/current/_includes/v24.1/known-limitations/alter-changefeed-cdc-queries.md @@ -1 +1 @@ -{% if page.name == "alter-changefeed.md" %} `ALTER CHANGEFEED` {% else %} [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) {% endif %} is not fully supported with changefeeds that use {% if page.name == "cdc-queries.md" %} CDC queries. {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}). {% endif %} You can alter the options that a changefeed uses, but you cannot alter the changefeed target tables. [#83033](https://github.com/cockroachdb/cockroach/issues/83033) \ No newline at end of file +{% if page.name == "alter-changefeed.md" %} `ALTER CHANGEFEED` {% else %} [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) {% endif %} is not fully supported with changefeeds that use {% if page.name == "cdc-queries.md" %} CDC queries. {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}). {% endif %} You can alter the options that a changefeed uses, but you cannot alter the changefeed target tables. 
#83033 \ No newline at end of file diff --git a/src/current/_includes/v24.1/known-limitations/alter-changefeed-limitations.md b/src/current/_includes/v24.1/known-limitations/alter-changefeed-limitations.md index a183f2964f4..cb75ad71c55 100644 --- a/src/current/_includes/v24.1/known-limitations/alter-changefeed-limitations.md +++ b/src/current/_includes/v24.1/known-limitations/alter-changefeed-limitations.md @@ -1,4 +1,4 @@ -- It is necessary to [`PAUSE`]({% link {{ page.version.version }}/pause-job.md %}) the changefeed before performing any [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) statement. [#77171](https://github.com/cockroachdb/cockroach/issues/77171) +- It is necessary to [`PAUSE`]({% link {{ page.version.version }}/pause-job.md %}) the changefeed before performing any [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) statement. #77171 - CockroachDB does not keep track of the [`initial_scan`]({% link {{ page.version.version }}/create-changefeed.md %}#initial-scan) option applied to tables when it is set to `yes` or `only`. For example: ~~~ sql diff --git a/src/current/_includes/v24.1/known-limitations/alter-view-limitations.md b/src/current/_includes/v24.1/known-limitations/alter-view-limitations.md index 642bed6ce08..e44921629ff 100644 --- a/src/current/_includes/v24.1/known-limitations/alter-view-limitations.md +++ b/src/current/_includes/v24.1/known-limitations/alter-view-limitations.md @@ -1,4 +1,4 @@ `ALTER VIEW` does not currently support: - Changing the [`SELECT`]({% link {{ page.version.version }}/select-clause.md %}) statement executed by a view. Instead, you must drop the existing view and create a new view. -- Renaming a view that other views depend on. This feature may be added in the future. [#10083](https://github.com/cockroachdb/cockroach/issues/10083) \ No newline at end of file +- Renaming a view that other views depend on. This feature may be added in the future. 
#10083 \ No newline at end of file diff --git a/src/current/_includes/v24.1/known-limitations/aost-limitations.md b/src/current/_includes/v24.1/known-limitations/aost-limitations.md index 811c884d08d..c42e62800d3 100644 --- a/src/current/_includes/v24.1/known-limitations/aost-limitations.md +++ b/src/current/_includes/v24.1/known-limitations/aost-limitations.md @@ -1 +1 @@ -CockroachDB does not support placeholders in {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %}. The time value must be a constant value embedded in the SQL string. [#30955](https://github.com/cockroachdb/cockroach/issues/30955) \ No newline at end of file +CockroachDB does not support placeholders in {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %}. The time value must be a constant value embedded in the SQL string. 
#30955 \ No newline at end of file diff --git a/src/current/_includes/v24.1/known-limitations/cannot-refresh-materialized-views-inside-transactions.md b/src/current/_includes/v24.1/known-limitations/cannot-refresh-materialized-views-inside-transactions.md index b0aaf728177..2f6fe466a95 100644 --- a/src/current/_includes/v24.1/known-limitations/cannot-refresh-materialized-views-inside-transactions.md +++ b/src/current/_includes/v24.1/known-limitations/cannot-refresh-materialized-views-inside-transactions.md @@ -24,4 +24,4 @@ SQLSTATE: 25000 ~~~ - [#66008](https://github.com/cockroachdb/cockroach/issues/66008) + #66008 diff --git a/src/current/_includes/v24.1/known-limitations/cdc-queries-column-families.md b/src/current/_includes/v24.1/known-limitations/cdc-queries-column-families.md index 505a8c9700e..52bdabec236 100644 --- a/src/current/_includes/v24.1/known-limitations/cdc-queries-column-families.md +++ b/src/current/_includes/v24.1/known-limitations/cdc-queries-column-families.md @@ -1 +1 @@ -Creating a changefeed with {% if page.name == "cdc-queries.md" %} CDC queries {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}) {% endif %} on tables with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %} is not supported. [#127761](https://github.com/cockroachdb/cockroach/issues/127761) \ No newline at end of file +Creating a changefeed with {% if page.name == "cdc-queries.md" %} CDC queries {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}) {% endif %} on tables with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %} is not supported. 
#127761 \ No newline at end of file diff --git a/src/current/_includes/v24.1/known-limitations/cdc-queries.md b/src/current/_includes/v24.1/known-limitations/cdc-queries.md index 2839eba5eda..552900531b0 100644 --- a/src/current/_includes/v24.1/known-limitations/cdc-queries.md +++ b/src/current/_includes/v24.1/known-limitations/cdc-queries.md @@ -3,5 +3,5 @@ - The following are not permitted in CDC queries: - [Volatile functions]({% link {{ page.version.version }}/functions-and-operators.md %}#function-volatility). - Sub-select queries. - - [Aggregate]({% link {{ page.version.version }}/functions-and-operators.md %}#aggregate-functions) and [window functions]({% link {{ page.version.version }}/window-functions.md %}) (i.e., functions operating over many rows). [#98237](https://github.com/cockroachdb/cockroach/issues/98237) -- `delete` changefeed events will only contain the [primary key]({% link {{ page.version.version }}/primary-key.md %}). All other columns will emit as `NULL`. See [Capture delete messages]({% link {{ page.version.version }}/cdc-queries.md %}#capture-delete-messages) for detail on running a CDC query that emits the deleted values. [#83835](https://github.com/cockroachdb/cockroach/issues/83835) + - [Aggregate]({% link {{ page.version.version }}/functions-and-operators.md %}#aggregate-functions) and [window functions]({% link {{ page.version.version }}/window-functions.md %}) (i.e., functions operating over many rows). #98237 +- `delete` changefeed events will only contain the [primary key]({% link {{ page.version.version }}/primary-key.md %}). All other columns will emit as `NULL`. See [Capture delete messages]({% link {{ page.version.version }}/cdc-queries.md %}#capture-delete-messages) for detail on running a CDC query that emits the deleted values. 
#83835 diff --git a/src/current/_includes/v24.1/known-limitations/cdc.md b/src/current/_includes/v24.1/known-limitations/cdc.md index a473e94367c..0a3914da8bd 100644 --- a/src/current/_includes/v24.1/known-limitations/cdc.md +++ b/src/current/_includes/v24.1/known-limitations/cdc.md @@ -1,8 +1,8 @@ -- Changefeed target options are limited to tables and [column families]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}). [#73435](https://github.com/cockroachdb/cockroach/issues/73435) +- Changefeed target options are limited to tables and [column families]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}). #73435 - {% include {{page.version.version}}/cdc/kafka-vpc-limitation.md %} -- Webhook sinks only support HTTPS. Use the [`insecure_tls_skip_verify`]({% link {{ page.version.version }}/create-changefeed.md %}#insecure-tls-skip-verify) parameter when testing to disable certificate verification; however, this still requires HTTPS and certificates. [#73431](https://github.com/cockroachdb/cockroach/issues/73431) -- Formats for changefeed messages are not supported by all changefeed sinks. Refer to the [Changefeed Sinks]({% link {{ page.version.version }}/changefeed-sinks.md %}) page for details on compatible formats with each sink and the [`format`]({% link {{ page.version.version }}/create-changefeed.md %}) option to specify a changefeed message format. [#73432](https://github.com/cockroachdb/cockroach/issues/73432) -- Using the [`split_column_families`]({% link {{ page.version.version }}/create-changefeed.md %}#split-column-families) and [`resolved`]({% link {{ page.version.version }}/create-changefeed.md %}#resolved) options on the same changefeed will cause an error when using the following [sinks](changefeed-sinks.html): Kafka and Google Cloud Pub/Sub. Instead, use the individual `FAMILY` keyword to specify column families when creating a changefeed. 
[#79452](https://github.com/cockroachdb/cockroach/issues/79452) +- Webhook sinks only support HTTPS. Use the [`insecure_tls_skip_verify`]({% link {{ page.version.version }}/create-changefeed.md %}#insecure-tls-skip-verify) parameter when testing to disable certificate verification; however, this still requires HTTPS and certificates. #73431 +- Formats for changefeed messages are not supported by all changefeed sinks. Refer to the [Changefeed Sinks]({% link {{ page.version.version }}/changefeed-sinks.md %}) page for details on compatible formats with each sink and the [`format`]({% link {{ page.version.version }}/create-changefeed.md %}) option to specify a changefeed message format. #73432 +- Using the [`split_column_families`]({% link {{ page.version.version }}/create-changefeed.md %}#split-column-families) and [`resolved`]({% link {{ page.version.version }}/create-changefeed.md %}#resolved) options on the same changefeed will cause an error when using the following [sinks](changefeed-sinks.html): Kafka and Google Cloud Pub/Sub. Instead, use the individual `FAMILY` keyword to specify column families when creating a changefeed. 
#79452 - {% include {{page.version.version}}/cdc/types-udt-composite-general.md %} The following limitations apply: - {% include {{page.version.version}}/cdc/avro-udt-composite.md %} - {% include {{page.version.version}}/cdc/csv-udt-composite.md %} \ No newline at end of file diff --git a/src/current/_includes/v24.1/known-limitations/changefeed-column-family-message.md b/src/current/_includes/v24.1/known-limitations/changefeed-column-family-message.md index 41744b9b4b4..e77b6cc51e0 100644 --- a/src/current/_includes/v24.1/known-limitations/changefeed-column-family-message.md +++ b/src/current/_includes/v24.1/known-limitations/changefeed-column-family-message.md @@ -1 +1 @@ -When you create a changefeed on a table with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %}, the changefeed will emit messages per column family in separate streams. As a result, [changefeed messages]({% link {{ page.version.version }}/changefeed-messages.md %}) for different column families will arrive at the [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) under separate topics. [#127736](https://github.com/cockroachdb/cockroach/issues/127736) \ No newline at end of file +When you create a changefeed on a table with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %}, the changefeed will emit messages per column family in separate streams. As a result, [changefeed messages]({% link {{ page.version.version }}/changefeed-messages.md %}) for different column families will arrive at the [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) under separate topics. 
#127736 \ No newline at end of file diff --git a/src/current/_includes/v24.1/known-limitations/copy-syntax.md b/src/current/_includes/v24.1/known-limitations/copy-syntax.md index e64a075dcac..bd2e56e1c25 100644 --- a/src/current/_includes/v24.1/known-limitations/copy-syntax.md +++ b/src/current/_includes/v24.1/known-limitations/copy-syntax.md @@ -1,5 +1,5 @@ CockroachDB does not yet support the following `COPY` syntax: - - `COPY ... WITH FREEZE`. [#85573](https://github.com/cockroachdb/cockroach/issues/85573) - - `COPY ... WITH QUOTE`. [#85574](https://github.com/cockroachdb/cockroach/issues/85574) - - `COPY ... FROM ... WHERE `. [#54580](https://github.com/cockroachdb/cockroach/issues/54580) + - `COPY ... WITH FREEZE`. #85573 + - `COPY ... WITH QUOTE`. #85574 + - `COPY ... FROM ... WHERE `. #54580 diff --git a/src/current/_includes/v24.1/known-limitations/create-statistics-aost-limitation.md b/src/current/_includes/v24.1/known-limitations/create-statistics-aost-limitation.md index 09f86f51c48..d318ce338be 100644 --- a/src/current/_includes/v24.1/known-limitations/create-statistics-aost-limitation.md +++ b/src/current/_includes/v24.1/known-limitations/create-statistics-aost-limitation.md @@ -1 +1 @@ -The `ANALYZE` alias {% if page.name != "create-statistics.md" %}of [`CREATE STATISTICS`]({% link {{ page.version.version }}/create-statistics.md %}){% endif %} does not support specifying an {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %} timestamp. `ANALYZE` statements use `AS OF SYSTEM TIME '-0.001ms'` automatically. For more control over the statistics interval, use the `CREATE STATISTICS` syntax instead. 
[#96430](https://github.com/cockroachdb/cockroach/issues/96430) \ No newline at end of file +The `ANALYZE` alias {% if page.name != "create-statistics.md" %}of [`CREATE STATISTICS`]({% link {{ page.version.version }}/create-statistics.md %}){% endif %} does not support specifying an {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %} timestamp. `ANALYZE` statements use `AS OF SYSTEM TIME '-0.001ms'` automatically. For more control over the statistics interval, use the `CREATE STATISTICS` syntax instead. #96430 \ No newline at end of file diff --git a/src/current/_includes/v24.1/known-limitations/drop-column-partial-index.md b/src/current/_includes/v24.1/known-limitations/drop-column-partial-index.md index 9fd1811cc43..fd28f0a96a2 100644 --- a/src/current/_includes/v24.1/known-limitations/drop-column-partial-index.md +++ b/src/current/_includes/v24.1/known-limitations/drop-column-partial-index.md @@ -1 +1 @@ -CockroachDB prevents a column from being dropped using [`ALTER TABLE ... DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) if it is referenced by a partial index predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). [#97813](https://github.com/cockroachdb/cockroach/issues/97813). \ No newline at end of file +CockroachDB prevents a column from being dropped using [`ALTER TABLE ... DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) if it is referenced by a partial index predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). #97813. 
\ No newline at end of file diff --git a/src/current/_includes/v24.1/known-limitations/drop-owned-by-limitations.md b/src/current/_includes/v24.1/known-limitations/drop-owned-by-limitations.md index 95685f6adf1..af99d4b7cfe 100644 --- a/src/current/_includes/v24.1/known-limitations/drop-owned-by-limitations.md +++ b/src/current/_includes/v24.1/known-limitations/drop-owned-by-limitations.md @@ -10,4 +10,4 @@ The phrase "synthetic privileges" in the error message refers to [system-level privileges]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges). - The workaround is to use [`SHOW SYSTEM GRANTS FOR {role}`](show-system-grants.html) and then use [`REVOKE SYSTEM ...`](revoke.html#revoke-system-level-privileges-on-the-entire-cluster) for each privilege in the result. [#88149](https://github.com/cockroachdb/cockroach/issues/88149) \ No newline at end of file + The workaround is to use [`SHOW SYSTEM GRANTS FOR {role}`](show-system-grants.html) and then use [`REVOKE SYSTEM ...`](revoke.html#revoke-system-level-privileges-on-the-entire-cluster) for each privilege in the result. #88149 \ No newline at end of file diff --git a/src/current/_includes/v24.1/known-limitations/expression-index-limitations.md b/src/current/_includes/v24.1/known-limitations/expression-index-limitations.md index c0e94185948..9487028791c 100644 --- a/src/current/_includes/v24.1/known-limitations/expression-index-limitations.md +++ b/src/current/_includes/v24.1/known-limitations/expression-index-limitations.md @@ -1,6 +1,6 @@ - The expression cannot reference columns outside the index's table. - Functional expression output must be determined by the input arguments. For example, you can't use the [volatile function]({% link {{ page.version.version }}/functions-and-operators.md %}#function-volatility) `now()` to create an index because its output depends on more than just the function arguments. 
-- CockroachDB does not allow {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} to reference [computed columns]({% link {{ page.version.version }}/computed-columns.md %}). [#67900](https://github.com/cockroachdb/cockroach/issues/67900) +- CockroachDB does not allow {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} to reference [computed columns]({% link {{ page.version.version }}/computed-columns.md %}). #67900 - CockroachDB does not support expressions as `ON CONFLICT` targets. This means that unique {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} cannot be selected as arbiters for [`INSERT .. ON CONFLICT`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause) statements. 
For example: {% include_cached copy-clipboard.html %} @@ -40,4 +40,4 @@ HINT: try \h INSERT ~~~ - [#67893](https://github.com/cockroachdb/cockroach/issues/67893) + #67893 diff --git a/src/current/_includes/v24.1/known-limitations/fast-cutback-latest-timestamp.md b/src/current/_includes/v24.1/known-limitations/fast-cutback-latest-timestamp.md index 1a5b3b91a2c..58c95b83bb5 100644 --- a/src/current/_includes/v24.1/known-limitations/fast-cutback-latest-timestamp.md +++ b/src/current/_includes/v24.1/known-limitations/fast-cutback-latest-timestamp.md @@ -1 +1 @@ -{% include {{ page.version.version }}/physical-replication/fast-cutback-latest-timestamp.md %} [#117984](https://github.com/cockroachdb/cockroach/issues/117984) \ No newline at end of file +{% include {{ page.version.version }}/physical-replication/fast-cutback-latest-timestamp.md %} #117984 \ No newline at end of file diff --git a/src/current/_includes/v24.1/known-limitations/forecasted-stats-limitations.md b/src/current/_includes/v24.1/known-limitations/forecasted-stats-limitations.md index c8753124a96..b5d03e8ffc5 100644 --- a/src/current/_includes/v24.1/known-limitations/forecasted-stats-limitations.md +++ b/src/current/_includes/v24.1/known-limitations/forecasted-stats-limitations.md @@ -6,4 +6,4 @@ Although [`SHOW STATISTICS WITH FORECAST`]({% link {{ page.version.version }}/show-statistics.md %}#display-forecasted-statistics) shows the settings taking effect immediately, they do not actually take effect until new statistics are collected (as can be verified with [`EXPLAIN`]({% link {{ page.version.version }}/explain.md %})). - As a workaround, disable and enable forecasting at the [cluster]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-clusters) or [table]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-tables) level. 
This will invalidate the statistics cache and cause these settings to take effect immediately. [#123852](https://github.com/cockroachdb/cockroach/issues/123852) \ No newline at end of file + As a workaround, disable and enable forecasting at the [cluster]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-clusters) or [table]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-tables) level. This will invalidate the statistics cache and cause these settings to take effect immediately. #123852 \ No newline at end of file diff --git a/src/current/_includes/v24.1/known-limitations/full-text-search-unsupported.md b/src/current/_includes/v24.1/known-limitations/full-text-search-unsupported.md index 7b5a83f2cae..2be666994f1 100644 --- a/src/current/_includes/v24.1/known-limitations/full-text-search-unsupported.md +++ b/src/current/_includes/v24.1/known-limitations/full-text-search-unsupported.md @@ -11,4 +11,4 @@ - `!! tsquery` comparisons. - `tsquery @> tsquery` and `tsquery <@ tsquery` comparisons. -[#41288](https://github.com/cockroachdb/cockroach/issues/41288) \ No newline at end of file +#41288 \ No newline at end of file diff --git a/src/current/_includes/v24.1/known-limitations/generic-query-plan-limitations.md b/src/current/_includes/v24.1/known-limitations/generic-query-plan-limitations.md index e28e66d5f32..03d72195334 100644 --- a/src/current/_includes/v24.1/known-limitations/generic-query-plan-limitations.md +++ b/src/current/_includes/v24.1/known-limitations/generic-query-plan-limitations.md @@ -1,2 +1,2 @@ -- Because [generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache) use lookup joins instead of the scans and revscans used by custom query plans, generic query plans do not perform as well as custom query plans in some cases. 
[#128916](https://github.com/cockroachdb/cockroach/issues/128916) -- [Generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-type) are not included in the [plan cache]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache). This means a generic query plan built and optimized for a prepared statement in one session cannot be used by another session. To reuse generic query plans for maximum performance, a prepared statement should be executed multiple times instead of prepared and executed once. [#128911](https://github.com/cockroachdb/cockroach/issues/128911) \ No newline at end of file +- Because [generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache) use lookup joins instead of the scans and revscans used by custom query plans, generic query plans do not perform as well as custom query plans in some cases. #128916 +- [Generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-type) are not included in the [plan cache]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache). This means a generic query plan built and optimized for a prepared statement in one session cannot be used by another session. To reuse generic query plans for maximum performance, a prepared statement should be executed multiple times instead of prepared and executed once. 
#128911 \ No newline at end of file diff --git a/src/current/_includes/v24.1/known-limitations/locality-optimized-search-virtual-computed-columns.md b/src/current/_includes/v24.1/known-limitations/locality-optimized-search-virtual-computed-columns.md index d6acf418aa8..39b3f2cb00e 100644 --- a/src/current/_includes/v24.1/known-limitations/locality-optimized-search-virtual-computed-columns.md +++ b/src/current/_includes/v24.1/known-limitations/locality-optimized-search-virtual-computed-columns.md @@ -1 +1 @@ -- {% if page.name == "cost-based-optimizer.md" %} Locality optimized search {% else %} [Locality optimized search]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) {% endif %} does not work for queries that use [partitioned unique indexes]({% link {{ page.version.version }}/partitioning.md %}#partition-using-a-secondary-index) on [virtual computed columns](computed-columns.html#virtual-computed-columns). A workaround for computed columns is to make the virtual computed column a [stored computed column](computed-columns.html#stored-computed-columns). Locality optimized search does not work for queries that use partitioned unique [expression indexes](expression-indexes.html). [#68129](https://github.com/cockroachdb/cockroach/issues/68129) +- {% if page.name == "cost-based-optimizer.md" %} Locality optimized search {% else %} [Locality optimized search]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) {% endif %} does not work for queries that use [partitioned unique indexes]({% link {{ page.version.version }}/partitioning.md %}#partition-using-a-secondary-index) on [virtual computed columns](computed-columns.html#virtual-computed-columns). A workaround for computed columns is to make the virtual computed column a [stored computed column](computed-columns.html#stored-computed-columns). 
Locality optimized search does not work for queries that use partitioned unique [expression indexes](expression-indexes.html). #68129 diff --git a/src/current/_includes/v24.1/known-limitations/materialized-views-no-stats.md b/src/current/_includes/v24.1/known-limitations/materialized-views-no-stats.md index 02f2bd787c4..2bfe00d5307 100644 --- a/src/current/_includes/v24.1/known-limitations/materialized-views-no-stats.md +++ b/src/current/_includes/v24.1/known-limitations/materialized-views-no-stats.md @@ -1 +1 @@ -- The optimizer may not select the most optimal query plan when querying materialized views because CockroachDB does not [collect statistics]({% link {{ page.version.version }}/cost-based-optimizer.md %}#table-statistics) on materialized views. [#78181](https://github.com/cockroachdb/cockroach/issues/78181). +- The optimizer may not select the most optimal query plan when querying materialized views because CockroachDB does not [collect statistics]({% link {{ page.version.version }}/cost-based-optimizer.md %}#table-statistics) on materialized views. #78181. diff --git a/src/current/_includes/v24.1/known-limitations/multiple-arbiter-indexes.md b/src/current/_includes/v24.1/known-limitations/multiple-arbiter-indexes.md index c9861623314..28b41eca491 100644 --- a/src/current/_includes/v24.1/known-limitations/multiple-arbiter-indexes.md +++ b/src/current/_includes/v24.1/known-limitations/multiple-arbiter-indexes.md @@ -1 +1 @@ -CockroachDB does not currently support multiple arbiter indexes for [`INSERT ON CONFLICT DO UPDATE`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause), and will return an error if there are multiple unique or exclusion constraints matching the `ON CONFLICT DO UPDATE` specification. 
[#53170](https://github.com/cockroachdb/cockroach/issues/53170) \ No newline at end of file +CockroachDB does not currently support multiple arbiter indexes for [`INSERT ON CONFLICT DO UPDATE`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause), and will return an error if there are multiple unique or exclusion constraints matching the `ON CONFLICT DO UPDATE` specification. #53170 \ No newline at end of file diff --git a/src/current/_includes/v24.1/known-limitations/pcr-scheduled-changefeeds.md b/src/current/_includes/v24.1/known-limitations/pcr-scheduled-changefeeds.md index 3d6b8aa8628..ac29e55a442 100644 --- a/src/current/_includes/v24.1/known-limitations/pcr-scheduled-changefeeds.md +++ b/src/current/_includes/v24.1/known-limitations/pcr-scheduled-changefeeds.md @@ -1 +1 @@ -After the [failover process]({% link {{ page.version.version }}/failover-replication.md %}) for [physical cluster replication]({% link {{ page.version.version }}/physical-cluster-replication-overview.md %}), [scheduled changefeeds]({% link {{ page.version.version }}/create-schedule-for-changefeed.md %}) will continue on the promoted cluster. You will need to manage [pausing]({% link {{ page.version.version }}/pause-schedules.md %}) or [canceling]({% link {{ page.version.version }}/drop-schedules.md %}) the schedule on the promoted standby cluster to avoid two clusters running the same changefeed to one sink. [#123776](https://github.com/cockroachdb/cockroach/issues/123776) \ No newline at end of file +After the [failover process]({% link {{ page.version.version }}/failover-replication.md %}) for [physical cluster replication]({% link {{ page.version.version }}/physical-cluster-replication-overview.md %}), [scheduled changefeeds]({% link {{ page.version.version }}/create-schedule-for-changefeed.md %}) will continue on the promoted cluster. 
You will need to manage [pausing]({% link {{ page.version.version }}/pause-schedules.md %}) or [canceling]({% link {{ page.version.version }}/drop-schedules.md %}) the schedule on the promoted standby cluster to avoid two clusters running the same changefeed to one sink. #123776 \ No newline at end of file diff --git a/src/current/_includes/v24.1/known-limitations/plpgsql-limitations.md b/src/current/_includes/v24.1/known-limitations/plpgsql-limitations.md index ddd8ab55026..f9b261e8821 100644 --- a/src/current/_includes/v24.1/known-limitations/plpgsql-limitations.md +++ b/src/current/_includes/v24.1/known-limitations/plpgsql-limitations.md @@ -1,26 +1,26 @@ {% if page.name != "known-limitations.md" # New limitations in v24.1 %} -- It is not possible to use a variable as a target more than once in the same `INTO` clause. For example, `SELECT 1, 2 INTO x, x;`. [#121605](https://github.com/cockroachdb/cockroach/issues/121605) -- PLpgSQL variable declarations cannot inherit the type of a table row or column using `%TYPE` or `%ROWTYPE` syntax. [#114676](https://github.com/cockroachdb/cockroach/issues/114676) +- It is not possible to use a variable as a target more than once in the same `INTO` clause. For example, `SELECT 1, 2 INTO x, x;`. #121605 +- PLpgSQL variable declarations cannot inherit the type of a table row or column using `%TYPE` or `%ROWTYPE` syntax. #114676 {% endif %} -- PL/pgSQL arguments cannot be referenced with ordinals (e.g., `$1`, `$2`). [#114701](https://github.com/cockroachdb/cockroach/issues/114701) +- PL/pgSQL arguments cannot be referenced with ordinals (e.g., `$1`, `$2`). #114701 - The following statements are not supported: - - `FOR` loops, including `FOR` cursor loops, `FOR` query loops, and `FOREACH` loops. [#105246](https://github.com/cockroachdb/cockroach/issues/105246) - - `RETURN NEXT` and `RETURN QUERY`. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) - - `PERFORM`, `EXECUTE`, `GET DIAGNOSTICS`, and `CASE`. 
[#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- PL/pgSQL exception blocks cannot catch [transaction retry errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}). [#111446](https://github.com/cockroachdb/cockroach/issues/111446) -- `RAISE` statements cannot be annotated with names of schema objects related to the error (i.e., using `COLUMN`, `CONSTRAINT`, `DATATYPE`, `TABLE`, or `SCHEMA`). [#106237](https://github.com/cockroachdb/cockroach/issues/106237) -- `RAISE` statements message the client directly, and do not produce log output. [#117750](https://github.com/cockroachdb/cockroach/issues/117750) -- `ASSERT` debugging checks are not supported. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- `RECORD` parameters and variables are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). [#105713](https://github.com/cockroachdb/cockroach/issues/105713) -- Variable shadowing (e.g., declaring a variable with the same name in an inner block) is not supported in PL/pgSQL. [#117508](https://github.com/cockroachdb/cockroach/issues/117508) -- Syntax for accessing members of composite types without parentheses is not supported. [#114687](https://github.com/cockroachdb/cockroach/issues/114687) -- `NOT NULL` variable declarations are not supported. [#105243](https://github.com/cockroachdb/cockroach/issues/105243) -- Cursors opened in PL/pgSQL execute their queries on opening, affecting performance and resource usage. [#111479](https://github.com/cockroachdb/cockroach/issues/111479) -- Cursors in PL/pgSQL cannot be declared with arguments. [#117746](https://github.com/cockroachdb/cockroach/issues/117746) -- `OPEN FOR EXECUTE` is not supported for opening cursors. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- The `print_strict_params` option is not supported in PL/pgSQL. 
[#123671](https://github.com/cockroachdb/cockroach/issues/123671) -- The `FOUND` local variable, which checks whether a statement affected any rows, is not supported in PL/pgSQL. [#122306](https://github.com/cockroachdb/cockroach/issues/122306) -- By default, when a PL/pgSQL variable conflicts with a column name, CockroachDB resolves the ambiguity by treating it as a column reference rather than a variable reference. This behavior differs from PostgreSQL, where an ambiguous column error is reported, and it is possible to change the `plpgsql.variable_conflict` setting in order to prefer either columns or variables. [#115680](https://github.com/cockroachdb/cockroach/issues/115680) -- It is not possible to define a `RECORD`-returning PL/pgSQL function that returns different-typed expressions from different `RETURN` statements. CockroachDB requires a consistent return type for `RECORD`-returning functions. [#115384](https://github.com/cockroachdb/cockroach/issues/115384) -- Variables cannot be declared with an associated collation using the `COLLATE` keyword. [#105245](https://github.com/cockroachdb/cockroach/issues/105245) -- Variables cannot be accessed using the `label.var_name` pattern. [#122322](https://github.com/cockroachdb/cockroach/issues/122322) \ No newline at end of file + - `FOR` loops, including `FOR` cursor loops, `FOR` query loops, and `FOREACH` loops. #105246 + - `RETURN NEXT` and `RETURN QUERY`. #117744 + - `PERFORM`, `EXECUTE`, `GET DIAGNOSTICS`, and `CASE`. #117744 +- PL/pgSQL exception blocks cannot catch [transaction retry errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}). #111446 +- `RAISE` statements cannot be annotated with names of schema objects related to the error (i.e., using `COLUMN`, `CONSTRAINT`, `DATATYPE`, `TABLE`, or `SCHEMA`). #106237 +- `RAISE` statements message the client directly, and do not produce log output. #117750 +- `ASSERT` debugging checks are not supported. 
#117744 +- `RECORD` parameters and variables are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). #105713 +- Variable shadowing (e.g., declaring a variable with the same name in an inner block) is not supported in PL/pgSQL. #117508 +- Syntax for accessing members of composite types without parentheses is not supported. #114687 +- `NOT NULL` variable declarations are not supported. #105243 +- Cursors opened in PL/pgSQL execute their queries on opening, affecting performance and resource usage. #111479 +- Cursors in PL/pgSQL cannot be declared with arguments. #117746 +- `OPEN FOR EXECUTE` is not supported for opening cursors. #117744 +- The `print_strict_params` option is not supported in PL/pgSQL. #123671 +- The `FOUND` local variable, which checks whether a statement affected any rows, is not supported in PL/pgSQL. #122306 +- By default, when a PL/pgSQL variable conflicts with a column name, CockroachDB resolves the ambiguity by treating it as a column reference rather than a variable reference. This behavior differs from PostgreSQL, where an ambiguous column error is reported, and it is possible to change the `plpgsql.variable_conflict` setting in order to prefer either columns or variables. #115680 +- It is not possible to define a `RECORD`-returning PL/pgSQL function that returns different-typed expressions from different `RETURN` statements. CockroachDB requires a consistent return type for `RECORD`-returning functions. #115384 +- Variables cannot be declared with an associated collation using the `COLLATE` keyword. #105245 +- Variables cannot be accessed using the `label.var_name` pattern. 
#122322 \ No newline at end of file diff --git a/src/current/_includes/v24.1/known-limitations/read-committed-limitations.md b/src/current/_includes/v24.1/known-limitations/read-committed-limitations.md index c322ec2585c..3dab1ab375e 100644 --- a/src/current/_includes/v24.1/known-limitations/read-committed-limitations.md +++ b/src/current/_includes/v24.1/known-limitations/read-committed-limitations.md @@ -1,7 +1,7 @@ -- Schema changes (e.g., [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}), [`CREATE SCHEMA`]({% link {{ page.version.version }}/create-schema.md %}), [`CREATE INDEX`]({% link {{ page.version.version }}/create-index.md %})) cannot be performed within explicit `READ COMMITTED` transactions, and will cause transactions to abort. As a workaround, [set the transaction's isolation level]({% link {{ page.version.version }}/read-committed.md %}#set-the-current-transaction-to-read-committed) to `SERIALIZABLE`. [#114778](https://github.com/cockroachdb/cockroach/issues/114778) -- `READ COMMITTED` transactions performing `INSERT`, `UPDATE`, or `UPSERT` cannot access [`REGIONAL BY ROW`]({% link {{ page.version.version }}/table-localities.md %}#regional-by-row-tables) tables in which [`UNIQUE`]({% link {{ page.version.version }}/unique.md %}) and [`PRIMARY KEY`]({% link {{ page.version.version }}/primary-key.md %}) constraints exist, the region is not included in the constraint, and the region cannot be computed from the constraint columns. [#110873](https://github.com/cockroachdb/cockroach/issues/110873) -- Multi-column-family checks during updates are not supported under `READ COMMITTED` isolation. 
[#112488](https://github.com/cockroachdb/cockroach/issues/112488) +- Schema changes (e.g., [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}), [`CREATE SCHEMA`]({% link {{ page.version.version }}/create-schema.md %}), [`CREATE INDEX`]({% link {{ page.version.version }}/create-index.md %})) cannot be performed within explicit `READ COMMITTED` transactions, and will cause transactions to abort. As a workaround, [set the transaction's isolation level]({% link {{ page.version.version }}/read-committed.md %}#set-the-current-transaction-to-read-committed) to `SERIALIZABLE`. #114778 +- `READ COMMITTED` transactions performing `INSERT`, `UPDATE`, or `UPSERT` cannot access [`REGIONAL BY ROW`]({% link {{ page.version.version }}/table-localities.md %}#regional-by-row-tables) tables in which [`UNIQUE`]({% link {{ page.version.version }}/unique.md %}) and [`PRIMARY KEY`]({% link {{ page.version.version }}/primary-key.md %}) constraints exist, the region is not included in the constraint, and the region cannot be computed from the constraint columns. #110873 +- Multi-column-family checks during updates are not supported under `READ COMMITTED` isolation. #112488 - Because locks acquired by [foreign key]({% link {{ page.version.version }}/foreign-key.md %}) checks, [`SELECT FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}), and [`SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) are fully replicated under `READ COMMITTED` isolation, some queries experience a delay for Raft replication. - [Foreign key]({% link {{ page.version.version }}/foreign-key.md %}) checks are not performed in parallel under `READ COMMITTED` isolation. - [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements are less optimized under `READ COMMITTED` isolation than under `SERIALIZABLE` isolation. 
Under `READ COMMITTED` isolation, `SELECT FOR UPDATE` and `SELECT FOR SHARE` usually perform an extra lookup join for every locked table when compared to the same queries under `SERIALIZABLE`. In addition, some optimization steps (such as de-correlation of correlated [subqueries]({% link {{ page.version.version }}/subqueries.md %})) are not currently performed on these queries. -- Regardless of isolation level, [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements in CockroachDB do not prevent insertion of new rows matching the search condition (i.e., [phantom reads]({% link {{ page.version.version }}/read-committed.md %}#non-repeatable-reads-and-phantom-reads)). This matches PostgreSQL behavior at all isolation levels. [#120673](https://github.com/cockroachdb/cockroach/issues/120673) \ No newline at end of file +- Regardless of isolation level, [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements in CockroachDB do not prevent insertion of new rows matching the search condition (i.e., [phantom reads]({% link {{ page.version.version }}/read-committed.md %}#non-repeatable-reads-and-phantom-reads)). This matches PostgreSQL behavior at all isolation levels. 
#120673 \ No newline at end of file diff --git a/src/current/_includes/v24.1/known-limitations/restore-multiregion-match.md b/src/current/_includes/v24.1/known-limitations/restore-multiregion-match.md index ab2f1029ecd..634d695246e 100644 --- a/src/current/_includes/v24.1/known-limitations/restore-multiregion-match.md +++ b/src/current/_includes/v24.1/known-limitations/restore-multiregion-match.md @@ -47,4 +47,4 @@ ALTER DATABASE destination_database SET PRIMARY REGION "us-east1"; ~~~ - [#71071](https://github.com/cockroachdb/cockroach/issues/71071) \ No newline at end of file + #71071 \ No newline at end of file diff --git a/src/current/_includes/v24.1/known-limitations/restore-tables-non-multi-reg.md b/src/current/_includes/v24.1/known-limitations/restore-tables-non-multi-reg.md index 5390f2d09ee..e05bc340141 100644 --- a/src/current/_includes/v24.1/known-limitations/restore-tables-non-multi-reg.md +++ b/src/current/_includes/v24.1/known-limitations/restore-tables-non-multi-reg.md @@ -1 +1 @@ -Restoring [`GLOBAL`]({% link {{ page.version.version }}/table-localities.md %}#global-tables) and [`REGIONAL BY TABLE`]({% link {{ page.version.version }}/table-localities.md %}#regional-tables) tables into a **non**-multi-region database is not supported. [#71502](https://github.com/cockroachdb/cockroach/issues/71502) +Restoring [`GLOBAL`]({% link {{ page.version.version }}/table-localities.md %}#global-tables) and [`REGIONAL BY TABLE`]({% link {{ page.version.version }}/table-localities.md %}#regional-tables) tables into a **non**-multi-region database is not supported. 
#71502 diff --git a/src/current/_includes/v24.1/known-limitations/restore-udf.md b/src/current/_includes/v24.1/known-limitations/restore-udf.md index a4a4bc080fe..e4180219a25 100644 --- a/src/current/_includes/v24.1/known-limitations/restore-udf.md +++ b/src/current/_includes/v24.1/known-limitations/restore-udf.md @@ -1 +1 @@ -`RESTORE` will not restore a table that references a [UDF]({% link {{ page.version.version }}/user-defined-functions.md %}), unless you skip restoring the function with the {% if page.name == "restore.md" %} [`skip_missing_udfs`](#skip-missing-udfs) {% else %} [`skip_missing_udfs`]({% link {{ page.version.version }}/restore.md %}#skip-missing-udfs) {% endif %} option. Alternatively, take a [database-level backup]({% link {{ page.version.version }}/backup.md %}#back-up-a-database) to include everything needed to restore the table. [#118195](https://github.com/cockroachdb/cockroach/issues/118195) \ No newline at end of file +`RESTORE` will not restore a table that references a [UDF]({% link {{ page.version.version }}/user-defined-functions.md %}), unless you skip restoring the function with the {% if page.name == "restore.md" %} [`skip_missing_udfs`](#skip-missing-udfs) {% else %} [`skip_missing_udfs`]({% link {{ page.version.version }}/restore.md %}#skip-missing-udfs) {% endif %} option. Alternatively, take a [database-level backup]({% link {{ page.version.version }}/backup.md %}#back-up-a-database) to include everything needed to restore the table. 
#118195 \ No newline at end of file diff --git a/src/current/_includes/v24.1/known-limitations/routine-limitations.md b/src/current/_includes/v24.1/known-limitations/routine-limitations.md index 701ea79c75b..d8499797073 100644 --- a/src/current/_includes/v24.1/known-limitations/routine-limitations.md +++ b/src/current/_includes/v24.1/known-limitations/routine-limitations.md @@ -1,9 +1,9 @@ {% if page.name != "known-limitations.md" # New limitations in v24.1 %} -- Routines cannot be invoked with named arguments, e.g., `SELECT foo(a => 1, b => 2);` or `SELECT foo(b := 1, a := 2);`. [#122264](https://github.com/cockroachdb/cockroach/issues/122264) -- Routines cannot be created if they reference temporary tables. [#121375](https://github.com/cockroachdb/cockroach/issues/121375) -- Routines cannot be created with unnamed `INOUT` parameters. For example, `CREATE PROCEDURE p(INOUT INT) AS $$ BEGIN NULL; END; $$ LANGUAGE PLpgSQL;`. [#121251](https://github.com/cockroachdb/cockroach/issues/121251) -- Routines cannot be created if they return fewer columns than declared. For example, `CREATE FUNCTION f(OUT sum INT, INOUT a INT, INOUT b INT) LANGUAGE SQL AS $$ SELECT (a + b, b); $$;`. [#121247](https://github.com/cockroachdb/cockroach/issues/121247) +- Routines cannot be invoked with named arguments, e.g., `SELECT foo(a => 1, b => 2);` or `SELECT foo(b := 1, a := 2);`. #122264 +- Routines cannot be created if they reference temporary tables. #121375 +- Routines cannot be created with unnamed `INOUT` parameters. For example, `CREATE PROCEDURE p(INOUT INT) AS $$ BEGIN NULL; END; $$ LANGUAGE PLpgSQL;`. #121251 +- Routines cannot be created if they return fewer columns than declared. For example, `CREATE FUNCTION f(OUT sum INT, INOUT a INT, INOUT b INT) LANGUAGE SQL AS $$ SELECT (a + b, b); $$;`. #121247 {% endif %} -- Routines cannot be created with an `OUT` parameter of type `RECORD`. 
[#123448](https://github.com/cockroachdb/cockroach/issues/123448) -- DDL statements (e.g., `CREATE TABLE`, `CREATE INDEX`) are not allowed within UDFs or stored procedures. [#110080](https://github.com/cockroachdb/cockroach/issues/110080) -- Polymorphic types cannot be cast to other types (e.g., `TEXT`) within routine parameters. [#123536](https://github.com/cockroachdb/cockroach/issues/123536) \ No newline at end of file +- Routines cannot be created with an `OUT` parameter of type `RECORD`. #123448 +- DDL statements (e.g., `CREATE TABLE`, `CREATE INDEX`) are not allowed within UDFs or stored procedures. #110080 +- Polymorphic types cannot be cast to other types (e.g., `TEXT`) within routine parameters. #123536 \ No newline at end of file diff --git a/src/current/_includes/v24.1/known-limitations/row-level-ttl-limitations.md b/src/current/_includes/v24.1/known-limitations/row-level-ttl-limitations.md index c386ba576d7..4460901566d 100644 --- a/src/current/_includes/v24.1/known-limitations/row-level-ttl-limitations.md +++ b/src/current/_includes/v24.1/known-limitations/row-level-ttl-limitations.md @@ -1,5 +1,5 @@ - Any queries you run against tables with Row-Level TTL enabled (or against tables that have [foreign keys]({% link {{page.version.version}}/foreign-key.md %}) that reference TTL-enabled tables) do not filter out expired rows from the result set (this includes [`UPDATE`s]({% link {{ page.version.version }}/update.md %}) and [`DELETE`s]({% link {{ page.version.version }}/delete.md %})). This feature may be added in a future release. For now, follow the instructions in [Filter out expired rows from a selection query]({% link {{ page.version.version }}/row-level-ttl.md %}#filter-out-expired-rows-from-a-selection-query). -- Tables with Row-Level TTL can be referenced by [foreign keys]({% link {{page.version.version}}/foreign-key.md %}). 
TTL deletes are issued as regular [`DELETE`]({% link {{ page.version.version }}/delete.md %}) statements, so inbound foreign keys apply. If an inbound foreign key uses `ON DELETE RESTRICT` and referencing rows exist, the TTL job fails with a foreign key violation. [#101372](https://github.com/cockroachdb/cockroach/issues/101372) +- Tables with Row-Level TTL can be referenced by [foreign keys]({% link {{page.version.version}}/foreign-key.md %}). TTL deletes are issued as regular [`DELETE`]({% link {{ page.version.version }}/delete.md %}) statements, so inbound foreign keys apply. If an inbound foreign key uses `ON DELETE RESTRICT` and referencing rows exist, the TTL job fails with a foreign key violation. #101372 - Enabling Row-Level TTL on a table with multiple [secondary indexes]({% link {{ page.version.version }}/indexes.md %}) can have negative performance impacts on a cluster, including increased [latency]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#service-latency) and [contention]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention). This is particularly true for large tables with terabytes of data and billions of rows that are split up into multiple ranges across separate nodes. - Increased latency may occur because secondary indexes aren't necessarily stored on the same underlying [ranges]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-range) as a table's [primary indexes]({% link {{ page.version.version }}/indexes.md %}). Further, the secondary indexes' ranges may have [leaseholders]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-leaseholder) located on different nodes than the primary index. - Increased contention may occur because [intents]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#write-intents) must be written as part of performing the deletions. 
diff --git a/src/current/_includes/v24.1/known-limitations/select-for-update-limitations.md b/src/current/_includes/v24.1/known-limitations/select-for-update-limitations.md index 73aaf9fdeb6..210099bacd7 100644 --- a/src/current/_includes/v24.1/known-limitations/select-for-update-limitations.md +++ b/src/current/_includes/v24.1/known-limitations/select-for-update-limitations.md @@ -1,4 +1,4 @@ -- `SKIP LOCKED` cannot be used for tables with multiple [column families]({% link {{ page.version.version }}/column-families.md %}). [#160961](https://github.com/cockroachdb/cockroach/issues/160961) +- `SKIP LOCKED` cannot be used for tables with multiple [column families]({% link {{ page.version.version }}/column-families.md %}). #160961 - By default under `SERIALIZABLE` isolation, locks acquired using `SELECT ... FOR UPDATE` and `SELECT ... FOR SHARE` are implemented as fast, in-memory [unreplicated locks](architecture/transaction-layer.html#unreplicated-locks). If a [lease transfer]({% link {{ page.version.version }}/architecture/replication-layer.md %}#epoch-based-leases-table-data) or [range split/merge]({% link {{ page.version.version }}/architecture/distribution-layer.md %}#range-merges) occurs on a range held by an unreplicated lock, the lock is dropped. The following behaviors can occur: - The desired ordering of concurrent accesses to one or more rows of a table expressed by your use of `SELECT ... FOR UPDATE` may not be preserved (that is, a transaction _B_ against some table _T_ that was supposed to wait behind another transaction _A_ operating on _T_ may not wait for transaction _A_). - The transaction that acquired the (now dropped) unreplicated lock may fail to commit, leading to [transaction retry errors with code `40001`]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}) and the [`restart transaction` error message]({% link {{ page.version.version }}/common-errors.md %}#restart-transaction). 
diff --git a/src/current/_includes/v24.1/known-limitations/set-transaction-no-rollback.md b/src/current/_includes/v24.1/known-limitations/set-transaction-no-rollback.md index 414cbac6282..5f0145c05c4 100644 --- a/src/current/_includes/v24.1/known-limitations/set-transaction-no-rollback.md +++ b/src/current/_includes/v24.1/known-limitations/set-transaction-no-rollback.md @@ -14,4 +14,4 @@ timezone 3 ~~~ -[#69396](https://github.com/cockroachdb/cockroach/issues/69396) +#69396 diff --git a/src/current/_includes/v24.1/known-limitations/show-backup-symlink.md b/src/current/_includes/v24.1/known-limitations/show-backup-symlink.md index 38ba86fb28f..1c7a5612242 100644 --- a/src/current/_includes/v24.1/known-limitations/show-backup-symlink.md +++ b/src/current/_includes/v24.1/known-limitations/show-backup-symlink.md @@ -1 +1 @@ -[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}) does not support listing backups if the [`nodelocal`]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}) storage location is a symlink. [#70260](https://github.com/cockroachdb/cockroach/issues/70260) \ No newline at end of file +[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}) does not support listing backups if the [`nodelocal`]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}) storage location is a symlink. #70260 \ No newline at end of file diff --git a/src/current/_includes/v24.1/known-limitations/sql-cursors.md b/src/current/_includes/v24.1/known-limitations/sql-cursors.md index 4c047aa9603..93bbca937b1 100644 --- a/src/current/_includes/v24.1/known-limitations/sql-cursors.md +++ b/src/current/_includes/v24.1/known-limitations/sql-cursors.md @@ -1,9 +1,9 @@ CockroachDB implements SQL {% if page.name == "known-limitations.md" %} [cursor]({% link {{ page.version.version }}/cursors.md %}) {% else %} cursor {% endif %} support with the following limitations: -- `DECLARE` only supports forward cursors. 
Reverse cursors created with `DECLARE SCROLL` are not supported. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- `FETCH` supports forward, relative, and absolute variants, but only for forward cursors. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- `BINARY CURSOR`, which returns data in the Postgres binary format, is not supported. [#77099](https://github.com/cockroachdb/cockroach/issues/77099) -- `WITH HOLD`, which allows keeping a cursor open for longer than a transaction by writing its results into a buffer, is accepted as valid syntax within a single transaction but is not supported. It acts as a no-op and does not actually perform the function of `WITH HOLD`, which is to make the cursor live outside its parent transaction. Instead, if you are using `WITH HOLD`, you will be forced to close that cursor within the transaction it was created in. [#77101](https://github.com/cockroachdb/cockroach/issues/77101) +- `DECLARE` only supports forward cursors. Reverse cursors created with `DECLARE SCROLL` are not supported. #77102 +- `FETCH` supports forward, relative, and absolute variants, but only for forward cursors. #77102 +- `BINARY CURSOR`, which returns data in the Postgres binary format, is not supported. #77099 +- `WITH HOLD`, which allows keeping a cursor open for longer than a transaction by writing its results into a buffer, is accepted as valid syntax within a single transaction but is not supported. It acts as a no-op and does not actually perform the function of `WITH HOLD`, which is to make the cursor live outside its parent transaction. Instead, if you are using `WITH HOLD`, you will be forced to close that cursor within the transaction it was created in. 
#77101 - This syntax is accepted (but does not have any effect): {% include_cached copy-clipboard.html %} ~~~ sql @@ -19,6 +19,6 @@ CockroachDB implements SQL {% if page.name == "known-limitations.md" %} [cursor] DECLARE test_cur CURSOR WITH HOLD FOR SELECT * FROM foo ORDER BY bar; COMMIT; -- This will fail with an error because CLOSE test_cur was not called inside the transaction. ~~~ -- Scrollable cursor (also known as reverse `FETCH`) is not supported. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- [`SELECT ... FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) with a cursor is not supported. [#77103](https://github.com/cockroachdb/cockroach/issues/77103) -- Respect for [`SAVEPOINT`s]({% link {{ page.version.version }}/savepoint.md %}) is not supported. Cursor definitions do not disappear properly if rolled back to a `SAVEPOINT` from before they were created. [#77104](https://github.com/cockroachdb/cockroach/issues/77104) +- Scrollable cursor (also known as reverse `FETCH`) is not supported. #77102 +- [`SELECT ... FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) with a cursor is not supported. #77103 +- Respect for [`SAVEPOINT`s]({% link {{ page.version.version }}/savepoint.md %}) is not supported. Cursor definitions do not disappear properly if rolled back to a `SAVEPOINT` from before they were created. #77104 diff --git a/src/current/_includes/v24.1/known-limitations/srid-4326-limitations.md b/src/current/_includes/v24.1/known-limitations/srid-4326-limitations.md index b556a9fbecd..294a1a53bd4 100644 --- a/src/current/_includes/v24.1/known-limitations/srid-4326-limitations.md +++ b/src/current/_includes/v24.1/known-limitations/srid-4326-limitations.md @@ -1 +1 @@ -Defining a custom SRID by inserting rows into [`spatial_ref_sys`]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial_ref_sys) is not currently supported. 
[#55903](https://github.com/cockroachdb/cockroach/issues/55903) \ No newline at end of file +Defining a custom SRID by inserting rows into [`spatial_ref_sys`]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial_ref_sys) is not currently supported. #55903 \ No newline at end of file diff --git a/src/current/_includes/v24.1/known-limitations/stats-refresh-upgrade.md b/src/current/_includes/v24.1/known-limitations/stats-refresh-upgrade.md index 3d5a8d26325..3325dfaa53b 100644 --- a/src/current/_includes/v24.1/known-limitations/stats-refresh-upgrade.md +++ b/src/current/_includes/v24.1/known-limitations/stats-refresh-upgrade.md @@ -1 +1 @@ -- The [automatic statistics refresher]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-statistics-refresh-rate) automatically checks whether it needs to refresh statistics for every table in the database upon startup of each node in the cluster. If statistics for a table have not been refreshed in a while, this will trigger collection of statistics for that table. If statistics have been refreshed recently, it will not force a refresh. As a result, the automatic statistics refresher does not necessarily perform a refresh of statistics after an [upgrade]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}). This could cause a problem, for example, if the upgrade moves from a version without [histograms]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-histogram-collection) to a version with histograms. To refresh statistics manually, use [`CREATE STATISTICS`](create-statistics.html). [#54816](https://github.com/cockroachdb/cockroach/issues/54816) +- The [automatic statistics refresher]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-statistics-refresh-rate) automatically checks whether it needs to refresh statistics for every table in the database upon startup of each node in the cluster. 
If statistics for a table have not been refreshed in a while, this will trigger collection of statistics for that table. If statistics have been refreshed recently, it will not force a refresh. As a result, the automatic statistics refresher does not necessarily perform a refresh of statistics after an [upgrade]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}). This could cause a problem, for example, if the upgrade moves from a version without [histograms]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-histogram-collection) to a version with histograms. To refresh statistics manually, use [`CREATE STATISTICS`](create-statistics.html). #54816 diff --git a/src/current/_includes/v24.1/known-limitations/stored-proc-limitations.md b/src/current/_includes/v24.1/known-limitations/stored-proc-limitations.md index 70d8cd71791..b544c23c0e6 100644 --- a/src/current/_includes/v24.1/known-limitations/stored-proc-limitations.md +++ b/src/current/_includes/v24.1/known-limitations/stored-proc-limitations.md @@ -1,3 +1,3 @@ {% if page.name != "known-limitations.md" # New limitations in v24.1 %} {% endif %} -- `COMMIT` and `ROLLBACK` statements are not supported within nested procedures. [#122266](https://github.com/cockroachdb/cockroach/issues/122266) \ No newline at end of file +- `COMMIT` and `ROLLBACK` statements are not supported within nested procedures. #122266 \ No newline at end of file diff --git a/src/current/_includes/v24.1/known-limitations/trigram-unsupported-syntax.md b/src/current/_includes/v24.1/known-limitations/trigram-unsupported-syntax.md index 494730c7ae8..a981c4915b0 100644 --- a/src/current/_includes/v24.1/known-limitations/trigram-unsupported-syntax.md +++ b/src/current/_includes/v24.1/known-limitations/trigram-unsupported-syntax.md @@ -6,4 +6,4 @@ - Acceleration on [regex string matching]({% link {{ page.version.version }}/scalar-expressions.md %}#string-matching-using-posix-regular-expressions). 
- `%` comparisons, `show_trgm`, and trigram index creation on [collated strings]({% link {{ page.version.version }}/collate.md %}). -[#41285](https://github.com/cockroachdb/cockroach/issues/41285) \ No newline at end of file +#41285 \ No newline at end of file diff --git a/src/current/_includes/v24.1/known-limitations/udf-limitations.md b/src/current/_includes/v24.1/known-limitations/udf-limitations.md index 7555fde890a..a8f3af6c323 100644 --- a/src/current/_includes/v24.1/known-limitations/udf-limitations.md +++ b/src/current/_includes/v24.1/known-limitations/udf-limitations.md @@ -1,10 +1,10 @@ {% if page.name != "known-limitations.md" # New limitations in v24.1 %} -- A `RECORD`-returning UDF cannot be created without a `RETURN` statement in the root block, which would restrict the wildcard type to a concrete one. [#122945](https://github.com/cockroachdb/cockroach/issues/122945) +- A `RECORD`-returning UDF cannot be created without a `RETURN` statement in the root block, which would restrict the wildcard type to a concrete one. #122945 {% endif %} - User-defined functions are not currently supported in: - - Expressions (column, index, constraint) in tables. [#87699](https://github.com/cockroachdb/cockroach/issues/87699) - - Views. [#87699](https://github.com/cockroachdb/cockroach/issues/87699) -- User-defined functions cannot call themselves recursively. [#93049](https://github.com/cockroachdb/cockroach/issues/93049) -- [Common table expressions]({% link {{ page.version.version }}/common-table-expressions.md %}) (CTE), recursive or non-recursive, are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}) (UDF). That is, you cannot use a `WITH` clause in the body of a UDF. [#92961](https://github.com/cockroachdb/cockroach/issues/92961) -- The `setval` function cannot be resolved when used inside UDF bodies. 
[#110860](https://github.com/cockroachdb/cockroach/issues/110860) -- Casting subqueries to [user-defined types]({% link {{ page.version.version }}/create-type.md %}) in UDFs is not supported. [#108184](https://github.com/cockroachdb/cockroach/issues/108184) \ No newline at end of file + - Expressions (column, index, constraint) in tables. #87699 + - Views. #87699 +- User-defined functions cannot call themselves recursively. #93049 +- [Common table expressions]({% link {{ page.version.version }}/common-table-expressions.md %}) (CTE), recursive or non-recursive, are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}) (UDF). That is, you cannot use a `WITH` clause in the body of a UDF. #92961 +- The `setval` function cannot be resolved when used inside UDF bodies. #110860 +- Casting subqueries to [user-defined types]({% link {{ page.version.version }}/create-type.md %}) in UDFs is not supported. #108184 \ No newline at end of file diff --git a/src/current/_includes/v24.1/known-limitations/vectorized-engine-limitations.md b/src/current/_includes/v24.1/known-limitations/vectorized-engine-limitations.md index daea59ebf88..da227ad4825 100644 --- a/src/current/_includes/v24.1/known-limitations/vectorized-engine-limitations.md +++ b/src/current/_includes/v24.1/known-limitations/vectorized-engine-limitations.md @@ -1,2 +1,2 @@ -- The vectorized engine does not support queries containing a join filtered with an [`ON` expression]({% link {{ page.version.version }}/joins.md %}#supported-join-conditions). [#38018](https://github.com/cockroachdb/cockroach/issues/38018) +- The vectorized engine does not support queries containing a join filtered with an [`ON` expression]({% link {{ page.version.version }}/joins.md %}#supported-join-conditions). #38018 - The vectorized engine does not support [working with spatial data]({% link {{ page.version.version }}/export-spatial-data.md %}). 
Queries with [geospatial functions]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) or [spatial data]({% link {{ page.version.version }}/export-spatial-data.md %}) will revert to the row-oriented execution engine. diff --git a/src/current/_includes/v24.1/misc/tooling.md b/src/current/_includes/v24.1/misc/tooling.md index dcd24363435..01dc63c840f 100644 --- a/src/current/_includes/v24.1/misc/tooling.md +++ b/src/current/_includes/v24.1/misc/tooling.md @@ -23,7 +23,7 @@ Customers should contact their account team before moving production workloads t Unless explicitly stated, support for a [driver](#drivers) or [data access framework](#data-access-frameworks-e-g-orms) does not include [automatic, client-side transaction retry handling]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#client-side-retry-handling). For client-side transaction retry handling samples, see [Example Apps]({% link {{ page.version.version }}/example-apps.md %}). {{site.data.alerts.end}} -If you encounter problems using CockroachDB with any of the tools listed on this page, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward better support. +If you encounter problems using CockroachDB with any of the tools listed on this page, please open an issue with details to help us make progress toward better support. For a list of tools supported by the CockroachDB community, see [Third-Party Tools Supported by the Community]({% link {{ page.version.version }}/community-tooling.md %}). 
@@ -32,23 +32,23 @@ For a list of tools supported by the CockroachDB community, see [Third-Party Too | Language | Driver | Latest tested version | Support level | CockroachDB adapter | Tutorial | |----------+--------+-----------------------+---------------------+---------------------+----------| | C | [libpq](http://www.postgresql.org/docs/13/static/libpq.html)| PostgreSQL 13 | Partial | N/A | N/A | -| C# (.NET) | [Npgsql](https://www.nuget.org/packages/Npgsql/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/npgsql.go ||var npgsqlSupportedTag = "v||"\n\n %} | Full | N/A | [Build a C# App with CockroachDB (Npgsql)](build-a-csharp-app-with-cockroachdb.html) | -| Go | [pgx](https://github.com/jackc/pgx/releases)


[pq](https://github.com/lib/pq) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgx.go ||var supportedPGXTag = "||"\n\n %}
(use latest version of CockroachDB adapter)
{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/libpq.go ||var libPQSupportedTag = "||"\n\n %} | Full


Full | [`crdbpgx`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbpgx)
(includes client-side transaction retry handling)
N/A | [Build a Go App with CockroachDB (pgx)](build-a-go-app-with-cockroachdb.html)


[Build a Go App with CockroachDB (pq)](build-a-go-app-with-cockroachdb-pq.html) | -| Java | [JDBC](https://jdbc.postgresql.org/download/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgjdbc.go ||var supportedPGJDBCTag = "||"\n\n %} | Full | N/A | [Build a Java App with CockroachDB (JDBC)](build-a-java-app-with-cockroachdb.html) | +| C# (.NET) | [Npgsql](https://www.nuget.org/packages/Npgsql/) | 7.0.2 | Full | N/A | [Build a C# App with CockroachDB (Npgsql)](build-a-csharp-app-with-cockroachdb.html) | +| Go | [pgx](https://github.com/jackc/pgx/releases)


[pq](https://github.com/lib/pq) | v5.3.1
(use latest version of CockroachDB adapter)
v1.10.5 | Full


Full | [`crdbpgx`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbpgx)
(includes client-side transaction retry handling)
N/A | [Build a Go App with CockroachDB (pgx)](build-a-go-app-with-cockroachdb.html)


[Build a Go App with CockroachDB (pq)](build-a-go-app-with-cockroachdb-pq.html) | +| Java | [JDBC](https://jdbc.postgresql.org/download/) | REL42.7.3 | Full | N/A | [Build a Java App with CockroachDB (JDBC)](build-a-java-app-with-cockroachdb.html) | | JavaScript | [pg](https://www.npmjs.com/package/pg) | 8.2.1 | Full | N/A | [Build a Node.js App with CockroachDB (pg)](build-a-nodejs-app-with-cockroachdb.html) | -| Python | [psycopg3](https://www.psycopg.org/psycopg3/docs/)


[psycopg2](https://www.psycopg.org/docs/install.html)


[asyncpg](https://magicstack.github.io/asyncpg/current/index.html) | 3.0.16


2.8.6


{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/asyncpg.go || var asyncpgSupportedTag = "||"\n\n %} | Full


Full


Partial | N/A


N/A


N/A | [Build a Python App with CockroachDB (psycopg3)](build-a-python-app-with-cockroachdb-psycopg3.html)


[Build a Python App with CockroachDB (psycopg2)](build-a-python-app-with-cockroachdb.html)


[Build a Python App with CockroachDB (asyncpg)](build-a-python-app-with-cockroachdb-asyncpg.html) | -| Ruby | [pg](https://rubygems.org/gems/pg) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/ruby_pg.go ||var rubyPGVersion = "||"\n\n %} | Full | N/A | [Build a Ruby App with CockroachDB (pg)](build-a-ruby-app-with-cockroachdb.html) | +| Python | [psycopg3](https://www.psycopg.org/psycopg3/docs/)


[psycopg2](https://www.psycopg.org/docs/install.html)


[asyncpg](https://magicstack.github.io/asyncpg/current/index.html) | 3.0.16


2.8.6


v0.24.0 | Full


Full


Partial | N/A


N/A


N/A | [Build a Python App with CockroachDB (psycopg3)](build-a-python-app-with-cockroachdb-psycopg3.html)


[Build a Python App with CockroachDB (psycopg2)](build-a-python-app-with-cockroachdb.html)


[Build a Python App with CockroachDB (asyncpg)](build-a-python-app-with-cockroachdb-asyncpg.html) | +| Ruby | [pg](https://rubygems.org/gems/pg) | v1.4.6 | Full | N/A | [Build a Ruby App with CockroachDB (pg)](build-a-ruby-app-with-cockroachdb.html) | | Rust | [rust-postgres](https://github.com/sfackler/rust-postgres) | 0.19.2 | Partial | N/A | [Build a Rust App with CockroachDB]({% link {{ page.version.version }}/build-a-rust-app-with-cockroachdb.md %}) | ## Data access frameworks (e.g., ORMs) | Language | Framework | Latest tested version | Support level | CockroachDB adapter | Tutorial | |----------+-----------+-----------------------+---------------+---------------------+----------| -| Go | [GORM](https://github.com/jinzhu/gorm/releases)


[go-pg](https://github.com/go-pg/pg)
[upper/db](https://github.com/upper/db) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gorm.go ||var gormSupportedTag = "||"\n\n %}


{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gopg.go ||var gopgSupportedTag = "||"\n\n %}
v4 | Full


Full
Full | [`crdbgorm`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbgorm)
(includes client-side transaction retry handling)
N/A
N/A | [Build a Go App with CockroachDB (GORM)](build-a-go-app-with-cockroachdb-gorm.html)


N/A
[Build a Go App with CockroachDB (upper/db)](build-a-go-app-with-cockroachdb-upperdb.html) | -| Java | [Hibernate](https://hibernate.org/orm/)
(including [Hibernate Spatial](https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html#spatial))
[jOOQ](https://www.jooq.org/)
[MyBatis](https://mybatis.org/mybatis-3/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/hibernate.go ||var supportedHibernateTag = "||"\n\n %} (must be at least 5.4.19)


3.13.2 (must be at least 3.13.0)
3.5.5| Full


Full
Full | N/A


N/A
N/A | [Build a Java App with CockroachDB (Hibernate)](build-a-java-app-with-cockroachdb-hibernate.html)


[Build a Java App with CockroachDB (jOOQ)](build-a-java-app-with-cockroachdb-jooq.html)
[Build a Spring App with CockroachDB (MyBatis)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) | -| JavaScript/TypeScript | [Sequelize](https://www.npmjs.com/package/sequelize)


[Knex.js](https://knexjs.org/)
[Prisma](https://prisma.io)
[TypeORM](https://www.npmjs.com/package/typeorm) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/sequelize.go ||var supportedSequelizeCockroachDBRelease = "||"\n\n %}
(use latest version of CockroachDB adapter)
{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/knex.go ||const supportedKnexTag = "||"\n\n %}
3.14.0
0.3.17 {% comment %}{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/typeorm.go ||const supportedTypeORMRelease = "||"\n %}{% endcomment %} | Full


Full
Full
Full | [`sequelize-cockroachdb`](https://www.npmjs.com/package/sequelize-cockroachdb)


N/A
N/A
N/A | [Build a Node.js App with CockroachDB (Sequelize)](build-a-nodejs-app-with-cockroachdb-sequelize.html)


[Build a Node.js App with CockroachDB (Knex.js)](build-a-nodejs-app-with-cockroachdb-knexjs.html)
[Build a Node.js App with CockroachDB (Prisma)](build-a-nodejs-app-with-cockroachdb-prisma.html)
[Build a TypeScript App with CockroachDB (TypeORM)](build-a-typescript-app-with-cockroachdb.html) | -| Ruby | [ActiveRecord](https://rubygems.org/gems/activerecord)
[RGeo/RGeo-ActiveRecord](https://github.com/cockroachdb/activerecord-cockroachdb-adapter#working-with-spatial-data) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/activerecord.go ||var supportedRailsVersion = "||"\nvar %}
(use latest version of CockroachDB adapter) | Full | [`activerecord-cockroachdb-adapter`](https://rubygems.org/gems/activerecord-cockroachdb-adapter)
(includes client-side transaction retry handling) | [Build a Ruby App with CockroachDB (ActiveRecord)](build-a-ruby-app-with-cockroachdb-activerecord.html) | -| Python | [Django](https://pypi.org/project/Django/)
(including [GeoDjango](https://docs.djangoproject.com/en/3.1/ref/contrib/gis/))
[peewee](https://github.com/coleifer/peewee/)
[SQLAlchemy](https://www.sqlalchemy.org/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/django.go ||var djangoSupportedTag = "cockroach-||"\nvar %}
(use latest version of CockroachDB adapter)

3.13.3
0.7.13
1.4.17
(use latest version of CockroachDB adapter) | Full


Full
Full
Full | [`django-cockroachdb`](https://pypi.org/project/django-cockroachdb/)


N/A
N/A
[`sqlalchemy-cockroachdb`](https://pypi.org/project/sqlalchemy-cockroachdb)
(includes client-side transaction retry handling) | [Build a Python App with CockroachDB (Django)](build-a-python-app-with-cockroachdb-django.html)


N/A (See [peewee docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cockroach-database).)
[Build a Python App with CockroachDB (SQLAlchemy)](build-a-python-app-with-cockroachdb-sqlalchemy.html) | +| Go | [GORM](https://github.com/jinzhu/gorm/releases)


[go-pg](https://github.com/go-pg/pg)
[upper/db](https://github.com/upper/db) | v1.24.1


v10.9.0
v4 | Full


Full
Full | [`crdbgorm`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbgorm)
(includes client-side transaction retry handling)
N/A
N/A | [Build a Go App with CockroachDB (GORM)](build-a-go-app-with-cockroachdb-gorm.html)


N/A
[Build a Go App with CockroachDB (upper/db)](build-a-go-app-with-cockroachdb-upperdb.html) | +| Java | [Hibernate](https://hibernate.org/orm/)
(including [Hibernate Spatial](https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html#spatial))
[jOOQ](https://www.jooq.org/)
[MyBatis](https://mybatis.org/mybatis-3/) | 6.6.20 (must be at least 5.4.19)


3.13.2 (must be at least 3.13.0)
3.5.5 | Full


Full
Full | N/A


N/A
N/A | [Build a Java App with CockroachDB (Hibernate)](build-a-java-app-with-cockroachdb-hibernate.html)


[Build a Java App with CockroachDB (jOOQ)](build-a-java-app-with-cockroachdb-jooq.html)
[Build a Spring App with CockroachDB (MyBatis)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) | +| JavaScript/TypeScript | [Sequelize](https://www.npmjs.com/package/sequelize)


[Knex.js](https://knexjs.org/)
[Prisma](https://prisma.io)
[TypeORM](https://www.npmjs.com/package/typeorm) | v6.0.5
(use latest version of CockroachDB adapter)
2.5.1
3.14.0
0.3.17 {% comment %}remove-unsafe-crdb-setting{% endcomment %} | Full


Full
Full
Full | [`sequelize-cockroachdb`](https://www.npmjs.com/package/sequelize-cockroachdb)


N/A
N/A
N/A | [Build a Node.js App with CockroachDB (Sequelize)](build-a-nodejs-app-with-cockroachdb-sequelize.html)


[Build a Node.js App with CockroachDB (Knex.js)](build-a-nodejs-app-with-cockroachdb-knexjs.html)
[Build a Node.js App with CockroachDB (Prisma)](build-a-nodejs-app-with-cockroachdb-prisma.html)
[Build a TypeScript App with CockroachDB (TypeORM)](build-a-typescript-app-with-cockroachdb.html) | +| Ruby | [ActiveRecord](https://rubygems.org/gems/activerecord)
[RGeo/RGeo-ActiveRecord](https://github.com/cockroachdb/activerecord-cockroachdb-adapter#working-with-spatial-data) | 8.1.0
(use latest version of CockroachDB adapter) | Full | [`activerecord-cockroachdb-adapter`](https://rubygems.org/gems/activerecord-cockroachdb-adapter)
(includes client-side transaction retry handling) | [Build a Ruby App with CockroachDB (ActiveRecord)](build-a-ruby-app-with-cockroachdb-activerecord.html) | +| Python | [Django](https://pypi.org/project/Django/)
(including [GeoDjango](https://docs.djangoproject.com/en/3.1/ref/contrib/gis/))
[peewee](https://github.com/coleifer/peewee/)
[SQLAlchemy](https://www.sqlalchemy.org/) | 4.1.x
(use latest version of CockroachDB adapter)

3.13.3
0.7.13
1.4.17
(use latest version of CockroachDB adapter) | Full


Full
Full
Full | [`django-cockroachdb`](https://pypi.org/project/django-cockroachdb/)


N/A
N/A
[`sqlalchemy-cockroachdb`](https://pypi.org/project/sqlalchemy-cockroachdb)
(includes client-side transaction retry handling) | [Build a Python App with CockroachDB (Django)](build-a-python-app-with-cockroachdb-django.html)


N/A (See [peewee docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cockroach-database).)
[Build a Python App with CockroachDB (SQLAlchemy)](build-a-python-app-with-cockroachdb-sqlalchemy.html) | ## Application frameworks diff --git a/src/current/_includes/v24.1/orchestration/start-cockroachdb-insecure.md b/src/current/_includes/v24.1/orchestration/start-cockroachdb-insecure.md index 3406d48edbb..c9af5cc0267 100644 --- a/src/current/_includes/v24.1/orchestration/start-cockroachdb-insecure.md +++ b/src/current/_includes/v24.1/orchestration/start-cockroachdb-insecure.md @@ -1,10 +1,10 @@ -1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it. +1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it. - Download [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml): + Download [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml ~~~ {{site.data.alerts.callout_info}} @@ -27,11 +27,11 @@ Alternatively, if you'd rather start with a configuration file that has been customized for performance: - 1. 
Download our [performance version of `cockroachdb-statefulset-insecure.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml): + 1. Download our [performance version of `cockroachdb-statefulset-insecure.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml ~~~ 1. Modify the file wherever there is a `TODO` comment. @@ -72,12 +72,12 @@ pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s ~~~ -1. Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: +1. 
Use our [`cluster-init.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml ~~~ ~~~ diff --git a/src/current/_includes/v24.1/orchestration/start-cockroachdb-local-insecure.md b/src/current/_includes/v24.1/orchestration/start-cockroachdb-local-insecure.md index 552cb3cd25f..196a53bce4c 100644 --- a/src/current/_includes/v24.1/orchestration/start-cockroachdb-local-insecure.md +++ b/src/current/_includes/v24.1/orchestration/start-cockroachdb-local-insecure.md @@ -1,8 +1,8 @@ -1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it: +1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it: {% include_cached copy-clipboard.html %} ~~~ shell - $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml + $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml ~~~ ~~~ @@ -41,12 +41,12 @@ pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s ~~~ -1. 
Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: +1. Use our [`cluster-init.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml ~~~ ~~~ diff --git a/src/current/_includes/v24.1/orchestration/start-cockroachdb-secure.md b/src/current/_includes/v24.1/orchestration/start-cockroachdb-secure.md index 972cabc2d8e..a7299e1aa25 100644 --- a/src/current/_includes/v24.1/orchestration/start-cockroachdb-secure.md +++ b/src/current/_includes/v24.1/orchestration/start-cockroachdb-secure.md @@ -1,10 +1,10 @@ ### Configure the cluster -1. Download and modify our [StatefulSet configuration](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml): +1. Download and modify our [StatefulSet configuration](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml ~~~ 1. Update `secretName` with the name of the corresponding node secret. 
diff --git a/src/current/_includes/v24.1/orchestration/test-cluster-secure.md b/src/current/_includes/v24.1/orchestration/test-cluster-secure.md index f255d8d62fc..c146d634626 100644 --- a/src/current/_includes/v24.1/orchestration/test-cluster-secure.md +++ b/src/current/_includes/v24.1/orchestration/test-cluster-secure.md @@ -42,7 +42,7 @@ $ kubectl create \ {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ --f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/client.yaml +-f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/client.yaml ~~~ ~~~ diff --git a/src/current/_includes/v24.1/prod-deployment/decommission-pre-flight-checks.md b/src/current/_includes/v24.1/prod-deployment/decommission-pre-flight-checks.md index b267379384b..70e35e4b72e 100644 --- a/src/current/_includes/v24.1/prod-deployment/decommission-pre-flight-checks.md +++ b/src/current/_includes/v24.1/prod-deployment/decommission-pre-flight-checks.md @@ -14,5 +14,5 @@ Failed running "node decommission" These checks can be skipped by [passing the flag `--checks=skip` to `cockroach node decommission`]({% link {{ page.version.version }}/cockroach-node.md %}#decommission-checks). {{site.data.alerts.callout_info}} -The amount of remaining disk space on other nodes in the cluster is not yet considered as part of the decommissioning pre-flight checks. For more information, see [cockroachdb/cockroach#71757](https://github.com/cockroachdb/cockroach/issues/71757) +The amount of remaining disk space on other nodes in the cluster is not yet considered as part of the decommissioning pre-flight checks. 
For more information, see cockroachdb/cockroach#71757 {{site.data.alerts.end}} diff --git a/src/current/_includes/v24.1/sql/savepoints-and-high-priority-transactions.md b/src/current/_includes/v24.1/sql/savepoints-and-high-priority-transactions.md index c6de489e641..43f09ea415f 100644 --- a/src/current/_includes/v24.1/sql/savepoints-and-high-priority-transactions.md +++ b/src/current/_includes/v24.1/sql/savepoints-and-high-priority-transactions.md @@ -1 +1 @@ -[`ROLLBACK TO SAVEPOINT`]({% link {{ page.version.version }}/rollback-transaction.md %}#rollback-a-nested-transaction) (for either regular savepoints or "restart savepoints" defined with `cockroach_restart`) causes a "feature not supported" error after a DDL statement in a [`HIGH PRIORITY` transaction]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities), in order to avoid a transaction deadlock. For more information, see GitHub issue [#46414](https://www.github.com/cockroachdb/cockroach/issues/46414). +[`ROLLBACK TO SAVEPOINT`]({% link {{ page.version.version }}/rollback-transaction.md %}#rollback-a-nested-transaction) (for either regular savepoints or "restart savepoints" defined with `cockroach_restart`) causes a "feature not supported" error after a DDL statement in a [`HIGH PRIORITY` transaction]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities), in order to avoid a transaction deadlock. For more information, see GitHub issue #46414. diff --git a/src/current/_includes/v24.1/sql/unsupported-postgres-features.md b/src/current/_includes/v24.1/sql/unsupported-postgres-features.md index 999c4e2e001..57bddc9c388 100644 --- a/src/current/_includes/v24.1/sql/unsupported-postgres-features.md +++ b/src/current/_includes/v24.1/sql/unsupported-postgres-features.md @@ -1,10 +1,10 @@ ### `CREATE DOMAIN` -CockroachDB does not support `CREATE DOMAIN`. Tracking issue: [cockroachdb/cockroach#108659](https://github.com/cockroachdb/cockroach/issues/108659). 
+CockroachDB does not support `CREATE DOMAIN`. Tracking issue: cockroachdb/cockroach#108659. ### PostgreSQL range types -CockroachDB does not support PostgreSQL range types. Tracking issue: [cockroachdb/cockroach#128638](https://github.com/cockroachdb/cockroach/issues/128638). +CockroachDB does not support PostgreSQL range types. Tracking issue: cockroachdb/cockroach#128638. ### Other unsupported features diff --git a/src/current/_includes/v24.2/backward-incompatible/alpha.1.md b/src/current/_includes/v24.2/backward-incompatible/alpha.1.md index 9a4f6dc554f..f813212137c 100644 --- a/src/current/_includes/v24.2/backward-incompatible/alpha.1.md +++ b/src/current/_includes/v24.2/backward-incompatible/alpha.1.md @@ -1,15 +1,15 @@ -- CockroachDB no longer performs environment variable expansion in the parameter `--certs-dir`. Uses like `--certs-dir='$HOME/path'` (expansion by CockroachDB) can be replaced by `--certs-dir="$HOME/path"` (expansion by the Unix shell). [#81298][#81298] -- In the Cockroach CLI, [`BOOL` values](../v23.1/bool.html) are now formatted as `t` or `f` instead of `True` or `False`. [#81943][#81943] -- Removed the `cockroach quit` command. It has been deprecated since v20.1. To [shut down a node](../v23.1/node-shutdown.html) gracefully, send a `SIGTERM` signal to it. [#82988][#82988] -- Added a cluster version to allow the [Pebble storage engine](../v23.1/architecture/storage-layer.html#pebble) to recombine certain SSTables (specifically, user keys that are split across multiple files in a level of the [log-structured merge-tree](../v23.1/architecture/storage-layer.html#log-structured-merge-trees)). Recombining the split user keys is required for supporting the range keys feature. The migration to recombine the SSTables is expected to be short (split user keys are rare in practice), but will block subsequent migrations until all tables have been recombined. 
The `storage.marked-for-compaction-files` time series metric can show the progress of the migration. [#84887][#84887] +- CockroachDB no longer performs environment variable expansion in the parameter `--certs-dir`. Uses like `--certs-dir='$HOME/path'` (expansion by CockroachDB) can be replaced by `--certs-dir="$HOME/path"` (expansion by the Unix shell). #81298 +- In the Cockroach CLI, [`BOOL` values](../v23.1/bool.html) are now formatted as `t` or `f` instead of `True` or `False`. #81943 +- Removed the `cockroach quit` command. It has been deprecated since v20.1. To [shut down a node](../v23.1/node-shutdown.html) gracefully, send a `SIGTERM` signal to it. #82988 +- Added a cluster version to allow the [Pebble storage engine](../v23.1/architecture/storage-layer.html#pebble) to recombine certain SSTables (specifically, user keys that are split across multiple files in a level of the [log-structured merge-tree](../v23.1/architecture/storage-layer.html#log-structured-merge-trees)). Recombining the split user keys is required for supporting the range keys feature. The migration to recombine the SSTables is expected to be short (split user keys are rare in practice), but will block subsequent migrations until all tables have been recombined. The `storage.marked-for-compaction-files` time series metric can show the progress of the migration. #84887 - Using a single TCP port listener for both RPC (node-node) and SQL client connections is now deprecated. This capability **will be removed** in the next version of CockroachDB. Instead, make one of the following configuration changes to your CockroachDB deployment: - Preferred: keep port `26257` for SQL, and allocate a new port, e.g., `26357`, for node-node RPC connections. For example, you might configure a node with the flags `--listen-addr=:26357 --sql-addr=:26257`, where subsequent nodes seeking to join would then use the flag `--join=othernode:26357,othernode:26257`. 
This will become the default configuration in the next version of CockroachDB. When using this mode of operation, care should be taken to use a `--join` flag that includes both the previous and new port numbers for other nodes, so that no network partition occurs during the upgrade. - - Optional: keep port `26257` for RPC, and allocate a new port, e.g., `26357`, for SQL connections. For example, you might configure a node with the flags `--listen-addr=:26257 --sql-addr=:26357`. When using this mode of operation, the `--join` flags do not need to be modified. However, SQL client apps or the SQL load balancer configuration (when in use) must be updated to use the new SQL port number. [#85671][#85671] -- If no `nullif` option is specified while using [`IMPORT CSV`](../v23.1/import.html), then a zero-length string in the input is now treated as `NULL`. The quoted empty string in the input is treated as an empty string. Similarly, if `nullif` is specified, then an unquoted value is treated as `NULL`, and a quoted value is treated as that string. These changes were made to make `IMPORT CSV` behave more similarly to `COPY CSV`. If the previous behavior (i.e., treating either quoted or unquoted values that match the `nullif` setting as `NULL`) is desired, you can use the new `allow_quoted_null` option in the `IMPORT` statement. [#84487][#84487] -- [`COPY FROM`](../v23.1/copy.html) operations are now atomic by default instead of being segmented into 100 row transactions. Set the `copy_from_atomic_enabled` session setting to `false` for the previous behavior. [#85986][#85986] -- The `GRANT` privilege has been removed and replaced by the more granular [`WITH GRANT OPTION`]({% link v24.2/grant.md %}#grant-privileges-with-the-option-to-grant-to-others), which provides control over which privileges are allowed to be granted. [#81310][#81310] -- Removed the ability to cast `int`, `int2`, and `int8` to a `0` length `BIT` or `VARBIT`. 
[#81266][#81266] -- Removed the deprecated `GRANT` privilege. [#81310][#81310] -- Removed the `ttl_automatic_column` storage parameter. The `crdb_internal_expiration` column is created when `ttl_expire_after` is set and removed when `ttl_expire_after` is reset. [#83134][#83134] -- Removed the byte string parameter in the `crdb_internal.schedule_sql_stats_compaction` function. [#82560][#82560] -- Changed the default value of the `enable_implicit_transaction_for_batch_statements` to `true`. This means that a [batch of statements]({% link v24.2/transactions.md %}#batched-statements) sent in one string separated by semicolons is treated as an implicit transaction. [#76834][#76834] + - Optional: keep port `26257` for RPC, and allocate a new port, e.g., `26357`, for SQL connections. For example, you might configure a node with the flags `--listen-addr=:26257 --sql-addr=:26357`. When using this mode of operation, the `--join` flags do not need to be modified. However, SQL client apps or the SQL load balancer configuration (when in use) must be updated to use the new SQL port number. #85671 +- If no `nullif` option is specified while using [`IMPORT CSV`](../v23.1/import.html), then a zero-length string in the input is now treated as `NULL`. The quoted empty string in the input is treated as an empty string. Similarly, if `nullif` is specified, then an unquoted value is treated as `NULL`, and a quoted value is treated as that string. These changes were made to make `IMPORT CSV` behave more similarly to `COPY CSV`. If the previous behavior (i.e., treating either quoted or unquoted values that match the `nullif` setting as `NULL`) is desired, you can use the new `allow_quoted_null` option in the `IMPORT` statement. #84487 +- [`COPY FROM`](../v23.1/copy.html) operations are now atomic by default instead of being segmented into 100 row transactions. Set the `copy_from_atomic_enabled` session setting to `false` for the previous behavior. 
#85986 +- The `GRANT` privilege has been removed and replaced by the more granular [`WITH GRANT OPTION`]({% link v24.2/grant.md %}#grant-privileges-with-the-option-to-grant-to-others), which provides control over which privileges are allowed to be granted. #81310 +- Removed the ability to cast `int`, `int2`, and `int8` to a `0` length `BIT` or `VARBIT`. #81266 +- Removed the deprecated `GRANT` privilege. #81310 +- Removed the `ttl_automatic_column` storage parameter. The `crdb_internal_expiration` column is created when `ttl_expire_after` is set and removed when `ttl_expire_after` is reset. #83134 +- Removed the byte string parameter in the `crdb_internal.schedule_sql_stats_compaction` function. #82560 +- Changed the default value of the `enable_implicit_transaction_for_batch_statements` to `true`. This means that a [batch of statements]({% link v24.2/transactions.md %}#batched-statements) sent in one string separated by semicolons is treated as an implicit transaction. #76834 diff --git a/src/current/_includes/v24.2/cdc/avro-udt-composite.md b/src/current/_includes/v24.2/cdc/avro-udt-composite.md index 7a34fbd3253..8374c4d2baf 100644 --- a/src/current/_includes/v24.2/cdc/avro-udt-composite.md +++ b/src/current/_includes/v24.2/cdc/avro-udt-composite.md @@ -1 +1 @@ -A changefeed in [Avro format]({% link {{ page.version.version }}/changefeed-messages.md %}#avro) will not be able to serialize [user-defined composite (tuple) types](create-type.html). [#102903](https://github.com/cockroachdb/cockroach/issues/102903) \ No newline at end of file +A changefeed in [Avro format]({% link {{ page.version.version }}/changefeed-messages.md %}#avro) will not be able to serialize [user-defined composite (tuple) types](create-type.html). 
#102903 \ No newline at end of file diff --git a/src/current/_includes/v24.2/cdc/csv-udt-composite.md b/src/current/_includes/v24.2/cdc/csv-udt-composite.md index 834bddd8366..39c88f3e06a 100644 --- a/src/current/_includes/v24.2/cdc/csv-udt-composite.md +++ b/src/current/_includes/v24.2/cdc/csv-udt-composite.md @@ -1 +1 @@ -A changefeed emitting [CSV]({% link {{ page.version.version }}/changefeed-messages.md %}#csv) will include `AS` labels in the message format when the changefeed serializes a [user-defined composite type]({% link {{ page.version.version }}/create-type.md %}). [#102905](https://github.com/cockroachdb/cockroach/issues/102905) \ No newline at end of file +A changefeed emitting [CSV]({% link {{ page.version.version }}/changefeed-messages.md %}#csv) will include `AS` labels in the message format when the changefeed serializes a [user-defined composite type]({% link {{ page.version.version }}/create-type.md %}). #102905 \ No newline at end of file diff --git a/src/current/_includes/v24.2/essential-metrics.md b/src/current/_includes/v24.2/essential-metrics.md index 0c1cc00a2d9..95a4f7a38a2 100644 --- a/src/current/_includes/v24.2/essential-metrics.md +++ b/src/current/_includes/v24.2/essential-metrics.md @@ -194,4 +194,4 @@ If [Row-Level TTL]({% link {{ page.version.version }}/row-level-ttl.md %}) is co - [Custom Chart Debug Page]({% link {{ page.version.version }}/ui-custom-chart-debug-page.md %}) - [Cluster API]({% link {{ page.version.version }}/cluster-api.md %}) - [Essential Alerts]({% link {{ page.version.version }}/essential-alerts-{{ include.deployment}}.md %}) -- [CockroachDB Source Code - DB Console metrics to graphs mappings (in *.tsx files)](https://github.com/cockroachdb/cockroach/tree/master/pkg/ui/workspaces/db-console/src/views/cluster/containers/nodeGraphs/dashboards) +- CockroachDB Source Code - DB Console metrics to graphs mappings (in *.tsx files) diff --git a/src/current/_includes/v24.2/faq/what-is-crdb.md 
b/src/current/_includes/v24.2/faq/what-is-crdb.md index 28857ed61fa..ee04ca10506 100644 --- a/src/current/_includes/v24.2/faq/what-is-crdb.md +++ b/src/current/_includes/v24.2/faq/what-is-crdb.md @@ -1,6 +1,6 @@ CockroachDB is a [distributed SQL](https://www.cockroachlabs.com/blog/what-is-distributed-sql/) database built on a transactional and strongly-consistent key-value store. It **scales** horizontally; **survives** disk, machine, rack, and even datacenter failures with minimal latency disruption and no manual intervention; supports **strongly-consistent** ACID transactions; and provides a familiar **SQL** API for structuring, manipulating, and querying data. -CockroachDB is inspired by Google's [Spanner](http://research.google.com/archive/spanner.html) and [F1](http://research.google.com/pubs/pub38125.html) technologies, and the [source code](https://github.com/cockroachdb/cockroach) is freely available. +CockroachDB is inspired by Google's [Spanner](http://research.google.com/archive/spanner.html) and [F1](http://research.google.com/pubs/pub38125.html) technologies, and the source code is freely available. {{site.data.alerts.callout_success}} For a deeper dive into CockroachDB's capabilities and how it fits into the database landscape, take the free [**Intro to Distributed SQL and CockroachDB**](https://university.cockroachlabs.com/courses/course-v1:crl+intro-to-distributed-sql-and-cockroachdb+self-paced/about) course on Cockroach University. 
diff --git a/src/current/_includes/v24.2/finalization-required/119894.md b/src/current/_includes/v24.2/finalization-required/119894.md index f2b393c3c0e..f5177dcd523 100644 --- a/src/current/_includes/v24.2/finalization-required/119894.md +++ b/src/current/_includes/v24.2/finalization-required/119894.md @@ -1 +1 @@ -[Splits](https://cockroachlabs.com/docs/{{ include.version }}/architecture/distribution-layer#range-splits) no longer hold [latches](https://cockroachlabs.com/docs/architecture/distribution-layer.#latch-manager) for time proportional to the range size while computing [MVCC](https://cockroachlabs.com/docs/{{ include.version }}/architecture/storage-layer#mvcc) statistics. Instead, MVCC statistics are pre-computed before the critical section of the split. As a side effect, the resulting statistics are no longer 100% accurate because they may correctly distribute writes concurrent with the split. To mitigate against this potential inaccuracy, and to prevent the statistics from drifting after successive splits, the existing stored statistics are re-computed and corrected if needed during the non-critical section of the split. [#119894](https://github.com/cockroachdb/cockroach/pull/119894) +[Splits](https://cockroachlabs.com/docs/{{ include.version }}/architecture/distribution-layer#range-splits) no longer hold [latches](https://cockroachlabs.com/docs/architecture/distribution-layer.#latch-manager) for time proportional to the range size while computing [MVCC](https://cockroachlabs.com/docs/{{ include.version }}/architecture/storage-layer#mvcc) statistics. Instead, MVCC statistics are pre-computed before the critical section of the split. As a side effect, the resulting statistics are no longer 100% accurate because they may correctly distribute writes concurrent with the split. 
To mitigate against this potential inaccuracy, and to prevent the statistics from drifting after successive splits, the existing stored statistics are re-computed and corrected if needed during the non-critical section of the split. #119894 diff --git a/src/current/_includes/v24.2/known-limitations/alter-changefeed-cdc-queries.md b/src/current/_includes/v24.2/known-limitations/alter-changefeed-cdc-queries.md index 56dd7eeaacd..4134b3c9f86 100644 --- a/src/current/_includes/v24.2/known-limitations/alter-changefeed-cdc-queries.md +++ b/src/current/_includes/v24.2/known-limitations/alter-changefeed-cdc-queries.md @@ -1 +1 @@ -{% if page.name == "alter-changefeed.md" %} `ALTER CHANGEFEED` {% else %} [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) {% endif %} is not fully supported with changefeeds that use {% if page.name == "cdc-queries.md" %} CDC queries. {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}). {% endif %} You can alter the options that a changefeed uses, but you cannot alter the changefeed target tables. [#83033](https://github.com/cockroachdb/cockroach/issues/83033) \ No newline at end of file +{% if page.name == "alter-changefeed.md" %} `ALTER CHANGEFEED` {% else %} [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) {% endif %} is not fully supported with changefeeds that use {% if page.name == "cdc-queries.md" %} CDC queries. {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}). {% endif %} You can alter the options that a changefeed uses, but you cannot alter the changefeed target tables. 
#83033 \ No newline at end of file diff --git a/src/current/_includes/v24.2/known-limitations/alter-changefeed-limitations.md b/src/current/_includes/v24.2/known-limitations/alter-changefeed-limitations.md index a183f2964f4..cb75ad71c55 100644 --- a/src/current/_includes/v24.2/known-limitations/alter-changefeed-limitations.md +++ b/src/current/_includes/v24.2/known-limitations/alter-changefeed-limitations.md @@ -1,4 +1,4 @@ -- It is necessary to [`PAUSE`]({% link {{ page.version.version }}/pause-job.md %}) the changefeed before performing any [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) statement. [#77171](https://github.com/cockroachdb/cockroach/issues/77171) +- It is necessary to [`PAUSE`]({% link {{ page.version.version }}/pause-job.md %}) the changefeed before performing any [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) statement. #77171 - CockroachDB does not keep track of the [`initial_scan`]({% link {{ page.version.version }}/create-changefeed.md %}#initial-scan) option applied to tables when it is set to `yes` or `only`. For example: ~~~ sql diff --git a/src/current/_includes/v24.2/known-limitations/alter-view-limitations.md b/src/current/_includes/v24.2/known-limitations/alter-view-limitations.md index 642bed6ce08..e44921629ff 100644 --- a/src/current/_includes/v24.2/known-limitations/alter-view-limitations.md +++ b/src/current/_includes/v24.2/known-limitations/alter-view-limitations.md @@ -1,4 +1,4 @@ `ALTER VIEW` does not currently support: - Changing the [`SELECT`]({% link {{ page.version.version }}/select-clause.md %}) statement executed by a view. Instead, you must drop the existing view and create a new view. -- Renaming a view that other views depend on. This feature may be added in the future. [#10083](https://github.com/cockroachdb/cockroach/issues/10083) \ No newline at end of file +- Renaming a view that other views depend on. This feature may be added in the future. 
#10083 \ No newline at end of file diff --git a/src/current/_includes/v24.2/known-limitations/aost-limitations.md b/src/current/_includes/v24.2/known-limitations/aost-limitations.md index 811c884d08d..c42e62800d3 100644 --- a/src/current/_includes/v24.2/known-limitations/aost-limitations.md +++ b/src/current/_includes/v24.2/known-limitations/aost-limitations.md @@ -1 +1 @@ -CockroachDB does not support placeholders in {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %}. The time value must be a constant value embedded in the SQL string. [#30955](https://github.com/cockroachdb/cockroach/issues/30955) \ No newline at end of file +CockroachDB does not support placeholders in {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %}. The time value must be a constant value embedded in the SQL string. 
#30955 \ No newline at end of file diff --git a/src/current/_includes/v24.2/known-limitations/cannot-refresh-materialized-views-inside-transactions.md b/src/current/_includes/v24.2/known-limitations/cannot-refresh-materialized-views-inside-transactions.md index b0aaf728177..2f6fe466a95 100644 --- a/src/current/_includes/v24.2/known-limitations/cannot-refresh-materialized-views-inside-transactions.md +++ b/src/current/_includes/v24.2/known-limitations/cannot-refresh-materialized-views-inside-transactions.md @@ -24,4 +24,4 @@ SQLSTATE: 25000 ~~~ - [#66008](https://github.com/cockroachdb/cockroach/issues/66008) + #66008 diff --git a/src/current/_includes/v24.2/known-limitations/cdc-queries-column-families.md b/src/current/_includes/v24.2/known-limitations/cdc-queries-column-families.md index 505a8c9700e..52bdabec236 100644 --- a/src/current/_includes/v24.2/known-limitations/cdc-queries-column-families.md +++ b/src/current/_includes/v24.2/known-limitations/cdc-queries-column-families.md @@ -1 +1 @@ -Creating a changefeed with {% if page.name == "cdc-queries.md" %} CDC queries {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}) {% endif %} on tables with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %} is not supported. [#127761](https://github.com/cockroachdb/cockroach/issues/127761) \ No newline at end of file +Creating a changefeed with {% if page.name == "cdc-queries.md" %} CDC queries {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}) {% endif %} on tables with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %} is not supported. 
#127761 \ No newline at end of file diff --git a/src/current/_includes/v24.2/known-limitations/cdc-queries.md b/src/current/_includes/v24.2/known-limitations/cdc-queries.md index 2839eba5eda..552900531b0 100644 --- a/src/current/_includes/v24.2/known-limitations/cdc-queries.md +++ b/src/current/_includes/v24.2/known-limitations/cdc-queries.md @@ -3,5 +3,5 @@ - The following are not permitted in CDC queries: - [Volatile functions]({% link {{ page.version.version }}/functions-and-operators.md %}#function-volatility). - Sub-select queries. - - [Aggregate]({% link {{ page.version.version }}/functions-and-operators.md %}#aggregate-functions) and [window functions]({% link {{ page.version.version }}/window-functions.md %}) (i.e., functions operating over many rows). [#98237](https://github.com/cockroachdb/cockroach/issues/98237) -- `delete` changefeed events will only contain the [primary key]({% link {{ page.version.version }}/primary-key.md %}). All other columns will emit as `NULL`. See [Capture delete messages]({% link {{ page.version.version }}/cdc-queries.md %}#capture-delete-messages) for detail on running a CDC query that emits the deleted values. [#83835](https://github.com/cockroachdb/cockroach/issues/83835) + - [Aggregate]({% link {{ page.version.version }}/functions-and-operators.md %}#aggregate-functions) and [window functions]({% link {{ page.version.version }}/window-functions.md %}) (i.e., functions operating over many rows). #98237 +- `delete` changefeed events will only contain the [primary key]({% link {{ page.version.version }}/primary-key.md %}). All other columns will emit as `NULL`. See [Capture delete messages]({% link {{ page.version.version }}/cdc-queries.md %}#capture-delete-messages) for detail on running a CDC query that emits the deleted values. 
#83835 diff --git a/src/current/_includes/v24.2/known-limitations/cdc.md b/src/current/_includes/v24.2/known-limitations/cdc.md index a473e94367c..0a3914da8bd 100644 --- a/src/current/_includes/v24.2/known-limitations/cdc.md +++ b/src/current/_includes/v24.2/known-limitations/cdc.md @@ -1,8 +1,8 @@ -- Changefeed target options are limited to tables and [column families]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}). [#73435](https://github.com/cockroachdb/cockroach/issues/73435) +- Changefeed target options are limited to tables and [column families]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}). #73435 - {% include {{page.version.version}}/cdc/kafka-vpc-limitation.md %} -- Webhook sinks only support HTTPS. Use the [`insecure_tls_skip_verify`]({% link {{ page.version.version }}/create-changefeed.md %}#insecure-tls-skip-verify) parameter when testing to disable certificate verification; however, this still requires HTTPS and certificates. [#73431](https://github.com/cockroachdb/cockroach/issues/73431) -- Formats for changefeed messages are not supported by all changefeed sinks. Refer to the [Changefeed Sinks]({% link {{ page.version.version }}/changefeed-sinks.md %}) page for details on compatible formats with each sink and the [`format`]({% link {{ page.version.version }}/create-changefeed.md %}) option to specify a changefeed message format. [#73432](https://github.com/cockroachdb/cockroach/issues/73432) -- Using the [`split_column_families`]({% link {{ page.version.version }}/create-changefeed.md %}#split-column-families) and [`resolved`]({% link {{ page.version.version }}/create-changefeed.md %}#resolved) options on the same changefeed will cause an error when using the following [sinks](changefeed-sinks.html): Kafka and Google Cloud Pub/Sub. Instead, use the individual `FAMILY` keyword to specify column families when creating a changefeed. 
[#79452](https://github.com/cockroachdb/cockroach/issues/79452) +- Webhook sinks only support HTTPS. Use the [`insecure_tls_skip_verify`]({% link {{ page.version.version }}/create-changefeed.md %}#insecure-tls-skip-verify) parameter when testing to disable certificate verification; however, this still requires HTTPS and certificates. #73431 +- Formats for changefeed messages are not supported by all changefeed sinks. Refer to the [Changefeed Sinks]({% link {{ page.version.version }}/changefeed-sinks.md %}) page for details on compatible formats with each sink and the [`format`]({% link {{ page.version.version }}/create-changefeed.md %}) option to specify a changefeed message format. #73432 +- Using the [`split_column_families`]({% link {{ page.version.version }}/create-changefeed.md %}#split-column-families) and [`resolved`]({% link {{ page.version.version }}/create-changefeed.md %}#resolved) options on the same changefeed will cause an error when using the following [sinks](changefeed-sinks.html): Kafka and Google Cloud Pub/Sub. Instead, use the individual `FAMILY` keyword to specify column families when creating a changefeed. 
#79452 - {% include {{page.version.version}}/cdc/types-udt-composite-general.md %} The following limitations apply: - {% include {{page.version.version}}/cdc/avro-udt-composite.md %} - {% include {{page.version.version}}/cdc/csv-udt-composite.md %} \ No newline at end of file diff --git a/src/current/_includes/v24.2/known-limitations/changefeed-column-family-message.md b/src/current/_includes/v24.2/known-limitations/changefeed-column-family-message.md index 41744b9b4b4..e77b6cc51e0 100644 --- a/src/current/_includes/v24.2/known-limitations/changefeed-column-family-message.md +++ b/src/current/_includes/v24.2/known-limitations/changefeed-column-family-message.md @@ -1 +1 @@ -When you create a changefeed on a table with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %}, the changefeed will emit messages per column family in separate streams. As a result, [changefeed messages]({% link {{ page.version.version }}/changefeed-messages.md %}) for different column families will arrive at the [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) under separate topics. [#127736](https://github.com/cockroachdb/cockroach/issues/127736) \ No newline at end of file +When you create a changefeed on a table with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %}, the changefeed will emit messages per column family in separate streams. As a result, [changefeed messages]({% link {{ page.version.version }}/changefeed-messages.md %}) for different column families will arrive at the [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) under separate topics. 
#127736 \ No newline at end of file diff --git a/src/current/_includes/v24.2/known-limitations/copy-syntax.md b/src/current/_includes/v24.2/known-limitations/copy-syntax.md index e64a075dcac..bd2e56e1c25 100644 --- a/src/current/_includes/v24.2/known-limitations/copy-syntax.md +++ b/src/current/_includes/v24.2/known-limitations/copy-syntax.md @@ -1,5 +1,5 @@ CockroachDB does not yet support the following `COPY` syntax: - - `COPY ... WITH FREEZE`. [#85573](https://github.com/cockroachdb/cockroach/issues/85573) - - `COPY ... WITH QUOTE`. [#85574](https://github.com/cockroachdb/cockroach/issues/85574) - - `COPY ... FROM ... WHERE `. [#54580](https://github.com/cockroachdb/cockroach/issues/54580) + - `COPY ... WITH FREEZE`. #85573 + - `COPY ... WITH QUOTE`. #85574 + - `COPY ... FROM ... WHERE `. #54580 diff --git a/src/current/_includes/v24.2/known-limitations/create-statistics-aost-limitation.md b/src/current/_includes/v24.2/known-limitations/create-statistics-aost-limitation.md index 09f86f51c48..d318ce338be 100644 --- a/src/current/_includes/v24.2/known-limitations/create-statistics-aost-limitation.md +++ b/src/current/_includes/v24.2/known-limitations/create-statistics-aost-limitation.md @@ -1 +1 @@ -The `ANALYZE` alias {% if page.name != "create-statistics.md" %}of [`CREATE STATISTICS`]({% link {{ page.version.version }}/create-statistics.md %}){% endif %} does not support specifying an {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %} timestamp. `ANALYZE` statements use `AS OF SYSTEM TIME '-0.001ms'` automatically. For more control over the statistics interval, use the `CREATE STATISTICS` syntax instead. 
[#96430](https://github.com/cockroachdb/cockroach/issues/96430) \ No newline at end of file +The `ANALYZE` alias {% if page.name != "create-statistics.md" %}of [`CREATE STATISTICS`]({% link {{ page.version.version }}/create-statistics.md %}){% endif %} does not support specifying an {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %} timestamp. `ANALYZE` statements use `AS OF SYSTEM TIME '-0.001ms'` automatically. For more control over the statistics interval, use the `CREATE STATISTICS` syntax instead. #96430 \ No newline at end of file diff --git a/src/current/_includes/v24.2/known-limitations/drop-column-partial-index.md b/src/current/_includes/v24.2/known-limitations/drop-column-partial-index.md index 9fd1811cc43..fd28f0a96a2 100644 --- a/src/current/_includes/v24.2/known-limitations/drop-column-partial-index.md +++ b/src/current/_includes/v24.2/known-limitations/drop-column-partial-index.md @@ -1 +1 @@ -CockroachDB prevents a column from being dropped using [`ALTER TABLE ... DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) if it is referenced by a partial index predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). [#97813](https://github.com/cockroachdb/cockroach/issues/97813). \ No newline at end of file +CockroachDB prevents a column from being dropped using [`ALTER TABLE ... DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) if it is referenced by a partial index predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). #97813. 
\ No newline at end of file diff --git a/src/current/_includes/v24.2/known-limitations/drop-owned-by-limitations.md b/src/current/_includes/v24.2/known-limitations/drop-owned-by-limitations.md index 95685f6adf1..af99d4b7cfe 100644 --- a/src/current/_includes/v24.2/known-limitations/drop-owned-by-limitations.md +++ b/src/current/_includes/v24.2/known-limitations/drop-owned-by-limitations.md @@ -10,4 +10,4 @@ The phrase "synthetic privileges" in the error message refers to [system-level privileges]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges). - The workaround is to use [`SHOW SYSTEM GRANTS FOR {role}`](show-system-grants.html) and then use [`REVOKE SYSTEM ...`](revoke.html#revoke-system-level-privileges-on-the-entire-cluster) for each privilege in the result. [#88149](https://github.com/cockroachdb/cockroach/issues/88149) \ No newline at end of file + The workaround is to use [`SHOW SYSTEM GRANTS FOR {role}`](show-system-grants.html) and then use [`REVOKE SYSTEM ...`](revoke.html#revoke-system-level-privileges-on-the-entire-cluster) for each privilege in the result. #88149 \ No newline at end of file diff --git a/src/current/_includes/v24.2/known-limitations/expression-index-limitations.md b/src/current/_includes/v24.2/known-limitations/expression-index-limitations.md index c0e94185948..9487028791c 100644 --- a/src/current/_includes/v24.2/known-limitations/expression-index-limitations.md +++ b/src/current/_includes/v24.2/known-limitations/expression-index-limitations.md @@ -1,6 +1,6 @@ - The expression cannot reference columns outside the index's table. - Functional expression output must be determined by the input arguments. For example, you can't use the [volatile function]({% link {{ page.version.version }}/functions-and-operators.md %}#function-volatility) `now()` to create an index because its output depends on more than just the function arguments. 
-- CockroachDB does not allow {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} to reference [computed columns]({% link {{ page.version.version }}/computed-columns.md %}). [#67900](https://github.com/cockroachdb/cockroach/issues/67900) +- CockroachDB does not allow {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} to reference [computed columns]({% link {{ page.version.version }}/computed-columns.md %}). #67900 - CockroachDB does not support expressions as `ON CONFLICT` targets. This means that unique {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} cannot be selected as arbiters for [`INSERT .. ON CONFLICT`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause) statements. For example: {% include_cached copy-clipboard.html %} @@ -40,4 +40,4 @@ HINT: try \h INSERT ~~~ - [#67893](https://github.com/cockroachdb/cockroach/issues/67893) + #67893 diff --git a/src/current/_includes/v24.2/known-limitations/forecasted-stats-limitations.md b/src/current/_includes/v24.2/known-limitations/forecasted-stats-limitations.md index c8753124a96..b5d03e8ffc5 100644 --- a/src/current/_includes/v24.2/known-limitations/forecasted-stats-limitations.md +++ b/src/current/_includes/v24.2/known-limitations/forecasted-stats-limitations.md @@ -6,4 +6,4 @@ Although [`SHOW STATISTICS WITH FORECAST`]({% link {{ page.version.version }}/show-statistics.md %}#display-forecasted-statistics) shows the settings taking effect immediately, they do not actually take effect until new statistics are collected (as can be verified with [`EXPLAIN`]({% link {{ page.version.version }}/explain.md %})). 
- As a workaround, disable and enable forecasting at the [cluster]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-clusters) or [table]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-tables) level. This will invalidate the statistics cache and cause these settings to take effect immediately. [#123852](https://github.com/cockroachdb/cockroach/issues/123852) \ No newline at end of file + As a workaround, disable and enable forecasting at the [cluster]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-clusters) or [table]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-tables) level. This will invalidate the statistics cache and cause these settings to take effect immediately. #123852 \ No newline at end of file diff --git a/src/current/_includes/v24.2/known-limitations/full-text-search-unsupported.md b/src/current/_includes/v24.2/known-limitations/full-text-search-unsupported.md index 7b5a83f2cae..2be666994f1 100644 --- a/src/current/_includes/v24.2/known-limitations/full-text-search-unsupported.md +++ b/src/current/_includes/v24.2/known-limitations/full-text-search-unsupported.md @@ -11,4 +11,4 @@ - `!! tsquery` comparisons. - `tsquery @> tsquery` and `tsquery <@ tsquery` comparisons. 
-[#41288](https://github.com/cockroachdb/cockroach/issues/41288) \ No newline at end of file +#41288 \ No newline at end of file diff --git a/src/current/_includes/v24.2/known-limitations/generic-query-plan-limitations.md b/src/current/_includes/v24.2/known-limitations/generic-query-plan-limitations.md index e28e66d5f32..03d72195334 100644 --- a/src/current/_includes/v24.2/known-limitations/generic-query-plan-limitations.md +++ b/src/current/_includes/v24.2/known-limitations/generic-query-plan-limitations.md @@ -1,2 +1,2 @@ -- Because [generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache) use lookup joins instead of the scans and revscans used by custom query plans, generic query plans do not perform as well as custom query plans in some cases. [#128916](https://github.com/cockroachdb/cockroach/issues/128916) -- [Generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-type) are not included in the [plan cache]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache). This means a generic query plan built and optimized for a prepared statement in one session cannot be used by another session. To reuse generic query plans for maximum performance, a prepared statement should be executed multiple times instead of prepared and executed once. [#128911](https://github.com/cockroachdb/cockroach/issues/128911) \ No newline at end of file +- Because [generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache) use lookup joins instead of the scans and revscans used by custom query plans, generic query plans do not perform as well as custom query plans in some cases. #128916 +- [Generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-type) are not included in the [plan cache]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache). 
This means a generic query plan built and optimized for a prepared statement in one session cannot be used by another session. To reuse generic query plans for maximum performance, a prepared statement should be executed multiple times instead of prepared and executed once. #128911 \ No newline at end of file diff --git a/src/current/_includes/v24.2/known-limitations/locality-optimized-search-virtual-computed-columns.md b/src/current/_includes/v24.2/known-limitations/locality-optimized-search-virtual-computed-columns.md index d6acf418aa8..39b3f2cb00e 100644 --- a/src/current/_includes/v24.2/known-limitations/locality-optimized-search-virtual-computed-columns.md +++ b/src/current/_includes/v24.2/known-limitations/locality-optimized-search-virtual-computed-columns.md @@ -1 +1 @@ -- {% if page.name == "cost-based-optimizer.md" %} Locality optimized search {% else %} [Locality optimized search]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) {% endif %} does not work for queries that use [partitioned unique indexes]({% link {{ page.version.version }}/partitioning.md %}#partition-using-a-secondary-index) on [virtual computed columns](computed-columns.html#virtual-computed-columns). A workaround for computed columns is to make the virtual computed column a [stored computed column](computed-columns.html#stored-computed-columns). Locality optimized search does not work for queries that use partitioned unique [expression indexes](expression-indexes.html). 
[#68129](https://github.com/cockroachdb/cockroach/issues/68129) +- {% if page.name == "cost-based-optimizer.md" %} Locality optimized search {% else %} [Locality optimized search]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) {% endif %} does not work for queries that use [partitioned unique indexes]({% link {{ page.version.version }}/partitioning.md %}#partition-using-a-secondary-index) on [virtual computed columns](computed-columns.html#virtual-computed-columns). A workaround for computed columns is to make the virtual computed column a [stored computed column](computed-columns.html#stored-computed-columns). Locality optimized search does not work for queries that use partitioned unique [expression indexes](expression-indexes.html). #68129 diff --git a/src/current/_includes/v24.2/known-limitations/materialized-views-no-stats.md b/src/current/_includes/v24.2/known-limitations/materialized-views-no-stats.md index 02f2bd787c4..2bfe00d5307 100644 --- a/src/current/_includes/v24.2/known-limitations/materialized-views-no-stats.md +++ b/src/current/_includes/v24.2/known-limitations/materialized-views-no-stats.md @@ -1 +1 @@ -- The optimizer may not select the most optimal query plan when querying materialized views because CockroachDB does not [collect statistics]({% link {{ page.version.version }}/cost-based-optimizer.md %}#table-statistics) on materialized views. [#78181](https://github.com/cockroachdb/cockroach/issues/78181). +- The optimizer may not select the most optimal query plan when querying materialized views because CockroachDB does not [collect statistics]({% link {{ page.version.version }}/cost-based-optimizer.md %}#table-statistics) on materialized views. #78181. 
diff --git a/src/current/_includes/v24.2/known-limitations/multiple-arbiter-indexes.md b/src/current/_includes/v24.2/known-limitations/multiple-arbiter-indexes.md index c9861623314..28b41eca491 100644 --- a/src/current/_includes/v24.2/known-limitations/multiple-arbiter-indexes.md +++ b/src/current/_includes/v24.2/known-limitations/multiple-arbiter-indexes.md @@ -1 +1 @@ -CockroachDB does not currently support multiple arbiter indexes for [`INSERT ON CONFLICT DO UPDATE`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause), and will return an error if there are multiple unique or exclusion constraints matching the `ON CONFLICT DO UPDATE` specification. [#53170](https://github.com/cockroachdb/cockroach/issues/53170) \ No newline at end of file +CockroachDB does not currently support multiple arbiter indexes for [`INSERT ON CONFLICT DO UPDATE`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause), and will return an error if there are multiple unique or exclusion constraints matching the `ON CONFLICT DO UPDATE` specification. #53170 \ No newline at end of file diff --git a/src/current/_includes/v24.2/known-limitations/plpgsql-limitations.md b/src/current/_includes/v24.2/known-limitations/plpgsql-limitations.md index cfb6c0e102a..9403d585fc4 100644 --- a/src/current/_includes/v24.2/known-limitations/plpgsql-limitations.md +++ b/src/current/_includes/v24.2/known-limitations/plpgsql-limitations.md @@ -1,26 +1,26 @@ {% if page.name != "known-limitations.md" # New limitations in v24.2 %} {% endif %} -- It is not possible to use a variable as a target more than once in the same `INTO` clause. For example, `SELECT 1, 2 INTO x, x;`. [#121605](https://github.com/cockroachdb/cockroach/issues/121605) -- PLpgSQL variable declarations cannot inherit the type of a table row or column using `%TYPE` or `%ROWTYPE` syntax. 
[#114676](https://github.com/cockroachdb/cockroach/issues/114676) -- PL/pgSQL arguments cannot be referenced with ordinals (e.g., `$1`, `$2`). [#114701](https://github.com/cockroachdb/cockroach/issues/114701) +- It is not possible to use a variable as a target more than once in the same `INTO` clause. For example, `SELECT 1, 2 INTO x, x;`. #121605 +- PLpgSQL variable declarations cannot inherit the type of a table row or column using `%TYPE` or `%ROWTYPE` syntax. #114676 +- PL/pgSQL arguments cannot be referenced with ordinals (e.g., `$1`, `$2`). #114701 - The following statements are not supported: - - `FOR` loops, including `FOR` cursor loops, `FOR` query loops, and `FOREACH` loops. [#105246](https://github.com/cockroachdb/cockroach/issues/105246) - - `RETURN NEXT` and `RETURN QUERY`. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) - - `PERFORM`, `EXECUTE`, `GET DIAGNOSTICS`, and `CASE`. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- PL/pgSQL exception blocks cannot catch [transaction retry errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}). [#111446](https://github.com/cockroachdb/cockroach/issues/111446) -- `RAISE` statements cannot be annotated with names of schema objects related to the error (i.e., using `COLUMN`, `CONSTRAINT`, `DATATYPE`, `TABLE`, or `SCHEMA`). [#106237](https://github.com/cockroachdb/cockroach/issues/106237) -- `RAISE` statements message the client directly, and do not produce log output. [#117750](https://github.com/cockroachdb/cockroach/issues/117750) -- `ASSERT` debugging checks are not supported. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- `RECORD` parameters and variables are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). 
[#105713](https://github.com/cockroachdb/cockroach/issues/105713) -- Variable shadowing (e.g., declaring a variable with the same name in an inner block) is not supported in PL/pgSQL. [#117508](https://github.com/cockroachdb/cockroach/issues/117508) -- Syntax for accessing members of composite types without parentheses is not supported. [#114687](https://github.com/cockroachdb/cockroach/issues/114687) -- `NOT NULL` variable declarations are not supported. [#105243](https://github.com/cockroachdb/cockroach/issues/105243) -- Cursors opened in PL/pgSQL execute their queries on opening, affecting performance and resource usage. [#111479](https://github.com/cockroachdb/cockroach/issues/111479) -- Cursors in PL/pgSQL cannot be declared with arguments. [#117746](https://github.com/cockroachdb/cockroach/issues/117746) -- `OPEN FOR EXECUTE` is not supported for opening cursors. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- The `print_strict_params` option is not supported in PL/pgSQL. [#123671](https://github.com/cockroachdb/cockroach/issues/123671) -- The `FOUND` local variable, which checks whether a statement affected any rows, is not supported in PL/pgSQL. [#122306](https://github.com/cockroachdb/cockroach/issues/122306) -- By default, when a PL/pgSQL variable conflicts with a column name, CockroachDB resolves the ambiguity by treating it as a column reference rather than a variable reference. This behavior differs from PostgreSQL, where an ambiguous column error is reported, and it is possible to change the `plpgsql.variable_conflict` setting in order to prefer either columns or variables. [#115680](https://github.com/cockroachdb/cockroach/issues/115680) -- It is not possible to define a `RECORD`-returning PL/pgSQL function that returns different-typed expressions from different `RETURN` statements. CockroachDB requires a consistent return type for `RECORD`-returning functions. 
[#115384](https://github.com/cockroachdb/cockroach/issues/115384) -- Variables cannot be declared with an associated collation using the `COLLATE` keyword. [#105245](https://github.com/cockroachdb/cockroach/issues/105245) -- Variables cannot be accessed using the `label.var_name` pattern. [#122322](https://github.com/cockroachdb/cockroach/issues/122322) \ No newline at end of file + - `FOR` loops, including `FOR` cursor loops, `FOR` query loops, and `FOREACH` loops. #105246 + - `RETURN NEXT` and `RETURN QUERY`. #117744 + - `PERFORM`, `EXECUTE`, `GET DIAGNOSTICS`, and `CASE`. #117744 +- PL/pgSQL exception blocks cannot catch [transaction retry errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}). #111446 +- `RAISE` statements cannot be annotated with names of schema objects related to the error (i.e., using `COLUMN`, `CONSTRAINT`, `DATATYPE`, `TABLE`, or `SCHEMA`). #106237 +- `RAISE` statements message the client directly, and do not produce log output. #117750 +- `ASSERT` debugging checks are not supported. #117744 +- `RECORD` parameters and variables are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). #105713 +- Variable shadowing (e.g., declaring a variable with the same name in an inner block) is not supported in PL/pgSQL. #117508 +- Syntax for accessing members of composite types without parentheses is not supported. #114687 +- `NOT NULL` variable declarations are not supported. #105243 +- Cursors opened in PL/pgSQL execute their queries on opening, affecting performance and resource usage. #111479 +- Cursors in PL/pgSQL cannot be declared with arguments. #117746 +- `OPEN FOR EXECUTE` is not supported for opening cursors. #117744 +- The `print_strict_params` option is not supported in PL/pgSQL. #123671 +- The `FOUND` local variable, which checks whether a statement affected any rows, is not supported in PL/pgSQL. 
#122306 +- By default, when a PL/pgSQL variable conflicts with a column name, CockroachDB resolves the ambiguity by treating it as a column reference rather than a variable reference. This behavior differs from PostgreSQL, where an ambiguous column error is reported, and it is possible to change the `plpgsql.variable_conflict` setting in order to prefer either columns or variables. #115680 +- It is not possible to define a `RECORD`-returning PL/pgSQL function that returns different-typed expressions from different `RETURN` statements. CockroachDB requires a consistent return type for `RECORD`-returning functions. #115384 +- Variables cannot be declared with an associated collation using the `COLLATE` keyword. #105245 +- Variables cannot be accessed using the `label.var_name` pattern. #122322 \ No newline at end of file diff --git a/src/current/_includes/v24.2/known-limitations/read-committed-limitations.md b/src/current/_includes/v24.2/known-limitations/read-committed-limitations.md index 947cf56814d..4d05441de0a 100644 --- a/src/current/_includes/v24.2/known-limitations/read-committed-limitations.md +++ b/src/current/_includes/v24.2/known-limitations/read-committed-limitations.md @@ -1,7 +1,7 @@ -- Schema changes (e.g., [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}), [`CREATE SCHEMA`]({% link {{ page.version.version }}/create-schema.md %}), [`CREATE INDEX`]({% link {{ page.version.version }}/create-index.md %})) cannot be performed within explicit `READ COMMITTED` transactions when the [`autocommit_before_ddl` session setting]({% link {{page.version.version}}/set-vars.md %}#autocommit-before-ddl) is set to `off`, and will cause transactions to abort. As a workaround, [set the transaction's isolation level]({% link {{ page.version.version }}/read-committed.md %}#set-the-current-transaction-to-read-committed) to `SERIALIZABLE`. 
[#114778](https://github.com/cockroachdb/cockroach/issues/114778) -- `READ COMMITTED` transactions performing `INSERT`, `UPDATE`, or `UPSERT` cannot access [`REGIONAL BY ROW`]({% link {{ page.version.version }}/table-localities.md %}#regional-by-row-tables) tables in which [`UNIQUE`]({% link {{ page.version.version }}/unique.md %}) and [`PRIMARY KEY`]({% link {{ page.version.version }}/primary-key.md %}) constraints exist, the region is not included in the constraint, and the region cannot be computed from the constraint columns. [#110873](https://github.com/cockroachdb/cockroach/issues/110873) -- Multi-column-family checks during updates are not supported under `READ COMMITTED` isolation. [#112488](https://github.com/cockroachdb/cockroach/issues/112488) +- Schema changes (e.g., [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}), [`CREATE SCHEMA`]({% link {{ page.version.version }}/create-schema.md %}), [`CREATE INDEX`]({% link {{ page.version.version }}/create-index.md %})) cannot be performed within explicit `READ COMMITTED` transactions when the [`autocommit_before_ddl` session setting]({% link {{page.version.version}}/set-vars.md %}#autocommit-before-ddl) is set to `off`, and will cause transactions to abort. As a workaround, [set the transaction's isolation level]({% link {{ page.version.version }}/read-committed.md %}#set-the-current-transaction-to-read-committed) to `SERIALIZABLE`. #114778 +- `READ COMMITTED` transactions performing `INSERT`, `UPDATE`, or `UPSERT` cannot access [`REGIONAL BY ROW`]({% link {{ page.version.version }}/table-localities.md %}#regional-by-row-tables) tables in which [`UNIQUE`]({% link {{ page.version.version }}/unique.md %}) and [`PRIMARY KEY`]({% link {{ page.version.version }}/primary-key.md %}) constraints exist, the region is not included in the constraint, and the region cannot be computed from the constraint columns. 
#110873 +- Multi-column-family checks during updates are not supported under `READ COMMITTED` isolation. #112488 - Because locks acquired by [foreign key]({% link {{ page.version.version }}/foreign-key.md %}) checks, [`SELECT FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}), and [`SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) are fully replicated under `READ COMMITTED` isolation, some queries experience a delay for Raft replication. - [Foreign key]({% link {{ page.version.version }}/foreign-key.md %}) checks are not performed in parallel under `READ COMMITTED` isolation. - [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements are less optimized under `READ COMMITTED` isolation than under `SERIALIZABLE` isolation. Under `READ COMMITTED` isolation, `SELECT FOR UPDATE` and `SELECT FOR SHARE` usually perform an extra lookup join for every locked table when compared to the same queries under `SERIALIZABLE`. In addition, some optimization steps (such as de-correlation of correlated [subqueries]({% link {{ page.version.version }}/subqueries.md %})) are not currently performed on these queries. -- Regardless of isolation level, [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements in CockroachDB do not prevent insertion of new rows matching the search condition (i.e., [phantom reads]({% link {{ page.version.version }}/read-committed.md %}#non-repeatable-reads-and-phantom-reads)). This matches PostgreSQL behavior at all isolation levels. 
[#120673](https://github.com/cockroachdb/cockroach/issues/120673) \ No newline at end of file +- Regardless of isolation level, [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements in CockroachDB do not prevent insertion of new rows matching the search condition (i.e., [phantom reads]({% link {{ page.version.version }}/read-committed.md %}#non-repeatable-reads-and-phantom-reads)). This matches PostgreSQL behavior at all isolation levels. #120673 \ No newline at end of file diff --git a/src/current/_includes/v24.2/known-limitations/restore-multiregion-match.md b/src/current/_includes/v24.2/known-limitations/restore-multiregion-match.md index 20ddbb0c930..4c533f4283e 100644 --- a/src/current/_includes/v24.2/known-limitations/restore-multiregion-match.md +++ b/src/current/_includes/v24.2/known-limitations/restore-multiregion-match.md @@ -47,4 +47,4 @@ ALTER DATABASE destination_database SET PRIMARY REGION "us-east1"; ~~~ - [#71071](https://github.com/cockroachdb/cockroach/issues/71071) \ No newline at end of file + #71071 \ No newline at end of file diff --git a/src/current/_includes/v24.2/known-limitations/restore-tables-non-multi-reg.md b/src/current/_includes/v24.2/known-limitations/restore-tables-non-multi-reg.md index 5390f2d09ee..e05bc340141 100644 --- a/src/current/_includes/v24.2/known-limitations/restore-tables-non-multi-reg.md +++ b/src/current/_includes/v24.2/known-limitations/restore-tables-non-multi-reg.md @@ -1 +1 @@ -Restoring [`GLOBAL`]({% link {{ page.version.version }}/table-localities.md %}#global-tables) and [`REGIONAL BY TABLE`]({% link {{ page.version.version }}/table-localities.md %}#regional-tables) tables into a **non**-multi-region database is not supported. 
[#71502](https://github.com/cockroachdb/cockroach/issues/71502) +Restoring [`GLOBAL`]({% link {{ page.version.version }}/table-localities.md %}#global-tables) and [`REGIONAL BY TABLE`]({% link {{ page.version.version }}/table-localities.md %}#regional-tables) tables into a **non**-multi-region database is not supported. #71502 diff --git a/src/current/_includes/v24.2/known-limitations/restore-udf.md b/src/current/_includes/v24.2/known-limitations/restore-udf.md index a4a4bc080fe..e4180219a25 100644 --- a/src/current/_includes/v24.2/known-limitations/restore-udf.md +++ b/src/current/_includes/v24.2/known-limitations/restore-udf.md @@ -1 +1 @@ -`RESTORE` will not restore a table that references a [UDF]({% link {{ page.version.version }}/user-defined-functions.md %}), unless you skip restoring the function with the {% if page.name == "restore.md" %} [`skip_missing_udfs`](#skip-missing-udfs) {% else %} [`skip_missing_udfs`]({% link {{ page.version.version }}/restore.md %}#skip-missing-udfs) {% endif %} option. Alternatively, take a [database-level backup]({% link {{ page.version.version }}/backup.md %}#back-up-a-database) to include everything needed to restore the table. [#118195](https://github.com/cockroachdb/cockroach/issues/118195) \ No newline at end of file +`RESTORE` will not restore a table that references a [UDF]({% link {{ page.version.version }}/user-defined-functions.md %}), unless you skip restoring the function with the {% if page.name == "restore.md" %} [`skip_missing_udfs`](#skip-missing-udfs) {% else %} [`skip_missing_udfs`]({% link {{ page.version.version }}/restore.md %}#skip-missing-udfs) {% endif %} option. Alternatively, take a [database-level backup]({% link {{ page.version.version }}/backup.md %}#back-up-a-database) to include everything needed to restore the table. 
#118195 \ No newline at end of file diff --git a/src/current/_includes/v24.2/known-limitations/routine-limitations.md b/src/current/_includes/v24.2/known-limitations/routine-limitations.md index 4718c6c7abf..fa16791b56a 100644 --- a/src/current/_includes/v24.2/known-limitations/routine-limitations.md +++ b/src/current/_includes/v24.2/known-limitations/routine-limitations.md @@ -1,10 +1,10 @@ {% if page.name != "known-limitations.md" # New limitations in v24.2 %} {% endif %} -- Routines cannot be invoked with named arguments, e.g., `SELECT foo(a => 1, b => 2);` or `SELECT foo(b := 1, a := 2);`. [#122264](https://github.com/cockroachdb/cockroach/issues/122264) -- Routines cannot be created if they reference temporary tables. [#121375](https://github.com/cockroachdb/cockroach/issues/121375) -- Routines cannot be created with unnamed `INOUT` parameters. For example, `CREATE PROCEDURE p(INOUT INT) AS $$ BEGIN NULL; END; $$ LANGUAGE PLpgSQL;`. [#121251](https://github.com/cockroachdb/cockroach/issues/121251) -- Routines cannot be created if they return fewer columns than declared. For example, `CREATE FUNCTION f(OUT sum INT, INOUT a INT, INOUT b INT) LANGUAGE SQL AS $$ SELECT (a + b, b); $$;`. [#121247](https://github.com/cockroachdb/cockroach/issues/121247) -- Routines cannot be created with an `OUT` parameter of type `RECORD`. [#123448](https://github.com/cockroachdb/cockroach/issues/123448) -- DDL statements (e.g., `CREATE TABLE`, `CREATE INDEX`) are not allowed within UDFs or stored procedures. [#110080](https://github.com/cockroachdb/cockroach/issues/110080) -- Polymorphic types cannot be cast to other types (e.g., `TEXT`) within routine parameters. [#123536](https://github.com/cockroachdb/cockroach/issues/123536) -- Routine parameters and return types cannot be declared using the `ANYENUM` polymorphic type, which is able to match any [`ENUM`]({% link {{ page.version.version }}/enum.md %}) type. 
[123048](https://github.com/cockroachdb/cockroach/issues/123048) \ No newline at end of file +- Routines cannot be invoked with named arguments, e.g., `SELECT foo(a => 1, b => 2);` or `SELECT foo(b := 1, a := 2);`. #122264 +- Routines cannot be created if they reference temporary tables. #121375 +- Routines cannot be created with unnamed `INOUT` parameters. For example, `CREATE PROCEDURE p(INOUT INT) AS $$ BEGIN NULL; END; $$ LANGUAGE PLpgSQL;`. #121251 +- Routines cannot be created if they return fewer columns than declared. For example, `CREATE FUNCTION f(OUT sum INT, INOUT a INT, INOUT b INT) LANGUAGE SQL AS $$ SELECT (a + b, b); $$;`. #121247 +- Routines cannot be created with an `OUT` parameter of type `RECORD`. #123448 +- DDL statements (e.g., `CREATE TABLE`, `CREATE INDEX`) are not allowed within UDFs or stored procedures. #110080 +- Polymorphic types cannot be cast to other types (e.g., `TEXT`) within routine parameters. #123536 +- Routine parameters and return types cannot be declared using the `ANYENUM` polymorphic type, which is able to match any [`ENUM`]({% link {{ page.version.version }}/enum.md %}) type. #123048 \ No newline at end of file diff --git a/src/current/_includes/v24.2/known-limitations/select-for-update-limitations.md b/src/current/_includes/v24.2/known-limitations/select-for-update-limitations.md index 73aaf9fdeb6..210099bacd7 100644 --- a/src/current/_includes/v24.2/known-limitations/select-for-update-limitations.md +++ b/src/current/_includes/v24.2/known-limitations/select-for-update-limitations.md @@ -1,4 +1,4 @@ -- `SKIP LOCKED` cannot be used for tables with multiple [column families]({% link {{ page.version.version }}/column-families.md %}). [#160961](https://github.com/cockroachdb/cockroach/issues/160961) +- `SKIP LOCKED` cannot be used for tables with multiple [column families]({% link {{ page.version.version }}/column-families.md %}). #160961 - By default under `SERIALIZABLE` isolation, locks acquired using `SELECT ...
FOR UPDATE` and `SELECT ... FOR SHARE` are implemented as fast, in-memory [unreplicated locks](architecture/transaction-layer.html#unreplicated-locks). If a [lease transfer]({% link {{ page.version.version }}/architecture/replication-layer.md %}#epoch-based-leases-table-data) or [range split/merge]({% link {{ page.version.version }}/architecture/distribution-layer.md %}#range-merges) occurs on a range held by an unreplicated lock, the lock is dropped. The following behaviors can occur: - The desired ordering of concurrent accesses to one or more rows of a table expressed by your use of `SELECT ... FOR UPDATE` may not be preserved (that is, a transaction _B_ against some table _T_ that was supposed to wait behind another transaction _A_ operating on _T_ may not wait for transaction _A_). - The transaction that acquired the (now dropped) unreplicated lock may fail to commit, leading to [transaction retry errors with code `40001`]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}) and the [`restart transaction` error message]({% link {{ page.version.version }}/common-errors.md %}#restart-transaction). 
diff --git a/src/current/_includes/v24.2/known-limitations/set-transaction-no-rollback.md b/src/current/_includes/v24.2/known-limitations/set-transaction-no-rollback.md index 414cbac6282..5f0145c05c4 100644 --- a/src/current/_includes/v24.2/known-limitations/set-transaction-no-rollback.md +++ b/src/current/_includes/v24.2/known-limitations/set-transaction-no-rollback.md @@ -14,4 +14,4 @@ timezone 3 ~~~ -[#69396](https://github.com/cockroachdb/cockroach/issues/69396) +#69396 diff --git a/src/current/_includes/v24.2/known-limitations/show-backup-symlink.md b/src/current/_includes/v24.2/known-limitations/show-backup-symlink.md index 38ba86fb28f..1c7a5612242 100644 --- a/src/current/_includes/v24.2/known-limitations/show-backup-symlink.md +++ b/src/current/_includes/v24.2/known-limitations/show-backup-symlink.md @@ -1 +1 @@ -[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}) does not support listing backups if the [`nodelocal`]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}) storage location is a symlink. [#70260](https://github.com/cockroachdb/cockroach/issues/70260) \ No newline at end of file +[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}) does not support listing backups if the [`nodelocal`]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}) storage location is a symlink. #70260 \ No newline at end of file diff --git a/src/current/_includes/v24.2/known-limitations/sql-cursors.md b/src/current/_includes/v24.2/known-limitations/sql-cursors.md index 4c047aa9603..93bbca937b1 100644 --- a/src/current/_includes/v24.2/known-limitations/sql-cursors.md +++ b/src/current/_includes/v24.2/known-limitations/sql-cursors.md @@ -1,9 +1,9 @@ CockroachDB implements SQL {% if page.name == "known-limitations.md" %} [cursor]({% link {{ page.version.version }}/cursors.md %}) {% else %} cursor {% endif %} support with the following limitations: -- `DECLARE` only supports forward cursors. 
Reverse cursors created with `DECLARE SCROLL` are not supported. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- `FETCH` supports forward, relative, and absolute variants, but only for forward cursors. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- `BINARY CURSOR`, which returns data in the Postgres binary format, is not supported. [#77099](https://github.com/cockroachdb/cockroach/issues/77099) -- `WITH HOLD`, which allows keeping a cursor open for longer than a transaction by writing its results into a buffer, is accepted as valid syntax within a single transaction but is not supported. It acts as a no-op and does not actually perform the function of `WITH HOLD`, which is to make the cursor live outside its parent transaction. Instead, if you are using `WITH HOLD`, you will be forced to close that cursor within the transaction it was created in. [#77101](https://github.com/cockroachdb/cockroach/issues/77101) +- `DECLARE` only supports forward cursors. Reverse cursors created with `DECLARE SCROLL` are not supported. #77102 +- `FETCH` supports forward, relative, and absolute variants, but only for forward cursors. #77102 +- `BINARY CURSOR`, which returns data in the Postgres binary format, is not supported. #77099 +- `WITH HOLD`, which allows keeping a cursor open for longer than a transaction by writing its results into a buffer, is accepted as valid syntax within a single transaction but is not supported. It acts as a no-op and does not actually perform the function of `WITH HOLD`, which is to make the cursor live outside its parent transaction. Instead, if you are using `WITH HOLD`, you will be forced to close that cursor within the transaction it was created in. 
#77101 - This syntax is accepted (but does not have any effect): {% include_cached copy-clipboard.html %} ~~~ sql @@ -19,6 +19,6 @@ CockroachDB implements SQL {% if page.name == "known-limitations.md" %} [cursor] DECLARE test_cur CURSOR WITH HOLD FOR SELECT * FROM foo ORDER BY bar; COMMIT; -- This will fail with an error because CLOSE test_cur was not called inside the transaction. ~~~ -- Scrollable cursor (also known as reverse `FETCH`) is not supported. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- [`SELECT ... FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) with a cursor is not supported. [#77103](https://github.com/cockroachdb/cockroach/issues/77103) -- Respect for [`SAVEPOINT`s]({% link {{ page.version.version }}/savepoint.md %}) is not supported. Cursor definitions do not disappear properly if rolled back to a `SAVEPOINT` from before they were created. [#77104](https://github.com/cockroachdb/cockroach/issues/77104) +- Scrollable cursor (also known as reverse `FETCH`) is not supported. #77102 +- [`SELECT ... FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) with a cursor is not supported. #77103 +- Respect for [`SAVEPOINT`s]({% link {{ page.version.version }}/savepoint.md %}) is not supported. Cursor definitions do not disappear properly if rolled back to a `SAVEPOINT` from before they were created. #77104 diff --git a/src/current/_includes/v24.2/known-limitations/srid-4326-limitations.md b/src/current/_includes/v24.2/known-limitations/srid-4326-limitations.md index b556a9fbecd..294a1a53bd4 100644 --- a/src/current/_includes/v24.2/known-limitations/srid-4326-limitations.md +++ b/src/current/_includes/v24.2/known-limitations/srid-4326-limitations.md @@ -1 +1 @@ -Defining a custom SRID by inserting rows into [`spatial_ref_sys`]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial_ref_sys) is not currently supported. 
[#55903](https://github.com/cockroachdb/cockroach/issues/55903) \ No newline at end of file +Defining a custom SRID by inserting rows into [`spatial_ref_sys`]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial_ref_sys) is not currently supported. #55903 \ No newline at end of file diff --git a/src/current/_includes/v24.2/known-limitations/stats-refresh-upgrade.md b/src/current/_includes/v24.2/known-limitations/stats-refresh-upgrade.md index 3d5a8d26325..3325dfaa53b 100644 --- a/src/current/_includes/v24.2/known-limitations/stats-refresh-upgrade.md +++ b/src/current/_includes/v24.2/known-limitations/stats-refresh-upgrade.md @@ -1 +1 @@ -- The [automatic statistics refresher]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-statistics-refresh-rate) automatically checks whether it needs to refresh statistics for every table in the database upon startup of each node in the cluster. If statistics for a table have not been refreshed in a while, this will trigger collection of statistics for that table. If statistics have been refreshed recently, it will not force a refresh. As a result, the automatic statistics refresher does not necessarily perform a refresh of statistics after an [upgrade]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}). This could cause a problem, for example, if the upgrade moves from a version without [histograms]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-histogram-collection) to a version with histograms. To refresh statistics manually, use [`CREATE STATISTICS`](create-statistics.html). [#54816](https://github.com/cockroachdb/cockroach/issues/54816) +- The [automatic statistics refresher]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-statistics-refresh-rate) automatically checks whether it needs to refresh statistics for every table in the database upon startup of each node in the cluster. 
If statistics for a table have not been refreshed in a while, this will trigger collection of statistics for that table. If statistics have been refreshed recently, it will not force a refresh. As a result, the automatic statistics refresher does not necessarily perform a refresh of statistics after an [upgrade]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}). This could cause a problem, for example, if the upgrade moves from a version without [histograms]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-histogram-collection) to a version with histograms. To refresh statistics manually, use [`CREATE STATISTICS`](create-statistics.html). #54816 diff --git a/src/current/_includes/v24.2/known-limitations/stored-proc-limitations.md b/src/current/_includes/v24.2/known-limitations/stored-proc-limitations.md index b2ba1b61562..bce2e47d6b8 100644 --- a/src/current/_includes/v24.2/known-limitations/stored-proc-limitations.md +++ b/src/current/_includes/v24.2/known-limitations/stored-proc-limitations.md @@ -1,3 +1,3 @@ {% if page.name != "known-limitations.md" # New limitations in v24.2 %} {% endif %} -- `COMMIT` and `ROLLBACK` statements are not supported within nested procedures. [#122266](https://github.com/cockroachdb/cockroach/issues/122266) \ No newline at end of file +- `COMMIT` and `ROLLBACK` statements are not supported within nested procedures. #122266 \ No newline at end of file diff --git a/src/current/_includes/v24.2/known-limitations/trigram-unsupported-syntax.md b/src/current/_includes/v24.2/known-limitations/trigram-unsupported-syntax.md index 494730c7ae8..a981c4915b0 100644 --- a/src/current/_includes/v24.2/known-limitations/trigram-unsupported-syntax.md +++ b/src/current/_includes/v24.2/known-limitations/trigram-unsupported-syntax.md @@ -6,4 +6,4 @@ - Acceleration on [regex string matching]({% link {{ page.version.version }}/scalar-expressions.md %}#string-matching-using-posix-regular-expressions). 
- `%` comparisons, `show_trgm`, and trigram index creation on [collated strings]({% link {{ page.version.version }}/collate.md %}). -[#41285](https://github.com/cockroachdb/cockroach/issues/41285) \ No newline at end of file +#41285 \ No newline at end of file diff --git a/src/current/_includes/v24.2/known-limitations/udf-limitations.md b/src/current/_includes/v24.2/known-limitations/udf-limitations.md index 57011914407..e0cbfd9bd7e 100644 --- a/src/current/_includes/v24.2/known-limitations/udf-limitations.md +++ b/src/current/_includes/v24.2/known-limitations/udf-limitations.md @@ -1,10 +1,10 @@ {% if page.name != "known-limitations.md" # New limitations in v24.2 %} {% endif %} -- A `RECORD`-returning UDF cannot be created without a `RETURN` statement in the root block, which would restrict the wildcard type to a concrete one. [#122945](https://github.com/cockroachdb/cockroach/issues/122945) +- A `RECORD`-returning UDF cannot be created without a `RETURN` statement in the root block, which would restrict the wildcard type to a concrete one. #122945 - User-defined functions are not currently supported in: - - Expressions (column, index, constraint) in tables. [#87699](https://github.com/cockroachdb/cockroach/issues/87699) - - Views. [#87699](https://github.com/cockroachdb/cockroach/issues/87699) -- User-defined functions cannot call themselves recursively. [#93049](https://github.com/cockroachdb/cockroach/issues/93049) -- [Common table expressions]({% link {{ page.version.version }}/common-table-expressions.md %}) (CTE), recursive or non-recursive, are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}) (UDF). That is, you cannot use a `WITH` clause in the body of a UDF. [#92961](https://github.com/cockroachdb/cockroach/issues/92961) -- The `setval` function cannot be resolved when used inside UDF bodies. 
[#110860](https://github.com/cockroachdb/cockroach/issues/110860) -- Casting subqueries to [user-defined types]({% link {{ page.version.version }}/create-type.md %}) in UDFs is not supported. [#108184](https://github.com/cockroachdb/cockroach/issues/108184) \ No newline at end of file + - Expressions (column, index, constraint) in tables. #87699 + - Views. #87699 +- User-defined functions cannot call themselves recursively. #93049 +- [Common table expressions]({% link {{ page.version.version }}/common-table-expressions.md %}) (CTE), recursive or non-recursive, are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}) (UDF). That is, you cannot use a `WITH` clause in the body of a UDF. #92961 +- The `setval` function cannot be resolved when used inside UDF bodies. #110860 +- Casting subqueries to [user-defined types]({% link {{ page.version.version }}/create-type.md %}) in UDFs is not supported. #108184 \ No newline at end of file diff --git a/src/current/_includes/v24.2/known-limitations/vectorized-engine-limitations.md b/src/current/_includes/v24.2/known-limitations/vectorized-engine-limitations.md index daea59ebf88..da227ad4825 100644 --- a/src/current/_includes/v24.2/known-limitations/vectorized-engine-limitations.md +++ b/src/current/_includes/v24.2/known-limitations/vectorized-engine-limitations.md @@ -1,2 +1,2 @@ -- The vectorized engine does not support queries containing a join filtered with an [`ON` expression]({% link {{ page.version.version }}/joins.md %}#supported-join-conditions). [#38018](https://github.com/cockroachdb/cockroach/issues/38018) +- The vectorized engine does not support queries containing a join filtered with an [`ON` expression]({% link {{ page.version.version }}/joins.md %}#supported-join-conditions). #38018 - The vectorized engine does not support [working with spatial data]({% link {{ page.version.version }}/export-spatial-data.md %}). 
Queries with [geospatial functions]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) or [spatial data]({% link {{ page.version.version }}/export-spatial-data.md %}) will revert to the row-oriented execution engine. diff --git a/src/current/_includes/v24.2/misc/tooling.md b/src/current/_includes/v24.2/misc/tooling.md index dcd24363435..01dc63c840f 100644 --- a/src/current/_includes/v24.2/misc/tooling.md +++ b/src/current/_includes/v24.2/misc/tooling.md @@ -23,7 +23,7 @@ Customers should contact their account team before moving production workloads t Unless explicitly stated, support for a [driver](#drivers) or [data access framework](#data-access-frameworks-e-g-orms) does not include [automatic, client-side transaction retry handling]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#client-side-retry-handling). For client-side transaction retry handling samples, see [Example Apps]({% link {{ page.version.version }}/example-apps.md %}). {{site.data.alerts.end}} -If you encounter problems using CockroachDB with any of the tools listed on this page, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward better support. +If you encounter problems using CockroachDB with any of the tools listed on this page, please open an issue with details to help us make progress toward better support. For a list of tools supported by the CockroachDB community, see [Third-Party Tools Supported by the Community]({% link {{ page.version.version }}/community-tooling.md %}). 
@@ -32,23 +32,23 @@ For a list of tools supported by the CockroachDB community, see [Third-Party Too | Language | Driver | Latest tested version | Support level | CockroachDB adapter | Tutorial | |----------+--------+-----------------------+---------------------+---------------------+----------| | C | [libpq](http://www.postgresql.org/docs/13/static/libpq.html)| PostgreSQL 13 | Partial | N/A | N/A | -| C# (.NET) | [Npgsql](https://www.nuget.org/packages/Npgsql/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/npgsql.go ||var npgsqlSupportedTag = "v||"\n\n %} | Full | N/A | [Build a C# App with CockroachDB (Npgsql)](build-a-csharp-app-with-cockroachdb.html) | -| Go | [pgx](https://github.com/jackc/pgx/releases)


[pq](https://github.com/lib/pq) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgx.go ||var supportedPGXTag = "||"\n\n %}
(use latest version of CockroachDB adapter)
{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/libpq.go ||var libPQSupportedTag = "||"\n\n %} | Full


Full | [`crdbpgx`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbpgx)
(includes client-side transaction retry handling)
N/A | [Build a Go App with CockroachDB (pgx)](build-a-go-app-with-cockroachdb.html)


[Build a Go App with CockroachDB (pq)](build-a-go-app-with-cockroachdb-pq.html) | -| Java | [JDBC](https://jdbc.postgresql.org/download/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgjdbc.go ||var supportedPGJDBCTag = "||"\n\n %} | Full | N/A | [Build a Java App with CockroachDB (JDBC)](build-a-java-app-with-cockroachdb.html) | +| C# (.NET) | [Npgsql](https://www.nuget.org/packages/Npgsql/) | 7.0.2 | Full | N/A | [Build a C# App with CockroachDB (Npgsql)](build-a-csharp-app-with-cockroachdb.html) | +| Go | [pgx](https://github.com/jackc/pgx/releases)


[pq](https://github.com/lib/pq) | v5.3.1
(use latest version of CockroachDB adapter)
v1.10.5 | Full


Full | [`crdbpgx`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbpgx)
(includes client-side transaction retry handling)
N/A | [Build a Go App with CockroachDB (pgx)](build-a-go-app-with-cockroachdb.html)


[Build a Go App with CockroachDB (pq)](build-a-go-app-with-cockroachdb-pq.html) | +| Java | [JDBC](https://jdbc.postgresql.org/download/) | REL42.7.3 | Full | N/A | [Build a Java App with CockroachDB (JDBC)](build-a-java-app-with-cockroachdb.html) | | JavaScript | [pg](https://www.npmjs.com/package/pg) | 8.2.1 | Full | N/A | [Build a Node.js App with CockroachDB (pg)](build-a-nodejs-app-with-cockroachdb.html) | -| Python | [psycopg3](https://www.psycopg.org/psycopg3/docs/)


[psycopg2](https://www.psycopg.org/docs/install.html)


[asyncpg](https://magicstack.github.io/asyncpg/current/index.html) | 3.0.16


2.8.6


{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/asyncpg.go || var asyncpgSupportedTag = "||"\n\n %} | Full


Full


Partial | N/A


N/A


N/A | [Build a Python App with CockroachDB (psycopg3)](build-a-python-app-with-cockroachdb-psycopg3.html)


[Build a Python App with CockroachDB (psycopg2)](build-a-python-app-with-cockroachdb.html)


[Build a Python App with CockroachDB (asyncpg)](build-a-python-app-with-cockroachdb-asyncpg.html) | -| Ruby | [pg](https://rubygems.org/gems/pg) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/ruby_pg.go ||var rubyPGVersion = "||"\n\n %} | Full | N/A | [Build a Ruby App with CockroachDB (pg)](build-a-ruby-app-with-cockroachdb.html) | +| Python | [psycopg3](https://www.psycopg.org/psycopg3/docs/)


[psycopg2](https://www.psycopg.org/docs/install.html)


[asyncpg](https://magicstack.github.io/asyncpg/current/index.html) | 3.0.16


2.8.6


v0.24.0 | Full


Full


Partial | N/A


N/A


N/A | [Build a Python App with CockroachDB (psycopg3)](build-a-python-app-with-cockroachdb-psycopg3.html)


[Build a Python App with CockroachDB (psycopg2)](build-a-python-app-with-cockroachdb.html)


[Build a Python App with CockroachDB (asyncpg)](build-a-python-app-with-cockroachdb-asyncpg.html) | +| Ruby | [pg](https://rubygems.org/gems/pg) | v1.4.6 | Full | N/A | [Build a Ruby App with CockroachDB (pg)](build-a-ruby-app-with-cockroachdb.html) | | Rust | [rust-postgres](https://github.com/sfackler/rust-postgres) | 0.19.2 | Partial | N/A | [Build a Rust App with CockroachDB]({% link {{ page.version.version }}/build-a-rust-app-with-cockroachdb.md %}) | ## Data access frameworks (e.g., ORMs) | Language | Framework | Latest tested version | Support level | CockroachDB adapter | Tutorial | |----------+-----------+-----------------------+---------------+---------------------+----------| -| Go | [GORM](https://github.com/jinzhu/gorm/releases)


[go-pg](https://github.com/go-pg/pg)
[upper/db](https://github.com/upper/db) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gorm.go ||var gormSupportedTag = "||"\n\n %}


{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gopg.go ||var gopgSupportedTag = "||"\n\n %}
v4 | Full


Full
Full | [`crdbgorm`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbgorm)
(includes client-side transaction retry handling)
N/A
N/A | [Build a Go App with CockroachDB (GORM)](build-a-go-app-with-cockroachdb-gorm.html)


N/A
[Build a Go App with CockroachDB (upper/db)](build-a-go-app-with-cockroachdb-upperdb.html) | -| Java | [Hibernate](https://hibernate.org/orm/)
(including [Hibernate Spatial](https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html#spatial))
[jOOQ](https://www.jooq.org/)
[MyBatis](https://mybatis.org/mybatis-3/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/hibernate.go ||var supportedHibernateTag = "||"\n\n %} (must be at least 5.4.19)


3.13.2 (must be at least 3.13.0)
3.5.5| Full


Full
Full | N/A


N/A
N/A | [Build a Java App with CockroachDB (Hibernate)](build-a-java-app-with-cockroachdb-hibernate.html)


[Build a Java App with CockroachDB (jOOQ)](build-a-java-app-with-cockroachdb-jooq.html)
[Build a Spring App with CockroachDB (MyBatis)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) | -| JavaScript/TypeScript | [Sequelize](https://www.npmjs.com/package/sequelize)


[Knex.js](https://knexjs.org/)
[Prisma](https://prisma.io)
[TypeORM](https://www.npmjs.com/package/typeorm) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/sequelize.go ||var supportedSequelizeCockroachDBRelease = "||"\n\n %}
(use latest version of CockroachDB adapter)
{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/knex.go ||const supportedKnexTag = "||"\n\n %}
3.14.0
0.3.17 {% comment %}{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/typeorm.go ||const supportedTypeORMRelease = "||"\n %}{% endcomment %} | Full


Full
Full
Full | [`sequelize-cockroachdb`](https://www.npmjs.com/package/sequelize-cockroachdb)


N/A
N/A
N/A | [Build a Node.js App with CockroachDB (Sequelize)](build-a-nodejs-app-with-cockroachdb-sequelize.html)


[Build a Node.js App with CockroachDB (Knex.js)](build-a-nodejs-app-with-cockroachdb-knexjs.html)
[Build a Node.js App with CockroachDB (Prisma)](build-a-nodejs-app-with-cockroachdb-prisma.html)
[Build a TypeScript App with CockroachDB (TypeORM)](build-a-typescript-app-with-cockroachdb.html) | -| Ruby | [ActiveRecord](https://rubygems.org/gems/activerecord)
[RGeo/RGeo-ActiveRecord](https://github.com/cockroachdb/activerecord-cockroachdb-adapter#working-with-spatial-data) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/activerecord.go ||var supportedRailsVersion = "||"\nvar %}
(use latest version of CockroachDB adapter) | Full | [`activerecord-cockroachdb-adapter`](https://rubygems.org/gems/activerecord-cockroachdb-adapter)
(includes client-side transaction retry handling) | [Build a Ruby App with CockroachDB (ActiveRecord)](build-a-ruby-app-with-cockroachdb-activerecord.html) | -| Python | [Django](https://pypi.org/project/Django/)
(including [GeoDjango](https://docs.djangoproject.com/en/3.1/ref/contrib/gis/))
[peewee](https://github.com/coleifer/peewee/)
[SQLAlchemy](https://www.sqlalchemy.org/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/django.go ||var djangoSupportedTag = "cockroach-||"\nvar %}
(use latest version of CockroachDB adapter)

3.13.3
0.7.13
1.4.17
(use latest version of CockroachDB adapter) | Full


Full
Full
Full | [`django-cockroachdb`](https://pypi.org/project/django-cockroachdb/)


N/A
N/A
[`sqlalchemy-cockroachdb`](https://pypi.org/project/sqlalchemy-cockroachdb)
(includes client-side transaction retry handling) | [Build a Python App with CockroachDB (Django)](build-a-python-app-with-cockroachdb-django.html)


N/A (See [peewee docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cockroach-database).)
[Build a Python App with CockroachDB (SQLAlchemy)](build-a-python-app-with-cockroachdb-sqlalchemy.html) | +| Go | [GORM](https://github.com/jinzhu/gorm/releases)


[go-pg](https://github.com/go-pg/pg)
[upper/db](https://github.com/upper/db) | v1.24.1


v10.9.0
v4 | Full


Full
Full | [`crdbgorm`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbgorm)
(includes client-side transaction retry handling)
N/A
N/A | [Build a Go App with CockroachDB (GORM)](build-a-go-app-with-cockroachdb-gorm.html)


N/A
[Build a Go App with CockroachDB (upper/db)](build-a-go-app-with-cockroachdb-upperdb.html) | +| Java | [Hibernate](https://hibernate.org/orm/)
(including [Hibernate Spatial](https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html#spatial))
[jOOQ](https://www.jooq.org/)
[MyBatis](https://mybatis.org/mybatis-3/) | 6.6.20 (must be at least 5.4.19)


3.13.2 (must be at least 3.13.0)
3.5.5| Full


Full
Full | N/A


N/A
N/A | [Build a Java App with CockroachDB (Hibernate)](build-a-java-app-with-cockroachdb-hibernate.html)


[Build a Java App with CockroachDB (jOOQ)](build-a-java-app-with-cockroachdb-jooq.html)
[Build a Spring App with CockroachDB (MyBatis)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) | +| JavaScript/TypeScript | [Sequelize](https://www.npmjs.com/package/sequelize)


[Knex.js](https://knexjs.org/)
[Prisma](https://prisma.io)
[TypeORM](https://www.npmjs.com/package/typeorm) | v6.0.5
(use latest version of CockroachDB adapter)
2.5.1
3.14.0
0.3.17 {% comment %}remove-unsafe-crdb-setting{% endcomment %} | Full


Full
Full
Full | [`sequelize-cockroachdb`](https://www.npmjs.com/package/sequelize-cockroachdb)


N/A
N/A
N/A | [Build a Node.js App with CockroachDB (Sequelize)](build-a-nodejs-app-with-cockroachdb-sequelize.html)


[Build a Node.js App with CockroachDB (Knex.js)](build-a-nodejs-app-with-cockroachdb-knexjs.html)
[Build a Node.js App with CockroachDB (Prisma)](build-a-nodejs-app-with-cockroachdb-prisma.html)
[Build a TypeScript App with CockroachDB (TypeORM)](build-a-typescript-app-with-cockroachdb.html) | +| Ruby | [ActiveRecord](https://rubygems.org/gems/activerecord)
[RGeo/RGeo-ActiveRecord](https://github.com/cockroachdb/activerecord-cockroachdb-adapter#working-with-spatial-data) | 8.1.0
(use latest version of CockroachDB adapter) | Full | [`activerecord-cockroachdb-adapter`](https://rubygems.org/gems/activerecord-cockroachdb-adapter)
(includes client-side transaction retry handling) | [Build a Ruby App with CockroachDB (ActiveRecord)](build-a-ruby-app-with-cockroachdb-activerecord.html) | +| Python | [Django](https://pypi.org/project/Django/)
(including [GeoDjango](https://docs.djangoproject.com/en/3.1/ref/contrib/gis/))
[peewee](https://github.com/coleifer/peewee/)
[SQLAlchemy](https://www.sqlalchemy.org/) | 4.1.x
(use latest version of CockroachDB adapter)

3.13.3
0.7.13
1.4.17
(use latest version of CockroachDB adapter) | Full


Full
Full
Full | [`django-cockroachdb`](https://pypi.org/project/django-cockroachdb/)


N/A
N/A
[`sqlalchemy-cockroachdb`](https://pypi.org/project/sqlalchemy-cockroachdb)
(includes client-side transaction retry handling) | [Build a Python App with CockroachDB (Django)](build-a-python-app-with-cockroachdb-django.html)


N/A (See [peewee docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cockroach-database).)
[Build a Python App with CockroachDB (SQLAlchemy)](build-a-python-app-with-cockroachdb-sqlalchemy.html) | ## Application frameworks diff --git a/src/current/_includes/v24.2/orchestration/start-cockroachdb-insecure.md b/src/current/_includes/v24.2/orchestration/start-cockroachdb-insecure.md index 3406d48edbb..c9af5cc0267 100644 --- a/src/current/_includes/v24.2/orchestration/start-cockroachdb-insecure.md +++ b/src/current/_includes/v24.2/orchestration/start-cockroachdb-insecure.md @@ -1,10 +1,10 @@ -1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it. +1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it. - Download [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml): + Download [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml ~~~ {{site.data.alerts.callout_info}} @@ -27,11 +27,11 @@ Alternatively, if you'd rather start with a configuration file that has been customized for performance: - 1. 
Download our [performance version of `cockroachdb-statefulset-insecure.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml): + 1. Download our [performance version of `cockroachdb-statefulset-insecure.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml ~~~ 1. Modify the file wherever there is a `TODO` comment. @@ -72,12 +72,12 @@ pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s ~~~ -1. Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: +1. 
Use our [`cluster-init.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml ~~~ ~~~ diff --git a/src/current/_includes/v24.2/orchestration/start-cockroachdb-local-insecure.md b/src/current/_includes/v24.2/orchestration/start-cockroachdb-local-insecure.md index 552cb3cd25f..196a53bce4c 100644 --- a/src/current/_includes/v24.2/orchestration/start-cockroachdb-local-insecure.md +++ b/src/current/_includes/v24.2/orchestration/start-cockroachdb-local-insecure.md @@ -1,8 +1,8 @@ -1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it: +1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it: {% include_cached copy-clipboard.html %} ~~~ shell - $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml + $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml ~~~ ~~~ @@ -41,12 +41,12 @@ pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s ~~~ -1. 
Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: +1. Use our [`cluster-init.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml ~~~ ~~~ diff --git a/src/current/_includes/v24.2/orchestration/start-cockroachdb-secure.md b/src/current/_includes/v24.2/orchestration/start-cockroachdb-secure.md index 972cabc2d8e..a7299e1aa25 100644 --- a/src/current/_includes/v24.2/orchestration/start-cockroachdb-secure.md +++ b/src/current/_includes/v24.2/orchestration/start-cockroachdb-secure.md @@ -1,10 +1,10 @@ ### Configure the cluster -1. Download and modify our [StatefulSet configuration](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml): +1. Download and modify our [StatefulSet configuration](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml ~~~ 1. Update `secretName` with the name of the corresponding node secret. 
diff --git a/src/current/_includes/v24.2/orchestration/test-cluster-secure.md b/src/current/_includes/v24.2/orchestration/test-cluster-secure.md index f255d8d62fc..c146d634626 100644 --- a/src/current/_includes/v24.2/orchestration/test-cluster-secure.md +++ b/src/current/_includes/v24.2/orchestration/test-cluster-secure.md @@ -42,7 +42,7 @@ $ kubectl create \ {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ --f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/client.yaml +-f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/client.yaml ~~~ ~~~ diff --git a/src/current/_includes/v24.2/prod-deployment/decommission-pre-flight-checks.md b/src/current/_includes/v24.2/prod-deployment/decommission-pre-flight-checks.md index b267379384b..70e35e4b72e 100644 --- a/src/current/_includes/v24.2/prod-deployment/decommission-pre-flight-checks.md +++ b/src/current/_includes/v24.2/prod-deployment/decommission-pre-flight-checks.md @@ -14,5 +14,5 @@ Failed running "node decommission" These checks can be skipped by [passing the flag `--checks=skip` to `cockroach node decommission`]({% link {{ page.version.version }}/cockroach-node.md %}#decommission-checks). {{site.data.alerts.callout_info}} -The amount of remaining disk space on other nodes in the cluster is not yet considered as part of the decommissioning pre-flight checks. For more information, see [cockroachdb/cockroach#71757](https://github.com/cockroachdb/cockroach/issues/71757) +The amount of remaining disk space on other nodes in the cluster is not yet considered as part of the decommissioning pre-flight checks. 
For more information, see cockroachdb/cockroach#71757 {{site.data.alerts.end}} diff --git a/src/current/_includes/v24.2/sql/savepoints-and-high-priority-transactions.md b/src/current/_includes/v24.2/sql/savepoints-and-high-priority-transactions.md index c6de489e641..43f09ea415f 100644 --- a/src/current/_includes/v24.2/sql/savepoints-and-high-priority-transactions.md +++ b/src/current/_includes/v24.2/sql/savepoints-and-high-priority-transactions.md @@ -1 +1 @@ -[`ROLLBACK TO SAVEPOINT`]({% link {{ page.version.version }}/rollback-transaction.md %}#rollback-a-nested-transaction) (for either regular savepoints or "restart savepoints" defined with `cockroach_restart`) causes a "feature not supported" error after a DDL statement in a [`HIGH PRIORITY` transaction]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities), in order to avoid a transaction deadlock. For more information, see GitHub issue [#46414](https://www.github.com/cockroachdb/cockroach/issues/46414). +[`ROLLBACK TO SAVEPOINT`]({% link {{ page.version.version }}/rollback-transaction.md %}#rollback-a-nested-transaction) (for either regular savepoints or "restart savepoints" defined with `cockroach_restart`) causes a "feature not supported" error after a DDL statement in a [`HIGH PRIORITY` transaction]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities), in order to avoid a transaction deadlock. For more information, see GitHub issue #46414. diff --git a/src/current/_includes/v24.3/backward-incompatible/alpha.1.md b/src/current/_includes/v24.3/backward-incompatible/alpha.1.md index 8d4004422ad..f10881f0418 100644 --- a/src/current/_includes/v24.3/backward-incompatible/alpha.1.md +++ b/src/current/_includes/v24.3/backward-incompatible/alpha.1.md @@ -1,15 +1,15 @@ -- CockroachDB no longer performs environment variable expansion in the parameter `--certs-dir`. 
Uses like `--certs-dir='$HOME/path'` (expansion by CockroachDB) can be replaced by `--certs-dir="$HOME/path"` (expansion by the Unix shell). [#81298][#81298] -- In the Cockroach CLI, [`BOOL` values]({% link {{ page.version.version }}/bool.md %}) are now formatted as `t` or `f` instead of `True` or `False`. [#81943][#81943] -- Removed the `cockroach quit` command. It has been deprecated since v20.1. To [shut down a node]({% link {{ page.version.version }}/node-shutdown.md %}) gracefully, send a `SIGTERM` signal to it. [#82988][#82988] -- Added a cluster version to allow the [Pebble storage engine]({% link {{ page.version.version }}/architecture/storage-layer.md#pebble %}) to recombine certain SSTables (specifically, user keys that are split across multiple files in a level of the [log-structured merge-tree]({% link {{ page.version.version }}/architecture/storage-layer.md#log-structured-merge-trees %})). Recombining the split user keys is required for supporting the range keys feature. The migration to recombine the SSTables is expected to be short (split user keys are rare in practice), but will block subsequent migrations until all tables have been recombined. The `storage.marked-for-compaction-files` time series metric can show the progress of the migration. [#84887][#84887] +- CockroachDB no longer performs environment variable expansion in the parameter `--certs-dir`. Uses like `--certs-dir='$HOME/path'` (expansion by CockroachDB) can be replaced by `--certs-dir="$HOME/path"` (expansion by the Unix shell). #81298 +- In the Cockroach CLI, [`BOOL` values]({% link {{ page.version.version }}/bool.md %}) are now formatted as `t` or `f` instead of `True` or `False`. #81943 +- Removed the `cockroach quit` command. It has been deprecated since v20.1. To [shut down a node]({% link {{ page.version.version }}/node-shutdown.md %}) gracefully, send a `SIGTERM` signal to it. 
#82988 +- Added a cluster version to allow the [Pebble storage engine]({% link {{ page.version.version }}/architecture/storage-layer.md#pebble %}) to recombine certain SSTables (specifically, user keys that are split across multiple files in a level of the [log-structured merge-tree]({% link {{ page.version.version }}/architecture/storage-layer.md#log-structured-merge-trees %})). Recombining the split user keys is required for supporting the range keys feature. The migration to recombine the SSTables is expected to be short (split user keys are rare in practice), but will block subsequent migrations until all tables have been recombined. The `storage.marked-for-compaction-files` time series metric can show the progress of the migration. #84887 - Using a single TCP port listener for both RPC (node-node) and SQL client connections is now deprecated. This capability **will be removed** in the next version of CockroachDB. Instead, make one of the following configuration changes to your CockroachDB deployment: - Preferred: keep port `26257` for SQL, and allocate a new port, e.g., `26357`, for node-node RPC connections. For example, you might configure a node with the flags `--listen-addr=:26357 --sql-addr=:26257`, where subsequent nodes seeking to join would then use the flag `--join=othernode:26357,othernode:26257`. This will become the default configuration in the next version of CockroachDB. When using this mode of operation, care should be taken to use a `--join` flag that includes both the previous and new port numbers for other nodes, so that no network partition occurs during the upgrade. - - Optional: keep port `26257` for RPC, and allocate a new port, e.g., `26357`, for SQL connections. For example, you might configure a node with the flags `--listen-addr=:26257 --sql-addr=:26357`. When using this mode of operation, the `--join` flags do not need to be modified. 
However, SQL client apps or the SQL load balancer configuration (when in use) must be updated to use the new SQL port number. [#85671][#85671] -- If no `nullif` option is specified while using [`IMPORT CSV`]({% link {{ page.version.version }}/import.md %}), then a zero-length string in the input is now treated as `NULL`. The quoted empty string in the input is treated as an empty string. Similarly, if `nullif` is specified, then an unquoted value is treated as `NULL`, and a quoted value is treated as that string. These changes were made to make `IMPORT CSV` behave more similarly to `COPY CSV`. If the previous behavior (i.e., treating either quoted or unquoted values that match the `nullif` setting as `NULL`) is desired, you can use the new `allow_quoted_null` option in the `IMPORT` statement. [#84487][#84487] -- [`COPY FROM`]({% link {{ page.version.version }}/copy.md %}) operations are now atomic by default instead of being segmented into 100 row transactions. Set the `copy_from_atomic_enabled` session setting to `false` for the previous behavior. [#85986][#85986] -- The `GRANT` privilege has been removed and replaced by the more granular [`WITH GRANT OPTION`]({% link {{ page.version.version }}/grant.md %}#grant-privileges-with-the-option-to-grant-to-others), which provides control over which privileges are allowed to be granted. [#81310][#81310] -- Removed the ability to cast `int`, `int2`, and `int8` to a `0` length `BIT` or `VARBIT`. [#81266][#81266] -- Removed the deprecated `GRANT` privilege. [#81310][#81310] -- Removed the `ttl_automatic_column` storage parameter. The `crdb_internal_expiration` column is created when `ttl_expire_after` is set and removed when `ttl_expire_after` is reset. [#83134][#83134] -- Removed the byte string parameter in the `crdb_internal.schedule_sql_stats_compaction` function. [#82560][#82560] -- Changed the default value of the `enable_implicit_transaction_for_batch_statements` to `true`. 
This means that a [batch of statements]({% link {{ page.version.version }}/transactions.md %}#batched-statements) sent in one string separated by semicolons is treated as an implicit transaction. [#76834][#76834] + - Optional: keep port `26257` for RPC, and allocate a new port, e.g., `26357`, for SQL connections. For example, you might configure a node with the flags `--listen-addr=:26257 --sql-addr=:26357`. When using this mode of operation, the `--join` flags do not need to be modified. However, SQL client apps or the SQL load balancer configuration (when in use) must be updated to use the new SQL port number. #85671 +- If no `nullif` option is specified while using [`IMPORT CSV`]({% link {{ page.version.version }}/import.md %}), then a zero-length string in the input is now treated as `NULL`. The quoted empty string in the input is treated as an empty string. Similarly, if `nullif` is specified, then an unquoted value is treated as `NULL`, and a quoted value is treated as that string. These changes were made to make `IMPORT CSV` behave more similarly to `COPY CSV`. If the previous behavior (i.e., treating either quoted or unquoted values that match the `nullif` setting as `NULL`) is desired, you can use the new `allow_quoted_null` option in the `IMPORT` statement. #84487 +- [`COPY FROM`]({% link {{ page.version.version }}/copy.md %}) operations are now atomic by default instead of being segmented into 100 row transactions. Set the `copy_from_atomic_enabled` session setting to `false` for the previous behavior. #85986 +- The `GRANT` privilege has been removed and replaced by the more granular [`WITH GRANT OPTION`]({% link {{ page.version.version }}/grant.md %}#grant-privileges-with-the-option-to-grant-to-others), which provides control over which privileges are allowed to be granted. #81310 +- Removed the ability to cast `int`, `int2`, and `int8` to a `0` length `BIT` or `VARBIT`. #81266 +- Removed the deprecated `GRANT` privilege. 
#81310 +- Removed the `ttl_automatic_column` storage parameter. The `crdb_internal_expiration` column is created when `ttl_expire_after` is set and removed when `ttl_expire_after` is reset. #83134 +- Removed the byte string parameter in the `crdb_internal.schedule_sql_stats_compaction` function. #82560 +- Changed the default value of the `enable_implicit_transaction_for_batch_statements` to `true`. This means that a [batch of statements]({% link {{ page.version.version }}/transactions.md %}#batched-statements) sent in one string separated by semicolons is treated as an implicit transaction. #76834 diff --git a/src/current/_includes/v24.3/cdc/avro-udt-composite.md b/src/current/_includes/v24.3/cdc/avro-udt-composite.md index 7a34fbd3253..8374c4d2baf 100644 --- a/src/current/_includes/v24.3/cdc/avro-udt-composite.md +++ b/src/current/_includes/v24.3/cdc/avro-udt-composite.md @@ -1 +1 @@ -A changefeed in [Avro format]({% link {{ page.version.version }}/changefeed-messages.md %}#avro) will not be able to serialize [user-defined composite (tuple) types](create-type.html). [#102903](https://github.com/cockroachdb/cockroach/issues/102903) \ No newline at end of file +A changefeed in [Avro format]({% link {{ page.version.version }}/changefeed-messages.md %}#avro) will not be able to serialize [user-defined composite (tuple) types](create-type.html). #102903 \ No newline at end of file diff --git a/src/current/_includes/v24.3/cdc/csv-udt-composite.md b/src/current/_includes/v24.3/cdc/csv-udt-composite.md index 834bddd8366..39c88f3e06a 100644 --- a/src/current/_includes/v24.3/cdc/csv-udt-composite.md +++ b/src/current/_includes/v24.3/cdc/csv-udt-composite.md @@ -1 +1 @@ -A changefeed emitting [CSV]({% link {{ page.version.version }}/changefeed-messages.md %}#csv) will include `AS` labels in the message format when the changefeed serializes a [user-defined composite type]({% link {{ page.version.version }}/create-type.md %}). 
[#102905](https://github.com/cockroachdb/cockroach/issues/102905) \ No newline at end of file +A changefeed emitting [CSV]({% link {{ page.version.version }}/changefeed-messages.md %}#csv) will include `AS` labels in the message format when the changefeed serializes a [user-defined composite type]({% link {{ page.version.version }}/create-type.md %}). #102905 \ No newline at end of file diff --git a/src/current/_includes/v24.3/essential-metrics.md b/src/current/_includes/v24.3/essential-metrics.md index 1b0aac55a0b..630f403573a 100644 --- a/src/current/_includes/v24.3/essential-metrics.md +++ b/src/current/_includes/v24.3/essential-metrics.md @@ -201,4 +201,4 @@ If [Row-Level TTL]({% link {{ page.version.version }}/row-level-ttl.md %}) is co - [Custom Chart Debug Page]({% link {{ page.version.version }}/ui-custom-chart-debug-page.md %}) - [Cluster API]({% link {{ page.version.version }}/cluster-api.md %}) - [Essential Alerts]({% link {{ page.version.version }}/essential-alerts-{{ include.deployment}}.md %}) -- [CockroachDB Source Code - DB Console metrics to graphs mappings (in *.tsx files)](https://github.com/cockroachdb/cockroach/tree/master/pkg/ui/workspaces/db-console/src/views/cluster/containers/nodeGraphs/dashboards) +- CockroachDB Source Code - DB Console metrics to graphs mappings (in *.tsx files) diff --git a/src/current/_includes/v24.3/faq/what-is-crdb.md b/src/current/_includes/v24.3/faq/what-is-crdb.md index 28857ed61fa..ee04ca10506 100644 --- a/src/current/_includes/v24.3/faq/what-is-crdb.md +++ b/src/current/_includes/v24.3/faq/what-is-crdb.md @@ -1,6 +1,6 @@ CockroachDB is a [distributed SQL](https://www.cockroachlabs.com/blog/what-is-distributed-sql/) database built on a transactional and strongly-consistent key-value store. 
It **scales** horizontally; **survives** disk, machine, rack, and even datacenter failures with minimal latency disruption and no manual intervention; supports **strongly-consistent** ACID transactions; and provides a familiar **SQL** API for structuring, manipulating, and querying data. -CockroachDB is inspired by Google's [Spanner](http://research.google.com/archive/spanner.html) and [F1](http://research.google.com/pubs/pub38125.html) technologies, and the [source code](https://github.com/cockroachdb/cockroach) is freely available. +CockroachDB is inspired by Google's [Spanner](http://research.google.com/archive/spanner.html) and [F1](http://research.google.com/pubs/pub38125.html) technologies, and the source code is freely available. {{site.data.alerts.callout_success}} For a deeper dive into CockroachDB's capabilities and how it fits into the database landscape, take the free [**Intro to Distributed SQL and CockroachDB**](https://university.cockroachlabs.com/courses/course-v1:crl+intro-to-distributed-sql-and-cockroachdb+self-paced/about) course on Cockroach University. diff --git a/src/current/_includes/v24.3/finalization-required/119894.md b/src/current/_includes/v24.3/finalization-required/119894.md index f2b393c3c0e..f5177dcd523 100644 --- a/src/current/_includes/v24.3/finalization-required/119894.md +++ b/src/current/_includes/v24.3/finalization-required/119894.md @@ -1 +1 @@ -[Splits](https://cockroachlabs.com/docs/{{ include.version }}/architecture/distribution-layer#range-splits) no longer hold [latches](https://cockroachlabs.com/docs/architecture/distribution-layer.#latch-manager) for time proportional to the range size while computing [MVCC](https://cockroachlabs.com/docs/{{ include.version }}/architecture/storage-layer#mvcc) statistics. Instead, MVCC statistics are pre-computed before the critical section of the split. 
As a side effect, the resulting statistics are no longer 100% accurate because they may correctly distribute writes concurrent with the split. To mitigate against this potential inaccuracy, and to prevent the statistics from drifting after successive splits, the existing stored statistics are re-computed and corrected if needed during the non-critical section of the split. [#119894](https://github.com/cockroachdb/cockroach/pull/119894) +[Splits](https://cockroachlabs.com/docs/{{ include.version }}/architecture/distribution-layer#range-splits) no longer hold [latches](https://cockroachlabs.com/docs/architecture/distribution-layer#latch-manager) for time proportional to the range size while computing [MVCC](https://cockroachlabs.com/docs/{{ include.version }}/architecture/storage-layer#mvcc) statistics. Instead, MVCC statistics are pre-computed before the critical section of the split. As a side effect, the resulting statistics are no longer 100% accurate because they may not correctly distribute writes concurrent with the split. To mitigate against this potential inaccuracy, and to prevent the statistics from drifting after successive splits, the existing stored statistics are re-computed and corrected if needed during the non-critical section of the split. #119894
{% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}). {% endif %} You can alter the options that a changefeed uses, but you cannot alter the changefeed target tables. [#83033](https://github.com/cockroachdb/cockroach/issues/83033) \ No newline at end of file +{% if page.name == "alter-changefeed.md" %} `ALTER CHANGEFEED` {% else %} [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) {% endif %} is not fully supported with changefeeds that use {% if page.name == "cdc-queries.md" %} CDC queries. {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}). {% endif %} You can alter the options that a changefeed uses, but you cannot alter the changefeed target tables. #83033 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/alter-changefeed-limitations.md b/src/current/_includes/v24.3/known-limitations/alter-changefeed-limitations.md index a183f2964f4..cb75ad71c55 100644 --- a/src/current/_includes/v24.3/known-limitations/alter-changefeed-limitations.md +++ b/src/current/_includes/v24.3/known-limitations/alter-changefeed-limitations.md @@ -1,4 +1,4 @@ -- It is necessary to [`PAUSE`]({% link {{ page.version.version }}/pause-job.md %}) the changefeed before performing any [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) statement. [#77171](https://github.com/cockroachdb/cockroach/issues/77171) +- It is necessary to [`PAUSE`]({% link {{ page.version.version }}/pause-job.md %}) the changefeed before performing any [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) statement. #77171 - CockroachDB does not keep track of the [`initial_scan`]({% link {{ page.version.version }}/create-changefeed.md %}#initial-scan) option applied to tables when it is set to `yes` or `only`. 
For example: ~~~ sql diff --git a/src/current/_includes/v24.3/known-limitations/alter-view-limitations.md b/src/current/_includes/v24.3/known-limitations/alter-view-limitations.md index 642bed6ce08..e44921629ff 100644 --- a/src/current/_includes/v24.3/known-limitations/alter-view-limitations.md +++ b/src/current/_includes/v24.3/known-limitations/alter-view-limitations.md @@ -1,4 +1,4 @@ `ALTER VIEW` does not currently support: - Changing the [`SELECT`]({% link {{ page.version.version }}/select-clause.md %}) statement executed by a view. Instead, you must drop the existing view and create a new view. -- Renaming a view that other views depend on. This feature may be added in the future. [#10083](https://github.com/cockroachdb/cockroach/issues/10083) \ No newline at end of file +- Renaming a view that other views depend on. This feature may be added in the future. #10083 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/aost-limitations.md b/src/current/_includes/v24.3/known-limitations/aost-limitations.md index 811c884d08d..c42e62800d3 100644 --- a/src/current/_includes/v24.3/known-limitations/aost-limitations.md +++ b/src/current/_includes/v24.3/known-limitations/aost-limitations.md @@ -1 +1 @@ -CockroachDB does not support placeholders in {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %}. The time value must be a constant value embedded in the SQL string. [#30955](https://github.com/cockroachdb/cockroach/issues/30955) \ No newline at end of file +CockroachDB does not support placeholders in {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %}. The time value must be a constant value embedded in the SQL string. 
#30955 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/cannot-refresh-materialized-views-inside-transactions.md b/src/current/_includes/v24.3/known-limitations/cannot-refresh-materialized-views-inside-transactions.md index b0aaf728177..2f6fe466a95 100644 --- a/src/current/_includes/v24.3/known-limitations/cannot-refresh-materialized-views-inside-transactions.md +++ b/src/current/_includes/v24.3/known-limitations/cannot-refresh-materialized-views-inside-transactions.md @@ -24,4 +24,4 @@ SQLSTATE: 25000 ~~~ - [#66008](https://github.com/cockroachdb/cockroach/issues/66008) + #66008 diff --git a/src/current/_includes/v24.3/known-limitations/cdc-queries-column-families.md b/src/current/_includes/v24.3/known-limitations/cdc-queries-column-families.md index 505a8c9700e..52bdabec236 100644 --- a/src/current/_includes/v24.3/known-limitations/cdc-queries-column-families.md +++ b/src/current/_includes/v24.3/known-limitations/cdc-queries-column-families.md @@ -1 +1 @@ -Creating a changefeed with {% if page.name == "cdc-queries.md" %} CDC queries {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}) {% endif %} on tables with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %} is not supported. [#127761](https://github.com/cockroachdb/cockroach/issues/127761) \ No newline at end of file +Creating a changefeed with {% if page.name == "cdc-queries.md" %} CDC queries {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}) {% endif %} on tables with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %} is not supported. 
#127761 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/cdc-queries.md b/src/current/_includes/v24.3/known-limitations/cdc-queries.md index 2839eba5eda..552900531b0 100644 --- a/src/current/_includes/v24.3/known-limitations/cdc-queries.md +++ b/src/current/_includes/v24.3/known-limitations/cdc-queries.md @@ -3,5 +3,5 @@ - The following are not permitted in CDC queries: - [Volatile functions]({% link {{ page.version.version }}/functions-and-operators.md %}#function-volatility). - Sub-select queries. - - [Aggregate]({% link {{ page.version.version }}/functions-and-operators.md %}#aggregate-functions) and [window functions]({% link {{ page.version.version }}/window-functions.md %}) (i.e., functions operating over many rows). [#98237](https://github.com/cockroachdb/cockroach/issues/98237) -- `delete` changefeed events will only contain the [primary key]({% link {{ page.version.version }}/primary-key.md %}). All other columns will emit as `NULL`. See [Capture delete messages]({% link {{ page.version.version }}/cdc-queries.md %}#capture-delete-messages) for detail on running a CDC query that emits the deleted values. [#83835](https://github.com/cockroachdb/cockroach/issues/83835) + - [Aggregate]({% link {{ page.version.version }}/functions-and-operators.md %}#aggregate-functions) and [window functions]({% link {{ page.version.version }}/window-functions.md %}) (i.e., functions operating over many rows). #98237 +- `delete` changefeed events will only contain the [primary key]({% link {{ page.version.version }}/primary-key.md %}). All other columns will emit as `NULL`. See [Capture delete messages]({% link {{ page.version.version }}/cdc-queries.md %}#capture-delete-messages) for detail on running a CDC query that emits the deleted values. 
#83835 diff --git a/src/current/_includes/v24.3/known-limitations/cdc.md b/src/current/_includes/v24.3/known-limitations/cdc.md index a473e94367c..0a3914da8bd 100644 --- a/src/current/_includes/v24.3/known-limitations/cdc.md +++ b/src/current/_includes/v24.3/known-limitations/cdc.md @@ -1,8 +1,8 @@ -- Changefeed target options are limited to tables and [column families]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}). [#73435](https://github.com/cockroachdb/cockroach/issues/73435) +- Changefeed target options are limited to tables and [column families]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}). #73435 - {% include {{page.version.version}}/cdc/kafka-vpc-limitation.md %} -- Webhook sinks only support HTTPS. Use the [`insecure_tls_skip_verify`]({% link {{ page.version.version }}/create-changefeed.md %}#insecure-tls-skip-verify) parameter when testing to disable certificate verification; however, this still requires HTTPS and certificates. [#73431](https://github.com/cockroachdb/cockroach/issues/73431) -- Formats for changefeed messages are not supported by all changefeed sinks. Refer to the [Changefeed Sinks]({% link {{ page.version.version }}/changefeed-sinks.md %}) page for details on compatible formats with each sink and the [`format`]({% link {{ page.version.version }}/create-changefeed.md %}) option to specify a changefeed message format. [#73432](https://github.com/cockroachdb/cockroach/issues/73432) -- Using the [`split_column_families`]({% link {{ page.version.version }}/create-changefeed.md %}#split-column-families) and [`resolved`]({% link {{ page.version.version }}/create-changefeed.md %}#resolved) options on the same changefeed will cause an error when using the following [sinks](changefeed-sinks.html): Kafka and Google Cloud Pub/Sub. Instead, use the individual `FAMILY` keyword to specify column families when creating a changefeed. 
[#79452](https://github.com/cockroachdb/cockroach/issues/79452) +- Webhook sinks only support HTTPS. Use the [`insecure_tls_skip_verify`]({% link {{ page.version.version }}/create-changefeed.md %}#insecure-tls-skip-verify) parameter when testing to disable certificate verification; however, this still requires HTTPS and certificates. #73431 +- Formats for changefeed messages are not supported by all changefeed sinks. Refer to the [Changefeed Sinks]({% link {{ page.version.version }}/changefeed-sinks.md %}) page for details on compatible formats with each sink and the [`format`]({% link {{ page.version.version }}/create-changefeed.md %}) option to specify a changefeed message format. #73432 +- Using the [`split_column_families`]({% link {{ page.version.version }}/create-changefeed.md %}#split-column-families) and [`resolved`]({% link {{ page.version.version }}/create-changefeed.md %}#resolved) options on the same changefeed will cause an error when using the following [sinks](changefeed-sinks.html): Kafka and Google Cloud Pub/Sub. Instead, use the individual `FAMILY` keyword to specify column families when creating a changefeed. 
#79452 - {% include {{page.version.version}}/cdc/types-udt-composite-general.md %} The following limitations apply: - {% include {{page.version.version}}/cdc/avro-udt-composite.md %} - {% include {{page.version.version}}/cdc/csv-udt-composite.md %} \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/changefeed-column-family-message.md b/src/current/_includes/v24.3/known-limitations/changefeed-column-family-message.md index 41744b9b4b4..e77b6cc51e0 100644 --- a/src/current/_includes/v24.3/known-limitations/changefeed-column-family-message.md +++ b/src/current/_includes/v24.3/known-limitations/changefeed-column-family-message.md @@ -1 +1 @@ -When you create a changefeed on a table with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %}, the changefeed will emit messages per column family in separate streams. As a result, [changefeed messages]({% link {{ page.version.version }}/changefeed-messages.md %}) for different column families will arrive at the [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) under separate topics. [#127736](https://github.com/cockroachdb/cockroach/issues/127736) \ No newline at end of file +When you create a changefeed on a table with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %}, the changefeed will emit messages per column family in separate streams. As a result, [changefeed messages]({% link {{ page.version.version }}/changefeed-messages.md %}) for different column families will arrive at the [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) under separate topics. 
#127736 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/compression-level-kafka-config.md b/src/current/_includes/v24.3/known-limitations/compression-level-kafka-config.md index 0635319c5af..911bb3119c8 100644 --- a/src/current/_includes/v24.3/known-limitations/compression-level-kafka-config.md +++ b/src/current/_includes/v24.3/known-limitations/compression-level-kafka-config.md @@ -1 +1 @@ -Changefeeds created in v24.3 of CockroachDB that emit to [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka), or changefeeds created in earlier versions with the `changefeed.new_kafka_sink.enabled` cluster setting enabled, do not support negative compression level values for `GZIP` compression in the [`kafka_sink_config = {... "CompressionLevel" = ...}`]({% link {{ page.version.version }}/changefeed-sinks.md %}#compressionlevel) option field. [#136492](https://github.com/cockroachdb/cockroach/issues/136492) \ No newline at end of file +Changefeeds created in v24.3 of CockroachDB that emit to [Kafka]({% link {{ page.version.version }}/changefeed-sinks.md %}#kafka), or changefeeds created in earlier versions with the `changefeed.new_kafka_sink.enabled` cluster setting enabled, do not support negative compression level values for `GZIP` compression in the [`kafka_sink_config = {... "CompressionLevel" = ...}`]({% link {{ page.version.version }}/changefeed-sinks.md %}#compressionlevel) option field. #136492 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/copy-syntax.md b/src/current/_includes/v24.3/known-limitations/copy-syntax.md index e64a075dcac..bd2e56e1c25 100644 --- a/src/current/_includes/v24.3/known-limitations/copy-syntax.md +++ b/src/current/_includes/v24.3/known-limitations/copy-syntax.md @@ -1,5 +1,5 @@ CockroachDB does not yet support the following `COPY` syntax: - - `COPY ... WITH FREEZE`. [#85573](https://github.com/cockroachdb/cockroach/issues/85573) - - `COPY ... 
WITH QUOTE`. [#85574](https://github.com/cockroachdb/cockroach/issues/85574) - - `COPY ... FROM ... WHERE `. [#54580](https://github.com/cockroachdb/cockroach/issues/54580) + - `COPY ... WITH FREEZE`. #85573 + - `COPY ... WITH QUOTE`. #85574 + - `COPY ... FROM ... WHERE `. #54580 diff --git a/src/current/_includes/v24.3/known-limitations/create-statistics-aost-limitation.md b/src/current/_includes/v24.3/known-limitations/create-statistics-aost-limitation.md index 09f86f51c48..d318ce338be 100644 --- a/src/current/_includes/v24.3/known-limitations/create-statistics-aost-limitation.md +++ b/src/current/_includes/v24.3/known-limitations/create-statistics-aost-limitation.md @@ -1 +1 @@ -The `ANALYZE` alias {% if page.name != "create-statistics.md" %}of [`CREATE STATISTICS`]({% link {{ page.version.version }}/create-statistics.md %}){% endif %} does not support specifying an {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %} timestamp. `ANALYZE` statements use `AS OF SYSTEM TIME '-0.001ms'` automatically. For more control over the statistics interval, use the `CREATE STATISTICS` syntax instead. [#96430](https://github.com/cockroachdb/cockroach/issues/96430) \ No newline at end of file +The `ANALYZE` alias {% if page.name != "create-statistics.md" %}of [`CREATE STATISTICS`]({% link {{ page.version.version }}/create-statistics.md %}){% endif %} does not support specifying an {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %} timestamp. `ANALYZE` statements use `AS OF SYSTEM TIME '-0.001ms'` automatically. For more control over the statistics interval, use the `CREATE STATISTICS` syntax instead. 
#96430 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/drop-column-partial-index.md b/src/current/_includes/v24.3/known-limitations/drop-column-partial-index.md index 9fd1811cc43..fd28f0a96a2 100644 --- a/src/current/_includes/v24.3/known-limitations/drop-column-partial-index.md +++ b/src/current/_includes/v24.3/known-limitations/drop-column-partial-index.md @@ -1 +1 @@ -CockroachDB prevents a column from being dropped using [`ALTER TABLE ... DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) if it is referenced by a partial index predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). [#97813](https://github.com/cockroachdb/cockroach/issues/97813). \ No newline at end of file +CockroachDB prevents a column from being dropped using [`ALTER TABLE ... DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) if it is referenced by a partial index predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). #97813. \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/drop-owned-by-limitations.md b/src/current/_includes/v24.3/known-limitations/drop-owned-by-limitations.md index 95685f6adf1..af99d4b7cfe 100644 --- a/src/current/_includes/v24.3/known-limitations/drop-owned-by-limitations.md +++ b/src/current/_includes/v24.3/known-limitations/drop-owned-by-limitations.md @@ -10,4 +10,4 @@ The phrase "synthetic privileges" in the error message refers to [system-level privileges]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges). 
- The workaround is to use [`SHOW SYSTEM GRANTS FOR {role}`](show-system-grants.html) and then use [`REVOKE SYSTEM ...`](revoke.html#revoke-system-level-privileges-on-the-entire-cluster) for each privilege in the result. [#88149](https://github.com/cockroachdb/cockroach/issues/88149) \ No newline at end of file + The workaround is to use [`SHOW SYSTEM GRANTS FOR {role}`](show-system-grants.html) and then use [`REVOKE SYSTEM ...`](revoke.html#revoke-system-level-privileges-on-the-entire-cluster) for each privilege in the result. #88149 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/drop-trigger-limitations.md b/src/current/_includes/v24.3/known-limitations/drop-trigger-limitations.md index 90745f7e17a..2a633ccb369 100644 --- a/src/current/_includes/v24.3/known-limitations/drop-trigger-limitations.md +++ b/src/current/_includes/v24.3/known-limitations/drop-trigger-limitations.md @@ -1 +1 @@ -[`DROP TRIGGER`]({% link {{ page.version.version }}/drop-trigger.md %}) with `CASCADE` is not supported. [#128151](https://github.com/cockroachdb/cockroach/issues/128151) \ No newline at end of file +[`DROP TRIGGER`]({% link {{ page.version.version }}/drop-trigger.md %}) with `CASCADE` is not supported. #128151 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/expression-index-limitations.md b/src/current/_includes/v24.3/known-limitations/expression-index-limitations.md index c0e94185948..9487028791c 100644 --- a/src/current/_includes/v24.3/known-limitations/expression-index-limitations.md +++ b/src/current/_includes/v24.3/known-limitations/expression-index-limitations.md @@ -1,6 +1,6 @@ - The expression cannot reference columns outside the index's table. - Functional expression output must be determined by the input arguments. 
For example, you can't use the [volatile function]({% link {{ page.version.version }}/functions-and-operators.md %}#function-volatility) `now()` to create an index because its output depends on more than just the function arguments. -- CockroachDB does not allow {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} to reference [computed columns]({% link {{ page.version.version }}/computed-columns.md %}). [#67900](https://github.com/cockroachdb/cockroach/issues/67900) +- CockroachDB does not allow {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} to reference [computed columns]({% link {{ page.version.version }}/computed-columns.md %}). #67900 - CockroachDB does not support expressions as `ON CONFLICT` targets. This means that unique {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} cannot be selected as arbiters for [`INSERT .. ON CONFLICT`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause) statements. 
For example: {% include_cached copy-clipboard.html %} @@ -40,4 +40,4 @@ HINT: try \h INSERT ~~~ - [#67893](https://github.com/cockroachdb/cockroach/issues/67893) + #67893 diff --git a/src/current/_includes/v24.3/known-limitations/forecasted-stats-limitations.md b/src/current/_includes/v24.3/known-limitations/forecasted-stats-limitations.md index c8753124a96..b5d03e8ffc5 100644 --- a/src/current/_includes/v24.3/known-limitations/forecasted-stats-limitations.md +++ b/src/current/_includes/v24.3/known-limitations/forecasted-stats-limitations.md @@ -6,4 +6,4 @@ Although [`SHOW STATISTICS WITH FORECAST`]({% link {{ page.version.version }}/show-statistics.md %}#display-forecasted-statistics) shows the settings taking effect immediately, they do not actually take effect until new statistics are collected (as can be verified with [`EXPLAIN`]({% link {{ page.version.version }}/explain.md %})). - As a workaround, disable and enable forecasting at the [cluster]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-clusters) or [table]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-tables) level. This will invalidate the statistics cache and cause these settings to take effect immediately. [#123852](https://github.com/cockroachdb/cockroach/issues/123852) \ No newline at end of file + As a workaround, disable and enable forecasting at the [cluster]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-clusters) or [table]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-tables) level. This will invalidate the statistics cache and cause these settings to take effect immediately. 
#123852 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/full-text-search-unsupported.md b/src/current/_includes/v24.3/known-limitations/full-text-search-unsupported.md index 7b5a83f2cae..2be666994f1 100644 --- a/src/current/_includes/v24.3/known-limitations/full-text-search-unsupported.md +++ b/src/current/_includes/v24.3/known-limitations/full-text-search-unsupported.md @@ -11,4 +11,4 @@ - `!! tsquery` comparisons. - `tsquery @> tsquery` and `tsquery <@ tsquery` comparisons. -[#41288](https://github.com/cockroachdb/cockroach/issues/41288) \ No newline at end of file +#41288 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/generic-query-plan-limitations.md b/src/current/_includes/v24.3/known-limitations/generic-query-plan-limitations.md index e28e66d5f32..03d72195334 100644 --- a/src/current/_includes/v24.3/known-limitations/generic-query-plan-limitations.md +++ b/src/current/_includes/v24.3/known-limitations/generic-query-plan-limitations.md @@ -1,2 +1,2 @@ -- Because [generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache) use lookup joins instead of the scans and revscans used by custom query plans, generic query plans do not perform as well as custom query plans in some cases. [#128916](https://github.com/cockroachdb/cockroach/issues/128916) -- [Generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-type) are not included in the [plan cache]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache). This means a generic query plan built and optimized for a prepared statement in one session cannot be used by another session. To reuse generic query plans for maximum performance, a prepared statement should be executed multiple times instead of prepared and executed once. 
[#128911](https://github.com/cockroachdb/cockroach/issues/128911) \ No newline at end of file +- Because [generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache) use lookup joins instead of the scans and revscans used by custom query plans, generic query plans do not perform as well as custom query plans in some cases. #128916 +- [Generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-type) are not included in the [plan cache]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache). This means a generic query plan built and optimized for a prepared statement in one session cannot be used by another session. To reuse generic query plans for maximum performance, a prepared statement should be executed multiple times instead of prepared and executed once. #128911 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/ldr-column-families.md b/src/current/_includes/v24.3/known-limitations/ldr-column-families.md index 2a7c3bbba52..0c9dcd74ae4 100644 --- a/src/current/_includes/v24.3/known-limitations/ldr-column-families.md +++ b/src/current/_includes/v24.3/known-limitations/ldr-column-families.md @@ -1 +1 @@ -Replicating tables cannot contain [column families]({% link {{ page.version.version }}/column-families.md %}). [#133562](https://github.com/cockroachdb/cockroach/issues/133562) \ No newline at end of file +Replicating tables cannot contain [column families]({% link {{ page.version.version }}/column-families.md %}). 
#133562 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/ldr-composite-primary.md b/src/current/_includes/v24.3/known-limitations/ldr-composite-primary.md index ac897af35a7..2a790952946 100644 --- a/src/current/_includes/v24.3/known-limitations/ldr-composite-primary.md +++ b/src/current/_includes/v24.3/known-limitations/ldr-composite-primary.md @@ -1 +1 @@ -The [primary key]({% link {{ page.version.version }}/primary-key.md %}) in replicating tables cannot contain composite types. [#133572](https://github.com/cockroachdb/cockroach/issues/133572) \ No newline at end of file +The [primary key]({% link {{ page.version.version }}/primary-key.md %}) in replicating tables cannot contain composite types. #133572 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/ldr-indexes.md b/src/current/_includes/v24.3/known-limitations/ldr-indexes.md index 0bf7f60c2d4..4936f9868bd 100644 --- a/src/current/_includes/v24.3/known-limitations/ldr-indexes.md +++ b/src/current/_includes/v24.3/known-limitations/ldr-indexes.md @@ -1 +1 @@ -Replicating tables cannot contain an [index]({% link {{ page.version.version }}/indexes.md %}) that requires expression evaluation before insertion. [#133560](https://github.com/cockroachdb/cockroach/issues/133560) \ No newline at end of file +Replicating tables cannot contain an [index]({% link {{ page.version.version }}/indexes.md %}) that requires expression evaluation before insertion. #133560 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/ldr-sequences.md b/src/current/_includes/v24.3/known-limitations/ldr-sequences.md index 4e39f3630e3..693c0cc685b 100644 --- a/src/current/_includes/v24.3/known-limitations/ldr-sequences.md +++ b/src/current/_includes/v24.3/known-limitations/ldr-sequences.md @@ -1 +1 @@ -Replicating table cannot reference [sequences]({% link {{ page.version.version }}/create-sequence.md %}). 
[#132303](https://github.com/cockroachdb/cockroach/issues/132303) \ No newline at end of file +Replicating table cannot reference [sequences]({% link {{ page.version.version }}/create-sequence.md %}). #132303 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/ldr-triggers.md b/src/current/_includes/v24.3/known-limitations/ldr-triggers.md index 55f8e885b97..a587568b69a 100644 --- a/src/current/_includes/v24.3/known-limitations/ldr-triggers.md +++ b/src/current/_includes/v24.3/known-limitations/ldr-triggers.md @@ -1 +1 @@ -Replicating tables cannot reference triggers. [#132301](https://github.com/cockroachdb/cockroach/issues/132301) \ No newline at end of file +Replicating tables cannot reference triggers. #132301 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/ldr-udfs.md b/src/current/_includes/v24.3/known-limitations/ldr-udfs.md index fb642f14751..62eea605b52 100644 --- a/src/current/_includes/v24.3/known-limitations/ldr-udfs.md +++ b/src/current/_includes/v24.3/known-limitations/ldr-udfs.md @@ -1 +1 @@ -Replicating tables cannot reference [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). [#132302](https://github.com/cockroachdb/cockroach/issues/132302) \ No newline at end of file +Replicating tables cannot reference [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). 
#132302 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/locality-optimized-search-virtual-computed-columns.md b/src/current/_includes/v24.3/known-limitations/locality-optimized-search-virtual-computed-columns.md index d6acf418aa8..39b3f2cb00e 100644 --- a/src/current/_includes/v24.3/known-limitations/locality-optimized-search-virtual-computed-columns.md +++ b/src/current/_includes/v24.3/known-limitations/locality-optimized-search-virtual-computed-columns.md @@ -1 +1 @@ -- {% if page.name == "cost-based-optimizer.md" %} Locality optimized search {% else %} [Locality optimized search]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) {% endif %} does not work for queries that use [partitioned unique indexes]({% link {{ page.version.version }}/partitioning.md %}#partition-using-a-secondary-index) on [virtual computed columns](computed-columns.html#virtual-computed-columns). A workaround for computed columns is to make the virtual computed column a [stored computed column](computed-columns.html#stored-computed-columns). Locality optimized search does not work for queries that use partitioned unique [expression indexes](expression-indexes.html). [#68129](https://github.com/cockroachdb/cockroach/issues/68129) +- {% if page.name == "cost-based-optimizer.md" %} Locality optimized search {% else %} [Locality optimized search]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) {% endif %} does not work for queries that use [partitioned unique indexes]({% link {{ page.version.version }}/partitioning.md %}#partition-using-a-secondary-index) on [virtual computed columns](computed-columns.html#virtual-computed-columns). A workaround for computed columns is to make the virtual computed column a [stored computed column](computed-columns.html#stored-computed-columns). 
Locality optimized search does not work for queries that use partitioned unique [expression indexes](expression-indexes.html). #68129 diff --git a/src/current/_includes/v24.3/known-limitations/materialized-views-no-stats.md b/src/current/_includes/v24.3/known-limitations/materialized-views-no-stats.md index 02f2bd787c4..2bfe00d5307 100644 --- a/src/current/_includes/v24.3/known-limitations/materialized-views-no-stats.md +++ b/src/current/_includes/v24.3/known-limitations/materialized-views-no-stats.md @@ -1 +1 @@ -- The optimizer may not select the most optimal query plan when querying materialized views because CockroachDB does not [collect statistics]({% link {{ page.version.version }}/cost-based-optimizer.md %}#table-statistics) on materialized views. [#78181](https://github.com/cockroachdb/cockroach/issues/78181). +- The optimizer may not select the most optimal query plan when querying materialized views because CockroachDB does not [collect statistics]({% link {{ page.version.version }}/cost-based-optimizer.md %}#table-statistics) on materialized views. #78181. diff --git a/src/current/_includes/v24.3/known-limitations/multiple-arbiter-indexes.md b/src/current/_includes/v24.3/known-limitations/multiple-arbiter-indexes.md index c9861623314..28b41eca491 100644 --- a/src/current/_includes/v24.3/known-limitations/multiple-arbiter-indexes.md +++ b/src/current/_includes/v24.3/known-limitations/multiple-arbiter-indexes.md @@ -1 +1 @@ -CockroachDB does not currently support multiple arbiter indexes for [`INSERT ON CONFLICT DO UPDATE`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause), and will return an error if there are multiple unique or exclusion constraints matching the `ON CONFLICT DO UPDATE` specification. 
[#53170](https://github.com/cockroachdb/cockroach/issues/53170) \ No newline at end of file +CockroachDB does not currently support multiple arbiter indexes for [`INSERT ON CONFLICT DO UPDATE`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause), and will return an error if there are multiple unique or exclusion constraints matching the `ON CONFLICT DO UPDATE` specification. #53170 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/plpgsql-limitations.md b/src/current/_includes/v24.3/known-limitations/plpgsql-limitations.md index 83e47a0bdaa..3db60046bea 100644 --- a/src/current/_includes/v24.3/known-limitations/plpgsql-limitations.md +++ b/src/current/_includes/v24.3/known-limitations/plpgsql-limitations.md @@ -1,26 +1,26 @@ {% if page.name != "known-limitations.md" # New limitations in v24.2 %} {% endif %} -- It is not possible to use a variable as a target more than once in the same `INTO` clause. For example, `SELECT 1, 2 INTO x, x;`. [#121605](https://github.com/cockroachdb/cockroach/issues/121605) -- PLpgSQL variable declarations cannot inherit the type of a table row or column using `%TYPE` or `%ROWTYPE` syntax. [#114676](https://github.com/cockroachdb/cockroach/issues/114676) -- PL/pgSQL arguments cannot be referenced with ordinals (e.g., `$1`, `$2`). [#114701](https://github.com/cockroachdb/cockroach/issues/114701) +- It is not possible to use a variable as a target more than once in the same `INTO` clause. For example, `SELECT 1, 2 INTO x, x;`. #121605 +- PLpgSQL variable declarations cannot inherit the type of a table row or column using `%TYPE` or `%ROWTYPE` syntax. #114676 +- PL/pgSQL arguments cannot be referenced with ordinals (e.g., `$1`, `$2`). #114701 - The following statements are not supported: - - `FOR` cursor loops, `FOR` query loops, and `FOREACH` loops. [#105246](https://github.com/cockroachdb/cockroach/issues/105246) - - `RETURN NEXT` and `RETURN QUERY`. 
[#117744](https://github.com/cockroachdb/cockroach/issues/117744) - - `PERFORM`, `EXECUTE`, `GET DIAGNOSTICS`, and `CASE`. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- PL/pgSQL exception blocks cannot catch [transaction retry errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}). [#111446](https://github.com/cockroachdb/cockroach/issues/111446) -- `RAISE` statements cannot be annotated with names of schema objects related to the error (i.e., using `COLUMN`, `CONSTRAINT`, `DATATYPE`, `TABLE`, or `SCHEMA`). [#106237](https://github.com/cockroachdb/cockroach/issues/106237) -- `RAISE` statements message the client directly, and do not produce log output. [#117750](https://github.com/cockroachdb/cockroach/issues/117750) -- `ASSERT` debugging checks are not supported. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- `RECORD` parameters and variables are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). [#105713](https://github.com/cockroachdb/cockroach/issues/105713) -- Variable shadowing (e.g., declaring a variable with the same name in an inner block) is not supported in PL/pgSQL. [#117508](https://github.com/cockroachdb/cockroach/issues/117508) -- Syntax for accessing members of composite types without parentheses is not supported. [#114687](https://github.com/cockroachdb/cockroach/issues/114687) -- `NOT NULL` variable declarations are not supported. [#105243](https://github.com/cockroachdb/cockroach/issues/105243) -- Cursors opened in PL/pgSQL execute their queries on opening, affecting performance and resource usage. [#111479](https://github.com/cockroachdb/cockroach/issues/111479) -- Cursors in PL/pgSQL cannot be declared with arguments. [#117746](https://github.com/cockroachdb/cockroach/issues/117746) -- `OPEN FOR EXECUTE` is not supported for opening cursors. 
[#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- The `print_strict_params` option is not supported in PL/pgSQL. [#123671](https://github.com/cockroachdb/cockroach/issues/123671) -- The `FOUND` local variable, which checks whether a statement affected any rows, is not supported in PL/pgSQL. [#122306](https://github.com/cockroachdb/cockroach/issues/122306) -- By default, when a PL/pgSQL variable conflicts with a column name, CockroachDB resolves the ambiguity by treating it as a column reference rather than a variable reference. This behavior differs from PostgreSQL, where an ambiguous column error is reported, and it is possible to change the `plpgsql.variable_conflict` setting in order to prefer either columns or variables. [#115680](https://github.com/cockroachdb/cockroach/issues/115680) -- It is not possible to define a `RECORD`-returning PL/pgSQL function that returns different-typed expressions from different `RETURN` statements. CockroachDB requires a consistent return type for `RECORD`-returning functions. [#115384](https://github.com/cockroachdb/cockroach/issues/115384) -- Variables cannot be declared with an associated collation using the `COLLATE` keyword. [#105245](https://github.com/cockroachdb/cockroach/issues/105245) -- Variables cannot be accessed using the `label.var_name` pattern. [#122322](https://github.com/cockroachdb/cockroach/issues/122322) \ No newline at end of file + - `FOR` cursor loops, `FOR` query loops, and `FOREACH` loops. #105246 + - `RETURN NEXT` and `RETURN QUERY`. #117744 + - `PERFORM`, `EXECUTE`, `GET DIAGNOSTICS`, and `CASE`. #117744 +- PL/pgSQL exception blocks cannot catch [transaction retry errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}). #111446 +- `RAISE` statements cannot be annotated with names of schema objects related to the error (i.e., using `COLUMN`, `CONSTRAINT`, `DATATYPE`, `TABLE`, or `SCHEMA`). 
#106237 +- `RAISE` statements message the client directly, and do not produce log output. #117750 +- `ASSERT` debugging checks are not supported. #117744 +- `RECORD` parameters and variables are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). #105713 +- Variable shadowing (e.g., declaring a variable with the same name in an inner block) is not supported in PL/pgSQL. #117508 +- Syntax for accessing members of composite types without parentheses is not supported. #114687 +- `NOT NULL` variable declarations are not supported. #105243 +- Cursors opened in PL/pgSQL execute their queries on opening, affecting performance and resource usage. #111479 +- Cursors in PL/pgSQL cannot be declared with arguments. #117746 +- `OPEN FOR EXECUTE` is not supported for opening cursors. #117744 +- The `print_strict_params` option is not supported in PL/pgSQL. #123671 +- The `FOUND` local variable, which checks whether a statement affected any rows, is not supported in PL/pgSQL. #122306 +- By default, when a PL/pgSQL variable conflicts with a column name, CockroachDB resolves the ambiguity by treating it as a column reference rather than a variable reference. This behavior differs from PostgreSQL, where an ambiguous column error is reported, and it is possible to change the `plpgsql.variable_conflict` setting in order to prefer either columns or variables. #115680 +- It is not possible to define a `RECORD`-returning PL/pgSQL function that returns different-typed expressions from different `RETURN` statements. CockroachDB requires a consistent return type for `RECORD`-returning functions. #115384 +- Variables cannot be declared with an associated collation using the `COLLATE` keyword. #105245 +- Variables cannot be accessed using the `label.var_name` pattern. 
#122322 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/read-committed-limitations.md b/src/current/_includes/v24.3/known-limitations/read-committed-limitations.md index 63f83b15dd8..1f466997ed8 100644 --- a/src/current/_includes/v24.3/known-limitations/read-committed-limitations.md +++ b/src/current/_includes/v24.3/known-limitations/read-committed-limitations.md @@ -1,6 +1,6 @@ -- Schema changes (e.g., [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}), [`CREATE SCHEMA`]({% link {{ page.version.version }}/create-schema.md %}), [`CREATE INDEX`]({% link {{ page.version.version }}/create-index.md %})) cannot be performed within explicit `READ COMMITTED` transactions when the [`autocommit_before_ddl` session setting]({% link {{page.version.version}}/set-vars.md %}#autocommit-before-ddl) is set to `off`, and will cause transactions to abort. As a workaround, [set the transaction's isolation level]({% link {{ page.version.version }}/read-committed.md %}#set-the-current-transaction-to-read-committed) to `SERIALIZABLE`. [#114778](https://github.com/cockroachdb/cockroach/issues/114778) -- Multi-column-family checks during updates are not supported under `READ COMMITTED` isolation. [#112488](https://github.com/cockroachdb/cockroach/issues/112488) +- Schema changes (e.g., [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}), [`CREATE SCHEMA`]({% link {{ page.version.version }}/create-schema.md %}), [`CREATE INDEX`]({% link {{ page.version.version }}/create-index.md %})) cannot be performed within explicit `READ COMMITTED` transactions when the [`autocommit_before_ddl` session setting]({% link {{page.version.version}}/set-vars.md %}#autocommit-before-ddl) is set to `off`, and will cause transactions to abort. As a workaround, [set the transaction's isolation level]({% link {{ page.version.version }}/read-committed.md %}#set-the-current-transaction-to-read-committed) to `SERIALIZABLE`. 
#114778 +- Multi-column-family checks during updates are not supported under `READ COMMITTED` isolation. #112488 - Because locks acquired by [foreign key]({% link {{ page.version.version }}/foreign-key.md %}) checks, [`SELECT FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}), and [`SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) are fully replicated under `READ COMMITTED` isolation, some queries experience a delay for Raft replication. - [Foreign key]({% link {{ page.version.version }}/foreign-key.md %}) checks are not performed in parallel under `READ COMMITTED` isolation. - [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements are less optimized under `READ COMMITTED` isolation than under `SERIALIZABLE` isolation. Under `READ COMMITTED` isolation, `SELECT FOR UPDATE` and `SELECT FOR SHARE` usually perform an extra lookup join for every locked table when compared to the same queries under `SERIALIZABLE`. In addition, some optimization steps (such as de-correlation of correlated [subqueries]({% link {{ page.version.version }}/subqueries.md %})) are not currently performed on these queries. -- Regardless of isolation level, [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements in CockroachDB do not prevent insertion of new rows matching the search condition (i.e., [phantom reads]({% link {{ page.version.version }}/read-committed.md %}#non-repeatable-reads-and-phantom-reads)). This matches PostgreSQL behavior at all isolation levels. 
[#120673](https://github.com/cockroachdb/cockroach/issues/120673) \ No newline at end of file +- Regardless of isolation level, [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements in CockroachDB do not prevent insertion of new rows matching the search condition (i.e., [phantom reads]({% link {{ page.version.version }}/read-committed.md %}#non-repeatable-reads-and-phantom-reads)). This matches PostgreSQL behavior at all isolation levels. #120673 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/restore-multiregion-match.md b/src/current/_includes/v24.3/known-limitations/restore-multiregion-match.md index ab2f1029ecd..634d695246e 100644 --- a/src/current/_includes/v24.3/known-limitations/restore-multiregion-match.md +++ b/src/current/_includes/v24.3/known-limitations/restore-multiregion-match.md @@ -47,4 +47,4 @@ ALTER DATABASE destination_database SET PRIMARY REGION "us-east1"; ~~~ - [#71071](https://github.com/cockroachdb/cockroach/issues/71071) \ No newline at end of file + #71071 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/restore-tables-non-multi-reg.md b/src/current/_includes/v24.3/known-limitations/restore-tables-non-multi-reg.md index 5390f2d09ee..e05bc340141 100644 --- a/src/current/_includes/v24.3/known-limitations/restore-tables-non-multi-reg.md +++ b/src/current/_includes/v24.3/known-limitations/restore-tables-non-multi-reg.md @@ -1 +1 @@ -Restoring [`GLOBAL`]({% link {{ page.version.version }}/table-localities.md %}#global-tables) and [`REGIONAL BY TABLE`]({% link {{ page.version.version }}/table-localities.md %}#regional-tables) tables into a **non**-multi-region database is not supported. 
[#71502](https://github.com/cockroachdb/cockroach/issues/71502) +Restoring [`GLOBAL`]({% link {{ page.version.version }}/table-localities.md %}#global-tables) and [`REGIONAL BY TABLE`]({% link {{ page.version.version }}/table-localities.md %}#regional-tables) tables into a **non**-multi-region database is not supported. #71502 diff --git a/src/current/_includes/v24.3/known-limitations/restore-udf.md b/src/current/_includes/v24.3/known-limitations/restore-udf.md index a4a4bc080fe..e4180219a25 100644 --- a/src/current/_includes/v24.3/known-limitations/restore-udf.md +++ b/src/current/_includes/v24.3/known-limitations/restore-udf.md @@ -1 +1 @@ -`RESTORE` will not restore a table that references a [UDF]({% link {{ page.version.version }}/user-defined-functions.md %}), unless you skip restoring the function with the {% if page.name == "restore.md" %} [`skip_missing_udfs`](#skip-missing-udfs) {% else %} [`skip_missing_udfs`]({% link {{ page.version.version }}/restore.md %}#skip-missing-udfs) {% endif %} option. Alternatively, take a [database-level backup]({% link {{ page.version.version }}/backup.md %}#back-up-a-database) to include everything needed to restore the table. [#118195](https://github.com/cockroachdb/cockroach/issues/118195) \ No newline at end of file +`RESTORE` will not restore a table that references a [UDF]({% link {{ page.version.version }}/user-defined-functions.md %}), unless you skip restoring the function with the {% if page.name == "restore.md" %} [`skip_missing_udfs`](#skip-missing-udfs) {% else %} [`skip_missing_udfs`]({% link {{ page.version.version }}/restore.md %}#skip-missing-udfs) {% endif %} option. Alternatively, take a [database-level backup]({% link {{ page.version.version }}/backup.md %}#back-up-a-database) to include everything needed to restore the table. 
#118195 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/routine-limitations.md b/src/current/_includes/v24.3/known-limitations/routine-limitations.md index 4718c6c7abf..fa16791b56a 100644 --- a/src/current/_includes/v24.3/known-limitations/routine-limitations.md +++ b/src/current/_includes/v24.3/known-limitations/routine-limitations.md @@ -1,10 +1,10 @@ {% if page.name != "known-limitations.md" # New limitations in v24.2 %} {% endif %} -- Routines cannot be invoked with named arguments, e.g., `SELECT foo(a => 1, b => 2);` or `SELECT foo(b := 1, a := 2);`. [#122264](https://github.com/cockroachdb/cockroach/issues/122264) -- Routines cannot be created if they reference temporary tables. [#121375](https://github.com/cockroachdb/cockroach/issues/121375) -- Routines cannot be created with unnamed `INOUT` parameters. For example, `CREATE PROCEDURE p(INOUT INT) AS $$ BEGIN NULL; END; $$ LANGUAGE PLpgSQL;`. [#121251](https://github.com/cockroachdb/cockroach/issues/121251) -- Routines cannot be created if they return fewer columns than declared. For example, `CREATE FUNCTION f(OUT sum INT, INOUT a INT, INOUT b INT) LANGUAGE SQL AS $$ SELECT (a + b, b); $$;`. [#121247](https://github.com/cockroachdb/cockroach/issues/121247) -- Routines cannot be created with an `OUT` parameter of type `RECORD`. [#123448](https://github.com/cockroachdb/cockroach/issues/123448) -- DDL statements (e.g., `CREATE TABLE`, `CREATE INDEX`) are not allowed within UDFs or stored procedures. [#110080](https://github.com/cockroachdb/cockroach/issues/110080) -- Polymorphic types cannot be cast to other types (e.g., `TEXT`) within routine parameters. [#123536](https://github.com/cockroachdb/cockroach/issues/123536) -- Routine parameters and return types cannot be declared using the `ANYENUM` polymorphic type, which is able to match any [`ENUM`]({% link {{ page.version.version }}/enum.md %}) type. 
[123048](https://github.com/cockroachdb/cockroach/issues/123048) \ No newline at end of file +- Routines cannot be invoked with named arguments, e.g., `SELECT foo(a => 1, b => 2);` or `SELECT foo(b := 1, a := 2);`. #122264 +- Routines cannot be created if they reference temporary tables. #121375 +- Routines cannot be created with unnamed `INOUT` parameters. For example, `CREATE PROCEDURE p(INOUT INT) AS $$ BEGIN NULL; END; $$ LANGUAGE PLpgSQL;`. #121251 +- Routines cannot be created if they return fewer columns than declared. For example, `CREATE FUNCTION f(OUT sum INT, INOUT a INT, INOUT b INT) LANGUAGE SQL AS $$ SELECT (a + b, b); $$;`. #121247 +- Routines cannot be created with an `OUT` parameter of type `RECORD`. #123448 +- DDL statements (e.g., `CREATE TABLE`, `CREATE INDEX`) are not allowed within UDFs or stored procedures. #110080 +- Polymorphic types cannot be cast to other types (e.g., `TEXT`) within routine parameters. #123536 +- Routine parameters and return types cannot be declared using the `ANYENUM` polymorphic type, which is able to match any [`ENUM`]({% link {{ page.version.version }}/enum.md %}) type. 123048 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/row-level-ttl-limitations.md b/src/current/_includes/v24.3/known-limitations/row-level-ttl-limitations.md index c386ba576d7..4460901566d 100644 --- a/src/current/_includes/v24.3/known-limitations/row-level-ttl-limitations.md +++ b/src/current/_includes/v24.3/known-limitations/row-level-ttl-limitations.md @@ -1,5 +1,5 @@ - Any queries you run against tables with Row-Level TTL enabled (or against tables that have [foreign keys]({% link {{page.version.version}}/foreign-key.md %}) that reference TTL-enabled tables) do not filter out expired rows from the result set (this includes [`UPDATE`s]({% link {{ page.version.version }}/update.md %}) and [`DELETE`s]({% link {{ page.version.version }}/delete.md %})). This feature may be added in a future release. 
For now, follow the instructions in [Filter out expired rows from a selection query]({% link {{ page.version.version }}/row-level-ttl.md %}#filter-out-expired-rows-from-a-selection-query). -- Tables with Row-Level TTL can be referenced by [foreign keys]({% link {{page.version.version}}/foreign-key.md %}). TTL deletes are issued as regular [`DELETE`]({% link {{ page.version.version }}/delete.md %}) statements, so inbound foreign keys apply. If an inbound foreign key uses `ON DELETE RESTRICT` and referencing rows exist, the TTL job fails with a foreign key violation. [#101372](https://github.com/cockroachdb/cockroach/issues/101372) +- Tables with Row-Level TTL can be referenced by [foreign keys]({% link {{page.version.version}}/foreign-key.md %}). TTL deletes are issued as regular [`DELETE`]({% link {{ page.version.version }}/delete.md %}) statements, so inbound foreign keys apply. If an inbound foreign key uses `ON DELETE RESTRICT` and referencing rows exist, the TTL job fails with a foreign key violation. #101372 - Enabling Row-Level TTL on a table with multiple [secondary indexes]({% link {{ page.version.version }}/indexes.md %}) can have negative performance impacts on a cluster, including increased [latency]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#service-latency) and [contention]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention). This is particularly true for large tables with terabytes of data and billions of rows that are split up into multiple ranges across separate nodes. - Increased latency may occur because secondary indexes aren't necessarily stored on the same underlying [ranges]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-range) as a table's [primary indexes]({% link {{ page.version.version }}/indexes.md %}). 
Further, the secondary indexes' ranges may have [leaseholders]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-leaseholder) located on different nodes than the primary index. - Increased contention may occur because [intents]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#write-intents) must be written as part of performing the deletions. diff --git a/src/current/_includes/v24.3/known-limitations/select-for-update-limitations.md b/src/current/_includes/v24.3/known-limitations/select-for-update-limitations.md index 73aaf9fdeb6..210099bacd7 100644 --- a/src/current/_includes/v24.3/known-limitations/select-for-update-limitations.md +++ b/src/current/_includes/v24.3/known-limitations/select-for-update-limitations.md @@ -1,4 +1,4 @@ -- `SKIP LOCKED` cannot be used for tables with multiple [column families]({% link {{ page.version.version }}/column-families.md %}). [#160961](https://github.com/cockroachdb/cockroach/issues/160961) +- `SKIP LOCKED` cannot be used for tables with multiple [column families]({% link {{ page.version.version }}/column-families.md %}). #160961 - By default under `SERIALIZABLE` isolation, locks acquired using `SELECT ... FOR UPDATE` and `SELECT ... FOR SHARE` are implemented as fast, in-memory [unreplicated locks](architecture/transaction-layer.html#unreplicated-locks). If a [lease transfer]({% link {{ page.version.version }}/architecture/replication-layer.md %}#epoch-based-leases-table-data) or [range split/merge]({% link {{ page.version.version }}/architecture/distribution-layer.md %}#range-merges) occurs on a range held by an unreplicated lock, the lock is dropped. The following behaviors can occur: - The desired ordering of concurrent accesses to one or more rows of a table expressed by your use of `SELECT ... 
FOR UPDATE` may not be preserved (that is, a transaction _B_ against some table _T_ that was supposed to wait behind another transaction _A_ operating on _T_ may not wait for transaction _A_). - The transaction that acquired the (now dropped) unreplicated lock may fail to commit, leading to [transaction retry errors with code `40001`]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}) and the [`restart transaction` error message]({% link {{ page.version.version }}/common-errors.md %}#restart-transaction). diff --git a/src/current/_includes/v24.3/known-limitations/set-transaction-no-rollback.md b/src/current/_includes/v24.3/known-limitations/set-transaction-no-rollback.md index 414cbac6282..5f0145c05c4 100644 --- a/src/current/_includes/v24.3/known-limitations/set-transaction-no-rollback.md +++ b/src/current/_includes/v24.3/known-limitations/set-transaction-no-rollback.md @@ -14,4 +14,4 @@ timezone 3 ~~~ -[#69396](https://github.com/cockroachdb/cockroach/issues/69396) +#69396 diff --git a/src/current/_includes/v24.3/known-limitations/show-backup-symlink.md b/src/current/_includes/v24.3/known-limitations/show-backup-symlink.md index 38ba86fb28f..1c7a5612242 100644 --- a/src/current/_includes/v24.3/known-limitations/show-backup-symlink.md +++ b/src/current/_includes/v24.3/known-limitations/show-backup-symlink.md @@ -1 +1 @@ -[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}) does not support listing backups if the [`nodelocal`]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}) storage location is a symlink. [#70260](https://github.com/cockroachdb/cockroach/issues/70260) \ No newline at end of file +[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}) does not support listing backups if the [`nodelocal`]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}) storage location is a symlink. 
#70260 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/sql-cursors.md b/src/current/_includes/v24.3/known-limitations/sql-cursors.md index 4c047aa9603..93bbca937b1 100644 --- a/src/current/_includes/v24.3/known-limitations/sql-cursors.md +++ b/src/current/_includes/v24.3/known-limitations/sql-cursors.md @@ -1,9 +1,9 @@ CockroachDB implements SQL {% if page.name == "known-limitations.md" %} [cursor]({% link {{ page.version.version }}/cursors.md %}) {% else %} cursor {% endif %} support with the following limitations: -- `DECLARE` only supports forward cursors. Reverse cursors created with `DECLARE SCROLL` are not supported. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- `FETCH` supports forward, relative, and absolute variants, but only for forward cursors. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- `BINARY CURSOR`, which returns data in the Postgres binary format, is not supported. [#77099](https://github.com/cockroachdb/cockroach/issues/77099) -- `WITH HOLD`, which allows keeping a cursor open for longer than a transaction by writing its results into a buffer, is accepted as valid syntax within a single transaction but is not supported. It acts as a no-op and does not actually perform the function of `WITH HOLD`, which is to make the cursor live outside its parent transaction. Instead, if you are using `WITH HOLD`, you will be forced to close that cursor within the transaction it was created in. [#77101](https://github.com/cockroachdb/cockroach/issues/77101) +- `DECLARE` only supports forward cursors. Reverse cursors created with `DECLARE SCROLL` are not supported. #77102 +- `FETCH` supports forward, relative, and absolute variants, but only for forward cursors. #77102 +- `BINARY CURSOR`, which returns data in the Postgres binary format, is not supported. 
#77099 +- `WITH HOLD`, which allows keeping a cursor open for longer than a transaction by writing its results into a buffer, is accepted as valid syntax within a single transaction but is not supported. It acts as a no-op and does not actually perform the function of `WITH HOLD`, which is to make the cursor live outside its parent transaction. Instead, if you are using `WITH HOLD`, you will be forced to close that cursor within the transaction it was created in. #77101 - This syntax is accepted (but does not have any effect): {% include_cached copy-clipboard.html %} ~~~ sql @@ -19,6 +19,6 @@ CockroachDB implements SQL {% if page.name == "known-limitations.md" %} [cursor] DECLARE test_cur CURSOR WITH HOLD FOR SELECT * FROM foo ORDER BY bar; COMMIT; -- This will fail with an error because CLOSE test_cur was not called inside the transaction. ~~~ -- Scrollable cursor (also known as reverse `FETCH`) is not supported. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- [`SELECT ... FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) with a cursor is not supported. [#77103](https://github.com/cockroachdb/cockroach/issues/77103) -- Respect for [`SAVEPOINT`s]({% link {{ page.version.version }}/savepoint.md %}) is not supported. Cursor definitions do not disappear properly if rolled back to a `SAVEPOINT` from before they were created. [#77104](https://github.com/cockroachdb/cockroach/issues/77104) +- Scrollable cursor (also known as reverse `FETCH`) is not supported. #77102 +- [`SELECT ... FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) with a cursor is not supported. #77103 +- Respect for [`SAVEPOINT`s]({% link {{ page.version.version }}/savepoint.md %}) is not supported. Cursor definitions do not disappear properly if rolled back to a `SAVEPOINT` from before they were created. 
#77104 diff --git a/src/current/_includes/v24.3/known-limitations/srid-4326-limitations.md b/src/current/_includes/v24.3/known-limitations/srid-4326-limitations.md index b556a9fbecd..294a1a53bd4 100644 --- a/src/current/_includes/v24.3/known-limitations/srid-4326-limitations.md +++ b/src/current/_includes/v24.3/known-limitations/srid-4326-limitations.md @@ -1 +1 @@ -Defining a custom SRID by inserting rows into [`spatial_ref_sys`]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial_ref_sys) is not currently supported. [#55903](https://github.com/cockroachdb/cockroach/issues/55903) \ No newline at end of file +Defining a custom SRID by inserting rows into [`spatial_ref_sys`]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial_ref_sys) is not currently supported. #55903 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/stats-refresh-upgrade.md b/src/current/_includes/v24.3/known-limitations/stats-refresh-upgrade.md index 3d5a8d26325..3325dfaa53b 100644 --- a/src/current/_includes/v24.3/known-limitations/stats-refresh-upgrade.md +++ b/src/current/_includes/v24.3/known-limitations/stats-refresh-upgrade.md @@ -1 +1 @@ -- The [automatic statistics refresher]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-statistics-refresh-rate) automatically checks whether it needs to refresh statistics for every table in the database upon startup of each node in the cluster. If statistics for a table have not been refreshed in a while, this will trigger collection of statistics for that table. If statistics have been refreshed recently, it will not force a refresh. As a result, the automatic statistics refresher does not necessarily perform a refresh of statistics after an [upgrade]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}). 
This could cause a problem, for example, if the upgrade moves from a version without [histograms]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-histogram-collection) to a version with histograms. To refresh statistics manually, use [`CREATE STATISTICS`](create-statistics.html). [#54816](https://github.com/cockroachdb/cockroach/issues/54816) +- The [automatic statistics refresher]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-statistics-refresh-rate) automatically checks whether it needs to refresh statistics for every table in the database upon startup of each node in the cluster. If statistics for a table have not been refreshed in a while, this will trigger collection of statistics for that table. If statistics have been refreshed recently, it will not force a refresh. As a result, the automatic statistics refresher does not necessarily perform a refresh of statistics after an [upgrade]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}). This could cause a problem, for example, if the upgrade moves from a version without [histograms]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-histogram-collection) to a version with histograms. To refresh statistics manually, use [`CREATE STATISTICS`](create-statistics.html). #54816 diff --git a/src/current/_includes/v24.3/known-limitations/stored-proc-limitations.md b/src/current/_includes/v24.3/known-limitations/stored-proc-limitations.md index b2ba1b61562..bce2e47d6b8 100644 --- a/src/current/_includes/v24.3/known-limitations/stored-proc-limitations.md +++ b/src/current/_includes/v24.3/known-limitations/stored-proc-limitations.md @@ -1,3 +1,3 @@ {% if page.name != "known-limitations.md" # New limitations in v24.2 %} {% endif %} -- `COMMIT` and `ROLLBACK` statements are not supported within nested procedures. 
[#122266](https://github.com/cockroachdb/cockroach/issues/122266) \ No newline at end of file +- `COMMIT` and `ROLLBACK` statements are not supported within nested procedures. #122266 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/trigger-limitations.md b/src/current/_includes/v24.3/known-limitations/trigger-limitations.md index 7373cd486e7..f2514d24113 100644 --- a/src/current/_includes/v24.3/known-limitations/trigger-limitations.md +++ b/src/current/_includes/v24.3/known-limitations/trigger-limitations.md @@ -1,6 +1,6 @@ -- `CREATE OR REPLACE TRIGGER` is not supported. [#128422](https://github.com/cockroachdb/cockroach/issues/128422) -- Statement-level triggers are not supported. [#126362](https://github.com/cockroachdb/cockroach/issues/126362) -- `INSTEAD OF` triggers are not supported. [#126363](https://github.com/cockroachdb/cockroach/issues/126363) -- A [trigger function]({% link {{ page.version.version }}/triggers.md %}#trigger-function) that is used in an existing trigger cannot be replaced with `CREATE OR REPLACE` syntax. To use `CREATE OR REPLACE`, first [drop any triggers]({% link {{ page.version.version }}/drop-trigger.md %}) that are using the function. [#134555](https://github.com/cockroachdb/cockroach/issues/134555) -- Hidden columns are not visible to triggers. [#133331](https://github.com/cockroachdb/cockroach/issues/133331) +- `CREATE OR REPLACE TRIGGER` is not supported. #128422 +- Statement-level triggers are not supported. #126362 +- `INSTEAD OF` triggers are not supported. #126363 +- A [trigger function]({% link {{ page.version.version }}/triggers.md %}#trigger-function) that is used in an existing trigger cannot be replaced with `CREATE OR REPLACE` syntax. To use `CREATE OR REPLACE`, first [drop any triggers]({% link {{ page.version.version }}/drop-trigger.md %}) that are using the function. #134555 +- Hidden columns are not visible to triggers. 
#133331 - {% include {{ page.version.version }}/known-limitations/drop-trigger-limitations.md %} \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/trigram-unsupported-syntax.md b/src/current/_includes/v24.3/known-limitations/trigram-unsupported-syntax.md index 494730c7ae8..a981c4915b0 100644 --- a/src/current/_includes/v24.3/known-limitations/trigram-unsupported-syntax.md +++ b/src/current/_includes/v24.3/known-limitations/trigram-unsupported-syntax.md @@ -6,4 +6,4 @@ - Acceleration on [regex string matching]({% link {{ page.version.version }}/scalar-expressions.md %}#string-matching-using-posix-regular-expressions). - `%` comparisons, `show_trgm`, and trigram index creation on [collated strings]({% link {{ page.version.version }}/collate.md %}). -[#41285](https://github.com/cockroachdb/cockroach/issues/41285) \ No newline at end of file +#41285 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/udf-limitations.md b/src/current/_includes/v24.3/known-limitations/udf-limitations.md index 57011914407..e0cbfd9bd7e 100644 --- a/src/current/_includes/v24.3/known-limitations/udf-limitations.md +++ b/src/current/_includes/v24.3/known-limitations/udf-limitations.md @@ -1,10 +1,10 @@ {% if page.name != "known-limitations.md" # New limitations in v24.2 %} {% endif %} -- A `RECORD`-returning UDF cannot be created without a `RETURN` statement in the root block, which would restrict the wildcard type to a concrete one. [#122945](https://github.com/cockroachdb/cockroach/issues/122945) +- A `RECORD`-returning UDF cannot be created without a `RETURN` statement in the root block, which would restrict the wildcard type to a concrete one. #122945 - User-defined functions are not currently supported in: - - Expressions (column, index, constraint) in tables. [#87699](https://github.com/cockroachdb/cockroach/issues/87699) - - Views. 
[#87699](https://github.com/cockroachdb/cockroach/issues/87699) -- User-defined functions cannot call themselves recursively. [#93049](https://github.com/cockroachdb/cockroach/issues/93049) -- [Common table expressions]({% link {{ page.version.version }}/common-table-expressions.md %}) (CTE), recursive or non-recursive, are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}) (UDF). That is, you cannot use a `WITH` clause in the body of a UDF. [#92961](https://github.com/cockroachdb/cockroach/issues/92961) -- The `setval` function cannot be resolved when used inside UDF bodies. [#110860](https://github.com/cockroachdb/cockroach/issues/110860) -- Casting subqueries to [user-defined types]({% link {{ page.version.version }}/create-type.md %}) in UDFs is not supported. [#108184](https://github.com/cockroachdb/cockroach/issues/108184) \ No newline at end of file + - Expressions (column, index, constraint) in tables. #87699 + - Views. #87699 +- User-defined functions cannot call themselves recursively. #93049 +- [Common table expressions]({% link {{ page.version.version }}/common-table-expressions.md %}) (CTE), recursive or non-recursive, are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}) (UDF). That is, you cannot use a `WITH` clause in the body of a UDF. #92961 +- The `setval` function cannot be resolved when used inside UDF bodies. #110860 +- Casting subqueries to [user-defined types]({% link {{ page.version.version }}/create-type.md %}) in UDFs is not supported. 
#108184 \ No newline at end of file diff --git a/src/current/_includes/v24.3/known-limitations/vectorized-engine-limitations.md b/src/current/_includes/v24.3/known-limitations/vectorized-engine-limitations.md index daea59ebf88..da227ad4825 100644 --- a/src/current/_includes/v24.3/known-limitations/vectorized-engine-limitations.md +++ b/src/current/_includes/v24.3/known-limitations/vectorized-engine-limitations.md @@ -1,2 +1,2 @@ -- The vectorized engine does not support queries containing a join filtered with an [`ON` expression]({% link {{ page.version.version }}/joins.md %}#supported-join-conditions). [#38018](https://github.com/cockroachdb/cockroach/issues/38018) +- The vectorized engine does not support queries containing a join filtered with an [`ON` expression]({% link {{ page.version.version }}/joins.md %}#supported-join-conditions). #38018 - The vectorized engine does not support [working with spatial data]({% link {{ page.version.version }}/export-spatial-data.md %}). Queries with [geospatial functions]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) or [spatial data]({% link {{ page.version.version }}/export-spatial-data.md %}) will revert to the row-oriented execution engine. diff --git a/src/current/_includes/v24.3/misc/tooling.md b/src/current/_includes/v24.3/misc/tooling.md index dcd24363435..01dc63c840f 100644 --- a/src/current/_includes/v24.3/misc/tooling.md +++ b/src/current/_includes/v24.3/misc/tooling.md @@ -23,7 +23,7 @@ Customers should contact their account team before moving production workloads t Unless explicitly stated, support for a [driver](#drivers) or [data access framework](#data-access-frameworks-e-g-orms) does not include [automatic, client-side transaction retry handling]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#client-side-retry-handling). 
For client-side transaction retry handling samples, see [Example Apps]({% link {{ page.version.version }}/example-apps.md %}). {{site.data.alerts.end}} -If you encounter problems using CockroachDB with any of the tools listed on this page, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward better support. +If you encounter problems using CockroachDB with any of the tools listed on this page, please open an issue with details to help us make progress toward better support. For a list of tools supported by the CockroachDB community, see [Third-Party Tools Supported by the Community]({% link {{ page.version.version }}/community-tooling.md %}). @@ -32,23 +32,23 @@ For a list of tools supported by the CockroachDB community, see [Third-Party Too | Language | Driver | Latest tested version | Support level | CockroachDB adapter | Tutorial | |----------+--------+-----------------------+---------------------+---------------------+----------| | C | [libpq](http://www.postgresql.org/docs/13/static/libpq.html)| PostgreSQL 13 | Partial | N/A | N/A | -| C# (.NET) | [Npgsql](https://www.nuget.org/packages/Npgsql/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/npgsql.go ||var npgsqlSupportedTag = "v||"\n\n %} | Full | N/A | [Build a C# App with CockroachDB (Npgsql)](build-a-csharp-app-with-cockroachdb.html) | -| Go | [pgx](https://github.com/jackc/pgx/releases)


[pq](https://github.com/lib/pq) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgx.go ||var supportedPGXTag = "||"\n\n %}
(use latest version of CockroachDB adapter)
{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/libpq.go ||var libPQSupportedTag = "||"\n\n %} | Full


Full | [`crdbpgx`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbpgx)
(includes client-side transaction retry handling)
N/A | [Build a Go App with CockroachDB (pgx)](build-a-go-app-with-cockroachdb.html)


[Build a Go App with CockroachDB (pq)](build-a-go-app-with-cockroachdb-pq.html) | -| Java | [JDBC](https://jdbc.postgresql.org/download/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgjdbc.go ||var supportedPGJDBCTag = "||"\n\n %} | Full | N/A | [Build a Java App with CockroachDB (JDBC)](build-a-java-app-with-cockroachdb.html) | +| C# (.NET) | [Npgsql](https://www.nuget.org/packages/Npgsql/) | 7.0.2 | Full | N/A | [Build a C# App with CockroachDB (Npgsql)](build-a-csharp-app-with-cockroachdb.html) | +| Go | [pgx](https://github.com/jackc/pgx/releases)


[pq](https://github.com/lib/pq) | v5.3.1
(use latest version of CockroachDB adapter)
v1.10.5 | Full


Full | [`crdbpgx`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbpgx)
(includes client-side transaction retry handling)
N/A | [Build a Go App with CockroachDB (pgx)](build-a-go-app-with-cockroachdb.html)


[Build a Go App with CockroachDB (pq)](build-a-go-app-with-cockroachdb-pq.html) | +| Java | [JDBC](https://jdbc.postgresql.org/download/) | REL42.7.3 | Full | N/A | [Build a Java App with CockroachDB (JDBC)](build-a-java-app-with-cockroachdb.html) | | JavaScript | [pg](https://www.npmjs.com/package/pg) | 8.2.1 | Full | N/A | [Build a Node.js App with CockroachDB (pg)](build-a-nodejs-app-with-cockroachdb.html) | -| Python | [psycopg3](https://www.psycopg.org/psycopg3/docs/)


[psycopg2](https://www.psycopg.org/docs/install.html)


[asyncpg](https://magicstack.github.io/asyncpg/current/index.html) | 3.0.16


2.8.6


{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/asyncpg.go || var asyncpgSupportedTag = "||"\n\n %} | Full


Full


Partial | N/A


N/A


N/A | [Build a Python App with CockroachDB (psycopg3)](build-a-python-app-with-cockroachdb-psycopg3.html)


[Build a Python App with CockroachDB (psycopg2)](build-a-python-app-with-cockroachdb.html)


[Build a Python App with CockroachDB (asyncpg)](build-a-python-app-with-cockroachdb-asyncpg.html) | -| Ruby | [pg](https://rubygems.org/gems/pg) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/ruby_pg.go ||var rubyPGVersion = "||"\n\n %} | Full | N/A | [Build a Ruby App with CockroachDB (pg)](build-a-ruby-app-with-cockroachdb.html) | +| Python | [psycopg3](https://www.psycopg.org/psycopg3/docs/)


[psycopg2](https://www.psycopg.org/docs/install.html)


[asyncpg](https://magicstack.github.io/asyncpg/current/index.html) | 3.0.16


2.8.6


v0.24.0 | Full


Full


Partial | N/A


N/A


N/A | [Build a Python App with CockroachDB (psycopg3)](build-a-python-app-with-cockroachdb-psycopg3.html)


[Build a Python App with CockroachDB (psycopg2)](build-a-python-app-with-cockroachdb.html)


[Build a Python App with CockroachDB (asyncpg)](build-a-python-app-with-cockroachdb-asyncpg.html) | +| Ruby | [pg](https://rubygems.org/gems/pg) | v1.4.6 | Full | N/A | [Build a Ruby App with CockroachDB (pg)](build-a-ruby-app-with-cockroachdb.html) | | Rust | [rust-postgres](https://github.com/sfackler/rust-postgres) | 0.19.2 | Partial | N/A | [Build a Rust App with CockroachDB]({% link {{ page.version.version }}/build-a-rust-app-with-cockroachdb.md %}) | ## Data access frameworks (e.g., ORMs) | Language | Framework | Latest tested version | Support level | CockroachDB adapter | Tutorial | |----------+-----------+-----------------------+---------------+---------------------+----------| -| Go | [GORM](https://github.com/jinzhu/gorm/releases)


[go-pg](https://github.com/go-pg/pg)
[upper/db](https://github.com/upper/db) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gorm.go ||var gormSupportedTag = "||"\n\n %}


{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gopg.go ||var gopgSupportedTag = "||"\n\n %}
v4 | Full


Full
Full | [`crdbgorm`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbgorm)
(includes client-side transaction retry handling)
N/A
N/A | [Build a Go App with CockroachDB (GORM)](build-a-go-app-with-cockroachdb-gorm.html)


N/A
[Build a Go App with CockroachDB (upper/db)](build-a-go-app-with-cockroachdb-upperdb.html) | -| Java | [Hibernate](https://hibernate.org/orm/)
(including [Hibernate Spatial](https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html#spatial))
[jOOQ](https://www.jooq.org/)
[MyBatis](https://mybatis.org/mybatis-3/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/hibernate.go ||var supportedHibernateTag = "||"\n\n %} (must be at least 5.4.19)


3.13.2 (must be at least 3.13.0)
3.5.5| Full


Full
Full | N/A


N/A
N/A | [Build a Java App with CockroachDB (Hibernate)](build-a-java-app-with-cockroachdb-hibernate.html)


[Build a Java App with CockroachDB (jOOQ)](build-a-java-app-with-cockroachdb-jooq.html)
[Build a Spring App with CockroachDB (MyBatis)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) | -| JavaScript/TypeScript | [Sequelize](https://www.npmjs.com/package/sequelize)


[Knex.js](https://knexjs.org/)
[Prisma](https://prisma.io)
[TypeORM](https://www.npmjs.com/package/typeorm) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/sequelize.go ||var supportedSequelizeCockroachDBRelease = "||"\n\n %}
(use latest version of CockroachDB adapter)
{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/knex.go ||const supportedKnexTag = "||"\n\n %}
3.14.0
0.3.17 {% comment %}{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/typeorm.go ||const supportedTypeORMRelease = "||"\n %}{% endcomment %} | Full


Full
Full
Full | [`sequelize-cockroachdb`](https://www.npmjs.com/package/sequelize-cockroachdb)


N/A
N/A
N/A | [Build a Node.js App with CockroachDB (Sequelize)](build-a-nodejs-app-with-cockroachdb-sequelize.html)


[Build a Node.js App with CockroachDB (Knex.js)](build-a-nodejs-app-with-cockroachdb-knexjs.html)
[Build a Node.js App with CockroachDB (Prisma)](build-a-nodejs-app-with-cockroachdb-prisma.html)
[Build a TypeScript App with CockroachDB (TypeORM)](build-a-typescript-app-with-cockroachdb.html) | -| Ruby | [ActiveRecord](https://rubygems.org/gems/activerecord)
[RGeo/RGeo-ActiveRecord](https://github.com/cockroachdb/activerecord-cockroachdb-adapter#working-with-spatial-data) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/activerecord.go ||var supportedRailsVersion = "||"\nvar %}
(use latest version of CockroachDB adapter) | Full | [`activerecord-cockroachdb-adapter`](https://rubygems.org/gems/activerecord-cockroachdb-adapter)
(includes client-side transaction retry handling) | [Build a Ruby App with CockroachDB (ActiveRecord)](build-a-ruby-app-with-cockroachdb-activerecord.html) | -| Python | [Django](https://pypi.org/project/Django/)
(including [GeoDjango](https://docs.djangoproject.com/en/3.1/ref/contrib/gis/))
[peewee](https://github.com/coleifer/peewee/)
[SQLAlchemy](https://www.sqlalchemy.org/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/django.go ||var djangoSupportedTag = "cockroach-||"\nvar %}
(use latest version of CockroachDB adapter)

3.13.3
0.7.13
1.4.17
(use latest version of CockroachDB adapter) | Full


Full
Full
Full | [`django-cockroachdb`](https://pypi.org/project/django-cockroachdb/)


N/A
N/A
[`sqlalchemy-cockroachdb`](https://pypi.org/project/sqlalchemy-cockroachdb)
(includes client-side transaction retry handling) | [Build a Python App with CockroachDB (Django)](build-a-python-app-with-cockroachdb-django.html)


N/A (See [peewee docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cockroach-database).)
[Build a Python App with CockroachDB (SQLAlchemy)](build-a-python-app-with-cockroachdb-sqlalchemy.html) | +| Go | [GORM](https://github.com/jinzhu/gorm/releases)


[go-pg](https://github.com/go-pg/pg)
[upper/db](https://github.com/upper/db) | v1.24.1


v10.9.0
v4 | Full


Full
Full | [`crdbgorm`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbgorm)
(includes client-side transaction retry handling)
N/A
N/A | [Build a Go App with CockroachDB (GORM)](build-a-go-app-with-cockroachdb-gorm.html)


N/A
[Build a Go App with CockroachDB (upper/db)](build-a-go-app-with-cockroachdb-upperdb.html) | +| Java | [Hibernate](https://hibernate.org/orm/)
(including [Hibernate Spatial](https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html#spatial))
[jOOQ](https://www.jooq.org/)
[MyBatis](https://mybatis.org/mybatis-3/) | 6.6.20 (must be at least 5.4.19)


3.13.2 (must be at least 3.13.0)
3.5.5| Full


Full
Full | N/A


N/A
N/A | [Build a Java App with CockroachDB (Hibernate)](build-a-java-app-with-cockroachdb-hibernate.html)


[Build a Java App with CockroachDB (jOOQ)](build-a-java-app-with-cockroachdb-jooq.html)
[Build a Spring App with CockroachDB (MyBatis)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) | +| JavaScript/TypeScript | [Sequelize](https://www.npmjs.com/package/sequelize)


[Knex.js](https://knexjs.org/)
[Prisma](https://prisma.io)
[TypeORM](https://www.npmjs.com/package/typeorm) | v6.0.5
(use latest version of CockroachDB adapter)
2.5.1
3.14.0
0.3.17 {% comment %}remove-unsafe-crdb-setting{% endcomment %} | Full


Full
Full
Full | [`sequelize-cockroachdb`](https://www.npmjs.com/package/sequelize-cockroachdb)


N/A
N/A
N/A | [Build a Node.js App with CockroachDB (Sequelize)](build-a-nodejs-app-with-cockroachdb-sequelize.html)


[Build a Node.js App with CockroachDB (Knex.js)](build-a-nodejs-app-with-cockroachdb-knexjs.html)
[Build a Node.js App with CockroachDB (Prisma)](build-a-nodejs-app-with-cockroachdb-prisma.html)
[Build a TypeScript App with CockroachDB (TypeORM)](build-a-typescript-app-with-cockroachdb.html) | +| Ruby | [ActiveRecord](https://rubygems.org/gems/activerecord)
[RGeo/RGeo-ActiveRecord](https://github.com/cockroachdb/activerecord-cockroachdb-adapter#working-with-spatial-data) | 8.1.0
(use latest version of CockroachDB adapter) | Full | [`activerecord-cockroachdb-adapter`](https://rubygems.org/gems/activerecord-cockroachdb-adapter)
(includes client-side transaction retry handling) | [Build a Ruby App with CockroachDB (ActiveRecord)](build-a-ruby-app-with-cockroachdb-activerecord.html) | +| Python | [Django](https://pypi.org/project/Django/)
(including [GeoDjango](https://docs.djangoproject.com/en/3.1/ref/contrib/gis/))
[peewee](https://github.com/coleifer/peewee/)
[SQLAlchemy](https://www.sqlalchemy.org/) | 4.1.x
(use latest version of CockroachDB adapter)

3.13.3
0.7.13
1.4.17
(use latest version of CockroachDB adapter) | Full


Full
Full
Full | [`django-cockroachdb`](https://pypi.org/project/django-cockroachdb/)


N/A
N/A
[`sqlalchemy-cockroachdb`](https://pypi.org/project/sqlalchemy-cockroachdb)
(includes client-side transaction retry handling) | [Build a Python App with CockroachDB (Django)](build-a-python-app-with-cockroachdb-django.html)


N/A (See [peewee docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cockroach-database).)
[Build a Python App with CockroachDB (SQLAlchemy)](build-a-python-app-with-cockroachdb-sqlalchemy.html) | ## Application frameworks diff --git a/src/current/_includes/v24.3/orchestration/start-cockroachdb-insecure.md b/src/current/_includes/v24.3/orchestration/start-cockroachdb-insecure.md index 3406d48edbb..c9af5cc0267 100644 --- a/src/current/_includes/v24.3/orchestration/start-cockroachdb-insecure.md +++ b/src/current/_includes/v24.3/orchestration/start-cockroachdb-insecure.md @@ -1,10 +1,10 @@ -1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it. +1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it. - Download [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml): + Download [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml ~~~ {{site.data.alerts.callout_info}} @@ -27,11 +27,11 @@ Alternatively, if you'd rather start with a configuration file that has been customized for performance: - 1. 
Download our [performance version of `cockroachdb-statefulset-insecure.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml): + 1. Download our [performance version of `cockroachdb-statefulset-insecure.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml ~~~ 1. Modify the file wherever there is a `TODO` comment. @@ -72,12 +72,12 @@ pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s ~~~ -1. Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: +1. 
Use our [`cluster-init.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml ~~~ ~~~ diff --git a/src/current/_includes/v24.3/orchestration/start-cockroachdb-local-insecure.md b/src/current/_includes/v24.3/orchestration/start-cockroachdb-local-insecure.md index 552cb3cd25f..196a53bce4c 100644 --- a/src/current/_includes/v24.3/orchestration/start-cockroachdb-local-insecure.md +++ b/src/current/_includes/v24.3/orchestration/start-cockroachdb-local-insecure.md @@ -1,8 +1,8 @@ -1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it: +1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it: {% include_cached copy-clipboard.html %} ~~~ shell - $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml + $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml ~~~ ~~~ @@ -41,12 +41,12 @@ pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s ~~~ -1. 
Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: +1. Use our [`cluster-init.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml ~~~ ~~~ diff --git a/src/current/_includes/v24.3/orchestration/start-cockroachdb-secure.md b/src/current/_includes/v24.3/orchestration/start-cockroachdb-secure.md index 972cabc2d8e..a7299e1aa25 100644 --- a/src/current/_includes/v24.3/orchestration/start-cockroachdb-secure.md +++ b/src/current/_includes/v24.3/orchestration/start-cockroachdb-secure.md @@ -1,10 +1,10 @@ ### Configure the cluster -1. Download and modify our [StatefulSet configuration](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml): +1. Download and modify our [StatefulSet configuration](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml ~~~ 1. Update `secretName` with the name of the corresponding node secret. 
diff --git a/src/current/_includes/v24.3/orchestration/test-cluster-secure.md b/src/current/_includes/v24.3/orchestration/test-cluster-secure.md index f255d8d62fc..c146d634626 100644 --- a/src/current/_includes/v24.3/orchestration/test-cluster-secure.md +++ b/src/current/_includes/v24.3/orchestration/test-cluster-secure.md @@ -42,7 +42,7 @@ $ kubectl create \ {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ --f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/client.yaml +-f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/client.yaml ~~~ ~~~ diff --git a/src/current/_includes/v24.3/prod-deployment/decommission-pre-flight-checks.md b/src/current/_includes/v24.3/prod-deployment/decommission-pre-flight-checks.md index b267379384b..70e35e4b72e 100644 --- a/src/current/_includes/v24.3/prod-deployment/decommission-pre-flight-checks.md +++ b/src/current/_includes/v24.3/prod-deployment/decommission-pre-flight-checks.md @@ -14,5 +14,5 @@ Failed running "node decommission" These checks can be skipped by [passing the flag `--checks=skip` to `cockroach node decommission`]({% link {{ page.version.version }}/cockroach-node.md %}#decommission-checks). {{site.data.alerts.callout_info}} -The amount of remaining disk space on other nodes in the cluster is not yet considered as part of the decommissioning pre-flight checks. For more information, see [cockroachdb/cockroach#71757](https://github.com/cockroachdb/cockroach/issues/71757) +The amount of remaining disk space on other nodes in the cluster is not yet considered as part of the decommissioning pre-flight checks. 
For more information, see cockroachdb/cockroach#71757 {{site.data.alerts.end}} diff --git a/src/current/_includes/v24.3/sql/savepoints-and-high-priority-transactions.md b/src/current/_includes/v24.3/sql/savepoints-and-high-priority-transactions.md index c6de489e641..43f09ea415f 100644 --- a/src/current/_includes/v24.3/sql/savepoints-and-high-priority-transactions.md +++ b/src/current/_includes/v24.3/sql/savepoints-and-high-priority-transactions.md @@ -1 +1 @@ -[`ROLLBACK TO SAVEPOINT`]({% link {{ page.version.version }}/rollback-transaction.md %}#rollback-a-nested-transaction) (for either regular savepoints or "restart savepoints" defined with `cockroach_restart`) causes a "feature not supported" error after a DDL statement in a [`HIGH PRIORITY` transaction]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities), in order to avoid a transaction deadlock. For more information, see GitHub issue [#46414](https://www.github.com/cockroachdb/cockroach/issues/46414). +[`ROLLBACK TO SAVEPOINT`]({% link {{ page.version.version }}/rollback-transaction.md %}#rollback-a-nested-transaction) (for either regular savepoints or "restart savepoints" defined with `cockroach_restart`) causes a "feature not supported" error after a DDL statement in a [`HIGH PRIORITY` transaction]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities), in order to avoid a transaction deadlock. For more information, see GitHub issue #46414. diff --git a/src/current/_includes/v24.3/sql/unsupported-postgres-features.md b/src/current/_includes/v24.3/sql/unsupported-postgres-features.md index a89650e38e9..9ad89047189 100644 --- a/src/current/_includes/v24.3/sql/unsupported-postgres-features.md +++ b/src/current/_includes/v24.3/sql/unsupported-postgres-features.md @@ -1,10 +1,10 @@ ### `CREATE DOMAIN` -CockroachDB does not support `CREATE DOMAIN`. Tracking issue: [cockroachdb/cockroach#108659](https://github.com/cockroachdb/cockroach/issues/108659). 
+CockroachDB does not support `CREATE DOMAIN`. Tracking issue: cockroachdb/cockroach#108659. ### PostgreSQL range types -CockroachDB does not support PostgreSQL range types. Tracking issue: [cockroachdb/cockroach#128638](https://github.com/cockroachdb/cockroach/issues/128638). +CockroachDB does not support PostgreSQL range types. Tracking issue: cockroachdb/cockroach#128638. ### Other unsupported features diff --git a/src/current/_includes/v25.1/backward-incompatible/alpha.1.md b/src/current/_includes/v25.1/backward-incompatible/alpha.1.md index ef251b7cf5b..710c420079d 100644 --- a/src/current/_includes/v25.1/backward-incompatible/alpha.1.md +++ b/src/current/_includes/v25.1/backward-incompatible/alpha.1.md @@ -1,15 +1,15 @@ -- CockroachDB no longer performs environment variable expansion in the parameter `--certs-dir`. Uses like `--certs-dir='$HOME/path'` (expansion by CockroachDB) can be replaced by `--certs-dir="$HOME/path"` (expansion by the Unix shell). [#81298][#81298] -- In the Cockroach CLI, [`BOOL` values](../v23.1/bool.html) are now formatted as `t` or `f` instead of `True` or `False`. [#81943][#81943] -- Removed the `cockroach quit` command. It has been deprecated since v20.1. To [shut down a node](../v23.1/node-shutdown.html) gracefully, send a `SIGTERM` signal to it. [#82988][#82988] -- Added a cluster version to allow the [Pebble storage engine](../v23.1/architecture/storage-layer.html#pebble) to recombine certain SSTables (specifically, user keys that are split across multiple files in a level of the [log-structured merge-tree](../v23.1/architecture/storage-layer.html#log-structured-merge-trees)). Recombining the split user keys is required for supporting the range keys feature. The migration to recombine the SSTables is expected to be short (split user keys are rare in practice), but will block subsequent migrations until all tables have been recombined. 
The `storage.marked-for-compaction-files` time series metric can show the progress of the migration. [#84887][#84887] +- CockroachDB no longer performs environment variable expansion in the parameter `--certs-dir`. Uses like `--certs-dir='$HOME/path'` (expansion by CockroachDB) can be replaced by `--certs-dir="$HOME/path"` (expansion by the Unix shell). #81298 +- In the Cockroach CLI, [`BOOL` values](../v23.1/bool.html) are now formatted as `t` or `f` instead of `True` or `False`. #81943 +- Removed the `cockroach quit` command. It has been deprecated since v20.1. To [shut down a node](../v23.1/node-shutdown.html) gracefully, send a `SIGTERM` signal to it. #82988 +- Added a cluster version to allow the [Pebble storage engine](../v23.1/architecture/storage-layer.html#pebble) to recombine certain SSTables (specifically, user keys that are split across multiple files in a level of the [log-structured merge-tree](../v23.1/architecture/storage-layer.html#log-structured-merge-trees)). Recombining the split user keys is required for supporting the range keys feature. The migration to recombine the SSTables is expected to be short (split user keys are rare in practice), but will block subsequent migrations until all tables have been recombined. The `storage.marked-for-compaction-files` time series metric can show the progress of the migration. #84887 - Using a single TCP port listener for both RPC (node-node) and SQL client connections is now deprecated. This capability **will be removed** in the next version of CockroachDB. Instead, make one of the following configuration changes to your CockroachDB deployment: - Preferred: keep port `26257` for SQL, and allocate a new port, e.g., `26357`, for node-node RPC connections. For example, you might configure a node with the flags `--listen-addr=:26357 --sql-addr=:26257`, where subsequent nodes seeking to join would then use the flag `--join=othernode:26357,othernode:26257`. 
This will become the default configuration in the next version of CockroachDB. When using this mode of operation, care should be taken to use a `--join` flag that includes both the previous and new port numbers for other nodes, so that no network partition occurs during the upgrade. - - Optional: keep port `26257` for RPC, and allocate a new port, e.g., `26357`, for SQL connections. For example, you might configure a node with the flags `--listen-addr=:26257 --sql-addr=:26357`. When using this mode of operation, the `--join` flags do not need to be modified. However, SQL client apps or the SQL load balancer configuration (when in use) must be updated to use the new SQL port number. [#85671][#85671] -- If no `nullif` option is specified while using [`IMPORT CSV`](../v23.1/import.html), then a zero-length string in the input is now treated as `NULL`. The quoted empty string in the input is treated as an empty string. Similarly, if `nullif` is specified, then an unquoted value is treated as `NULL`, and a quoted value is treated as that string. These changes were made to make `IMPORT CSV` behave more similarly to `COPY CSV`. If the previous behavior (i.e., treating either quoted or unquoted values that match the `nullif` setting as `NULL`) is desired, you can use the new `allow_quoted_null` option in the `IMPORT` statement. [#84487][#84487] -- [`COPY FROM`](../v23.1/copy.html) operations are now atomic by default instead of being segmented into 100 row transactions. Set the `copy_from_atomic_enabled` session setting to `false` for the previous behavior. [#85986][#85986] -- The `GRANT` privilege has been removed and replaced by the more granular [`WITH GRANT OPTION`]({% link v25.1/grant.md %}#grant-privileges-with-the-option-to-grant-to-others), which provides control over which privileges are allowed to be granted. [#81310][#81310] -- Removed the ability to cast `int`, `int2`, and `int8` to a `0` length `BIT` or `VARBIT`. 
[#81266][#81266] -- Removed the deprecated `GRANT` privilege. [#81310][#81310] -- Removed the `ttl_automatic_column` storage parameter. The `crdb_internal_expiration` column is created when `ttl_expire_after` is set and removed when `ttl_expire_after` is reset. [#83134][#83134] -- Removed the byte string parameter in the `crdb_internal.schedule_sql_stats_compaction` function. [#82560][#82560] -- Changed the default value of the `enable_implicit_transaction_for_batch_statements` to `true`. This means that a [batch of statements]({% link v25.1/transactions.md %}#batched-statements) sent in one string separated by semicolons is treated as an implicit transaction. [#76834][#76834] + - Optional: keep port `26257` for RPC, and allocate a new port, e.g., `26357`, for SQL connections. For example, you might configure a node with the flags `--listen-addr=:26257 --sql-addr=:26357`. When using this mode of operation, the `--join` flags do not need to be modified. However, SQL client apps or the SQL load balancer configuration (when in use) must be updated to use the new SQL port number. #85671 +- If no `nullif` option is specified while using [`IMPORT CSV`](../v23.1/import.html), then a zero-length string in the input is now treated as `NULL`. The quoted empty string in the input is treated as an empty string. Similarly, if `nullif` is specified, then an unquoted value is treated as `NULL`, and a quoted value is treated as that string. These changes were made to make `IMPORT CSV` behave more similarly to `COPY CSV`. If the previous behavior (i.e., treating either quoted or unquoted values that match the `nullif` setting as `NULL`) is desired, you can use the new `allow_quoted_null` option in the `IMPORT` statement. #84487 +- [`COPY FROM`](../v23.1/copy.html) operations are now atomic by default instead of being segmented into 100 row transactions. Set the `copy_from_atomic_enabled` session setting to `false` for the previous behavior. 
#85986 +- The `GRANT` privilege has been removed and replaced by the more granular [`WITH GRANT OPTION`]({% link v25.1/grant.md %}#grant-privileges-with-the-option-to-grant-to-others), which provides control over which privileges are allowed to be granted. #81310 +- Removed the ability to cast `int`, `int2`, and `int8` to a `0` length `BIT` or `VARBIT`. #81266 +- Removed the deprecated `GRANT` privilege. #81310 +- Removed the `ttl_automatic_column` storage parameter. The `crdb_internal_expiration` column is created when `ttl_expire_after` is set and removed when `ttl_expire_after` is reset. #83134 +- Removed the byte string parameter in the `crdb_internal.schedule_sql_stats_compaction` function. #82560 +- Changed the default value of the `enable_implicit_transaction_for_batch_statements` to `true`. This means that a [batch of statements]({% link v25.1/transactions.md %}#batched-statements) sent in one string separated by semicolons is treated as an implicit transaction. #76834 diff --git a/src/current/_includes/v25.1/cdc/avro-udt-composite.md b/src/current/_includes/v25.1/cdc/avro-udt-composite.md index 7a34fbd3253..8374c4d2baf 100644 --- a/src/current/_includes/v25.1/cdc/avro-udt-composite.md +++ b/src/current/_includes/v25.1/cdc/avro-udt-composite.md @@ -1 +1 @@ -A changefeed in [Avro format]({% link {{ page.version.version }}/changefeed-messages.md %}#avro) will not be able to serialize [user-defined composite (tuple) types](create-type.html). [#102903](https://github.com/cockroachdb/cockroach/issues/102903) \ No newline at end of file +A changefeed in [Avro format]({% link {{ page.version.version }}/changefeed-messages.md %}#avro) will not be able to serialize [user-defined composite (tuple) types](create-type.html). 
#102903 \ No newline at end of file diff --git a/src/current/_includes/v25.1/cdc/csv-udt-composite.md b/src/current/_includes/v25.1/cdc/csv-udt-composite.md index 834bddd8366..39c88f3e06a 100644 --- a/src/current/_includes/v25.1/cdc/csv-udt-composite.md +++ b/src/current/_includes/v25.1/cdc/csv-udt-composite.md @@ -1 +1 @@ -A changefeed emitting [CSV]({% link {{ page.version.version }}/changefeed-messages.md %}#csv) will include `AS` labels in the message format when the changefeed serializes a [user-defined composite type]({% link {{ page.version.version }}/create-type.md %}). [#102905](https://github.com/cockroachdb/cockroach/issues/102905) \ No newline at end of file +A changefeed emitting [CSV]({% link {{ page.version.version }}/changefeed-messages.md %}#csv) will include `AS` labels in the message format when the changefeed serializes a [user-defined composite type]({% link {{ page.version.version }}/create-type.md %}). #102905 \ No newline at end of file diff --git a/src/current/_includes/v25.1/essential-metrics.md b/src/current/_includes/v25.1/essential-metrics.md index d3acb380d44..58f88d9ca87 100644 --- a/src/current/_includes/v25.1/essential-metrics.md +++ b/src/current/_includes/v25.1/essential-metrics.md @@ -201,4 +201,4 @@ If [Row-Level TTL]({% link {{ page.version.version }}/row-level-ttl.md %}) is co - [Custom Chart Debug Page]({% link {{ page.version.version }}/ui-custom-chart-debug-page.md %}) - [Cluster API]({% link {{ page.version.version }}/cluster-api.md %}) - [Essential Alerts]({% link {{ page.version.version }}/essential-alerts-{{ include.deployment}}.md %}) -- [CockroachDB Source Code - DB Console metrics to graphs mappings (in *.tsx files)](https://github.com/cockroachdb/cockroach/tree/master/pkg/ui/workspaces/db-console/src/views/cluster/containers/nodeGraphs/dashboards) +- CockroachDB Source Code - DB Console metrics to graphs mappings (in *.tsx files) diff --git a/src/current/_includes/v25.1/faq/what-is-crdb.md 
b/src/current/_includes/v25.1/faq/what-is-crdb.md index 28857ed61fa..ee04ca10506 100644 --- a/src/current/_includes/v25.1/faq/what-is-crdb.md +++ b/src/current/_includes/v25.1/faq/what-is-crdb.md @@ -1,6 +1,6 @@ CockroachDB is a [distributed SQL](https://www.cockroachlabs.com/blog/what-is-distributed-sql/) database built on a transactional and strongly-consistent key-value store. It **scales** horizontally; **survives** disk, machine, rack, and even datacenter failures with minimal latency disruption and no manual intervention; supports **strongly-consistent** ACID transactions; and provides a familiar **SQL** API for structuring, manipulating, and querying data. -CockroachDB is inspired by Google's [Spanner](http://research.google.com/archive/spanner.html) and [F1](http://research.google.com/pubs/pub38125.html) technologies, and the [source code](https://github.com/cockroachdb/cockroach) is freely available. +CockroachDB is inspired by Google's [Spanner](http://research.google.com/archive/spanner.html) and [F1](http://research.google.com/pubs/pub38125.html) technologies, and the source code is freely available. {{site.data.alerts.callout_success}} For a deeper dive into CockroachDB's capabilities and how it fits into the database landscape, take the free [**Intro to Distributed SQL and CockroachDB**](https://university.cockroachlabs.com/courses/course-v1:crl+intro-to-distributed-sql-and-cockroachdb+self-paced/about) course on Cockroach University. 
diff --git a/src/current/_includes/v25.1/finalization-required/119894.md b/src/current/_includes/v25.1/finalization-required/119894.md index f2b393c3c0e..f5177dcd523 100644 --- a/src/current/_includes/v25.1/finalization-required/119894.md +++ b/src/current/_includes/v25.1/finalization-required/119894.md @@ -1 +1 @@ -[Splits](https://cockroachlabs.com/docs/{{ include.version }}/architecture/distribution-layer#range-splits) no longer hold [latches](https://cockroachlabs.com/docs/architecture/distribution-layer.#latch-manager) for time proportional to the range size while computing [MVCC](https://cockroachlabs.com/docs/{{ include.version }}/architecture/storage-layer#mvcc) statistics. Instead, MVCC statistics are pre-computed before the critical section of the split. As a side effect, the resulting statistics are no longer 100% accurate because they may correctly distribute writes concurrent with the split. To mitigate against this potential inaccuracy, and to prevent the statistics from drifting after successive splits, the existing stored statistics are re-computed and corrected if needed during the non-critical section of the split. [#119894](https://github.com/cockroachdb/cockroach/pull/119894) +[Splits](https://cockroachlabs.com/docs/{{ include.version }}/architecture/distribution-layer#range-splits) no longer hold [latches](https://cockroachlabs.com/docs/architecture/distribution-layer.#latch-manager) for time proportional to the range size while computing [MVCC](https://cockroachlabs.com/docs/{{ include.version }}/architecture/storage-layer#mvcc) statistics. Instead, MVCC statistics are pre-computed before the critical section of the split. As a side effect, the resulting statistics are no longer 100% accurate because they may correctly distribute writes concurrent with the split. 
To mitigate against this potential inaccuracy, and to prevent the statistics from drifting after successive splits, the existing stored statistics are re-computed and corrected if needed during the non-critical section of the split. #119894 diff --git a/src/current/_includes/v25.1/known-limitations/alter-changefeed-cdc-queries.md b/src/current/_includes/v25.1/known-limitations/alter-changefeed-cdc-queries.md index 56dd7eeaacd..4134b3c9f86 100644 --- a/src/current/_includes/v25.1/known-limitations/alter-changefeed-cdc-queries.md +++ b/src/current/_includes/v25.1/known-limitations/alter-changefeed-cdc-queries.md @@ -1 +1 @@ -{% if page.name == "alter-changefeed.md" %} `ALTER CHANGEFEED` {% else %} [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) {% endif %} is not fully supported with changefeeds that use {% if page.name == "cdc-queries.md" %} CDC queries. {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}). {% endif %} You can alter the options that a changefeed uses, but you cannot alter the changefeed target tables. [#83033](https://github.com/cockroachdb/cockroach/issues/83033) \ No newline at end of file +{% if page.name == "alter-changefeed.md" %} `ALTER CHANGEFEED` {% else %} [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) {% endif %} is not fully supported with changefeeds that use {% if page.name == "cdc-queries.md" %} CDC queries. {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}). {% endif %} You can alter the options that a changefeed uses, but you cannot alter the changefeed target tables. 
#83033 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/alter-changefeed-limitations.md b/src/current/_includes/v25.1/known-limitations/alter-changefeed-limitations.md index a183f2964f4..cb75ad71c55 100644 --- a/src/current/_includes/v25.1/known-limitations/alter-changefeed-limitations.md +++ b/src/current/_includes/v25.1/known-limitations/alter-changefeed-limitations.md @@ -1,4 +1,4 @@ -- It is necessary to [`PAUSE`]({% link {{ page.version.version }}/pause-job.md %}) the changefeed before performing any [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) statement. [#77171](https://github.com/cockroachdb/cockroach/issues/77171) +- It is necessary to [`PAUSE`]({% link {{ page.version.version }}/pause-job.md %}) the changefeed before performing any [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) statement. #77171 - CockroachDB does not keep track of the [`initial_scan`]({% link {{ page.version.version }}/create-changefeed.md %}#initial-scan) option applied to tables when it is set to `yes` or `only`. For example: ~~~ sql diff --git a/src/current/_includes/v25.1/known-limitations/alter-view-limitations.md b/src/current/_includes/v25.1/known-limitations/alter-view-limitations.md index 642bed6ce08..e44921629ff 100644 --- a/src/current/_includes/v25.1/known-limitations/alter-view-limitations.md +++ b/src/current/_includes/v25.1/known-limitations/alter-view-limitations.md @@ -1,4 +1,4 @@ `ALTER VIEW` does not currently support: - Changing the [`SELECT`]({% link {{ page.version.version }}/select-clause.md %}) statement executed by a view. Instead, you must drop the existing view and create a new view. -- Renaming a view that other views depend on. This feature may be added in the future. [#10083](https://github.com/cockroachdb/cockroach/issues/10083) \ No newline at end of file +- Renaming a view that other views depend on. This feature may be added in the future. 
#10083 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/aost-limitations.md b/src/current/_includes/v25.1/known-limitations/aost-limitations.md index 811c884d08d..c42e62800d3 100644 --- a/src/current/_includes/v25.1/known-limitations/aost-limitations.md +++ b/src/current/_includes/v25.1/known-limitations/aost-limitations.md @@ -1 +1 @@ -CockroachDB does not support placeholders in {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %}. The time value must be a constant value embedded in the SQL string. [#30955](https://github.com/cockroachdb/cockroach/issues/30955) \ No newline at end of file +CockroachDB does not support placeholders in {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %}. The time value must be a constant value embedded in the SQL string. 
#30955 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/cannot-refresh-materialized-views-inside-transactions.md b/src/current/_includes/v25.1/known-limitations/cannot-refresh-materialized-views-inside-transactions.md index b0aaf728177..2f6fe466a95 100644 --- a/src/current/_includes/v25.1/known-limitations/cannot-refresh-materialized-views-inside-transactions.md +++ b/src/current/_includes/v25.1/known-limitations/cannot-refresh-materialized-views-inside-transactions.md @@ -24,4 +24,4 @@ SQLSTATE: 25000 ~~~ - [#66008](https://github.com/cockroachdb/cockroach/issues/66008) + #66008 diff --git a/src/current/_includes/v25.1/known-limitations/cdc-queries-column-families.md b/src/current/_includes/v25.1/known-limitations/cdc-queries-column-families.md index 505a8c9700e..52bdabec236 100644 --- a/src/current/_includes/v25.1/known-limitations/cdc-queries-column-families.md +++ b/src/current/_includes/v25.1/known-limitations/cdc-queries-column-families.md @@ -1 +1 @@ -Creating a changefeed with {% if page.name == "cdc-queries.md" %} CDC queries {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}) {% endif %} on tables with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %} is not supported. [#127761](https://github.com/cockroachdb/cockroach/issues/127761) \ No newline at end of file +Creating a changefeed with {% if page.name == "cdc-queries.md" %} CDC queries {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}) {% endif %} on tables with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %} is not supported. 
#127761 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/cdc-queries.md b/src/current/_includes/v25.1/known-limitations/cdc-queries.md index 2839eba5eda..552900531b0 100644 --- a/src/current/_includes/v25.1/known-limitations/cdc-queries.md +++ b/src/current/_includes/v25.1/known-limitations/cdc-queries.md @@ -3,5 +3,5 @@ - The following are not permitted in CDC queries: - [Volatile functions]({% link {{ page.version.version }}/functions-and-operators.md %}#function-volatility). - Sub-select queries. - - [Aggregate]({% link {{ page.version.version }}/functions-and-operators.md %}#aggregate-functions) and [window functions]({% link {{ page.version.version }}/window-functions.md %}) (i.e., functions operating over many rows). [#98237](https://github.com/cockroachdb/cockroach/issues/98237) -- `delete` changefeed events will only contain the [primary key]({% link {{ page.version.version }}/primary-key.md %}). All other columns will emit as `NULL`. See [Capture delete messages]({% link {{ page.version.version }}/cdc-queries.md %}#capture-delete-messages) for detail on running a CDC query that emits the deleted values. [#83835](https://github.com/cockroachdb/cockroach/issues/83835) + - [Aggregate]({% link {{ page.version.version }}/functions-and-operators.md %}#aggregate-functions) and [window functions]({% link {{ page.version.version }}/window-functions.md %}) (i.e., functions operating over many rows). #98237 +- `delete` changefeed events will only contain the [primary key]({% link {{ page.version.version }}/primary-key.md %}). All other columns will emit as `NULL`. See [Capture delete messages]({% link {{ page.version.version }}/cdc-queries.md %}#capture-delete-messages) for detail on running a CDC query that emits the deleted values. 
#83835 diff --git a/src/current/_includes/v25.1/known-limitations/cdc.md b/src/current/_includes/v25.1/known-limitations/cdc.md index a473e94367c..0a3914da8bd 100644 --- a/src/current/_includes/v25.1/known-limitations/cdc.md +++ b/src/current/_includes/v25.1/known-limitations/cdc.md @@ -1,8 +1,8 @@ -- Changefeed target options are limited to tables and [column families]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}). [#73435](https://github.com/cockroachdb/cockroach/issues/73435) +- Changefeed target options are limited to tables and [column families]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}). #73435 - {% include {{page.version.version}}/cdc/kafka-vpc-limitation.md %} -- Webhook sinks only support HTTPS. Use the [`insecure_tls_skip_verify`]({% link {{ page.version.version }}/create-changefeed.md %}#insecure-tls-skip-verify) parameter when testing to disable certificate verification; however, this still requires HTTPS and certificates. [#73431](https://github.com/cockroachdb/cockroach/issues/73431) -- Formats for changefeed messages are not supported by all changefeed sinks. Refer to the [Changefeed Sinks]({% link {{ page.version.version }}/changefeed-sinks.md %}) page for details on compatible formats with each sink and the [`format`]({% link {{ page.version.version }}/create-changefeed.md %}) option to specify a changefeed message format. [#73432](https://github.com/cockroachdb/cockroach/issues/73432) -- Using the [`split_column_families`]({% link {{ page.version.version }}/create-changefeed.md %}#split-column-families) and [`resolved`]({% link {{ page.version.version }}/create-changefeed.md %}#resolved) options on the same changefeed will cause an error when using the following [sinks](changefeed-sinks.html): Kafka and Google Cloud Pub/Sub. Instead, use the individual `FAMILY` keyword to specify column families when creating a changefeed. 
[#79452](https://github.com/cockroachdb/cockroach/issues/79452) +- Webhook sinks only support HTTPS. Use the [`insecure_tls_skip_verify`]({% link {{ page.version.version }}/create-changefeed.md %}#insecure-tls-skip-verify) parameter when testing to disable certificate verification; however, this still requires HTTPS and certificates. #73431 +- Formats for changefeed messages are not supported by all changefeed sinks. Refer to the [Changefeed Sinks]({% link {{ page.version.version }}/changefeed-sinks.md %}) page for details on compatible formats with each sink and the [`format`]({% link {{ page.version.version }}/create-changefeed.md %}) option to specify a changefeed message format. #73432 +- Using the [`split_column_families`]({% link {{ page.version.version }}/create-changefeed.md %}#split-column-families) and [`resolved`]({% link {{ page.version.version }}/create-changefeed.md %}#resolved) options on the same changefeed will cause an error when using the following [sinks](changefeed-sinks.html): Kafka and Google Cloud Pub/Sub. Instead, use the individual `FAMILY` keyword to specify column families when creating a changefeed. 
#79452 - {% include {{page.version.version}}/cdc/types-udt-composite-general.md %} The following limitations apply: - {% include {{page.version.version}}/cdc/avro-udt-composite.md %} - {% include {{page.version.version}}/cdc/csv-udt-composite.md %} \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/changefeed-column-family-message.md b/src/current/_includes/v25.1/known-limitations/changefeed-column-family-message.md index 41744b9b4b4..e77b6cc51e0 100644 --- a/src/current/_includes/v25.1/known-limitations/changefeed-column-family-message.md +++ b/src/current/_includes/v25.1/known-limitations/changefeed-column-family-message.md @@ -1 +1 @@ -When you create a changefeed on a table with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %}, the changefeed will emit messages per column family in separate streams. As a result, [changefeed messages]({% link {{ page.version.version }}/changefeed-messages.md %}) for different column families will arrive at the [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) under separate topics. [#127736](https://github.com/cockroachdb/cockroach/issues/127736) \ No newline at end of file +When you create a changefeed on a table with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %}, the changefeed will emit messages per column family in separate streams. As a result, [changefeed messages]({% link {{ page.version.version }}/changefeed-messages.md %}) for different column families will arrive at the [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) under separate topics. 
#127736 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/copy-syntax.md b/src/current/_includes/v25.1/known-limitations/copy-syntax.md index e64a075dcac..bd2e56e1c25 100644 --- a/src/current/_includes/v25.1/known-limitations/copy-syntax.md +++ b/src/current/_includes/v25.1/known-limitations/copy-syntax.md @@ -1,5 +1,5 @@ CockroachDB does not yet support the following `COPY` syntax: - - `COPY ... WITH FREEZE`. [#85573](https://github.com/cockroachdb/cockroach/issues/85573) - - `COPY ... WITH QUOTE`. [#85574](https://github.com/cockroachdb/cockroach/issues/85574) - - `COPY ... FROM ... WHERE `. [#54580](https://github.com/cockroachdb/cockroach/issues/54580) + - `COPY ... WITH FREEZE`. #85573 + - `COPY ... WITH QUOTE`. #85574 + - `COPY ... FROM ... WHERE `. #54580 diff --git a/src/current/_includes/v25.1/known-limitations/create-statistics-aost-limitation.md b/src/current/_includes/v25.1/known-limitations/create-statistics-aost-limitation.md index 09f86f51c48..d318ce338be 100644 --- a/src/current/_includes/v25.1/known-limitations/create-statistics-aost-limitation.md +++ b/src/current/_includes/v25.1/known-limitations/create-statistics-aost-limitation.md @@ -1 +1 @@ -The `ANALYZE` alias {% if page.name != "create-statistics.md" %}of [`CREATE STATISTICS`]({% link {{ page.version.version }}/create-statistics.md %}){% endif %} does not support specifying an {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %} timestamp. `ANALYZE` statements use `AS OF SYSTEM TIME '-0.001ms'` automatically. For more control over the statistics interval, use the `CREATE STATISTICS` syntax instead. 
[#96430](https://github.com/cockroachdb/cockroach/issues/96430) \ No newline at end of file +The `ANALYZE` alias {% if page.name != "create-statistics.md" %}of [`CREATE STATISTICS`]({% link {{ page.version.version }}/create-statistics.md %}){% endif %} does not support specifying an {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %} timestamp. `ANALYZE` statements use `AS OF SYSTEM TIME '-0.001ms'` automatically. For more control over the statistics interval, use the `CREATE STATISTICS` syntax instead. #96430 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/drop-column-partial-index.md b/src/current/_includes/v25.1/known-limitations/drop-column-partial-index.md index 9fd1811cc43..fd28f0a96a2 100644 --- a/src/current/_includes/v25.1/known-limitations/drop-column-partial-index.md +++ b/src/current/_includes/v25.1/known-limitations/drop-column-partial-index.md @@ -1 +1 @@ -CockroachDB prevents a column from being dropped using [`ALTER TABLE ... DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) if it is referenced by a partial index predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). [#97813](https://github.com/cockroachdb/cockroach/issues/97813). \ No newline at end of file +CockroachDB prevents a column from being dropped using [`ALTER TABLE ... DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) if it is referenced by a partial index predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). #97813. 
\ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/drop-owned-by-limitations.md b/src/current/_includes/v25.1/known-limitations/drop-owned-by-limitations.md index 95685f6adf1..af99d4b7cfe 100644 --- a/src/current/_includes/v25.1/known-limitations/drop-owned-by-limitations.md +++ b/src/current/_includes/v25.1/known-limitations/drop-owned-by-limitations.md @@ -10,4 +10,4 @@ The phrase "synthetic privileges" in the error message refers to [system-level privileges]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges). - The workaround is to use [`SHOW SYSTEM GRANTS FOR {role}`](show-system-grants.html) and then use [`REVOKE SYSTEM ...`](revoke.html#revoke-system-level-privileges-on-the-entire-cluster) for each privilege in the result. [#88149](https://github.com/cockroachdb/cockroach/issues/88149) \ No newline at end of file + The workaround is to use [`SHOW SYSTEM GRANTS FOR {role}`](show-system-grants.html) and then use [`REVOKE SYSTEM ...`](revoke.html#revoke-system-level-privileges-on-the-entire-cluster) for each privilege in the result. #88149 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/drop-trigger-limitations.md b/src/current/_includes/v25.1/known-limitations/drop-trigger-limitations.md index 90745f7e17a..2a633ccb369 100644 --- a/src/current/_includes/v25.1/known-limitations/drop-trigger-limitations.md +++ b/src/current/_includes/v25.1/known-limitations/drop-trigger-limitations.md @@ -1 +1 @@ -[`DROP TRIGGER`]({% link {{ page.version.version }}/drop-trigger.md %}) with `CASCADE` is not supported. [#128151](https://github.com/cockroachdb/cockroach/issues/128151) \ No newline at end of file +[`DROP TRIGGER`]({% link {{ page.version.version }}/drop-trigger.md %}) with `CASCADE` is not supported. 
#128151 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/expression-index-limitations.md b/src/current/_includes/v25.1/known-limitations/expression-index-limitations.md index c0e94185948..9487028791c 100644 --- a/src/current/_includes/v25.1/known-limitations/expression-index-limitations.md +++ b/src/current/_includes/v25.1/known-limitations/expression-index-limitations.md @@ -1,6 +1,6 @@ - The expression cannot reference columns outside the index's table. - Functional expression output must be determined by the input arguments. For example, you can't use the [volatile function]({% link {{ page.version.version }}/functions-and-operators.md %}#function-volatility) `now()` to create an index because its output depends on more than just the function arguments. -- CockroachDB does not allow {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} to reference [computed columns]({% link {{ page.version.version }}/computed-columns.md %}). [#67900](https://github.com/cockroachdb/cockroach/issues/67900) +- CockroachDB does not allow {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} to reference [computed columns]({% link {{ page.version.version }}/computed-columns.md %}). #67900 - CockroachDB does not support expressions as `ON CONFLICT` targets. This means that unique {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} cannot be selected as arbiters for [`INSERT .. ON CONFLICT`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause) statements. 
For example: {% include_cached copy-clipboard.html %} @@ -40,4 +40,4 @@ HINT: try \h INSERT ~~~ - [#67893](https://github.com/cockroachdb/cockroach/issues/67893) + #67893 diff --git a/src/current/_includes/v25.1/known-limitations/forecasted-stats-limitations.md b/src/current/_includes/v25.1/known-limitations/forecasted-stats-limitations.md index c8753124a96..b5d03e8ffc5 100644 --- a/src/current/_includes/v25.1/known-limitations/forecasted-stats-limitations.md +++ b/src/current/_includes/v25.1/known-limitations/forecasted-stats-limitations.md @@ -6,4 +6,4 @@ Although [`SHOW STATISTICS WITH FORECAST`]({% link {{ page.version.version }}/show-statistics.md %}#display-forecasted-statistics) shows the settings taking effect immediately, they do not actually take effect until new statistics are collected (as can be verified with [`EXPLAIN`]({% link {{ page.version.version }}/explain.md %})). - As a workaround, disable and enable forecasting at the [cluster]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-clusters) or [table]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-tables) level. This will invalidate the statistics cache and cause these settings to take effect immediately. [#123852](https://github.com/cockroachdb/cockroach/issues/123852) \ No newline at end of file + As a workaround, disable and enable forecasting at the [cluster]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-clusters) or [table]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-tables) level. This will invalidate the statistics cache and cause these settings to take effect immediately. 
#123852 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/full-text-search-unsupported.md b/src/current/_includes/v25.1/known-limitations/full-text-search-unsupported.md index 7b5a83f2cae..2be666994f1 100644 --- a/src/current/_includes/v25.1/known-limitations/full-text-search-unsupported.md +++ b/src/current/_includes/v25.1/known-limitations/full-text-search-unsupported.md @@ -11,4 +11,4 @@ - `!! tsquery` comparisons. - `tsquery @> tsquery` and `tsquery <@ tsquery` comparisons. -[#41288](https://github.com/cockroachdb/cockroach/issues/41288) \ No newline at end of file +#41288 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/generic-query-plan-limitations.md b/src/current/_includes/v25.1/known-limitations/generic-query-plan-limitations.md index e28e66d5f32..03d72195334 100644 --- a/src/current/_includes/v25.1/known-limitations/generic-query-plan-limitations.md +++ b/src/current/_includes/v25.1/known-limitations/generic-query-plan-limitations.md @@ -1,2 +1,2 @@ -- Because [generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache) use lookup joins instead of the scans and revscans used by custom query plans, generic query plans do not perform as well as custom query plans in some cases. [#128916](https://github.com/cockroachdb/cockroach/issues/128916) -- [Generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-type) are not included in the [plan cache]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache). This means a generic query plan built and optimized for a prepared statement in one session cannot be used by another session. To reuse generic query plans for maximum performance, a prepared statement should be executed multiple times instead of prepared and executed once. 
[#128911](https://github.com/cockroachdb/cockroach/issues/128911) \ No newline at end of file +- Because [generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache) use lookup joins instead of the scans and revscans used by custom query plans, generic query plans do not perform as well as custom query plans in some cases. #128916 +- [Generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-type) are not included in the [plan cache]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache). This means a generic query plan built and optimized for a prepared statement in one session cannot be used by another session. To reuse generic query plans for maximum performance, a prepared statement should be executed multiple times instead of prepared and executed once. #128911 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/ldr-column-families.md b/src/current/_includes/v25.1/known-limitations/ldr-column-families.md index 2a7c3bbba52..0c9dcd74ae4 100644 --- a/src/current/_includes/v25.1/known-limitations/ldr-column-families.md +++ b/src/current/_includes/v25.1/known-limitations/ldr-column-families.md @@ -1 +1 @@ -Replicating tables cannot contain [column families]({% link {{ page.version.version }}/column-families.md %}). [#133562](https://github.com/cockroachdb/cockroach/issues/133562) \ No newline at end of file +Replicating tables cannot contain [column families]({% link {{ page.version.version }}/column-families.md %}). 
#133562 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/ldr-composite-primary.md b/src/current/_includes/v25.1/known-limitations/ldr-composite-primary.md index ac897af35a7..2a790952946 100644 --- a/src/current/_includes/v25.1/known-limitations/ldr-composite-primary.md +++ b/src/current/_includes/v25.1/known-limitations/ldr-composite-primary.md @@ -1 +1 @@ -The [primary key]({% link {{ page.version.version }}/primary-key.md %}) in replicating tables cannot contain composite types. [#133572](https://github.com/cockroachdb/cockroach/issues/133572) \ No newline at end of file +The [primary key]({% link {{ page.version.version }}/primary-key.md %}) in replicating tables cannot contain composite types. #133572 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/ldr-indexes.md b/src/current/_includes/v25.1/known-limitations/ldr-indexes.md index 0bf7f60c2d4..4936f9868bd 100644 --- a/src/current/_includes/v25.1/known-limitations/ldr-indexes.md +++ b/src/current/_includes/v25.1/known-limitations/ldr-indexes.md @@ -1 +1 @@ -Replicating tables cannot contain an [index]({% link {{ page.version.version }}/indexes.md %}) that requires expression evaluation before insertion. [#133560](https://github.com/cockroachdb/cockroach/issues/133560) \ No newline at end of file +Replicating tables cannot contain an [index]({% link {{ page.version.version }}/indexes.md %}) that requires expression evaluation before insertion. #133560 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/ldr-sequences.md b/src/current/_includes/v25.1/known-limitations/ldr-sequences.md index 4e39f3630e3..693c0cc685b 100644 --- a/src/current/_includes/v25.1/known-limitations/ldr-sequences.md +++ b/src/current/_includes/v25.1/known-limitations/ldr-sequences.md @@ -1 +1 @@ -Replicating table cannot reference [sequences]({% link {{ page.version.version }}/create-sequence.md %}). 
[#132303](https://github.com/cockroachdb/cockroach/issues/132303) \ No newline at end of file +Replicating table cannot reference [sequences]({% link {{ page.version.version }}/create-sequence.md %}). #132303 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/ldr-triggers.md b/src/current/_includes/v25.1/known-limitations/ldr-triggers.md index 55f8e885b97..a587568b69a 100644 --- a/src/current/_includes/v25.1/known-limitations/ldr-triggers.md +++ b/src/current/_includes/v25.1/known-limitations/ldr-triggers.md @@ -1 +1 @@ -Replicating tables cannot reference triggers. [#132301](https://github.com/cockroachdb/cockroach/issues/132301) \ No newline at end of file +Replicating tables cannot reference triggers. #132301 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/ldr-udfs.md b/src/current/_includes/v25.1/known-limitations/ldr-udfs.md index fb642f14751..62eea605b52 100644 --- a/src/current/_includes/v25.1/known-limitations/ldr-udfs.md +++ b/src/current/_includes/v25.1/known-limitations/ldr-udfs.md @@ -1 +1 @@ -Replicating tables cannot reference [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). [#132302](https://github.com/cockroachdb/cockroach/issues/132302) \ No newline at end of file +Replicating tables cannot reference [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). 
#132302 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/locality-optimized-search-virtual-computed-columns.md b/src/current/_includes/v25.1/known-limitations/locality-optimized-search-virtual-computed-columns.md index d6acf418aa8..39b3f2cb00e 100644 --- a/src/current/_includes/v25.1/known-limitations/locality-optimized-search-virtual-computed-columns.md +++ b/src/current/_includes/v25.1/known-limitations/locality-optimized-search-virtual-computed-columns.md @@ -1 +1 @@ -- {% if page.name == "cost-based-optimizer.md" %} Locality optimized search {% else %} [Locality optimized search]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) {% endif %} does not work for queries that use [partitioned unique indexes]({% link {{ page.version.version }}/partitioning.md %}#partition-using-a-secondary-index) on [virtual computed columns](computed-columns.html#virtual-computed-columns). A workaround for computed columns is to make the virtual computed column a [stored computed column](computed-columns.html#stored-computed-columns). Locality optimized search does not work for queries that use partitioned unique [expression indexes](expression-indexes.html). [#68129](https://github.com/cockroachdb/cockroach/issues/68129) +- {% if page.name == "cost-based-optimizer.md" %} Locality optimized search {% else %} [Locality optimized search]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) {% endif %} does not work for queries that use [partitioned unique indexes]({% link {{ page.version.version }}/partitioning.md %}#partition-using-a-secondary-index) on [virtual computed columns](computed-columns.html#virtual-computed-columns). A workaround for computed columns is to make the virtual computed column a [stored computed column](computed-columns.html#stored-computed-columns). 
Locality optimized search does not work for queries that use partitioned unique [expression indexes](expression-indexes.html). #68129 diff --git a/src/current/_includes/v25.1/known-limitations/materialized-views-no-stats.md b/src/current/_includes/v25.1/known-limitations/materialized-views-no-stats.md index 02f2bd787c4..2bfe00d5307 100644 --- a/src/current/_includes/v25.1/known-limitations/materialized-views-no-stats.md +++ b/src/current/_includes/v25.1/known-limitations/materialized-views-no-stats.md @@ -1 +1 @@ -- The optimizer may not select the most optimal query plan when querying materialized views because CockroachDB does not [collect statistics]({% link {{ page.version.version }}/cost-based-optimizer.md %}#table-statistics) on materialized views. [#78181](https://github.com/cockroachdb/cockroach/issues/78181). +- The optimizer may not select the most optimal query plan when querying materialized views because CockroachDB does not [collect statistics]({% link {{ page.version.version }}/cost-based-optimizer.md %}#table-statistics) on materialized views. #78181. diff --git a/src/current/_includes/v25.1/known-limitations/multiple-arbiter-indexes.md b/src/current/_includes/v25.1/known-limitations/multiple-arbiter-indexes.md index c9861623314..28b41eca491 100644 --- a/src/current/_includes/v25.1/known-limitations/multiple-arbiter-indexes.md +++ b/src/current/_includes/v25.1/known-limitations/multiple-arbiter-indexes.md @@ -1 +1 @@ -CockroachDB does not currently support multiple arbiter indexes for [`INSERT ON CONFLICT DO UPDATE`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause), and will return an error if there are multiple unique or exclusion constraints matching the `ON CONFLICT DO UPDATE` specification. 
[#53170](https://github.com/cockroachdb/cockroach/issues/53170) \ No newline at end of file +CockroachDB does not currently support multiple arbiter indexes for [`INSERT ON CONFLICT DO UPDATE`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause), and will return an error if there are multiple unique or exclusion constraints matching the `ON CONFLICT DO UPDATE` specification. #53170 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/plpgsql-limitations.md b/src/current/_includes/v25.1/known-limitations/plpgsql-limitations.md index 83e47a0bdaa..3db60046bea 100644 --- a/src/current/_includes/v25.1/known-limitations/plpgsql-limitations.md +++ b/src/current/_includes/v25.1/known-limitations/plpgsql-limitations.md @@ -1,26 +1,26 @@ {% if page.name != "known-limitations.md" # New limitations in v24.2 %} {% endif %} -- It is not possible to use a variable as a target more than once in the same `INTO` clause. For example, `SELECT 1, 2 INTO x, x;`. [#121605](https://github.com/cockroachdb/cockroach/issues/121605) -- PLpgSQL variable declarations cannot inherit the type of a table row or column using `%TYPE` or `%ROWTYPE` syntax. [#114676](https://github.com/cockroachdb/cockroach/issues/114676) -- PL/pgSQL arguments cannot be referenced with ordinals (e.g., `$1`, `$2`). [#114701](https://github.com/cockroachdb/cockroach/issues/114701) +- It is not possible to use a variable as a target more than once in the same `INTO` clause. For example, `SELECT 1, 2 INTO x, x;`. #121605 +- PLpgSQL variable declarations cannot inherit the type of a table row or column using `%TYPE` or `%ROWTYPE` syntax. #114676 +- PL/pgSQL arguments cannot be referenced with ordinals (e.g., `$1`, `$2`). #114701 - The following statements are not supported: - - `FOR` cursor loops, `FOR` query loops, and `FOREACH` loops. [#105246](https://github.com/cockroachdb/cockroach/issues/105246) - - `RETURN NEXT` and `RETURN QUERY`. 
[#117744](https://github.com/cockroachdb/cockroach/issues/117744) - - `PERFORM`, `EXECUTE`, `GET DIAGNOSTICS`, and `CASE`. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- PL/pgSQL exception blocks cannot catch [transaction retry errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}). [#111446](https://github.com/cockroachdb/cockroach/issues/111446) -- `RAISE` statements cannot be annotated with names of schema objects related to the error (i.e., using `COLUMN`, `CONSTRAINT`, `DATATYPE`, `TABLE`, or `SCHEMA`). [#106237](https://github.com/cockroachdb/cockroach/issues/106237) -- `RAISE` statements message the client directly, and do not produce log output. [#117750](https://github.com/cockroachdb/cockroach/issues/117750) -- `ASSERT` debugging checks are not supported. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- `RECORD` parameters and variables are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). [#105713](https://github.com/cockroachdb/cockroach/issues/105713) -- Variable shadowing (e.g., declaring a variable with the same name in an inner block) is not supported in PL/pgSQL. [#117508](https://github.com/cockroachdb/cockroach/issues/117508) -- Syntax for accessing members of composite types without parentheses is not supported. [#114687](https://github.com/cockroachdb/cockroach/issues/114687) -- `NOT NULL` variable declarations are not supported. [#105243](https://github.com/cockroachdb/cockroach/issues/105243) -- Cursors opened in PL/pgSQL execute their queries on opening, affecting performance and resource usage. [#111479](https://github.com/cockroachdb/cockroach/issues/111479) -- Cursors in PL/pgSQL cannot be declared with arguments. [#117746](https://github.com/cockroachdb/cockroach/issues/117746) -- `OPEN FOR EXECUTE` is not supported for opening cursors. 
[#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- The `print_strict_params` option is not supported in PL/pgSQL. [#123671](https://github.com/cockroachdb/cockroach/issues/123671) -- The `FOUND` local variable, which checks whether a statement affected any rows, is not supported in PL/pgSQL. [#122306](https://github.com/cockroachdb/cockroach/issues/122306) -- By default, when a PL/pgSQL variable conflicts with a column name, CockroachDB resolves the ambiguity by treating it as a column reference rather than a variable reference. This behavior differs from PostgreSQL, where an ambiguous column error is reported, and it is possible to change the `plpgsql.variable_conflict` setting in order to prefer either columns or variables. [#115680](https://github.com/cockroachdb/cockroach/issues/115680) -- It is not possible to define a `RECORD`-returning PL/pgSQL function that returns different-typed expressions from different `RETURN` statements. CockroachDB requires a consistent return type for `RECORD`-returning functions. [#115384](https://github.com/cockroachdb/cockroach/issues/115384) -- Variables cannot be declared with an associated collation using the `COLLATE` keyword. [#105245](https://github.com/cockroachdb/cockroach/issues/105245) -- Variables cannot be accessed using the `label.var_name` pattern. [#122322](https://github.com/cockroachdb/cockroach/issues/122322) \ No newline at end of file + - `FOR` cursor loops, `FOR` query loops, and `FOREACH` loops. #105246 + - `RETURN NEXT` and `RETURN QUERY`. #117744 + - `PERFORM`, `EXECUTE`, `GET DIAGNOSTICS`, and `CASE`. #117744 +- PL/pgSQL exception blocks cannot catch [transaction retry errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}). #111446 +- `RAISE` statements cannot be annotated with names of schema objects related to the error (i.e., using `COLUMN`, `CONSTRAINT`, `DATATYPE`, `TABLE`, or `SCHEMA`). 
#106237 +- `RAISE` statements message the client directly, and do not produce log output. #117750 +- `ASSERT` debugging checks are not supported. #117744 +- `RECORD` parameters and variables are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). #105713 +- Variable shadowing (e.g., declaring a variable with the same name in an inner block) is not supported in PL/pgSQL. #117508 +- Syntax for accessing members of composite types without parentheses is not supported. #114687 +- `NOT NULL` variable declarations are not supported. #105243 +- Cursors opened in PL/pgSQL execute their queries on opening, affecting performance and resource usage. #111479 +- Cursors in PL/pgSQL cannot be declared with arguments. #117746 +- `OPEN FOR EXECUTE` is not supported for opening cursors. #117744 +- The `print_strict_params` option is not supported in PL/pgSQL. #123671 +- The `FOUND` local variable, which checks whether a statement affected any rows, is not supported in PL/pgSQL. #122306 +- By default, when a PL/pgSQL variable conflicts with a column name, CockroachDB resolves the ambiguity by treating it as a column reference rather than a variable reference. This behavior differs from PostgreSQL, where an ambiguous column error is reported, and it is possible to change the `plpgsql.variable_conflict` setting in order to prefer either columns or variables. #115680 +- It is not possible to define a `RECORD`-returning PL/pgSQL function that returns different-typed expressions from different `RETURN` statements. CockroachDB requires a consistent return type for `RECORD`-returning functions. #115384 +- Variables cannot be declared with an associated collation using the `COLLATE` keyword. #105245 +- Variables cannot be accessed using the `label.var_name` pattern. 
#122322 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/read-committed-limitations.md b/src/current/_includes/v25.1/known-limitations/read-committed-limitations.md index 63f83b15dd8..1f466997ed8 100644 --- a/src/current/_includes/v25.1/known-limitations/read-committed-limitations.md +++ b/src/current/_includes/v25.1/known-limitations/read-committed-limitations.md @@ -1,6 +1,6 @@ -- Schema changes (e.g., [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}), [`CREATE SCHEMA`]({% link {{ page.version.version }}/create-schema.md %}), [`CREATE INDEX`]({% link {{ page.version.version }}/create-index.md %})) cannot be performed within explicit `READ COMMITTED` transactions when the [`autocommit_before_ddl` session setting]({% link {{page.version.version}}/set-vars.md %}#autocommit-before-ddl) is set to `off`, and will cause transactions to abort. As a workaround, [set the transaction's isolation level]({% link {{ page.version.version }}/read-committed.md %}#set-the-current-transaction-to-read-committed) to `SERIALIZABLE`. [#114778](https://github.com/cockroachdb/cockroach/issues/114778) -- Multi-column-family checks during updates are not supported under `READ COMMITTED` isolation. [#112488](https://github.com/cockroachdb/cockroach/issues/112488) +- Schema changes (e.g., [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}), [`CREATE SCHEMA`]({% link {{ page.version.version }}/create-schema.md %}), [`CREATE INDEX`]({% link {{ page.version.version }}/create-index.md %})) cannot be performed within explicit `READ COMMITTED` transactions when the [`autocommit_before_ddl` session setting]({% link {{page.version.version}}/set-vars.md %}#autocommit-before-ddl) is set to `off`, and will cause transactions to abort. As a workaround, [set the transaction's isolation level]({% link {{ page.version.version }}/read-committed.md %}#set-the-current-transaction-to-read-committed) to `SERIALIZABLE`. 
#114778 +- Multi-column-family checks during updates are not supported under `READ COMMITTED` isolation. #112488 - Because locks acquired by [foreign key]({% link {{ page.version.version }}/foreign-key.md %}) checks, [`SELECT FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}), and [`SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) are fully replicated under `READ COMMITTED` isolation, some queries experience a delay for Raft replication. - [Foreign key]({% link {{ page.version.version }}/foreign-key.md %}) checks are not performed in parallel under `READ COMMITTED` isolation. - [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements are less optimized under `READ COMMITTED` isolation than under `SERIALIZABLE` isolation. Under `READ COMMITTED` isolation, `SELECT FOR UPDATE` and `SELECT FOR SHARE` usually perform an extra lookup join for every locked table when compared to the same queries under `SERIALIZABLE`. In addition, some optimization steps (such as de-correlation of correlated [subqueries]({% link {{ page.version.version }}/subqueries.md %})) are not currently performed on these queries. -- Regardless of isolation level, [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements in CockroachDB do not prevent insertion of new rows matching the search condition (i.e., [phantom reads]({% link {{ page.version.version }}/read-committed.md %}#non-repeatable-reads-and-phantom-reads)). This matches PostgreSQL behavior at all isolation levels. 
[#120673](https://github.com/cockroachdb/cockroach/issues/120673) \ No newline at end of file +- Regardless of isolation level, [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements in CockroachDB do not prevent insertion of new rows matching the search condition (i.e., [phantom reads]({% link {{ page.version.version }}/read-committed.md %}#non-repeatable-reads-and-phantom-reads)). This matches PostgreSQL behavior at all isolation levels. #120673 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/restore-multiregion-match.md b/src/current/_includes/v25.1/known-limitations/restore-multiregion-match.md index 20ddbb0c930..4c533f4283e 100644 --- a/src/current/_includes/v25.1/known-limitations/restore-multiregion-match.md +++ b/src/current/_includes/v25.1/known-limitations/restore-multiregion-match.md @@ -47,4 +47,4 @@ ALTER DATABASE destination_database SET PRIMARY REGION "us-east1"; ~~~ - [#71071](https://github.com/cockroachdb/cockroach/issues/71071) \ No newline at end of file + #71071 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/restore-tables-non-multi-reg.md b/src/current/_includes/v25.1/known-limitations/restore-tables-non-multi-reg.md index 5390f2d09ee..e05bc340141 100644 --- a/src/current/_includes/v25.1/known-limitations/restore-tables-non-multi-reg.md +++ b/src/current/_includes/v25.1/known-limitations/restore-tables-non-multi-reg.md @@ -1 +1 @@ -Restoring [`GLOBAL`]({% link {{ page.version.version }}/table-localities.md %}#global-tables) and [`REGIONAL BY TABLE`]({% link {{ page.version.version }}/table-localities.md %}#regional-tables) tables into a **non**-multi-region database is not supported. 
[#71502](https://github.com/cockroachdb/cockroach/issues/71502) +Restoring [`GLOBAL`]({% link {{ page.version.version }}/table-localities.md %}#global-tables) and [`REGIONAL BY TABLE`]({% link {{ page.version.version }}/table-localities.md %}#regional-tables) tables into a **non**-multi-region database is not supported. #71502 diff --git a/src/current/_includes/v25.1/known-limitations/restore-udf.md b/src/current/_includes/v25.1/known-limitations/restore-udf.md index a4a4bc080fe..e4180219a25 100644 --- a/src/current/_includes/v25.1/known-limitations/restore-udf.md +++ b/src/current/_includes/v25.1/known-limitations/restore-udf.md @@ -1 +1 @@ -`RESTORE` will not restore a table that references a [UDF]({% link {{ page.version.version }}/user-defined-functions.md %}), unless you skip restoring the function with the {% if page.name == "restore.md" %} [`skip_missing_udfs`](#skip-missing-udfs) {% else %} [`skip_missing_udfs`]({% link {{ page.version.version }}/restore.md %}#skip-missing-udfs) {% endif %} option. Alternatively, take a [database-level backup]({% link {{ page.version.version }}/backup.md %}#back-up-a-database) to include everything needed to restore the table. [#118195](https://github.com/cockroachdb/cockroach/issues/118195) \ No newline at end of file +`RESTORE` will not restore a table that references a [UDF]({% link {{ page.version.version }}/user-defined-functions.md %}), unless you skip restoring the function with the {% if page.name == "restore.md" %} [`skip_missing_udfs`](#skip-missing-udfs) {% else %} [`skip_missing_udfs`]({% link {{ page.version.version }}/restore.md %}#skip-missing-udfs) {% endif %} option. Alternatively, take a [database-level backup]({% link {{ page.version.version }}/backup.md %}#back-up-a-database) to include everything needed to restore the table. 
#118195 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/routine-limitations.md b/src/current/_includes/v25.1/known-limitations/routine-limitations.md index 4718c6c7abf..fa16791b56a 100644 --- a/src/current/_includes/v25.1/known-limitations/routine-limitations.md +++ b/src/current/_includes/v25.1/known-limitations/routine-limitations.md @@ -1,10 +1,10 @@ {% if page.name != "known-limitations.md" # New limitations in v24.2 %} {% endif %} -- Routines cannot be invoked with named arguments, e.g., `SELECT foo(a => 1, b => 2);` or `SELECT foo(b := 1, a := 2);`. [#122264](https://github.com/cockroachdb/cockroach/issues/122264) -- Routines cannot be created if they reference temporary tables. [#121375](https://github.com/cockroachdb/cockroach/issues/121375) -- Routines cannot be created with unnamed `INOUT` parameters. For example, `CREATE PROCEDURE p(INOUT INT) AS $$ BEGIN NULL; END; $$ LANGUAGE PLpgSQL;`. [#121251](https://github.com/cockroachdb/cockroach/issues/121251) -- Routines cannot be created if they return fewer columns than declared. For example, `CREATE FUNCTION f(OUT sum INT, INOUT a INT, INOUT b INT) LANGUAGE SQL AS $$ SELECT (a + b, b); $$;`. [#121247](https://github.com/cockroachdb/cockroach/issues/121247) -- Routines cannot be created with an `OUT` parameter of type `RECORD`. [#123448](https://github.com/cockroachdb/cockroach/issues/123448) -- DDL statements (e.g., `CREATE TABLE`, `CREATE INDEX`) are not allowed within UDFs or stored procedures. [#110080](https://github.com/cockroachdb/cockroach/issues/110080) -- Polymorphic types cannot be cast to other types (e.g., `TEXT`) within routine parameters. [#123536](https://github.com/cockroachdb/cockroach/issues/123536) -- Routine parameters and return types cannot be declared using the `ANYENUM` polymorphic type, which is able to match any [`ENUM`]({% link {{ page.version.version }}/enum.md %}) type. 
[123048](https://github.com/cockroachdb/cockroach/issues/123048) \ No newline at end of file +- Routines cannot be invoked with named arguments, e.g., `SELECT foo(a => 1, b => 2);` or `SELECT foo(b := 1, a := 2);`. #122264 +- Routines cannot be created if they reference temporary tables. #121375 +- Routines cannot be created with unnamed `INOUT` parameters. For example, `CREATE PROCEDURE p(INOUT INT) AS $$ BEGIN NULL; END; $$ LANGUAGE PLpgSQL;`. #121251 +- Routines cannot be created if they return fewer columns than declared. For example, `CREATE FUNCTION f(OUT sum INT, INOUT a INT, INOUT b INT) LANGUAGE SQL AS $$ SELECT (a + b, b); $$;`. #121247 +- Routines cannot be created with an `OUT` parameter of type `RECORD`. #123448 +- DDL statements (e.g., `CREATE TABLE`, `CREATE INDEX`) are not allowed within UDFs or stored procedures. #110080 +- Polymorphic types cannot be cast to other types (e.g., `TEXT`) within routine parameters. #123536 +- Routine parameters and return types cannot be declared using the `ANYENUM` polymorphic type, which is able to match any [`ENUM`]({% link {{ page.version.version }}/enum.md %}) type. #123048 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/select-for-update-limitations.md b/src/current/_includes/v25.1/known-limitations/select-for-update-limitations.md index 73aaf9fdeb6..210099bacd7 100644 --- a/src/current/_includes/v25.1/known-limitations/select-for-update-limitations.md +++ b/src/current/_includes/v25.1/known-limitations/select-for-update-limitations.md @@ -1,4 +1,4 @@ -- `SKIP LOCKED` cannot be used for tables with multiple [column families]({% link {{ page.version.version }}/column-families.md %}). [#160961](https://github.com/cockroachdb/cockroach/issues/160961) +- `SKIP LOCKED` cannot be used for tables with multiple [column families]({% link {{ page.version.version }}/column-families.md %}). #160961 - By default under `SERIALIZABLE` isolation, locks acquired using `SELECT ...
FOR UPDATE` and `SELECT ... FOR SHARE` are implemented as fast, in-memory [unreplicated locks](architecture/transaction-layer.html#unreplicated-locks). If a [lease transfer]({% link {{ page.version.version }}/architecture/replication-layer.md %}#epoch-based-leases-table-data) or [range split/merge]({% link {{ page.version.version }}/architecture/distribution-layer.md %}#range-merges) occurs on a range held by an unreplicated lock, the lock is dropped. The following behaviors can occur: - The desired ordering of concurrent accesses to one or more rows of a table expressed by your use of `SELECT ... FOR UPDATE` may not be preserved (that is, a transaction _B_ against some table _T_ that was supposed to wait behind another transaction _A_ operating on _T_ may not wait for transaction _A_). - The transaction that acquired the (now dropped) unreplicated lock may fail to commit, leading to [transaction retry errors with code `40001`]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}) and the [`restart transaction` error message]({% link {{ page.version.version }}/common-errors.md %}#restart-transaction). 
diff --git a/src/current/_includes/v25.1/known-limitations/set-transaction-no-rollback.md b/src/current/_includes/v25.1/known-limitations/set-transaction-no-rollback.md index 414cbac6282..5f0145c05c4 100644 --- a/src/current/_includes/v25.1/known-limitations/set-transaction-no-rollback.md +++ b/src/current/_includes/v25.1/known-limitations/set-transaction-no-rollback.md @@ -14,4 +14,4 @@ timezone 3 ~~~ -[#69396](https://github.com/cockroachdb/cockroach/issues/69396) +#69396 diff --git a/src/current/_includes/v25.1/known-limitations/show-backup-symlink.md b/src/current/_includes/v25.1/known-limitations/show-backup-symlink.md index 38ba86fb28f..1c7a5612242 100644 --- a/src/current/_includes/v25.1/known-limitations/show-backup-symlink.md +++ b/src/current/_includes/v25.1/known-limitations/show-backup-symlink.md @@ -1 +1 @@ -[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}) does not support listing backups if the [`nodelocal`]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}) storage location is a symlink. [#70260](https://github.com/cockroachdb/cockroach/issues/70260) \ No newline at end of file +[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}) does not support listing backups if the [`nodelocal`]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}) storage location is a symlink. #70260 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/sql-cursors.md b/src/current/_includes/v25.1/known-limitations/sql-cursors.md index 4c047aa9603..93bbca937b1 100644 --- a/src/current/_includes/v25.1/known-limitations/sql-cursors.md +++ b/src/current/_includes/v25.1/known-limitations/sql-cursors.md @@ -1,9 +1,9 @@ CockroachDB implements SQL {% if page.name == "known-limitations.md" %} [cursor]({% link {{ page.version.version }}/cursors.md %}) {% else %} cursor {% endif %} support with the following limitations: -- `DECLARE` only supports forward cursors. 
Reverse cursors created with `DECLARE SCROLL` are not supported. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- `FETCH` supports forward, relative, and absolute variants, but only for forward cursors. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- `BINARY CURSOR`, which returns data in the Postgres binary format, is not supported. [#77099](https://github.com/cockroachdb/cockroach/issues/77099) -- `WITH HOLD`, which allows keeping a cursor open for longer than a transaction by writing its results into a buffer, is accepted as valid syntax within a single transaction but is not supported. It acts as a no-op and does not actually perform the function of `WITH HOLD`, which is to make the cursor live outside its parent transaction. Instead, if you are using `WITH HOLD`, you will be forced to close that cursor within the transaction it was created in. [#77101](https://github.com/cockroachdb/cockroach/issues/77101) +- `DECLARE` only supports forward cursors. Reverse cursors created with `DECLARE SCROLL` are not supported. #77102 +- `FETCH` supports forward, relative, and absolute variants, but only for forward cursors. #77102 +- `BINARY CURSOR`, which returns data in the Postgres binary format, is not supported. #77099 +- `WITH HOLD`, which allows keeping a cursor open for longer than a transaction by writing its results into a buffer, is accepted as valid syntax within a single transaction but is not supported. It acts as a no-op and does not actually perform the function of `WITH HOLD`, which is to make the cursor live outside its parent transaction. Instead, if you are using `WITH HOLD`, you will be forced to close that cursor within the transaction it was created in. 
#77101 - This syntax is accepted (but does not have any effect): {% include_cached copy-clipboard.html %} ~~~ sql @@ -19,6 +19,6 @@ CockroachDB implements SQL {% if page.name == "known-limitations.md" %} [cursor] DECLARE test_cur CURSOR WITH HOLD FOR SELECT * FROM foo ORDER BY bar; COMMIT; -- This will fail with an error because CLOSE test_cur was not called inside the transaction. ~~~ -- Scrollable cursor (also known as reverse `FETCH`) is not supported. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- [`SELECT ... FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) with a cursor is not supported. [#77103](https://github.com/cockroachdb/cockroach/issues/77103) -- Respect for [`SAVEPOINT`s]({% link {{ page.version.version }}/savepoint.md %}) is not supported. Cursor definitions do not disappear properly if rolled back to a `SAVEPOINT` from before they were created. [#77104](https://github.com/cockroachdb/cockroach/issues/77104) +- Scrollable cursor (also known as reverse `FETCH`) is not supported. #77102 +- [`SELECT ... FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) with a cursor is not supported. #77103 +- Respect for [`SAVEPOINT`s]({% link {{ page.version.version }}/savepoint.md %}) is not supported. Cursor definitions do not disappear properly if rolled back to a `SAVEPOINT` from before they were created. #77104 diff --git a/src/current/_includes/v25.1/known-limitations/srid-4326-limitations.md b/src/current/_includes/v25.1/known-limitations/srid-4326-limitations.md index b556a9fbecd..294a1a53bd4 100644 --- a/src/current/_includes/v25.1/known-limitations/srid-4326-limitations.md +++ b/src/current/_includes/v25.1/known-limitations/srid-4326-limitations.md @@ -1 +1 @@ -Defining a custom SRID by inserting rows into [`spatial_ref_sys`]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial_ref_sys) is not currently supported. 
[#55903](https://github.com/cockroachdb/cockroach/issues/55903) \ No newline at end of file +Defining a custom SRID by inserting rows into [`spatial_ref_sys`]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial_ref_sys) is not currently supported. #55903 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/stats-refresh-upgrade.md b/src/current/_includes/v25.1/known-limitations/stats-refresh-upgrade.md index 3d5a8d26325..3325dfaa53b 100644 --- a/src/current/_includes/v25.1/known-limitations/stats-refresh-upgrade.md +++ b/src/current/_includes/v25.1/known-limitations/stats-refresh-upgrade.md @@ -1 +1 @@ -- The [automatic statistics refresher]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-statistics-refresh-rate) automatically checks whether it needs to refresh statistics for every table in the database upon startup of each node in the cluster. If statistics for a table have not been refreshed in a while, this will trigger collection of statistics for that table. If statistics have been refreshed recently, it will not force a refresh. As a result, the automatic statistics refresher does not necessarily perform a refresh of statistics after an [upgrade]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}). This could cause a problem, for example, if the upgrade moves from a version without [histograms]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-histogram-collection) to a version with histograms. To refresh statistics manually, use [`CREATE STATISTICS`](create-statistics.html). [#54816](https://github.com/cockroachdb/cockroach/issues/54816) +- The [automatic statistics refresher]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-statistics-refresh-rate) automatically checks whether it needs to refresh statistics for every table in the database upon startup of each node in the cluster. 
If statistics for a table have not been refreshed in a while, this will trigger collection of statistics for that table. If statistics have been refreshed recently, it will not force a refresh. As a result, the automatic statistics refresher does not necessarily perform a refresh of statistics after an [upgrade]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}). This could cause a problem, for example, if the upgrade moves from a version without [histograms]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-histogram-collection) to a version with histograms. To refresh statistics manually, use [`CREATE STATISTICS`](create-statistics.html). #54816 diff --git a/src/current/_includes/v25.1/known-limitations/stored-proc-limitations.md b/src/current/_includes/v25.1/known-limitations/stored-proc-limitations.md index b2ba1b61562..bce2e47d6b8 100644 --- a/src/current/_includes/v25.1/known-limitations/stored-proc-limitations.md +++ b/src/current/_includes/v25.1/known-limitations/stored-proc-limitations.md @@ -1,3 +1,3 @@ {% if page.name != "known-limitations.md" # New limitations in v24.2 %} {% endif %} -- `COMMIT` and `ROLLBACK` statements are not supported within nested procedures. [#122266](https://github.com/cockroachdb/cockroach/issues/122266) \ No newline at end of file +- `COMMIT` and `ROLLBACK` statements are not supported within nested procedures. #122266 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/trigger-limitations.md b/src/current/_includes/v25.1/known-limitations/trigger-limitations.md index 7373cd486e7..f2514d24113 100644 --- a/src/current/_includes/v25.1/known-limitations/trigger-limitations.md +++ b/src/current/_includes/v25.1/known-limitations/trigger-limitations.md @@ -1,6 +1,6 @@ -- `CREATE OR REPLACE TRIGGER` is not supported. [#128422](https://github.com/cockroachdb/cockroach/issues/128422) -- Statement-level triggers are not supported. 
[#126362](https://github.com/cockroachdb/cockroach/issues/126362) -- `INSTEAD OF` triggers are not supported. [#126363](https://github.com/cockroachdb/cockroach/issues/126363) -- A [trigger function]({% link {{ page.version.version }}/triggers.md %}#trigger-function) that is used in an existing trigger cannot be replaced with `CREATE OR REPLACE` syntax. To use `CREATE OR REPLACE`, first [drop any triggers]({% link {{ page.version.version }}/drop-trigger.md %}) that are using the function. [#134555](https://github.com/cockroachdb/cockroach/issues/134555) -- Hidden columns are not visible to triggers. [#133331](https://github.com/cockroachdb/cockroach/issues/133331) +- `CREATE OR REPLACE TRIGGER` is not supported. #128422 +- Statement-level triggers are not supported. #126362 +- `INSTEAD OF` triggers are not supported. #126363 +- A [trigger function]({% link {{ page.version.version }}/triggers.md %}#trigger-function) that is used in an existing trigger cannot be replaced with `CREATE OR REPLACE` syntax. To use `CREATE OR REPLACE`, first [drop any triggers]({% link {{ page.version.version }}/drop-trigger.md %}) that are using the function. #134555 +- Hidden columns are not visible to triggers. #133331 - {% include {{ page.version.version }}/known-limitations/drop-trigger-limitations.md %} \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/trigram-unsupported-syntax.md b/src/current/_includes/v25.1/known-limitations/trigram-unsupported-syntax.md index 494730c7ae8..a981c4915b0 100644 --- a/src/current/_includes/v25.1/known-limitations/trigram-unsupported-syntax.md +++ b/src/current/_includes/v25.1/known-limitations/trigram-unsupported-syntax.md @@ -6,4 +6,4 @@ - Acceleration on [regex string matching]({% link {{ page.version.version }}/scalar-expressions.md %}#string-matching-using-posix-regular-expressions). 
- `%` comparisons, `show_trgm`, and trigram index creation on [collated strings]({% link {{ page.version.version }}/collate.md %}). -[#41285](https://github.com/cockroachdb/cockroach/issues/41285) \ No newline at end of file +#41285 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/udf-limitations.md b/src/current/_includes/v25.1/known-limitations/udf-limitations.md index 8b2ef66b403..5e96fd49283 100644 --- a/src/current/_includes/v25.1/known-limitations/udf-limitations.md +++ b/src/current/_includes/v25.1/known-limitations/udf-limitations.md @@ -1,9 +1,9 @@ {% if page.name != "known-limitations.md" # New limitations in v24.2 %} {% endif %} -- A `RECORD`-returning UDF cannot be created without a `RETURN` statement in the root block, which would restrict the wildcard type to a concrete one. [#122945](https://github.com/cockroachdb/cockroach/issues/122945) +- A `RECORD`-returning UDF cannot be created without a `RETURN` statement in the root block, which would restrict the wildcard type to a concrete one. #122945 - User-defined functions are not currently supported in: - - Expressions (column, index, constraint) in tables. [#87699](https://github.com/cockroachdb/cockroach/issues/87699) - - Views. [#87699](https://github.com/cockroachdb/cockroach/issues/87699) -- User-defined functions cannot call themselves recursively. [#93049](https://github.com/cockroachdb/cockroach/issues/93049) -- The `setval` function cannot be resolved when used inside UDF bodies. [#110860](https://github.com/cockroachdb/cockroach/issues/110860) -- Casting subqueries to [user-defined types]({% link {{ page.version.version }}/create-type.md %}) in UDFs is not supported. [#108184](https://github.com/cockroachdb/cockroach/issues/108184) \ No newline at end of file + - Expressions (column, index, constraint) in tables. #87699 + - Views. #87699 +- User-defined functions cannot call themselves recursively. 
#93049 +- The `setval` function cannot be resolved when used inside UDF bodies. #110860 +- Casting subqueries to [user-defined types]({% link {{ page.version.version }}/create-type.md %}) in UDFs is not supported. #108184 \ No newline at end of file diff --git a/src/current/_includes/v25.1/known-limitations/vectorized-engine-limitations.md b/src/current/_includes/v25.1/known-limitations/vectorized-engine-limitations.md index daea59ebf88..da227ad4825 100644 --- a/src/current/_includes/v25.1/known-limitations/vectorized-engine-limitations.md +++ b/src/current/_includes/v25.1/known-limitations/vectorized-engine-limitations.md @@ -1,2 +1,2 @@ -- The vectorized engine does not support queries containing a join filtered with an [`ON` expression]({% link {{ page.version.version }}/joins.md %}#supported-join-conditions). [#38018](https://github.com/cockroachdb/cockroach/issues/38018) +- The vectorized engine does not support queries containing a join filtered with an [`ON` expression]({% link {{ page.version.version }}/joins.md %}#supported-join-conditions). #38018 - The vectorized engine does not support [working with spatial data]({% link {{ page.version.version }}/export-spatial-data.md %}). Queries with [geospatial functions]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) or [spatial data]({% link {{ page.version.version }}/export-spatial-data.md %}) will revert to the row-oriented execution engine. 
diff --git a/src/current/_includes/v25.1/misc/tooling.md b/src/current/_includes/v25.1/misc/tooling.md index dcd24363435..01dc63c840f 100644 --- a/src/current/_includes/v25.1/misc/tooling.md +++ b/src/current/_includes/v25.1/misc/tooling.md @@ -23,7 +23,7 @@ Customers should contact their account team before moving production workloads t Unless explicitly stated, support for a [driver](#drivers) or [data access framework](#data-access-frameworks-e-g-orms) does not include [automatic, client-side transaction retry handling]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#client-side-retry-handling). For client-side transaction retry handling samples, see [Example Apps]({% link {{ page.version.version }}/example-apps.md %}). {{site.data.alerts.end}} -If you encounter problems using CockroachDB with any of the tools listed on this page, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward better support. +If you encounter problems using CockroachDB with any of the tools listed on this page, please open an issue with details to help us make progress toward better support. For a list of tools supported by the CockroachDB community, see [Third-Party Tools Supported by the Community]({% link {{ page.version.version }}/community-tooling.md %}). 
@@ -32,23 +32,23 @@ For a list of tools supported by the CockroachDB community, see [Third-Party Too | Language | Driver | Latest tested version | Support level | CockroachDB adapter | Tutorial | |----------+--------+-----------------------+---------------------+---------------------+----------| | C | [libpq](http://www.postgresql.org/docs/13/static/libpq.html)| PostgreSQL 13 | Partial | N/A | N/A | -| C# (.NET) | [Npgsql](https://www.nuget.org/packages/Npgsql/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/npgsql.go ||var npgsqlSupportedTag = "v||"\n\n %} | Full | N/A | [Build a C# App with CockroachDB (Npgsql)](build-a-csharp-app-with-cockroachdb.html) | -| Go | [pgx](https://github.com/jackc/pgx/releases)


[pq](https://github.com/lib/pq) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgx.go ||var supportedPGXTag = "||"\n\n %}
(use latest version of CockroachDB adapter)
{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/libpq.go ||var libPQSupportedTag = "||"\n\n %} | Full


Full | [`crdbpgx`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbpgx)
(includes client-side transaction retry handling)
N/A | [Build a Go App with CockroachDB (pgx)](build-a-go-app-with-cockroachdb.html)


[Build a Go App with CockroachDB (pq)](build-a-go-app-with-cockroachdb-pq.html) | -| Java | [JDBC](https://jdbc.postgresql.org/download/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgjdbc.go ||var supportedPGJDBCTag = "||"\n\n %} | Full | N/A | [Build a Java App with CockroachDB (JDBC)](build-a-java-app-with-cockroachdb.html) | +| C# (.NET) | [Npgsql](https://www.nuget.org/packages/Npgsql/) | 7.0.2 | Full | N/A | [Build a C# App with CockroachDB (Npgsql)](build-a-csharp-app-with-cockroachdb.html) | +| Go | [pgx](https://github.com/jackc/pgx/releases)


[pq](https://github.com/lib/pq) | v5.3.1
(use latest version of CockroachDB adapter)
v1.10.5 | Full


Full | [`crdbpgx`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbpgx)
(includes client-side transaction retry handling)
N/A | [Build a Go App with CockroachDB (pgx)](build-a-go-app-with-cockroachdb.html)


[Build a Go App with CockroachDB (pq)](build-a-go-app-with-cockroachdb-pq.html) | +| Java | [JDBC](https://jdbc.postgresql.org/download/) | REL42.7.3 | Full | N/A | [Build a Java App with CockroachDB (JDBC)](build-a-java-app-with-cockroachdb.html) | | JavaScript | [pg](https://www.npmjs.com/package/pg) | 8.2.1 | Full | N/A | [Build a Node.js App with CockroachDB (pg)](build-a-nodejs-app-with-cockroachdb.html) | -| Python | [psycopg3](https://www.psycopg.org/psycopg3/docs/)


[psycopg2](https://www.psycopg.org/docs/install.html)


[asyncpg](https://magicstack.github.io/asyncpg/current/index.html) | 3.0.16


2.8.6


{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/asyncpg.go || var asyncpgSupportedTag = "||"\n\n %} | Full


Full


Partial | N/A


N/A


N/A | [Build a Python App with CockroachDB (psycopg3)](build-a-python-app-with-cockroachdb-psycopg3.html)


[Build a Python App with CockroachDB (psycopg2)](build-a-python-app-with-cockroachdb.html)


[Build a Python App with CockroachDB (asyncpg)](build-a-python-app-with-cockroachdb-asyncpg.html) | -| Ruby | [pg](https://rubygems.org/gems/pg) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/ruby_pg.go ||var rubyPGVersion = "||"\n\n %} | Full | N/A | [Build a Ruby App with CockroachDB (pg)](build-a-ruby-app-with-cockroachdb.html) | +| Python | [psycopg3](https://www.psycopg.org/psycopg3/docs/)


[psycopg2](https://www.psycopg.org/docs/install.html)


[asyncpg](https://magicstack.github.io/asyncpg/current/index.html) | 3.0.16


2.8.6


v0.24.0 | Full


Full


Partial | N/A


N/A


N/A | [Build a Python App with CockroachDB (psycopg3)](build-a-python-app-with-cockroachdb-psycopg3.html)


[Build a Python App with CockroachDB (psycopg2)](build-a-python-app-with-cockroachdb.html)


[Build a Python App with CockroachDB (asyncpg)](build-a-python-app-with-cockroachdb-asyncpg.html) | +| Ruby | [pg](https://rubygems.org/gems/pg) | v1.4.6 | Full | N/A | [Build a Ruby App with CockroachDB (pg)](build-a-ruby-app-with-cockroachdb.html) | | Rust | [rust-postgres](https://github.com/sfackler/rust-postgres) | 0.19.2 | Partial | N/A | [Build a Rust App with CockroachDB]({% link {{ page.version.version }}/build-a-rust-app-with-cockroachdb.md %}) | ## Data access frameworks (e.g., ORMs) | Language | Framework | Latest tested version | Support level | CockroachDB adapter | Tutorial | |----------+-----------+-----------------------+---------------+---------------------+----------| -| Go | [GORM](https://github.com/jinzhu/gorm/releases)


[go-pg](https://github.com/go-pg/pg)
[upper/db](https://github.com/upper/db) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gorm.go ||var gormSupportedTag = "||"\n\n %}


{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gopg.go ||var gopgSupportedTag = "||"\n\n %}
v4 | Full


Full
Full | [`crdbgorm`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbgorm)
(includes client-side transaction retry handling)
N/A
N/A | [Build a Go App with CockroachDB (GORM)](build-a-go-app-with-cockroachdb-gorm.html)


N/A
[Build a Go App with CockroachDB (upper/db)](build-a-go-app-with-cockroachdb-upperdb.html) | -| Java | [Hibernate](https://hibernate.org/orm/)
(including [Hibernate Spatial](https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html#spatial))
[jOOQ](https://www.jooq.org/)
[MyBatis](https://mybatis.org/mybatis-3/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/hibernate.go ||var supportedHibernateTag = "||"\n\n %} (must be at least 5.4.19)


3.13.2 (must be at least 3.13.0)
3.5.5| Full


Full
Full | N/A


N/A
N/A | [Build a Java App with CockroachDB (Hibernate)](build-a-java-app-with-cockroachdb-hibernate.html)


[Build a Java App with CockroachDB (jOOQ)](build-a-java-app-with-cockroachdb-jooq.html)
[Build a Spring App with CockroachDB (MyBatis)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) | -| JavaScript/TypeScript | [Sequelize](https://www.npmjs.com/package/sequelize)


[Knex.js](https://knexjs.org/)
[Prisma](https://prisma.io)
[TypeORM](https://www.npmjs.com/package/typeorm) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/sequelize.go ||var supportedSequelizeCockroachDBRelease = "||"\n\n %}
(use latest version of CockroachDB adapter)
{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/knex.go ||const supportedKnexTag = "||"\n\n %}
3.14.0
0.3.17 {% comment %}{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/typeorm.go ||const supportedTypeORMRelease = "||"\n %}{% endcomment %} | Full


Full
Full
Full | [`sequelize-cockroachdb`](https://www.npmjs.com/package/sequelize-cockroachdb)


N/A
N/A
N/A | [Build a Node.js App with CockroachDB (Sequelize)](build-a-nodejs-app-with-cockroachdb-sequelize.html)


[Build a Node.js App with CockroachDB (Knex.js)](build-a-nodejs-app-with-cockroachdb-knexjs.html)
[Build a Node.js App with CockroachDB (Prisma)](build-a-nodejs-app-with-cockroachdb-prisma.html)
[Build a TypeScript App with CockroachDB (TypeORM)](build-a-typescript-app-with-cockroachdb.html) | -| Ruby | [ActiveRecord](https://rubygems.org/gems/activerecord)
[RGeo/RGeo-ActiveRecord](https://github.com/cockroachdb/activerecord-cockroachdb-adapter#working-with-spatial-data) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/activerecord.go ||var supportedRailsVersion = "||"\nvar %}
(use latest version of CockroachDB adapter) | Full | [`activerecord-cockroachdb-adapter`](https://rubygems.org/gems/activerecord-cockroachdb-adapter)
(includes client-side transaction retry handling) | [Build a Ruby App with CockroachDB (ActiveRecord)](build-a-ruby-app-with-cockroachdb-activerecord.html) | -| Python | [Django](https://pypi.org/project/Django/)
(including [GeoDjango](https://docs.djangoproject.com/en/3.1/ref/contrib/gis/))
[peewee](https://github.com/coleifer/peewee/)
[SQLAlchemy](https://www.sqlalchemy.org/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/django.go ||var djangoSupportedTag = "cockroach-||"\nvar %}
(use latest version of CockroachDB adapter)

3.13.3
0.7.13
1.4.17
(use latest version of CockroachDB adapter) | Full


Full
Full
Full | [`django-cockroachdb`](https://pypi.org/project/django-cockroachdb/)


N/A
N/A
[`sqlalchemy-cockroachdb`](https://pypi.org/project/sqlalchemy-cockroachdb)
(includes client-side transaction retry handling) | [Build a Python App with CockroachDB (Django)](build-a-python-app-with-cockroachdb-django.html)


N/A (See [peewee docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cockroach-database).)
[Build a Python App with CockroachDB (SQLAlchemy)](build-a-python-app-with-cockroachdb-sqlalchemy.html) | +| Go | [GORM](https://github.com/jinzhu/gorm/releases)


[go-pg](https://github.com/go-pg/pg)
[upper/db](https://github.com/upper/db) | v1.24.1


v10.9.0
v4 | Full


Full
Full | [`crdbgorm`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbgorm)
(includes client-side transaction retry handling)
N/A
N/A | [Build a Go App with CockroachDB (GORM)](build-a-go-app-with-cockroachdb-gorm.html)


N/A
[Build a Go App with CockroachDB (upper/db)](build-a-go-app-with-cockroachdb-upperdb.html) | +| Java | [Hibernate](https://hibernate.org/orm/)
(including [Hibernate Spatial](https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html#spatial))
[jOOQ](https://www.jooq.org/)
[MyBatis](https://mybatis.org/mybatis-3/) | 6.6.20 (must be at least 5.4.19)


3.13.2 (must be at least 3.13.0)
3.5.5| Full


Full
Full | N/A


N/A
N/A | [Build a Java App with CockroachDB (Hibernate)](build-a-java-app-with-cockroachdb-hibernate.html)


[Build a Java App with CockroachDB (jOOQ)](build-a-java-app-with-cockroachdb-jooq.html)
[Build a Spring App with CockroachDB (MyBatis)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) | +| JavaScript/TypeScript | [Sequelize](https://www.npmjs.com/package/sequelize)


[Knex.js](https://knexjs.org/)
[Prisma](https://prisma.io)
[TypeORM](https://www.npmjs.com/package/typeorm) | v6.0.5
(use latest version of CockroachDB adapter)
2.5.1
3.14.0
0.3.17 {% comment %}remove-unsafe-crdb-setting{% endcomment %} | Full


Full
Full
Full | [`sequelize-cockroachdb`](https://www.npmjs.com/package/sequelize-cockroachdb)


N/A
N/A
N/A | [Build a Node.js App with CockroachDB (Sequelize)](build-a-nodejs-app-with-cockroachdb-sequelize.html)


[Build a Node.js App with CockroachDB (Knex.js)](build-a-nodejs-app-with-cockroachdb-knexjs.html)
[Build a Node.js App with CockroachDB (Prisma)](build-a-nodejs-app-with-cockroachdb-prisma.html)
[Build a TypeScript App with CockroachDB (TypeORM)](build-a-typescript-app-with-cockroachdb.html) | +| Ruby | [ActiveRecord](https://rubygems.org/gems/activerecord)
[RGeo/RGeo-ActiveRecord](https://github.com/cockroachdb/activerecord-cockroachdb-adapter#working-with-spatial-data) | 8.1.0
(use latest version of CockroachDB adapter) | Full | [`activerecord-cockroachdb-adapter`](https://rubygems.org/gems/activerecord-cockroachdb-adapter)
(includes client-side transaction retry handling) | [Build a Ruby App with CockroachDB (ActiveRecord)](build-a-ruby-app-with-cockroachdb-activerecord.html) | +| Python | [Django](https://pypi.org/project/Django/)
(including [GeoDjango](https://docs.djangoproject.com/en/3.1/ref/contrib/gis/))
[peewee](https://github.com/coleifer/peewee/)
[SQLAlchemy](https://www.sqlalchemy.org/) | 4.1.x
(use latest version of CockroachDB adapter)

3.13.3
0.7.13
1.4.17
(use latest version of CockroachDB adapter) | Full


Full
Full
Full | [`django-cockroachdb`](https://pypi.org/project/django-cockroachdb/)


N/A
N/A
[`sqlalchemy-cockroachdb`](https://pypi.org/project/sqlalchemy-cockroachdb)
(includes client-side transaction retry handling) | [Build a Python App with CockroachDB (Django)](build-a-python-app-with-cockroachdb-django.html)


N/A (See [peewee docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cockroach-database).)
[Build a Python App with CockroachDB (SQLAlchemy)](build-a-python-app-with-cockroachdb-sqlalchemy.html) | ## Application frameworks diff --git a/src/current/_includes/v25.1/orchestration/start-cockroachdb-insecure.md b/src/current/_includes/v25.1/orchestration/start-cockroachdb-insecure.md index 3406d48edbb..c9af5cc0267 100644 --- a/src/current/_includes/v25.1/orchestration/start-cockroachdb-insecure.md +++ b/src/current/_includes/v25.1/orchestration/start-cockroachdb-insecure.md @@ -1,10 +1,10 @@ -1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it. +1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it. - Download [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml): + Download [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml ~~~ {{site.data.alerts.callout_info}} @@ -27,11 +27,11 @@ Alternatively, if you'd rather start with a configuration file that has been customized for performance: - 1. 
Download our [performance version of `cockroachdb-statefulset-insecure.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml): + 1. Download our [performance version of `cockroachdb-statefulset-insecure.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml ~~~ 1. Modify the file wherever there is a `TODO` comment. @@ -72,12 +72,12 @@ pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s ~~~ -1. Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: +1. 
Use our [`cluster-init.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml ~~~ ~~~ diff --git a/src/current/_includes/v25.1/orchestration/start-cockroachdb-local-insecure.md b/src/current/_includes/v25.1/orchestration/start-cockroachdb-local-insecure.md index 552cb3cd25f..196a53bce4c 100644 --- a/src/current/_includes/v25.1/orchestration/start-cockroachdb-local-insecure.md +++ b/src/current/_includes/v25.1/orchestration/start-cockroachdb-local-insecure.md @@ -1,8 +1,8 @@ -1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it: +1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it: {% include_cached copy-clipboard.html %} ~~~ shell - $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml + $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml ~~~ ~~~ @@ -41,12 +41,12 @@ pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s ~~~ -1. 
Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: +1. Use our [`cluster-init.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml ~~~ ~~~ diff --git a/src/current/_includes/v25.1/orchestration/start-cockroachdb-secure.md b/src/current/_includes/v25.1/orchestration/start-cockroachdb-secure.md index 972cabc2d8e..a7299e1aa25 100644 --- a/src/current/_includes/v25.1/orchestration/start-cockroachdb-secure.md +++ b/src/current/_includes/v25.1/orchestration/start-cockroachdb-secure.md @@ -1,10 +1,10 @@ ### Configure the cluster -1. Download and modify our [StatefulSet configuration](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml): +1. Download and modify our [StatefulSet configuration](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml ~~~ 1. Update `secretName` with the name of the corresponding node secret. 
diff --git a/src/current/_includes/v25.1/orchestration/test-cluster-secure.md b/src/current/_includes/v25.1/orchestration/test-cluster-secure.md index f255d8d62fc..c146d634626 100644 --- a/src/current/_includes/v25.1/orchestration/test-cluster-secure.md +++ b/src/current/_includes/v25.1/orchestration/test-cluster-secure.md @@ -42,7 +42,7 @@ $ kubectl create \ {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ --f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/client.yaml +-f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/client.yaml ~~~ ~~~ diff --git a/src/current/_includes/v25.1/prod-deployment/decommission-pre-flight-checks.md b/src/current/_includes/v25.1/prod-deployment/decommission-pre-flight-checks.md index b267379384b..70e35e4b72e 100644 --- a/src/current/_includes/v25.1/prod-deployment/decommission-pre-flight-checks.md +++ b/src/current/_includes/v25.1/prod-deployment/decommission-pre-flight-checks.md @@ -14,5 +14,5 @@ Failed running "node decommission" These checks can be skipped by [passing the flag `--checks=skip` to `cockroach node decommission`]({% link {{ page.version.version }}/cockroach-node.md %}#decommission-checks). {{site.data.alerts.callout_info}} -The amount of remaining disk space on other nodes in the cluster is not yet considered as part of the decommissioning pre-flight checks. For more information, see [cockroachdb/cockroach#71757](https://github.com/cockroachdb/cockroach/issues/71757) +The amount of remaining disk space on other nodes in the cluster is not yet considered as part of the decommissioning pre-flight checks. 
For more information, see cockroachdb/cockroach#71757 {{site.data.alerts.end}} diff --git a/src/current/_includes/v25.1/sql/savepoints-and-high-priority-transactions.md b/src/current/_includes/v25.1/sql/savepoints-and-high-priority-transactions.md index c6de489e641..43f09ea415f 100644 --- a/src/current/_includes/v25.1/sql/savepoints-and-high-priority-transactions.md +++ b/src/current/_includes/v25.1/sql/savepoints-and-high-priority-transactions.md @@ -1 +1 @@ -[`ROLLBACK TO SAVEPOINT`]({% link {{ page.version.version }}/rollback-transaction.md %}#rollback-a-nested-transaction) (for either regular savepoints or "restart savepoints" defined with `cockroach_restart`) causes a "feature not supported" error after a DDL statement in a [`HIGH PRIORITY` transaction]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities), in order to avoid a transaction deadlock. For more information, see GitHub issue [#46414](https://www.github.com/cockroachdb/cockroach/issues/46414). +[`ROLLBACK TO SAVEPOINT`]({% link {{ page.version.version }}/rollback-transaction.md %}#rollback-a-nested-transaction) (for either regular savepoints or "restart savepoints" defined with `cockroach_restart`) causes a "feature not supported" error after a DDL statement in a [`HIGH PRIORITY` transaction]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities), in order to avoid a transaction deadlock. For more information, see GitHub issue #46414. diff --git a/src/current/_includes/v25.2/backward-incompatible/alpha.1.md b/src/current/_includes/v25.2/backward-incompatible/alpha.1.md index dff3b4ea3c5..a39cbe3e4d3 100644 --- a/src/current/_includes/v25.2/backward-incompatible/alpha.1.md +++ b/src/current/_includes/v25.2/backward-incompatible/alpha.1.md @@ -1,15 +1,15 @@ -- CockroachDB no longer performs environment variable expansion in the parameter `--certs-dir`. 
Uses like `--certs-dir='$HOME/path'` (expansion by CockroachDB) can be replaced by `--certs-dir="$HOME/path"` (expansion by the Unix shell). [#81298][#81298] -- In the Cockroach CLI, [`BOOL` values](../v23.1/bool.html) are now formatted as `t` or `f` instead of `True` or `False`. [#81943][#81943] -- Removed the `cockroach quit` command. It has been deprecated since v20.1. To [shut down a node](../v23.1/node-shutdown.html) gracefully, send a `SIGTERM` signal to it. [#82988][#82988] -- Added a cluster version to allow the [Pebble storage engine](../v23.1/architecture/storage-layer.html#pebble) to recombine certain SSTables (specifically, user keys that are split across multiple files in a level of the [log-structured merge-tree](../v23.1/architecture/storage-layer.html#log-structured-merge-trees)). Recombining the split user keys is required for supporting the range keys feature. The migration to recombine the SSTables is expected to be short (split user keys are rare in practice), but will block subsequent migrations until all tables have been recombined. The `storage.marked-for-compaction-files` time series metric can show the progress of the migration. [#84887][#84887] +- CockroachDB no longer performs environment variable expansion in the parameter `--certs-dir`. Uses like `--certs-dir='$HOME/path'` (expansion by CockroachDB) can be replaced by `--certs-dir="$HOME/path"` (expansion by the Unix shell). #81298 +- In the Cockroach CLI, [`BOOL` values](../v23.1/bool.html) are now formatted as `t` or `f` instead of `True` or `False`. #81943 +- Removed the `cockroach quit` command. It has been deprecated since v20.1. To [shut down a node](../v23.1/node-shutdown.html) gracefully, send a `SIGTERM` signal to it. 
#82988 +- Added a cluster version to allow the [Pebble storage engine](../v23.1/architecture/storage-layer.html#pebble) to recombine certain SSTables (specifically, user keys that are split across multiple files in a level of the [log-structured merge-tree](../v23.1/architecture/storage-layer.html#log-structured-merge-trees)). Recombining the split user keys is required for supporting the range keys feature. The migration to recombine the SSTables is expected to be short (split user keys are rare in practice), but will block subsequent migrations until all tables have been recombined. The `storage.marked-for-compaction-files` time series metric can show the progress of the migration. #84887 - Using a single TCP port listener for both RPC (node-node) and SQL client connections is now deprecated. This capability **will be removed** in the next version of CockroachDB. Instead, make one of the following configuration changes to your CockroachDB deployment: - Preferred: keep port `26257` for SQL, and allocate a new port, e.g., `26357`, for node-node RPC connections. For example, you might configure a node with the flags `--listen-addr=:26357 --sql-addr=:26257`, where subsequent nodes seeking to join would then use the flag `--join=othernode:26357,othernode:26257`. This will become the default configuration in the next version of CockroachDB. When using this mode of operation, care should be taken to use a `--join` flag that includes both the previous and new port numbers for other nodes, so that no network partition occurs during the upgrade. - - Optional: keep port `26257` for RPC, and allocate a new port, e.g., `26357`, for SQL connections. For example, you might configure a node with the flags `--listen-addr=:26257 --sql-addr=:26357`. When using this mode of operation, the `--join` flags do not need to be modified. However, SQL client apps or the SQL load balancer configuration (when in use) must be updated to use the new SQL port number. 
[#85671][#85671] -- If no `nullif` option is specified while using [`IMPORT CSV`](../v23.1/import.html), then a zero-length string in the input is now treated as `NULL`. The quoted empty string in the input is treated as an empty string. Similarly, if `nullif` is specified, then an unquoted value is treated as `NULL`, and a quoted value is treated as that string. These changes were made to make `IMPORT CSV` behave more similarly to `COPY CSV`. If the previous behavior (i.e., treating either quoted or unquoted values that match the `nullif` setting as `NULL`) is desired, you can use the new `allow_quoted_null` option in the `IMPORT` statement. [#84487][#84487] -- [`COPY FROM`](../v23.1/copy.html) operations are now atomic by default instead of being segmented into 100 row transactions. Set the `copy_from_atomic_enabled` session setting to `false` for the previous behavior. [#85986][#85986] -- The `GRANT` privilege has been removed and replaced by the more granular [`WITH GRANT OPTION`]({% link v25.2/grant.md %}#grant-privileges-with-the-option-to-grant-to-others), which provides control over which privileges are allowed to be granted. [#81310][#81310] -- Removed the ability to cast `int`, `int2`, and `int8` to a `0` length `BIT` or `VARBIT`. [#81266][#81266] -- Removed the deprecated `GRANT` privilege. [#81310][#81310] -- Removed the `ttl_automatic_column` storage parameter. The `crdb_internal_expiration` column is created when `ttl_expire_after` is set and removed when `ttl_expire_after` is reset. [#83134][#83134] -- Removed the byte string parameter in the `crdb_internal.schedule_sql_stats_compaction` function. [#82560][#82560] -- Changed the default value of the `enable_implicit_transaction_for_batch_statements` to `true`. This means that a [batch of statements]({% link v25.2/transactions.md %}#batched-statements) sent in one string separated by semicolons is treated as an implicit transaction. 
[#76834][#76834] + - Optional: keep port `26257` for RPC, and allocate a new port, e.g., `26357`, for SQL connections. For example, you might configure a node with the flags `--listen-addr=:26257 --sql-addr=:26357`. When using this mode of operation, the `--join` flags do not need to be modified. However, SQL client apps or the SQL load balancer configuration (when in use) must be updated to use the new SQL port number. #85671 +- If no `nullif` option is specified while using [`IMPORT CSV`](../v23.1/import.html), then a zero-length string in the input is now treated as `NULL`. The quoted empty string in the input is treated as an empty string. Similarly, if `nullif` is specified, then an unquoted value is treated as `NULL`, and a quoted value is treated as that string. These changes were made to make `IMPORT CSV` behave more similarly to `COPY CSV`. If the previous behavior (i.e., treating either quoted or unquoted values that match the `nullif` setting as `NULL`) is desired, you can use the new `allow_quoted_null` option in the `IMPORT` statement. #84487 +- [`COPY FROM`](../v23.1/copy.html) operations are now atomic by default instead of being segmented into 100 row transactions. Set the `copy_from_atomic_enabled` session setting to `false` for the previous behavior. #85986 +- The `GRANT` privilege has been removed and replaced by the more granular [`WITH GRANT OPTION`]({% link v25.2/grant.md %}#grant-privileges-with-the-option-to-grant-to-others), which provides control over which privileges are allowed to be granted. #81310 +- Removed the ability to cast `int`, `int2`, and `int8` to a `0` length `BIT` or `VARBIT`. #81266 +- Removed the deprecated `GRANT` privilege. #81310 +- Removed the `ttl_automatic_column` storage parameter. The `crdb_internal_expiration` column is created when `ttl_expire_after` is set and removed when `ttl_expire_after` is reset. #83134 +- Removed the byte string parameter in the `crdb_internal.schedule_sql_stats_compaction` function. 
#82560 +- Changed the default value of the `enable_implicit_transaction_for_batch_statements` to `true`. This means that a [batch of statements]({% link v25.2/transactions.md %}#batched-statements) sent in one string separated by semicolons is treated as an implicit transaction. #76834 diff --git a/src/current/_includes/v25.2/cdc/avro-udt-composite.md b/src/current/_includes/v25.2/cdc/avro-udt-composite.md index 7a34fbd3253..8374c4d2baf 100644 --- a/src/current/_includes/v25.2/cdc/avro-udt-composite.md +++ b/src/current/_includes/v25.2/cdc/avro-udt-composite.md @@ -1 +1 @@ -A changefeed in [Avro format]({% link {{ page.version.version }}/changefeed-messages.md %}#avro) will not be able to serialize [user-defined composite (tuple) types](create-type.html). [#102903](https://github.com/cockroachdb/cockroach/issues/102903) \ No newline at end of file +A changefeed in [Avro format]({% link {{ page.version.version }}/changefeed-messages.md %}#avro) will not be able to serialize [user-defined composite (tuple) types](create-type.html). #102903 \ No newline at end of file diff --git a/src/current/_includes/v25.2/cdc/csv-udt-composite.md b/src/current/_includes/v25.2/cdc/csv-udt-composite.md index 834bddd8366..39c88f3e06a 100644 --- a/src/current/_includes/v25.2/cdc/csv-udt-composite.md +++ b/src/current/_includes/v25.2/cdc/csv-udt-composite.md @@ -1 +1 @@ -A changefeed emitting [CSV]({% link {{ page.version.version }}/changefeed-messages.md %}#csv) will include `AS` labels in the message format when the changefeed serializes a [user-defined composite type]({% link {{ page.version.version }}/create-type.md %}). [#102905](https://github.com/cockroachdb/cockroach/issues/102905) \ No newline at end of file +A changefeed emitting [CSV]({% link {{ page.version.version }}/changefeed-messages.md %}#csv) will include `AS` labels in the message format when the changefeed serializes a [user-defined composite type]({% link {{ page.version.version }}/create-type.md %}). 
#102905 \ No newline at end of file diff --git a/src/current/_includes/v25.2/essential-metrics.md b/src/current/_includes/v25.2/essential-metrics.md index 7c958db3f50..4ffccd96275 100644 --- a/src/current/_includes/v25.2/essential-metrics.md +++ b/src/current/_includes/v25.2/essential-metrics.md @@ -201,4 +201,4 @@ If [Row-Level TTL]({% link {{ page.version.version }}/row-level-ttl.md %}) is co - [Custom Chart Debug Page]({% link {{ page.version.version }}/ui-custom-chart-debug-page.md %}) - [Cluster API]({% link {{ page.version.version }}/cluster-api.md %}) - [Essential Alerts]({% link {{ page.version.version }}/essential-alerts-{{ include.deployment}}.md %}) -- [CockroachDB Source Code - DB Console metrics to graphs mappings (in *.tsx files)](https://github.com/cockroachdb/cockroach/tree/master/pkg/ui/workspaces/db-console/src/views/cluster/containers/nodeGraphs/dashboards) +- CockroachDB Source Code - DB Console metrics to graphs mappings (in *.tsx files) diff --git a/src/current/_includes/v25.2/faq/what-is-crdb.md b/src/current/_includes/v25.2/faq/what-is-crdb.md index 28857ed61fa..ee04ca10506 100644 --- a/src/current/_includes/v25.2/faq/what-is-crdb.md +++ b/src/current/_includes/v25.2/faq/what-is-crdb.md @@ -1,6 +1,6 @@ CockroachDB is a [distributed SQL](https://www.cockroachlabs.com/blog/what-is-distributed-sql/) database built on a transactional and strongly-consistent key-value store. It **scales** horizontally; **survives** disk, machine, rack, and even datacenter failures with minimal latency disruption and no manual intervention; supports **strongly-consistent** ACID transactions; and provides a familiar **SQL** API for structuring, manipulating, and querying data. -CockroachDB is inspired by Google's [Spanner](http://research.google.com/archive/spanner.html) and [F1](http://research.google.com/pubs/pub38125.html) technologies, and the [source code](https://github.com/cockroachdb/cockroach) is freely available. 
+CockroachDB is inspired by Google's [Spanner](http://research.google.com/archive/spanner.html) and [F1](http://research.google.com/pubs/pub38125.html) technologies, and the source code is freely available. {{site.data.alerts.callout_success}} For a deeper dive into CockroachDB's capabilities and how it fits into the database landscape, take the free [**Intro to Distributed SQL and CockroachDB**](https://university.cockroachlabs.com/courses/course-v1:crl+intro-to-distributed-sql-and-cockroachdb+self-paced/about) course on Cockroach University. diff --git a/src/current/_includes/v25.2/finalization-required/119894.md b/src/current/_includes/v25.2/finalization-required/119894.md index f2b393c3c0e..f5177dcd523 100644 --- a/src/current/_includes/v25.2/finalization-required/119894.md +++ b/src/current/_includes/v25.2/finalization-required/119894.md @@ -1 +1 @@ -[Splits](https://cockroachlabs.com/docs/{{ include.version }}/architecture/distribution-layer#range-splits) no longer hold [latches](https://cockroachlabs.com/docs/architecture/distribution-layer.#latch-manager) for time proportional to the range size while computing [MVCC](https://cockroachlabs.com/docs/{{ include.version }}/architecture/storage-layer#mvcc) statistics. Instead, MVCC statistics are pre-computed before the critical section of the split. As a side effect, the resulting statistics are no longer 100% accurate because they may correctly distribute writes concurrent with the split. To mitigate against this potential inaccuracy, and to prevent the statistics from drifting after successive splits, the existing stored statistics are re-computed and corrected if needed during the non-critical section of the split. 
[#119894](https://github.com/cockroachdb/cockroach/pull/119894) +[Splits](https://cockroachlabs.com/docs/{{ include.version }}/architecture/distribution-layer#range-splits) no longer hold [latches](https://cockroachlabs.com/docs/{{ include.version }}/architecture/distribution-layer#latch-manager) for time proportional to the range size while computing [MVCC](https://cockroachlabs.com/docs/{{ include.version }}/architecture/storage-layer#mvcc) statistics. Instead, MVCC statistics are pre-computed before the critical section of the split. As a side effect, the resulting statistics are no longer 100% accurate because they may correctly distribute writes concurrent with the split. To mitigate against this potential inaccuracy, and to prevent the statistics from drifting after successive splits, the existing stored statistics are re-computed and corrected if needed during the non-critical section of the split. #119894 diff --git a/src/current/_includes/v25.2/known-limitations/alter-changefeed-cdc-queries.md b/src/current/_includes/v25.2/known-limitations/alter-changefeed-cdc-queries.md index 56dd7eeaacd..4134b3c9f86 100644 --- a/src/current/_includes/v25.2/known-limitations/alter-changefeed-cdc-queries.md +++ b/src/current/_includes/v25.2/known-limitations/alter-changefeed-cdc-queries.md @@ -1 +1 @@ -{% if page.name == "alter-changefeed.md" %} `ALTER CHANGEFEED` {% else %} [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) {% endif %} is not fully supported with changefeeds that use {% if page.name == "cdc-queries.md" %} CDC queries. {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}). {% endif %} You can alter the options that a changefeed uses, but you cannot alter the changefeed target tables. 
[#83033](https://github.com/cockroachdb/cockroach/issues/83033) \ No newline at end of file +{% if page.name == "alter-changefeed.md" %} `ALTER CHANGEFEED` {% else %} [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) {% endif %} is not fully supported with changefeeds that use {% if page.name == "cdc-queries.md" %} CDC queries. {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}). {% endif %} You can alter the options that a changefeed uses, but you cannot alter the changefeed target tables. #83033 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/alter-changefeed-limitations.md b/src/current/_includes/v25.2/known-limitations/alter-changefeed-limitations.md index a183f2964f4..cb75ad71c55 100644 --- a/src/current/_includes/v25.2/known-limitations/alter-changefeed-limitations.md +++ b/src/current/_includes/v25.2/known-limitations/alter-changefeed-limitations.md @@ -1,4 +1,4 @@ -- It is necessary to [`PAUSE`]({% link {{ page.version.version }}/pause-job.md %}) the changefeed before performing any [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) statement. [#77171](https://github.com/cockroachdb/cockroach/issues/77171) +- It is necessary to [`PAUSE`]({% link {{ page.version.version }}/pause-job.md %}) the changefeed before performing any [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) statement. #77171 - CockroachDB does not keep track of the [`initial_scan`]({% link {{ page.version.version }}/create-changefeed.md %}#initial-scan) option applied to tables when it is set to `yes` or `only`. 
For example: ~~~ sql diff --git a/src/current/_includes/v25.2/known-limitations/alter-view-limitations.md b/src/current/_includes/v25.2/known-limitations/alter-view-limitations.md index 642bed6ce08..e44921629ff 100644 --- a/src/current/_includes/v25.2/known-limitations/alter-view-limitations.md +++ b/src/current/_includes/v25.2/known-limitations/alter-view-limitations.md @@ -1,4 +1,4 @@ `ALTER VIEW` does not currently support: - Changing the [`SELECT`]({% link {{ page.version.version }}/select-clause.md %}) statement executed by a view. Instead, you must drop the existing view and create a new view. -- Renaming a view that other views depend on. This feature may be added in the future. [#10083](https://github.com/cockroachdb/cockroach/issues/10083) \ No newline at end of file +- Renaming a view that other views depend on. This feature may be added in the future. #10083 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/aost-limitations.md b/src/current/_includes/v25.2/known-limitations/aost-limitations.md index 811c884d08d..c42e62800d3 100644 --- a/src/current/_includes/v25.2/known-limitations/aost-limitations.md +++ b/src/current/_includes/v25.2/known-limitations/aost-limitations.md @@ -1 +1 @@ -CockroachDB does not support placeholders in {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %}. The time value must be a constant value embedded in the SQL string. [#30955](https://github.com/cockroachdb/cockroach/issues/30955) \ No newline at end of file +CockroachDB does not support placeholders in {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %}. The time value must be a constant value embedded in the SQL string. 
#30955 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/cannot-refresh-materialized-views-inside-transactions.md b/src/current/_includes/v25.2/known-limitations/cannot-refresh-materialized-views-inside-transactions.md index b0aaf728177..2f6fe466a95 100644 --- a/src/current/_includes/v25.2/known-limitations/cannot-refresh-materialized-views-inside-transactions.md +++ b/src/current/_includes/v25.2/known-limitations/cannot-refresh-materialized-views-inside-transactions.md @@ -24,4 +24,4 @@ SQLSTATE: 25000 ~~~ - [#66008](https://github.com/cockroachdb/cockroach/issues/66008) + #66008 diff --git a/src/current/_includes/v25.2/known-limitations/cdc-queries-column-families.md b/src/current/_includes/v25.2/known-limitations/cdc-queries-column-families.md index 505a8c9700e..52bdabec236 100644 --- a/src/current/_includes/v25.2/known-limitations/cdc-queries-column-families.md +++ b/src/current/_includes/v25.2/known-limitations/cdc-queries-column-families.md @@ -1 +1 @@ -Creating a changefeed with {% if page.name == "cdc-queries.md" %} CDC queries {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}) {% endif %} on tables with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %} is not supported. [#127761](https://github.com/cockroachdb/cockroach/issues/127761) \ No newline at end of file +Creating a changefeed with {% if page.name == "cdc-queries.md" %} CDC queries {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}) {% endif %} on tables with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %} is not supported. 
#127761 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/cdc-queries.md b/src/current/_includes/v25.2/known-limitations/cdc-queries.md index 2839eba5eda..552900531b0 100644 --- a/src/current/_includes/v25.2/known-limitations/cdc-queries.md +++ b/src/current/_includes/v25.2/known-limitations/cdc-queries.md @@ -3,5 +3,5 @@ - The following are not permitted in CDC queries: - [Volatile functions]({% link {{ page.version.version }}/functions-and-operators.md %}#function-volatility). - Sub-select queries. - - [Aggregate]({% link {{ page.version.version }}/functions-and-operators.md %}#aggregate-functions) and [window functions]({% link {{ page.version.version }}/window-functions.md %}) (i.e., functions operating over many rows). [#98237](https://github.com/cockroachdb/cockroach/issues/98237) -- `delete` changefeed events will only contain the [primary key]({% link {{ page.version.version }}/primary-key.md %}). All other columns will emit as `NULL`. See [Capture delete messages]({% link {{ page.version.version }}/cdc-queries.md %}#capture-delete-messages) for detail on running a CDC query that emits the deleted values. [#83835](https://github.com/cockroachdb/cockroach/issues/83835) + - [Aggregate]({% link {{ page.version.version }}/functions-and-operators.md %}#aggregate-functions) and [window functions]({% link {{ page.version.version }}/window-functions.md %}) (i.e., functions operating over many rows). #98237 +- `delete` changefeed events will only contain the [primary key]({% link {{ page.version.version }}/primary-key.md %}). All other columns will emit as `NULL`. See [Capture delete messages]({% link {{ page.version.version }}/cdc-queries.md %}#capture-delete-messages) for detail on running a CDC query that emits the deleted values. 
#83835 diff --git a/src/current/_includes/v25.2/known-limitations/cdc.md b/src/current/_includes/v25.2/known-limitations/cdc.md index a473e94367c..0a3914da8bd 100644 --- a/src/current/_includes/v25.2/known-limitations/cdc.md +++ b/src/current/_includes/v25.2/known-limitations/cdc.md @@ -1,8 +1,8 @@ -- Changefeed target options are limited to tables and [column families]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}). [#73435](https://github.com/cockroachdb/cockroach/issues/73435) +- Changefeed target options are limited to tables and [column families]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}). #73435 - {% include {{page.version.version}}/cdc/kafka-vpc-limitation.md %} -- Webhook sinks only support HTTPS. Use the [`insecure_tls_skip_verify`]({% link {{ page.version.version }}/create-changefeed.md %}#insecure-tls-skip-verify) parameter when testing to disable certificate verification; however, this still requires HTTPS and certificates. [#73431](https://github.com/cockroachdb/cockroach/issues/73431) -- Formats for changefeed messages are not supported by all changefeed sinks. Refer to the [Changefeed Sinks]({% link {{ page.version.version }}/changefeed-sinks.md %}) page for details on compatible formats with each sink and the [`format`]({% link {{ page.version.version }}/create-changefeed.md %}) option to specify a changefeed message format. [#73432](https://github.com/cockroachdb/cockroach/issues/73432) -- Using the [`split_column_families`]({% link {{ page.version.version }}/create-changefeed.md %}#split-column-families) and [`resolved`]({% link {{ page.version.version }}/create-changefeed.md %}#resolved) options on the same changefeed will cause an error when using the following [sinks](changefeed-sinks.html): Kafka and Google Cloud Pub/Sub. Instead, use the individual `FAMILY` keyword to specify column families when creating a changefeed. 
[#79452](https://github.com/cockroachdb/cockroach/issues/79452) +- Webhook sinks only support HTTPS. Use the [`insecure_tls_skip_verify`]({% link {{ page.version.version }}/create-changefeed.md %}#insecure-tls-skip-verify) parameter when testing to disable certificate verification; however, this still requires HTTPS and certificates. #73431 +- Formats for changefeed messages are not supported by all changefeed sinks. Refer to the [Changefeed Sinks]({% link {{ page.version.version }}/changefeed-sinks.md %}) page for details on compatible formats with each sink and the [`format`]({% link {{ page.version.version }}/create-changefeed.md %}) option to specify a changefeed message format. #73432 +- Using the [`split_column_families`]({% link {{ page.version.version }}/create-changefeed.md %}#split-column-families) and [`resolved`]({% link {{ page.version.version }}/create-changefeed.md %}#resolved) options on the same changefeed will cause an error when using the following [sinks](changefeed-sinks.html): Kafka and Google Cloud Pub/Sub. Instead, use the individual `FAMILY` keyword to specify column families when creating a changefeed. 
#79452 - {% include {{page.version.version}}/cdc/types-udt-composite-general.md %} The following limitations apply: - {% include {{page.version.version}}/cdc/avro-udt-composite.md %} - {% include {{page.version.version}}/cdc/csv-udt-composite.md %} \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/changefeed-column-family-message.md b/src/current/_includes/v25.2/known-limitations/changefeed-column-family-message.md index 41744b9b4b4..e77b6cc51e0 100644 --- a/src/current/_includes/v25.2/known-limitations/changefeed-column-family-message.md +++ b/src/current/_includes/v25.2/known-limitations/changefeed-column-family-message.md @@ -1 +1 @@ -When you create a changefeed on a table with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %}, the changefeed will emit messages per column family in separate streams. As a result, [changefeed messages]({% link {{ page.version.version }}/changefeed-messages.md %}) for different column families will arrive at the [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) under separate topics. [#127736](https://github.com/cockroachdb/cockroach/issues/127736) \ No newline at end of file +When you create a changefeed on a table with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %}, the changefeed will emit messages per column family in separate streams. As a result, [changefeed messages]({% link {{ page.version.version }}/changefeed-messages.md %}) for different column families will arrive at the [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) under separate topics. 
#127736 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/copy-syntax.md b/src/current/_includes/v25.2/known-limitations/copy-syntax.md index e64a075dcac..bd2e56e1c25 100644 --- a/src/current/_includes/v25.2/known-limitations/copy-syntax.md +++ b/src/current/_includes/v25.2/known-limitations/copy-syntax.md @@ -1,5 +1,5 @@ CockroachDB does not yet support the following `COPY` syntax: - - `COPY ... WITH FREEZE`. [#85573](https://github.com/cockroachdb/cockroach/issues/85573) - - `COPY ... WITH QUOTE`. [#85574](https://github.com/cockroachdb/cockroach/issues/85574) - - `COPY ... FROM ... WHERE `. [#54580](https://github.com/cockroachdb/cockroach/issues/54580) + - `COPY ... WITH FREEZE`. #85573 + - `COPY ... WITH QUOTE`. #85574 + - `COPY ... FROM ... WHERE `. #54580 diff --git a/src/current/_includes/v25.2/known-limitations/create-statistics-aost-limitation.md b/src/current/_includes/v25.2/known-limitations/create-statistics-aost-limitation.md index 09f86f51c48..d318ce338be 100644 --- a/src/current/_includes/v25.2/known-limitations/create-statistics-aost-limitation.md +++ b/src/current/_includes/v25.2/known-limitations/create-statistics-aost-limitation.md @@ -1 +1 @@ -The `ANALYZE` alias {% if page.name != "create-statistics.md" %}of [`CREATE STATISTICS`]({% link {{ page.version.version }}/create-statistics.md %}){% endif %} does not support specifying an {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %} timestamp. `ANALYZE` statements use `AS OF SYSTEM TIME '-0.001ms'` automatically. For more control over the statistics interval, use the `CREATE STATISTICS` syntax instead. 
[#96430](https://github.com/cockroachdb/cockroach/issues/96430) \ No newline at end of file +The `ANALYZE` alias {% if page.name != "create-statistics.md" %}of [`CREATE STATISTICS`]({% link {{ page.version.version }}/create-statistics.md %}){% endif %} does not support specifying an {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %} timestamp. `ANALYZE` statements use `AS OF SYSTEM TIME '-0.001ms'` automatically. For more control over the statistics interval, use the `CREATE STATISTICS` syntax instead. #96430 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/drop-column-partial-index.md b/src/current/_includes/v25.2/known-limitations/drop-column-partial-index.md index 9fd1811cc43..fd28f0a96a2 100644 --- a/src/current/_includes/v25.2/known-limitations/drop-column-partial-index.md +++ b/src/current/_includes/v25.2/known-limitations/drop-column-partial-index.md @@ -1 +1 @@ -CockroachDB prevents a column from being dropped using [`ALTER TABLE ... DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) if it is referenced by a partial index predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). [#97813](https://github.com/cockroachdb/cockroach/issues/97813). \ No newline at end of file +CockroachDB prevents a column from being dropped using [`ALTER TABLE ... DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) if it is referenced by a partial index predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). #97813. 
\ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/drop-owned-by-limitations.md b/src/current/_includes/v25.2/known-limitations/drop-owned-by-limitations.md index 95685f6adf1..af99d4b7cfe 100644 --- a/src/current/_includes/v25.2/known-limitations/drop-owned-by-limitations.md +++ b/src/current/_includes/v25.2/known-limitations/drop-owned-by-limitations.md @@ -10,4 +10,4 @@ The phrase "synthetic privileges" in the error message refers to [system-level privileges]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges). - The workaround is to use [`SHOW SYSTEM GRANTS FOR {role}`](show-system-grants.html) and then use [`REVOKE SYSTEM ...`](revoke.html#revoke-system-level-privileges-on-the-entire-cluster) for each privilege in the result. [#88149](https://github.com/cockroachdb/cockroach/issues/88149) \ No newline at end of file + The workaround is to use [`SHOW SYSTEM GRANTS FOR {role}`](show-system-grants.html) and then use [`REVOKE SYSTEM ...`](revoke.html#revoke-system-level-privileges-on-the-entire-cluster) for each privilege in the result. #88149 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/drop-trigger-limitations.md b/src/current/_includes/v25.2/known-limitations/drop-trigger-limitations.md index 90745f7e17a..2a633ccb369 100644 --- a/src/current/_includes/v25.2/known-limitations/drop-trigger-limitations.md +++ b/src/current/_includes/v25.2/known-limitations/drop-trigger-limitations.md @@ -1 +1 @@ -[`DROP TRIGGER`]({% link {{ page.version.version }}/drop-trigger.md %}) with `CASCADE` is not supported. [#128151](https://github.com/cockroachdb/cockroach/issues/128151) \ No newline at end of file +[`DROP TRIGGER`]({% link {{ page.version.version }}/drop-trigger.md %}) with `CASCADE` is not supported. 
#128151 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/expression-index-limitations.md b/src/current/_includes/v25.2/known-limitations/expression-index-limitations.md index c0e94185948..9487028791c 100644 --- a/src/current/_includes/v25.2/known-limitations/expression-index-limitations.md +++ b/src/current/_includes/v25.2/known-limitations/expression-index-limitations.md @@ -1,6 +1,6 @@ - The expression cannot reference columns outside the index's table. - Functional expression output must be determined by the input arguments. For example, you can't use the [volatile function]({% link {{ page.version.version }}/functions-and-operators.md %}#function-volatility) `now()` to create an index because its output depends on more than just the function arguments. -- CockroachDB does not allow {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} to reference [computed columns]({% link {{ page.version.version }}/computed-columns.md %}). [#67900](https://github.com/cockroachdb/cockroach/issues/67900) +- CockroachDB does not allow {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} to reference [computed columns]({% link {{ page.version.version }}/computed-columns.md %}). #67900 - CockroachDB does not support expressions as `ON CONFLICT` targets. This means that unique {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} cannot be selected as arbiters for [`INSERT .. ON CONFLICT`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause) statements. 
For example: {% include_cached copy-clipboard.html %} @@ -40,4 +40,4 @@ HINT: try \h INSERT ~~~ - [#67893](https://github.com/cockroachdb/cockroach/issues/67893) + #67893 diff --git a/src/current/_includes/v25.2/known-limitations/forecasted-stats-limitations.md b/src/current/_includes/v25.2/known-limitations/forecasted-stats-limitations.md index c8753124a96..b5d03e8ffc5 100644 --- a/src/current/_includes/v25.2/known-limitations/forecasted-stats-limitations.md +++ b/src/current/_includes/v25.2/known-limitations/forecasted-stats-limitations.md @@ -6,4 +6,4 @@ Although [`SHOW STATISTICS WITH FORECAST`]({% link {{ page.version.version }}/show-statistics.md %}#display-forecasted-statistics) shows the settings taking effect immediately, they do not actually take effect until new statistics are collected (as can be verified with [`EXPLAIN`]({% link {{ page.version.version }}/explain.md %})). - As a workaround, disable and enable forecasting at the [cluster]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-clusters) or [table]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-tables) level. This will invalidate the statistics cache and cause these settings to take effect immediately. [#123852](https://github.com/cockroachdb/cockroach/issues/123852) \ No newline at end of file + As a workaround, disable and enable forecasting at the [cluster]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-clusters) or [table]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-tables) level. This will invalidate the statistics cache and cause these settings to take effect immediately. 
#123852 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/full-text-search-unsupported.md b/src/current/_includes/v25.2/known-limitations/full-text-search-unsupported.md index 7b5a83f2cae..2be666994f1 100644 --- a/src/current/_includes/v25.2/known-limitations/full-text-search-unsupported.md +++ b/src/current/_includes/v25.2/known-limitations/full-text-search-unsupported.md @@ -11,4 +11,4 @@ - `!! tsquery` comparisons. - `tsquery @> tsquery` and `tsquery <@ tsquery` comparisons. -[#41288](https://github.com/cockroachdb/cockroach/issues/41288) \ No newline at end of file +#41288 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/generic-query-plan-limitations.md b/src/current/_includes/v25.2/known-limitations/generic-query-plan-limitations.md index e28e66d5f32..03d72195334 100644 --- a/src/current/_includes/v25.2/known-limitations/generic-query-plan-limitations.md +++ b/src/current/_includes/v25.2/known-limitations/generic-query-plan-limitations.md @@ -1,2 +1,2 @@ -- Because [generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache) use lookup joins instead of the scans and revscans used by custom query plans, generic query plans do not perform as well as custom query plans in some cases. [#128916](https://github.com/cockroachdb/cockroach/issues/128916) -- [Generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-type) are not included in the [plan cache]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache). This means a generic query plan built and optimized for a prepared statement in one session cannot be used by another session. To reuse generic query plans for maximum performance, a prepared statement should be executed multiple times instead of prepared and executed once. 
[#128911](https://github.com/cockroachdb/cockroach/issues/128911) \ No newline at end of file +- Because [generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache) use lookup joins instead of the scans and revscans used by custom query plans, generic query plans do not perform as well as custom query plans in some cases. #128916 +- [Generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-type) are not included in the [plan cache]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache). This means a generic query plan built and optimized for a prepared statement in one session cannot be used by another session. To reuse generic query plans for maximum performance, a prepared statement should be executed multiple times instead of prepared and executed once. #128911 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/jsonpath-limitations.md b/src/current/_includes/v25.2/known-limitations/jsonpath-limitations.md index 9b51bfb6e87..a74232bd40b 100644 --- a/src/current/_includes/v25.2/known-limitations/jsonpath-limitations.md +++ b/src/current/_includes/v25.2/known-limitations/jsonpath-limitations.md @@ -1,2 +1,2 @@ -- The following keywords are only accepted in lowercase: `strict`, `lax`, `exists`, `like_regex`, `flag`, `is unknown`, `to`, `last`. [#144255](https://github.com/cockroachdb/cockroach/issues/144255) -- Comparisons involving empty arrays (e.g., `SELECT jsonb_path_query('{"a": [1], "b": []}', '$.a == $.b');`) return `null`, rather than `false` as in PostgreSQL. [#145099](https://github.com/cockroachdb/cockroach/issues/145099) \ No newline at end of file +- The following keywords are only accepted in lowercase: `strict`, `lax`, `exists`, `like_regex`, `flag`, `is unknown`, `to`, `last`. 
#144255 +- Comparisons involving empty arrays (e.g., `SELECT jsonb_path_query('{"a": [1], "b": []}', '$.a == $.b');`) return `null`, rather than `false` as in PostgreSQL. #145099 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/ldr-column-families.md b/src/current/_includes/v25.2/known-limitations/ldr-column-families.md index 2a7c3bbba52..0c9dcd74ae4 100644 --- a/src/current/_includes/v25.2/known-limitations/ldr-column-families.md +++ b/src/current/_includes/v25.2/known-limitations/ldr-column-families.md @@ -1 +1 @@ -Replicating tables cannot contain [column families]({% link {{ page.version.version }}/column-families.md %}). [#133562](https://github.com/cockroachdb/cockroach/issues/133562) \ No newline at end of file +Replicating tables cannot contain [column families]({% link {{ page.version.version }}/column-families.md %}). #133562 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/ldr-composite-primary.md b/src/current/_includes/v25.2/known-limitations/ldr-composite-primary.md index ac897af35a7..2a790952946 100644 --- a/src/current/_includes/v25.2/known-limitations/ldr-composite-primary.md +++ b/src/current/_includes/v25.2/known-limitations/ldr-composite-primary.md @@ -1 +1 @@ -The [primary key]({% link {{ page.version.version }}/primary-key.md %}) in replicating tables cannot contain composite types. [#133572](https://github.com/cockroachdb/cockroach/issues/133572) \ No newline at end of file +The [primary key]({% link {{ page.version.version }}/primary-key.md %}) in replicating tables cannot contain composite types. 
#133572 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/ldr-indexes.md b/src/current/_includes/v25.2/known-limitations/ldr-indexes.md index 0bf7f60c2d4..4936f9868bd 100644 --- a/src/current/_includes/v25.2/known-limitations/ldr-indexes.md +++ b/src/current/_includes/v25.2/known-limitations/ldr-indexes.md @@ -1 +1 @@ -Replicating tables cannot contain an [index]({% link {{ page.version.version }}/indexes.md %}) that requires expression evaluation before insertion. [#133560](https://github.com/cockroachdb/cockroach/issues/133560) \ No newline at end of file +Replicating tables cannot contain an [index]({% link {{ page.version.version }}/indexes.md %}) that requires expression evaluation before insertion. #133560 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/ldr-sequences.md b/src/current/_includes/v25.2/known-limitations/ldr-sequences.md index 4e39f3630e3..693c0cc685b 100644 --- a/src/current/_includes/v25.2/known-limitations/ldr-sequences.md +++ b/src/current/_includes/v25.2/known-limitations/ldr-sequences.md @@ -1 +1 @@ -Replicating table cannot reference [sequences]({% link {{ page.version.version }}/create-sequence.md %}). [#132303](https://github.com/cockroachdb/cockroach/issues/132303) \ No newline at end of file +Replicating table cannot reference [sequences]({% link {{ page.version.version }}/create-sequence.md %}). #132303 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/ldr-triggers.md b/src/current/_includes/v25.2/known-limitations/ldr-triggers.md index 55f8e885b97..a587568b69a 100644 --- a/src/current/_includes/v25.2/known-limitations/ldr-triggers.md +++ b/src/current/_includes/v25.2/known-limitations/ldr-triggers.md @@ -1 +1 @@ -Replicating tables cannot reference triggers. [#132301](https://github.com/cockroachdb/cockroach/issues/132301) \ No newline at end of file +Replicating tables cannot reference triggers. 
#132301 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/ldr-udfs.md b/src/current/_includes/v25.2/known-limitations/ldr-udfs.md index fb642f14751..62eea605b52 100644 --- a/src/current/_includes/v25.2/known-limitations/ldr-udfs.md +++ b/src/current/_includes/v25.2/known-limitations/ldr-udfs.md @@ -1 +1 @@ -Replicating tables cannot reference [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). [#132302](https://github.com/cockroachdb/cockroach/issues/132302) \ No newline at end of file +Replicating tables cannot reference [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). #132302 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/locality-optimized-search-virtual-computed-columns.md b/src/current/_includes/v25.2/known-limitations/locality-optimized-search-virtual-computed-columns.md index d6acf418aa8..39b3f2cb00e 100644 --- a/src/current/_includes/v25.2/known-limitations/locality-optimized-search-virtual-computed-columns.md +++ b/src/current/_includes/v25.2/known-limitations/locality-optimized-search-virtual-computed-columns.md @@ -1 +1 @@ -- {% if page.name == "cost-based-optimizer.md" %} Locality optimized search {% else %} [Locality optimized search]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) {% endif %} does not work for queries that use [partitioned unique indexes]({% link {{ page.version.version }}/partitioning.md %}#partition-using-a-secondary-index) on [virtual computed columns](computed-columns.html#virtual-computed-columns). A workaround for computed columns is to make the virtual computed column a [stored computed column](computed-columns.html#stored-computed-columns). Locality optimized search does not work for queries that use partitioned unique [expression indexes](expression-indexes.html). 
[#68129](https://github.com/cockroachdb/cockroach/issues/68129) +- {% if page.name == "cost-based-optimizer.md" %} Locality optimized search {% else %} [Locality optimized search]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) {% endif %} does not work for queries that use [partitioned unique indexes]({% link {{ page.version.version }}/partitioning.md %}#partition-using-a-secondary-index) on [virtual computed columns](computed-columns.html#virtual-computed-columns). A workaround for computed columns is to make the virtual computed column a [stored computed column](computed-columns.html#stored-computed-columns). Locality optimized search does not work for queries that use partitioned unique [expression indexes](expression-indexes.html). #68129 diff --git a/src/current/_includes/v25.2/known-limitations/materialized-views-no-stats.md b/src/current/_includes/v25.2/known-limitations/materialized-views-no-stats.md index 02f2bd787c4..2bfe00d5307 100644 --- a/src/current/_includes/v25.2/known-limitations/materialized-views-no-stats.md +++ b/src/current/_includes/v25.2/known-limitations/materialized-views-no-stats.md @@ -1 +1 @@ -- The optimizer may not select the most optimal query plan when querying materialized views because CockroachDB does not [collect statistics]({% link {{ page.version.version }}/cost-based-optimizer.md %}#table-statistics) on materialized views. [#78181](https://github.com/cockroachdb/cockroach/issues/78181). +- The optimizer may not select the most optimal query plan when querying materialized views because CockroachDB does not [collect statistics]({% link {{ page.version.version }}/cost-based-optimizer.md %}#table-statistics) on materialized views. #78181. 
diff --git a/src/current/_includes/v25.2/known-limitations/multiple-arbiter-indexes.md b/src/current/_includes/v25.2/known-limitations/multiple-arbiter-indexes.md index c9861623314..28b41eca491 100644 --- a/src/current/_includes/v25.2/known-limitations/multiple-arbiter-indexes.md +++ b/src/current/_includes/v25.2/known-limitations/multiple-arbiter-indexes.md @@ -1 +1 @@ -CockroachDB does not currently support multiple arbiter indexes for [`INSERT ON CONFLICT DO UPDATE`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause), and will return an error if there are multiple unique or exclusion constraints matching the `ON CONFLICT DO UPDATE` specification. [#53170](https://github.com/cockroachdb/cockroach/issues/53170) \ No newline at end of file +CockroachDB does not currently support multiple arbiter indexes for [`INSERT ON CONFLICT DO UPDATE`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause), and will return an error if there are multiple unique or exclusion constraints matching the `ON CONFLICT DO UPDATE` specification. #53170 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/plpgsql-limitations.md b/src/current/_includes/v25.2/known-limitations/plpgsql-limitations.md index c17954748a0..6050d209753 100644 --- a/src/current/_includes/v25.2/known-limitations/plpgsql-limitations.md +++ b/src/current/_includes/v25.2/known-limitations/plpgsql-limitations.md @@ -1,25 +1,25 @@ {% if page.name != "known-limitations.md" # New limitations in v24.2 %} {% endif %} -- It is not possible to use a variable as a target more than once in the same `INTO` clause. For example, `SELECT 1, 2 INTO x, x;`. [#121605](https://github.com/cockroachdb/cockroach/issues/121605) -- PLpgSQL variable declarations cannot inherit the type of a table row or column using `%TYPE` or `%ROWTYPE` syntax. 
[#114676](https://github.com/cockroachdb/cockroach/issues/114676) -- PL/pgSQL arguments cannot be referenced with ordinals (e.g., `$1`, `$2`). [#114701](https://github.com/cockroachdb/cockroach/issues/114701) +- It is not possible to use a variable as a target more than once in the same `INTO` clause. For example, `SELECT 1, 2 INTO x, x;`. #121605 +- PLpgSQL variable declarations cannot inherit the type of a table row or column using `%TYPE` or `%ROWTYPE` syntax. #114676 +- PL/pgSQL arguments cannot be referenced with ordinals (e.g., `$1`, `$2`). #114701 - The following statements are not supported: - - `FOR` cursor loops, `FOR` query loops, and `FOREACH` loops. [#105246](https://github.com/cockroachdb/cockroach/issues/105246) - - `PERFORM`, `EXECUTE`, `GET DIAGNOSTICS`, and `CASE`. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- PL/pgSQL exception blocks cannot catch [transaction retry errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}). [#111446](https://github.com/cockroachdb/cockroach/issues/111446) -- `RAISE` statements cannot be annotated with names of schema objects related to the error (i.e., using `COLUMN`, `CONSTRAINT`, `DATATYPE`, `TABLE`, or `SCHEMA`). [#106237](https://github.com/cockroachdb/cockroach/issues/106237) -- `RAISE` statements message the client directly, and do not produce log output. [#117750](https://github.com/cockroachdb/cockroach/issues/117750) -- `ASSERT` debugging checks are not supported. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- `RECORD` parameters and variables are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). [#105713](https://github.com/cockroachdb/cockroach/issues/105713) -- Variable shadowing (e.g., declaring a variable with the same name in an inner block) is not supported in PL/pgSQL. 
[#117508](https://github.com/cockroachdb/cockroach/issues/117508) -- Syntax for accessing members of composite types without parentheses is not supported. [#114687](https://github.com/cockroachdb/cockroach/issues/114687) -- `NOT NULL` variable declarations are not supported. [#105243](https://github.com/cockroachdb/cockroach/issues/105243) -- Cursors opened in PL/pgSQL execute their queries on opening, affecting performance and resource usage. [#111479](https://github.com/cockroachdb/cockroach/issues/111479) -- Cursors in PL/pgSQL cannot be declared with arguments. [#117746](https://github.com/cockroachdb/cockroach/issues/117746) -- `OPEN FOR EXECUTE` is not supported for opening cursors. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- The `print_strict_params` option is not supported in PL/pgSQL. [#123671](https://github.com/cockroachdb/cockroach/issues/123671) -- The `FOUND` local variable, which checks whether a statement affected any rows, is not supported in PL/pgSQL. [#122306](https://github.com/cockroachdb/cockroach/issues/122306) -- By default, when a PL/pgSQL variable conflicts with a column name, CockroachDB resolves the ambiguity by treating it as a column reference rather than a variable reference. This behavior differs from PostgreSQL, where an ambiguous column error is reported, and it is possible to change the `plpgsql.variable_conflict` setting in order to prefer either columns or variables. [#115680](https://github.com/cockroachdb/cockroach/issues/115680) -- It is not possible to define a `RECORD`-returning PL/pgSQL function that returns different-typed expressions from different `RETURN` statements. CockroachDB requires a consistent return type for `RECORD`-returning functions. [#115384](https://github.com/cockroachdb/cockroach/issues/115384) -- Variables cannot be declared with an associated collation using the `COLLATE` keyword. 
[#105245](https://github.com/cockroachdb/cockroach/issues/105245) -- Variables cannot be accessed using the `label.var_name` pattern. [#122322](https://github.com/cockroachdb/cockroach/issues/122322) \ No newline at end of file + - `FOR` cursor loops, `FOR` query loops, and `FOREACH` loops. #105246 + - `PERFORM`, `EXECUTE`, `GET DIAGNOSTICS`, and `CASE`. #117744 +- PL/pgSQL exception blocks cannot catch [transaction retry errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}). #111446 +- `RAISE` statements cannot be annotated with names of schema objects related to the error (i.e., using `COLUMN`, `CONSTRAINT`, `DATATYPE`, `TABLE`, or `SCHEMA`). #106237 +- `RAISE` statements message the client directly, and do not produce log output. #117750 +- `ASSERT` debugging checks are not supported. #117744 +- `RECORD` parameters and variables are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). #105713 +- Variable shadowing (e.g., declaring a variable with the same name in an inner block) is not supported in PL/pgSQL. #117508 +- Syntax for accessing members of composite types without parentheses is not supported. #114687 +- `NOT NULL` variable declarations are not supported. #105243 +- Cursors opened in PL/pgSQL execute their queries on opening, affecting performance and resource usage. #111479 +- Cursors in PL/pgSQL cannot be declared with arguments. #117746 +- `OPEN FOR EXECUTE` is not supported for opening cursors. #117744 +- The `print_strict_params` option is not supported in PL/pgSQL. #123671 +- The `FOUND` local variable, which checks whether a statement affected any rows, is not supported in PL/pgSQL. #122306 +- By default, when a PL/pgSQL variable conflicts with a column name, CockroachDB resolves the ambiguity by treating it as a column reference rather than a variable reference. 
This behavior differs from PostgreSQL, where an ambiguous column error is reported, and it is possible to change the `plpgsql.variable_conflict` setting in order to prefer either columns or variables. #115680 +- It is not possible to define a `RECORD`-returning PL/pgSQL function that returns different-typed expressions from different `RETURN` statements. CockroachDB requires a consistent return type for `RECORD`-returning functions. #115384 +- Variables cannot be declared with an associated collation using the `COLLATE` keyword. #105245 +- Variables cannot be accessed using the `label.var_name` pattern. #122322 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/read-committed-limitations.md b/src/current/_includes/v25.2/known-limitations/read-committed-limitations.md index 63f83b15dd8..1f466997ed8 100644 --- a/src/current/_includes/v25.2/known-limitations/read-committed-limitations.md +++ b/src/current/_includes/v25.2/known-limitations/read-committed-limitations.md @@ -1,6 +1,6 @@ -- Schema changes (e.g., [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}), [`CREATE SCHEMA`]({% link {{ page.version.version }}/create-schema.md %}), [`CREATE INDEX`]({% link {{ page.version.version }}/create-index.md %})) cannot be performed within explicit `READ COMMITTED` transactions when the [`autocommit_before_ddl` session setting]({% link {{page.version.version}}/set-vars.md %}#autocommit-before-ddl) is set to `off`, and will cause transactions to abort. As a workaround, [set the transaction's isolation level]({% link {{ page.version.version }}/read-committed.md %}#set-the-current-transaction-to-read-committed) to `SERIALIZABLE`. [#114778](https://github.com/cockroachdb/cockroach/issues/114778) -- Multi-column-family checks during updates are not supported under `READ COMMITTED` isolation. 
[#112488](https://github.com/cockroachdb/cockroach/issues/112488) +- Schema changes (e.g., [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}), [`CREATE SCHEMA`]({% link {{ page.version.version }}/create-schema.md %}), [`CREATE INDEX`]({% link {{ page.version.version }}/create-index.md %})) cannot be performed within explicit `READ COMMITTED` transactions when the [`autocommit_before_ddl` session setting]({% link {{page.version.version}}/set-vars.md %}#autocommit-before-ddl) is set to `off`, and will cause transactions to abort. As a workaround, [set the transaction's isolation level]({% link {{ page.version.version }}/read-committed.md %}#set-the-current-transaction-to-read-committed) to `SERIALIZABLE`. #114778 +- Multi-column-family checks during updates are not supported under `READ COMMITTED` isolation. #112488 - Because locks acquired by [foreign key]({% link {{ page.version.version }}/foreign-key.md %}) checks, [`SELECT FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}), and [`SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) are fully replicated under `READ COMMITTED` isolation, some queries experience a delay for Raft replication. - [Foreign key]({% link {{ page.version.version }}/foreign-key.md %}) checks are not performed in parallel under `READ COMMITTED` isolation. - [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements are less optimized under `READ COMMITTED` isolation than under `SERIALIZABLE` isolation. Under `READ COMMITTED` isolation, `SELECT FOR UPDATE` and `SELECT FOR SHARE` usually perform an extra lookup join for every locked table when compared to the same queries under `SERIALIZABLE`. In addition, some optimization steps (such as de-correlation of correlated [subqueries]({% link {{ page.version.version }}/subqueries.md %})) are not currently performed on these queries. 
-- Regardless of isolation level, [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements in CockroachDB do not prevent insertion of new rows matching the search condition (i.e., [phantom reads]({% link {{ page.version.version }}/read-committed.md %}#non-repeatable-reads-and-phantom-reads)). This matches PostgreSQL behavior at all isolation levels. [#120673](https://github.com/cockroachdb/cockroach/issues/120673) \ No newline at end of file +- Regardless of isolation level, [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements in CockroachDB do not prevent insertion of new rows matching the search condition (i.e., [phantom reads]({% link {{ page.version.version }}/read-committed.md %}#non-repeatable-reads-and-phantom-reads)). This matches PostgreSQL behavior at all isolation levels. #120673 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/restore-multiregion-match.md b/src/current/_includes/v25.2/known-limitations/restore-multiregion-match.md index ab2f1029ecd..634d695246e 100644 --- a/src/current/_includes/v25.2/known-limitations/restore-multiregion-match.md +++ b/src/current/_includes/v25.2/known-limitations/restore-multiregion-match.md @@ -47,4 +47,4 @@ ALTER DATABASE destination_database SET PRIMARY REGION "us-east1"; ~~~ - [#71071](https://github.com/cockroachdb/cockroach/issues/71071) \ No newline at end of file + #71071 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/restore-tables-non-multi-reg.md b/src/current/_includes/v25.2/known-limitations/restore-tables-non-multi-reg.md index 5390f2d09ee..e05bc340141 100644 --- a/src/current/_includes/v25.2/known-limitations/restore-tables-non-multi-reg.md +++ b/src/current/_includes/v25.2/known-limitations/restore-tables-non-multi-reg.md @@ -1 +1 @@ -Restoring [`GLOBAL`]({% link {{ page.version.version 
}}/table-localities.md %}#global-tables) and [`REGIONAL BY TABLE`]({% link {{ page.version.version }}/table-localities.md %}#regional-tables) tables into a **non**-multi-region database is not supported. [#71502](https://github.com/cockroachdb/cockroach/issues/71502) +Restoring [`GLOBAL`]({% link {{ page.version.version }}/table-localities.md %}#global-tables) and [`REGIONAL BY TABLE`]({% link {{ page.version.version }}/table-localities.md %}#regional-tables) tables into a **non**-multi-region database is not supported. #71502 diff --git a/src/current/_includes/v25.2/known-limitations/restore-udf.md b/src/current/_includes/v25.2/known-limitations/restore-udf.md index a4a4bc080fe..e4180219a25 100644 --- a/src/current/_includes/v25.2/known-limitations/restore-udf.md +++ b/src/current/_includes/v25.2/known-limitations/restore-udf.md @@ -1 +1 @@ -`RESTORE` will not restore a table that references a [UDF]({% link {{ page.version.version }}/user-defined-functions.md %}), unless you skip restoring the function with the {% if page.name == "restore.md" %} [`skip_missing_udfs`](#skip-missing-udfs) {% else %} [`skip_missing_udfs`]({% link {{ page.version.version }}/restore.md %}#skip-missing-udfs) {% endif %} option. Alternatively, take a [database-level backup]({% link {{ page.version.version }}/backup.md %}#back-up-a-database) to include everything needed to restore the table. [#118195](https://github.com/cockroachdb/cockroach/issues/118195) \ No newline at end of file +`RESTORE` will not restore a table that references a [UDF]({% link {{ page.version.version }}/user-defined-functions.md %}), unless you skip restoring the function with the {% if page.name == "restore.md" %} [`skip_missing_udfs`](#skip-missing-udfs) {% else %} [`skip_missing_udfs`]({% link {{ page.version.version }}/restore.md %}#skip-missing-udfs) {% endif %} option. 
Alternatively, take a [database-level backup]({% link {{ page.version.version }}/backup.md %}#back-up-a-database) to include everything needed to restore the table. #118195 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/rls-values-on-conflict-do-nothing.md b/src/current/_includes/v25.2/known-limitations/rls-values-on-conflict-do-nothing.md index c85dea7987a..d274e573dc9 100644 --- a/src/current/_includes/v25.2/known-limitations/rls-values-on-conflict-do-nothing.md +++ b/src/current/_includes/v25.2/known-limitations/rls-values-on-conflict-do-nothing.md @@ -1 +1 @@ -`ON CONFLICT ... DO NOTHING`: CockroachDB does not run the constraint and row-level policy checks on the `VALUES` clause if the candidate row has a conflict. [#35370](https://github.com/cockroachdb/cockroach/issues/35370). +`ON CONFLICT ... DO NOTHING`: CockroachDB does not run the constraint and row-level policy checks on the `VALUES` clause if the candidate row has a conflict. #35370. diff --git a/src/current/_includes/v25.2/known-limitations/rls-visibility-issue.md b/src/current/_includes/v25.2/known-limitations/rls-visibility-issue.md index 453059825a3..5d16325382f 100644 --- a/src/current/_includes/v25.2/known-limitations/rls-visibility-issue.md +++ b/src/current/_includes/v25.2/known-limitations/rls-visibility-issue.md @@ -1 +1 @@ -Under certain conditions, such as when executing certain SQL functions, CockroachDB's row-level security (RLS) implementation exposes metadata about the number of restricted rows in a table. For example, when a user applies arbitrary SQL filters on a table with RLS enabled, it's possible for the user to see how many total rows are in the table; this count includes rows that the user does not have direct access to. 
This metadata leakage can also occur when statements like [`EXPLAIN ANALYZE`]({% link {{ page.version.version }}/explain.md %}) are used, as the output includes a count of the number of rows scanned by the query that can include the number of restricted rows, even though the rows themselves are not directly accessible. [#146952](https://github.com/cockroachdb/cockroach/issues/146952) +Under certain conditions, such as when executing certain SQL functions, CockroachDB's row-level security (RLS) implementation exposes metadata about the number of restricted rows in a table. For example, when a user applies arbitrary SQL filters on a table with RLS enabled, it's possible for the user to see how many total rows are in the table; this count includes rows that the user does not have direct access to. This metadata leakage can also occur when statements like [`EXPLAIN ANALYZE`]({% link {{ page.version.version }}/explain.md %}) are used, as the output includes a count of the number of rows scanned by the query that can include the number of restricted rows, even though the rows themselves are not directly accessible. #146952 diff --git a/src/current/_includes/v25.2/known-limitations/routine-limitations.md b/src/current/_includes/v25.2/known-limitations/routine-limitations.md index 4718c6c7abf..fa16791b56a 100644 --- a/src/current/_includes/v25.2/known-limitations/routine-limitations.md +++ b/src/current/_includes/v25.2/known-limitations/routine-limitations.md @@ -1,10 +1,10 @@ {% if page.name != "known-limitations.md" # New limitations in v24.2 %} {% endif %} -- Routines cannot be invoked with named arguments, e.g., `SELECT foo(a => 1, b => 2);` or `SELECT foo(b := 1, a := 2);`. [#122264](https://github.com/cockroachdb/cockroach/issues/122264) -- Routines cannot be created if they reference temporary tables. [#121375](https://github.com/cockroachdb/cockroach/issues/121375) -- Routines cannot be created with unnamed `INOUT` parameters. 
For example, `CREATE PROCEDURE p(INOUT INT) AS $$ BEGIN NULL; END; $$ LANGUAGE PLpgSQL;`. [#121251](https://github.com/cockroachdb/cockroach/issues/121251) -- Routines cannot be created if they return fewer columns than declared. For example, `CREATE FUNCTION f(OUT sum INT, INOUT a INT, INOUT b INT) LANGUAGE SQL AS $$ SELECT (a + b, b); $$;`. [#121247](https://github.com/cockroachdb/cockroach/issues/121247) -- Routines cannot be created with an `OUT` parameter of type `RECORD`. [#123448](https://github.com/cockroachdb/cockroach/issues/123448) -- DDL statements (e.g., `CREATE TABLE`, `CREATE INDEX`) are not allowed within UDFs or stored procedures. [#110080](https://github.com/cockroachdb/cockroach/issues/110080) -- Polymorphic types cannot be cast to other types (e.g., `TEXT`) within routine parameters. [#123536](https://github.com/cockroachdb/cockroach/issues/123536) -- Routine parameters and return types cannot be declared using the `ANYENUM` polymorphic type, which is able to match any [`ENUM`]({% link {{ page.version.version }}/enum.md %}) type. [123048](https://github.com/cockroachdb/cockroach/issues/123048) \ No newline at end of file +- Routines cannot be invoked with named arguments, e.g., `SELECT foo(a => 1, b => 2);` or `SELECT foo(b := 1, a := 2);`. #122264 +- Routines cannot be created if they reference temporary tables. #121375 +- Routines cannot be created with unnamed `INOUT` parameters. For example, `CREATE PROCEDURE p(INOUT INT) AS $$ BEGIN NULL; END; $$ LANGUAGE PLpgSQL;`. #121251 +- Routines cannot be created if they return fewer columns than declared. For example, `CREATE FUNCTION f(OUT sum INT, INOUT a INT, INOUT b INT) LANGUAGE SQL AS $$ SELECT (a + b, b); $$;`. #121247 +- Routines cannot be created with an `OUT` parameter of type `RECORD`. #123448 +- DDL statements (e.g., `CREATE TABLE`, `CREATE INDEX`) are not allowed within UDFs or stored procedures. 
#110080 +- Polymorphic types cannot be cast to other types (e.g., `TEXT`) within routine parameters. #123536 +- Routine parameters and return types cannot be declared using the `ANYENUM` polymorphic type, which is able to match any [`ENUM`]({% link {{ page.version.version }}/enum.md %}) type. #123048 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/row-level-ttl-limitations.md b/src/current/_includes/v25.2/known-limitations/row-level-ttl-limitations.md index c386ba576d7..4460901566d 100644 --- a/src/current/_includes/v25.2/known-limitations/row-level-ttl-limitations.md +++ b/src/current/_includes/v25.2/known-limitations/row-level-ttl-limitations.md @@ -1,5 +1,5 @@ - Any queries you run against tables with Row-Level TTL enabled (or against tables that have [foreign keys]({% link {{page.version.version}}/foreign-key.md %}) that reference TTL-enabled tables) do not filter out expired rows from the result set (this includes [`UPDATE`s]({% link {{ page.version.version }}/update.md %}) and [`DELETE`s]({% link {{ page.version.version }}/delete.md %})). This feature may be added in a future release. For now, follow the instructions in [Filter out expired rows from a selection query]({% link {{ page.version.version }}/row-level-ttl.md %}#filter-out-expired-rows-from-a-selection-query). -- Tables with Row-Level TTL can be referenced by [foreign keys]({% link {{page.version.version}}/foreign-key.md %}). TTL deletes are issued as regular [`DELETE`]({% link {{ page.version.version }}/delete.md %}) statements, so inbound foreign keys apply. If an inbound foreign key uses `ON DELETE RESTRICT` and referencing rows exist, the TTL job fails with a foreign key violation. [#101372](https://github.com/cockroachdb/cockroach/issues/101372) +- Tables with Row-Level TTL can be referenced by [foreign keys]({% link {{page.version.version}}/foreign-key.md %}). 
TTL deletes are issued as regular [`DELETE`]({% link {{ page.version.version }}/delete.md %}) statements, so inbound foreign keys apply. If an inbound foreign key uses `ON DELETE RESTRICT` and referencing rows exist, the TTL job fails with a foreign key violation. #101372 - Enabling Row-Level TTL on a table with multiple [secondary indexes]({% link {{ page.version.version }}/indexes.md %}) can have negative performance impacts on a cluster, including increased [latency]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#service-latency) and [contention]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention). This is particularly true for large tables with terabytes of data and billions of rows that are split up into multiple ranges across separate nodes. - Increased latency may occur because secondary indexes aren't necessarily stored on the same underlying [ranges]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-range) as a table's [primary indexes]({% link {{ page.version.version }}/indexes.md %}). Further, the secondary indexes' ranges may have [leaseholders]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-leaseholder) located on different nodes than the primary index. - Increased contention may occur because [intents]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#write-intents) must be written as part of performing the deletions. 
diff --git a/src/current/_includes/v25.2/known-limitations/select-for-update-limitations.md b/src/current/_includes/v25.2/known-limitations/select-for-update-limitations.md index 1ed68e692e4..e599353243a 100644 --- a/src/current/_includes/v25.2/known-limitations/select-for-update-limitations.md +++ b/src/current/_includes/v25.2/known-limitations/select-for-update-limitations.md @@ -1,4 +1,4 @@ -- `SKIP LOCKED` cannot be used for tables with multiple [column families]({% link {{ page.version.version }}/column-families.md %}). [#160961](https://github.com/cockroachdb/cockroach/issues/160961) +- `SKIP LOCKED` cannot be used for tables with multiple [column families]({% link {{ page.version.version }}/column-families.md %}). #160961 - By default under `SERIALIZABLE` isolation, locks acquired using `SELECT ... FOR UPDATE` and `SELECT ... FOR SHARE` are implemented as fast, in-memory [unreplicated locks](architecture/transaction-layer.html#unreplicated-locks). If a [lease transfer]({% link {{ page.version.version }}/architecture/replication-layer.md %}#leases) or [range split/merge]({% link {{ page.version.version }}/architecture/distribution-layer.md %}#range-merges) occurs on a range held by an unreplicated lock, the lock is dropped. The following behaviors can occur: - The desired ordering of concurrent accesses to one or more rows of a table expressed by your use of `SELECT ... FOR UPDATE` may not be preserved (that is, a transaction _B_ against some table _T_ that was supposed to wait behind another transaction _A_ operating on _T_ may not wait for transaction _A_). - The transaction that acquired the (now dropped) unreplicated lock may fail to commit, leading to [transaction retry errors with code `40001`]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}) and the [`restart transaction` error message]({% link {{ page.version.version }}/common-errors.md %}#restart-transaction). 
diff --git a/src/current/_includes/v25.2/known-limitations/set-transaction-no-rollback.md b/src/current/_includes/v25.2/known-limitations/set-transaction-no-rollback.md index 21864daa9de..885a380d32e 100644 --- a/src/current/_includes/v25.2/known-limitations/set-transaction-no-rollback.md +++ b/src/current/_includes/v25.2/known-limitations/set-transaction-no-rollback.md @@ -14,4 +14,4 @@ timezone 3 ~~~ -[#69396](https://github.com/cockroachdb/cockroach/issues/69396), [#148766](https://github.com/cockroachdb/cockroach/issues/148766) \ No newline at end of file +#69396, #148766 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/show-backup-symlink.md b/src/current/_includes/v25.2/known-limitations/show-backup-symlink.md index 38ba86fb28f..1c7a5612242 100644 --- a/src/current/_includes/v25.2/known-limitations/show-backup-symlink.md +++ b/src/current/_includes/v25.2/known-limitations/show-backup-symlink.md @@ -1 +1 @@ -[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}) does not support listing backups if the [`nodelocal`]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}) storage location is a symlink. [#70260](https://github.com/cockroachdb/cockroach/issues/70260) \ No newline at end of file +[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}) does not support listing backups if the [`nodelocal`]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}) storage location is a symlink. 
#70260 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/sql-cursors.md b/src/current/_includes/v25.2/known-limitations/sql-cursors.md index bceff96d5a6..1924400e85d 100644 --- a/src/current/_includes/v25.2/known-limitations/sql-cursors.md +++ b/src/current/_includes/v25.2/known-limitations/sql-cursors.md @@ -1,8 +1,8 @@ CockroachDB implements SQL {% if page.name == "known-limitations.md" %} [cursor]({% link {{ page.version.version }}/cursors.md %}) {% else %} cursor {% endif %} support with the following limitations: -- `DECLARE` only supports forward cursors. Reverse cursors created with `DECLARE SCROLL` are not supported. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- `FETCH` supports forward, relative, and absolute variants, but only for forward cursors. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- `BINARY CURSOR`, which returns data in the Postgres binary format, is not supported. [#77099](https://github.com/cockroachdb/cockroach/issues/77099) -- Scrollable cursor (also known as reverse `FETCH`) is not supported. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- [`SELECT ... FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) with a cursor is not supported. [#77103](https://github.com/cockroachdb/cockroach/issues/77103) -- Respect for [`SAVEPOINT`s]({% link {{ page.version.version }}/savepoint.md %}) is not supported. Cursor definitions do not disappear properly if rolled back to a `SAVEPOINT` from before they were created. [#77104](https://github.com/cockroachdb/cockroach/issues/77104) +- `DECLARE` only supports forward cursors. Reverse cursors created with `DECLARE SCROLL` are not supported. #77102 +- `FETCH` supports forward, relative, and absolute variants, but only for forward cursors. #77102 +- `BINARY CURSOR`, which returns data in the Postgres binary format, is not supported. 
#77099 +- Scrollable cursor (also known as reverse `FETCH`) is not supported. #77102 +- [`SELECT ... FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) with a cursor is not supported. #77103 +- Respect for [`SAVEPOINT`s]({% link {{ page.version.version }}/savepoint.md %}) is not supported. Cursor definitions do not disappear properly if rolled back to a `SAVEPOINT` from before they were created. #77104 diff --git a/src/current/_includes/v25.2/known-limitations/srid-4326-limitations.md b/src/current/_includes/v25.2/known-limitations/srid-4326-limitations.md index b556a9fbecd..294a1a53bd4 100644 --- a/src/current/_includes/v25.2/known-limitations/srid-4326-limitations.md +++ b/src/current/_includes/v25.2/known-limitations/srid-4326-limitations.md @@ -1 +1 @@ -Defining a custom SRID by inserting rows into [`spatial_ref_sys`]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial_ref_sys) is not currently supported. [#55903](https://github.com/cockroachdb/cockroach/issues/55903) \ No newline at end of file +Defining a custom SRID by inserting rows into [`spatial_ref_sys`]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial_ref_sys) is not currently supported. #55903 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/stats-refresh-upgrade.md b/src/current/_includes/v25.2/known-limitations/stats-refresh-upgrade.md index 3d5a8d26325..3325dfaa53b 100644 --- a/src/current/_includes/v25.2/known-limitations/stats-refresh-upgrade.md +++ b/src/current/_includes/v25.2/known-limitations/stats-refresh-upgrade.md @@ -1 +1 @@ -- The [automatic statistics refresher]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-statistics-refresh-rate) automatically checks whether it needs to refresh statistics for every table in the database upon startup of each node in the cluster. 
If statistics for a table have not been refreshed in a while, this will trigger collection of statistics for that table. If statistics have been refreshed recently, it will not force a refresh. As a result, the automatic statistics refresher does not necessarily perform a refresh of statistics after an [upgrade]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}). This could cause a problem, for example, if the upgrade moves from a version without [histograms]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-histogram-collection) to a version with histograms. To refresh statistics manually, use [`CREATE STATISTICS`](create-statistics.html). [#54816](https://github.com/cockroachdb/cockroach/issues/54816) +- The [automatic statistics refresher]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-statistics-refresh-rate) automatically checks whether it needs to refresh statistics for every table in the database upon startup of each node in the cluster. If statistics for a table have not been refreshed in a while, this will trigger collection of statistics for that table. If statistics have been refreshed recently, it will not force a refresh. As a result, the automatic statistics refresher does not necessarily perform a refresh of statistics after an [upgrade]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}). This could cause a problem, for example, if the upgrade moves from a version without [histograms]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-histogram-collection) to a version with histograms. To refresh statistics manually, use [`CREATE STATISTICS`](create-statistics.html). 
#54816 diff --git a/src/current/_includes/v25.2/known-limitations/stored-proc-limitations.md b/src/current/_includes/v25.2/known-limitations/stored-proc-limitations.md index b2ba1b61562..bce2e47d6b8 100644 --- a/src/current/_includes/v25.2/known-limitations/stored-proc-limitations.md +++ b/src/current/_includes/v25.2/known-limitations/stored-proc-limitations.md @@ -1,3 +1,3 @@ {% if page.name != "known-limitations.md" # New limitations in v24.2 %} {% endif %} -- `COMMIT` and `ROLLBACK` statements are not supported within nested procedures. [#122266](https://github.com/cockroachdb/cockroach/issues/122266) \ No newline at end of file +- `COMMIT` and `ROLLBACK` statements are not supported within nested procedures. #122266 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/trigger-limitations.md b/src/current/_includes/v25.2/known-limitations/trigger-limitations.md index 7373cd486e7..f2514d24113 100644 --- a/src/current/_includes/v25.2/known-limitations/trigger-limitations.md +++ b/src/current/_includes/v25.2/known-limitations/trigger-limitations.md @@ -1,6 +1,6 @@ -- `CREATE OR REPLACE TRIGGER` is not supported. [#128422](https://github.com/cockroachdb/cockroach/issues/128422) -- Statement-level triggers are not supported. [#126362](https://github.com/cockroachdb/cockroach/issues/126362) -- `INSTEAD OF` triggers are not supported. [#126363](https://github.com/cockroachdb/cockroach/issues/126363) -- A [trigger function]({% link {{ page.version.version }}/triggers.md %}#trigger-function) that is used in an existing trigger cannot be replaced with `CREATE OR REPLACE` syntax. To use `CREATE OR REPLACE`, first [drop any triggers]({% link {{ page.version.version }}/drop-trigger.md %}) that are using the function. [#134555](https://github.com/cockroachdb/cockroach/issues/134555) -- Hidden columns are not visible to triggers. [#133331](https://github.com/cockroachdb/cockroach/issues/133331) +- `CREATE OR REPLACE TRIGGER` is not supported. 
#128422 +- Statement-level triggers are not supported. #126362 +- `INSTEAD OF` triggers are not supported. #126363 +- A [trigger function]({% link {{ page.version.version }}/triggers.md %}#trigger-function) that is used in an existing trigger cannot be replaced with `CREATE OR REPLACE` syntax. To use `CREATE OR REPLACE`, first [drop any triggers]({% link {{ page.version.version }}/drop-trigger.md %}) that are using the function. #134555 +- Hidden columns are not visible to triggers. #133331 - {% include {{ page.version.version }}/known-limitations/drop-trigger-limitations.md %} \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/trigram-unsupported-syntax.md b/src/current/_includes/v25.2/known-limitations/trigram-unsupported-syntax.md index 494730c7ae8..a981c4915b0 100644 --- a/src/current/_includes/v25.2/known-limitations/trigram-unsupported-syntax.md +++ b/src/current/_includes/v25.2/known-limitations/trigram-unsupported-syntax.md @@ -6,4 +6,4 @@ - Acceleration on [regex string matching]({% link {{ page.version.version }}/scalar-expressions.md %}#string-matching-using-posix-regular-expressions). - `%` comparisons, `show_trgm`, and trigram index creation on [collated strings]({% link {{ page.version.version }}/collate.md %}). -[#41285](https://github.com/cockroachdb/cockroach/issues/41285) \ No newline at end of file +#41285 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/udf-limitations.md b/src/current/_includes/v25.2/known-limitations/udf-limitations.md index 8b2ef66b403..5e96fd49283 100644 --- a/src/current/_includes/v25.2/known-limitations/udf-limitations.md +++ b/src/current/_includes/v25.2/known-limitations/udf-limitations.md @@ -1,9 +1,9 @@ {% if page.name != "known-limitations.md" # New limitations in v24.2 %} {% endif %} -- A `RECORD`-returning UDF cannot be created without a `RETURN` statement in the root block, which would restrict the wildcard type to a concrete one. 
[#122945](https://github.com/cockroachdb/cockroach/issues/122945) +- A `RECORD`-returning UDF cannot be created without a `RETURN` statement in the root block, which would restrict the wildcard type to a concrete one. #122945 - User-defined functions are not currently supported in: - - Expressions (column, index, constraint) in tables. [#87699](https://github.com/cockroachdb/cockroach/issues/87699) - - Views. [#87699](https://github.com/cockroachdb/cockroach/issues/87699) -- User-defined functions cannot call themselves recursively. [#93049](https://github.com/cockroachdb/cockroach/issues/93049) -- The `setval` function cannot be resolved when used inside UDF bodies. [#110860](https://github.com/cockroachdb/cockroach/issues/110860) -- Casting subqueries to [user-defined types]({% link {{ page.version.version }}/create-type.md %}) in UDFs is not supported. [#108184](https://github.com/cockroachdb/cockroach/issues/108184) \ No newline at end of file + - Expressions (column, index, constraint) in tables. #87699 + - Views. #87699 +- User-defined functions cannot call themselves recursively. #93049 +- The `setval` function cannot be resolved when used inside UDF bodies. #110860 +- Casting subqueries to [user-defined types]({% link {{ page.version.version }}/create-type.md %}) in UDFs is not supported. 
#108184 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/vector-limitations.md b/src/current/_includes/v25.2/known-limitations/vector-limitations.md index 603d96f9396..22fa40d8560 100644 --- a/src/current/_includes/v25.2/known-limitations/vector-limitations.md +++ b/src/current/_includes/v25.2/known-limitations/vector-limitations.md @@ -1,7 +1,7 @@ - {% include {{ page.version.version }}/sql/vector-batch-inserts.md %} -- Creating a vector index through a backfill disables mutations ([`INSERT`]({% link {{ page.version.version }}/insert.md %}), [`UPSERT`]({% link {{ page.version.version }}/upsert.md %}), [`UPDATE`]({% link {{ page.version.version }}/update.md %}), [`DELETE`]({% link {{ page.version.version }}/delete.md %})) on the table. [#144443](https://github.com/cockroachdb/cockroach/issues/144443) -- `IMPORT INTO` is not supported on tables with vector indexes. You can import the vectors first and create the index after import is complete. [#145227](https://github.com/cockroachdb/cockroach/issues/145227) -- Only L2 distance (`<->`) searches are accelerated. [#144016](https://github.com/cockroachdb/cockroach/issues/144016) -- Index acceleration with filters is only supported if the filters match prefix columns. [#146145](https://github.com/cockroachdb/cockroach/issues/146145) -- Index recommendations are not provided for vector indexes. [#146146](https://github.com/cockroachdb/cockroach/issues/146146) -- Vector index queries may return incorrect results when the underlying table uses multiple column families. [#146046](https://github.com/cockroachdb/cockroach/issues/146046) \ No newline at end of file +- Creating a vector index through a backfill disables mutations ([`INSERT`]({% link {{ page.version.version }}/insert.md %}), [`UPSERT`]({% link {{ page.version.version }}/upsert.md %}), [`UPDATE`]({% link {{ page.version.version }}/update.md %}), [`DELETE`]({% link {{ page.version.version }}/delete.md %})) on the table. 
#144443 +- `IMPORT INTO` is not supported on tables with vector indexes. You can import the vectors first and create the index after import is complete. #145227 +- Only L2 distance (`<->`) searches are accelerated. #144016 +- Index acceleration with filters is only supported if the filters match prefix columns. #146145 +- Index recommendations are not provided for vector indexes. #146146 +- Vector index queries may return incorrect results when the underlying table uses multiple column families. #146046 \ No newline at end of file diff --git a/src/current/_includes/v25.2/known-limitations/vectorized-engine-limitations.md b/src/current/_includes/v25.2/known-limitations/vectorized-engine-limitations.md index daea59ebf88..da227ad4825 100644 --- a/src/current/_includes/v25.2/known-limitations/vectorized-engine-limitations.md +++ b/src/current/_includes/v25.2/known-limitations/vectorized-engine-limitations.md @@ -1,2 +1,2 @@ -- The vectorized engine does not support queries containing a join filtered with an [`ON` expression]({% link {{ page.version.version }}/joins.md %}#supported-join-conditions). [#38018](https://github.com/cockroachdb/cockroach/issues/38018) +- The vectorized engine does not support queries containing a join filtered with an [`ON` expression]({% link {{ page.version.version }}/joins.md %}#supported-join-conditions). #38018 - The vectorized engine does not support [working with spatial data]({% link {{ page.version.version }}/export-spatial-data.md %}). Queries with [geospatial functions]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) or [spatial data]({% link {{ page.version.version }}/export-spatial-data.md %}) will revert to the row-oriented execution engine. 
diff --git a/src/current/_includes/v25.2/misc/tooling.md b/src/current/_includes/v25.2/misc/tooling.md index dcd24363435..01dc63c840f 100644 --- a/src/current/_includes/v25.2/misc/tooling.md +++ b/src/current/_includes/v25.2/misc/tooling.md @@ -23,7 +23,7 @@ Customers should contact their account team before moving production workloads t Unless explicitly stated, support for a [driver](#drivers) or [data access framework](#data-access-frameworks-e-g-orms) does not include [automatic, client-side transaction retry handling]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#client-side-retry-handling). For client-side transaction retry handling samples, see [Example Apps]({% link {{ page.version.version }}/example-apps.md %}). {{site.data.alerts.end}} -If you encounter problems using CockroachDB with any of the tools listed on this page, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward better support. +If you encounter problems using CockroachDB with any of the tools listed on this page, please open an issue with details to help us make progress toward better support. For a list of tools supported by the CockroachDB community, see [Third-Party Tools Supported by the Community]({% link {{ page.version.version }}/community-tooling.md %}). 
@@ -32,23 +32,23 @@ For a list of tools supported by the CockroachDB community, see [Third-Party Too | Language | Driver | Latest tested version | Support level | CockroachDB adapter | Tutorial | |----------+--------+-----------------------+---------------------+---------------------+----------| | C | [libpq](http://www.postgresql.org/docs/13/static/libpq.html)| PostgreSQL 13 | Partial | N/A | N/A | -| C# (.NET) | [Npgsql](https://www.nuget.org/packages/Npgsql/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/npgsql.go ||var npgsqlSupportedTag = "v||"\n\n %} | Full | N/A | [Build a C# App with CockroachDB (Npgsql)](build-a-csharp-app-with-cockroachdb.html) | -| Go | [pgx](https://github.com/jackc/pgx/releases)


[pq](https://github.com/lib/pq) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgx.go ||var supportedPGXTag = "||"\n\n %}
(use latest version of CockroachDB adapter)
{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/libpq.go ||var libPQSupportedTag = "||"\n\n %} | Full


Full | [`crdbpgx`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbpgx)
(includes client-side transaction retry handling)
N/A | [Build a Go App with CockroachDB (pgx)](build-a-go-app-with-cockroachdb.html)


[Build a Go App with CockroachDB (pq)](build-a-go-app-with-cockroachdb-pq.html) | -| Java | [JDBC](https://jdbc.postgresql.org/download/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgjdbc.go ||var supportedPGJDBCTag = "||"\n\n %} | Full | N/A | [Build a Java App with CockroachDB (JDBC)](build-a-java-app-with-cockroachdb.html) | +| C# (.NET) | [Npgsql](https://www.nuget.org/packages/Npgsql/) | 7.0.2 | Full | N/A | [Build a C# App with CockroachDB (Npgsql)](build-a-csharp-app-with-cockroachdb.html) | +| Go | [pgx](https://github.com/jackc/pgx/releases)


[pq](https://github.com/lib/pq) | v5.3.1
(use latest version of CockroachDB adapter)
v1.10.5 | Full


Full | [`crdbpgx`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbpgx)
(includes client-side transaction retry handling)
N/A | [Build a Go App with CockroachDB (pgx)](build-a-go-app-with-cockroachdb.html)


[Build a Go App with CockroachDB (pq)](build-a-go-app-with-cockroachdb-pq.html) | +| Java | [JDBC](https://jdbc.postgresql.org/download/) | REL42.7.3 | Full | N/A | [Build a Java App with CockroachDB (JDBC)](build-a-java-app-with-cockroachdb.html) | | JavaScript | [pg](https://www.npmjs.com/package/pg) | 8.2.1 | Full | N/A | [Build a Node.js App with CockroachDB (pg)](build-a-nodejs-app-with-cockroachdb.html) | -| Python | [psycopg3](https://www.psycopg.org/psycopg3/docs/)


[psycopg2](https://www.psycopg.org/docs/install.html)


[asyncpg](https://magicstack.github.io/asyncpg/current/index.html) | 3.0.16


2.8.6


{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/asyncpg.go || var asyncpgSupportedTag = "||"\n\n %} | Full


Full


Partial | N/A


N/A


N/A | [Build a Python App with CockroachDB (psycopg3)](build-a-python-app-with-cockroachdb-psycopg3.html)


[Build a Python App with CockroachDB (psycopg2)](build-a-python-app-with-cockroachdb.html)


[Build a Python App with CockroachDB (asyncpg)](build-a-python-app-with-cockroachdb-asyncpg.html) | -| Ruby | [pg](https://rubygems.org/gems/pg) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/ruby_pg.go ||var rubyPGVersion = "||"\n\n %} | Full | N/A | [Build a Ruby App with CockroachDB (pg)](build-a-ruby-app-with-cockroachdb.html) | +| Python | [psycopg3](https://www.psycopg.org/psycopg3/docs/)


[psycopg2](https://www.psycopg.org/docs/install.html)


[asyncpg](https://magicstack.github.io/asyncpg/current/index.html) | 3.0.16


2.8.6


v0.24.0 | Full


Full


Partial | N/A


N/A


N/A | [Build a Python App with CockroachDB (psycopg3)](build-a-python-app-with-cockroachdb-psycopg3.html)


[Build a Python App with CockroachDB (psycopg2)](build-a-python-app-with-cockroachdb.html)


[Build a Python App with CockroachDB (asyncpg)](build-a-python-app-with-cockroachdb-asyncpg.html) | +| Ruby | [pg](https://rubygems.org/gems/pg) | v1.4.6 | Full | N/A | [Build a Ruby App with CockroachDB (pg)](build-a-ruby-app-with-cockroachdb.html) | | Rust | [rust-postgres](https://github.com/sfackler/rust-postgres) | 0.19.2 | Partial | N/A | [Build a Rust App with CockroachDB]({% link {{ page.version.version }}/build-a-rust-app-with-cockroachdb.md %}) | ## Data access frameworks (e.g., ORMs) | Language | Framework | Latest tested version | Support level | CockroachDB adapter | Tutorial | |----------+-----------+-----------------------+---------------+---------------------+----------| -| Go | [GORM](https://github.com/jinzhu/gorm/releases)


[go-pg](https://github.com/go-pg/pg)
[upper/db](https://github.com/upper/db) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gorm.go ||var gormSupportedTag = "||"\n\n %}


{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gopg.go ||var gopgSupportedTag = "||"\n\n %}
v4 | Full


Full
Full | [`crdbgorm`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbgorm)
(includes client-side transaction retry handling)
N/A
N/A | [Build a Go App with CockroachDB (GORM)](build-a-go-app-with-cockroachdb-gorm.html)


N/A
[Build a Go App with CockroachDB (upper/db)](build-a-go-app-with-cockroachdb-upperdb.html) | -| Java | [Hibernate](https://hibernate.org/orm/)
(including [Hibernate Spatial](https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html#spatial))
[jOOQ](https://www.jooq.org/)
[MyBatis](https://mybatis.org/mybatis-3/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/hibernate.go ||var supportedHibernateTag = "||"\n\n %} (must be at least 5.4.19)


3.13.2 (must be at least 3.13.0)
3.5.5| Full


Full
Full | N/A


N/A
N/A | [Build a Java App with CockroachDB (Hibernate)](build-a-java-app-with-cockroachdb-hibernate.html)


[Build a Java App with CockroachDB (jOOQ)](build-a-java-app-with-cockroachdb-jooq.html)
[Build a Spring App with CockroachDB (MyBatis)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) | -| JavaScript/TypeScript | [Sequelize](https://www.npmjs.com/package/sequelize)


[Knex.js](https://knexjs.org/)
[Prisma](https://prisma.io)
[TypeORM](https://www.npmjs.com/package/typeorm) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/sequelize.go ||var supportedSequelizeCockroachDBRelease = "||"\n\n %}
(use latest version of CockroachDB adapter)
{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/knex.go ||const supportedKnexTag = "||"\n\n %}
3.14.0
0.3.17 {% comment %}{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/typeorm.go ||const supportedTypeORMRelease = "||"\n %}{% endcomment %} | Full


Full
Full
Full | [`sequelize-cockroachdb`](https://www.npmjs.com/package/sequelize-cockroachdb)


N/A
N/A
N/A | [Build a Node.js App with CockroachDB (Sequelize)](build-a-nodejs-app-with-cockroachdb-sequelize.html)


[Build a Node.js App with CockroachDB (Knex.js)](build-a-nodejs-app-with-cockroachdb-knexjs.html)
[Build a Node.js App with CockroachDB (Prisma)](build-a-nodejs-app-with-cockroachdb-prisma.html)
[Build a TypeScript App with CockroachDB (TypeORM)](build-a-typescript-app-with-cockroachdb.html) | -| Ruby | [ActiveRecord](https://rubygems.org/gems/activerecord)
[RGeo/RGeo-ActiveRecord](https://github.com/cockroachdb/activerecord-cockroachdb-adapter#working-with-spatial-data) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/activerecord.go ||var supportedRailsVersion = "||"\nvar %}
(use latest version of CockroachDB adapter) | Full | [`activerecord-cockroachdb-adapter`](https://rubygems.org/gems/activerecord-cockroachdb-adapter)
(includes client-side transaction retry handling) | [Build a Ruby App with CockroachDB (ActiveRecord)](build-a-ruby-app-with-cockroachdb-activerecord.html) | -| Python | [Django](https://pypi.org/project/Django/)
(including [GeoDjango](https://docs.djangoproject.com/en/3.1/ref/contrib/gis/))
[peewee](https://github.com/coleifer/peewee/)
[SQLAlchemy](https://www.sqlalchemy.org/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/django.go ||var djangoSupportedTag = "cockroach-||"\nvar %}
(use latest version of CockroachDB adapter)

3.13.3
0.7.13
1.4.17
(use latest version of CockroachDB adapter) | Full


Full
Full
Full | [`django-cockroachdb`](https://pypi.org/project/django-cockroachdb/)


N/A
N/A
[`sqlalchemy-cockroachdb`](https://pypi.org/project/sqlalchemy-cockroachdb)
(includes client-side transaction retry handling) | [Build a Python App with CockroachDB (Django)](build-a-python-app-with-cockroachdb-django.html)


N/A (See [peewee docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cockroach-database).)
[Build a Python App with CockroachDB (SQLAlchemy)](build-a-python-app-with-cockroachdb-sqlalchemy.html) | +| Go | [GORM](https://github.com/jinzhu/gorm/releases)


[go-pg](https://github.com/go-pg/pg)
[upper/db](https://github.com/upper/db) | v1.24.1


v10.9.0
v4 | Full


Full
Full | [`crdbgorm`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbgorm)
(includes client-side transaction retry handling)
N/A
N/A | [Build a Go App with CockroachDB (GORM)](build-a-go-app-with-cockroachdb-gorm.html)


N/A
[Build a Go App with CockroachDB (upper/db)](build-a-go-app-with-cockroachdb-upperdb.html) | +| Java | [Hibernate](https://hibernate.org/orm/)
(including [Hibernate Spatial](https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html#spatial))
[jOOQ](https://www.jooq.org/)
[MyBatis](https://mybatis.org/mybatis-3/) | 6.6.20 (must be at least 5.4.19)


3.13.2 (must be at least 3.13.0)
3.5.5| Full


Full
Full | N/A


N/A
N/A | [Build a Java App with CockroachDB (Hibernate)](build-a-java-app-with-cockroachdb-hibernate.html)


[Build a Java App with CockroachDB (jOOQ)](build-a-java-app-with-cockroachdb-jooq.html)
[Build a Spring App with CockroachDB (MyBatis)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) | +| JavaScript/TypeScript | [Sequelize](https://www.npmjs.com/package/sequelize)


[Knex.js](https://knexjs.org/)
[Prisma](https://prisma.io)
[TypeORM](https://www.npmjs.com/package/typeorm) | v6.0.5
(use latest version of CockroachDB adapter)
2.5.1
3.14.0
0.3.17 {% comment %}remove-unsafe-crdb-setting{% endcomment %} | Full


Full
Full
Full | [`sequelize-cockroachdb`](https://www.npmjs.com/package/sequelize-cockroachdb)


N/A
N/A
N/A | [Build a Node.js App with CockroachDB (Sequelize)](build-a-nodejs-app-with-cockroachdb-sequelize.html)


[Build a Node.js App with CockroachDB (Knex.js)](build-a-nodejs-app-with-cockroachdb-knexjs.html)
[Build a Node.js App with CockroachDB (Prisma)](build-a-nodejs-app-with-cockroachdb-prisma.html)
[Build a TypeScript App with CockroachDB (TypeORM)](build-a-typescript-app-with-cockroachdb.html) | +| Ruby | [ActiveRecord](https://rubygems.org/gems/activerecord)
[RGeo/RGeo-ActiveRecord](https://github.com/cockroachdb/activerecord-cockroachdb-adapter#working-with-spatial-data) | 8.1.0
(use latest version of CockroachDB adapter) | Full | [`activerecord-cockroachdb-adapter`](https://rubygems.org/gems/activerecord-cockroachdb-adapter)
(includes client-side transaction retry handling) | [Build a Ruby App with CockroachDB (ActiveRecord)](build-a-ruby-app-with-cockroachdb-activerecord.html) | +| Python | [Django](https://pypi.org/project/Django/)
(including [GeoDjango](https://docs.djangoproject.com/en/3.1/ref/contrib/gis/))
[peewee](https://github.com/coleifer/peewee/)
[SQLAlchemy](https://www.sqlalchemy.org/) | 4.1.x
(use latest version of CockroachDB adapter)

3.13.3
0.7.13
1.4.17
(use latest version of CockroachDB adapter) | Full


Full
Full
Full | [`django-cockroachdb`](https://pypi.org/project/django-cockroachdb/)


N/A
N/A
[`sqlalchemy-cockroachdb`](https://pypi.org/project/sqlalchemy-cockroachdb)
(includes client-side transaction retry handling) | [Build a Python App with CockroachDB (Django)](build-a-python-app-with-cockroachdb-django.html)


N/A (See [peewee docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cockroach-database).)
[Build a Python App with CockroachDB (SQLAlchemy)](build-a-python-app-with-cockroachdb-sqlalchemy.html) | ## Application frameworks diff --git a/src/current/_includes/v25.2/orchestration/start-cockroachdb-insecure.md b/src/current/_includes/v25.2/orchestration/start-cockroachdb-insecure.md index 3406d48edbb..c9af5cc0267 100644 --- a/src/current/_includes/v25.2/orchestration/start-cockroachdb-insecure.md +++ b/src/current/_includes/v25.2/orchestration/start-cockroachdb-insecure.md @@ -1,10 +1,10 @@ -1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it. +1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it. - Download [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml): + Download [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml ~~~ {{site.data.alerts.callout_info}} @@ -27,11 +27,11 @@ Alternatively, if you'd rather start with a configuration file that has been customized for performance: - 1. 
Download our [performance version of `cockroachdb-statefulset-insecure.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml): + 1. Download our [performance version of `cockroachdb-statefulset-insecure.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml ~~~ 1. Modify the file wherever there is a `TODO` comment. @@ -72,12 +72,12 @@ pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s ~~~ -1. Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: +1. 
Use our [`cluster-init.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml ~~~ ~~~ diff --git a/src/current/_includes/v25.2/orchestration/start-cockroachdb-local-insecure.md b/src/current/_includes/v25.2/orchestration/start-cockroachdb-local-insecure.md index 552cb3cd25f..196a53bce4c 100644 --- a/src/current/_includes/v25.2/orchestration/start-cockroachdb-local-insecure.md +++ b/src/current/_includes/v25.2/orchestration/start-cockroachdb-local-insecure.md @@ -1,8 +1,8 @@ -1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it: +1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it: {% include_cached copy-clipboard.html %} ~~~ shell - $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml + $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml ~~~ ~~~ @@ -41,12 +41,12 @@ pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s ~~~ -1. 
Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: +1. Use our [`cluster-init.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml ~~~ ~~~ diff --git a/src/current/_includes/v25.2/orchestration/start-cockroachdb-secure.md b/src/current/_includes/v25.2/orchestration/start-cockroachdb-secure.md index 972cabc2d8e..a7299e1aa25 100644 --- a/src/current/_includes/v25.2/orchestration/start-cockroachdb-secure.md +++ b/src/current/_includes/v25.2/orchestration/start-cockroachdb-secure.md @@ -1,10 +1,10 @@ ### Configure the cluster -1. Download and modify our [StatefulSet configuration](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml): +1. Download and modify our [StatefulSet configuration](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml ~~~ 1. Update `secretName` with the name of the corresponding node secret. 
diff --git a/src/current/_includes/v25.2/orchestration/test-cluster-secure.md b/src/current/_includes/v25.2/orchestration/test-cluster-secure.md index f255d8d62fc..c146d634626 100644 --- a/src/current/_includes/v25.2/orchestration/test-cluster-secure.md +++ b/src/current/_includes/v25.2/orchestration/test-cluster-secure.md @@ -42,7 +42,7 @@ $ kubectl create \ {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ --f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/client.yaml +-f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/client.yaml ~~~ ~~~ diff --git a/src/current/_includes/v25.2/prod-deployment/decommission-pre-flight-checks.md b/src/current/_includes/v25.2/prod-deployment/decommission-pre-flight-checks.md index b267379384b..70e35e4b72e 100644 --- a/src/current/_includes/v25.2/prod-deployment/decommission-pre-flight-checks.md +++ b/src/current/_includes/v25.2/prod-deployment/decommission-pre-flight-checks.md @@ -14,5 +14,5 @@ Failed running "node decommission" These checks can be skipped by [passing the flag `--checks=skip` to `cockroach node decommission`]({% link {{ page.version.version }}/cockroach-node.md %}#decommission-checks). {{site.data.alerts.callout_info}} -The amount of remaining disk space on other nodes in the cluster is not yet considered as part of the decommissioning pre-flight checks. For more information, see [cockroachdb/cockroach#71757](https://github.com/cockroachdb/cockroach/issues/71757) +The amount of remaining disk space on other nodes in the cluster is not yet considered as part of the decommissioning pre-flight checks. 
For more information, see cockroachdb/cockroach#71757 {{site.data.alerts.end}} diff --git a/src/current/_includes/v25.2/sql/savepoints-and-high-priority-transactions.md b/src/current/_includes/v25.2/sql/savepoints-and-high-priority-transactions.md index c6de489e641..43f09ea415f 100644 --- a/src/current/_includes/v25.2/sql/savepoints-and-high-priority-transactions.md +++ b/src/current/_includes/v25.2/sql/savepoints-and-high-priority-transactions.md @@ -1 +1 @@ -[`ROLLBACK TO SAVEPOINT`]({% link {{ page.version.version }}/rollback-transaction.md %}#rollback-a-nested-transaction) (for either regular savepoints or "restart savepoints" defined with `cockroach_restart`) causes a "feature not supported" error after a DDL statement in a [`HIGH PRIORITY` transaction]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities), in order to avoid a transaction deadlock. For more information, see GitHub issue [#46414](https://www.github.com/cockroachdb/cockroach/issues/46414). +[`ROLLBACK TO SAVEPOINT`]({% link {{ page.version.version }}/rollback-transaction.md %}#rollback-a-nested-transaction) (for either regular savepoints or "restart savepoints" defined with `cockroach_restart`) causes a "feature not supported" error after a DDL statement in a [`HIGH PRIORITY` transaction]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities), in order to avoid a transaction deadlock. For more information, see GitHub issue #46414. diff --git a/src/current/_includes/v25.2/sql/unsupported-postgres-features.md b/src/current/_includes/v25.2/sql/unsupported-postgres-features.md index a89650e38e9..9ad89047189 100644 --- a/src/current/_includes/v25.2/sql/unsupported-postgres-features.md +++ b/src/current/_includes/v25.2/sql/unsupported-postgres-features.md @@ -1,10 +1,10 @@ ### `CREATE DOMAIN` -CockroachDB does not support `CREATE DOMAIN`. Tracking issue: [cockroachdb/cockroach#108659](https://github.com/cockroachdb/cockroach/issues/108659). 
+CockroachDB does not support `CREATE DOMAIN`. Tracking issue: cockroachdb/cockroach#108659. ### PostgreSQL range types -CockroachDB does not support PostgreSQL range types. Tracking issue: [cockroachdb/cockroach#128638](https://github.com/cockroachdb/cockroach/issues/128638). +CockroachDB does not support PostgreSQL range types. Tracking issue: cockroachdb/cockroach#128638. ### Other unsupported features diff --git a/src/current/_includes/v25.3/backward-incompatible/alpha.1.md b/src/current/_includes/v25.3/backward-incompatible/alpha.1.md index b8b1f37d137..8f45ba00d36 100644 --- a/src/current/_includes/v25.3/backward-incompatible/alpha.1.md +++ b/src/current/_includes/v25.3/backward-incompatible/alpha.1.md @@ -1,15 +1,15 @@ -- CockroachDB no longer performs environment variable expansion in the parameter `--certs-dir`. Uses like `--certs-dir='$HOME/path'` (expansion by CockroachDB) can be replaced by `--certs-dir="$HOME/path"` (expansion by the Unix shell). [#81298][#81298] -- In the Cockroach CLI, [`BOOL` values](../v23.1/bool.html) are now formatted as `t` or `f` instead of `True` or `False`. [#81943][#81943] -- Removed the `cockroach quit` command. It has been deprecated since v20.1. To [shut down a node](../v23.1/node-shutdown.html) gracefully, send a `SIGTERM` signal to it. [#82988][#82988] -- Added a cluster version to allow the [Pebble storage engine](../v23.1/architecture/storage-layer.html#pebble) to recombine certain SSTables (specifically, user keys that are split across multiple files in a level of the [log-structured merge-tree](../v23.1/architecture/storage-layer.html#log-structured-merge-trees)). Recombining the split user keys is required for supporting the range keys feature. The migration to recombine the SSTables is expected to be short (split user keys are rare in practice), but will block subsequent migrations until all tables have been recombined. 
The `storage.marked-for-compaction-files` time series metric can show the progress of the migration. [#84887][#84887] +- CockroachDB no longer performs environment variable expansion in the parameter `--certs-dir`. Uses like `--certs-dir='$HOME/path'` (expansion by CockroachDB) can be replaced by `--certs-dir="$HOME/path"` (expansion by the Unix shell). #81298 +- In the Cockroach CLI, [`BOOL` values](../v23.1/bool.html) are now formatted as `t` or `f` instead of `True` or `False`. #81943 +- Removed the `cockroach quit` command. It has been deprecated since v20.1. To [shut down a node](../v23.1/node-shutdown.html) gracefully, send a `SIGTERM` signal to it. #82988 +- Added a cluster version to allow the [Pebble storage engine](../v23.1/architecture/storage-layer.html#pebble) to recombine certain SSTables (specifically, user keys that are split across multiple files in a level of the [log-structured merge-tree](../v23.1/architecture/storage-layer.html#log-structured-merge-trees)). Recombining the split user keys is required for supporting the range keys feature. The migration to recombine the SSTables is expected to be short (split user keys are rare in practice), but will block subsequent migrations until all tables have been recombined. The `storage.marked-for-compaction-files` time series metric can show the progress of the migration. #84887 - Using a single TCP port listener for both RPC (node-node) and SQL client connections is now deprecated. This capability **will be removed** in the next version of CockroachDB. Instead, make one of the following configuration changes to your CockroachDB deployment: - Preferred: keep port `26257` for SQL, and allocate a new port, e.g., `26357`, for node-node RPC connections. For example, you might configure a node with the flags `--listen-addr=:26357 --sql-addr=:26257`, where subsequent nodes seeking to join would then use the flag `--join=othernode:26357,othernode:26257`. 
This will become the default configuration in the next version of CockroachDB. When using this mode of operation, care should be taken to use a `--join` flag that includes both the previous and new port numbers for other nodes, so that no network partition occurs during the upgrade. - - Optional: keep port `26257` for RPC, and allocate a new port, e.g., `26357`, for SQL connections. For example, you might configure a node with the flags `--listen-addr=:26257 --sql-addr=:26357`. When using this mode of operation, the `--join` flags do not need to be modified. However, SQL client apps or the SQL load balancer configuration (when in use) must be updated to use the new SQL port number. [#85671][#85671] -- If no `nullif` option is specified while using [`IMPORT CSV`](../v23.1/import.html), then a zero-length string in the input is now treated as `NULL`. The quoted empty string in the input is treated as an empty string. Similarly, if `nullif` is specified, then an unquoted value is treated as `NULL`, and a quoted value is treated as that string. These changes were made to make `IMPORT CSV` behave more similarly to `COPY CSV`. If the previous behavior (i.e., treating either quoted or unquoted values that match the `nullif` setting as `NULL`) is desired, you can use the new `allow_quoted_null` option in the `IMPORT` statement. [#84487][#84487] -- [`COPY FROM`](../v23.1/copy.html) operations are now atomic by default instead of being segmented into 100 row transactions. Set the `copy_from_atomic_enabled` session setting to `false` for the previous behavior. [#85986][#85986] -- The `GRANT` privilege has been removed and replaced by the more granular [`WITH GRANT OPTION`]({% link v25.3/grant.md %}#grant-privileges-with-the-option-to-grant-to-others), which provides control over which privileges are allowed to be granted. [#81310][#81310] -- Removed the ability to cast `int`, `int2`, and `int8` to a `0` length `BIT` or `VARBIT`. 
[#81266][#81266] -- Removed the deprecated `GRANT` privilege. [#81310][#81310] -- Removed the `ttl_automatic_column` storage parameter. The `crdb_internal_expiration` column is created when `ttl_expire_after` is set and removed when `ttl_expire_after` is reset. [#83134][#83134] -- Removed the byte string parameter in the `crdb_internal.schedule_sql_stats_compaction` function. [#82560][#82560] -- Changed the default value of the `enable_implicit_transaction_for_batch_statements` to `true`. This means that a [batch of statements]({% link v25.3/transactions.md %}#batched-statements) sent in one string separated by semicolons is treated as an implicit transaction. [#76834][#76834] + - Optional: keep port `26257` for RPC, and allocate a new port, e.g., `26357`, for SQL connections. For example, you might configure a node with the flags `--listen-addr=:26257 --sql-addr=:26357`. When using this mode of operation, the `--join` flags do not need to be modified. However, SQL client apps or the SQL load balancer configuration (when in use) must be updated to use the new SQL port number. #85671 +- If no `nullif` option is specified while using [`IMPORT CSV`](../v23.1/import.html), then a zero-length string in the input is now treated as `NULL`. The quoted empty string in the input is treated as an empty string. Similarly, if `nullif` is specified, then an unquoted value is treated as `NULL`, and a quoted value is treated as that string. These changes were made to make `IMPORT CSV` behave more similarly to `COPY CSV`. If the previous behavior (i.e., treating either quoted or unquoted values that match the `nullif` setting as `NULL`) is desired, you can use the new `allow_quoted_null` option in the `IMPORT` statement. #84487 +- [`COPY FROM`](../v23.1/copy.html) operations are now atomic by default instead of being segmented into 100 row transactions. Set the `copy_from_atomic_enabled` session setting to `false` for the previous behavior. 
#85986 +- The `GRANT` privilege has been removed and replaced by the more granular [`WITH GRANT OPTION`]({% link v25.3/grant.md %}#grant-privileges-with-the-option-to-grant-to-others), which provides control over which privileges are allowed to be granted. #81310 +- Removed the ability to cast `int`, `int2`, and `int8` to a `0` length `BIT` or `VARBIT`. #81266 +- Removed the deprecated `GRANT` privilege. #81310 +- Removed the `ttl_automatic_column` storage parameter. The `crdb_internal_expiration` column is created when `ttl_expire_after` is set and removed when `ttl_expire_after` is reset. #83134 +- Removed the byte string parameter in the `crdb_internal.schedule_sql_stats_compaction` function. #82560 +- Changed the default value of the `enable_implicit_transaction_for_batch_statements` to `true`. This means that a [batch of statements]({% link v25.3/transactions.md %}#batched-statements) sent in one string separated by semicolons is treated as an implicit transaction. #76834 diff --git a/src/current/_includes/v25.3/cdc/avro-udt-composite.md b/src/current/_includes/v25.3/cdc/avro-udt-composite.md index 7a34fbd3253..8374c4d2baf 100644 --- a/src/current/_includes/v25.3/cdc/avro-udt-composite.md +++ b/src/current/_includes/v25.3/cdc/avro-udt-composite.md @@ -1 +1 @@ -A changefeed in [Avro format]({% link {{ page.version.version }}/changefeed-messages.md %}#avro) will not be able to serialize [user-defined composite (tuple) types](create-type.html). [#102903](https://github.com/cockroachdb/cockroach/issues/102903) \ No newline at end of file +A changefeed in [Avro format]({% link {{ page.version.version }}/changefeed-messages.md %}#avro) will not be able to serialize [user-defined composite (tuple) types](create-type.html). 
#102903 \ No newline at end of file diff --git a/src/current/_includes/v25.3/cdc/csv-udt-composite.md b/src/current/_includes/v25.3/cdc/csv-udt-composite.md index 834bddd8366..39c88f3e06a 100644 --- a/src/current/_includes/v25.3/cdc/csv-udt-composite.md +++ b/src/current/_includes/v25.3/cdc/csv-udt-composite.md @@ -1 +1 @@ -A changefeed emitting [CSV]({% link {{ page.version.version }}/changefeed-messages.md %}#csv) will include `AS` labels in the message format when the changefeed serializes a [user-defined composite type]({% link {{ page.version.version }}/create-type.md %}). [#102905](https://github.com/cockroachdb/cockroach/issues/102905) \ No newline at end of file +A changefeed emitting [CSV]({% link {{ page.version.version }}/changefeed-messages.md %}#csv) will include `AS` labels in the message format when the changefeed serializes a [user-defined composite type]({% link {{ page.version.version }}/create-type.md %}). #102905 \ No newline at end of file diff --git a/src/current/_includes/v25.3/faq/what-is-crdb.md b/src/current/_includes/v25.3/faq/what-is-crdb.md index 28857ed61fa..ee04ca10506 100644 --- a/src/current/_includes/v25.3/faq/what-is-crdb.md +++ b/src/current/_includes/v25.3/faq/what-is-crdb.md @@ -1,6 +1,6 @@ CockroachDB is a [distributed SQL](https://www.cockroachlabs.com/blog/what-is-distributed-sql/) database built on a transactional and strongly-consistent key-value store. It **scales** horizontally; **survives** disk, machine, rack, and even datacenter failures with minimal latency disruption and no manual intervention; supports **strongly-consistent** ACID transactions; and provides a familiar **SQL** API for structuring, manipulating, and querying data. -CockroachDB is inspired by Google's [Spanner](http://research.google.com/archive/spanner.html) and [F1](http://research.google.com/pubs/pub38125.html) technologies, and the [source code](https://github.com/cockroachdb/cockroach) is freely available. 
+CockroachDB is inspired by Google's [Spanner](http://research.google.com/archive/spanner.html) and [F1](http://research.google.com/pubs/pub38125.html) technologies, and the source code is freely available. {{site.data.alerts.callout_success}} For a deeper dive into CockroachDB's capabilities and how it fits into the database landscape, take the free [**Intro to Distributed SQL and CockroachDB**](https://university.cockroachlabs.com/courses/course-v1:crl+intro-to-distributed-sql-and-cockroachdb+self-paced/about) course on Cockroach University. diff --git a/src/current/_includes/v25.3/finalization-required/119894.md b/src/current/_includes/v25.3/finalization-required/119894.md index f2b393c3c0e..f5177dcd523 100644 --- a/src/current/_includes/v25.3/finalization-required/119894.md +++ b/src/current/_includes/v25.3/finalization-required/119894.md @@ -1 +1 @@ -[Splits](https://cockroachlabs.com/docs/{{ include.version }}/architecture/distribution-layer#range-splits) no longer hold [latches](https://cockroachlabs.com/docs/architecture/distribution-layer.#latch-manager) for time proportional to the range size while computing [MVCC](https://cockroachlabs.com/docs/{{ include.version }}/architecture/storage-layer#mvcc) statistics. Instead, MVCC statistics are pre-computed before the critical section of the split. As a side effect, the resulting statistics are no longer 100% accurate because they may correctly distribute writes concurrent with the split. To mitigate against this potential inaccuracy, and to prevent the statistics from drifting after successive splits, the existing stored statistics are re-computed and corrected if needed during the non-critical section of the split. 
[#119894](https://github.com/cockroachdb/cockroach/pull/119894) +[Splits](https://cockroachlabs.com/docs/{{ include.version }}/architecture/distribution-layer#range-splits) no longer hold [latches](https://cockroachlabs.com/docs/{{ include.version }}/architecture/distribution-layer#latch-manager) for time proportional to the range size while computing [MVCC](https://cockroachlabs.com/docs/{{ include.version }}/architecture/storage-layer#mvcc) statistics. Instead, MVCC statistics are pre-computed before the critical section of the split. As a side effect, the resulting statistics are no longer 100% accurate because they may correctly distribute writes concurrent with the split. To mitigate against this potential inaccuracy, and to prevent the statistics from drifting after successive splits, the existing stored statistics are re-computed and corrected if needed during the non-critical section of the split. #119894 diff --git a/src/current/_includes/v25.3/known-limitations/alter-changefeed-cdc-queries.md b/src/current/_includes/v25.3/known-limitations/alter-changefeed-cdc-queries.md index 56dd7eeaacd..4134b3c9f86 100644 --- a/src/current/_includes/v25.3/known-limitations/alter-changefeed-cdc-queries.md +++ b/src/current/_includes/v25.3/known-limitations/alter-changefeed-cdc-queries.md @@ -1 +1 @@ -{% if page.name == "alter-changefeed.md" %} `ALTER CHANGEFEED` {% else %} [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) {% endif %} is not fully supported with changefeeds that use {% if page.name == "cdc-queries.md" %} CDC queries. {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}). {% endif %} You can alter the options that a changefeed uses, but you cannot alter the changefeed target tables.
[#83033](https://github.com/cockroachdb/cockroach/issues/83033) \ No newline at end of file +{% if page.name == "alter-changefeed.md" %} `ALTER CHANGEFEED` {% else %} [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) {% endif %} is not fully supported with changefeeds that use {% if page.name == "cdc-queries.md" %} CDC queries. {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}). {% endif %} You can alter the options that a changefeed uses, but you cannot alter the changefeed target tables. #83033 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/alter-changefeed-limitations.md b/src/current/_includes/v25.3/known-limitations/alter-changefeed-limitations.md index a183f2964f4..cb75ad71c55 100644 --- a/src/current/_includes/v25.3/known-limitations/alter-changefeed-limitations.md +++ b/src/current/_includes/v25.3/known-limitations/alter-changefeed-limitations.md @@ -1,4 +1,4 @@ -- It is necessary to [`PAUSE`]({% link {{ page.version.version }}/pause-job.md %}) the changefeed before performing any [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) statement. [#77171](https://github.com/cockroachdb/cockroach/issues/77171) +- It is necessary to [`PAUSE`]({% link {{ page.version.version }}/pause-job.md %}) the changefeed before performing any [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) statement. #77171 - CockroachDB does not keep track of the [`initial_scan`]({% link {{ page.version.version }}/create-changefeed.md %}#initial-scan) option applied to tables when it is set to `yes` or `only`. 
For example: ~~~ sql diff --git a/src/current/_includes/v25.3/known-limitations/alter-view-limitations.md b/src/current/_includes/v25.3/known-limitations/alter-view-limitations.md index 642bed6ce08..e44921629ff 100644 --- a/src/current/_includes/v25.3/known-limitations/alter-view-limitations.md +++ b/src/current/_includes/v25.3/known-limitations/alter-view-limitations.md @@ -1,4 +1,4 @@ `ALTER VIEW` does not currently support: - Changing the [`SELECT`]({% link {{ page.version.version }}/select-clause.md %}) statement executed by a view. Instead, you must drop the existing view and create a new view. -- Renaming a view that other views depend on. This feature may be added in the future. [#10083](https://github.com/cockroachdb/cockroach/issues/10083) \ No newline at end of file +- Renaming a view that other views depend on. This feature may be added in the future. #10083 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/aost-limitations.md b/src/current/_includes/v25.3/known-limitations/aost-limitations.md index 811c884d08d..c42e62800d3 100644 --- a/src/current/_includes/v25.3/known-limitations/aost-limitations.md +++ b/src/current/_includes/v25.3/known-limitations/aost-limitations.md @@ -1 +1 @@ -CockroachDB does not support placeholders in {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %}. The time value must be a constant value embedded in the SQL string. [#30955](https://github.com/cockroachdb/cockroach/issues/30955) \ No newline at end of file +CockroachDB does not support placeholders in {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %}. The time value must be a constant value embedded in the SQL string. 
#30955 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/cannot-refresh-materialized-views-inside-transactions.md b/src/current/_includes/v25.3/known-limitations/cannot-refresh-materialized-views-inside-transactions.md index b0aaf728177..2f6fe466a95 100644 --- a/src/current/_includes/v25.3/known-limitations/cannot-refresh-materialized-views-inside-transactions.md +++ b/src/current/_includes/v25.3/known-limitations/cannot-refresh-materialized-views-inside-transactions.md @@ -24,4 +24,4 @@ SQLSTATE: 25000 ~~~ - [#66008](https://github.com/cockroachdb/cockroach/issues/66008) + #66008 diff --git a/src/current/_includes/v25.3/known-limitations/cdc-queries-column-families.md b/src/current/_includes/v25.3/known-limitations/cdc-queries-column-families.md index 505a8c9700e..52bdabec236 100644 --- a/src/current/_includes/v25.3/known-limitations/cdc-queries-column-families.md +++ b/src/current/_includes/v25.3/known-limitations/cdc-queries-column-families.md @@ -1 +1 @@ -Creating a changefeed with {% if page.name == "cdc-queries.md" %} CDC queries {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}) {% endif %} on tables with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %} is not supported. [#127761](https://github.com/cockroachdb/cockroach/issues/127761) \ No newline at end of file +Creating a changefeed with {% if page.name == "cdc-queries.md" %} CDC queries {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}) {% endif %} on tables with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %} is not supported. 
#127761 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/cdc-queries.md b/src/current/_includes/v25.3/known-limitations/cdc-queries.md index 2839eba5eda..552900531b0 100644 --- a/src/current/_includes/v25.3/known-limitations/cdc-queries.md +++ b/src/current/_includes/v25.3/known-limitations/cdc-queries.md @@ -3,5 +3,5 @@ - The following are not permitted in CDC queries: - [Volatile functions]({% link {{ page.version.version }}/functions-and-operators.md %}#function-volatility). - Sub-select queries. - - [Aggregate]({% link {{ page.version.version }}/functions-and-operators.md %}#aggregate-functions) and [window functions]({% link {{ page.version.version }}/window-functions.md %}) (i.e., functions operating over many rows). [#98237](https://github.com/cockroachdb/cockroach/issues/98237) -- `delete` changefeed events will only contain the [primary key]({% link {{ page.version.version }}/primary-key.md %}). All other columns will emit as `NULL`. See [Capture delete messages]({% link {{ page.version.version }}/cdc-queries.md %}#capture-delete-messages) for detail on running a CDC query that emits the deleted values. [#83835](https://github.com/cockroachdb/cockroach/issues/83835) + - [Aggregate]({% link {{ page.version.version }}/functions-and-operators.md %}#aggregate-functions) and [window functions]({% link {{ page.version.version }}/window-functions.md %}) (i.e., functions operating over many rows). #98237 +- `delete` changefeed events will only contain the [primary key]({% link {{ page.version.version }}/primary-key.md %}). All other columns will emit as `NULL`. See [Capture delete messages]({% link {{ page.version.version }}/cdc-queries.md %}#capture-delete-messages) for detail on running a CDC query that emits the deleted values. 
#83835 diff --git a/src/current/_includes/v25.3/known-limitations/cdc.md b/src/current/_includes/v25.3/known-limitations/cdc.md index a473e94367c..0a3914da8bd 100644 --- a/src/current/_includes/v25.3/known-limitations/cdc.md +++ b/src/current/_includes/v25.3/known-limitations/cdc.md @@ -1,8 +1,8 @@ -- Changefeed target options are limited to tables and [column families]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}). [#73435](https://github.com/cockroachdb/cockroach/issues/73435) +- Changefeed target options are limited to tables and [column families]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}). #73435 - {% include {{page.version.version}}/cdc/kafka-vpc-limitation.md %} -- Webhook sinks only support HTTPS. Use the [`insecure_tls_skip_verify`]({% link {{ page.version.version }}/create-changefeed.md %}#insecure-tls-skip-verify) parameter when testing to disable certificate verification; however, this still requires HTTPS and certificates. [#73431](https://github.com/cockroachdb/cockroach/issues/73431) -- Formats for changefeed messages are not supported by all changefeed sinks. Refer to the [Changefeed Sinks]({% link {{ page.version.version }}/changefeed-sinks.md %}) page for details on compatible formats with each sink and the [`format`]({% link {{ page.version.version }}/create-changefeed.md %}) option to specify a changefeed message format. [#73432](https://github.com/cockroachdb/cockroach/issues/73432) -- Using the [`split_column_families`]({% link {{ page.version.version }}/create-changefeed.md %}#split-column-families) and [`resolved`]({% link {{ page.version.version }}/create-changefeed.md %}#resolved) options on the same changefeed will cause an error when using the following [sinks](changefeed-sinks.html): Kafka and Google Cloud Pub/Sub. Instead, use the individual `FAMILY` keyword to specify column families when creating a changefeed. 
[#79452](https://github.com/cockroachdb/cockroach/issues/79452) +- Webhook sinks only support HTTPS. Use the [`insecure_tls_skip_verify`]({% link {{ page.version.version }}/create-changefeed.md %}#insecure-tls-skip-verify) parameter when testing to disable certificate verification; however, this still requires HTTPS and certificates. #73431 +- Formats for changefeed messages are not supported by all changefeed sinks. Refer to the [Changefeed Sinks]({% link {{ page.version.version }}/changefeed-sinks.md %}) page for details on compatible formats with each sink and the [`format`]({% link {{ page.version.version }}/create-changefeed.md %}) option to specify a changefeed message format. #73432 +- Using the [`split_column_families`]({% link {{ page.version.version }}/create-changefeed.md %}#split-column-families) and [`resolved`]({% link {{ page.version.version }}/create-changefeed.md %}#resolved) options on the same changefeed will cause an error when using the following [sinks](changefeed-sinks.html): Kafka and Google Cloud Pub/Sub. Instead, use the individual `FAMILY` keyword to specify column families when creating a changefeed. 
#79452 - {% include {{page.version.version}}/cdc/types-udt-composite-general.md %} The following limitations apply: - {% include {{page.version.version}}/cdc/avro-udt-composite.md %} - {% include {{page.version.version}}/cdc/csv-udt-composite.md %} \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/changefeed-column-family-message.md b/src/current/_includes/v25.3/known-limitations/changefeed-column-family-message.md index 41744b9b4b4..e77b6cc51e0 100644 --- a/src/current/_includes/v25.3/known-limitations/changefeed-column-family-message.md +++ b/src/current/_includes/v25.3/known-limitations/changefeed-column-family-message.md @@ -1 +1 @@ -When you create a changefeed on a table with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %}, the changefeed will emit messages per column family in separate streams. As a result, [changefeed messages]({% link {{ page.version.version }}/changefeed-messages.md %}) for different column families will arrive at the [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) under separate topics. [#127736](https://github.com/cockroachdb/cockroach/issues/127736) \ No newline at end of file +When you create a changefeed on a table with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %}, the changefeed will emit messages per column family in separate streams. As a result, [changefeed messages]({% link {{ page.version.version }}/changefeed-messages.md %}) for different column families will arrive at the [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) under separate topics. 
#127736 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/citext-limitations.md b/src/current/_includes/v25.3/known-limitations/citext-limitations.md index df0fbc0ff91..c7e43cd2949 100644 --- a/src/current/_includes/v25.3/known-limitations/citext-limitations.md +++ b/src/current/_includes/v25.3/known-limitations/citext-limitations.md @@ -1 +1 @@ -- `CITEXT` types are not currently compatible with the `LIKE` and `ILIKE` operators. [#149791](https://github.com/cockroachdb/cockroach/issues/149791) \ No newline at end of file +- `CITEXT` types are not currently compatible with the `LIKE` and `ILIKE` operators. #149791 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/copy-syntax.md b/src/current/_includes/v25.3/known-limitations/copy-syntax.md index e64a075dcac..bd2e56e1c25 100644 --- a/src/current/_includes/v25.3/known-limitations/copy-syntax.md +++ b/src/current/_includes/v25.3/known-limitations/copy-syntax.md @@ -1,5 +1,5 @@ CockroachDB does not yet support the following `COPY` syntax: - - `COPY ... WITH FREEZE`. [#85573](https://github.com/cockroachdb/cockroach/issues/85573) - - `COPY ... WITH QUOTE`. [#85574](https://github.com/cockroachdb/cockroach/issues/85574) - - `COPY ... FROM ... WHERE `. [#54580](https://github.com/cockroachdb/cockroach/issues/54580) + - `COPY ... WITH FREEZE`. #85573 + - `COPY ... WITH QUOTE`. #85574 + - `COPY ... FROM ... WHERE `. 
#54580 diff --git a/src/current/_includes/v25.3/known-limitations/create-statistics-aost-limitation.md b/src/current/_includes/v25.3/known-limitations/create-statistics-aost-limitation.md index 09f86f51c48..d318ce338be 100644 --- a/src/current/_includes/v25.3/known-limitations/create-statistics-aost-limitation.md +++ b/src/current/_includes/v25.3/known-limitations/create-statistics-aost-limitation.md @@ -1 +1 @@ -The `ANALYZE` alias {% if page.name != "create-statistics.md" %}of [`CREATE STATISTICS`]({% link {{ page.version.version }}/create-statistics.md %}){% endif %} does not support specifying an {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %} timestamp. `ANALYZE` statements use `AS OF SYSTEM TIME '-0.001ms'` automatically. For more control over the statistics interval, use the `CREATE STATISTICS` syntax instead. [#96430](https://github.com/cockroachdb/cockroach/issues/96430) \ No newline at end of file +The `ANALYZE` alias {% if page.name != "create-statistics.md" %}of [`CREATE STATISTICS`]({% link {{ page.version.version }}/create-statistics.md %}){% endif %} does not support specifying an {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %} timestamp. `ANALYZE` statements use `AS OF SYSTEM TIME '-0.001ms'` automatically. For more control over the statistics interval, use the `CREATE STATISTICS` syntax instead. 
#96430 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/distsql-heterogeneous-endianness.md b/src/current/_includes/v25.3/known-limitations/distsql-heterogeneous-endianness.md index 9194395dc04..7aa2963023e 100644 --- a/src/current/_includes/v25.3/known-limitations/distsql-heterogeneous-endianness.md +++ b/src/current/_includes/v25.3/known-limitations/distsql-heterogeneous-endianness.md @@ -1 +1 @@ -In clusters that mix big-endian and little-endian architectures, DistSQL may produce incorrect results because hash computations differ between the platforms. As a workaround on heterogeneous clusters, disable DistSQL with `SET CLUSTER SETTING sql.defaults.distsql = off`. [#148773](https://github.com/cockroachdb/cockroach/issues/148773) \ No newline at end of file +In clusters that mix big-endian and little-endian architectures, DistSQL may produce incorrect results because hash computations differ between the platforms. As a workaround on heterogeneous clusters, disable DistSQL with `SET CLUSTER SETTING sql.defaults.distsql = off`. #148773 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/drop-column-partial-index.md b/src/current/_includes/v25.3/known-limitations/drop-column-partial-index.md index 9fd1811cc43..fd28f0a96a2 100644 --- a/src/current/_includes/v25.3/known-limitations/drop-column-partial-index.md +++ b/src/current/_includes/v25.3/known-limitations/drop-column-partial-index.md @@ -1 +1 @@ -CockroachDB prevents a column from being dropped using [`ALTER TABLE ... DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) if it is referenced by a partial index predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). [#97813](https://github.com/cockroachdb/cockroach/issues/97813). 
\ No newline at end of file +CockroachDB prevents a column from being dropped using [`ALTER TABLE ... DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) if it is referenced by a partial index predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). #97813. \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/drop-owned-by-limitations.md b/src/current/_includes/v25.3/known-limitations/drop-owned-by-limitations.md index 95685f6adf1..af99d4b7cfe 100644 --- a/src/current/_includes/v25.3/known-limitations/drop-owned-by-limitations.md +++ b/src/current/_includes/v25.3/known-limitations/drop-owned-by-limitations.md @@ -10,4 +10,4 @@ The phrase "synthetic privileges" in the error message refers to [system-level privileges]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges). - The workaround is to use [`SHOW SYSTEM GRANTS FOR {role}`](show-system-grants.html) and then use [`REVOKE SYSTEM ...`](revoke.html#revoke-system-level-privileges-on-the-entire-cluster) for each privilege in the result. [#88149](https://github.com/cockroachdb/cockroach/issues/88149) \ No newline at end of file + The workaround is to use [`SHOW SYSTEM GRANTS FOR {role}`](show-system-grants.html) and then use [`REVOKE SYSTEM ...`](revoke.html#revoke-system-level-privileges-on-the-entire-cluster) for each privilege in the result. 
#88149 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/drop-trigger-limitations.md b/src/current/_includes/v25.3/known-limitations/drop-trigger-limitations.md index 90745f7e17a..2a633ccb369 100644 --- a/src/current/_includes/v25.3/known-limitations/drop-trigger-limitations.md +++ b/src/current/_includes/v25.3/known-limitations/drop-trigger-limitations.md @@ -1 +1 @@ -[`DROP TRIGGER`]({% link {{ page.version.version }}/drop-trigger.md %}) with `CASCADE` is not supported. [#128151](https://github.com/cockroachdb/cockroach/issues/128151) \ No newline at end of file +[`DROP TRIGGER`]({% link {{ page.version.version }}/drop-trigger.md %}) with `CASCADE` is not supported. #128151 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/enforce-home-region-limitations.md b/src/current/_includes/v25.3/known-limitations/enforce-home-region-limitations.md index 693829358f7..53ce961b902 100644 --- a/src/current/_includes/v25.3/known-limitations/enforce-home-region-limitations.md +++ b/src/current/_includes/v25.3/known-limitations/enforce-home-region-limitations.md @@ -1 +1 @@ -With `enforce_home_region` enabled, CockroachDB currently validates home-region access during plan build. This can falsely reject queries (e.g., lookup joins) that would only read local data at execution time, returning a `Query has no home region` error. [#148375](https://github.com/cockroachdb/cockroach/issues/148375) \ No newline at end of file +With `enforce_home_region` enabled, CockroachDB currently validates home-region access during plan build. This can falsely reject queries (e.g., lookup joins) that would only read local data at execution time, returning a `Query has no home region` error. 
#148375 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/expression-index-limitations.md b/src/current/_includes/v25.3/known-limitations/expression-index-limitations.md index c0e94185948..9487028791c 100644 --- a/src/current/_includes/v25.3/known-limitations/expression-index-limitations.md +++ b/src/current/_includes/v25.3/known-limitations/expression-index-limitations.md @@ -1,6 +1,6 @@ - The expression cannot reference columns outside the index's table. - Functional expression output must be determined by the input arguments. For example, you can't use the [volatile function]({% link {{ page.version.version }}/functions-and-operators.md %}#function-volatility) `now()` to create an index because its output depends on more than just the function arguments. -- CockroachDB does not allow {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} to reference [computed columns]({% link {{ page.version.version }}/computed-columns.md %}). [#67900](https://github.com/cockroachdb/cockroach/issues/67900) +- CockroachDB does not allow {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} to reference [computed columns]({% link {{ page.version.version }}/computed-columns.md %}). #67900 - CockroachDB does not support expressions as `ON CONFLICT` targets. This means that unique {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} cannot be selected as arbiters for [`INSERT .. ON CONFLICT`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause) statements. 
For example: {% include_cached copy-clipboard.html %} @@ -40,4 +40,4 @@ HINT: try \h INSERT ~~~ - [#67893](https://github.com/cockroachdb/cockroach/issues/67893) + #67893 diff --git a/src/current/_includes/v25.3/known-limitations/forecasted-stats-limitations.md b/src/current/_includes/v25.3/known-limitations/forecasted-stats-limitations.md index c8753124a96..b5d03e8ffc5 100644 --- a/src/current/_includes/v25.3/known-limitations/forecasted-stats-limitations.md +++ b/src/current/_includes/v25.3/known-limitations/forecasted-stats-limitations.md @@ -6,4 +6,4 @@ Although [`SHOW STATISTICS WITH FORECAST`]({% link {{ page.version.version }}/show-statistics.md %}#display-forecasted-statistics) shows the settings taking effect immediately, they do not actually take effect until new statistics are collected (as can be verified with [`EXPLAIN`]({% link {{ page.version.version }}/explain.md %})). - As a workaround, disable and enable forecasting at the [cluster]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-clusters) or [table]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-tables) level. This will invalidate the statistics cache and cause these settings to take effect immediately. [#123852](https://github.com/cockroachdb/cockroach/issues/123852) \ No newline at end of file + As a workaround, disable and enable forecasting at the [cluster]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-clusters) or [table]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-tables) level. This will invalidate the statistics cache and cause these settings to take effect immediately. 
#123852 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/full-text-search-unsupported.md b/src/current/_includes/v25.3/known-limitations/full-text-search-unsupported.md index 7b5a83f2cae..2be666994f1 100644 --- a/src/current/_includes/v25.3/known-limitations/full-text-search-unsupported.md +++ b/src/current/_includes/v25.3/known-limitations/full-text-search-unsupported.md @@ -11,4 +11,4 @@ - `!! tsquery` comparisons. - `tsquery @> tsquery` and `tsquery <@ tsquery` comparisons. -[#41288](https://github.com/cockroachdb/cockroach/issues/41288) \ No newline at end of file +#41288 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/generic-query-plan-limitations.md b/src/current/_includes/v25.3/known-limitations/generic-query-plan-limitations.md index e28e66d5f32..03d72195334 100644 --- a/src/current/_includes/v25.3/known-limitations/generic-query-plan-limitations.md +++ b/src/current/_includes/v25.3/known-limitations/generic-query-plan-limitations.md @@ -1,2 +1,2 @@ -- Because [generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache) use lookup joins instead of the scans and revscans used by custom query plans, generic query plans do not perform as well as custom query plans in some cases. [#128916](https://github.com/cockroachdb/cockroach/issues/128916) -- [Generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-type) are not included in the [plan cache]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache). This means a generic query plan built and optimized for a prepared statement in one session cannot be used by another session. To reuse generic query plans for maximum performance, a prepared statement should be executed multiple times instead of prepared and executed once. 
[#128911](https://github.com/cockroachdb/cockroach/issues/128911) \ No newline at end of file +- Because [generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache) use lookup joins instead of the scans and revscans used by custom query plans, generic query plans do not perform as well as custom query plans in some cases. #128916 +- [Generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-type) are not included in the [plan cache]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache). This means a generic query plan built and optimized for a prepared statement in one session cannot be used by another session. To reuse generic query plans for maximum performance, a prepared statement should be executed multiple times instead of prepared and executed once. #128911 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/geospatial-heterogeneous-architectures.md b/src/current/_includes/v25.3/known-limitations/geospatial-heterogeneous-architectures.md index 4bb9633f138..55b706c3d6d 100644 --- a/src/current/_includes/v25.3/known-limitations/geospatial-heterogeneous-architectures.md +++ b/src/current/_includes/v25.3/known-limitations/geospatial-heterogeneous-architectures.md @@ -1 +1 @@ -Clusters that mix `s390x` with other CPU architectures are unsupported for geospatial workloads. Due to differences in how trigonometric functions are computed on `s390x` systems, geospatial queries in heterogeneous clusters with `s390x` are likely to get incorrect results. This can include taking a backup on one architecture and restoring it on another. [#148783](https://github.com/cockroachdb/cockroach/issues/148783) \ No newline at end of file +Clusters that mix `s390x` with other CPU architectures are unsupported for geospatial workloads. 
Due to differences in how trigonometric functions are computed on `s390x` systems, geospatial queries in heterogeneous clusters with `s390x` are likely to get incorrect results. This can include taking a backup on one architecture and restoring it on another. #148783 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/jsonpath-limitations.md b/src/current/_includes/v25.3/known-limitations/jsonpath-limitations.md index 9b51bfb6e87..a74232bd40b 100644 --- a/src/current/_includes/v25.3/known-limitations/jsonpath-limitations.md +++ b/src/current/_includes/v25.3/known-limitations/jsonpath-limitations.md @@ -1,2 +1,2 @@ -- The following keywords are only accepted in lowercase: `strict`, `lax`, `exists`, `like_regex`, `flag`, `is unknown`, `to`, `last`. [#144255](https://github.com/cockroachdb/cockroach/issues/144255) -- Comparisons involving empty arrays (e.g., `SELECT jsonb_path_query('{"a": [1], "b": []}', '$.a == $.b');`) return `null`, rather than `false` as in PostgreSQL. [#145099](https://github.com/cockroachdb/cockroach/issues/145099) \ No newline at end of file +- The following keywords are only accepted in lowercase: `strict`, `lax`, `exists`, `like_regex`, `flag`, `is unknown`, `to`, `last`. #144255 +- Comparisons involving empty arrays (e.g., `SELECT jsonb_path_query('{"a": [1], "b": []}', '$.a == $.b');`) return `null`, rather than `false` as in PostgreSQL. #145099 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/ldr-column-families.md b/src/current/_includes/v25.3/known-limitations/ldr-column-families.md index 2a7c3bbba52..0c9dcd74ae4 100644 --- a/src/current/_includes/v25.3/known-limitations/ldr-column-families.md +++ b/src/current/_includes/v25.3/known-limitations/ldr-column-families.md @@ -1 +1 @@ -Replicating tables cannot contain [column families]({% link {{ page.version.version }}/column-families.md %}). 
[#133562](https://github.com/cockroachdb/cockroach/issues/133562) \ No newline at end of file +Replicating tables cannot contain [column families]({% link {{ page.version.version }}/column-families.md %}). #133562 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/ldr-composite-primary.md b/src/current/_includes/v25.3/known-limitations/ldr-composite-primary.md index ac897af35a7..2a790952946 100644 --- a/src/current/_includes/v25.3/known-limitations/ldr-composite-primary.md +++ b/src/current/_includes/v25.3/known-limitations/ldr-composite-primary.md @@ -1 +1 @@ -The [primary key]({% link {{ page.version.version }}/primary-key.md %}) in replicating tables cannot contain composite types. [#133572](https://github.com/cockroachdb/cockroach/issues/133572) \ No newline at end of file +The [primary key]({% link {{ page.version.version }}/primary-key.md %}) in replicating tables cannot contain composite types. #133572 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/ldr-indexes.md b/src/current/_includes/v25.3/known-limitations/ldr-indexes.md index 0bf7f60c2d4..4936f9868bd 100644 --- a/src/current/_includes/v25.3/known-limitations/ldr-indexes.md +++ b/src/current/_includes/v25.3/known-limitations/ldr-indexes.md @@ -1 +1 @@ -Replicating tables cannot contain an [index]({% link {{ page.version.version }}/indexes.md %}) that requires expression evaluation before insertion. [#133560](https://github.com/cockroachdb/cockroach/issues/133560) \ No newline at end of file +Replicating tables cannot contain an [index]({% link {{ page.version.version }}/indexes.md %}) that requires expression evaluation before insertion. 
#133560 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/ldr-sequences.md b/src/current/_includes/v25.3/known-limitations/ldr-sequences.md index 4e39f3630e3..693c0cc685b 100644 --- a/src/current/_includes/v25.3/known-limitations/ldr-sequences.md +++ b/src/current/_includes/v25.3/known-limitations/ldr-sequences.md @@ -1 +1 @@ -Replicating table cannot reference [sequences]({% link {{ page.version.version }}/create-sequence.md %}). [#132303](https://github.com/cockroachdb/cockroach/issues/132303) \ No newline at end of file +Replicating table cannot reference [sequences]({% link {{ page.version.version }}/create-sequence.md %}). #132303 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/ldr-triggers.md b/src/current/_includes/v25.3/known-limitations/ldr-triggers.md index 55f8e885b97..a587568b69a 100644 --- a/src/current/_includes/v25.3/known-limitations/ldr-triggers.md +++ b/src/current/_includes/v25.3/known-limitations/ldr-triggers.md @@ -1 +1 @@ -Replicating tables cannot reference triggers. [#132301](https://github.com/cockroachdb/cockroach/issues/132301) \ No newline at end of file +Replicating tables cannot reference triggers. #132301 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/ldr-udfs.md b/src/current/_includes/v25.3/known-limitations/ldr-udfs.md index fb642f14751..62eea605b52 100644 --- a/src/current/_includes/v25.3/known-limitations/ldr-udfs.md +++ b/src/current/_includes/v25.3/known-limitations/ldr-udfs.md @@ -1 +1 @@ -Replicating tables cannot reference [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). [#132302](https://github.com/cockroachdb/cockroach/issues/132302) \ No newline at end of file +Replicating tables cannot reference [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). 
#132302 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/locality-optimized-search-virtual-computed-columns.md b/src/current/_includes/v25.3/known-limitations/locality-optimized-search-virtual-computed-columns.md index d6acf418aa8..39b3f2cb00e 100644 --- a/src/current/_includes/v25.3/known-limitations/locality-optimized-search-virtual-computed-columns.md +++ b/src/current/_includes/v25.3/known-limitations/locality-optimized-search-virtual-computed-columns.md @@ -1 +1 @@ -- {% if page.name == "cost-based-optimizer.md" %} Locality optimized search {% else %} [Locality optimized search]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) {% endif %} does not work for queries that use [partitioned unique indexes]({% link {{ page.version.version }}/partitioning.md %}#partition-using-a-secondary-index) on [virtual computed columns](computed-columns.html#virtual-computed-columns). A workaround for computed columns is to make the virtual computed column a [stored computed column](computed-columns.html#stored-computed-columns). Locality optimized search does not work for queries that use partitioned unique [expression indexes](expression-indexes.html). [#68129](https://github.com/cockroachdb/cockroach/issues/68129) +- {% if page.name == "cost-based-optimizer.md" %} Locality optimized search {% else %} [Locality optimized search]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) {% endif %} does not work for queries that use [partitioned unique indexes]({% link {{ page.version.version }}/partitioning.md %}#partition-using-a-secondary-index) on [virtual computed columns](computed-columns.html#virtual-computed-columns). A workaround for computed columns is to make the virtual computed column a [stored computed column](computed-columns.html#stored-computed-columns). 
Locality optimized search does not work for queries that use partitioned unique [expression indexes](expression-indexes.html). #68129 diff --git a/src/current/_includes/v25.3/known-limitations/materialized-views-no-stats.md b/src/current/_includes/v25.3/known-limitations/materialized-views-no-stats.md index 02f2bd787c4..2bfe00d5307 100644 --- a/src/current/_includes/v25.3/known-limitations/materialized-views-no-stats.md +++ b/src/current/_includes/v25.3/known-limitations/materialized-views-no-stats.md @@ -1 +1 @@ -- The optimizer may not select the most optimal query plan when querying materialized views because CockroachDB does not [collect statistics]({% link {{ page.version.version }}/cost-based-optimizer.md %}#table-statistics) on materialized views. [#78181](https://github.com/cockroachdb/cockroach/issues/78181). +- The optimizer may not select the most optimal query plan when querying materialized views because CockroachDB does not [collect statistics]({% link {{ page.version.version }}/cost-based-optimizer.md %}#table-statistics) on materialized views. #78181. diff --git a/src/current/_includes/v25.3/known-limitations/multiple-arbiter-indexes.md b/src/current/_includes/v25.3/known-limitations/multiple-arbiter-indexes.md index c9861623314..28b41eca491 100644 --- a/src/current/_includes/v25.3/known-limitations/multiple-arbiter-indexes.md +++ b/src/current/_includes/v25.3/known-limitations/multiple-arbiter-indexes.md @@ -1 +1 @@ -CockroachDB does not currently support multiple arbiter indexes for [`INSERT ON CONFLICT DO UPDATE`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause), and will return an error if there are multiple unique or exclusion constraints matching the `ON CONFLICT DO UPDATE` specification. 
[#53170](https://github.com/cockroachdb/cockroach/issues/53170) \ No newline at end of file +CockroachDB does not currently support multiple arbiter indexes for [`INSERT ON CONFLICT DO UPDATE`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause), and will return an error if there are multiple unique or exclusion constraints matching the `ON CONFLICT DO UPDATE` specification. #53170 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/plpgsql-limitations.md b/src/current/_includes/v25.3/known-limitations/plpgsql-limitations.md index c17954748a0..6050d209753 100644 --- a/src/current/_includes/v25.3/known-limitations/plpgsql-limitations.md +++ b/src/current/_includes/v25.3/known-limitations/plpgsql-limitations.md @@ -1,25 +1,25 @@ {% if page.name != "known-limitations.md" # New limitations in v24.2 %} {% endif %} -- It is not possible to use a variable as a target more than once in the same `INTO` clause. For example, `SELECT 1, 2 INTO x, x;`. [#121605](https://github.com/cockroachdb/cockroach/issues/121605) -- PLpgSQL variable declarations cannot inherit the type of a table row or column using `%TYPE` or `%ROWTYPE` syntax. [#114676](https://github.com/cockroachdb/cockroach/issues/114676) -- PL/pgSQL arguments cannot be referenced with ordinals (e.g., `$1`, `$2`). [#114701](https://github.com/cockroachdb/cockroach/issues/114701) +- It is not possible to use a variable as a target more than once in the same `INTO` clause. For example, `SELECT 1, 2 INTO x, x;`. #121605 +- PLpgSQL variable declarations cannot inherit the type of a table row or column using `%TYPE` or `%ROWTYPE` syntax. #114676 +- PL/pgSQL arguments cannot be referenced with ordinals (e.g., `$1`, `$2`). #114701 - The following statements are not supported: - - `FOR` cursor loops, `FOR` query loops, and `FOREACH` loops. [#105246](https://github.com/cockroachdb/cockroach/issues/105246) - - `PERFORM`, `EXECUTE`, `GET DIAGNOSTICS`, and `CASE`. 
[#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- PL/pgSQL exception blocks cannot catch [transaction retry errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}). [#111446](https://github.com/cockroachdb/cockroach/issues/111446) -- `RAISE` statements cannot be annotated with names of schema objects related to the error (i.e., using `COLUMN`, `CONSTRAINT`, `DATATYPE`, `TABLE`, or `SCHEMA`). [#106237](https://github.com/cockroachdb/cockroach/issues/106237) -- `RAISE` statements message the client directly, and do not produce log output. [#117750](https://github.com/cockroachdb/cockroach/issues/117750) -- `ASSERT` debugging checks are not supported. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- `RECORD` parameters and variables are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). [#105713](https://github.com/cockroachdb/cockroach/issues/105713) -- Variable shadowing (e.g., declaring a variable with the same name in an inner block) is not supported in PL/pgSQL. [#117508](https://github.com/cockroachdb/cockroach/issues/117508) -- Syntax for accessing members of composite types without parentheses is not supported. [#114687](https://github.com/cockroachdb/cockroach/issues/114687) -- `NOT NULL` variable declarations are not supported. [#105243](https://github.com/cockroachdb/cockroach/issues/105243) -- Cursors opened in PL/pgSQL execute their queries on opening, affecting performance and resource usage. [#111479](https://github.com/cockroachdb/cockroach/issues/111479) -- Cursors in PL/pgSQL cannot be declared with arguments. [#117746](https://github.com/cockroachdb/cockroach/issues/117746) -- `OPEN FOR EXECUTE` is not supported for opening cursors. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- The `print_strict_params` option is not supported in PL/pgSQL. 
[#123671](https://github.com/cockroachdb/cockroach/issues/123671) -- The `FOUND` local variable, which checks whether a statement affected any rows, is not supported in PL/pgSQL. [#122306](https://github.com/cockroachdb/cockroach/issues/122306) -- By default, when a PL/pgSQL variable conflicts with a column name, CockroachDB resolves the ambiguity by treating it as a column reference rather than a variable reference. This behavior differs from PostgreSQL, where an ambiguous column error is reported, and it is possible to change the `plpgsql.variable_conflict` setting in order to prefer either columns or variables. [#115680](https://github.com/cockroachdb/cockroach/issues/115680) -- It is not possible to define a `RECORD`-returning PL/pgSQL function that returns different-typed expressions from different `RETURN` statements. CockroachDB requires a consistent return type for `RECORD`-returning functions. [#115384](https://github.com/cockroachdb/cockroach/issues/115384) -- Variables cannot be declared with an associated collation using the `COLLATE` keyword. [#105245](https://github.com/cockroachdb/cockroach/issues/105245) -- Variables cannot be accessed using the `label.var_name` pattern. [#122322](https://github.com/cockroachdb/cockroach/issues/122322) \ No newline at end of file + - `FOR` cursor loops, `FOR` query loops, and `FOREACH` loops. #105246 + - `PERFORM`, `EXECUTE`, `GET DIAGNOSTICS`, and `CASE`. #117744 +- PL/pgSQL exception blocks cannot catch [transaction retry errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}). #111446 +- `RAISE` statements cannot be annotated with names of schema objects related to the error (i.e., using `COLUMN`, `CONSTRAINT`, `DATATYPE`, `TABLE`, or `SCHEMA`). #106237 +- `RAISE` statements message the client directly, and do not produce log output. #117750 +- `ASSERT` debugging checks are not supported. 
#117744 +- `RECORD` parameters and variables are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). #105713 +- Variable shadowing (e.g., declaring a variable with the same name in an inner block) is not supported in PL/pgSQL. #117508 +- Syntax for accessing members of composite types without parentheses is not supported. #114687 +- `NOT NULL` variable declarations are not supported. #105243 +- Cursors opened in PL/pgSQL execute their queries on opening, affecting performance and resource usage. #111479 +- Cursors in PL/pgSQL cannot be declared with arguments. #117746 +- `OPEN FOR EXECUTE` is not supported for opening cursors. #117744 +- The `print_strict_params` option is not supported in PL/pgSQL. #123671 +- The `FOUND` local variable, which checks whether a statement affected any rows, is not supported in PL/pgSQL. #122306 +- By default, when a PL/pgSQL variable conflicts with a column name, CockroachDB resolves the ambiguity by treating it as a column reference rather than a variable reference. This behavior differs from PostgreSQL, where an ambiguous column error is reported, and it is possible to change the `plpgsql.variable_conflict` setting in order to prefer either columns or variables. #115680 +- It is not possible to define a `RECORD`-returning PL/pgSQL function that returns different-typed expressions from different `RETURN` statements. CockroachDB requires a consistent return type for `RECORD`-returning functions. #115384 +- Variables cannot be declared with an associated collation using the `COLLATE` keyword. #105245 +- Variables cannot be accessed using the `label.var_name` pattern. 
#122322 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/read-committed-limitations.md b/src/current/_includes/v25.3/known-limitations/read-committed-limitations.md index 63f83b15dd8..1f466997ed8 100644 --- a/src/current/_includes/v25.3/known-limitations/read-committed-limitations.md +++ b/src/current/_includes/v25.3/known-limitations/read-committed-limitations.md @@ -1,6 +1,6 @@ -- Schema changes (e.g., [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}), [`CREATE SCHEMA`]({% link {{ page.version.version }}/create-schema.md %}), [`CREATE INDEX`]({% link {{ page.version.version }}/create-index.md %})) cannot be performed within explicit `READ COMMITTED` transactions when the [`autocommit_before_ddl` session setting]({% link {{page.version.version}}/set-vars.md %}#autocommit-before-ddl) is set to `off`, and will cause transactions to abort. As a workaround, [set the transaction's isolation level]({% link {{ page.version.version }}/read-committed.md %}#set-the-current-transaction-to-read-committed) to `SERIALIZABLE`. [#114778](https://github.com/cockroachdb/cockroach/issues/114778) -- Multi-column-family checks during updates are not supported under `READ COMMITTED` isolation. [#112488](https://github.com/cockroachdb/cockroach/issues/112488) +- Schema changes (e.g., [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}), [`CREATE SCHEMA`]({% link {{ page.version.version }}/create-schema.md %}), [`CREATE INDEX`]({% link {{ page.version.version }}/create-index.md %})) cannot be performed within explicit `READ COMMITTED` transactions when the [`autocommit_before_ddl` session setting]({% link {{page.version.version}}/set-vars.md %}#autocommit-before-ddl) is set to `off`, and will cause transactions to abort. As a workaround, [set the transaction's isolation level]({% link {{ page.version.version }}/read-committed.md %}#set-the-current-transaction-to-read-committed) to `SERIALIZABLE`. 
#114778 +- Multi-column-family checks during updates are not supported under `READ COMMITTED` isolation. #112488 - Because locks acquired by [foreign key]({% link {{ page.version.version }}/foreign-key.md %}) checks, [`SELECT FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}), and [`SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) are fully replicated under `READ COMMITTED` isolation, some queries experience a delay for Raft replication. - [Foreign key]({% link {{ page.version.version }}/foreign-key.md %}) checks are not performed in parallel under `READ COMMITTED` isolation. - [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements are less optimized under `READ COMMITTED` isolation than under `SERIALIZABLE` isolation. Under `READ COMMITTED` isolation, `SELECT FOR UPDATE` and `SELECT FOR SHARE` usually perform an extra lookup join for every locked table when compared to the same queries under `SERIALIZABLE`. In addition, some optimization steps (such as de-correlation of correlated [subqueries]({% link {{ page.version.version }}/subqueries.md %})) are not currently performed on these queries. -- Regardless of isolation level, [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements in CockroachDB do not prevent insertion of new rows matching the search condition (i.e., [phantom reads]({% link {{ page.version.version }}/read-committed.md %}#non-repeatable-reads-and-phantom-reads)). This matches PostgreSQL behavior at all isolation levels. 
[#120673](https://github.com/cockroachdb/cockroach/issues/120673) \ No newline at end of file +- Regardless of isolation level, [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements in CockroachDB do not prevent insertion of new rows matching the search condition (i.e., [phantom reads]({% link {{ page.version.version }}/read-committed.md %}#non-repeatable-reads-and-phantom-reads)). This matches PostgreSQL behavior at all isolation levels. #120673 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/restore-multiregion-match.md b/src/current/_includes/v25.3/known-limitations/restore-multiregion-match.md index ab2f1029ecd..634d695246e 100644 --- a/src/current/_includes/v25.3/known-limitations/restore-multiregion-match.md +++ b/src/current/_includes/v25.3/known-limitations/restore-multiregion-match.md @@ -47,4 +47,4 @@ ALTER DATABASE destination_database SET PRIMARY REGION "us-east1"; ~~~ - [#71071](https://github.com/cockroachdb/cockroach/issues/71071) \ No newline at end of file + #71071 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/restore-tables-non-multi-reg.md b/src/current/_includes/v25.3/known-limitations/restore-tables-non-multi-reg.md index 5390f2d09ee..e05bc340141 100644 --- a/src/current/_includes/v25.3/known-limitations/restore-tables-non-multi-reg.md +++ b/src/current/_includes/v25.3/known-limitations/restore-tables-non-multi-reg.md @@ -1 +1 @@ -Restoring [`GLOBAL`]({% link {{ page.version.version }}/table-localities.md %}#global-tables) and [`REGIONAL BY TABLE`]({% link {{ page.version.version }}/table-localities.md %}#regional-tables) tables into a **non**-multi-region database is not supported. 
[#71502](https://github.com/cockroachdb/cockroach/issues/71502) +Restoring [`GLOBAL`]({% link {{ page.version.version }}/table-localities.md %}#global-tables) and [`REGIONAL BY TABLE`]({% link {{ page.version.version }}/table-localities.md %}#regional-tables) tables into a **non**-multi-region database is not supported. #71502 diff --git a/src/current/_includes/v25.3/known-limitations/restore-udf.md b/src/current/_includes/v25.3/known-limitations/restore-udf.md index a4a4bc080fe..e4180219a25 100644 --- a/src/current/_includes/v25.3/known-limitations/restore-udf.md +++ b/src/current/_includes/v25.3/known-limitations/restore-udf.md @@ -1 +1 @@ -`RESTORE` will not restore a table that references a [UDF]({% link {{ page.version.version }}/user-defined-functions.md %}), unless you skip restoring the function with the {% if page.name == "restore.md" %} [`skip_missing_udfs`](#skip-missing-udfs) {% else %} [`skip_missing_udfs`]({% link {{ page.version.version }}/restore.md %}#skip-missing-udfs) {% endif %} option. Alternatively, take a [database-level backup]({% link {{ page.version.version }}/backup.md %}#back-up-a-database) to include everything needed to restore the table. [#118195](https://github.com/cockroachdb/cockroach/issues/118195) \ No newline at end of file +`RESTORE` will not restore a table that references a [UDF]({% link {{ page.version.version }}/user-defined-functions.md %}), unless you skip restoring the function with the {% if page.name == "restore.md" %} [`skip_missing_udfs`](#skip-missing-udfs) {% else %} [`skip_missing_udfs`]({% link {{ page.version.version }}/restore.md %}#skip-missing-udfs) {% endif %} option. Alternatively, take a [database-level backup]({% link {{ page.version.version }}/backup.md %}#back-up-a-database) to include everything needed to restore the table. 
#118195 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/rls-update-set-where-returning.md b/src/current/_includes/v25.3/known-limitations/rls-update-set-where-returning.md index 450de11683e..e6f2b296ba4 100644 --- a/src/current/_includes/v25.3/known-limitations/rls-update-set-where-returning.md +++ b/src/current/_includes/v25.3/known-limitations/rls-update-set-where-returning.md @@ -1 +1 @@ -`UPDATE` statements whose `SET`, `WHERE`, or `RETURNING` clauses do not read existing column values can be mistakenly filtered by row-level security `SELECT` policies, causing the statement to affect no rows. [#145894](https://github.com/cockroachdb/cockroach/issues/145894) \ No newline at end of file +`UPDATE` statements whose `SET`, `WHERE`, or `RETURNING` clauses do not read existing column values can be mistakenly filtered by row-level security `SELECT` policies, causing the statement to affect no rows. #145894 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/rls-values-on-conflict-do-nothing.md b/src/current/_includes/v25.3/known-limitations/rls-values-on-conflict-do-nothing.md index c85dea7987a..d274e573dc9 100644 --- a/src/current/_includes/v25.3/known-limitations/rls-values-on-conflict-do-nothing.md +++ b/src/current/_includes/v25.3/known-limitations/rls-values-on-conflict-do-nothing.md @@ -1 +1 @@ -`ON CONFLICT ... DO NOTHING`: CockroachDB does not run the constraint and row-level policy checks on the `VALUES` clause if the candidate row has a conflict. [#35370](https://github.com/cockroachdb/cockroach/issues/35370). +`ON CONFLICT ... DO NOTHING`: CockroachDB does not run the constraint and row-level policy checks on the `VALUES` clause if the candidate row has a conflict. #35370. 
diff --git a/src/current/_includes/v25.3/known-limitations/rls-visibility-issue.md b/src/current/_includes/v25.3/known-limitations/rls-visibility-issue.md index 453059825a3..5d16325382f 100644 --- a/src/current/_includes/v25.3/known-limitations/rls-visibility-issue.md +++ b/src/current/_includes/v25.3/known-limitations/rls-visibility-issue.md @@ -1 +1 @@ -Under certain conditions, such as when executing certain SQL functions, CockroachDB's row-level security (RLS) implementation exposes metadata about the number of restricted rows in a table. For example, when a user applies arbitrary SQL filters on a table with RLS enabled, it's possible for the user to see how many total rows are in the table; this count includes rows that the user does not have direct access to. This metadata leakage can also occur when statements like [`EXPLAIN ANALYZE`]({% link {{ page.version.version }}/explain.md %}) are used, as the output includes a count of the number of rows scanned by the query that can include the number of restricted rows, even though the rows themselves are not directly accessible. [#146952](https://github.com/cockroachdb/cockroach/issues/146952) +Under certain conditions, such as when executing certain SQL functions, CockroachDB's row-level security (RLS) implementation exposes metadata about the number of restricted rows in a table. For example, when a user applies arbitrary SQL filters on a table with RLS enabled, it's possible for the user to see how many total rows are in the table; this count includes rows that the user does not have direct access to. This metadata leakage can also occur when statements like [`EXPLAIN ANALYZE`]({% link {{ page.version.version }}/explain.md %}) are used, as the output includes a count of the number of rows scanned by the query that can include the number of restricted rows, even though the rows themselves are not directly accessible. 
#146952 diff --git a/src/current/_includes/v25.3/known-limitations/routine-limitations.md b/src/current/_includes/v25.3/known-limitations/routine-limitations.md index 4718c6c7abf..fa16791b56a 100644 --- a/src/current/_includes/v25.3/known-limitations/routine-limitations.md +++ b/src/current/_includes/v25.3/known-limitations/routine-limitations.md @@ -1,10 +1,10 @@ {% if page.name != "known-limitations.md" # New limitations in v24.2 %} {% endif %} -- Routines cannot be invoked with named arguments, e.g., `SELECT foo(a => 1, b => 2);` or `SELECT foo(b := 1, a := 2);`. [#122264](https://github.com/cockroachdb/cockroach/issues/122264) -- Routines cannot be created if they reference temporary tables. [#121375](https://github.com/cockroachdb/cockroach/issues/121375) -- Routines cannot be created with unnamed `INOUT` parameters. For example, `CREATE PROCEDURE p(INOUT INT) AS $$ BEGIN NULL; END; $$ LANGUAGE PLpgSQL;`. [#121251](https://github.com/cockroachdb/cockroach/issues/121251) -- Routines cannot be created if they return fewer columns than declared. For example, `CREATE FUNCTION f(OUT sum INT, INOUT a INT, INOUT b INT) LANGUAGE SQL AS $$ SELECT (a + b, b); $$;`. [#121247](https://github.com/cockroachdb/cockroach/issues/121247) -- Routines cannot be created with an `OUT` parameter of type `RECORD`. [#123448](https://github.com/cockroachdb/cockroach/issues/123448) -- DDL statements (e.g., `CREATE TABLE`, `CREATE INDEX`) are not allowed within UDFs or stored procedures. [#110080](https://github.com/cockroachdb/cockroach/issues/110080) -- Polymorphic types cannot be cast to other types (e.g., `TEXT`) within routine parameters. [#123536](https://github.com/cockroachdb/cockroach/issues/123536) -- Routine parameters and return types cannot be declared using the `ANYENUM` polymorphic type, which is able to match any [`ENUM`]({% link {{ page.version.version }}/enum.md %}) type. 
[123048](https://github.com/cockroachdb/cockroach/issues/123048) \ No newline at end of file +- Routines cannot be invoked with named arguments, e.g., `SELECT foo(a => 1, b => 2);` or `SELECT foo(b := 1, a := 2);`. #122264 +- Routines cannot be created if they reference temporary tables. #121375 +- Routines cannot be created with unnamed `INOUT` parameters. For example, `CREATE PROCEDURE p(INOUT INT) AS $$ BEGIN NULL; END; $$ LANGUAGE PLpgSQL;`. #121251 +- Routines cannot be created if they return fewer columns than declared. For example, `CREATE FUNCTION f(OUT sum INT, INOUT a INT, INOUT b INT) LANGUAGE SQL AS $$ SELECT (a + b, b); $$;`. #121247 +- Routines cannot be created with an `OUT` parameter of type `RECORD`. #123448 +- DDL statements (e.g., `CREATE TABLE`, `CREATE INDEX`) are not allowed within UDFs or stored procedures. #110080 +- Polymorphic types cannot be cast to other types (e.g., `TEXT`) within routine parameters. #123536 +- Routine parameters and return types cannot be declared using the `ANYENUM` polymorphic type, which is able to match any [`ENUM`]({% link {{ page.version.version }}/enum.md %}) type. #123048 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/select-for-update-limitations.md b/src/current/_includes/v25.3/known-limitations/select-for-update-limitations.md index 1ed68e692e4..e599353243a 100644 --- a/src/current/_includes/v25.3/known-limitations/select-for-update-limitations.md +++ b/src/current/_includes/v25.3/known-limitations/select-for-update-limitations.md @@ -1,4 +1,4 @@ -- `SKIP LOCKED` cannot be used for tables with multiple [column families]({% link {{ page.version.version }}/column-families.md %}). [#160961](https://github.com/cockroachdb/cockroach/issues/160961) +- `SKIP LOCKED` cannot be used for tables with multiple [column families]({% link {{ page.version.version }}/column-families.md %}). #160961 - By default under `SERIALIZABLE` isolation, locks acquired using `SELECT ... 
FOR UPDATE` and `SELECT ... FOR SHARE` are implemented as fast, in-memory [unreplicated locks](architecture/transaction-layer.html#unreplicated-locks). If a [lease transfer]({% link {{ page.version.version }}/architecture/replication-layer.md %}#leases) or [range split/merge]({% link {{ page.version.version }}/architecture/distribution-layer.md %}#range-merges) occurs on a range held by an unreplicated lock, the lock is dropped. The following behaviors can occur: - The desired ordering of concurrent accesses to one or more rows of a table expressed by your use of `SELECT ... FOR UPDATE` may not be preserved (that is, a transaction _B_ against some table _T_ that was supposed to wait behind another transaction _A_ operating on _T_ may not wait for transaction _A_). - The transaction that acquired the (now dropped) unreplicated lock may fail to commit, leading to [transaction retry errors with code `40001`]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}) and the [`restart transaction` error message]({% link {{ page.version.version }}/common-errors.md %}#restart-transaction). 
diff --git a/src/current/_includes/v25.3/known-limitations/set-transaction-no-rollback.md b/src/current/_includes/v25.3/known-limitations/set-transaction-no-rollback.md index e70e3c32533..bc998f44d9e 100644 --- a/src/current/_includes/v25.3/known-limitations/set-transaction-no-rollback.md +++ b/src/current/_includes/v25.3/known-limitations/set-transaction-no-rollback.md @@ -33,4 +33,4 @@ SHOW TIME ZONE; ~~~ -[#69396](https://github.com/cockroachdb/cockroach/issues/69396), [#148766](https://github.com/cockroachdb/cockroach/issues/148766) \ No newline at end of file +#69396, #148766 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/show-backup-symlink.md b/src/current/_includes/v25.3/known-limitations/show-backup-symlink.md index 38ba86fb28f..1c7a5612242 100644 --- a/src/current/_includes/v25.3/known-limitations/show-backup-symlink.md +++ b/src/current/_includes/v25.3/known-limitations/show-backup-symlink.md @@ -1 +1 @@ -[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}) does not support listing backups if the [`nodelocal`]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}) storage location is a symlink. [#70260](https://github.com/cockroachdb/cockroach/issues/70260) \ No newline at end of file +[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}) does not support listing backups if the [`nodelocal`]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}) storage location is a symlink. 
#70260 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/sql-cursors.md b/src/current/_includes/v25.3/known-limitations/sql-cursors.md index bceff96d5a6..1924400e85d 100644 --- a/src/current/_includes/v25.3/known-limitations/sql-cursors.md +++ b/src/current/_includes/v25.3/known-limitations/sql-cursors.md @@ -1,8 +1,8 @@ CockroachDB implements SQL {% if page.name == "known-limitations.md" %} [cursor]({% link {{ page.version.version }}/cursors.md %}) {% else %} cursor {% endif %} support with the following limitations: -- `DECLARE` only supports forward cursors. Reverse cursors created with `DECLARE SCROLL` are not supported. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- `FETCH` supports forward, relative, and absolute variants, but only for forward cursors. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- `BINARY CURSOR`, which returns data in the Postgres binary format, is not supported. [#77099](https://github.com/cockroachdb/cockroach/issues/77099) -- Scrollable cursor (also known as reverse `FETCH`) is not supported. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- [`SELECT ... FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) with a cursor is not supported. [#77103](https://github.com/cockroachdb/cockroach/issues/77103) -- Respect for [`SAVEPOINT`s]({% link {{ page.version.version }}/savepoint.md %}) is not supported. Cursor definitions do not disappear properly if rolled back to a `SAVEPOINT` from before they were created. [#77104](https://github.com/cockroachdb/cockroach/issues/77104) +- `DECLARE` only supports forward cursors. Reverse cursors created with `DECLARE SCROLL` are not supported. #77102 +- `FETCH` supports forward, relative, and absolute variants, but only for forward cursors. #77102 +- `BINARY CURSOR`, which returns data in the Postgres binary format, is not supported. 
#77099 +- Scrollable cursor (also known as reverse `FETCH`) is not supported. #77102 +- [`SELECT ... FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) with a cursor is not supported. #77103 +- Respect for [`SAVEPOINT`s]({% link {{ page.version.version }}/savepoint.md %}) is not supported. Cursor definitions do not disappear properly if rolled back to a `SAVEPOINT` from before they were created. #77104 diff --git a/src/current/_includes/v25.3/known-limitations/srid-4326-limitations.md b/src/current/_includes/v25.3/known-limitations/srid-4326-limitations.md index b556a9fbecd..294a1a53bd4 100644 --- a/src/current/_includes/v25.3/known-limitations/srid-4326-limitations.md +++ b/src/current/_includes/v25.3/known-limitations/srid-4326-limitations.md @@ -1 +1 @@ -Defining a custom SRID by inserting rows into [`spatial_ref_sys`]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial_ref_sys) is not currently supported. [#55903](https://github.com/cockroachdb/cockroach/issues/55903) \ No newline at end of file +Defining a custom SRID by inserting rows into [`spatial_ref_sys`]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial_ref_sys) is not currently supported. #55903 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/stats-refresh-upgrade.md b/src/current/_includes/v25.3/known-limitations/stats-refresh-upgrade.md index 3d5a8d26325..3325dfaa53b 100644 --- a/src/current/_includes/v25.3/known-limitations/stats-refresh-upgrade.md +++ b/src/current/_includes/v25.3/known-limitations/stats-refresh-upgrade.md @@ -1 +1 @@ -- The [automatic statistics refresher]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-statistics-refresh-rate) automatically checks whether it needs to refresh statistics for every table in the database upon startup of each node in the cluster. 
If statistics for a table have not been refreshed in a while, this will trigger collection of statistics for that table. If statistics have been refreshed recently, it will not force a refresh. As a result, the automatic statistics refresher does not necessarily perform a refresh of statistics after an [upgrade]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}). This could cause a problem, for example, if the upgrade moves from a version without [histograms]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-histogram-collection) to a version with histograms. To refresh statistics manually, use [`CREATE STATISTICS`](create-statistics.html). [#54816](https://github.com/cockroachdb/cockroach/issues/54816) +- The [automatic statistics refresher]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-statistics-refresh-rate) automatically checks whether it needs to refresh statistics for every table in the database upon startup of each node in the cluster. If statistics for a table have not been refreshed in a while, this will trigger collection of statistics for that table. If statistics have been refreshed recently, it will not force a refresh. As a result, the automatic statistics refresher does not necessarily perform a refresh of statistics after an [upgrade]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}). This could cause a problem, for example, if the upgrade moves from a version without [histograms]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-histogram-collection) to a version with histograms. To refresh statistics manually, use [`CREATE STATISTICS`](create-statistics.html). 
#54816 diff --git a/src/current/_includes/v25.3/known-limitations/stored-proc-limitations.md b/src/current/_includes/v25.3/known-limitations/stored-proc-limitations.md index b2ba1b61562..bce2e47d6b8 100644 --- a/src/current/_includes/v25.3/known-limitations/stored-proc-limitations.md +++ b/src/current/_includes/v25.3/known-limitations/stored-proc-limitations.md @@ -1,3 +1,3 @@ {% if page.name != "known-limitations.md" # New limitations in v24.2 %} {% endif %} -- `COMMIT` and `ROLLBACK` statements are not supported within nested procedures. [#122266](https://github.com/cockroachdb/cockroach/issues/122266) \ No newline at end of file +- `COMMIT` and `ROLLBACK` statements are not supported within nested procedures. #122266 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/trigger-limitations.md b/src/current/_includes/v25.3/known-limitations/trigger-limitations.md index 7373cd486e7..f2514d24113 100644 --- a/src/current/_includes/v25.3/known-limitations/trigger-limitations.md +++ b/src/current/_includes/v25.3/known-limitations/trigger-limitations.md @@ -1,6 +1,6 @@ -- `CREATE OR REPLACE TRIGGER` is not supported. [#128422](https://github.com/cockroachdb/cockroach/issues/128422) -- Statement-level triggers are not supported. [#126362](https://github.com/cockroachdb/cockroach/issues/126362) -- `INSTEAD OF` triggers are not supported. [#126363](https://github.com/cockroachdb/cockroach/issues/126363) -- A [trigger function]({% link {{ page.version.version }}/triggers.md %}#trigger-function) that is used in an existing trigger cannot be replaced with `CREATE OR REPLACE` syntax. To use `CREATE OR REPLACE`, first [drop any triggers]({% link {{ page.version.version }}/drop-trigger.md %}) that are using the function. [#134555](https://github.com/cockroachdb/cockroach/issues/134555) -- Hidden columns are not visible to triggers. [#133331](https://github.com/cockroachdb/cockroach/issues/133331) +- `CREATE OR REPLACE TRIGGER` is not supported. 
#128422 +- Statement-level triggers are not supported. #126362 +- `INSTEAD OF` triggers are not supported. #126363 +- A [trigger function]({% link {{ page.version.version }}/triggers.md %}#trigger-function) that is used in an existing trigger cannot be replaced with `CREATE OR REPLACE` syntax. To use `CREATE OR REPLACE`, first [drop any triggers]({% link {{ page.version.version }}/drop-trigger.md %}) that are using the function. #134555 +- Hidden columns are not visible to triggers. #133331 - {% include {{ page.version.version }}/known-limitations/drop-trigger-limitations.md %} \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/trigram-unsupported-syntax.md b/src/current/_includes/v25.3/known-limitations/trigram-unsupported-syntax.md index 494730c7ae8..a981c4915b0 100644 --- a/src/current/_includes/v25.3/known-limitations/trigram-unsupported-syntax.md +++ b/src/current/_includes/v25.3/known-limitations/trigram-unsupported-syntax.md @@ -6,4 +6,4 @@ - Acceleration on [regex string matching]({% link {{ page.version.version }}/scalar-expressions.md %}#string-matching-using-posix-regular-expressions). - `%` comparisons, `show_trgm`, and trigram index creation on [collated strings]({% link {{ page.version.version }}/collate.md %}). -[#41285](https://github.com/cockroachdb/cockroach/issues/41285) \ No newline at end of file +#41285 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/udf-limitations.md b/src/current/_includes/v25.3/known-limitations/udf-limitations.md index cd749ced7e4..5bd28ddf1d1 100644 --- a/src/current/_includes/v25.3/known-limitations/udf-limitations.md +++ b/src/current/_includes/v25.3/known-limitations/udf-limitations.md @@ -1,8 +1,8 @@ {% if page.name != "known-limitations.md" # New limitations in v24.2 %} {% endif %} -- A `RECORD`-returning UDF cannot be created without a `RETURN` statement in the root block, which would restrict the wildcard type to a concrete one. 
[#122945](https://github.com/cockroachdb/cockroach/issues/122945) +- A `RECORD`-returning UDF cannot be created without a `RETURN` statement in the root block, which would restrict the wildcard type to a concrete one. #122945 - User-defined functions are not currently supported in: - - Expressions (column, index, constraint) in tables. [#87699](https://github.com/cockroachdb/cockroach/issues/87699) -- User-defined functions cannot call themselves recursively. [#93049](https://github.com/cockroachdb/cockroach/issues/93049) -- The `setval` function cannot be resolved when used inside UDF bodies. [#110860](https://github.com/cockroachdb/cockroach/issues/110860) -- Casting subqueries to [user-defined types]({% link {{ page.version.version }}/create-type.md %}) in UDFs is not supported. [#108184](https://github.com/cockroachdb/cockroach/issues/108184) \ No newline at end of file + - Expressions (column, index, constraint) in tables. #87699 +- User-defined functions cannot call themselves recursively. #93049 +- The `setval` function cannot be resolved when used inside UDF bodies. #110860 +- Casting subqueries to [user-defined types]({% link {{ page.version.version }}/create-type.md %}) in UDFs is not supported. #108184 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/vector-limitations.md b/src/current/_includes/v25.3/known-limitations/vector-limitations.md index 97ed7c47599..053087efe7e 100644 --- a/src/current/_includes/v25.3/known-limitations/vector-limitations.md +++ b/src/current/_includes/v25.3/known-limitations/vector-limitations.md @@ -1,5 +1,5 @@ - {% include {{ page.version.version }}/sql/vector-batch-inserts.md %} -- `IMPORT INTO` is not supported on tables with vector indexes. You can import the vectors first and create the index after import is complete. 
[#145227](https://github.com/cockroachdb/cockroach/issues/145227) -- The distance functions `vector_l1_ops`, `bit_hamming_ops`, and `bit_jaccard_ops` are not implemented. [#147839](https://github.com/cockroachdb/cockroach/issues/147839) -- Index acceleration with filters is only supported if the filters match prefix columns. [#146145](https://github.com/cockroachdb/cockroach/issues/146145) -- Index recommendations are not provided for vector indexes. [#146146](https://github.com/cockroachdb/cockroach/issues/146146) \ No newline at end of file +- `IMPORT INTO` is not supported on tables with vector indexes. You can import the vectors first and create the index after import is complete. #145227 +- The distance functions `vector_l1_ops`, `bit_hamming_ops`, and `bit_jaccard_ops` are not implemented. #147839 +- Index acceleration with filters is only supported if the filters match prefix columns. #146145 +- Index recommendations are not provided for vector indexes. #146146 \ No newline at end of file diff --git a/src/current/_includes/v25.3/known-limitations/vectorized-engine-limitations.md b/src/current/_includes/v25.3/known-limitations/vectorized-engine-limitations.md index daea59ebf88..da227ad4825 100644 --- a/src/current/_includes/v25.3/known-limitations/vectorized-engine-limitations.md +++ b/src/current/_includes/v25.3/known-limitations/vectorized-engine-limitations.md @@ -1,2 +1,2 @@ -- The vectorized engine does not support queries containing a join filtered with an [`ON` expression]({% link {{ page.version.version }}/joins.md %}#supported-join-conditions). [#38018](https://github.com/cockroachdb/cockroach/issues/38018) +- The vectorized engine does not support queries containing a join filtered with an [`ON` expression]({% link {{ page.version.version }}/joins.md %}#supported-join-conditions). #38018 - The vectorized engine does not support [working with spatial data]({% link {{ page.version.version }}/export-spatial-data.md %}). 
Queries with [geospatial functions]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) or [spatial data]({% link {{ page.version.version }}/export-spatial-data.md %}) will revert to the row-oriented execution engine. diff --git a/src/current/_includes/v25.3/misc/tooling.md b/src/current/_includes/v25.3/misc/tooling.md index dcd24363435..01dc63c840f 100644 --- a/src/current/_includes/v25.3/misc/tooling.md +++ b/src/current/_includes/v25.3/misc/tooling.md @@ -23,7 +23,7 @@ Customers should contact their account team before moving production workloads t Unless explicitly stated, support for a [driver](#drivers) or [data access framework](#data-access-frameworks-e-g-orms) does not include [automatic, client-side transaction retry handling]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#client-side-retry-handling). For client-side transaction retry handling samples, see [Example Apps]({% link {{ page.version.version }}/example-apps.md %}). {{site.data.alerts.end}} -If you encounter problems using CockroachDB with any of the tools listed on this page, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward better support. +If you encounter problems using CockroachDB with any of the tools listed on this page, please open an issue with details to help us make progress toward better support. For a list of tools supported by the CockroachDB community, see [Third-Party Tools Supported by the Community]({% link {{ page.version.version }}/community-tooling.md %}). 
@@ -32,23 +32,23 @@ For a list of tools supported by the CockroachDB community, see [Third-Party Too | Language | Driver | Latest tested version | Support level | CockroachDB adapter | Tutorial | |----------+--------+-----------------------+---------------------+---------------------+----------| | C | [libpq](http://www.postgresql.org/docs/13/static/libpq.html)| PostgreSQL 13 | Partial | N/A | N/A | -| C# (.NET) | [Npgsql](https://www.nuget.org/packages/Npgsql/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/npgsql.go ||var npgsqlSupportedTag = "v||"\n\n %} | Full | N/A | [Build a C# App with CockroachDB (Npgsql)](build-a-csharp-app-with-cockroachdb.html) | -| Go | [pgx](https://github.com/jackc/pgx/releases)


[pq](https://github.com/lib/pq) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgx.go ||var supportedPGXTag = "||"\n\n %}
(use latest version of CockroachDB adapter)
{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/libpq.go ||var libPQSupportedTag = "||"\n\n %} | Full


Full | [`crdbpgx`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbpgx)
(includes client-side transaction retry handling)
N/A | [Build a Go App with CockroachDB (pgx)](build-a-go-app-with-cockroachdb.html)


[Build a Go App with CockroachDB (pq)](build-a-go-app-with-cockroachdb-pq.html) | -| Java | [JDBC](https://jdbc.postgresql.org/download/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgjdbc.go ||var supportedPGJDBCTag = "||"\n\n %} | Full | N/A | [Build a Java App with CockroachDB (JDBC)](build-a-java-app-with-cockroachdb.html) | +| C# (.NET) | [Npgsql](https://www.nuget.org/packages/Npgsql/) | 7.0.2 | Full | N/A | [Build a C# App with CockroachDB (Npgsql)](build-a-csharp-app-with-cockroachdb.html) | +| Go | [pgx](https://github.com/jackc/pgx/releases)


[pq](https://github.com/lib/pq) | v5.3.1
(use latest version of CockroachDB adapter)
v1.10.5 | Full


Full | [`crdbpgx`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbpgx)
(includes client-side transaction retry handling)
N/A | [Build a Go App with CockroachDB (pgx)](build-a-go-app-with-cockroachdb.html)


[Build a Go App with CockroachDB (pq)](build-a-go-app-with-cockroachdb-pq.html) | +| Java | [JDBC](https://jdbc.postgresql.org/download/) | REL42.7.3 | Full | N/A | [Build a Java App with CockroachDB (JDBC)](build-a-java-app-with-cockroachdb.html) | | JavaScript | [pg](https://www.npmjs.com/package/pg) | 8.2.1 | Full | N/A | [Build a Node.js App with CockroachDB (pg)](build-a-nodejs-app-with-cockroachdb.html) | -| Python | [psycopg3](https://www.psycopg.org/psycopg3/docs/)


[psycopg2](https://www.psycopg.org/docs/install.html)


[asyncpg](https://magicstack.github.io/asyncpg/current/index.html) | 3.0.16


2.8.6


{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/asyncpg.go || var asyncpgSupportedTag = "||"\n\n %} | Full


Full


Partial | N/A


N/A


N/A | [Build a Python App with CockroachDB (psycopg3)](build-a-python-app-with-cockroachdb-psycopg3.html)


[Build a Python App with CockroachDB (psycopg2)](build-a-python-app-with-cockroachdb.html)


[Build a Python App with CockroachDB (asyncpg)](build-a-python-app-with-cockroachdb-asyncpg.html) | -| Ruby | [pg](https://rubygems.org/gems/pg) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/ruby_pg.go ||var rubyPGVersion = "||"\n\n %} | Full | N/A | [Build a Ruby App with CockroachDB (pg)](build-a-ruby-app-with-cockroachdb.html) | +| Python | [psycopg3](https://www.psycopg.org/psycopg3/docs/)


[psycopg2](https://www.psycopg.org/docs/install.html)


[asyncpg](https://magicstack.github.io/asyncpg/current/index.html) | 3.0.16


2.8.6


v0.24.0 | Full


Full


Partial | N/A


N/A


N/A | [Build a Python App with CockroachDB (psycopg3)](build-a-python-app-with-cockroachdb-psycopg3.html)


[Build a Python App with CockroachDB (psycopg2)](build-a-python-app-with-cockroachdb.html)


[Build a Python App with CockroachDB (asyncpg)](build-a-python-app-with-cockroachdb-asyncpg.html) | +| Ruby | [pg](https://rubygems.org/gems/pg) | v1.4.6 | Full | N/A | [Build a Ruby App with CockroachDB (pg)](build-a-ruby-app-with-cockroachdb.html) | | Rust | [rust-postgres](https://github.com/sfackler/rust-postgres) | 0.19.2 | Partial | N/A | [Build a Rust App with CockroachDB]({% link {{ page.version.version }}/build-a-rust-app-with-cockroachdb.md %}) | ## Data access frameworks (e.g., ORMs) | Language | Framework | Latest tested version | Support level | CockroachDB adapter | Tutorial | |----------+-----------+-----------------------+---------------+---------------------+----------| -| Go | [GORM](https://github.com/jinzhu/gorm/releases)


[go-pg](https://github.com/go-pg/pg)
[upper/db](https://github.com/upper/db) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gorm.go ||var gormSupportedTag = "||"\n\n %}


{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gopg.go ||var gopgSupportedTag = "||"\n\n %}
v4 | Full


Full
Full | [`crdbgorm`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbgorm)
(includes client-side transaction retry handling)
N/A
N/A | [Build a Go App with CockroachDB (GORM)](build-a-go-app-with-cockroachdb-gorm.html)


N/A
[Build a Go App with CockroachDB (upper/db)](build-a-go-app-with-cockroachdb-upperdb.html) | -| Java | [Hibernate](https://hibernate.org/orm/)
(including [Hibernate Spatial](https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html#spatial))
[jOOQ](https://www.jooq.org/)
[MyBatis](https://mybatis.org/mybatis-3/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/hibernate.go ||var supportedHibernateTag = "||"\n\n %} (must be at least 5.4.19)


3.13.2 (must be at least 3.13.0)
3.5.5| Full


Full
Full | N/A


N/A
N/A | [Build a Java App with CockroachDB (Hibernate)](build-a-java-app-with-cockroachdb-hibernate.html)


[Build a Java App with CockroachDB (jOOQ)](build-a-java-app-with-cockroachdb-jooq.html)
[Build a Spring App with CockroachDB (MyBatis)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) | -| JavaScript/TypeScript | [Sequelize](https://www.npmjs.com/package/sequelize)


[Knex.js](https://knexjs.org/)
[Prisma](https://prisma.io)
[TypeORM](https://www.npmjs.com/package/typeorm) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/sequelize.go ||var supportedSequelizeCockroachDBRelease = "||"\n\n %}
(use latest version of CockroachDB adapter)
{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/knex.go ||const supportedKnexTag = "||"\n\n %}
3.14.0
0.3.17 {% comment %}{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/typeorm.go ||const supportedTypeORMRelease = "||"\n %}{% endcomment %} | Full


Full
Full
Full | [`sequelize-cockroachdb`](https://www.npmjs.com/package/sequelize-cockroachdb)


N/A
N/A
N/A | [Build a Node.js App with CockroachDB (Sequelize)](build-a-nodejs-app-with-cockroachdb-sequelize.html)


[Build a Node.js App with CockroachDB (Knex.js)](build-a-nodejs-app-with-cockroachdb-knexjs.html)
[Build a Node.js App with CockroachDB (Prisma)](build-a-nodejs-app-with-cockroachdb-prisma.html)
[Build a TypeScript App with CockroachDB (TypeORM)](build-a-typescript-app-with-cockroachdb.html) | -| Ruby | [ActiveRecord](https://rubygems.org/gems/activerecord)
[RGeo/RGeo-ActiveRecord](https://github.com/cockroachdb/activerecord-cockroachdb-adapter#working-with-spatial-data) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/activerecord.go ||var supportedRailsVersion = "||"\nvar %}
(use latest version of CockroachDB adapter) | Full | [`activerecord-cockroachdb-adapter`](https://rubygems.org/gems/activerecord-cockroachdb-adapter)
(includes client-side transaction retry handling) | [Build a Ruby App with CockroachDB (ActiveRecord)](build-a-ruby-app-with-cockroachdb-activerecord.html) | -| Python | [Django](https://pypi.org/project/Django/)
(including [GeoDjango](https://docs.djangoproject.com/en/3.1/ref/contrib/gis/))
[peewee](https://github.com/coleifer/peewee/)
[SQLAlchemy](https://www.sqlalchemy.org/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/django.go ||var djangoSupportedTag = "cockroach-||"\nvar %}
(use latest version of CockroachDB adapter)

3.13.3
0.7.13
1.4.17
(use latest version of CockroachDB adapter) | Full


Full
Full
Full | [`django-cockroachdb`](https://pypi.org/project/django-cockroachdb/)


N/A
N/A
[`sqlalchemy-cockroachdb`](https://pypi.org/project/sqlalchemy-cockroachdb)
(includes client-side transaction retry handling) | [Build a Python App with CockroachDB (Django)](build-a-python-app-with-cockroachdb-django.html)


N/A (See [peewee docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cockroach-database).)
[Build a Python App with CockroachDB (SQLAlchemy)](build-a-python-app-with-cockroachdb-sqlalchemy.html) | +| Go | [GORM](https://github.com/jinzhu/gorm/releases)


[go-pg](https://github.com/go-pg/pg)
[upper/db](https://github.com/upper/db) | v1.24.1


v10.9.0
v4 | Full


Full
Full | [`crdbgorm`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbgorm)
(includes client-side transaction retry handling)
N/A
N/A | [Build a Go App with CockroachDB (GORM)](build-a-go-app-with-cockroachdb-gorm.html)


N/A
[Build a Go App with CockroachDB (upper/db)](build-a-go-app-with-cockroachdb-upperdb.html) | +| Java | [Hibernate](https://hibernate.org/orm/)
(including [Hibernate Spatial](https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html#spatial))
[jOOQ](https://www.jooq.org/)
[MyBatis](https://mybatis.org/mybatis-3/) | 6.6.20 (must be at least 5.4.19)


3.13.2 (must be at least 3.13.0)
3.5.5 | Full


Full
Full | N/A


N/A
N/A | [Build a Java App with CockroachDB (Hibernate)](build-a-java-app-with-cockroachdb-hibernate.html)


[Build a Java App with CockroachDB (jOOQ)](build-a-java-app-with-cockroachdb-jooq.html)
[Build a Spring App with CockroachDB (MyBatis)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) | +| JavaScript/TypeScript | [Sequelize](https://www.npmjs.com/package/sequelize)


[Knex.js](https://knexjs.org/)
[Prisma](https://prisma.io)
[TypeORM](https://www.npmjs.com/package/typeorm) | v6.0.5
(use latest version of CockroachDB adapter)
2.5.1
3.14.0
0.3.17 {% comment %}remove-unsafe-crdb-setting{% endcomment %} | Full


Full
Full
Full | [`sequelize-cockroachdb`](https://www.npmjs.com/package/sequelize-cockroachdb)


N/A
N/A
N/A | [Build a Node.js App with CockroachDB (Sequelize)](build-a-nodejs-app-with-cockroachdb-sequelize.html)


[Build a Node.js App with CockroachDB (Knex.js)](build-a-nodejs-app-with-cockroachdb-knexjs.html)
[Build a Node.js App with CockroachDB (Prisma)](build-a-nodejs-app-with-cockroachdb-prisma.html)
[Build a TypeScript App with CockroachDB (TypeORM)](build-a-typescript-app-with-cockroachdb.html) | +| Ruby | [ActiveRecord](https://rubygems.org/gems/activerecord)
[RGeo/RGeo-ActiveRecord](https://github.com/cockroachdb/activerecord-cockroachdb-adapter#working-with-spatial-data) | 8.1.0
(use latest version of CockroachDB adapter) | Full | [`activerecord-cockroachdb-adapter`](https://rubygems.org/gems/activerecord-cockroachdb-adapter)
(includes client-side transaction retry handling) | [Build a Ruby App with CockroachDB (ActiveRecord)](build-a-ruby-app-with-cockroachdb-activerecord.html) | +| Python | [Django](https://pypi.org/project/Django/)
(including [GeoDjango](https://docs.djangoproject.com/en/3.1/ref/contrib/gis/))
[peewee](https://github.com/coleifer/peewee/)
[SQLAlchemy](https://www.sqlalchemy.org/) | 4.1.x
(use latest version of CockroachDB adapter)

3.13.3
0.7.13
1.4.17
(use latest version of CockroachDB adapter) | Full


Full
Full
Full | [`django-cockroachdb`](https://pypi.org/project/django-cockroachdb/)


N/A
N/A
[`sqlalchemy-cockroachdb`](https://pypi.org/project/sqlalchemy-cockroachdb)
(includes client-side transaction retry handling) | [Build a Python App with CockroachDB (Django)](build-a-python-app-with-cockroachdb-django.html)


N/A (See [peewee docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cockroach-database).)
[Build a Python App with CockroachDB (SQLAlchemy)](build-a-python-app-with-cockroachdb-sqlalchemy.html) | ## Application frameworks diff --git a/src/current/_includes/v25.3/orchestration/start-cockroachdb-insecure.md b/src/current/_includes/v25.3/orchestration/start-cockroachdb-insecure.md index 3406d48edbb..c9af5cc0267 100644 --- a/src/current/_includes/v25.3/orchestration/start-cockroachdb-insecure.md +++ b/src/current/_includes/v25.3/orchestration/start-cockroachdb-insecure.md @@ -1,10 +1,10 @@ -1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it. +1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it. - Download [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml): + Download [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml ~~~ {{site.data.alerts.callout_info}} @@ -27,11 +27,11 @@ Alternatively, if you'd rather start with a configuration file that has been customized for performance: - 1. 
Download our [performance version of `cockroachdb-statefulset-insecure.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml): + 1. Download our [performance version of `cockroachdb-statefulset-insecure.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml ~~~ 1. Modify the file wherever there is a `TODO` comment. @@ -72,12 +72,12 @@ pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s ~~~ -1. Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: +1. 
Use our [`cluster-init.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml ~~~ ~~~ diff --git a/src/current/_includes/v25.3/orchestration/start-cockroachdb-local-insecure.md b/src/current/_includes/v25.3/orchestration/start-cockroachdb-local-insecure.md index 552cb3cd25f..196a53bce4c 100644 --- a/src/current/_includes/v25.3/orchestration/start-cockroachdb-local-insecure.md +++ b/src/current/_includes/v25.3/orchestration/start-cockroachdb-local-insecure.md @@ -1,8 +1,8 @@ -1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it: +1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it: {% include_cached copy-clipboard.html %} ~~~ shell - $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml + $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml ~~~ ~~~ @@ -41,12 +41,12 @@ pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s ~~~ -1. 
Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: +1. Use our [`cluster-init.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml ~~~ ~~~ diff --git a/src/current/_includes/v25.3/orchestration/start-cockroachdb-secure.md b/src/current/_includes/v25.3/orchestration/start-cockroachdb-secure.md index 972cabc2d8e..a7299e1aa25 100644 --- a/src/current/_includes/v25.3/orchestration/start-cockroachdb-secure.md +++ b/src/current/_includes/v25.3/orchestration/start-cockroachdb-secure.md @@ -1,10 +1,10 @@ ### Configure the cluster -1. Download and modify our [StatefulSet configuration](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml): +1. Download and modify our [StatefulSet configuration](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml ~~~ 1. Update `secretName` with the name of the corresponding node secret. 
diff --git a/src/current/_includes/v25.3/orchestration/test-cluster-secure.md b/src/current/_includes/v25.3/orchestration/test-cluster-secure.md index f255d8d62fc..c146d634626 100644 --- a/src/current/_includes/v25.3/orchestration/test-cluster-secure.md +++ b/src/current/_includes/v25.3/orchestration/test-cluster-secure.md @@ -42,7 +42,7 @@ $ kubectl create \ {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ --f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/client.yaml +-f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/client.yaml ~~~ ~~~ diff --git a/src/current/_includes/v25.3/prod-deployment/decommission-pre-flight-checks.md b/src/current/_includes/v25.3/prod-deployment/decommission-pre-flight-checks.md index b267379384b..70e35e4b72e 100644 --- a/src/current/_includes/v25.3/prod-deployment/decommission-pre-flight-checks.md +++ b/src/current/_includes/v25.3/prod-deployment/decommission-pre-flight-checks.md @@ -14,5 +14,5 @@ Failed running "node decommission" These checks can be skipped by [passing the flag `--checks=skip` to `cockroach node decommission`]({% link {{ page.version.version }}/cockroach-node.md %}#decommission-checks). {{site.data.alerts.callout_info}} -The amount of remaining disk space on other nodes in the cluster is not yet considered as part of the decommissioning pre-flight checks. For more information, see [cockroachdb/cockroach#71757](https://github.com/cockroachdb/cockroach/issues/71757) +The amount of remaining disk space on other nodes in the cluster is not yet considered as part of the decommissioning pre-flight checks. 
For more information, see cockroachdb/cockroach#71757 {{site.data.alerts.end}} diff --git a/src/current/_includes/v25.3/sql/savepoints-and-high-priority-transactions.md b/src/current/_includes/v25.3/sql/savepoints-and-high-priority-transactions.md index c6de489e641..43f09ea415f 100644 --- a/src/current/_includes/v25.3/sql/savepoints-and-high-priority-transactions.md +++ b/src/current/_includes/v25.3/sql/savepoints-and-high-priority-transactions.md @@ -1 +1 @@ -[`ROLLBACK TO SAVEPOINT`]({% link {{ page.version.version }}/rollback-transaction.md %}#rollback-a-nested-transaction) (for either regular savepoints or "restart savepoints" defined with `cockroach_restart`) causes a "feature not supported" error after a DDL statement in a [`HIGH PRIORITY` transaction]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities), in order to avoid a transaction deadlock. For more information, see GitHub issue [#46414](https://www.github.com/cockroachdb/cockroach/issues/46414). +[`ROLLBACK TO SAVEPOINT`]({% link {{ page.version.version }}/rollback-transaction.md %}#rollback-a-nested-transaction) (for either regular savepoints or "restart savepoints" defined with `cockroach_restart`) causes a "feature not supported" error after a DDL statement in a [`HIGH PRIORITY` transaction]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities), in order to avoid a transaction deadlock. For more information, see GitHub issue #46414. diff --git a/src/current/_includes/v25.4/backward-incompatible/alpha.1.md b/src/current/_includes/v25.4/backward-incompatible/alpha.1.md index 8d4004422ad..f10881f0418 100644 --- a/src/current/_includes/v25.4/backward-incompatible/alpha.1.md +++ b/src/current/_includes/v25.4/backward-incompatible/alpha.1.md @@ -1,15 +1,15 @@ -- CockroachDB no longer performs environment variable expansion in the parameter `--certs-dir`. 
Uses like `--certs-dir='$HOME/path'` (expansion by CockroachDB) can be replaced by `--certs-dir="$HOME/path"` (expansion by the Unix shell). [#81298][#81298] -- In the Cockroach CLI, [`BOOL` values]({% link {{ page.version.version }}/bool.md %}) are now formatted as `t` or `f` instead of `True` or `False`. [#81943][#81943] -- Removed the `cockroach quit` command. It has been deprecated since v20.1. To [shut down a node]({% link {{ page.version.version }}/node-shutdown.md %}) gracefully, send a `SIGTERM` signal to it. [#82988][#82988] -- Added a cluster version to allow the [Pebble storage engine]({% link {{ page.version.version }}/architecture/storage-layer.md#pebble %}) to recombine certain SSTables (specifically, user keys that are split across multiple files in a level of the [log-structured merge-tree]({% link {{ page.version.version }}/architecture/storage-layer.md#log-structured-merge-trees %})). Recombining the split user keys is required for supporting the range keys feature. The migration to recombine the SSTables is expected to be short (split user keys are rare in practice), but will block subsequent migrations until all tables have been recombined. The `storage.marked-for-compaction-files` time series metric can show the progress of the migration. [#84887][#84887] +- CockroachDB no longer performs environment variable expansion in the parameter `--certs-dir`. Uses like `--certs-dir='$HOME/path'` (expansion by CockroachDB) can be replaced by `--certs-dir="$HOME/path"` (expansion by the Unix shell). #81298 +- In the Cockroach CLI, [`BOOL` values]({% link {{ page.version.version }}/bool.md %}) are now formatted as `t` or `f` instead of `True` or `False`. #81943 +- Removed the `cockroach quit` command. It has been deprecated since v20.1. To [shut down a node]({% link {{ page.version.version }}/node-shutdown.md %}) gracefully, send a `SIGTERM` signal to it. 
#82988 +- Added a cluster version to allow the [Pebble storage engine]({% link {{ page.version.version }}/architecture/storage-layer.md %}#pebble) to recombine certain SSTables (specifically, user keys that are split across multiple files in a level of the [log-structured merge-tree]({% link {{ page.version.version }}/architecture/storage-layer.md %}#log-structured-merge-trees)). Recombining the split user keys is required for supporting the range keys feature. The migration to recombine the SSTables is expected to be short (split user keys are rare in practice), but will block subsequent migrations until all tables have been recombined. The `storage.marked-for-compaction-files` time series metric can show the progress of the migration. #84887 - Using a single TCP port listener for both RPC (node-node) and SQL client connections is now deprecated. This capability **will be removed** in the next version of CockroachDB. Instead, make one of the following configuration changes to your CockroachDB deployment: - Preferred: keep port `26257` for SQL, and allocate a new port, e.g., `26357`, for node-node RPC connections. For example, you might configure a node with the flags `--listen-addr=:26357 --sql-addr=:26257`, where subsequent nodes seeking to join would then use the flag `--join=othernode:26357,othernode:26257`. This will become the default configuration in the next version of CockroachDB. When using this mode of operation, care should be taken to use a `--join` flag that includes both the previous and new port numbers for other nodes, so that no network partition occurs during the upgrade. - - Optional: keep port `26257` for RPC, and allocate a new port, e.g., `26357`, for SQL connections. For example, you might configure a node with the flags `--listen-addr=:26257 --sql-addr=:26357`. When using this mode of operation, the `--join` flags do not need to be modified.
However, SQL client apps or the SQL load balancer configuration (when in use) must be updated to use the new SQL port number. [#85671][#85671] -- If no `nullif` option is specified while using [`IMPORT CSV`]({% link {{ page.version.version }}/import.md %}), then a zero-length string in the input is now treated as `NULL`. The quoted empty string in the input is treated as an empty string. Similarly, if `nullif` is specified, then an unquoted value is treated as `NULL`, and a quoted value is treated as that string. These changes were made to make `IMPORT CSV` behave more similarly to `COPY CSV`. If the previous behavior (i.e., treating either quoted or unquoted values that match the `nullif` setting as `NULL`) is desired, you can use the new `allow_quoted_null` option in the `IMPORT` statement. [#84487][#84487] -- [`COPY FROM`]({% link {{ page.version.version }}/copy.md %}) operations are now atomic by default instead of being segmented into 100 row transactions. Set the `copy_from_atomic_enabled` session setting to `false` for the previous behavior. [#85986][#85986] -- The `GRANT` privilege has been removed and replaced by the more granular [`WITH GRANT OPTION`]({% link {{ page.version.version }}/grant.md %}#grant-privileges-with-the-option-to-grant-to-others), which provides control over which privileges are allowed to be granted. [#81310][#81310] -- Removed the ability to cast `int`, `int2`, and `int8` to a `0` length `BIT` or `VARBIT`. [#81266][#81266] -- Removed the deprecated `GRANT` privilege. [#81310][#81310] -- Removed the `ttl_automatic_column` storage parameter. The `crdb_internal_expiration` column is created when `ttl_expire_after` is set and removed when `ttl_expire_after` is reset. [#83134][#83134] -- Removed the byte string parameter in the `crdb_internal.schedule_sql_stats_compaction` function. [#82560][#82560] -- Changed the default value of the `enable_implicit_transaction_for_batch_statements` to `true`. 
This means that a [batch of statements]({% link {{ page.version.version }}/transactions.md %}#batched-statements) sent in one string separated by semicolons is treated as an implicit transaction. [#76834][#76834] + - Optional: keep port `26257` for RPC, and allocate a new port, e.g., `26357`, for SQL connections. For example, you might configure a node with the flags `--listen-addr=:26257 --sql-addr=:26357`. When using this mode of operation, the `--join` flags do not need to be modified. However, SQL client apps or the SQL load balancer configuration (when in use) must be updated to use the new SQL port number. #85671 +- If no `nullif` option is specified while using [`IMPORT CSV`]({% link {{ page.version.version }}/import.md %}), then a zero-length string in the input is now treated as `NULL`. The quoted empty string in the input is treated as an empty string. Similarly, if `nullif` is specified, then an unquoted value is treated as `NULL`, and a quoted value is treated as that string. These changes were made to make `IMPORT CSV` behave more similarly to `COPY CSV`. If the previous behavior (i.e., treating either quoted or unquoted values that match the `nullif` setting as `NULL`) is desired, you can use the new `allow_quoted_null` option in the `IMPORT` statement. #84487 +- [`COPY FROM`]({% link {{ page.version.version }}/copy.md %}) operations are now atomic by default instead of being segmented into 100 row transactions. Set the `copy_from_atomic_enabled` session setting to `false` for the previous behavior. #85986 +- The `GRANT` privilege has been removed and replaced by the more granular [`WITH GRANT OPTION`]({% link {{ page.version.version }}/grant.md %}#grant-privileges-with-the-option-to-grant-to-others), which provides control over which privileges are allowed to be granted. #81310 +- Removed the ability to cast `int`, `int2`, and `int8` to a `0` length `BIT` or `VARBIT`. #81266 +- Removed the deprecated `GRANT` privilege. 
#81310 +- Removed the `ttl_automatic_column` storage parameter. The `crdb_internal_expiration` column is created when `ttl_expire_after` is set and removed when `ttl_expire_after` is reset. #83134 +- Removed the byte string parameter in the `crdb_internal.schedule_sql_stats_compaction` function. #82560 +- Changed the default value of the `enable_implicit_transaction_for_batch_statements` to `true`. This means that a [batch of statements]({% link {{ page.version.version }}/transactions.md %}#batched-statements) sent in one string separated by semicolons is treated as an implicit transaction. #76834 diff --git a/src/current/_includes/v25.4/cdc/avro-udt-composite.md b/src/current/_includes/v25.4/cdc/avro-udt-composite.md index 7a34fbd3253..8374c4d2baf 100644 --- a/src/current/_includes/v25.4/cdc/avro-udt-composite.md +++ b/src/current/_includes/v25.4/cdc/avro-udt-composite.md @@ -1 +1 @@ -A changefeed in [Avro format]({% link {{ page.version.version }}/changefeed-messages.md %}#avro) will not be able to serialize [user-defined composite (tuple) types](create-type.html). [#102903](https://github.com/cockroachdb/cockroach/issues/102903) \ No newline at end of file +A changefeed in [Avro format]({% link {{ page.version.version }}/changefeed-messages.md %}#avro) will not be able to serialize [user-defined composite (tuple) types](create-type.html). #102903 \ No newline at end of file diff --git a/src/current/_includes/v25.4/cdc/csv-udt-composite.md b/src/current/_includes/v25.4/cdc/csv-udt-composite.md index 834bddd8366..39c88f3e06a 100644 --- a/src/current/_includes/v25.4/cdc/csv-udt-composite.md +++ b/src/current/_includes/v25.4/cdc/csv-udt-composite.md @@ -1 +1 @@ -A changefeed emitting [CSV]({% link {{ page.version.version }}/changefeed-messages.md %}#csv) will include `AS` labels in the message format when the changefeed serializes a [user-defined composite type]({% link {{ page.version.version }}/create-type.md %}). 
[#102905](https://github.com/cockroachdb/cockroach/issues/102905) \ No newline at end of file +A changefeed emitting [CSV]({% link {{ page.version.version }}/changefeed-messages.md %}#csv) will include `AS` labels in the message format when the changefeed serializes a [user-defined composite type]({% link {{ page.version.version }}/create-type.md %}). #102905 \ No newline at end of file diff --git a/src/current/_includes/v25.4/faq/what-is-crdb.md b/src/current/_includes/v25.4/faq/what-is-crdb.md index 28857ed61fa..ee04ca10506 100644 --- a/src/current/_includes/v25.4/faq/what-is-crdb.md +++ b/src/current/_includes/v25.4/faq/what-is-crdb.md @@ -1,6 +1,6 @@ CockroachDB is a [distributed SQL](https://www.cockroachlabs.com/blog/what-is-distributed-sql/) database built on a transactional and strongly-consistent key-value store. It **scales** horizontally; **survives** disk, machine, rack, and even datacenter failures with minimal latency disruption and no manual intervention; supports **strongly-consistent** ACID transactions; and provides a familiar **SQL** API for structuring, manipulating, and querying data. -CockroachDB is inspired by Google's [Spanner](http://research.google.com/archive/spanner.html) and [F1](http://research.google.com/pubs/pub38125.html) technologies, and the [source code](https://github.com/cockroachdb/cockroach) is freely available. +CockroachDB is inspired by Google's [Spanner](http://research.google.com/archive/spanner.html) and [F1](http://research.google.com/pubs/pub38125.html) technologies, and the source code is freely available. {{site.data.alerts.callout_success}} For a deeper dive into CockroachDB's capabilities and how it fits into the database landscape, take the free [**Intro to Distributed SQL and CockroachDB**](https://university.cockroachlabs.com/courses/course-v1:crl+intro-to-distributed-sql-and-cockroachdb+self-paced/about) course on Cockroach University. 
diff --git a/src/current/_includes/v25.4/finalization-required/119894.md b/src/current/_includes/v25.4/finalization-required/119894.md index f2b393c3c0e..f5177dcd523 100644 --- a/src/current/_includes/v25.4/finalization-required/119894.md +++ b/src/current/_includes/v25.4/finalization-required/119894.md @@ -1 +1 @@ -[Splits](https://cockroachlabs.com/docs/{{ include.version }}/architecture/distribution-layer#range-splits) no longer hold [latches](https://cockroachlabs.com/docs/architecture/distribution-layer.#latch-manager) for time proportional to the range size while computing [MVCC](https://cockroachlabs.com/docs/{{ include.version }}/architecture/storage-layer#mvcc) statistics. Instead, MVCC statistics are pre-computed before the critical section of the split. As a side effect, the resulting statistics are no longer 100% accurate because they may correctly distribute writes concurrent with the split. To mitigate against this potential inaccuracy, and to prevent the statistics from drifting after successive splits, the existing stored statistics are re-computed and corrected if needed during the non-critical section of the split. [#119894](https://github.com/cockroachdb/cockroach/pull/119894) +[Splits](https://cockroachlabs.com/docs/{{ include.version }}/architecture/distribution-layer#range-splits) no longer hold [latches](https://cockroachlabs.com/docs/architecture/distribution-layer.#latch-manager) for time proportional to the range size while computing [MVCC](https://cockroachlabs.com/docs/{{ include.version }}/architecture/storage-layer#mvcc) statistics. Instead, MVCC statistics are pre-computed before the critical section of the split. As a side effect, the resulting statistics are no longer 100% accurate because they may correctly distribute writes concurrent with the split. 
To mitigate against this potential inaccuracy, and to prevent the statistics from drifting after successive splits, the existing stored statistics are re-computed and corrected if needed during the non-critical section of the split. #119894 diff --git a/src/current/_includes/v25.4/known-limitations/alter-changefeed-cdc-queries.md b/src/current/_includes/v25.4/known-limitations/alter-changefeed-cdc-queries.md index 56dd7eeaacd..4134b3c9f86 100644 --- a/src/current/_includes/v25.4/known-limitations/alter-changefeed-cdc-queries.md +++ b/src/current/_includes/v25.4/known-limitations/alter-changefeed-cdc-queries.md @@ -1 +1 @@ -{% if page.name == "alter-changefeed.md" %} `ALTER CHANGEFEED` {% else %} [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) {% endif %} is not fully supported with changefeeds that use {% if page.name == "cdc-queries.md" %} CDC queries. {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}). {% endif %} You can alter the options that a changefeed uses, but you cannot alter the changefeed target tables. [#83033](https://github.com/cockroachdb/cockroach/issues/83033) \ No newline at end of file +{% if page.name == "alter-changefeed.md" %} `ALTER CHANGEFEED` {% else %} [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) {% endif %} is not fully supported with changefeeds that use {% if page.name == "cdc-queries.md" %} CDC queries. {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}). {% endif %} You can alter the options that a changefeed uses, but you cannot alter the changefeed target tables. 
#83033 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/alter-changefeed-limitations.md b/src/current/_includes/v25.4/known-limitations/alter-changefeed-limitations.md index a183f2964f4..cb75ad71c55 100644 --- a/src/current/_includes/v25.4/known-limitations/alter-changefeed-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/alter-changefeed-limitations.md @@ -1,4 +1,4 @@ -- It is necessary to [`PAUSE`]({% link {{ page.version.version }}/pause-job.md %}) the changefeed before performing any [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) statement. [#77171](https://github.com/cockroachdb/cockroach/issues/77171) +- It is necessary to [`PAUSE`]({% link {{ page.version.version }}/pause-job.md %}) the changefeed before performing any [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) statement. #77171 - CockroachDB does not keep track of the [`initial_scan`]({% link {{ page.version.version }}/create-changefeed.md %}#initial-scan) option applied to tables when it is set to `yes` or `only`. For example: ~~~ sql diff --git a/src/current/_includes/v25.4/known-limitations/alter-sequence-limitations.md b/src/current/_includes/v25.4/known-limitations/alter-sequence-limitations.md index 7343a1d1f1e..ba6bc88aa32 100644 --- a/src/current/_includes/v25.4/known-limitations/alter-sequence-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/alter-sequence-limitations.md @@ -1 +1 @@ -- Altering the minimum or maximum value of a series does not check the current value of a series. This means that it is possible to silently set the maximum to a value less than, or a minimum value greater than, the current value. [#23719](https://github.com/cockroachdb/cockroach/issues/23719) +- Altering the minimum or maximum value of a series does not check the current value of a series. 
This means that it is possible to silently set the maximum to a value less than, or a minimum value greater than, the current value. #23719 diff --git a/src/current/_includes/v25.4/known-limitations/alter-table-add-column-limitations.md b/src/current/_includes/v25.4/known-limitations/alter-table-add-column-limitations.md index a8e71e5e59c..a8249cfb0f8 100644 --- a/src/current/_includes/v25.4/known-limitations/alter-table-add-column-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/alter-table-add-column-limitations.md @@ -2,7 +2,7 @@ - The column uses a [sequence]({% link {{ page.version.version }}/create-sequence.md %}) as the [`DEFAULT`]({% link {{ page.version.version }}/default-value.md %}) value, for example using `nextval()`. - The column uses `GENERATED ALWAYS AS IDENTITY` or `GENERATED BY DEFAULT AS IDENTITY`, unless the table being altered is empty. - This is because CockroachDB does not support back-filling sequential column data. [#42508](https://github.com/cockroachdb/cockroach/issues/42508) + This is because CockroachDB does not support back-filling sequential column data. #42508 - When executing an [`ALTER TABLE ADD COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#add-column) statement with a [`DEFAULT`]({% link {{ page.version.version }}/default-value.md %}) expression, new values generated: - Use the default [search path]({% link {{ page.version.version }}/sql-name-resolution.md %}#search-path) regardless of the search path configured in the current session via `SET SEARCH_PATH`. - Use the UTC time zone regardless of the time zone configured in the current session via [`SET TIME ZONE`]({% link {{ page.version.version }}/set-vars.md %}). 
diff --git a/src/current/_includes/v25.4/known-limitations/alter-view-limitations.md b/src/current/_includes/v25.4/known-limitations/alter-view-limitations.md index 642bed6ce08..e44921629ff 100644 --- a/src/current/_includes/v25.4/known-limitations/alter-view-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/alter-view-limitations.md @@ -1,4 +1,4 @@ `ALTER VIEW` does not currently support: - Changing the [`SELECT`]({% link {{ page.version.version }}/select-clause.md %}) statement executed by a view. Instead, you must drop the existing view and create a new view. -- Renaming a view that other views depend on. This feature may be added in the future. [#10083](https://github.com/cockroachdb/cockroach/issues/10083) \ No newline at end of file +- Renaming a view that other views depend on. This feature may be added in the future. #10083 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/aost-limitations.md b/src/current/_includes/v25.4/known-limitations/aost-limitations.md index 811c884d08d..c42e62800d3 100644 --- a/src/current/_includes/v25.4/known-limitations/aost-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/aost-limitations.md @@ -1 +1 @@ -CockroachDB does not support placeholders in {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %}. The time value must be a constant value embedded in the SQL string. [#30955](https://github.com/cockroachdb/cockroach/issues/30955) \ No newline at end of file +CockroachDB does not support placeholders in {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %}. The time value must be a constant value embedded in the SQL string. 
#30955 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/cannot-refresh-materialized-views-inside-transactions.md b/src/current/_includes/v25.4/known-limitations/cannot-refresh-materialized-views-inside-transactions.md index b0aaf728177..2f6fe466a95 100644 --- a/src/current/_includes/v25.4/known-limitations/cannot-refresh-materialized-views-inside-transactions.md +++ b/src/current/_includes/v25.4/known-limitations/cannot-refresh-materialized-views-inside-transactions.md @@ -24,4 +24,4 @@ SQLSTATE: 25000 ~~~ - [#66008](https://github.com/cockroachdb/cockroach/issues/66008) + #66008 diff --git a/src/current/_includes/v25.4/known-limitations/cdc-queries-column-families.md b/src/current/_includes/v25.4/known-limitations/cdc-queries-column-families.md index 505a8c9700e..52bdabec236 100644 --- a/src/current/_includes/v25.4/known-limitations/cdc-queries-column-families.md +++ b/src/current/_includes/v25.4/known-limitations/cdc-queries-column-families.md @@ -1 +1 @@ -Creating a changefeed with {% if page.name == "cdc-queries.md" %} CDC queries {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}) {% endif %} on tables with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %} is not supported. [#127761](https://github.com/cockroachdb/cockroach/issues/127761) \ No newline at end of file +Creating a changefeed with {% if page.name == "cdc-queries.md" %} CDC queries {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}) {% endif %} on tables with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %} is not supported. 
#127761 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/cdc-queries.md b/src/current/_includes/v25.4/known-limitations/cdc-queries.md index 2839eba5eda..552900531b0 100644 --- a/src/current/_includes/v25.4/known-limitations/cdc-queries.md +++ b/src/current/_includes/v25.4/known-limitations/cdc-queries.md @@ -3,5 +3,5 @@ - The following are not permitted in CDC queries: - [Volatile functions]({% link {{ page.version.version }}/functions-and-operators.md %}#function-volatility). - Sub-select queries. - - [Aggregate]({% link {{ page.version.version }}/functions-and-operators.md %}#aggregate-functions) and [window functions]({% link {{ page.version.version }}/window-functions.md %}) (i.e., functions operating over many rows). [#98237](https://github.com/cockroachdb/cockroach/issues/98237) -- `delete` changefeed events will only contain the [primary key]({% link {{ page.version.version }}/primary-key.md %}). All other columns will emit as `NULL`. See [Capture delete messages]({% link {{ page.version.version }}/cdc-queries.md %}#capture-delete-messages) for detail on running a CDC query that emits the deleted values. [#83835](https://github.com/cockroachdb/cockroach/issues/83835) + - [Aggregate]({% link {{ page.version.version }}/functions-and-operators.md %}#aggregate-functions) and [window functions]({% link {{ page.version.version }}/window-functions.md %}) (i.e., functions operating over many rows). #98237 +- `delete` changefeed events will only contain the [primary key]({% link {{ page.version.version }}/primary-key.md %}). All other columns will emit as `NULL`. See [Capture delete messages]({% link {{ page.version.version }}/cdc-queries.md %}#capture-delete-messages) for detail on running a CDC query that emits the deleted values. 
#83835 diff --git a/src/current/_includes/v25.4/known-limitations/cdc.md b/src/current/_includes/v25.4/known-limitations/cdc.md index a473e94367c..0a3914da8bd 100644 --- a/src/current/_includes/v25.4/known-limitations/cdc.md +++ b/src/current/_includes/v25.4/known-limitations/cdc.md @@ -1,8 +1,8 @@ -- Changefeed target options are limited to tables and [column families]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}). [#73435](https://github.com/cockroachdb/cockroach/issues/73435) +- Changefeed target options are limited to tables and [column families]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}). #73435 - {% include {{page.version.version}}/cdc/kafka-vpc-limitation.md %} -- Webhook sinks only support HTTPS. Use the [`insecure_tls_skip_verify`]({% link {{ page.version.version }}/create-changefeed.md %}#insecure-tls-skip-verify) parameter when testing to disable certificate verification; however, this still requires HTTPS and certificates. [#73431](https://github.com/cockroachdb/cockroach/issues/73431) -- Formats for changefeed messages are not supported by all changefeed sinks. Refer to the [Changefeed Sinks]({% link {{ page.version.version }}/changefeed-sinks.md %}) page for details on compatible formats with each sink and the [`format`]({% link {{ page.version.version }}/create-changefeed.md %}) option to specify a changefeed message format. [#73432](https://github.com/cockroachdb/cockroach/issues/73432) -- Using the [`split_column_families`]({% link {{ page.version.version }}/create-changefeed.md %}#split-column-families) and [`resolved`]({% link {{ page.version.version }}/create-changefeed.md %}#resolved) options on the same changefeed will cause an error when using the following [sinks](changefeed-sinks.html): Kafka and Google Cloud Pub/Sub. Instead, use the individual `FAMILY` keyword to specify column families when creating a changefeed. 
[#79452](https://github.com/cockroachdb/cockroach/issues/79452) +- Webhook sinks only support HTTPS. Use the [`insecure_tls_skip_verify`]({% link {{ page.version.version }}/create-changefeed.md %}#insecure-tls-skip-verify) parameter when testing to disable certificate verification; however, this still requires HTTPS and certificates. #73431 +- Formats for changefeed messages are not supported by all changefeed sinks. Refer to the [Changefeed Sinks]({% link {{ page.version.version }}/changefeed-sinks.md %}) page for details on compatible formats with each sink and the [`format`]({% link {{ page.version.version }}/create-changefeed.md %}) option to specify a changefeed message format. #73432 +- Using the [`split_column_families`]({% link {{ page.version.version }}/create-changefeed.md %}#split-column-families) and [`resolved`]({% link {{ page.version.version }}/create-changefeed.md %}#resolved) options on the same changefeed will cause an error when using the following [sinks](changefeed-sinks.html): Kafka and Google Cloud Pub/Sub. Instead, use the individual `FAMILY` keyword to specify column families when creating a changefeed. 
#79452 - {% include {{page.version.version}}/cdc/types-udt-composite-general.md %} The following limitations apply: - {% include {{page.version.version}}/cdc/avro-udt-composite.md %} - {% include {{page.version.version}}/cdc/csv-udt-composite.md %} \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/changefeed-column-family-message.md b/src/current/_includes/v25.4/known-limitations/changefeed-column-family-message.md index 41744b9b4b4..e77b6cc51e0 100644 --- a/src/current/_includes/v25.4/known-limitations/changefeed-column-family-message.md +++ b/src/current/_includes/v25.4/known-limitations/changefeed-column-family-message.md @@ -1 +1 @@ -When you create a changefeed on a table with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %}, the changefeed will emit messages per column family in separate streams. As a result, [changefeed messages]({% link {{ page.version.version }}/changefeed-messages.md %}) for different column families will arrive at the [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) under separate topics. [#127736](https://github.com/cockroachdb/cockroach/issues/127736) \ No newline at end of file +When you create a changefeed on a table with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %}, the changefeed will emit messages per column family in separate streams. As a result, [changefeed messages]({% link {{ page.version.version }}/changefeed-messages.md %}) for different column families will arrive at the [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) under separate topics. 
#127736 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/citext-limitations.md b/src/current/_includes/v25.4/known-limitations/citext-limitations.md index df0fbc0ff91..c7e43cd2949 100644 --- a/src/current/_includes/v25.4/known-limitations/citext-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/citext-limitations.md @@ -1 +1 @@ -- `CITEXT` types are not currently compatible with the `LIKE` and `ILIKE` operators. [#149791](https://github.com/cockroachdb/cockroach/issues/149791) \ No newline at end of file +- `CITEXT` types are not currently compatible with the `LIKE` and `ILIKE` operators. #149791 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/collate-limitations.md b/src/current/_includes/v25.4/known-limitations/collate-limitations.md index d3e3e712bb5..7ba94b89053 100644 --- a/src/current/_includes/v25.4/known-limitations/collate-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/collate-limitations.md @@ -1 +1 @@ -- Many string operations are not properly overloaded for [collated strings]({% link {{ page.version.version }}/collate.md %}). For example, the `||` concatenation operator works with regular strings but returns an error with collated strings. [#10679](https://github.com/cockroachdb/cockroach/issues/10679) +- Many string operations are not properly overloaded for [collated strings]({% link {{ page.version.version }}/collate.md %}). For example, the `||` concatenation operator works with regular strings but returns an error with collated strings. 
#10679 diff --git a/src/current/_includes/v25.4/known-limitations/comment-on-limitations.md b/src/current/_includes/v25.4/known-limitations/comment-on-limitations.md index 568b31ec737..757f74e10f0 100644 --- a/src/current/_includes/v25.4/known-limitations/comment-on-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/comment-on-limitations.md @@ -1 +1 @@ -- The [`COMMENT ON`]({% link {{ page.version.version }}/comment-on.md %}) statement associates comments to databases, tables, or columns. However, the internal table (`system.comments`) in which these comments are stored is not captured by a [`BACKUP`]({% link {{ page.version.version }}/backup.md %}) of an individual table or database. As a workaround, take a cluster backup instead, as the `system.comments` table is included in cluster backups. [#44396](https://github.com/cockroachdb/cockroach/issues/44396) +- The [`COMMENT ON`]({% link {{ page.version.version }}/comment-on.md %}) statement associates comments to databases, tables, or columns. However, the internal table (`system.comments`) in which these comments are stored is not captured by a [`BACKUP`]({% link {{ page.version.version }}/backup.md %}) of an individual table or database. As a workaround, take a cluster backup instead, as the `system.comments` table is included in cluster backups. #44396 diff --git a/src/current/_includes/v25.4/known-limitations/composite-type-limitations.md b/src/current/_includes/v25.4/known-limitations/composite-type-limitations.md index 1ceecb5d889..48ce4591246 100644 --- a/src/current/_includes/v25.4/known-limitations/composite-type-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/composite-type-limitations.md @@ -1,2 +1,2 @@ -- Updating subfields of composite types using dot syntax results in a syntax error. 
[#102984](https://github.com/cockroachdb/cockroach/issues/102984) -- Tuple elements cannot be accessed without enclosing the [composite variable]({% link {{ page.version.version }}/create-type.md %}) name in parentheses. For example, `(OLD).column` and `(NEW).column` when used in [triggers]({% link {{ page.version.version }}/triggers.md %}). [#114687](https://github.com/cockroachdb/cockroach/issues/114687) +- Updating subfields of composite types using dot syntax results in a syntax error. #102984 +- Tuple elements cannot be accessed without enclosing the [composite variable]({% link {{ page.version.version }}/create-type.md %}) name in parentheses. For example, `(OLD).column` and `(NEW).column` when used in [triggers]({% link {{ page.version.version }}/triggers.md %}). #114687 diff --git a/src/current/_includes/v25.4/known-limitations/copy-syntax.md b/src/current/_includes/v25.4/known-limitations/copy-syntax.md index e64a075dcac..bd2e56e1c25 100644 --- a/src/current/_includes/v25.4/known-limitations/copy-syntax.md +++ b/src/current/_includes/v25.4/known-limitations/copy-syntax.md @@ -1,5 +1,5 @@ CockroachDB does not yet support the following `COPY` syntax: - - `COPY ... WITH FREEZE`. [#85573](https://github.com/cockroachdb/cockroach/issues/85573) - - `COPY ... WITH QUOTE`. [#85574](https://github.com/cockroachdb/cockroach/issues/85574) - - `COPY ... FROM ... WHERE `. [#54580](https://github.com/cockroachdb/cockroach/issues/54580) + - `COPY ... WITH FREEZE`. #85573 + - `COPY ... WITH QUOTE`. #85574 + - `COPY ... FROM ... WHERE `. 
#54580 diff --git a/src/current/_includes/v25.4/known-limitations/create-statistics-aost-limitation.md b/src/current/_includes/v25.4/known-limitations/create-statistics-aost-limitation.md index 09f86f51c48..d318ce338be 100644 --- a/src/current/_includes/v25.4/known-limitations/create-statistics-aost-limitation.md +++ b/src/current/_includes/v25.4/known-limitations/create-statistics-aost-limitation.md @@ -1 +1 @@ -The `ANALYZE` alias {% if page.name != "create-statistics.md" %}of [`CREATE STATISTICS`]({% link {{ page.version.version }}/create-statistics.md %}){% endif %} does not support specifying an {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %} timestamp. `ANALYZE` statements use `AS OF SYSTEM TIME '-0.001ms'` automatically. For more control over the statistics interval, use the `CREATE STATISTICS` syntax instead. [#96430](https://github.com/cockroachdb/cockroach/issues/96430) \ No newline at end of file +The `ANALYZE` alias {% if page.name != "create-statistics.md" %}of [`CREATE STATISTICS`]({% link {{ page.version.version }}/create-statistics.md %}){% endif %} does not support specifying an {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %} timestamp. `ANALYZE` statements use `AS OF SYSTEM TIME '-0.001ms'` automatically. For more control over the statistics interval, use the `CREATE STATISTICS` syntax instead. 
#96430 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/data-domiciling-limitations.md b/src/current/_includes/v25.4/known-limitations/data-domiciling-limitations.md index b4e1092fdac..7d8faa3ce97 100644 --- a/src/current/_includes/v25.4/known-limitations/data-domiciling-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/data-domiciling-limitations.md @@ -1,4 +1,4 @@ -- When using the `infer_rbr_region_col_using_constraint` option, inserting rows with `DEFAULT` for the region column uses the database's primary region instead of inferring the region from the parent table via foreign-key constraint. [#150783](https://github.com/cockroachdb/cockroach/issues/150783) +- When using the `infer_rbr_region_col_using_constraint` option, inserting rows with `DEFAULT` for the region column uses the database's primary region instead of inferring the region from the parent table via foreign-key constraint. #150783 - When columns are [indexed]({% link {{ page.version.version }}/indexes.md %}), a subset of data from the indexed columns may appear in [meta ranges]({% link {{ page.version.version }}/architecture/distribution-layer.md %}#meta-ranges) or other system tables. CockroachDB synchronizes these system ranges and system tables across nodes. This synchronization does not respect any multi-region settings applied via either the [multi-region SQL statements]({% link {{ page.version.version }}/multiregion-overview.md %}), or the low-level [zone configs]({% link {{ page.version.version }}/configure-replication-zones.md %}) mechanism. - [Zone configs]({% link {{ page.version.version }}/configure-replication-zones.md %}) can be used for data placement but these features were historically built for performance, not for domiciling. The replication system's top priority is to prevent the loss of data and it may override the zone configurations if necessary to ensure data durability. 
For more information, see [Replication Controls]({% link {{ page.version.version }}/configure-replication-zones.md %}#types-of-constraints). - If your [log files]({% link {{ page.version.version }}/logging-overview.md %}) are kept in the region where they were generated, there is some cross-region leakage (like the system tables described previously), but the majority of user data that makes it into the logs is going to be homed in that region. If that's not strong enough, you can use the [log redaction functionality]({% link {{ page.version.version }}/configure-logs.md %}#redact-logs) to strip all raw data from the logs. You can also limit your log retention entirely. diff --git a/src/current/_includes/v25.4/known-limitations/distsql-heterogeneous-endianness.md b/src/current/_includes/v25.4/known-limitations/distsql-heterogeneous-endianness.md index 9194395dc04..7aa2963023e 100644 --- a/src/current/_includes/v25.4/known-limitations/distsql-heterogeneous-endianness.md +++ b/src/current/_includes/v25.4/known-limitations/distsql-heterogeneous-endianness.md @@ -1 +1 @@ -In clusters that mix big-endian and little-endian architectures, DistSQL may produce incorrect results because hash computations differ between the platforms. As a workaround on heterogeneous clusters, disable DistSQL with `SET CLUSTER SETTING sql.defaults.distsql = off`. [#148773](https://github.com/cockroachdb/cockroach/issues/148773) \ No newline at end of file +In clusters that mix big-endian and little-endian architectures, DistSQL may produce incorrect results because hash computations differ between the platforms. As a workaround on heterogeneous clusters, disable DistSQL with `SET CLUSTER SETTING sql.defaults.distsql = off`. 
#148773 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/drop-column-partial-index.md b/src/current/_includes/v25.4/known-limitations/drop-column-partial-index.md index 9fd1811cc43..fd28f0a96a2 100644 --- a/src/current/_includes/v25.4/known-limitations/drop-column-partial-index.md +++ b/src/current/_includes/v25.4/known-limitations/drop-column-partial-index.md @@ -1 +1 @@ -CockroachDB prevents a column from being dropped using [`ALTER TABLE ... DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) if it is referenced by a partial index predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). [#97813](https://github.com/cockroachdb/cockroach/issues/97813). \ No newline at end of file +CockroachDB prevents a column from being dropped using [`ALTER TABLE ... DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) if it is referenced by a partial index predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). #97813. \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/drop-owned-by-limitations.md b/src/current/_includes/v25.4/known-limitations/drop-owned-by-limitations.md index 95685f6adf1..af99d4b7cfe 100644 --- a/src/current/_includes/v25.4/known-limitations/drop-owned-by-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/drop-owned-by-limitations.md @@ -10,4 +10,4 @@ The phrase "synthetic privileges" in the error message refers to [system-level privileges]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges). 
- The workaround is to use [`SHOW SYSTEM GRANTS FOR {role}`](show-system-grants.html) and then use [`REVOKE SYSTEM ...`](revoke.html#revoke-system-level-privileges-on-the-entire-cluster) for each privilege in the result. [#88149](https://github.com/cockroachdb/cockroach/issues/88149) \ No newline at end of file + The workaround is to use [`SHOW SYSTEM GRANTS FOR {role}`](show-system-grants.html) and then use [`REVOKE SYSTEM ...`](revoke.html#revoke-system-level-privileges-on-the-entire-cluster) for each privilege in the result. #88149 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/drop-trigger-limitations.md b/src/current/_includes/v25.4/known-limitations/drop-trigger-limitations.md index 90745f7e17a..2a633ccb369 100644 --- a/src/current/_includes/v25.4/known-limitations/drop-trigger-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/drop-trigger-limitations.md @@ -1 +1 @@ -[`DROP TRIGGER`]({% link {{ page.version.version }}/drop-trigger.md %}) with `CASCADE` is not supported. [#128151](https://github.com/cockroachdb/cockroach/issues/128151) \ No newline at end of file +[`DROP TRIGGER`]({% link {{ page.version.version }}/drop-trigger.md %}) with `CASCADE` is not supported. #128151 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/enforce-home-region-limitations.md b/src/current/_includes/v25.4/known-limitations/enforce-home-region-limitations.md index 693829358f7..53ce961b902 100644 --- a/src/current/_includes/v25.4/known-limitations/enforce-home-region-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/enforce-home-region-limitations.md @@ -1 +1 @@ -With `enforce_home_region` enabled, CockroachDB currently validates home-region access during plan build. This can falsely reject queries (e.g., lookup joins) that would only read local data at execution time, returning a `Query has no home region` error. 
[#148375](https://github.com/cockroachdb/cockroach/issues/148375) \ No newline at end of file +With `enforce_home_region` enabled, CockroachDB currently validates home-region access during plan build. This can falsely reject queries (e.g., lookup joins) that would only read local data at execution time, returning a `Query has no home region` error. #148375 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/expression-index-limitations.md b/src/current/_includes/v25.4/known-limitations/expression-index-limitations.md index c0e94185948..9487028791c 100644 --- a/src/current/_includes/v25.4/known-limitations/expression-index-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/expression-index-limitations.md @@ -1,6 +1,6 @@ - The expression cannot reference columns outside the index's table. - Functional expression output must be determined by the input arguments. For example, you can't use the [volatile function]({% link {{ page.version.version }}/functions-and-operators.md %}#function-volatility) `now()` to create an index because its output depends on more than just the function arguments. -- CockroachDB does not allow {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} to reference [computed columns]({% link {{ page.version.version }}/computed-columns.md %}). [#67900](https://github.com/cockroachdb/cockroach/issues/67900) +- CockroachDB does not allow {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} to reference [computed columns]({% link {{ page.version.version }}/computed-columns.md %}). #67900 - CockroachDB does not support expressions as `ON CONFLICT` targets. 
This means that unique {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} cannot be selected as arbiters for [`INSERT .. ON CONFLICT`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause) statements. For example: {% include_cached copy-clipboard.html %} @@ -40,4 +40,4 @@ HINT: try \h INSERT ~~~ - [#67893](https://github.com/cockroachdb/cockroach/issues/67893) + #67893 diff --git a/src/current/_includes/v25.4/known-limitations/forecasted-stats-limitations.md b/src/current/_includes/v25.4/known-limitations/forecasted-stats-limitations.md index c8753124a96..b5d03e8ffc5 100644 --- a/src/current/_includes/v25.4/known-limitations/forecasted-stats-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/forecasted-stats-limitations.md @@ -6,4 +6,4 @@ Although [`SHOW STATISTICS WITH FORECAST`]({% link {{ page.version.version }}/show-statistics.md %}#display-forecasted-statistics) shows the settings taking effect immediately, they do not actually take effect until new statistics are collected (as can be verified with [`EXPLAIN`]({% link {{ page.version.version }}/explain.md %})). - As a workaround, disable and enable forecasting at the [cluster]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-clusters) or [table]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-tables) level. This will invalidate the statistics cache and cause these settings to take effect immediately. 
[#123852](https://github.com/cockroachdb/cockroach/issues/123852) \ No newline at end of file + As a workaround, disable and enable forecasting at the [cluster]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-clusters) or [table]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-tables) level. This will invalidate the statistics cache and cause these settings to take effect immediately. #123852 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/full-text-search-unsupported.md b/src/current/_includes/v25.4/known-limitations/full-text-search-unsupported.md index 7b5a83f2cae..2be666994f1 100644 --- a/src/current/_includes/v25.4/known-limitations/full-text-search-unsupported.md +++ b/src/current/_includes/v25.4/known-limitations/full-text-search-unsupported.md @@ -11,4 +11,4 @@ - `!! tsquery` comparisons. - `tsquery @> tsquery` and `tsquery <@ tsquery` comparisons. -[#41288](https://github.com/cockroachdb/cockroach/issues/41288) \ No newline at end of file +#41288 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/generic-query-plan-limitations.md b/src/current/_includes/v25.4/known-limitations/generic-query-plan-limitations.md index e28e66d5f32..03d72195334 100644 --- a/src/current/_includes/v25.4/known-limitations/generic-query-plan-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/generic-query-plan-limitations.md @@ -1,2 +1,2 @@ -- Because [generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache) use lookup joins instead of the scans and revscans used by custom query plans, generic query plans do not perform as well as custom query plans in some cases. 
[#128916](https://github.com/cockroachdb/cockroach/issues/128916) -- [Generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-type) are not included in the [plan cache]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache). This means a generic query plan built and optimized for a prepared statement in one session cannot be used by another session. To reuse generic query plans for maximum performance, a prepared statement should be executed multiple times instead of prepared and executed once. [#128911](https://github.com/cockroachdb/cockroach/issues/128911) \ No newline at end of file +- Because [generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache) use lookup joins instead of the scans and revscans used by custom query plans, generic query plans do not perform as well as custom query plans in some cases. #128916 +- [Generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-type) are not included in the [plan cache]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache). This means a generic query plan built and optimized for a prepared statement in one session cannot be used by another session. To reuse generic query plans for maximum performance, a prepared statement should be executed multiple times instead of prepared and executed once. #128911 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/geospatial-heterogeneous-architectures.md b/src/current/_includes/v25.4/known-limitations/geospatial-heterogeneous-architectures.md index 4bb9633f138..55b706c3d6d 100644 --- a/src/current/_includes/v25.4/known-limitations/geospatial-heterogeneous-architectures.md +++ b/src/current/_includes/v25.4/known-limitations/geospatial-heterogeneous-architectures.md @@ -1 +1 @@ -Clusters that mix `s390x` with other CPU architectures are unsupported for geospatial workloads. 
Due to differences in how trigonometric functions are computed on `s390x` systems, geospatial queries in heterogeneous clusters with `s390x` are likely to get incorrect results. This can include taking a backup on one architecture and restoring it on another. [#148783](https://github.com/cockroachdb/cockroach/issues/148783) \ No newline at end of file +Clusters that mix `s390x` with other CPU architectures are unsupported for geospatial workloads. Due to differences in how trigonometric functions are computed on `s390x` systems, geospatial queries in heterogeneous clusters with `s390x` are likely to get incorrect results. This can include taking a backup on one architecture and restoring it on another. #148783 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/int-limitations.md b/src/current/_includes/v25.4/known-limitations/int-limitations.md index 6333dbf6ed5..defda16db88 100644 --- a/src/current/_includes/v25.4/known-limitations/int-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/int-limitations.md @@ -1 +1 @@ -- When setting the `default_int_size` [session variable]({% link {{ page.version.version }}/set-vars.md %}) in a batch of statements such as `SET default_int_size='int4'; SELECT 1::INT`, the `default_int_size` variable will not take effect until the next statement. Statement parsing is asynchronous with statement execution. As a workaround, set `default_int_size` via your database driver, or ensure that `SET default_int_size` is in its own statement. [#32846](https://github.com/cockroachdb/cockroach/issues/32846) +- When setting the `default_int_size` [session variable]({% link {{ page.version.version }}/set-vars.md %}) in a batch of statements such as `SET default_int_size='int4'; SELECT 1::INT`, the `default_int_size` variable will not take effect until the next statement. Statement parsing is asynchronous with statement execution. 
As a workaround, set `default_int_size` via your database driver, or ensure that `SET default_int_size` is in its own statement. #32846 diff --git a/src/current/_includes/v25.4/known-limitations/inverted-index-limitations.md b/src/current/_includes/v25.4/known-limitations/inverted-index-limitations.md index 1b0eb8d4335..8c1668311a9 100644 --- a/src/current/_includes/v25.4/known-limitations/inverted-index-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/inverted-index-limitations.md @@ -1,3 +1,3 @@ -- CockroachDB does not allow inverted indexes with a [`STORING` column]({% link {{ page.version.version }}/create-index.md %}#store-columns). [#88278](https://github.com/cockroachdb/cockroach/issues/88278) -- CockroachDB cannot index-accelerate queries with `@@` predicates when both sides of the operator are variables for `tsvector` and `tsquery` types. [#102731](https://github.com/cockroachdb/cockroach/issues/102731) -- [Left joins]({% link {{ page.version.version }}/joins.md %}#left-outer-joins) and anti joins involving [`JSONB`]({% link {{ page.version.version }}/jsonb.md %}), [`ARRAY`]({% link {{ page.version.version }}/array.md %}), or [spatial-typed]({% link {{ page.version.version }}/export-spatial-data.md %}) columns with a multi-column or [partitioned]({% link {{ page.version.version }}/alter-index.md %}#partition-by) [GIN index](inverted-indexes.html) will not take advantage of the index if the prefix columns of the index are unconstrained, or if they are constrained to multiple, constant values. To work around this limitation, make sure that the prefix columns of the index are either constrained to single constant values, or are part of an equality condition with an input column. [#59649](https://github.com/cockroachdb/cockroach/issues/59649) +- CockroachDB does not allow inverted indexes with a [`STORING` column]({% link {{ page.version.version }}/create-index.md %}#store-columns). 
#88278 +- CockroachDB cannot index-accelerate queries with `@@` predicates when both sides of the operator are variables for `tsvector` and `tsquery` types. #102731 +- [Left joins]({% link {{ page.version.version }}/joins.md %}#left-outer-joins) and anti joins involving [`JSONB`]({% link {{ page.version.version }}/jsonb.md %}), [`ARRAY`]({% link {{ page.version.version }}/array.md %}), or [spatial-typed]({% link {{ page.version.version }}/export-spatial-data.md %}) columns with a multi-column or [partitioned]({% link {{ page.version.version }}/alter-index.md %}#partition-by) [GIN index](inverted-indexes.html) will not take advantage of the index if the prefix columns of the index are unconstrained, or if they are constrained to multiple, constant values. To work around this limitation, make sure that the prefix columns of the index are either constrained to single constant values, or are part of an equality condition with an input column. #59649 diff --git a/src/current/_includes/v25.4/known-limitations/jsonpath-limitations.md b/src/current/_includes/v25.4/known-limitations/jsonpath-limitations.md index 9b51bfb6e87..a74232bd40b 100644 --- a/src/current/_includes/v25.4/known-limitations/jsonpath-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/jsonpath-limitations.md @@ -1,2 +1,2 @@ -- The following keywords are only accepted in lowercase: `strict`, `lax`, `exists`, `like_regex`, `flag`, `is unknown`, `to`, `last`. [#144255](https://github.com/cockroachdb/cockroach/issues/144255) -- Comparisons involving empty arrays (e.g., `SELECT jsonb_path_query('{"a": [1], "b": []}', '$.a == $.b');`) return `null`, rather than `false` as in PostgreSQL. [#145099](https://github.com/cockroachdb/cockroach/issues/145099) \ No newline at end of file +- The following keywords are only accepted in lowercase: `strict`, `lax`, `exists`, `like_regex`, `flag`, `is unknown`, `to`, `last`. 
#144255 +- Comparisons involving empty arrays (e.g., `SELECT jsonb_path_query('{"a": [1], "b": []}', '$.a == $.b');`) return `null`, rather than `false` as in PostgreSQL. #145099 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/ldr-column-families.md b/src/current/_includes/v25.4/known-limitations/ldr-column-families.md index 2a7c3bbba52..0c9dcd74ae4 100644 --- a/src/current/_includes/v25.4/known-limitations/ldr-column-families.md +++ b/src/current/_includes/v25.4/known-limitations/ldr-column-families.md @@ -1 +1 @@ -Replicating tables cannot contain [column families]({% link {{ page.version.version }}/column-families.md %}). [#133562](https://github.com/cockroachdb/cockroach/issues/133562) \ No newline at end of file +Replicating tables cannot contain [column families]({% link {{ page.version.version }}/column-families.md %}). #133562 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/ldr-composite-primary.md b/src/current/_includes/v25.4/known-limitations/ldr-composite-primary.md index ac897af35a7..2a790952946 100644 --- a/src/current/_includes/v25.4/known-limitations/ldr-composite-primary.md +++ b/src/current/_includes/v25.4/known-limitations/ldr-composite-primary.md @@ -1 +1 @@ -The [primary key]({% link {{ page.version.version }}/primary-key.md %}) in replicating tables cannot contain composite types. [#133572](https://github.com/cockroachdb/cockroach/issues/133572) \ No newline at end of file +The [primary key]({% link {{ page.version.version }}/primary-key.md %}) in replicating tables cannot contain composite types. 
#133572 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/ldr-indexes.md b/src/current/_includes/v25.4/known-limitations/ldr-indexes.md index 0bf7f60c2d4..4936f9868bd 100644 --- a/src/current/_includes/v25.4/known-limitations/ldr-indexes.md +++ b/src/current/_includes/v25.4/known-limitations/ldr-indexes.md @@ -1 +1 @@ -Replicating tables cannot contain an [index]({% link {{ page.version.version }}/indexes.md %}) that requires expression evaluation before insertion. [#133560](https://github.com/cockroachdb/cockroach/issues/133560) \ No newline at end of file +Replicating tables cannot contain an [index]({% link {{ page.version.version }}/indexes.md %}) that requires expression evaluation before insertion. #133560 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/ldr-sequences.md b/src/current/_includes/v25.4/known-limitations/ldr-sequences.md index 4e39f3630e3..693c0cc685b 100644 --- a/src/current/_includes/v25.4/known-limitations/ldr-sequences.md +++ b/src/current/_includes/v25.4/known-limitations/ldr-sequences.md @@ -1 +1 @@ -Replicating table cannot reference [sequences]({% link {{ page.version.version }}/create-sequence.md %}). [#132303](https://github.com/cockroachdb/cockroach/issues/132303) \ No newline at end of file +Replicating table cannot reference [sequences]({% link {{ page.version.version }}/create-sequence.md %}). #132303 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/ldr-triggers.md b/src/current/_includes/v25.4/known-limitations/ldr-triggers.md index 55f8e885b97..a587568b69a 100644 --- a/src/current/_includes/v25.4/known-limitations/ldr-triggers.md +++ b/src/current/_includes/v25.4/known-limitations/ldr-triggers.md @@ -1 +1 @@ -Replicating tables cannot reference triggers. [#132301](https://github.com/cockroachdb/cockroach/issues/132301) \ No newline at end of file +Replicating tables cannot reference triggers. 
#132301 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/ldr-udfs.md b/src/current/_includes/v25.4/known-limitations/ldr-udfs.md index fb642f14751..62eea605b52 100644 --- a/src/current/_includes/v25.4/known-limitations/ldr-udfs.md +++ b/src/current/_includes/v25.4/known-limitations/ldr-udfs.md @@ -1 +1 @@ -Replicating tables cannot reference [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). [#132302](https://github.com/cockroachdb/cockroach/issues/132302) \ No newline at end of file +Replicating tables cannot reference [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). #132302 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/like-escape-performance.md b/src/current/_includes/v25.4/known-limitations/like-escape-performance.md index 845fdddeeb9..45d8f380e3d 100644 --- a/src/current/_includes/v25.4/known-limitations/like-escape-performance.md +++ b/src/current/_includes/v25.4/known-limitations/like-escape-performance.md @@ -1 +1 @@ -`LIKE` queries with an `ESCAPE` clause cannot use index acceleration, which can result in significantly slower performance compared to standard `LIKE` queries. [#30192](https://github.com/cockroachdb/cockroach/issues/30192) +`LIKE` queries with an `ESCAPE` clause cannot use index acceleration, which can result in significantly slower performance compared to standard `LIKE` queries. 
#30192 diff --git a/src/current/_includes/v25.4/known-limitations/locality-optimized-search-virtual-computed-columns.md b/src/current/_includes/v25.4/known-limitations/locality-optimized-search-virtual-computed-columns.md index d6acf418aa8..39b3f2cb00e 100644 --- a/src/current/_includes/v25.4/known-limitations/locality-optimized-search-virtual-computed-columns.md +++ b/src/current/_includes/v25.4/known-limitations/locality-optimized-search-virtual-computed-columns.md @@ -1 +1 @@ -- {% if page.name == "cost-based-optimizer.md" %} Locality optimized search {% else %} [Locality optimized search]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) {% endif %} does not work for queries that use [partitioned unique indexes]({% link {{ page.version.version }}/partitioning.md %}#partition-using-a-secondary-index) on [virtual computed columns](computed-columns.html#virtual-computed-columns). A workaround for computed columns is to make the virtual computed column a [stored computed column](computed-columns.html#stored-computed-columns). Locality optimized search does not work for queries that use partitioned unique [expression indexes](expression-indexes.html). [#68129](https://github.com/cockroachdb/cockroach/issues/68129) +- {% if page.name == "cost-based-optimizer.md" %} Locality optimized search {% else %} [Locality optimized search]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) {% endif %} does not work for queries that use [partitioned unique indexes]({% link {{ page.version.version }}/partitioning.md %}#partition-using-a-secondary-index) on [virtual computed columns](computed-columns.html#virtual-computed-columns). A workaround for computed columns is to make the virtual computed column a [stored computed column](computed-columns.html#stored-computed-columns). 
Locality optimized search does not work for queries that use partitioned unique [expression indexes](expression-indexes.html). #68129 diff --git a/src/current/_includes/v25.4/known-limitations/materialized-views-no-stats.md b/src/current/_includes/v25.4/known-limitations/materialized-views-no-stats.md index 02f2bd787c4..2bfe00d5307 100644 --- a/src/current/_includes/v25.4/known-limitations/materialized-views-no-stats.md +++ b/src/current/_includes/v25.4/known-limitations/materialized-views-no-stats.md @@ -1 +1 @@ -- The optimizer may not select the most optimal query plan when querying materialized views because CockroachDB does not [collect statistics]({% link {{ page.version.version }}/cost-based-optimizer.md %}#table-statistics) on materialized views. [#78181](https://github.com/cockroachdb/cockroach/issues/78181). +- The optimizer may not select the most optimal query plan when querying materialized views because CockroachDB does not [collect statistics]({% link {{ page.version.version }}/cost-based-optimizer.md %}#table-statistics) on materialized views. #78181. diff --git a/src/current/_includes/v25.4/known-limitations/max-row-size-limitations.md b/src/current/_includes/v25.4/known-limitations/max-row-size-limitations.md index 9d4d3e12918..705e6fcc9f7 100644 --- a/src/current/_includes/v25.4/known-limitations/max-row-size-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/max-row-size-limitations.md @@ -1 +1 @@ -- The `sql.guardrails.max_row_size_err` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) misses large rows caused by indexed virtual computed columns. This is because the guardrail only checks the size of primary key rows, not secondary index rows. [#69540](https://github.com/cockroachdb/cockroach/issues/69540) +- The `sql.guardrails.max_row_size_err` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) misses large rows caused by indexed virtual computed columns. 
This is because the guardrail only checks the size of primary key rows, not secondary index rows. #69540 diff --git a/src/current/_includes/v25.4/known-limitations/multiple-arbiter-indexes.md b/src/current/_includes/v25.4/known-limitations/multiple-arbiter-indexes.md index c9861623314..28b41eca491 100644 --- a/src/current/_includes/v25.4/known-limitations/multiple-arbiter-indexes.md +++ b/src/current/_includes/v25.4/known-limitations/multiple-arbiter-indexes.md @@ -1 +1 @@ -CockroachDB does not currently support multiple arbiter indexes for [`INSERT ON CONFLICT DO UPDATE`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause), and will return an error if there are multiple unique or exclusion constraints matching the `ON CONFLICT DO UPDATE` specification. [#53170](https://github.com/cockroachdb/cockroach/issues/53170) \ No newline at end of file +CockroachDB does not currently support multiple arbiter indexes for [`INSERT ON CONFLICT DO UPDATE`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause), and will return an error if there are multiple unique or exclusion constraints matching the `ON CONFLICT DO UPDATE` specification. #53170 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/node-shutdown-limitations.md b/src/current/_includes/v25.4/known-limitations/node-shutdown-limitations.md index f35f858bdc1..d7eb712ee06 100644 --- a/src/current/_includes/v25.4/known-limitations/node-shutdown-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/node-shutdown-limitations.md @@ -2,4 +2,4 @@ - The `cockroach node decommission --wait-all` command was run and then interrupted - The `cockroach node decommission --wait=none` command was run - This is because the state flip is effected by the CLI program at the end. Only the CLI (or its underlying API call) is able to finalize the "decommissioned" state. 
If the command is interrupted, or `--wait=none` is used, the state will only flip to "decommissioned" when the CLI program is run again after decommissioning has done all its work. [#94430](https://github.com/cockroachdb/cockroach/issues/94430) + This is because the state flip is effected by the CLI program at the end. Only the CLI (or its underlying API call) is able to finalize the "decommissioned" state. If the command is interrupted, or `--wait=none` is used, the state will only flip to "decommissioned" when the CLI program is run again after decommissioning has done all its work. #94430 diff --git a/src/current/_includes/v25.4/known-limitations/null-limitations.md b/src/current/_includes/v25.4/known-limitations/null-limitations.md index ad2efce233a..ba230019779 100644 --- a/src/current/_includes/v25.4/known-limitations/null-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/null-limitations.md @@ -1 +1 @@ -- By default, CockroachDB orders `NULL`s before all other values. For compatibility with PostgreSQL, the `null_ordered_last` [session variable]({% link {{ page.version.version }}/set-vars.md %}) was added, which changes the default to order `NULL` values after all other values. This works in most cases, due to some transformations CockroachDB makes in the optimizer to add extra ordering columns. However, it does not work when the ordering column is a tuple. [#93558](https://github.com/cockroachdb/cockroach/issues/93558) +- By default, CockroachDB orders `NULL`s before all other values. For compatibility with PostgreSQL, the `null_ordered_last` [session variable]({% link {{ page.version.version }}/set-vars.md %}) was added, which changes the default to order `NULL` values after all other values. This works in most cases, due to some transformations CockroachDB makes in the optimizer to add extra ordering columns. However, it does not work when the ordering column is a tuple. 
#93558 diff --git a/src/current/_includes/v25.4/known-limitations/online-schema-changes-limitations.md b/src/current/_includes/v25.4/known-limitations/online-schema-changes-limitations.md index be6d00c26ce..8f58936ac08 100644 --- a/src/current/_includes/v25.4/known-limitations/online-schema-changes-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/online-schema-changes-limitations.md @@ -28,9 +28,9 @@ You cannot start an online schema change on a table if a [primary key change]({% Some [schema changes]({% link {{ page.version.version }}/online-schema-changes.md %}) that [drop columns]({% link {{ page.version.version }}/alter-table.md %}#drop-column) cannot be [rolled back]({% link {{ page.version.version }}/rollback-transaction.md %}) properly. -In some cases, the rollback will succeed, but the column data might be partially or totally missing, or stale due to the asynchronous nature of the schema change. [#46541](https://github.com/cockroachdb/cockroach/issues/46541) +In some cases, the rollback will succeed, but the column data might be partially or totally missing, or stale due to the asynchronous nature of the schema change. #46541 -In other cases, the rollback will fail in such a way that will never be cleaned up properly, leaving the table descriptor in a state where no other schema changes can be run successfully. [#47712](https://github.com/cockroachdb/cockroach/issues/47712) +In other cases, the rollback will fail in such a way that will never be cleaned up properly, leaving the table descriptor in a state where no other schema changes can be run successfully. 
#47712 To reduce the chance that a column drop will roll back incorrectly: diff --git a/src/current/_includes/v25.4/known-limitations/partition-limitations.md b/src/current/_includes/v25.4/known-limitations/partition-limitations.md index f8ba5e979fa..b0bcd4b1d51 100644 --- a/src/current/_includes/v25.4/known-limitations/partition-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/partition-limitations.md @@ -1,3 +1,3 @@ - When defining a [table partition]({% link {{ page.version.version }}/partitioning.md %}), either during table creation or table alteration, it is not possible to use placeholders in the `PARTITION BY` clause. - CockroachDB does not currently support dropping a single partition from a table. In order to remove partitions, you can [repartition]({% unless page.name == "partitioning.md" %}{% link {{ page.version.version }}/partitioning.md %}{% endunless %}#repartition-a-table) the table. -- In cases where the partition definition includes a comparison with `NULL` and a query constraint, incorrect query plans are returned. However, this case uses non-standard partitioning which defines partitions which could never hold values, so it is not likely to occur in production environments. [#82774](https://github.com/cockroachdb/cockroach/issues/82774) +- In cases where the partition definition includes a comparison with `NULL` and a query constraint, incorrect query plans are returned. However, this case uses non-standard partitioning which defines partitions which could never hold values, so it is not likely to occur in production environments. 
#82774 diff --git a/src/current/_includes/v25.4/known-limitations/plpgsql-limitations.md b/src/current/_includes/v25.4/known-limitations/plpgsql-limitations.md index 60b3104942e..09fd0183a40 100644 --- a/src/current/_includes/v25.4/known-limitations/plpgsql-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/plpgsql-limitations.md @@ -1,23 +1,23 @@ -- It is not possible to use a variable as a target more than once in the same `INTO` clause. For example, `SELECT 1, 2 INTO x, x;`. [#121605](https://github.com/cockroachdb/cockroach/issues/121605) -- PLpgSQL variable declarations cannot inherit the type of a table row or column using `%TYPE` or `%ROWTYPE` syntax. [#114676](https://github.com/cockroachdb/cockroach/issues/114676) -- PL/pgSQL arguments cannot be referenced with ordinals (e.g., `$1`, `$2`). [#114701](https://github.com/cockroachdb/cockroach/issues/114701) +- It is not possible to use a variable as a target more than once in the same `INTO` clause. For example, `SELECT 1, 2 INTO x, x;`. #121605 +- PLpgSQL variable declarations cannot inherit the type of a table row or column using `%TYPE` or `%ROWTYPE` syntax. #114676 +- PL/pgSQL arguments cannot be referenced with ordinals (e.g., `$1`, `$2`). #114701 - The following statements are not supported: - - `FOR` cursor loops, `FOR` query loops, and `FOREACH` loops. [#105246](https://github.com/cockroachdb/cockroach/issues/105246) - - `PERFORM`, `EXECUTE`, `GET DIAGNOSTICS`, and `CASE`. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- PL/pgSQL exception blocks cannot catch [transaction retry errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}). [#111446](https://github.com/cockroachdb/cockroach/issues/111446) -- `RAISE` statements cannot be annotated with names of schema objects related to the error (i.e., using `COLUMN`, `CONSTRAINT`, `DATATYPE`, `TABLE`, or `SCHEMA`). 
[#106237](https://github.com/cockroachdb/cockroach/issues/106237) -- `RAISE` statements message the client directly, and do not produce log output. [#117750](https://github.com/cockroachdb/cockroach/issues/117750) -- `ASSERT` debugging checks are not supported. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- `RECORD` parameters and variables are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). [#105713](https://github.com/cockroachdb/cockroach/issues/105713) -- Variable shadowing (e.g., declaring a variable with the same name in an inner block) is not supported in PL/pgSQL. [#117508](https://github.com/cockroachdb/cockroach/issues/117508) -- Syntax for accessing members of composite types without parentheses is not supported. [#114687](https://github.com/cockroachdb/cockroach/issues/114687) -- `NOT NULL` variable declarations are not supported. [#105243](https://github.com/cockroachdb/cockroach/issues/105243) -- Cursors opened in PL/pgSQL execute their queries on opening, affecting performance and resource usage. [#111479](https://github.com/cockroachdb/cockroach/issues/111479) -- Cursors in PL/pgSQL cannot be declared with arguments. [#117746](https://github.com/cockroachdb/cockroach/issues/117746) -- `OPEN FOR EXECUTE` is not supported for opening cursors. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- The `print_strict_params` option is not supported in PL/pgSQL. [#123671](https://github.com/cockroachdb/cockroach/issues/123671) -- The `FOUND` local variable, which checks whether a statement affected any rows, is not supported in PL/pgSQL. [#122306](https://github.com/cockroachdb/cockroach/issues/122306) -- By default, when a PL/pgSQL variable conflicts with a column name, CockroachDB resolves the ambiguity by treating it as a column reference rather than a variable reference. 
This behavior differs from PostgreSQL, where an ambiguous column error is reported, and it is possible to change the `plpgsql.variable_conflict` setting in order to prefer either columns or variables. [#115680](https://github.com/cockroachdb/cockroach/issues/115680) -- It is not possible to define a `RECORD`-returning PL/pgSQL function that returns different-typed expressions from different `RETURN` statements. CockroachDB requires a consistent return type for `RECORD`-returning functions. [#115384](https://github.com/cockroachdb/cockroach/issues/115384) -- Variables cannot be declared with an associated collation using the `COLLATE` keyword. [#105245](https://github.com/cockroachdb/cockroach/issues/105245) -- Variables cannot be accessed using the `label.var_name` pattern. [#122322](https://github.com/cockroachdb/cockroach/issues/122322) \ No newline at end of file + - `FOR` cursor loops, `FOR` query loops, and `FOREACH` loops. #105246 + - `PERFORM`, `EXECUTE`, `GET DIAGNOSTICS`, and `CASE`. #117744 +- PL/pgSQL exception blocks cannot catch [transaction retry errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}). #111446 +- `RAISE` statements cannot be annotated with names of schema objects related to the error (i.e., using `COLUMN`, `CONSTRAINT`, `DATATYPE`, `TABLE`, or `SCHEMA`). #106237 +- `RAISE` statements message the client directly, and do not produce log output. #117750 +- `ASSERT` debugging checks are not supported. #117744 +- `RECORD` parameters and variables are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). #105713 +- Variable shadowing (e.g., declaring a variable with the same name in an inner block) is not supported in PL/pgSQL. #117508 +- Syntax for accessing members of composite types without parentheses is not supported. #114687 +- `NOT NULL` variable declarations are not supported. 
#105243 +- Cursors opened in PL/pgSQL execute their queries on opening, affecting performance and resource usage. #111479 +- Cursors in PL/pgSQL cannot be declared with arguments. #117746 +- `OPEN FOR EXECUTE` is not supported for opening cursors. #117744 +- The `print_strict_params` option is not supported in PL/pgSQL. #123671 +- The `FOUND` local variable, which checks whether a statement affected any rows, is not supported in PL/pgSQL. #122306 +- By default, when a PL/pgSQL variable conflicts with a column name, CockroachDB resolves the ambiguity by treating it as a column reference rather than a variable reference. This behavior differs from PostgreSQL, where an ambiguous column error is reported, and it is possible to change the `plpgsql.variable_conflict` setting in order to prefer either columns or variables. #115680 +- It is not possible to define a `RECORD`-returning PL/pgSQL function that returns different-typed expressions from different `RETURN` statements. CockroachDB requires a consistent return type for `RECORD`-returning functions. #115384 +- Variables cannot be declared with an associated collation using the `COLLATE` keyword. #105245 +- Variables cannot be accessed using the `label.var_name` pattern. #122322 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/read-committed-limitations.md b/src/current/_includes/v25.4/known-limitations/read-committed-limitations.md index b4722a0c369..1beeca3c6ce 100644 --- a/src/current/_includes/v25.4/known-limitations/read-committed-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/read-committed-limitations.md @@ -1,7 +1,7 @@ -- Mixed-isolation-level workloads must enable foreign-key check locking for `SERIALIZABLE` transactions to avoid race conditions. 
[#151663](https://github.com/cockroachdb/cockroach/issues/151663#issuecomment-3222083180) -- Schema changes (e.g., [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}), [`CREATE SCHEMA`]({% link {{ page.version.version }}/create-schema.md %}), [`CREATE INDEX`]({% link {{ page.version.version }}/create-index.md %})) cannot be performed within explicit `READ COMMITTED` transactions when the [`autocommit_before_ddl` session setting]({% link {{page.version.version}}/set-vars.md %}#autocommit-before-ddl) is set to `off`, and will cause transactions to abort. As a workaround, [set the transaction's isolation level]({% link {{ page.version.version }}/read-committed.md %}#set-the-current-transaction-to-read-committed) to `SERIALIZABLE`. [#114778](https://github.com/cockroachdb/cockroach/issues/114778) -- Multi-column-family checks during updates are not supported under `READ COMMITTED` isolation. [#112488](https://github.com/cockroachdb/cockroach/issues/112488) +- Mixed-isolation-level workloads must enable foreign-key check locking for `SERIALIZABLE` transactions to avoid race conditions. #151663 +- Schema changes (e.g., [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}), [`CREATE SCHEMA`]({% link {{ page.version.version }}/create-schema.md %}), [`CREATE INDEX`]({% link {{ page.version.version }}/create-index.md %})) cannot be performed within explicit `READ COMMITTED` transactions when the [`autocommit_before_ddl` session setting]({% link {{page.version.version}}/set-vars.md %}#autocommit-before-ddl) is set to `off`, and will cause transactions to abort. As a workaround, [set the transaction's isolation level]({% link {{ page.version.version }}/read-committed.md %}#set-the-current-transaction-to-read-committed) to `SERIALIZABLE`. #114778 +- Multi-column-family checks during updates are not supported under `READ COMMITTED` isolation. 
#112488 - Because locks acquired by [foreign key]({% link {{ page.version.version }}/foreign-key.md %}) checks, [`SELECT FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}), and [`SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) are fully replicated under `READ COMMITTED` isolation, some queries experience a delay for Raft replication. - [Foreign key]({% link {{ page.version.version }}/foreign-key.md %}) checks are not performed in parallel under `READ COMMITTED` isolation. - [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements are less optimized under `READ COMMITTED` isolation than under `SERIALIZABLE` isolation. Under `READ COMMITTED` isolation, `SELECT FOR UPDATE` and `SELECT FOR SHARE` usually perform an extra lookup join for every locked table when compared to the same queries under `SERIALIZABLE`. In addition, some optimization steps (such as de-correlation of correlated [subqueries]({% link {{ page.version.version }}/subqueries.md %})) are not currently performed on these queries. -- Regardless of isolation level, [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements in CockroachDB do not prevent insertion of new rows matching the search condition (i.e., [phantom reads]({% link {{ page.version.version }}/read-committed.md %}#non-repeatable-reads-and-phantom-reads)). This matches PostgreSQL behavior at all isolation levels. [#120673](https://github.com/cockroachdb/cockroach/issues/120673) \ No newline at end of file +- Regardless of isolation level, [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements in CockroachDB do not prevent insertion of new rows matching the search condition (i.e., [phantom reads]({% link {{ page.version.version }}/read-committed.md %}#non-repeatable-reads-and-phantom-reads)). 
This matches PostgreSQL behavior at all isolation levels. #120673 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/restore-multiregion-match.md b/src/current/_includes/v25.4/known-limitations/restore-multiregion-match.md index ab2f1029ecd..634d695246e 100644 --- a/src/current/_includes/v25.4/known-limitations/restore-multiregion-match.md +++ b/src/current/_includes/v25.4/known-limitations/restore-multiregion-match.md @@ -47,4 +47,4 @@ ALTER DATABASE destination_database SET PRIMARY REGION "us-east1"; ~~~ - [#71071](https://github.com/cockroachdb/cockroach/issues/71071) \ No newline at end of file + #71071 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/restore-tables-non-multi-reg.md b/src/current/_includes/v25.4/known-limitations/restore-tables-non-multi-reg.md index 5390f2d09ee..e05bc340141 100644 --- a/src/current/_includes/v25.4/known-limitations/restore-tables-non-multi-reg.md +++ b/src/current/_includes/v25.4/known-limitations/restore-tables-non-multi-reg.md @@ -1 +1 @@ -Restoring [`GLOBAL`]({% link {{ page.version.version }}/table-localities.md %}#global-tables) and [`REGIONAL BY TABLE`]({% link {{ page.version.version }}/table-localities.md %}#regional-tables) tables into a **non**-multi-region database is not supported. [#71502](https://github.com/cockroachdb/cockroach/issues/71502) +Restoring [`GLOBAL`]({% link {{ page.version.version }}/table-localities.md %}#global-tables) and [`REGIONAL BY TABLE`]({% link {{ page.version.version }}/table-localities.md %}#regional-tables) tables into a **non**-multi-region database is not supported. 
#71502 diff --git a/src/current/_includes/v25.4/known-limitations/restore-udf.md b/src/current/_includes/v25.4/known-limitations/restore-udf.md index a4a4bc080fe..e4180219a25 100644 --- a/src/current/_includes/v25.4/known-limitations/restore-udf.md +++ b/src/current/_includes/v25.4/known-limitations/restore-udf.md @@ -1 +1 @@ -`RESTORE` will not restore a table that references a [UDF]({% link {{ page.version.version }}/user-defined-functions.md %}), unless you skip restoring the function with the {% if page.name == "restore.md" %} [`skip_missing_udfs`](#skip-missing-udfs) {% else %} [`skip_missing_udfs`]({% link {{ page.version.version }}/restore.md %}#skip-missing-udfs) {% endif %} option. Alternatively, take a [database-level backup]({% link {{ page.version.version }}/backup.md %}#back-up-a-database) to include everything needed to restore the table. [#118195](https://github.com/cockroachdb/cockroach/issues/118195) \ No newline at end of file +`RESTORE` will not restore a table that references a [UDF]({% link {{ page.version.version }}/user-defined-functions.md %}), unless you skip restoring the function with the {% if page.name == "restore.md" %} [`skip_missing_udfs`](#skip-missing-udfs) {% else %} [`skip_missing_udfs`]({% link {{ page.version.version }}/restore.md %}#skip-missing-udfs) {% endif %} option. Alternatively, take a [database-level backup]({% link {{ page.version.version }}/backup.md %}#back-up-a-database) to include everything needed to restore the table. 
#118195 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/rls-update-set-where-returning.md b/src/current/_includes/v25.4/known-limitations/rls-update-set-where-returning.md index 450de11683e..e6f2b296ba4 100644 --- a/src/current/_includes/v25.4/known-limitations/rls-update-set-where-returning.md +++ b/src/current/_includes/v25.4/known-limitations/rls-update-set-where-returning.md @@ -1 +1 @@ -`UPDATE` statements whose `SET`, `WHERE`, or `RETURNING` clauses do not read existing column values can be mistakenly filtered by row-level security `SELECT` policies, causing the statement to affect no rows. [#145894](https://github.com/cockroachdb/cockroach/issues/145894) \ No newline at end of file +`UPDATE` statements whose `SET`, `WHERE`, or `RETURNING` clauses do not read existing column values can be mistakenly filtered by row-level security `SELECT` policies, causing the statement to affect no rows. #145894 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/rls-values-on-conflict-do-nothing.md b/src/current/_includes/v25.4/known-limitations/rls-values-on-conflict-do-nothing.md index c85dea7987a..d274e573dc9 100644 --- a/src/current/_includes/v25.4/known-limitations/rls-values-on-conflict-do-nothing.md +++ b/src/current/_includes/v25.4/known-limitations/rls-values-on-conflict-do-nothing.md @@ -1 +1 @@ -`ON CONFLICT ... DO NOTHING`: CockroachDB does not run the constraint and row-level policy checks on the `VALUES` clause if the candidate row has a conflict. [#35370](https://github.com/cockroachdb/cockroach/issues/35370). +`ON CONFLICT ... DO NOTHING`: CockroachDB does not run the constraint and row-level policy checks on the `VALUES` clause if the candidate row has a conflict. #35370. 
diff --git a/src/current/_includes/v25.4/known-limitations/routine-limitations.md b/src/current/_includes/v25.4/known-limitations/routine-limitations.md index 7c958004f70..1f5228b9e77 100644 --- a/src/current/_includes/v25.4/known-limitations/routine-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/routine-limitations.md @@ -1,8 +1,8 @@ -- Routines cannot be invoked with named arguments, e.g., `SELECT foo(a => 1, b => 2);` or `SELECT foo(b := 1, a := 2);`. [#122264](https://github.com/cockroachdb/cockroach/issues/122264) -- Routines cannot be created if they reference temporary tables. [#121375](https://github.com/cockroachdb/cockroach/issues/121375) -- Routines cannot be created with unnamed `INOUT` parameters. For example, `CREATE PROCEDURE p(INOUT INT) AS $$ BEGIN NULL; END; $$ LANGUAGE PLpgSQL;`. [#121251](https://github.com/cockroachdb/cockroach/issues/121251) -- Routines cannot be created if they return fewer columns than declared. For example, `CREATE FUNCTION f(OUT sum INT, INOUT a INT, INOUT b INT) LANGUAGE SQL AS $$ SELECT (a + b, b); $$;`. [#121247](https://github.com/cockroachdb/cockroach/issues/121247) -- Routines cannot be created with an `OUT` parameter of type `RECORD`. [#123448](https://github.com/cockroachdb/cockroach/issues/123448) -- DDL statements (e.g., `CREATE TABLE`, `CREATE INDEX`) are not allowed within UDFs or stored procedures. [#110080](https://github.com/cockroachdb/cockroach/issues/110080) -- Polymorphic types cannot be cast to other types (e.g., `TEXT`) within routine parameters. [#123536](https://github.com/cockroachdb/cockroach/issues/123536) -- Routine parameters and return types cannot be declared using the `ANYENUM` polymorphic type, which is able to match any [`ENUM`]({% link {{ page.version.version }}/enum.md %}) type. 
[123048](https://github.com/cockroachdb/cockroach/issues/123048) \ No newline at end of file +- Routines cannot be invoked with named arguments, e.g., `SELECT foo(a => 1, b => 2);` or `SELECT foo(b := 1, a := 2);`. #122264 +- Routines cannot be created if they reference temporary tables. #121375 +- Routines cannot be created with unnamed `INOUT` parameters. For example, `CREATE PROCEDURE p(INOUT INT) AS $$ BEGIN NULL; END; $$ LANGUAGE PLpgSQL;`. #121251 +- Routines cannot be created if they return fewer columns than declared. For example, `CREATE FUNCTION f(OUT sum INT, INOUT a INT, INOUT b INT) LANGUAGE SQL AS $$ SELECT (a + b, b); $$;`. #121247 +- Routines cannot be created with an `OUT` parameter of type `RECORD`. #123448 +- DDL statements (e.g., `CREATE TABLE`, `CREATE INDEX`) are not allowed within UDFs or stored procedures. #110080 +- Polymorphic types cannot be cast to other types (e.g., `TEXT`) within routine parameters. #123536 +- Routine parameters and return types cannot be declared using the `ANYENUM` polymorphic type, which is able to match any [`ENUM`]({% link {{ page.version.version }}/enum.md %}) type. #123048 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/row-level-ttl-limitations.md b/src/current/_includes/v25.4/known-limitations/row-level-ttl-limitations.md index c386ba576d7..4460901566d 100644 --- a/src/current/_includes/v25.4/known-limitations/row-level-ttl-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/row-level-ttl-limitations.md @@ -1,5 +1,5 @@ - Any queries you run against tables with Row-Level TTL enabled (or against tables that have [foreign keys]({% link {{page.version.version}}/foreign-key.md %}) that reference TTL-enabled tables) do not filter out expired rows from the result set (this includes [`UPDATE`s]({% link {{ page.version.version }}/update.md %}) and [`DELETE`s]({% link {{ page.version.version }}/delete.md %})). This feature may be added in a future release.
For now, follow the instructions in [Filter out expired rows from a selection query]({% link {{ page.version.version }}/row-level-ttl.md %}#filter-out-expired-rows-from-a-selection-query). -- Tables with Row-Level TTL can be referenced by [foreign keys]({% link {{page.version.version}}/foreign-key.md %}). TTL deletes are issued as regular [`DELETE`]({% link {{ page.version.version }}/delete.md %}) statements, so inbound foreign keys apply. If an inbound foreign key uses `ON DELETE RESTRICT` and referencing rows exist, the TTL job fails with a foreign key violation. [#101372](https://github.com/cockroachdb/cockroach/issues/101372) +- Tables with Row-Level TTL can be referenced by [foreign keys]({% link {{page.version.version}}/foreign-key.md %}). TTL deletes are issued as regular [`DELETE`]({% link {{ page.version.version }}/delete.md %}) statements, so inbound foreign keys apply. If an inbound foreign key uses `ON DELETE RESTRICT` and referencing rows exist, the TTL job fails with a foreign key violation. #101372 - Enabling Row-Level TTL on a table with multiple [secondary indexes]({% link {{ page.version.version }}/indexes.md %}) can have negative performance impacts on a cluster, including increased [latency]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#service-latency) and [contention]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention). This is particularly true for large tables with terabytes of data and billions of rows that are split up into multiple ranges across separate nodes. - Increased latency may occur because secondary indexes aren't necessarily stored on the same underlying [ranges]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-range) as a table's [primary indexes]({% link {{ page.version.version }}/indexes.md %}). 
Further, the secondary indexes' ranges may have [leaseholders]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-leaseholder) located on different nodes than the primary index. - Increased contention may occur because [intents]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#write-intents) must be written as part of performing the deletions. diff --git a/src/current/_includes/v25.4/known-limitations/savepoint-limitations.md b/src/current/_includes/v25.4/known-limitations/savepoint-limitations.md index 58451f4ae28..1bc0bdd8d44 100644 --- a/src/current/_includes/v25.4/known-limitations/savepoint-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/savepoint-limitations.md @@ -1 +1 @@ -- Transactions with [priority `HIGH`]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities) that contain DDL and `ROLLBACK TO SAVEPOINT` are not supported, as they could result in a deadlock. [#46414](https://github.com/cockroachdb/cockroach/issues/46414) +- Transactions with [priority `HIGH`]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities) that contain DDL and `ROLLBACK TO SAVEPOINT` are not supported, as they could result in a deadlock. #46414 diff --git a/src/current/_includes/v25.4/known-limitations/select-for-update-limitations.md b/src/current/_includes/v25.4/known-limitations/select-for-update-limitations.md index 9c4ab3fd4a1..89152e6fad0 100644 --- a/src/current/_includes/v25.4/known-limitations/select-for-update-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/select-for-update-limitations.md @@ -1,8 +1,8 @@ -- `SKIP LOCKED` cannot be used for tables with multiple [column families]({% link {{ page.version.version }}/column-families.md %}). [#160961](https://github.com/cockroachdb/cockroach/issues/160961) +- `SKIP LOCKED` cannot be used for tables with multiple [column families]({% link {{ page.version.version }}/column-families.md %}). 
#160961 - By default under `SERIALIZABLE` isolation, locks acquired using `SELECT ... FOR UPDATE` and `SELECT ... FOR SHARE` are implemented as fast, in-memory [unreplicated locks](architecture/transaction-layer.html#unreplicated-locks). If a [lease transfer]({% link {{ page.version.version }}/architecture/replication-layer.md %}#leases) or [range split/merge]({% link {{ page.version.version }}/architecture/distribution-layer.md %}#range-merges) occurs on a range held by an unreplicated lock, the lock is dropped. The following behaviors can occur: - The desired ordering of concurrent accesses to one or more rows of a table expressed by your use of `SELECT ... FOR UPDATE` may not be preserved (that is, a transaction _B_ against some table _T_ that was supposed to wait behind another transaction _A_ operating on _T_ may not wait for transaction _A_). - The transaction that acquired the (now dropped) unreplicated lock may fail to commit, leading to [transaction retry errors with code `40001`]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}) and the [`restart transaction` error message]({% link {{ page.version.version }}/common-errors.md %}#restart-transaction). When running under `SERIALIZABLE` isolation, `SELECT ... FOR UPDATE` and `SELECT ... FOR SHARE` locks should be thought of as best-effort, and should not be relied upon for correctness. Note that [serialization]({% link {{ page.version.version }}/demo-serializable.md %}) is preserved despite this limitation. This limitation is fixed when the `enable_durable_locking_for_serializable` [session setting]({% link {{ page.version.version }}/session-variables.md %}#enable-durable-locking-for-serializable) is set to `true`. This limitation does **not** apply to [`READ COMMITTED`]({% link {{ page.version.version }}/read-committed.md %}) transactions. - The new implementation of `SELECT FOR UPDATE` is not yet the default setting under `SERIALIZABLE` isolation. 
It can be used under `SERIALIZABLE` isolation by setting the `optimizer_use_lock_op_for_serializable` [session setting]({% link {{ page.version.version }}/session-variables.md %}) to `true`. [#114737](https://github.com/cockroachdb/cockroachdb/issues/114737) -- `SELECT FOR UPDATE` does not lock completely-`NULL` column families in multi-column-family tables. [#116836](https://github.com/cockroachdb/cockroach/issues/116836) \ No newline at end of file +- `SELECT FOR UPDATE` does not lock completely-`NULL` column families in multi-column-family tables. #116836 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/set-transaction-no-rollback.md b/src/current/_includes/v25.4/known-limitations/set-transaction-no-rollback.md index 4f4e5af2aab..966dcd82d48 100644 --- a/src/current/_includes/v25.4/known-limitations/set-transaction-no-rollback.md +++ b/src/current/_includes/v25.4/known-limitations/set-transaction-no-rollback.md @@ -30,4 +30,4 @@ UTC ~~~ - [#69396](https://github.com/cockroachdb/cockroach/issues/69396), [#148766](https://github.com/cockroachdb/cockroach/issues/148766) \ No newline at end of file + #69396, #148766 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/show-backup-symlink.md b/src/current/_includes/v25.4/known-limitations/show-backup-symlink.md index 38ba86fb28f..1c7a5612242 100644 --- a/src/current/_includes/v25.4/known-limitations/show-backup-symlink.md +++ b/src/current/_includes/v25.4/known-limitations/show-backup-symlink.md @@ -1 +1 @@ -[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}) does not support listing backups if the [`nodelocal`]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}) storage location is a symlink. 
[#70260](https://github.com/cockroachdb/cockroach/issues/70260) \ No newline at end of file +[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}) does not support listing backups if the [`nodelocal`]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}) storage location is a symlink. #70260 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/spatial-limitations.md b/src/current/_includes/v25.4/known-limitations/spatial-limitations.md index 6167ecb02fa..1a7573a23dd 100644 --- a/src/current/_includes/v25.4/known-limitations/spatial-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/spatial-limitations.md @@ -1,10 +1,10 @@ -- Not all [PostGIS spatial functions](https://postgis.net/docs/reference.html) are supported. [#49203](https://github.com/cockroachdb/cockroach/issues/49203) -- The `AddGeometryColumn` [spatial function]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) only allows constant arguments. [#49402](https://github.com/cockroachdb/cockroach/issues/49402) -- The `AddGeometryColumn` spatial function only allows the `true` value for its `use_typmod` parameter. [#49448](https://github.com/cockroachdb/cockroach/issues/49448) -- CockroachDB does not support the `@` operator. Instead of using `@` in spatial expressions, we recommend using the inverse, with `~`. For example, instead of `a @ b`, use `b ~ a`. [#56124](https://github.com/cockroachdb/cockroach/issues/56124) -- CockroachDB does not yet support [`INSERT`]({% link {{ page.version.version }}/insert.md %})s into the [`spatial_ref_sys` table]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-system-tables). This limitation also blocks the [`ogr2ogr -f PostgreSQL` file conversion command](https://gdal.org/programs/ogr2ogr.html#cmdoption-ogr2ogr-f). 
[#55903](https://github.com/cockroachdb/cockroach/issues/55903) -- CockroachDB does not yet support [k-nearest neighbors](https://wikipedia.org/wiki/K-nearest_neighbors_algorithm). [#55227](https://github.com/cockroachdb/cockroach/issues/55227) -- CockroachDB does not support using [schema name prefixes]({% link {{ page.version.version }}/sql-name-resolution.md %}#how-name-resolution-works) to refer to [data types]({% link {{ page.version.version }}/data-types.md %}) with type modifiers (e.g., `public.geometry(linestring, 4326)`). Instead, use fully-unqualified names to refer to data types with type modifiers (e.g., `geometry(linestring,4326)`). [#56492](https://github.com/cockroachdb/cockroach/issues/56492) +- Not all [PostGIS spatial functions](https://postgis.net/docs/reference.html) are supported. #49203 +- The `AddGeometryColumn` [spatial function]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) only allows constant arguments. #49402 +- The `AddGeometryColumn` spatial function only allows the `true` value for its `use_typmod` parameter. #49448 +- CockroachDB does not support the `@` operator. Instead of using `@` in spatial expressions, we recommend using the inverse, with `~`. For example, instead of `a @ b`, use `b ~ a`. #56124 +- CockroachDB does not yet support [`INSERT`]({% link {{ page.version.version }}/insert.md %})s into the [`spatial_ref_sys` table]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-system-tables). This limitation also blocks the [`ogr2ogr -f PostgreSQL` file conversion command](https://gdal.org/programs/ogr2ogr.html#cmdoption-ogr2ogr-f). #55903 +- CockroachDB does not yet support [k-nearest neighbors](https://wikipedia.org/wiki/K-nearest_neighbors_algorithm). 
#55227 +- CockroachDB does not support using [schema name prefixes]({% link {{ page.version.version }}/sql-name-resolution.md %}#how-name-resolution-works) to refer to [data types]({% link {{ page.version.version }}/data-types.md %}) with type modifiers (e.g., `public.geometry(linestring, 4326)`). Instead, use fully-unqualified names to refer to data types with type modifiers (e.g., `geometry(linestring,4326)`). #56492 - {% include {{ page.version.version }}/known-limitations/srid-4326-limitations.md %} - {% include {{ page.version.version }}/known-limitations/geospatial-heterogeneous-architectures.md %} -- [Spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled on ARM Macs due to an issue with macOS code signing for the [GEOS](https://libgeos.org/) libraries. Users needing spatial features on an ARM Mac may instead [use Rosetta](https://developer.apple.com/documentation/virtualization/running_intel_binaries_in_linux_vms_with_rosetta) to [run the Intel binary]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#install-binary) or use the [Docker image]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#use-docker) distribution. [#93161](https://github.com/cockroachdb/cockroach/issues/93161) +- [Spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled on ARM Macs due to an issue with macOS code signing for the [GEOS](https://libgeos.org/) libraries. Users needing spatial features on an ARM Mac may instead [use Rosetta](https://developer.apple.com/documentation/virtualization/running_intel_binaries_in_linux_vms_with_rosetta) to [run the Intel binary]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#install-binary) or use the [Docker image]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#use-docker) distribution. 
#93161 diff --git a/src/current/_includes/v25.4/known-limitations/sql-cursors.md b/src/current/_includes/v25.4/known-limitations/sql-cursors.md index a3e42b9a3ec..d0ce2b3c3af 100644 --- a/src/current/_includes/v25.4/known-limitations/sql-cursors.md +++ b/src/current/_includes/v25.4/known-limitations/sql-cursors.md @@ -1,8 +1,8 @@ CockroachDB implements SQL [cursor]({% link {{ page.version.version }}/cursors.md %}) support with the following limitations: -- `DECLARE` only supports forward cursors. Reverse cursors created with `DECLARE SCROLL` are not supported. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- `FETCH` supports forward, relative, and absolute variants, but only for forward cursors. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- `BINARY CURSOR`, which returns data in the Postgres binary format, is not supported. [#77099](https://github.com/cockroachdb/cockroach/issues/77099) -- Scrollable cursor (also known as reverse `FETCH`) is not supported. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- [`SELECT ... FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) with a cursor is not supported. [#77103](https://github.com/cockroachdb/cockroach/issues/77103) -- Respect for [`SAVEPOINT`s]({% link {{ page.version.version }}/savepoint.md %}) is not supported. Cursor definitions do not disappear properly if rolled back to a `SAVEPOINT` from before they were created. [#77104](https://github.com/cockroachdb/cockroach/issues/77104) +- `DECLARE` only supports forward cursors. Reverse cursors created with `DECLARE SCROLL` are not supported. #77102 +- `FETCH` supports forward, relative, and absolute variants, but only for forward cursors. #77102 +- `BINARY CURSOR`, which returns data in the Postgres binary format, is not supported. #77099 +- Scrollable cursor (also known as reverse `FETCH`) is not supported. #77102 +- [`SELECT ... 
FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) with a cursor is not supported. #77103 +- Respect for [`SAVEPOINT`s]({% link {{ page.version.version }}/savepoint.md %}) is not supported. Cursor definitions do not disappear properly if rolled back to a `SAVEPOINT` from before they were created. #77104 diff --git a/src/current/_includes/v25.4/known-limitations/sql-guardrails-limitations.md b/src/current/_includes/v25.4/known-limitations/sql-guardrails-limitations.md index 6d8d60ac614..02ebf98bcd7 100644 --- a/src/current/_includes/v25.4/known-limitations/sql-guardrails-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/sql-guardrails-limitations.md @@ -1,2 +1,2 @@ -- The `transaction_rows_read_err` and `transaction_rows_written_err` [session settings]({% link {{ page.version.version }}/set-vars.md %}) limit the number of rows read or written by a single [transaction]({% link {{ page.version.version }}/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction). These session settings will fail the transaction with an error, but not until the current query finishes executing and the results have been returned to the client. [#70473](https://github.com/cockroachdb/cockroach/issues/70473) -- The `sql.guardrails.max_row_size_err` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) misses large rows caused by indexed virtual computed columns. This is because the guardrail only checks the size of primary key rows, not secondary index rows. [#69540](https://github.com/cockroachdb/cockroach/issues/69540) +- The `transaction_rows_read_err` and `transaction_rows_written_err` [session settings]({% link {{ page.version.version }}/set-vars.md %}) limit the number of rows read or written by a single [transaction]({% link {{ page.version.version }}/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction). 
These session settings will fail the transaction with an error, but not until the current query finishes executing and the results have been returned to the client. #70473 +- The `sql.guardrails.max_row_size_err` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) misses large rows caused by indexed virtual computed columns. This is because the guardrail only checks the size of primary key rows, not secondary index rows. #69540 diff --git a/src/current/_includes/v25.4/known-limitations/srid-4326-limitations.md b/src/current/_includes/v25.4/known-limitations/srid-4326-limitations.md index b556a9fbecd..294a1a53bd4 100644 --- a/src/current/_includes/v25.4/known-limitations/srid-4326-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/srid-4326-limitations.md @@ -1 +1 @@ -Defining a custom SRID by inserting rows into [`spatial_ref_sys`]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial_ref_sys) is not currently supported. [#55903](https://github.com/cockroachdb/cockroach/issues/55903) \ No newline at end of file +Defining a custom SRID by inserting rows into [`spatial_ref_sys`]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial_ref_sys) is not currently supported. #55903 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/stats-refresh-upgrade.md b/src/current/_includes/v25.4/known-limitations/stats-refresh-upgrade.md index 3d5a8d26325..3325dfaa53b 100644 --- a/src/current/_includes/v25.4/known-limitations/stats-refresh-upgrade.md +++ b/src/current/_includes/v25.4/known-limitations/stats-refresh-upgrade.md @@ -1 +1 @@ -- The [automatic statistics refresher]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-statistics-refresh-rate) automatically checks whether it needs to refresh statistics for every table in the database upon startup of each node in the cluster. 
If statistics for a table have not been refreshed in a while, this will trigger collection of statistics for that table. If statistics have been refreshed recently, it will not force a refresh. As a result, the automatic statistics refresher does not necessarily perform a refresh of statistics after an [upgrade]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}). This could cause a problem, for example, if the upgrade moves from a version without [histograms]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-histogram-collection) to a version with histograms. To refresh statistics manually, use [`CREATE STATISTICS`](create-statistics.html). [#54816](https://github.com/cockroachdb/cockroach/issues/54816) +- The [automatic statistics refresher]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-statistics-refresh-rate) automatically checks whether it needs to refresh statistics for every table in the database upon startup of each node in the cluster. If statistics for a table have not been refreshed in a while, this will trigger collection of statistics for that table. If statistics have been refreshed recently, it will not force a refresh. As a result, the automatic statistics refresher does not necessarily perform a refresh of statistics after an [upgrade]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}). This could cause a problem, for example, if the upgrade moves from a version without [histograms]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-histogram-collection) to a version with histograms. To refresh statistics manually, use [`CREATE STATISTICS`](create-statistics.html). 
#54816 diff --git a/src/current/_includes/v25.4/known-limitations/stored-proc-limitations.md b/src/current/_includes/v25.4/known-limitations/stored-proc-limitations.md index b2aae6069c2..74f4d86dfb3 100644 --- a/src/current/_includes/v25.4/known-limitations/stored-proc-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/stored-proc-limitations.md @@ -1,2 +1,2 @@ -- Pausable portals are not supported with `CALL` statements for stored procedures. [#151529](https://github.com/cockroachdb/cockroach/issues/151529) -- `COMMIT` and `ROLLBACK` statements are not supported within nested procedures. [#122266](https://github.com/cockroachdb/cockroach/issues/122266) \ No newline at end of file +- Pausable portals are not supported with `CALL` statements for stored procedures. #151529 +- `COMMIT` and `ROLLBACK` statements are not supported within nested procedures. #122266 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/subquery-mutations-limitations.md b/src/current/_includes/v25.4/known-limitations/subquery-mutations-limitations.md index 771eba80bc6..d4deeaee17c 100644 --- a/src/current/_includes/v25.4/known-limitations/subquery-mutations-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/subquery-mutations-limitations.md @@ -2,4 +2,4 @@ - Set the `sql.multiple_modifications_of_table.enabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) to `true`. - Use the `enable_multiple_modifications_of_table` [session variable]({% link {{ page.version.version }}/set-vars.md %}). - If multiple mutations inside the same statement affect different tables with [`FOREIGN KEY`]({% link {{ page.version.version }}/foreign-key.md %}) relations and `ON CASCADE` clauses between them, the results will be different from what is expected in PostgreSQL. 
[#70731](https://github.com/cockroachdb/cockroach/issues/70731) + If multiple mutations inside the same statement affect different tables with [`FOREIGN KEY`]({% link {{ page.version.version }}/foreign-key.md %}) relations and `ON CASCADE` clauses between them, the results will be different from what is expected in PostgreSQL. #70731 diff --git a/src/current/_includes/v25.4/known-limitations/transaction-row-count-limitations.md b/src/current/_includes/v25.4/known-limitations/transaction-row-count-limitations.md index ded62d6d9be..a54f15d61a7 100644 --- a/src/current/_includes/v25.4/known-limitations/transaction-row-count-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/transaction-row-count-limitations.md @@ -1 +1 @@ -- The `transaction_rows_read_err` and `transaction_rows_written_err` [session settings]({% link {{ page.version.version }}/set-vars.md %}) limit the number of rows read or written by a single [transaction]({% link {{ page.version.version }}/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction). These session settings will fail the transaction with an error, but not until the current query finishes executing and the results have been returned to the client. [#70473](https://github.com/cockroachdb/cockroach/issues/70473) +- The `transaction_rows_read_err` and `transaction_rows_written_err` [session settings]({% link {{ page.version.version }}/set-vars.md %}) limit the number of rows read or written by a single [transaction]({% link {{ page.version.version }}/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction). These session settings will fail the transaction with an error, but not until the current query finishes executing and the results have been returned to the client. 
#70473 diff --git a/src/current/_includes/v25.4/known-limitations/trigger-limitations.md b/src/current/_includes/v25.4/known-limitations/trigger-limitations.md index 66fcd0018ed..aaf49607557 100644 --- a/src/current/_includes/v25.4/known-limitations/trigger-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/trigger-limitations.md @@ -1,10 +1,10 @@ -- `CREATE OR REPLACE TRIGGER` is not supported. [#128422](https://github.com/cockroachdb/cockroach/issues/128422) -- Statement-level triggers are not supported. [#126362](https://github.com/cockroachdb/cockroach/issues/126362) -- `INSTEAD OF` triggers are not supported. [#126363](https://github.com/cockroachdb/cockroach/issues/126363) -- A [trigger function]({% link {{ page.version.version }}/triggers.md %}#trigger-function) that is used in an existing trigger cannot be replaced with `CREATE OR REPLACE` syntax. To use `CREATE OR REPLACE`, first [drop any triggers]({% link {{ page.version.version }}/drop-trigger.md %}) that are using the function. [#134555](https://github.com/cockroachdb/cockroach/issues/134555) -- Hidden columns are not visible to triggers. [#133331](https://github.com/cockroachdb/cockroach/issues/133331) -- The `REFERENCING` clause for `CREATE TRIGGER` is not supported. [#135655](https://github.com/cockroachdb/cockroach/issues/135655) -- CockroachDB uses one-based indexing for the `TG_ARGV` array to maintain consistency with its array indexing system. This differs from PostgreSQL, where `TG_ARGV` uses zero-based indexing, unlike other PostgreSQL arrays. Trigger functions that reference `TG_ARGV` need to be adjusted when migrating from PostgreSQL. [#135311](https://github.com/cockroachdb/cockroach/issues/135311) -- `UPDATE` triggers with a column list (using `UPDATE OF column_name` syntax) are not supported. [#135656](https://github.com/cockroachdb/cockroach/issues/135656) -- Statement-level triggers for `TRUNCATE` events are not supported. 
[#135657](https://github.com/cockroachdb/cockroach/issues/135657) +- `CREATE OR REPLACE TRIGGER` is not supported. #128422 +- Statement-level triggers are not supported. #126362 +- `INSTEAD OF` triggers are not supported. #126363 +- A [trigger function]({% link {{ page.version.version }}/triggers.md %}#trigger-function) that is used in an existing trigger cannot be replaced with `CREATE OR REPLACE` syntax. To use `CREATE OR REPLACE`, first [drop any triggers]({% link {{ page.version.version }}/drop-trigger.md %}) that are using the function. #134555 +- Hidden columns are not visible to triggers. #133331 +- The `REFERENCING` clause for `CREATE TRIGGER` is not supported. #135655 +- CockroachDB uses one-based indexing for the `TG_ARGV` array to maintain consistency with its array indexing system. This differs from PostgreSQL, where `TG_ARGV` uses zero-based indexing, unlike other PostgreSQL arrays. Trigger functions that reference `TG_ARGV` need to be adjusted when migrating from PostgreSQL. #135311 +- `UPDATE` triggers with a column list (using `UPDATE OF column_name` syntax) are not supported. #135656 +- Statement-level triggers for `TRUNCATE` events are not supported. #135657 - {% include {{ page.version.version }}/known-limitations/drop-trigger-limitations.md %} \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/trigram-unsupported-syntax.md b/src/current/_includes/v25.4/known-limitations/trigram-unsupported-syntax.md index 494730c7ae8..a981c4915b0 100644 --- a/src/current/_includes/v25.4/known-limitations/trigram-unsupported-syntax.md +++ b/src/current/_includes/v25.4/known-limitations/trigram-unsupported-syntax.md @@ -6,4 +6,4 @@ - Acceleration on [regex string matching]({% link {{ page.version.version }}/scalar-expressions.md %}#string-matching-using-posix-regular-expressions). - `%` comparisons, `show_trgm`, and trigram index creation on [collated strings]({% link {{ page.version.version }}/collate.md %}). 
-[#41285](https://github.com/cockroachdb/cockroach/issues/41285) \ No newline at end of file +#41285 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/udf-limitations.md b/src/current/_includes/v25.4/known-limitations/udf-limitations.md index 0f44ac320f5..c212736717b 100644 --- a/src/current/_includes/v25.4/known-limitations/udf-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/udf-limitations.md @@ -1,9 +1,9 @@ -- User-defined functions are not supported in partial index predicates. [#155488](https://github.com/cockroachdb/cockroach/issues/155488) -- Views cannot reference a UDF that contains mutation statements (`INSERT`, `UPDATE`, `UPSERT`, `DELETE`). [#151686](https://github.com/cockroachdb/cockroach/issues/151686) -- A `RECORD`-returning UDF cannot be created without a `RETURN` statement in the root block, which would restrict the wildcard type to a concrete one. [#122945](https://github.com/cockroachdb/cockroach/issues/122945) +- User-defined functions are not supported in partial index predicates. #155488 +- Views cannot reference a UDF that contains mutation statements (`INSERT`, `UPDATE`, `UPSERT`, `DELETE`). #151686 +- A `RECORD`-returning UDF cannot be created without a `RETURN` statement in the root block, which would restrict the wildcard type to a concrete one. #122945 - User-defined functions are not currently supported in: - - Expressions (column, index, constraint) in tables. [#87699](https://github.com/cockroachdb/cockroach/issues/87699) - - Partial index predicates. [#155488](https://github.com/cockroachdb/cockroach/issues/155488) -- User-defined functions cannot call themselves recursively. [#93049](https://github.com/cockroachdb/cockroach/issues/93049) -- The `setval` function cannot be resolved when used inside UDF bodies. 
[#110860](https://github.com/cockroachdb/cockroach/issues/110860) -- Casting subqueries to [user-defined types]({% link {{ page.version.version }}/create-type.md %}) in UDFs is not supported. [#108184](https://github.com/cockroachdb/cockroach/issues/108184) \ No newline at end of file + - Expressions (column, index, constraint) in tables. #87699 + - Partial index predicates. #155488 +- User-defined functions cannot call themselves recursively. #93049 +- The `setval` function cannot be resolved when used inside UDF bodies. #110860 +- Casting subqueries to [user-defined types]({% link {{ page.version.version }}/create-type.md %}) in UDFs is not supported. #108184 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/vector-limitations.md b/src/current/_includes/v25.4/known-limitations/vector-limitations.md index 97ed7c47599..053087efe7e 100644 --- a/src/current/_includes/v25.4/known-limitations/vector-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/vector-limitations.md @@ -1,5 +1,5 @@ - {% include {{ page.version.version }}/sql/vector-batch-inserts.md %} -- `IMPORT INTO` is not supported on tables with vector indexes. You can import the vectors first and create the index after import is complete. [#145227](https://github.com/cockroachdb/cockroach/issues/145227) -- The distance functions `vector_l1_ops`, `bit_hamming_ops`, and `bit_jaccard_ops` are not implemented. [#147839](https://github.com/cockroachdb/cockroach/issues/147839) -- Index acceleration with filters is only supported if the filters match prefix columns. [#146145](https://github.com/cockroachdb/cockroach/issues/146145) -- Index recommendations are not provided for vector indexes. [#146146](https://github.com/cockroachdb/cockroach/issues/146146) \ No newline at end of file +- `IMPORT INTO` is not supported on tables with vector indexes. You can import the vectors first and create the index after import is complete. 
#145227 +- The distance functions `vector_l1_ops`, `bit_hamming_ops`, and `bit_jaccard_ops` are not implemented. #147839 +- Index acceleration with filters is only supported if the filters match prefix columns. #146145 +- Index recommendations are not provided for vector indexes. #146146 \ No newline at end of file diff --git a/src/current/_includes/v25.4/known-limitations/vectorized-engine-limitations.md b/src/current/_includes/v25.4/known-limitations/vectorized-engine-limitations.md index daea59ebf88..da227ad4825 100644 --- a/src/current/_includes/v25.4/known-limitations/vectorized-engine-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/vectorized-engine-limitations.md @@ -1,2 +1,2 @@ -- The vectorized engine does not support queries containing a join filtered with an [`ON` expression]({% link {{ page.version.version }}/joins.md %}#supported-join-conditions). [#38018](https://github.com/cockroachdb/cockroach/issues/38018) +- The vectorized engine does not support queries containing a join filtered with an [`ON` expression]({% link {{ page.version.version }}/joins.md %}#supported-join-conditions). #38018 - The vectorized engine does not support [working with spatial data]({% link {{ page.version.version }}/export-spatial-data.md %}). Queries with [geospatial functions]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) or [spatial data]({% link {{ page.version.version }}/export-spatial-data.md %}) will revert to the row-oriented execution engine. diff --git a/src/current/_includes/v25.4/known-limitations/view-limitations.md b/src/current/_includes/v25.4/known-limitations/view-limitations.md index 22b8a6c29fa..3402185907e 100644 --- a/src/current/_includes/v25.4/known-limitations/view-limitations.md +++ b/src/current/_includes/v25.4/known-limitations/view-limitations.md @@ -1,2 +1,2 @@ -- The `security_invoker` attribute for views is not supported. 
Views always use the view definer's privileges when checking permissions. [#138918](https://github.com/cockroachdb/cockroach/issues/138918) -- Casting subqueries to [user-defined types]({% link {{ page.version.version }}/create-type.md %}) (including ENUMs) in views is not supported. [#108184](https://github.com/cockroachdb/cockroach/issues/108184) +- The `security_invoker` attribute for views is not supported. Views always use the view definer's privileges when checking permissions. #138918 +- Casting subqueries to [user-defined types]({% link {{ page.version.version }}/create-type.md %}) (including ENUMs) in views is not supported. #108184 diff --git a/src/current/_includes/v25.4/misc/tooling.md b/src/current/_includes/v25.4/misc/tooling.md index dcd24363435..01dc63c840f 100644 --- a/src/current/_includes/v25.4/misc/tooling.md +++ b/src/current/_includes/v25.4/misc/tooling.md @@ -23,7 +23,7 @@ Customers should contact their account team before moving production workloads t Unless explicitly stated, support for a [driver](#drivers) or [data access framework](#data-access-frameworks-e-g-orms) does not include [automatic, client-side transaction retry handling]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#client-side-retry-handling). For client-side transaction retry handling samples, see [Example Apps]({% link {{ page.version.version }}/example-apps.md %}). {{site.data.alerts.end}} -If you encounter problems using CockroachDB with any of the tools listed on this page, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward better support. +If you encounter problems using CockroachDB with any of the tools listed on this page, please open an issue with details to help us make progress toward better support. 
For a list of tools supported by the CockroachDB community, see [Third-Party Tools Supported by the Community]({% link {{ page.version.version }}/community-tooling.md %}). @@ -32,23 +32,23 @@ For a list of tools supported by the CockroachDB community, see [Third-Party Too | Language | Driver | Latest tested version | Support level | CockroachDB adapter | Tutorial | |----------+--------+-----------------------+---------------------+---------------------+----------| | C | [libpq](http://www.postgresql.org/docs/13/static/libpq.html)| PostgreSQL 13 | Partial | N/A | N/A | -| C# (.NET) | [Npgsql](https://www.nuget.org/packages/Npgsql/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/npgsql.go ||var npgsqlSupportedTag = "v||"\n\n %} | Full | N/A | [Build a C# App with CockroachDB (Npgsql)](build-a-csharp-app-with-cockroachdb.html) | -| Go | [pgx](https://github.com/jackc/pgx/releases)


[pq](https://github.com/lib/pq) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgx.go ||var supportedPGXTag = "||"\n\n %}
(use latest version of CockroachDB adapter)
{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/libpq.go ||var libPQSupportedTag = "||"\n\n %} | Full


Full | [`crdbpgx`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbpgx)
(includes client-side transaction retry handling)
N/A | [Build a Go App with CockroachDB (pgx)](build-a-go-app-with-cockroachdb.html)


[Build a Go App with CockroachDB (pq)](build-a-go-app-with-cockroachdb-pq.html) | -| Java | [JDBC](https://jdbc.postgresql.org/download/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgjdbc.go ||var supportedPGJDBCTag = "||"\n\n %} | Full | N/A | [Build a Java App with CockroachDB (JDBC)](build-a-java-app-with-cockroachdb.html) | +| C# (.NET) | [Npgsql](https://www.nuget.org/packages/Npgsql/) | 7.0.2 | Full | N/A | [Build a C# App with CockroachDB (Npgsql)](build-a-csharp-app-with-cockroachdb.html) | +| Go | [pgx](https://github.com/jackc/pgx/releases)


[pq](https://github.com/lib/pq) | v5.3.1
(use latest version of CockroachDB adapter)
v1.10.5 | Full


Full | [`crdbpgx`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbpgx)
(includes client-side transaction retry handling)
N/A | [Build a Go App with CockroachDB (pgx)](build-a-go-app-with-cockroachdb.html)


[Build a Go App with CockroachDB (pq)](build-a-go-app-with-cockroachdb-pq.html) | +| Java | [JDBC](https://jdbc.postgresql.org/download/) | REL42.7.3 | Full | N/A | [Build a Java App with CockroachDB (JDBC)](build-a-java-app-with-cockroachdb.html) | | JavaScript | [pg](https://www.npmjs.com/package/pg) | 8.2.1 | Full | N/A | [Build a Node.js App with CockroachDB (pg)](build-a-nodejs-app-with-cockroachdb.html) | -| Python | [psycopg3](https://www.psycopg.org/psycopg3/docs/)


[psycopg2](https://www.psycopg.org/docs/install.html)


[asyncpg](https://magicstack.github.io/asyncpg/current/index.html) | 3.0.16


2.8.6


{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/asyncpg.go || var asyncpgSupportedTag = "||"\n\n %} | Full


Full


Partial | N/A


N/A


N/A | [Build a Python App with CockroachDB (psycopg3)](build-a-python-app-with-cockroachdb-psycopg3.html)


[Build a Python App with CockroachDB (psycopg2)](build-a-python-app-with-cockroachdb.html)


[Build a Python App with CockroachDB (asyncpg)](build-a-python-app-with-cockroachdb-asyncpg.html) | -| Ruby | [pg](https://rubygems.org/gems/pg) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/ruby_pg.go ||var rubyPGVersion = "||"\n\n %} | Full | N/A | [Build a Ruby App with CockroachDB (pg)](build-a-ruby-app-with-cockroachdb.html) | +| Python | [psycopg3](https://www.psycopg.org/psycopg3/docs/)


[psycopg2](https://www.psycopg.org/docs/install.html)


[asyncpg](https://magicstack.github.io/asyncpg/current/index.html) | 3.0.16


2.8.6


v0.24.0 | Full


Full


Partial | N/A


N/A


N/A | [Build a Python App with CockroachDB (psycopg3)](build-a-python-app-with-cockroachdb-psycopg3.html)


[Build a Python App with CockroachDB (psycopg2)](build-a-python-app-with-cockroachdb.html)


[Build a Python App with CockroachDB (asyncpg)](build-a-python-app-with-cockroachdb-asyncpg.html) | +| Ruby | [pg](https://rubygems.org/gems/pg) | v1.4.6 | Full | N/A | [Build a Ruby App with CockroachDB (pg)](build-a-ruby-app-with-cockroachdb.html) | | Rust | [rust-postgres](https://github.com/sfackler/rust-postgres) | 0.19.2 | Partial | N/A | [Build a Rust App with CockroachDB]({% link {{ page.version.version }}/build-a-rust-app-with-cockroachdb.md %}) | ## Data access frameworks (e.g., ORMs) | Language | Framework | Latest tested version | Support level | CockroachDB adapter | Tutorial | |----------+-----------+-----------------------+---------------+---------------------+----------| -| Go | [GORM](https://github.com/jinzhu/gorm/releases)


[go-pg](https://github.com/go-pg/pg)
[upper/db](https://github.com/upper/db) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gorm.go ||var gormSupportedTag = "||"\n\n %}


{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gopg.go ||var gopgSupportedTag = "||"\n\n %}
v4 | Full


Full
Full | [`crdbgorm`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbgorm)
(includes client-side transaction retry handling)
N/A
N/A | [Build a Go App with CockroachDB (GORM)](build-a-go-app-with-cockroachdb-gorm.html)


N/A
[Build a Go App with CockroachDB (upper/db)](build-a-go-app-with-cockroachdb-upperdb.html) | -| Java | [Hibernate](https://hibernate.org/orm/)
(including [Hibernate Spatial](https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html#spatial))
[jOOQ](https://www.jooq.org/)
[MyBatis](https://mybatis.org/mybatis-3/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/hibernate.go ||var supportedHibernateTag = "||"\n\n %} (must be at least 5.4.19)


3.13.2 (must be at least 3.13.0)
3.5.5| Full


Full
Full | N/A


N/A
N/A | [Build a Java App with CockroachDB (Hibernate)](build-a-java-app-with-cockroachdb-hibernate.html)


[Build a Java App with CockroachDB (jOOQ)](build-a-java-app-with-cockroachdb-jooq.html)
[Build a Spring App with CockroachDB (MyBatis)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) | -| JavaScript/TypeScript | [Sequelize](https://www.npmjs.com/package/sequelize)


[Knex.js](https://knexjs.org/)
[Prisma](https://prisma.io)
[TypeORM](https://www.npmjs.com/package/typeorm) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/sequelize.go ||var supportedSequelizeCockroachDBRelease = "||"\n\n %}
(use latest version of CockroachDB adapter)
{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/knex.go ||const supportedKnexTag = "||"\n\n %}
3.14.0
0.3.17 {% comment %}{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/typeorm.go ||const supportedTypeORMRelease = "||"\n %}{% endcomment %} | Full


Full
Full
Full | [`sequelize-cockroachdb`](https://www.npmjs.com/package/sequelize-cockroachdb)


N/A
N/A
N/A | [Build a Node.js App with CockroachDB (Sequelize)](build-a-nodejs-app-with-cockroachdb-sequelize.html)


[Build a Node.js App with CockroachDB (Knex.js)](build-a-nodejs-app-with-cockroachdb-knexjs.html)
[Build a Node.js App with CockroachDB (Prisma)](build-a-nodejs-app-with-cockroachdb-prisma.html)
[Build a TypeScript App with CockroachDB (TypeORM)](build-a-typescript-app-with-cockroachdb.html) | -| Ruby | [ActiveRecord](https://rubygems.org/gems/activerecord)
[RGeo/RGeo-ActiveRecord](https://github.com/cockroachdb/activerecord-cockroachdb-adapter#working-with-spatial-data) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/activerecord.go ||var supportedRailsVersion = "||"\nvar %}
(use latest version of CockroachDB adapter) | Full | [`activerecord-cockroachdb-adapter`](https://rubygems.org/gems/activerecord-cockroachdb-adapter)
(includes client-side transaction retry handling) | [Build a Ruby App with CockroachDB (ActiveRecord)](build-a-ruby-app-with-cockroachdb-activerecord.html) | -| Python | [Django](https://pypi.org/project/Django/)
(including [GeoDjango](https://docs.djangoproject.com/en/3.1/ref/contrib/gis/))
[peewee](https://github.com/coleifer/peewee/)
[SQLAlchemy](https://www.sqlalchemy.org/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/django.go ||var djangoSupportedTag = "cockroach-||"\nvar %}
(use latest version of CockroachDB adapter)

3.13.3
0.7.13
1.4.17
(use latest version of CockroachDB adapter) | Full


Full
Full
Full | [`django-cockroachdb`](https://pypi.org/project/django-cockroachdb/)


N/A
N/A
[`sqlalchemy-cockroachdb`](https://pypi.org/project/sqlalchemy-cockroachdb)
(includes client-side transaction retry handling) | [Build a Python App with CockroachDB (Django)](build-a-python-app-with-cockroachdb-django.html)


N/A (See [peewee docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cockroach-database).)
[Build a Python App with CockroachDB (SQLAlchemy)](build-a-python-app-with-cockroachdb-sqlalchemy.html) | +| Go | [GORM](https://github.com/jinzhu/gorm/releases)


[go-pg](https://github.com/go-pg/pg)
[upper/db](https://github.com/upper/db) | v1.24.1


v10.9.0
v4 | Full


Full
Full | [`crdbgorm`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbgorm)
(includes client-side transaction retry handling)
N/A
N/A | [Build a Go App with CockroachDB (GORM)](build-a-go-app-with-cockroachdb-gorm.html)


N/A
[Build a Go App with CockroachDB (upper/db)](build-a-go-app-with-cockroachdb-upperdb.html) | +| Java | [Hibernate](https://hibernate.org/orm/)
(including [Hibernate Spatial](https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html#spatial))
[jOOQ](https://www.jooq.org/)
[MyBatis](https://mybatis.org/mybatis-3/) | 6.6.20 (must be at least 5.4.19)


3.13.2 (must be at least 3.13.0)
3.5.5 | Full


Full
Full | N/A


N/A
N/A | [Build a Java App with CockroachDB (Hibernate)](build-a-java-app-with-cockroachdb-hibernate.html)


[Build a Java App with CockroachDB (jOOQ)](build-a-java-app-with-cockroachdb-jooq.html)
[Build a Spring App with CockroachDB (MyBatis)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) | +| JavaScript/TypeScript | [Sequelize](https://www.npmjs.com/package/sequelize)


[Knex.js](https://knexjs.org/)
[Prisma](https://prisma.io)
[TypeORM](https://www.npmjs.com/package/typeorm) | v6.0.5
(use latest version of CockroachDB adapter)
2.5.1
3.14.0
0.3.17 {% comment %}remove-unsafe-crdb-setting{% endcomment %} | Full


Full
Full
Full | [`sequelize-cockroachdb`](https://www.npmjs.com/package/sequelize-cockroachdb)


N/A
N/A
N/A | [Build a Node.js App with CockroachDB (Sequelize)](build-a-nodejs-app-with-cockroachdb-sequelize.html)


[Build a Node.js App with CockroachDB (Knex.js)](build-a-nodejs-app-with-cockroachdb-knexjs.html)
[Build a Node.js App with CockroachDB (Prisma)](build-a-nodejs-app-with-cockroachdb-prisma.html)
[Build a TypeScript App with CockroachDB (TypeORM)](build-a-typescript-app-with-cockroachdb.html) | +| Ruby | [ActiveRecord](https://rubygems.org/gems/activerecord)
[RGeo/RGeo-ActiveRecord](https://github.com/cockroachdb/activerecord-cockroachdb-adapter#working-with-spatial-data) | 8.1.0
(use latest version of CockroachDB adapter) | Full | [`activerecord-cockroachdb-adapter`](https://rubygems.org/gems/activerecord-cockroachdb-adapter)
(includes client-side transaction retry handling) | [Build a Ruby App with CockroachDB (ActiveRecord)](build-a-ruby-app-with-cockroachdb-activerecord.html) | +| Python | [Django](https://pypi.org/project/Django/)
(including [GeoDjango](https://docs.djangoproject.com/en/3.1/ref/contrib/gis/))
[peewee](https://github.com/coleifer/peewee/)
[SQLAlchemy](https://www.sqlalchemy.org/) | 4.1.x
(use latest version of CockroachDB adapter)

3.13.3
0.7.13
1.4.17
(use latest version of CockroachDB adapter) | Full


Full
Full
Full | [`django-cockroachdb`](https://pypi.org/project/django-cockroachdb/)


N/A
N/A
[`sqlalchemy-cockroachdb`](https://pypi.org/project/sqlalchemy-cockroachdb)
(includes client-side transaction retry handling) | [Build a Python App with CockroachDB (Django)](build-a-python-app-with-cockroachdb-django.html)


N/A (See [peewee docs](https://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cockroach-database).)
[Build a Python App with CockroachDB (SQLAlchemy)](build-a-python-app-with-cockroachdb-sqlalchemy.html) | ## Application frameworks diff --git a/src/current/_includes/v25.4/orchestration/start-cockroachdb-insecure.md b/src/current/_includes/v25.4/orchestration/start-cockroachdb-insecure.md index 3406d48edbb..c9af5cc0267 100644 --- a/src/current/_includes/v25.4/orchestration/start-cockroachdb-insecure.md +++ b/src/current/_includes/v25.4/orchestration/start-cockroachdb-insecure.md @@ -1,10 +1,10 @@ -1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it. +1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it. - Download [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml): + Download [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml ~~~ {{site.data.alerts.callout_info}} @@ -27,11 +27,11 @@ Alternatively, if you'd rather start with a configuration file that has been customized for performance: - 1. 
Download our [performance version of `cockroachdb-statefulset-insecure.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml): + 1. Download our [performance version of `cockroachdb-statefulset-insecure.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml ~~~ 1. Modify the file wherever there is a `TODO` comment. @@ -72,12 +72,12 @@ pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s ~~~ -1. Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: +1. 
Use our [`cluster-init.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml ~~~ ~~~ diff --git a/src/current/_includes/v25.4/orchestration/start-cockroachdb-local-insecure.md b/src/current/_includes/v25.4/orchestration/start-cockroachdb-local-insecure.md index 552cb3cd25f..196a53bce4c 100644 --- a/src/current/_includes/v25.4/orchestration/start-cockroachdb-local-insecure.md +++ b/src/current/_includes/v25.4/orchestration/start-cockroachdb-local-insecure.md @@ -1,8 +1,8 @@ -1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it: +1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it: {% include_cached copy-clipboard.html %} ~~~ shell - $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml + $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml ~~~ ~~~ @@ -41,12 +41,12 @@ pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s ~~~ -1. 
Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: +1. Use our [`cluster-init.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml ~~~ ~~~ diff --git a/src/current/_includes/v25.4/orchestration/start-cockroachdb-secure.md b/src/current/_includes/v25.4/orchestration/start-cockroachdb-secure.md index 972cabc2d8e..a7299e1aa25 100644 --- a/src/current/_includes/v25.4/orchestration/start-cockroachdb-secure.md +++ b/src/current/_includes/v25.4/orchestration/start-cockroachdb-secure.md @@ -1,10 +1,10 @@ ### Configure the cluster -1. Download and modify our [StatefulSet configuration](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml): +1. Download and modify our [StatefulSet configuration](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml ~~~ 1. Update `secretName` with the name of the corresponding node secret. 
diff --git a/src/current/_includes/v25.4/orchestration/test-cluster-secure.md b/src/current/_includes/v25.4/orchestration/test-cluster-secure.md index f255d8d62fc..c146d634626 100644 --- a/src/current/_includes/v25.4/orchestration/test-cluster-secure.md +++ b/src/current/_includes/v25.4/orchestration/test-cluster-secure.md @@ -42,7 +42,7 @@ $ kubectl create \ {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ --f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/client.yaml +-f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/client.yaml ~~~ ~~~ diff --git a/src/current/_includes/v25.4/prod-deployment/decommission-pre-flight-checks.md b/src/current/_includes/v25.4/prod-deployment/decommission-pre-flight-checks.md index b267379384b..70e35e4b72e 100644 --- a/src/current/_includes/v25.4/prod-deployment/decommission-pre-flight-checks.md +++ b/src/current/_includes/v25.4/prod-deployment/decommission-pre-flight-checks.md @@ -14,5 +14,5 @@ Failed running "node decommission" These checks can be skipped by [passing the flag `--checks=skip` to `cockroach node decommission`]({% link {{ page.version.version }}/cockroach-node.md %}#decommission-checks). {{site.data.alerts.callout_info}} -The amount of remaining disk space on other nodes in the cluster is not yet considered as part of the decommissioning pre-flight checks. For more information, see [cockroachdb/cockroach#71757](https://github.com/cockroachdb/cockroach/issues/71757) +The amount of remaining disk space on other nodes in the cluster is not yet considered as part of the decommissioning pre-flight checks. 
For more information, see cockroachdb/cockroach#71757 {{site.data.alerts.end}} diff --git a/src/current/_includes/v25.4/sql/savepoints-and-high-priority-transactions.md b/src/current/_includes/v25.4/sql/savepoints-and-high-priority-transactions.md index c6de489e641..43f09ea415f 100644 --- a/src/current/_includes/v25.4/sql/savepoints-and-high-priority-transactions.md +++ b/src/current/_includes/v25.4/sql/savepoints-and-high-priority-transactions.md @@ -1 +1 @@ -[`ROLLBACK TO SAVEPOINT`]({% link {{ page.version.version }}/rollback-transaction.md %}#rollback-a-nested-transaction) (for either regular savepoints or "restart savepoints" defined with `cockroach_restart`) causes a "feature not supported" error after a DDL statement in a [`HIGH PRIORITY` transaction]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities), in order to avoid a transaction deadlock. For more information, see GitHub issue [#46414](https://www.github.com/cockroachdb/cockroach/issues/46414). +[`ROLLBACK TO SAVEPOINT`]({% link {{ page.version.version }}/rollback-transaction.md %}#rollback-a-nested-transaction) (for either regular savepoints or "restart savepoints" defined with `cockroach_restart`) causes a "feature not supported" error after a DDL statement in a [`HIGH PRIORITY` transaction]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities), in order to avoid a transaction deadlock. For more information, see GitHub issue #46414. diff --git a/src/current/_includes/v25.4/sql/unsupported-postgres-features.md b/src/current/_includes/v25.4/sql/unsupported-postgres-features.md index a89650e38e9..9ad89047189 100644 --- a/src/current/_includes/v25.4/sql/unsupported-postgres-features.md +++ b/src/current/_includes/v25.4/sql/unsupported-postgres-features.md @@ -1,10 +1,10 @@ ### `CREATE DOMAIN` -CockroachDB does not support `CREATE DOMAIN`. Tracking issue: [cockroachdb/cockroach#108659](https://github.com/cockroachdb/cockroach/issues/108659). 
+CockroachDB does not support `CREATE DOMAIN`. Tracking issue: cockroachdb/cockroach#108659. ### PostgreSQL range types -CockroachDB does not support PostgreSQL range types. Tracking issue: [cockroachdb/cockroach#128638](https://github.com/cockroachdb/cockroach/issues/128638). +CockroachDB does not support PostgreSQL range types. Tracking issue: cockroachdb/cockroach#128638. ### Other unsupported features diff --git a/src/current/_includes/v26.1/backward-incompatible/alpha.1.md b/src/current/_includes/v26.1/backward-incompatible/alpha.1.md index 8d4004422ad..f10881f0418 100644 --- a/src/current/_includes/v26.1/backward-incompatible/alpha.1.md +++ b/src/current/_includes/v26.1/backward-incompatible/alpha.1.md @@ -1,15 +1,15 @@ -- CockroachDB no longer performs environment variable expansion in the parameter `--certs-dir`. Uses like `--certs-dir='$HOME/path'` (expansion by CockroachDB) can be replaced by `--certs-dir="$HOME/path"` (expansion by the Unix shell). [#81298][#81298] -- In the Cockroach CLI, [`BOOL` values]({% link {{ page.version.version }}/bool.md %}) are now formatted as `t` or `f` instead of `True` or `False`. [#81943][#81943] -- Removed the `cockroach quit` command. It has been deprecated since v20.1. To [shut down a node]({% link {{ page.version.version }}/node-shutdown.md %}) gracefully, send a `SIGTERM` signal to it. [#82988][#82988] -- Added a cluster version to allow the [Pebble storage engine]({% link {{ page.version.version }}/architecture/storage-layer.md#pebble %}) to recombine certain SSTables (specifically, user keys that are split across multiple files in a level of the [log-structured merge-tree]({% link {{ page.version.version }}/architecture/storage-layer.md#log-structured-merge-trees %})). Recombining the split user keys is required for supporting the range keys feature. 
The migration to recombine the SSTables is expected to be short (split user keys are rare in practice), but will block subsequent migrations until all tables have been recombined. The `storage.marked-for-compaction-files` time series metric can show the progress of the migration. [#84887][#84887] +- CockroachDB no longer performs environment variable expansion in the parameter `--certs-dir`. Uses like `--certs-dir='$HOME/path'` (expansion by CockroachDB) can be replaced by `--certs-dir="$HOME/path"` (expansion by the Unix shell). #81298 +- In the Cockroach CLI, [`BOOL` values]({% link {{ page.version.version }}/bool.md %}) are now formatted as `t` or `f` instead of `True` or `False`. #81943 +- Removed the `cockroach quit` command. It has been deprecated since v20.1. To [shut down a node]({% link {{ page.version.version }}/node-shutdown.md %}) gracefully, send a `SIGTERM` signal to it. #82988 +- Added a cluster version to allow the [Pebble storage engine]({% link {{ page.version.version }}/architecture/storage-layer.md#pebble %}) to recombine certain SSTables (specifically, user keys that are split across multiple files in a level of the [log-structured merge-tree]({% link {{ page.version.version }}/architecture/storage-layer.md#log-structured-merge-trees %})). Recombining the split user keys is required for supporting the range keys feature. The migration to recombine the SSTables is expected to be short (split user keys are rare in practice), but will block subsequent migrations until all tables have been recombined. The `storage.marked-for-compaction-files` time series metric can show the progress of the migration. #84887 - Using a single TCP port listener for both RPC (node-node) and SQL client connections is now deprecated. This capability **will be removed** in the next version of CockroachDB. 
Instead, make one of the following configuration changes to your CockroachDB deployment: - Preferred: keep port `26257` for SQL, and allocate a new port, e.g., `26357`, for node-node RPC connections. For example, you might configure a node with the flags `--listen-addr=:26357 --sql-addr=:26257`, where subsequent nodes seeking to join would then use the flag `--join=othernode:26357,othernode:26257`. This will become the default configuration in the next version of CockroachDB. When using this mode of operation, care should be taken to use a `--join` flag that includes both the previous and new port numbers for other nodes, so that no network partition occurs during the upgrade. - - Optional: keep port `26257` for RPC, and allocate a new port, e.g., `26357`, for SQL connections. For example, you might configure a node with the flags `--listen-addr=:26257 --sql-addr=:26357`. When using this mode of operation, the `--join` flags do not need to be modified. However, SQL client apps or the SQL load balancer configuration (when in use) must be updated to use the new SQL port number. [#85671][#85671] -- If no `nullif` option is specified while using [`IMPORT CSV`]({% link {{ page.version.version }}/import.md %}), then a zero-length string in the input is now treated as `NULL`. The quoted empty string in the input is treated as an empty string. Similarly, if `nullif` is specified, then an unquoted value is treated as `NULL`, and a quoted value is treated as that string. These changes were made to make `IMPORT CSV` behave more similarly to `COPY CSV`. If the previous behavior (i.e., treating either quoted or unquoted values that match the `nullif` setting as `NULL`) is desired, you can use the new `allow_quoted_null` option in the `IMPORT` statement. [#84487][#84487] -- [`COPY FROM`]({% link {{ page.version.version }}/copy.md %}) operations are now atomic by default instead of being segmented into 100 row transactions. 
Set the `copy_from_atomic_enabled` session setting to `false` for the previous behavior. [#85986][#85986] -- The `GRANT` privilege has been removed and replaced by the more granular [`WITH GRANT OPTION`]({% link {{ page.version.version }}/grant.md %}#grant-privileges-with-the-option-to-grant-to-others), which provides control over which privileges are allowed to be granted. [#81310][#81310] -- Removed the ability to cast `int`, `int2`, and `int8` to a `0` length `BIT` or `VARBIT`. [#81266][#81266] -- Removed the deprecated `GRANT` privilege. [#81310][#81310] -- Removed the `ttl_automatic_column` storage parameter. The `crdb_internal_expiration` column is created when `ttl_expire_after` is set and removed when `ttl_expire_after` is reset. [#83134][#83134] -- Removed the byte string parameter in the `crdb_internal.schedule_sql_stats_compaction` function. [#82560][#82560] -- Changed the default value of the `enable_implicit_transaction_for_batch_statements` to `true`. This means that a [batch of statements]({% link {{ page.version.version }}/transactions.md %}#batched-statements) sent in one string separated by semicolons is treated as an implicit transaction. [#76834][#76834] + - Optional: keep port `26257` for RPC, and allocate a new port, e.g., `26357`, for SQL connections. For example, you might configure a node with the flags `--listen-addr=:26257 --sql-addr=:26357`. When using this mode of operation, the `--join` flags do not need to be modified. However, SQL client apps or the SQL load balancer configuration (when in use) must be updated to use the new SQL port number. #85671 +- If no `nullif` option is specified while using [`IMPORT CSV`]({% link {{ page.version.version }}/import.md %}), then a zero-length string in the input is now treated as `NULL`. The quoted empty string in the input is treated as an empty string. Similarly, if `nullif` is specified, then an unquoted value is treated as `NULL`, and a quoted value is treated as that string. 
These changes were made to make `IMPORT CSV` behave more similarly to `COPY CSV`. If the previous behavior (i.e., treating either quoted or unquoted values that match the `nullif` setting as `NULL`) is desired, you can use the new `allow_quoted_null` option in the `IMPORT` statement. #84487 +- [`COPY FROM`]({% link {{ page.version.version }}/copy.md %}) operations are now atomic by default instead of being segmented into 100 row transactions. Set the `copy_from_atomic_enabled` session setting to `false` for the previous behavior. #85986 +- The `GRANT` privilege has been removed and replaced by the more granular [`WITH GRANT OPTION`]({% link {{ page.version.version }}/grant.md %}#grant-privileges-with-the-option-to-grant-to-others), which provides control over which privileges are allowed to be granted. #81310 +- Removed the ability to cast `int`, `int2`, and `int8` to a `0` length `BIT` or `VARBIT`. #81266 +- Removed the deprecated `GRANT` privilege. #81310 +- Removed the `ttl_automatic_column` storage parameter. The `crdb_internal_expiration` column is created when `ttl_expire_after` is set and removed when `ttl_expire_after` is reset. #83134 +- Removed the byte string parameter in the `crdb_internal.schedule_sql_stats_compaction` function. #82560 +- Changed the default value of the `enable_implicit_transaction_for_batch_statements` to `true`. This means that a [batch of statements]({% link {{ page.version.version }}/transactions.md %}#batched-statements) sent in one string separated by semicolons is treated as an implicit transaction. 
#76834 diff --git a/src/current/_includes/v26.1/cdc/avro-udt-composite.md b/src/current/_includes/v26.1/cdc/avro-udt-composite.md index 7a34fbd3253..8374c4d2baf 100644 --- a/src/current/_includes/v26.1/cdc/avro-udt-composite.md +++ b/src/current/_includes/v26.1/cdc/avro-udt-composite.md @@ -1 +1 @@ -A changefeed in [Avro format]({% link {{ page.version.version }}/changefeed-messages.md %}#avro) will not be able to serialize [user-defined composite (tuple) types](create-type.html). [#102903](https://github.com/cockroachdb/cockroach/issues/102903) \ No newline at end of file +A changefeed in [Avro format]({% link {{ page.version.version }}/changefeed-messages.md %}#avro) will not be able to serialize [user-defined composite (tuple) types](create-type.html). #102903 \ No newline at end of file diff --git a/src/current/_includes/v26.1/cdc/csv-udt-composite.md b/src/current/_includes/v26.1/cdc/csv-udt-composite.md index 834bddd8366..39c88f3e06a 100644 --- a/src/current/_includes/v26.1/cdc/csv-udt-composite.md +++ b/src/current/_includes/v26.1/cdc/csv-udt-composite.md @@ -1 +1 @@ -A changefeed emitting [CSV]({% link {{ page.version.version }}/changefeed-messages.md %}#csv) will include `AS` labels in the message format when the changefeed serializes a [user-defined composite type]({% link {{ page.version.version }}/create-type.md %}). [#102905](https://github.com/cockroachdb/cockroach/issues/102905) \ No newline at end of file +A changefeed emitting [CSV]({% link {{ page.version.version }}/changefeed-messages.md %}#csv) will include `AS` labels in the message format when the changefeed serializes a [user-defined composite type]({% link {{ page.version.version }}/create-type.md %}). 
#102905 \ No newline at end of file diff --git a/src/current/_includes/v26.1/faq/what-is-crdb.md b/src/current/_includes/v26.1/faq/what-is-crdb.md index 28857ed61fa..ee04ca10506 100644 --- a/src/current/_includes/v26.1/faq/what-is-crdb.md +++ b/src/current/_includes/v26.1/faq/what-is-crdb.md @@ -1,6 +1,6 @@ CockroachDB is a [distributed SQL](https://www.cockroachlabs.com/blog/what-is-distributed-sql/) database built on a transactional and strongly-consistent key-value store. It **scales** horizontally; **survives** disk, machine, rack, and even datacenter failures with minimal latency disruption and no manual intervention; supports **strongly-consistent** ACID transactions; and provides a familiar **SQL** API for structuring, manipulating, and querying data. -CockroachDB is inspired by Google's [Spanner](http://research.google.com/archive/spanner.html) and [F1](http://research.google.com/pubs/pub38125.html) technologies, and the [source code](https://github.com/cockroachdb/cockroach) is freely available. +CockroachDB is inspired by Google's [Spanner](http://research.google.com/archive/spanner.html) and [F1](http://research.google.com/pubs/pub38125.html) technologies, and the source code is freely available. {{site.data.alerts.callout_success}} For a deeper dive into CockroachDB's capabilities and how it fits into the database landscape, take the free [**Intro to Distributed SQL and CockroachDB**](https://university.cockroachlabs.com/courses/course-v1:crl+intro-to-distributed-sql-and-cockroachdb+self-paced/about) course on Cockroach University. 
diff --git a/src/current/_includes/v26.1/finalization-required/119894.md b/src/current/_includes/v26.1/finalization-required/119894.md index f2b393c3c0e..f5177dcd523 100644 --- a/src/current/_includes/v26.1/finalization-required/119894.md +++ b/src/current/_includes/v26.1/finalization-required/119894.md @@ -1 +1 @@ -[Splits](https://cockroachlabs.com/docs/{{ include.version }}/architecture/distribution-layer#range-splits) no longer hold [latches](https://cockroachlabs.com/docs/architecture/distribution-layer.#latch-manager) for time proportional to the range size while computing [MVCC](https://cockroachlabs.com/docs/{{ include.version }}/architecture/storage-layer#mvcc) statistics. Instead, MVCC statistics are pre-computed before the critical section of the split. As a side effect, the resulting statistics are no longer 100% accurate because they may correctly distribute writes concurrent with the split. To mitigate against this potential inaccuracy, and to prevent the statistics from drifting after successive splits, the existing stored statistics are re-computed and corrected if needed during the non-critical section of the split. [#119894](https://github.com/cockroachdb/cockroach/pull/119894) +[Splits](https://cockroachlabs.com/docs/{{ include.version }}/architecture/distribution-layer#range-splits) no longer hold [latches](https://cockroachlabs.com/docs/architecture/distribution-layer.#latch-manager) for time proportional to the range size while computing [MVCC](https://cockroachlabs.com/docs/{{ include.version }}/architecture/storage-layer#mvcc) statistics. Instead, MVCC statistics are pre-computed before the critical section of the split. As a side effect, the resulting statistics are no longer 100% accurate because they may correctly distribute writes concurrent with the split. 
To mitigate against this potential inaccuracy, and to prevent the statistics from drifting after successive splits, the existing stored statistics are re-computed and corrected if needed during the non-critical section of the split. #119894 diff --git a/src/current/_includes/v26.1/known-limitations/alter-changefeed-cdc-queries.md b/src/current/_includes/v26.1/known-limitations/alter-changefeed-cdc-queries.md index 56dd7eeaacd..4134b3c9f86 100644 --- a/src/current/_includes/v26.1/known-limitations/alter-changefeed-cdc-queries.md +++ b/src/current/_includes/v26.1/known-limitations/alter-changefeed-cdc-queries.md @@ -1 +1 @@ -{% if page.name == "alter-changefeed.md" %} `ALTER CHANGEFEED` {% else %} [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) {% endif %} is not fully supported with changefeeds that use {% if page.name == "cdc-queries.md" %} CDC queries. {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}). {% endif %} You can alter the options that a changefeed uses, but you cannot alter the changefeed target tables. [#83033](https://github.com/cockroachdb/cockroach/issues/83033) \ No newline at end of file +{% if page.name == "alter-changefeed.md" %} `ALTER CHANGEFEED` {% else %} [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) {% endif %} is not fully supported with changefeeds that use {% if page.name == "cdc-queries.md" %} CDC queries. {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}). {% endif %} You can alter the options that a changefeed uses, but you cannot alter the changefeed target tables. 
#83033 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/alter-changefeed-limitations.md b/src/current/_includes/v26.1/known-limitations/alter-changefeed-limitations.md index a183f2964f4..cb75ad71c55 100644 --- a/src/current/_includes/v26.1/known-limitations/alter-changefeed-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/alter-changefeed-limitations.md @@ -1,4 +1,4 @@ -- It is necessary to [`PAUSE`]({% link {{ page.version.version }}/pause-job.md %}) the changefeed before performing any [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) statement. [#77171](https://github.com/cockroachdb/cockroach/issues/77171) +- It is necessary to [`PAUSE`]({% link {{ page.version.version }}/pause-job.md %}) the changefeed before performing any [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) statement. #77171 - CockroachDB does not keep track of the [`initial_scan`]({% link {{ page.version.version }}/create-changefeed.md %}#initial-scan) option applied to tables when it is set to `yes` or `only`. For example: ~~~ sql diff --git a/src/current/_includes/v26.1/known-limitations/alter-sequence-limitations.md b/src/current/_includes/v26.1/known-limitations/alter-sequence-limitations.md index 7343a1d1f1e..ba6bc88aa32 100644 --- a/src/current/_includes/v26.1/known-limitations/alter-sequence-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/alter-sequence-limitations.md @@ -1 +1 @@ -- Altering the minimum or maximum value of a series does not check the current value of a series. This means that it is possible to silently set the maximum to a value less than, or a minimum value greater than, the current value. [#23719](https://github.com/cockroachdb/cockroach/issues/23719) +- Altering the minimum or maximum value of a series does not check the current value of a series. 
This means that it is possible to silently set the maximum to a value less than, or a minimum value greater than, the current value. #23719 diff --git a/src/current/_includes/v26.1/known-limitations/alter-table-add-column-limitations.md b/src/current/_includes/v26.1/known-limitations/alter-table-add-column-limitations.md index a8e71e5e59c..a8249cfb0f8 100644 --- a/src/current/_includes/v26.1/known-limitations/alter-table-add-column-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/alter-table-add-column-limitations.md @@ -2,7 +2,7 @@ - The column uses a [sequence]({% link {{ page.version.version }}/create-sequence.md %}) as the [`DEFAULT`]({% link {{ page.version.version }}/default-value.md %}) value, for example using `nextval()`. - The column uses `GENERATED ALWAYS AS IDENTITY` or `GENERATED BY DEFAULT AS IDENTITY`, unless the table being altered is empty. - This is because CockroachDB does not support back-filling sequential column data. [#42508](https://github.com/cockroachdb/cockroach/issues/42508) + This is because CockroachDB does not support back-filling sequential column data. #42508 - When executing an [`ALTER TABLE ADD COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#add-column) statement with a [`DEFAULT`]({% link {{ page.version.version }}/default-value.md %}) expression, new values generated: - Use the default [search path]({% link {{ page.version.version }}/sql-name-resolution.md %}#search-path) regardless of the search path configured in the current session via `SET SEARCH_PATH`. - Use the UTC time zone regardless of the time zone configured in the current session via [`SET TIME ZONE`]({% link {{ page.version.version }}/set-vars.md %}). 
diff --git a/src/current/_includes/v26.1/known-limitations/alter-view-limitations.md b/src/current/_includes/v26.1/known-limitations/alter-view-limitations.md index 642bed6ce08..e44921629ff 100644 --- a/src/current/_includes/v26.1/known-limitations/alter-view-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/alter-view-limitations.md @@ -1,4 +1,4 @@ `ALTER VIEW` does not currently support: - Changing the [`SELECT`]({% link {{ page.version.version }}/select-clause.md %}) statement executed by a view. Instead, you must drop the existing view and create a new view. -- Renaming a view that other views depend on. This feature may be added in the future. [#10083](https://github.com/cockroachdb/cockroach/issues/10083) \ No newline at end of file +- Renaming a view that other views depend on. This feature may be added in the future. #10083 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/aost-limitations.md b/src/current/_includes/v26.1/known-limitations/aost-limitations.md index 811c884d08d..c42e62800d3 100644 --- a/src/current/_includes/v26.1/known-limitations/aost-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/aost-limitations.md @@ -1 +1 @@ -CockroachDB does not support placeholders in {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %}. The time value must be a constant value embedded in the SQL string. [#30955](https://github.com/cockroachdb/cockroach/issues/30955) \ No newline at end of file +CockroachDB does not support placeholders in {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %}. The time value must be a constant value embedded in the SQL string. 
#30955 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/cannot-refresh-materialized-views-inside-transactions.md b/src/current/_includes/v26.1/known-limitations/cannot-refresh-materialized-views-inside-transactions.md index b0aaf728177..2f6fe466a95 100644 --- a/src/current/_includes/v26.1/known-limitations/cannot-refresh-materialized-views-inside-transactions.md +++ b/src/current/_includes/v26.1/known-limitations/cannot-refresh-materialized-views-inside-transactions.md @@ -24,4 +24,4 @@ SQLSTATE: 25000 ~~~ - [#66008](https://github.com/cockroachdb/cockroach/issues/66008) + #66008 diff --git a/src/current/_includes/v26.1/known-limitations/cdc-queries-column-families.md b/src/current/_includes/v26.1/known-limitations/cdc-queries-column-families.md index 505a8c9700e..52bdabec236 100644 --- a/src/current/_includes/v26.1/known-limitations/cdc-queries-column-families.md +++ b/src/current/_includes/v26.1/known-limitations/cdc-queries-column-families.md @@ -1 +1 @@ -Creating a changefeed with {% if page.name == "cdc-queries.md" %} CDC queries {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}) {% endif %} on tables with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %} is not supported. [#127761](https://github.com/cockroachdb/cockroach/issues/127761) \ No newline at end of file +Creating a changefeed with {% if page.name == "cdc-queries.md" %} CDC queries {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}) {% endif %} on tables with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %} is not supported. 
#127761 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/cdc-queries.md b/src/current/_includes/v26.1/known-limitations/cdc-queries.md index 2839eba5eda..552900531b0 100644 --- a/src/current/_includes/v26.1/known-limitations/cdc-queries.md +++ b/src/current/_includes/v26.1/known-limitations/cdc-queries.md @@ -3,5 +3,5 @@ - The following are not permitted in CDC queries: - [Volatile functions]({% link {{ page.version.version }}/functions-and-operators.md %}#function-volatility). - Sub-select queries. - - [Aggregate]({% link {{ page.version.version }}/functions-and-operators.md %}#aggregate-functions) and [window functions]({% link {{ page.version.version }}/window-functions.md %}) (i.e., functions operating over many rows). [#98237](https://github.com/cockroachdb/cockroach/issues/98237) -- `delete` changefeed events will only contain the [primary key]({% link {{ page.version.version }}/primary-key.md %}). All other columns will emit as `NULL`. See [Capture delete messages]({% link {{ page.version.version }}/cdc-queries.md %}#capture-delete-messages) for detail on running a CDC query that emits the deleted values. [#83835](https://github.com/cockroachdb/cockroach/issues/83835) + - [Aggregate]({% link {{ page.version.version }}/functions-and-operators.md %}#aggregate-functions) and [window functions]({% link {{ page.version.version }}/window-functions.md %}) (i.e., functions operating over many rows). #98237 +- `delete` changefeed events will only contain the [primary key]({% link {{ page.version.version }}/primary-key.md %}). All other columns will emit as `NULL`. See [Capture delete messages]({% link {{ page.version.version }}/cdc-queries.md %}#capture-delete-messages) for detail on running a CDC query that emits the deleted values. 
#83835 diff --git a/src/current/_includes/v26.1/known-limitations/cdc.md b/src/current/_includes/v26.1/known-limitations/cdc.md index a473e94367c..0a3914da8bd 100644 --- a/src/current/_includes/v26.1/known-limitations/cdc.md +++ b/src/current/_includes/v26.1/known-limitations/cdc.md @@ -1,8 +1,8 @@ -- Changefeed target options are limited to tables and [column families]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}). [#73435](https://github.com/cockroachdb/cockroach/issues/73435) +- Changefeed target options are limited to tables and [column families]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}). #73435 - {% include {{page.version.version}}/cdc/kafka-vpc-limitation.md %} -- Webhook sinks only support HTTPS. Use the [`insecure_tls_skip_verify`]({% link {{ page.version.version }}/create-changefeed.md %}#insecure-tls-skip-verify) parameter when testing to disable certificate verification; however, this still requires HTTPS and certificates. [#73431](https://github.com/cockroachdb/cockroach/issues/73431) -- Formats for changefeed messages are not supported by all changefeed sinks. Refer to the [Changefeed Sinks]({% link {{ page.version.version }}/changefeed-sinks.md %}) page for details on compatible formats with each sink and the [`format`]({% link {{ page.version.version }}/create-changefeed.md %}) option to specify a changefeed message format. [#73432](https://github.com/cockroachdb/cockroach/issues/73432) -- Using the [`split_column_families`]({% link {{ page.version.version }}/create-changefeed.md %}#split-column-families) and [`resolved`]({% link {{ page.version.version }}/create-changefeed.md %}#resolved) options on the same changefeed will cause an error when using the following [sinks](changefeed-sinks.html): Kafka and Google Cloud Pub/Sub. Instead, use the individual `FAMILY` keyword to specify column families when creating a changefeed. 
[#79452](https://github.com/cockroachdb/cockroach/issues/79452) +- Webhook sinks only support HTTPS. Use the [`insecure_tls_skip_verify`]({% link {{ page.version.version }}/create-changefeed.md %}#insecure-tls-skip-verify) parameter when testing to disable certificate verification; however, this still requires HTTPS and certificates. #73431 +- Formats for changefeed messages are not supported by all changefeed sinks. Refer to the [Changefeed Sinks]({% link {{ page.version.version }}/changefeed-sinks.md %}) page for details on compatible formats with each sink and the [`format`]({% link {{ page.version.version }}/create-changefeed.md %}) option to specify a changefeed message format. #73432 +- Using the [`split_column_families`]({% link {{ page.version.version }}/create-changefeed.md %}#split-column-families) and [`resolved`]({% link {{ page.version.version }}/create-changefeed.md %}#resolved) options on the same changefeed will cause an error when using the following [sinks](changefeed-sinks.html): Kafka and Google Cloud Pub/Sub. Instead, use the individual `FAMILY` keyword to specify column families when creating a changefeed. 
#79452 - {% include {{page.version.version}}/cdc/types-udt-composite-general.md %} The following limitations apply: - {% include {{page.version.version}}/cdc/avro-udt-composite.md %} - {% include {{page.version.version}}/cdc/csv-udt-composite.md %} \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/changefeed-column-family-message.md b/src/current/_includes/v26.1/known-limitations/changefeed-column-family-message.md index 41744b9b4b4..e77b6cc51e0 100644 --- a/src/current/_includes/v26.1/known-limitations/changefeed-column-family-message.md +++ b/src/current/_includes/v26.1/known-limitations/changefeed-column-family-message.md @@ -1 +1 @@ -When you create a changefeed on a table with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %}, the changefeed will emit messages per column family in separate streams. As a result, [changefeed messages]({% link {{ page.version.version }}/changefeed-messages.md %}) for different column families will arrive at the [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) under separate topics. [#127736](https://github.com/cockroachdb/cockroach/issues/127736) \ No newline at end of file +When you create a changefeed on a table with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %}, the changefeed will emit messages per column family in separate streams. As a result, [changefeed messages]({% link {{ page.version.version }}/changefeed-messages.md %}) for different column families will arrive at the [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) under separate topics. 
#127736 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/citext-limitations.md b/src/current/_includes/v26.1/known-limitations/citext-limitations.md index df0fbc0ff91..c7e43cd2949 100644 --- a/src/current/_includes/v26.1/known-limitations/citext-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/citext-limitations.md @@ -1 +1 @@ -- `CITEXT` types are not currently compatible with the `LIKE` and `ILIKE` operators. [#149791](https://github.com/cockroachdb/cockroach/issues/149791) \ No newline at end of file +- `CITEXT` types are not currently compatible with the `LIKE` and `ILIKE` operators. #149791 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/collate-limitations.md b/src/current/_includes/v26.1/known-limitations/collate-limitations.md index d3e3e712bb5..7ba94b89053 100644 --- a/src/current/_includes/v26.1/known-limitations/collate-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/collate-limitations.md @@ -1 +1 @@ -- Many string operations are not properly overloaded for [collated strings]({% link {{ page.version.version }}/collate.md %}). For example, the `||` concatenation operator works with regular strings but returns an error with collated strings. [#10679](https://github.com/cockroachdb/cockroach/issues/10679) +- Many string operations are not properly overloaded for [collated strings]({% link {{ page.version.version }}/collate.md %}). For example, the `||` concatenation operator works with regular strings but returns an error with collated strings. 
#10679 diff --git a/src/current/_includes/v26.1/known-limitations/comment-on-limitations.md b/src/current/_includes/v26.1/known-limitations/comment-on-limitations.md index 568b31ec737..757f74e10f0 100644 --- a/src/current/_includes/v26.1/known-limitations/comment-on-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/comment-on-limitations.md @@ -1 +1 @@ -- The [`COMMENT ON`]({% link {{ page.version.version }}/comment-on.md %}) statement associates comments to databases, tables, or columns. However, the internal table (`system.comments`) in which these comments are stored is not captured by a [`BACKUP`]({% link {{ page.version.version }}/backup.md %}) of an individual table or database. As a workaround, take a cluster backup instead, as the `system.comments` table is included in cluster backups. [#44396](https://github.com/cockroachdb/cockroach/issues/44396) +- The [`COMMENT ON`]({% link {{ page.version.version }}/comment-on.md %}) statement associates comments to databases, tables, or columns. However, the internal table (`system.comments`) in which these comments are stored is not captured by a [`BACKUP`]({% link {{ page.version.version }}/backup.md %}) of an individual table or database. As a workaround, take a cluster backup instead, as the `system.comments` table is included in cluster backups. #44396 diff --git a/src/current/_includes/v26.1/known-limitations/composite-type-limitations.md b/src/current/_includes/v26.1/known-limitations/composite-type-limitations.md index 1ceecb5d889..48ce4591246 100644 --- a/src/current/_includes/v26.1/known-limitations/composite-type-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/composite-type-limitations.md @@ -1,2 +1,2 @@ -- Updating subfields of composite types using dot syntax results in a syntax error. 
[#102984](https://github.com/cockroachdb/cockroach/issues/102984) -- Tuple elements cannot be accessed without enclosing the [composite variable]({% link {{ page.version.version }}/create-type.md %}) name in parentheses. For example, `(OLD).column` and `(NEW).column` when used in [triggers]({% link {{ page.version.version }}/triggers.md %}). [#114687](https://github.com/cockroachdb/cockroach/issues/114687) +- Updating subfields of composite types using dot syntax results in a syntax error. #102984 +- Tuple elements cannot be accessed without enclosing the [composite variable]({% link {{ page.version.version }}/create-type.md %}) name in parentheses. For example, `(OLD).column` and `(NEW).column` when used in [triggers]({% link {{ page.version.version }}/triggers.md %}). #114687 diff --git a/src/current/_includes/v26.1/known-limitations/copy-syntax.md b/src/current/_includes/v26.1/known-limitations/copy-syntax.md index e64a075dcac..bd2e56e1c25 100644 --- a/src/current/_includes/v26.1/known-limitations/copy-syntax.md +++ b/src/current/_includes/v26.1/known-limitations/copy-syntax.md @@ -1,5 +1,5 @@ CockroachDB does not yet support the following `COPY` syntax: - - `COPY ... WITH FREEZE`. [#85573](https://github.com/cockroachdb/cockroach/issues/85573) - - `COPY ... WITH QUOTE`. [#85574](https://github.com/cockroachdb/cockroach/issues/85574) - - `COPY ... FROM ... WHERE `. [#54580](https://github.com/cockroachdb/cockroach/issues/54580) + - `COPY ... WITH FREEZE`. #85573 + - `COPY ... WITH QUOTE`. #85574 + - `COPY ... FROM ... WHERE `. 
#54580 diff --git a/src/current/_includes/v26.1/known-limitations/create-statistics-aost-limitation.md b/src/current/_includes/v26.1/known-limitations/create-statistics-aost-limitation.md index 09f86f51c48..d318ce338be 100644 --- a/src/current/_includes/v26.1/known-limitations/create-statistics-aost-limitation.md +++ b/src/current/_includes/v26.1/known-limitations/create-statistics-aost-limitation.md @@ -1 +1 @@ -The `ANALYZE` alias {% if page.name != "create-statistics.md" %}of [`CREATE STATISTICS`]({% link {{ page.version.version }}/create-statistics.md %}){% endif %} does not support specifying an {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %} timestamp. `ANALYZE` statements use `AS OF SYSTEM TIME '-0.001ms'` automatically. For more control over the statistics interval, use the `CREATE STATISTICS` syntax instead. [#96430](https://github.com/cockroachdb/cockroach/issues/96430) \ No newline at end of file +The `ANALYZE` alias {% if page.name != "create-statistics.md" %}of [`CREATE STATISTICS`]({% link {{ page.version.version }}/create-statistics.md %}){% endif %} does not support specifying an {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %} timestamp. `ANALYZE` statements use `AS OF SYSTEM TIME '-0.001ms'` automatically. For more control over the statistics interval, use the `CREATE STATISTICS` syntax instead. 
#96430 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/data-domiciling-limitations.md b/src/current/_includes/v26.1/known-limitations/data-domiciling-limitations.md index b4e1092fdac..7d8faa3ce97 100644 --- a/src/current/_includes/v26.1/known-limitations/data-domiciling-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/data-domiciling-limitations.md @@ -1,4 +1,4 @@ -- When using the `infer_rbr_region_col_using_constraint` option, inserting rows with `DEFAULT` for the region column uses the database's primary region instead of inferring the region from the parent table via foreign-key constraint. [#150783](https://github.com/cockroachdb/cockroach/issues/150783) +- When using the `infer_rbr_region_col_using_constraint` option, inserting rows with `DEFAULT` for the region column uses the database's primary region instead of inferring the region from the parent table via foreign-key constraint. #150783 - When columns are [indexed]({% link {{ page.version.version }}/indexes.md %}), a subset of data from the indexed columns may appear in [meta ranges]({% link {{ page.version.version }}/architecture/distribution-layer.md %}#meta-ranges) or other system tables. CockroachDB synchronizes these system ranges and system tables across nodes. This synchronization does not respect any multi-region settings applied via either the [multi-region SQL statements]({% link {{ page.version.version }}/multiregion-overview.md %}), or the low-level [zone configs]({% link {{ page.version.version }}/configure-replication-zones.md %}) mechanism. - [Zone configs]({% link {{ page.version.version }}/configure-replication-zones.md %}) can be used for data placement but these features were historically built for performance, not for domiciling. The replication system's top priority is to prevent the loss of data and it may override the zone configurations if necessary to ensure data durability. 
For more information, see [Replication Controls]({% link {{ page.version.version }}/configure-replication-zones.md %}#types-of-constraints). - If your [log files]({% link {{ page.version.version }}/logging-overview.md %}) are kept in the region where they were generated, there is some cross-region leakage (like the system tables described previously), but the majority of user data that makes it into the logs is going to be homed in that region. If that's not strong enough, you can use the [log redaction functionality]({% link {{ page.version.version }}/configure-logs.md %}#redact-logs) to strip all raw data from the logs. You can also limit your log retention entirely. diff --git a/src/current/_includes/v26.1/known-limitations/distsql-heterogeneous-endianness.md b/src/current/_includes/v26.1/known-limitations/distsql-heterogeneous-endianness.md index 9194395dc04..7aa2963023e 100644 --- a/src/current/_includes/v26.1/known-limitations/distsql-heterogeneous-endianness.md +++ b/src/current/_includes/v26.1/known-limitations/distsql-heterogeneous-endianness.md @@ -1 +1 @@ -In clusters that mix big-endian and little-endian architectures, DistSQL may produce incorrect results because hash computations differ between the platforms. As a workaround on heterogeneous clusters, disable DistSQL with `SET CLUSTER SETTING sql.defaults.distsql = off`. [#148773](https://github.com/cockroachdb/cockroach/issues/148773) \ No newline at end of file +In clusters that mix big-endian and little-endian architectures, DistSQL may produce incorrect results because hash computations differ between the platforms. As a workaround on heterogeneous clusters, disable DistSQL with `SET CLUSTER SETTING sql.defaults.distsql = off`. 
#148773 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/drop-column-partial-index.md b/src/current/_includes/v26.1/known-limitations/drop-column-partial-index.md index 9fd1811cc43..fd28f0a96a2 100644 --- a/src/current/_includes/v26.1/known-limitations/drop-column-partial-index.md +++ b/src/current/_includes/v26.1/known-limitations/drop-column-partial-index.md @@ -1 +1 @@ -CockroachDB prevents a column from being dropped using [`ALTER TABLE ... DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) if it is referenced by a partial index predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). [#97813](https://github.com/cockroachdb/cockroach/issues/97813). \ No newline at end of file +CockroachDB prevents a column from being dropped using [`ALTER TABLE ... DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) if it is referenced by a partial index predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). #97813. \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/drop-owned-by-limitations.md b/src/current/_includes/v26.1/known-limitations/drop-owned-by-limitations.md index 95685f6adf1..af99d4b7cfe 100644 --- a/src/current/_includes/v26.1/known-limitations/drop-owned-by-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/drop-owned-by-limitations.md @@ -10,4 +10,4 @@ The phrase "synthetic privileges" in the error message refers to [system-level privileges]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges). 
- The workaround is to use [`SHOW SYSTEM GRANTS FOR {role}`](show-system-grants.html) and then use [`REVOKE SYSTEM ...`](revoke.html#revoke-system-level-privileges-on-the-entire-cluster) for each privilege in the result. [#88149](https://github.com/cockroachdb/cockroach/issues/88149) \ No newline at end of file + The workaround is to use [`SHOW SYSTEM GRANTS FOR {role}`](show-system-grants.html) and then use [`REVOKE SYSTEM ...`](revoke.html#revoke-system-level-privileges-on-the-entire-cluster) for each privilege in the result. #88149 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/drop-trigger-limitations.md b/src/current/_includes/v26.1/known-limitations/drop-trigger-limitations.md index 90745f7e17a..2a633ccb369 100644 --- a/src/current/_includes/v26.1/known-limitations/drop-trigger-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/drop-trigger-limitations.md @@ -1 +1 @@ -[`DROP TRIGGER`]({% link {{ page.version.version }}/drop-trigger.md %}) with `CASCADE` is not supported. [#128151](https://github.com/cockroachdb/cockroach/issues/128151) \ No newline at end of file +[`DROP TRIGGER`]({% link {{ page.version.version }}/drop-trigger.md %}) with `CASCADE` is not supported. #128151 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/enforce-home-region-limitations.md b/src/current/_includes/v26.1/known-limitations/enforce-home-region-limitations.md index 693829358f7..53ce961b902 100644 --- a/src/current/_includes/v26.1/known-limitations/enforce-home-region-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/enforce-home-region-limitations.md @@ -1 +1 @@ -With `enforce_home_region` enabled, CockroachDB currently validates home-region access during plan build. This can falsely reject queries (e.g., lookup joins) that would only read local data at execution time, returning a `Query has no home region` error. 
[#148375](https://github.com/cockroachdb/cockroach/issues/148375) \ No newline at end of file +With `enforce_home_region` enabled, CockroachDB currently validates home-region access during plan build. This can falsely reject queries (e.g., lookup joins) that would only read local data at execution time, returning a `Query has no home region` error. #148375 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/expression-index-limitations.md b/src/current/_includes/v26.1/known-limitations/expression-index-limitations.md index c0e94185948..9487028791c 100644 --- a/src/current/_includes/v26.1/known-limitations/expression-index-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/expression-index-limitations.md @@ -1,6 +1,6 @@ - The expression cannot reference columns outside the index's table. - Functional expression output must be determined by the input arguments. For example, you can't use the [volatile function]({% link {{ page.version.version }}/functions-and-operators.md %}#function-volatility) `now()` to create an index because its output depends on more than just the function arguments. -- CockroachDB does not allow {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} to reference [computed columns]({% link {{ page.version.version }}/computed-columns.md %}). [#67900](https://github.com/cockroachdb/cockroach/issues/67900) +- CockroachDB does not allow {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} to reference [computed columns]({% link {{ page.version.version }}/computed-columns.md %}). #67900 - CockroachDB does not support expressions as `ON CONFLICT` targets. 
This means that unique {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} cannot be selected as arbiters for [`INSERT .. ON CONFLICT`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause) statements. For example: {% include_cached copy-clipboard.html %} @@ -40,4 +40,4 @@ HINT: try \h INSERT ~~~ - [#67893](https://github.com/cockroachdb/cockroach/issues/67893) + #67893 diff --git a/src/current/_includes/v26.1/known-limitations/forecasted-stats-limitations.md b/src/current/_includes/v26.1/known-limitations/forecasted-stats-limitations.md index c8753124a96..b5d03e8ffc5 100644 --- a/src/current/_includes/v26.1/known-limitations/forecasted-stats-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/forecasted-stats-limitations.md @@ -6,4 +6,4 @@ Although [`SHOW STATISTICS WITH FORECAST`]({% link {{ page.version.version }}/show-statistics.md %}#display-forecasted-statistics) shows the settings taking effect immediately, they do not actually take effect until new statistics are collected (as can be verified with [`EXPLAIN`]({% link {{ page.version.version }}/explain.md %})). - As a workaround, disable and enable forecasting at the [cluster]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-clusters) or [table]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-tables) level. This will invalidate the statistics cache and cause these settings to take effect immediately. 
[#123852](https://github.com/cockroachdb/cockroach/issues/123852) \ No newline at end of file + As a workaround, disable and enable forecasting at the [cluster]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-clusters) or [table]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-tables) level. This will invalidate the statistics cache and cause these settings to take effect immediately. #123852 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/full-text-search-unsupported.md b/src/current/_includes/v26.1/known-limitations/full-text-search-unsupported.md index 7b5a83f2cae..2be666994f1 100644 --- a/src/current/_includes/v26.1/known-limitations/full-text-search-unsupported.md +++ b/src/current/_includes/v26.1/known-limitations/full-text-search-unsupported.md @@ -11,4 +11,4 @@ - `!! tsquery` comparisons. - `tsquery @> tsquery` and `tsquery <@ tsquery` comparisons. -[#41288](https://github.com/cockroachdb/cockroach/issues/41288) \ No newline at end of file +#41288 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/generic-query-plan-limitations.md b/src/current/_includes/v26.1/known-limitations/generic-query-plan-limitations.md index e28e66d5f32..03d72195334 100644 --- a/src/current/_includes/v26.1/known-limitations/generic-query-plan-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/generic-query-plan-limitations.md @@ -1,2 +1,2 @@ -- Because [generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache) use lookup joins instead of the scans and revscans used by custom query plans, generic query plans do not perform as well as custom query plans in some cases. 
[#128916](https://github.com/cockroachdb/cockroach/issues/128916) -- [Generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-type) are not included in the [plan cache]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache). This means a generic query plan built and optimized for a prepared statement in one session cannot be used by another session. To reuse generic query plans for maximum performance, a prepared statement should be executed multiple times instead of prepared and executed once. [#128911](https://github.com/cockroachdb/cockroach/issues/128911) \ No newline at end of file +- Because [generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache) use lookup joins instead of the scans and revscans used by custom query plans, generic query plans do not perform as well as custom query plans in some cases. #128916 +- [Generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-type) are not included in the [plan cache]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache). This means a generic query plan built and optimized for a prepared statement in one session cannot be used by another session. To reuse generic query plans for maximum performance, a prepared statement should be executed multiple times instead of prepared and executed once. #128911 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/geospatial-heterogeneous-architectures.md b/src/current/_includes/v26.1/known-limitations/geospatial-heterogeneous-architectures.md index 4bb9633f138..55b706c3d6d 100644 --- a/src/current/_includes/v26.1/known-limitations/geospatial-heterogeneous-architectures.md +++ b/src/current/_includes/v26.1/known-limitations/geospatial-heterogeneous-architectures.md @@ -1 +1 @@ -Clusters that mix `s390x` with other CPU architectures are unsupported for geospatial workloads. 
Due to differences in how trigonometric functions are computed on `s390x` systems, geospatial queries in heterogeneous clusters with `s390x` are likely to get incorrect results. This can include taking a backup on one architecture and restoring it on another. [#148783](https://github.com/cockroachdb/cockroach/issues/148783) \ No newline at end of file +Clusters that mix `s390x` with other CPU architectures are unsupported for geospatial workloads. Due to differences in how trigonometric functions are computed on `s390x` systems, geospatial queries in heterogeneous clusters with `s390x` are likely to get incorrect results. This can include taking a backup on one architecture and restoring it on another. #148783 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/int-limitations.md b/src/current/_includes/v26.1/known-limitations/int-limitations.md index 6333dbf6ed5..defda16db88 100644 --- a/src/current/_includes/v26.1/known-limitations/int-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/int-limitations.md @@ -1 +1 @@ -- When setting the `default_int_size` [session variable]({% link {{ page.version.version }}/set-vars.md %}) in a batch of statements such as `SET default_int_size='int4'; SELECT 1::INT`, the `default_int_size` variable will not take effect until the next statement. Statement parsing is asynchronous with statement execution. As a workaround, set `default_int_size` via your database driver, or ensure that `SET default_int_size` is in its own statement. [#32846](https://github.com/cockroachdb/cockroach/issues/32846) +- When setting the `default_int_size` [session variable]({% link {{ page.version.version }}/set-vars.md %}) in a batch of statements such as `SET default_int_size='int4'; SELECT 1::INT`, the `default_int_size` variable will not take effect until the next statement. Statement parsing is asynchronous with statement execution. 
As a workaround, set `default_int_size` via your database driver, or ensure that `SET default_int_size` is in its own statement. #32846 diff --git a/src/current/_includes/v26.1/known-limitations/inverted-index-limitations.md b/src/current/_includes/v26.1/known-limitations/inverted-index-limitations.md index a19a10295eb..108d5eaaba3 100644 --- a/src/current/_includes/v26.1/known-limitations/inverted-index-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/inverted-index-limitations.md @@ -1,3 +1,3 @@ -- CockroachDB does not allow inverted indexes with a [`STORING` column]({% link {{ page.version.version }}/create-index.md %}#store-columns). [#88278](https://github.com/cockroachdb/cockroach/issues/88278) -- CockroachDB cannot index-accelerate queries with `@@` predicates when both sides of the operator are variables for `tsvector` and `tsquery` types. [#102731](https://github.com/cockroachdb/cockroach/issues/102731) -- [Left joins]({% link {{ page.version.version }}/joins.md %}#left-outer-joins) and anti joins involving [`JSONB`]({% link {{ page.version.version }}/jsonb.md %}), [`ARRAY`]({% link {{ page.version.version }}/array.md %}), or [spatial-typed]({% link {{ page.version.version }}/query-spatial-data.md %}) columns with a multi-column or [partitioned]({% link {{ page.version.version }}/alter-index.md %}#partition-by) [GIN index](inverted-indexes.html) will not take advantage of the index if the prefix columns of the index are unconstrained, or if they are constrained to multiple, constant values. To work around this limitation, make sure that the prefix columns of the index are either constrained to single constant values, or are part of an equality condition with an input column. [#59649](https://github.com/cockroachdb/cockroach/issues/59649) +- CockroachDB does not allow inverted indexes with a [`STORING` column]({% link {{ page.version.version }}/create-index.md %}#store-columns). 
#88278 +- CockroachDB cannot index-accelerate queries with `@@` predicates when both sides of the operator are variables for `tsvector` and `tsquery` types. #102731 +- [Left joins]({% link {{ page.version.version }}/joins.md %}#left-outer-joins) and anti joins involving [`JSONB`]({% link {{ page.version.version }}/jsonb.md %}), [`ARRAY`]({% link {{ page.version.version }}/array.md %}), or [spatial-typed]({% link {{ page.version.version }}/query-spatial-data.md %}) columns with a multi-column or [partitioned]({% link {{ page.version.version }}/alter-index.md %}#partition-by) [GIN index](inverted-indexes.html) will not take advantage of the index if the prefix columns of the index are unconstrained, or if they are constrained to multiple, constant values. To work around this limitation, make sure that the prefix columns of the index are either constrained to single constant values, or are part of an equality condition with an input column. #59649 diff --git a/src/current/_includes/v26.1/known-limitations/jsonpath-limitations.md b/src/current/_includes/v26.1/known-limitations/jsonpath-limitations.md index 9b51bfb6e87..a74232bd40b 100644 --- a/src/current/_includes/v26.1/known-limitations/jsonpath-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/jsonpath-limitations.md @@ -1,2 +1,2 @@ -- The following keywords are only accepted in lowercase: `strict`, `lax`, `exists`, `like_regex`, `flag`, `is unknown`, `to`, `last`. [#144255](https://github.com/cockroachdb/cockroach/issues/144255) -- Comparisons involving empty arrays (e.g., `SELECT jsonb_path_query('{"a": [1], "b": []}', '$.a == $.b');`) return `null`, rather than `false` as in PostgreSQL. [#145099](https://github.com/cockroachdb/cockroach/issues/145099) \ No newline at end of file +- The following keywords are only accepted in lowercase: `strict`, `lax`, `exists`, `like_regex`, `flag`, `is unknown`, `to`, `last`. 
#144255 +- Comparisons involving empty arrays (e.g., `SELECT jsonb_path_query('{"a": [1], "b": []}', '$.a == $.b');`) return `null`, rather than `false` as in PostgreSQL. #145099 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/ldr-column-families.md b/src/current/_includes/v26.1/known-limitations/ldr-column-families.md index 2a7c3bbba52..0c9dcd74ae4 100644 --- a/src/current/_includes/v26.1/known-limitations/ldr-column-families.md +++ b/src/current/_includes/v26.1/known-limitations/ldr-column-families.md @@ -1 +1 @@ -Replicating tables cannot contain [column families]({% link {{ page.version.version }}/column-families.md %}). [#133562](https://github.com/cockroachdb/cockroach/issues/133562) \ No newline at end of file +Replicating tables cannot contain [column families]({% link {{ page.version.version }}/column-families.md %}). #133562 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/ldr-composite-primary.md b/src/current/_includes/v26.1/known-limitations/ldr-composite-primary.md index ac897af35a7..2a790952946 100644 --- a/src/current/_includes/v26.1/known-limitations/ldr-composite-primary.md +++ b/src/current/_includes/v26.1/known-limitations/ldr-composite-primary.md @@ -1 +1 @@ -The [primary key]({% link {{ page.version.version }}/primary-key.md %}) in replicating tables cannot contain composite types. [#133572](https://github.com/cockroachdb/cockroach/issues/133572) \ No newline at end of file +The [primary key]({% link {{ page.version.version }}/primary-key.md %}) in replicating tables cannot contain composite types. 
#133572 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/ldr-indexes.md b/src/current/_includes/v26.1/known-limitations/ldr-indexes.md index 0bf7f60c2d4..4936f9868bd 100644 --- a/src/current/_includes/v26.1/known-limitations/ldr-indexes.md +++ b/src/current/_includes/v26.1/known-limitations/ldr-indexes.md @@ -1 +1 @@ -Replicating tables cannot contain an [index]({% link {{ page.version.version }}/indexes.md %}) that requires expression evaluation before insertion. [#133560](https://github.com/cockroachdb/cockroach/issues/133560) \ No newline at end of file +Replicating tables cannot contain an [index]({% link {{ page.version.version }}/indexes.md %}) that requires expression evaluation before insertion. #133560 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/ldr-sequences.md b/src/current/_includes/v26.1/known-limitations/ldr-sequences.md index 4e39f3630e3..693c0cc685b 100644 --- a/src/current/_includes/v26.1/known-limitations/ldr-sequences.md +++ b/src/current/_includes/v26.1/known-limitations/ldr-sequences.md @@ -1 +1 @@ -Replicating table cannot reference [sequences]({% link {{ page.version.version }}/create-sequence.md %}). [#132303](https://github.com/cockroachdb/cockroach/issues/132303) \ No newline at end of file +Replicating table cannot reference [sequences]({% link {{ page.version.version }}/create-sequence.md %}). #132303 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/ldr-triggers.md b/src/current/_includes/v26.1/known-limitations/ldr-triggers.md index 55f8e885b97..a587568b69a 100644 --- a/src/current/_includes/v26.1/known-limitations/ldr-triggers.md +++ b/src/current/_includes/v26.1/known-limitations/ldr-triggers.md @@ -1 +1 @@ -Replicating tables cannot reference triggers. [#132301](https://github.com/cockroachdb/cockroach/issues/132301) \ No newline at end of file +Replicating tables cannot reference triggers. 
#132301 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/ldr-udfs.md b/src/current/_includes/v26.1/known-limitations/ldr-udfs.md index fb642f14751..62eea605b52 100644 --- a/src/current/_includes/v26.1/known-limitations/ldr-udfs.md +++ b/src/current/_includes/v26.1/known-limitations/ldr-udfs.md @@ -1 +1 @@ -Replicating tables cannot reference [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). [#132302](https://github.com/cockroachdb/cockroach/issues/132302) \ No newline at end of file +Replicating tables cannot reference [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). #132302 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/like-escape-performance.md b/src/current/_includes/v26.1/known-limitations/like-escape-performance.md index 845fdddeeb9..45d8f380e3d 100644 --- a/src/current/_includes/v26.1/known-limitations/like-escape-performance.md +++ b/src/current/_includes/v26.1/known-limitations/like-escape-performance.md @@ -1 +1 @@ -`LIKE` queries with an `ESCAPE` clause cannot use index acceleration, which can result in significantly slower performance compared to standard `LIKE` queries. [#30192](https://github.com/cockroachdb/cockroach/issues/30192) +`LIKE` queries with an `ESCAPE` clause cannot use index acceleration, which can result in significantly slower performance compared to standard `LIKE` queries. 
#30192 diff --git a/src/current/_includes/v26.1/known-limitations/locality-optimized-search-virtual-computed-columns.md b/src/current/_includes/v26.1/known-limitations/locality-optimized-search-virtual-computed-columns.md index d6acf418aa8..39b3f2cb00e 100644 --- a/src/current/_includes/v26.1/known-limitations/locality-optimized-search-virtual-computed-columns.md +++ b/src/current/_includes/v26.1/known-limitations/locality-optimized-search-virtual-computed-columns.md @@ -1 +1 @@ -- {% if page.name == "cost-based-optimizer.md" %} Locality optimized search {% else %} [Locality optimized search]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) {% endif %} does not work for queries that use [partitioned unique indexes]({% link {{ page.version.version }}/partitioning.md %}#partition-using-a-secondary-index) on [virtual computed columns](computed-columns.html#virtual-computed-columns). A workaround for computed columns is to make the virtual computed column a [stored computed column](computed-columns.html#stored-computed-columns). Locality optimized search does not work for queries that use partitioned unique [expression indexes](expression-indexes.html). [#68129](https://github.com/cockroachdb/cockroach/issues/68129) +- {% if page.name == "cost-based-optimizer.md" %} Locality optimized search {% else %} [Locality optimized search]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) {% endif %} does not work for queries that use [partitioned unique indexes]({% link {{ page.version.version }}/partitioning.md %}#partition-using-a-secondary-index) on [virtual computed columns](computed-columns.html#virtual-computed-columns). A workaround for computed columns is to make the virtual computed column a [stored computed column](computed-columns.html#stored-computed-columns). 
Locality optimized search does not work for queries that use partitioned unique [expression indexes](expression-indexes.html). #68129 diff --git a/src/current/_includes/v26.1/known-limitations/materialized-views-no-stats.md b/src/current/_includes/v26.1/known-limitations/materialized-views-no-stats.md index 02f2bd787c4..2bfe00d5307 100644 --- a/src/current/_includes/v26.1/known-limitations/materialized-views-no-stats.md +++ b/src/current/_includes/v26.1/known-limitations/materialized-views-no-stats.md @@ -1 +1 @@ -- The optimizer may not select the most optimal query plan when querying materialized views because CockroachDB does not [collect statistics]({% link {{ page.version.version }}/cost-based-optimizer.md %}#table-statistics) on materialized views. [#78181](https://github.com/cockroachdb/cockroach/issues/78181). +- The optimizer may not select the most optimal query plan when querying materialized views because CockroachDB does not [collect statistics]({% link {{ page.version.version }}/cost-based-optimizer.md %}#table-statistics) on materialized views. #78181. diff --git a/src/current/_includes/v26.1/known-limitations/max-row-size-limitations.md b/src/current/_includes/v26.1/known-limitations/max-row-size-limitations.md index 9d4d3e12918..705e6fcc9f7 100644 --- a/src/current/_includes/v26.1/known-limitations/max-row-size-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/max-row-size-limitations.md @@ -1 +1 @@ -- The `sql.guardrails.max_row_size_err` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) misses large rows caused by indexed virtual computed columns. This is because the guardrail only checks the size of primary key rows, not secondary index rows. [#69540](https://github.com/cockroachdb/cockroach/issues/69540) +- The `sql.guardrails.max_row_size_err` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) misses large rows caused by indexed virtual computed columns. 
This is because the guardrail only checks the size of primary key rows, not secondary index rows. #69540 diff --git a/src/current/_includes/v26.1/known-limitations/multiple-arbiter-indexes.md b/src/current/_includes/v26.1/known-limitations/multiple-arbiter-indexes.md index c9861623314..28b41eca491 100644 --- a/src/current/_includes/v26.1/known-limitations/multiple-arbiter-indexes.md +++ b/src/current/_includes/v26.1/known-limitations/multiple-arbiter-indexes.md @@ -1 +1 @@ -CockroachDB does not currently support multiple arbiter indexes for [`INSERT ON CONFLICT DO UPDATE`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause), and will return an error if there are multiple unique or exclusion constraints matching the `ON CONFLICT DO UPDATE` specification. [#53170](https://github.com/cockroachdb/cockroach/issues/53170) \ No newline at end of file +CockroachDB does not currently support multiple arbiter indexes for [`INSERT ON CONFLICT DO UPDATE`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause), and will return an error if there are multiple unique or exclusion constraints matching the `ON CONFLICT DO UPDATE` specification. #53170 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/node-shutdown-limitations.md b/src/current/_includes/v26.1/known-limitations/node-shutdown-limitations.md index f35f858bdc1..d7eb712ee06 100644 --- a/src/current/_includes/v26.1/known-limitations/node-shutdown-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/node-shutdown-limitations.md @@ -2,4 +2,4 @@ - The `cockroach node decommission --wait-all` command was run and then interrupted - The `cockroach node decommission --wait=none` command was run - This is because the state flip is effected by the CLI program at the end. Only the CLI (or its underlying API call) is able to finalize the "decommissioned" state. 
If the command is interrupted, or `--wait=none` is used, the state will only flip to "decommissioned" when the CLI program is run again after decommissioning has done all its work. [#94430](https://github.com/cockroachdb/cockroach/issues/94430) + This is because the state flip is effected by the CLI program at the end. Only the CLI (or its underlying API call) is able to finalize the "decommissioned" state. If the command is interrupted, or `--wait=none` is used, the state will only flip to "decommissioned" when the CLI program is run again after decommissioning has done all its work. #94430 diff --git a/src/current/_includes/v26.1/known-limitations/null-limitations.md b/src/current/_includes/v26.1/known-limitations/null-limitations.md index ad2efce233a..ba230019779 100644 --- a/src/current/_includes/v26.1/known-limitations/null-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/null-limitations.md @@ -1 +1 @@ -- By default, CockroachDB orders `NULL`s before all other values. For compatibility with PostgreSQL, the `null_ordered_last` [session variable]({% link {{ page.version.version }}/set-vars.md %}) was added, which changes the default to order `NULL` values after all other values. This works in most cases, due to some transformations CockroachDB makes in the optimizer to add extra ordering columns. However, it does not work when the ordering column is a tuple. [#93558](https://github.com/cockroachdb/cockroach/issues/93558) +- By default, CockroachDB orders `NULL`s before all other values. For compatibility with PostgreSQL, the `null_ordered_last` [session variable]({% link {{ page.version.version }}/set-vars.md %}) was added, which changes the default to order `NULL` values after all other values. This works in most cases, due to some transformations CockroachDB makes in the optimizer to add extra ordering columns. However, it does not work when the ordering column is a tuple. 
#93558 diff --git a/src/current/_includes/v26.1/known-limitations/online-schema-changes-limitations.md b/src/current/_includes/v26.1/known-limitations/online-schema-changes-limitations.md index be6d00c26ce..8f58936ac08 100644 --- a/src/current/_includes/v26.1/known-limitations/online-schema-changes-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/online-schema-changes-limitations.md @@ -28,9 +28,9 @@ You cannot start an online schema change on a table if a [primary key change]({% Some [schema changes]({% link {{ page.version.version }}/online-schema-changes.md %}) that [drop columns]({% link {{ page.version.version }}/alter-table.md %}#drop-column) cannot be [rolled back]({% link {{ page.version.version }}/rollback-transaction.md %}) properly. -In some cases, the rollback will succeed, but the column data might be partially or totally missing, or stale due to the asynchronous nature of the schema change. [#46541](https://github.com/cockroachdb/cockroach/issues/46541) +In some cases, the rollback will succeed, but the column data might be partially or totally missing, or stale due to the asynchronous nature of the schema change. #46541 -In other cases, the rollback will fail in such a way that will never be cleaned up properly, leaving the table descriptor in a state where no other schema changes can be run successfully. [#47712](https://github.com/cockroachdb/cockroach/issues/47712) +In other cases, the rollback will fail in such a way that will never be cleaned up properly, leaving the table descriptor in a state where no other schema changes can be run successfully. 
#47712 To reduce the chance that a column drop will roll back incorrectly: diff --git a/src/current/_includes/v26.1/known-limitations/partition-limitations.md b/src/current/_includes/v26.1/known-limitations/partition-limitations.md index f8ba5e979fa..b0bcd4b1d51 100644 --- a/src/current/_includes/v26.1/known-limitations/partition-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/partition-limitations.md @@ -1,3 +1,3 @@ - When defining a [table partition]({% link {{ page.version.version }}/partitioning.md %}), either during table creation or table alteration, it is not possible to use placeholders in the `PARTITION BY` clause. - CockroachDB does not currently support dropping a single partition from a table. In order to remove partitions, you can [repartition]({% unless page.name == "partitioning.md" %}{% link {{ page.version.version }}/partitioning.md %}{% endunless %}#repartition-a-table) the table. -- In cases where the partition definition includes a comparison with `NULL` and a query constraint, incorrect query plans are returned. However, this case uses non-standard partitioning which defines partitions which could never hold values, so it is not likely to occur in production environments. [#82774](https://github.com/cockroachdb/cockroach/issues/82774) +- In cases where the partition definition includes a comparison with `NULL` and a query constraint, incorrect query plans are returned. However, this case uses non-standard partitioning which defines partitions which could never hold values, so it is not likely to occur in production environments. 
#82774 diff --git a/src/current/_includes/v26.1/known-limitations/plpgsql-limitations.md b/src/current/_includes/v26.1/known-limitations/plpgsql-limitations.md index 60b3104942e..09fd0183a40 100644 --- a/src/current/_includes/v26.1/known-limitations/plpgsql-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/plpgsql-limitations.md @@ -1,23 +1,23 @@ -- It is not possible to use a variable as a target more than once in the same `INTO` clause. For example, `SELECT 1, 2 INTO x, x;`. [#121605](https://github.com/cockroachdb/cockroach/issues/121605) -- PLpgSQL variable declarations cannot inherit the type of a table row or column using `%TYPE` or `%ROWTYPE` syntax. [#114676](https://github.com/cockroachdb/cockroach/issues/114676) -- PL/pgSQL arguments cannot be referenced with ordinals (e.g., `$1`, `$2`). [#114701](https://github.com/cockroachdb/cockroach/issues/114701) +- It is not possible to use a variable as a target more than once in the same `INTO` clause. For example, `SELECT 1, 2 INTO x, x;`. #121605 +- PLpgSQL variable declarations cannot inherit the type of a table row or column using `%TYPE` or `%ROWTYPE` syntax. #114676 +- PL/pgSQL arguments cannot be referenced with ordinals (e.g., `$1`, `$2`). #114701 - The following statements are not supported: - - `FOR` cursor loops, `FOR` query loops, and `FOREACH` loops. [#105246](https://github.com/cockroachdb/cockroach/issues/105246) - - `PERFORM`, `EXECUTE`, `GET DIAGNOSTICS`, and `CASE`. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- PL/pgSQL exception blocks cannot catch [transaction retry errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}). [#111446](https://github.com/cockroachdb/cockroach/issues/111446) -- `RAISE` statements cannot be annotated with names of schema objects related to the error (i.e., using `COLUMN`, `CONSTRAINT`, `DATATYPE`, `TABLE`, or `SCHEMA`). 
[#106237](https://github.com/cockroachdb/cockroach/issues/106237) -- `RAISE` statements message the client directly, and do not produce log output. [#117750](https://github.com/cockroachdb/cockroach/issues/117750) -- `ASSERT` debugging checks are not supported. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- `RECORD` parameters and variables are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). [#105713](https://github.com/cockroachdb/cockroach/issues/105713) -- Variable shadowing (e.g., declaring a variable with the same name in an inner block) is not supported in PL/pgSQL. [#117508](https://github.com/cockroachdb/cockroach/issues/117508) -- Syntax for accessing members of composite types without parentheses is not supported. [#114687](https://github.com/cockroachdb/cockroach/issues/114687) -- `NOT NULL` variable declarations are not supported. [#105243](https://github.com/cockroachdb/cockroach/issues/105243) -- Cursors opened in PL/pgSQL execute their queries on opening, affecting performance and resource usage. [#111479](https://github.com/cockroachdb/cockroach/issues/111479) -- Cursors in PL/pgSQL cannot be declared with arguments. [#117746](https://github.com/cockroachdb/cockroach/issues/117746) -- `OPEN FOR EXECUTE` is not supported for opening cursors. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- The `print_strict_params` option is not supported in PL/pgSQL. [#123671](https://github.com/cockroachdb/cockroach/issues/123671) -- The `FOUND` local variable, which checks whether a statement affected any rows, is not supported in PL/pgSQL. [#122306](https://github.com/cockroachdb/cockroach/issues/122306) -- By default, when a PL/pgSQL variable conflicts with a column name, CockroachDB resolves the ambiguity by treating it as a column reference rather than a variable reference. 
This behavior differs from PostgreSQL, where an ambiguous column error is reported, and it is possible to change the `plpgsql.variable_conflict` setting in order to prefer either columns or variables. [#115680](https://github.com/cockroachdb/cockroach/issues/115680) -- It is not possible to define a `RECORD`-returning PL/pgSQL function that returns different-typed expressions from different `RETURN` statements. CockroachDB requires a consistent return type for `RECORD`-returning functions. [#115384](https://github.com/cockroachdb/cockroach/issues/115384) -- Variables cannot be declared with an associated collation using the `COLLATE` keyword. [#105245](https://github.com/cockroachdb/cockroach/issues/105245) -- Variables cannot be accessed using the `label.var_name` pattern. [#122322](https://github.com/cockroachdb/cockroach/issues/122322) \ No newline at end of file + - `FOR` cursor loops, `FOR` query loops, and `FOREACH` loops. #105246 + - `PERFORM`, `EXECUTE`, `GET DIAGNOSTICS`, and `CASE`. #117744 +- PL/pgSQL exception blocks cannot catch [transaction retry errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}). #111446 +- `RAISE` statements cannot be annotated with names of schema objects related to the error (i.e., using `COLUMN`, `CONSTRAINT`, `DATATYPE`, `TABLE`, or `SCHEMA`). #106237 +- `RAISE` statements message the client directly, and do not produce log output. #117750 +- `ASSERT` debugging checks are not supported. #117744 +- `RECORD` parameters and variables are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). #105713 +- Variable shadowing (e.g., declaring a variable with the same name in an inner block) is not supported in PL/pgSQL. #117508 +- Syntax for accessing members of composite types without parentheses is not supported. #114687 +- `NOT NULL` variable declarations are not supported. 
#105243 +- Cursors opened in PL/pgSQL execute their queries on opening, affecting performance and resource usage. #111479 +- Cursors in PL/pgSQL cannot be declared with arguments. #117746 +- `OPEN FOR EXECUTE` is not supported for opening cursors. #117744 +- The `print_strict_params` option is not supported in PL/pgSQL. #123671 +- The `FOUND` local variable, which checks whether a statement affected any rows, is not supported in PL/pgSQL. #122306 +- By default, when a PL/pgSQL variable conflicts with a column name, CockroachDB resolves the ambiguity by treating it as a column reference rather than a variable reference. This behavior differs from PostgreSQL, where an ambiguous column error is reported, and it is possible to change the `plpgsql.variable_conflict` setting in order to prefer either columns or variables. #115680 +- It is not possible to define a `RECORD`-returning PL/pgSQL function that returns different-typed expressions from different `RETURN` statements. CockroachDB requires a consistent return type for `RECORD`-returning functions. #115384 +- Variables cannot be declared with an associated collation using the `COLLATE` keyword. #105245 +- Variables cannot be accessed using the `label.var_name` pattern. #122322 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/read-committed-limitations.md b/src/current/_includes/v26.1/known-limitations/read-committed-limitations.md index b4722a0c369..1beeca3c6ce 100644 --- a/src/current/_includes/v26.1/known-limitations/read-committed-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/read-committed-limitations.md @@ -1,7 +1,7 @@ -- Mixed-isolation-level workloads must enable foreign-key check locking for `SERIALIZABLE` transactions to avoid race conditions. 
[#151663](https://github.com/cockroachdb/cockroach/issues/151663#issuecomment-3222083180) -- Schema changes (e.g., [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}), [`CREATE SCHEMA`]({% link {{ page.version.version }}/create-schema.md %}), [`CREATE INDEX`]({% link {{ page.version.version }}/create-index.md %})) cannot be performed within explicit `READ COMMITTED` transactions when the [`autocommit_before_ddl` session setting]({% link {{page.version.version}}/set-vars.md %}#autocommit-before-ddl) is set to `off`, and will cause transactions to abort. As a workaround, [set the transaction's isolation level]({% link {{ page.version.version }}/read-committed.md %}#set-the-current-transaction-to-read-committed) to `SERIALIZABLE`. [#114778](https://github.com/cockroachdb/cockroach/issues/114778) -- Multi-column-family checks during updates are not supported under `READ COMMITTED` isolation. [#112488](https://github.com/cockroachdb/cockroach/issues/112488) +- Mixed-isolation-level workloads must enable foreign-key check locking for `SERIALIZABLE` transactions to avoid race conditions. #151663 +- Schema changes (e.g., [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}), [`CREATE SCHEMA`]({% link {{ page.version.version }}/create-schema.md %}), [`CREATE INDEX`]({% link {{ page.version.version }}/create-index.md %})) cannot be performed within explicit `READ COMMITTED` transactions when the [`autocommit_before_ddl` session setting]({% link {{page.version.version}}/set-vars.md %}#autocommit-before-ddl) is set to `off`, and will cause transactions to abort. As a workaround, [set the transaction's isolation level]({% link {{ page.version.version }}/read-committed.md %}#set-the-current-transaction-to-read-committed) to `SERIALIZABLE`. #114778 +- Multi-column-family checks during updates are not supported under `READ COMMITTED` isolation. 
#112488 - Because locks acquired by [foreign key]({% link {{ page.version.version }}/foreign-key.md %}) checks, [`SELECT FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}), and [`SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) are fully replicated under `READ COMMITTED` isolation, some queries experience a delay for Raft replication. - [Foreign key]({% link {{ page.version.version }}/foreign-key.md %}) checks are not performed in parallel under `READ COMMITTED` isolation. - [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements are less optimized under `READ COMMITTED` isolation than under `SERIALIZABLE` isolation. Under `READ COMMITTED` isolation, `SELECT FOR UPDATE` and `SELECT FOR SHARE` usually perform an extra lookup join for every locked table when compared to the same queries under `SERIALIZABLE`. In addition, some optimization steps (such as de-correlation of correlated [subqueries]({% link {{ page.version.version }}/subqueries.md %})) are not currently performed on these queries. -- Regardless of isolation level, [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements in CockroachDB do not prevent insertion of new rows matching the search condition (i.e., [phantom reads]({% link {{ page.version.version }}/read-committed.md %}#non-repeatable-reads-and-phantom-reads)). This matches PostgreSQL behavior at all isolation levels. [#120673](https://github.com/cockroachdb/cockroach/issues/120673) \ No newline at end of file +- Regardless of isolation level, [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements in CockroachDB do not prevent insertion of new rows matching the search condition (i.e., [phantom reads]({% link {{ page.version.version }}/read-committed.md %}#non-repeatable-reads-and-phantom-reads)). 
This matches PostgreSQL behavior at all isolation levels. #120673 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/restore-multiregion-match.md b/src/current/_includes/v26.1/known-limitations/restore-multiregion-match.md index ab2f1029ecd..634d695246e 100644 --- a/src/current/_includes/v26.1/known-limitations/restore-multiregion-match.md +++ b/src/current/_includes/v26.1/known-limitations/restore-multiregion-match.md @@ -47,4 +47,4 @@ ALTER DATABASE destination_database SET PRIMARY REGION "us-east1"; ~~~ - [#71071](https://github.com/cockroachdb/cockroach/issues/71071) \ No newline at end of file + #71071 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/restore-tables-non-multi-reg.md b/src/current/_includes/v26.1/known-limitations/restore-tables-non-multi-reg.md index 5390f2d09ee..e05bc340141 100644 --- a/src/current/_includes/v26.1/known-limitations/restore-tables-non-multi-reg.md +++ b/src/current/_includes/v26.1/known-limitations/restore-tables-non-multi-reg.md @@ -1 +1 @@ -Restoring [`GLOBAL`]({% link {{ page.version.version }}/table-localities.md %}#global-tables) and [`REGIONAL BY TABLE`]({% link {{ page.version.version }}/table-localities.md %}#regional-tables) tables into a **non**-multi-region database is not supported. [#71502](https://github.com/cockroachdb/cockroach/issues/71502) +Restoring [`GLOBAL`]({% link {{ page.version.version }}/table-localities.md %}#global-tables) and [`REGIONAL BY TABLE`]({% link {{ page.version.version }}/table-localities.md %}#regional-tables) tables into a **non**-multi-region database is not supported. 
#71502 diff --git a/src/current/_includes/v26.1/known-limitations/restore-udf.md b/src/current/_includes/v26.1/known-limitations/restore-udf.md index a4a4bc080fe..e4180219a25 100644 --- a/src/current/_includes/v26.1/known-limitations/restore-udf.md +++ b/src/current/_includes/v26.1/known-limitations/restore-udf.md @@ -1 +1 @@ -`RESTORE` will not restore a table that references a [UDF]({% link {{ page.version.version }}/user-defined-functions.md %}), unless you skip restoring the function with the {% if page.name == "restore.md" %} [`skip_missing_udfs`](#skip-missing-udfs) {% else %} [`skip_missing_udfs`]({% link {{ page.version.version }}/restore.md %}#skip-missing-udfs) {% endif %} option. Alternatively, take a [database-level backup]({% link {{ page.version.version }}/backup.md %}#back-up-a-database) to include everything needed to restore the table. [#118195](https://github.com/cockroachdb/cockroach/issues/118195) \ No newline at end of file +`RESTORE` will not restore a table that references a [UDF]({% link {{ page.version.version }}/user-defined-functions.md %}), unless you skip restoring the function with the {% if page.name == "restore.md" %} [`skip_missing_udfs`](#skip-missing-udfs) {% else %} [`skip_missing_udfs`]({% link {{ page.version.version }}/restore.md %}#skip-missing-udfs) {% endif %} option. Alternatively, take a [database-level backup]({% link {{ page.version.version }}/backup.md %}#back-up-a-database) to include everything needed to restore the table. 
#118195 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/rls-update-set-where-returning.md b/src/current/_includes/v26.1/known-limitations/rls-update-set-where-returning.md index 450de11683e..e6f2b296ba4 100644 --- a/src/current/_includes/v26.1/known-limitations/rls-update-set-where-returning.md +++ b/src/current/_includes/v26.1/known-limitations/rls-update-set-where-returning.md @@ -1 +1 @@ -`UPDATE` statements whose `SET`, `WHERE`, or `RETURNING` clauses do not read existing column values can be mistakenly filtered by row-level security `SELECT` policies, causing the statement to affect no rows. [#145894](https://github.com/cockroachdb/cockroach/issues/145894) \ No newline at end of file +`UPDATE` statements whose `SET`, `WHERE`, or `RETURNING` clauses do not read existing column values can be mistakenly filtered by row-level security `SELECT` policies, causing the statement to affect no rows. #145894 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/rls-values-on-conflict-do-nothing.md b/src/current/_includes/v26.1/known-limitations/rls-values-on-conflict-do-nothing.md index c85dea7987a..d274e573dc9 100644 --- a/src/current/_includes/v26.1/known-limitations/rls-values-on-conflict-do-nothing.md +++ b/src/current/_includes/v26.1/known-limitations/rls-values-on-conflict-do-nothing.md @@ -1 +1 @@ -`ON CONFLICT ... DO NOTHING`: CockroachDB does not run the constraint and row-level policy checks on the `VALUES` clause if the candidate row has a conflict. [#35370](https://github.com/cockroachdb/cockroach/issues/35370). +`ON CONFLICT ... DO NOTHING`: CockroachDB does not run the constraint and row-level policy checks on the `VALUES` clause if the candidate row has a conflict. #35370. 
diff --git a/src/current/_includes/v26.1/known-limitations/routine-limitations.md b/src/current/_includes/v26.1/known-limitations/routine-limitations.md index 773322e2848..cc19d69e3d6 100644 --- a/src/current/_includes/v26.1/known-limitations/routine-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/routine-limitations.md @@ -1,10 +1,10 @@ -- Routines cannot be invoked with named arguments, e.g., `SELECT foo(a => 1, b => 2);` or `SELECT foo(b := 1, a := 2);`. [#122264](https://github.com/cockroachdb/cockroach/issues/122264) -- Routines cannot be created if they reference temporary tables. [#121375](https://github.com/cockroachdb/cockroach/issues/121375) -- Routines cannot be created with unnamed `INOUT` parameters. For example, `CREATE PROCEDURE p(INOUT INT) AS $$ BEGIN NULL; END; $$ LANGUAGE PLpgSQL;`. [#121251](https://github.com/cockroachdb/cockroach/issues/121251) -- Routines cannot be created if they return fewer columns than declared. For example, `CREATE FUNCTION f(OUT sum INT, INOUT a INT, INOUT b INT) LANGUAGE SQL AS $$ SELECT (a + b, b); $$;`. [#121247](https://github.com/cockroachdb/cockroach/issues/121247) -- Routines cannot be created with an `OUT` parameter of type `RECORD`. [#123448](https://github.com/cockroachdb/cockroach/issues/123448) -- DDL statements (e.g., `CREATE TABLE`, `CREATE INDEX`) are not allowed within UDFs or stored procedures. [#110080](https://github.com/cockroachdb/cockroach/issues/110080) -- Polymorphic types cannot be cast to other types (e.g., `TEXT`) within routine parameters. [#123536](https://github.com/cockroachdb/cockroach/issues/123536) -- Routine parameters and return types cannot be declared using the `ANYENUM` polymorphic type, which is able to match any [`ENUM`]({% link {{ page.version.version }}/enum.md %}) type. 
[#123048](https://github.com/cockroachdb/cockroach/issues/123048) -- [Statement diagnostics]({% link {{ page.version.version }}/explain-analyze.md %}#debug-option) cannot be collected for statements executed inside UDFs or stored procedures. You can request statement diagnostics for the top-level invocation of the function or procedure, and the resulting trace includes spans for each statement executed. However, there is no way to target statements executed inside the function or procedure with a statement diagnostics request. [#159526](https://github.com/cockroachdb/cockroach/issues/159526) -- Statements within routines do not currently respect hint injections. The workaround is to modify the inline hints directly in the body by replacing the routine. [#162627](https://github.com/cockroachdb/cockroach/issues/162627) \ No newline at end of file +- Routines cannot be invoked with named arguments, e.g., `SELECT foo(a => 1, b => 2);` or `SELECT foo(b := 1, a := 2);`. #122264 +- Routines cannot be created if they reference temporary tables. #121375 +- Routines cannot be created with unnamed `INOUT` parameters. For example, `CREATE PROCEDURE p(INOUT INT) AS $$ BEGIN NULL; END; $$ LANGUAGE PLpgSQL;`. #121251 +- Routines cannot be created if they return fewer columns than declared. For example, `CREATE FUNCTION f(OUT sum INT, INOUT a INT, INOUT b INT) LANGUAGE SQL AS $$ SELECT (a + b, b); $$;`. #121247 +- Routines cannot be created with an `OUT` parameter of type `RECORD`. #123448 +- DDL statements (e.g., `CREATE TABLE`, `CREATE INDEX`) are not allowed within UDFs or stored procedures. #110080 +- Polymorphic types cannot be cast to other types (e.g., `TEXT`) within routine parameters. #123536 +- Routine parameters and return types cannot be declared using the `ANYENUM` polymorphic type, which is able to match any [`ENUM`]({% link {{ page.version.version }}/enum.md %}) type. 
#123048 +- [Statement diagnostics]({% link {{ page.version.version }}/explain-analyze.md %}#debug-option) cannot be collected for statements executed inside UDFs or stored procedures. You can request statement diagnostics for the top-level invocation of the function or procedure, and the resulting trace includes spans for each statement executed. However, there is no way to target statements executed inside the function or procedure with a statement diagnostics request. #159526 +- Statements within routines do not currently respect hint injections. The workaround is to modify the inline hints directly in the body by replacing the routine. #162627 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/row-level-ttl-limitations.md b/src/current/_includes/v26.1/known-limitations/row-level-ttl-limitations.md index c386ba576d7..4460901566d 100644 --- a/src/current/_includes/v26.1/known-limitations/row-level-ttl-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/row-level-ttl-limitations.md @@ -1,5 +1,5 @@ - Any queries you run against tables with Row-Level TTL enabled (or against tables that have [foreign keys]({% link {{page.version.version}}/foreign-key.md %}) that reference TTL-enabled tables) do not filter out expired rows from the result set (this includes [`UPDATE`s]({% link {{ page.version.version }}/update.md %}) and [`DELETE`s]({% link {{ page.version.version }}/delete.md %})). This feature may be added in a future release. For now, follow the instructions in [Filter out expired rows from a selection query]({% link {{ page.version.version }}/row-level-ttl.md %}#filter-out-expired-rows-from-a-selection-query). -- Tables with Row-Level TTL can be referenced by [foreign keys]({% link {{page.version.version}}/foreign-key.md %}). TTL deletes are issued as regular [`DELETE`]({% link {{ page.version.version }}/delete.md %}) statements, so inbound foreign keys apply. 
If an inbound foreign key uses `ON DELETE RESTRICT` and referencing rows exist, the TTL job fails with a foreign key violation. [#101372](https://github.com/cockroachdb/cockroach/issues/101372) +- Tables with Row-Level TTL can be referenced by [foreign keys]({% link {{page.version.version}}/foreign-key.md %}). TTL deletes are issued as regular [`DELETE`]({% link {{ page.version.version }}/delete.md %}) statements, so inbound foreign keys apply. If an inbound foreign key uses `ON DELETE RESTRICT` and referencing rows exist, the TTL job fails with a foreign key violation. #101372 - Enabling Row-Level TTL on a table with multiple [secondary indexes]({% link {{ page.version.version }}/indexes.md %}) can have negative performance impacts on a cluster, including increased [latency]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#service-latency) and [contention]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention). This is particularly true for large tables with terabytes of data and billions of rows that are split up into multiple ranges across separate nodes. - Increased latency may occur because secondary indexes aren't necessarily stored on the same underlying [ranges]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-range) as a table's [primary indexes]({% link {{ page.version.version }}/indexes.md %}). Further, the secondary indexes' ranges may have [leaseholders]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-leaseholder) located on different nodes than the primary index. - Increased contention may occur because [intents]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#write-intents) must be written as part of performing the deletions. 
diff --git a/src/current/_includes/v26.1/known-limitations/savepoint-limitations.md b/src/current/_includes/v26.1/known-limitations/savepoint-limitations.md index 58451f4ae28..1bc0bdd8d44 100644 --- a/src/current/_includes/v26.1/known-limitations/savepoint-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/savepoint-limitations.md @@ -1 +1 @@ -- Transactions with [priority `HIGH`]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities) that contain DDL and `ROLLBACK TO SAVEPOINT` are not supported, as they could result in a deadlock. [#46414](https://github.com/cockroachdb/cockroach/issues/46414) +- Transactions with [priority `HIGH`]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities) that contain DDL and `ROLLBACK TO SAVEPOINT` are not supported, as they could result in a deadlock. #46414 diff --git a/src/current/_includes/v26.1/known-limitations/select-for-update-limitations.md b/src/current/_includes/v26.1/known-limitations/select-for-update-limitations.md index 9c4ab3fd4a1..89152e6fad0 100644 --- a/src/current/_includes/v26.1/known-limitations/select-for-update-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/select-for-update-limitations.md @@ -1,8 +1,8 @@ -- `SKIP LOCKED` cannot be used for tables with multiple [column families]({% link {{ page.version.version }}/column-families.md %}). [#160961](https://github.com/cockroachdb/cockroach/issues/160961) +- `SKIP LOCKED` cannot be used for tables with multiple [column families]({% link {{ page.version.version }}/column-families.md %}). #160961 - By default under `SERIALIZABLE` isolation, locks acquired using `SELECT ... FOR UPDATE` and `SELECT ... FOR SHARE` are implemented as fast, in-memory [unreplicated locks](architecture/transaction-layer.html#unreplicated-locks). 
If a [lease transfer]({% link {{ page.version.version }}/architecture/replication-layer.md %}#leases) or [range split/merge]({% link {{ page.version.version }}/architecture/distribution-layer.md %}#range-merges) occurs on a range held by an unreplicated lock, the lock is dropped. The following behaviors can occur: - The desired ordering of concurrent accesses to one or more rows of a table expressed by your use of `SELECT ... FOR UPDATE` may not be preserved (that is, a transaction _B_ against some table _T_ that was supposed to wait behind another transaction _A_ operating on _T_ may not wait for transaction _A_). - The transaction that acquired the (now dropped) unreplicated lock may fail to commit, leading to [transaction retry errors with code `40001`]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}) and the [`restart transaction` error message]({% link {{ page.version.version }}/common-errors.md %}#restart-transaction). When running under `SERIALIZABLE` isolation, `SELECT ... FOR UPDATE` and `SELECT ... FOR SHARE` locks should be thought of as best-effort, and should not be relied upon for correctness. Note that [serialization]({% link {{ page.version.version }}/demo-serializable.md %}) is preserved despite this limitation. This limitation is fixed when the `enable_durable_locking_for_serializable` [session setting]({% link {{ page.version.version }}/session-variables.md %}#enable-durable-locking-for-serializable) is set to `true`. This limitation does **not** apply to [`READ COMMITTED`]({% link {{ page.version.version }}/read-committed.md %}) transactions. - The new implementation of `SELECT FOR UPDATE` is not yet the default setting under `SERIALIZABLE` isolation. It can be used under `SERIALIZABLE` isolation by setting the `optimizer_use_lock_op_for_serializable` [session setting]({% link {{ page.version.version }}/session-variables.md %}) to `true`. 
[#114737](https://github.com/cockroachdb/cockroachdb/issues/114737) -- `SELECT FOR UPDATE` does not lock completely-`NULL` column families in multi-column-family tables. [#116836](https://github.com/cockroachdb/cockroach/issues/116836) \ No newline at end of file +- `SELECT FOR UPDATE` does not lock completely-`NULL` column families in multi-column-family tables. #116836 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/set-transaction-no-rollback.md b/src/current/_includes/v26.1/known-limitations/set-transaction-no-rollback.md index 4f4e5af2aab..966dcd82d48 100644 --- a/src/current/_includes/v26.1/known-limitations/set-transaction-no-rollback.md +++ b/src/current/_includes/v26.1/known-limitations/set-transaction-no-rollback.md @@ -30,4 +30,4 @@ UTC ~~~ - [#69396](https://github.com/cockroachdb/cockroach/issues/69396), [#148766](https://github.com/cockroachdb/cockroach/issues/148766) \ No newline at end of file + #69396, #148766 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/show-backup-symlink.md b/src/current/_includes/v26.1/known-limitations/show-backup-symlink.md index 38ba86fb28f..1c7a5612242 100644 --- a/src/current/_includes/v26.1/known-limitations/show-backup-symlink.md +++ b/src/current/_includes/v26.1/known-limitations/show-backup-symlink.md @@ -1 +1 @@ -[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}) does not support listing backups if the [`nodelocal`]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}) storage location is a symlink. [#70260](https://github.com/cockroachdb/cockroach/issues/70260) \ No newline at end of file +[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}) does not support listing backups if the [`nodelocal`]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}) storage location is a symlink. 
#70260 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/spatial-limitations.md b/src/current/_includes/v26.1/known-limitations/spatial-limitations.md index 5bc25717912..2fe9152f2cd 100644 --- a/src/current/_includes/v26.1/known-limitations/spatial-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/spatial-limitations.md @@ -1,10 +1,10 @@ -- Not all [PostGIS spatial functions](https://postgis.net/docs/reference.html) are supported. [#49203](https://github.com/cockroachdb/cockroach/issues/49203) -- The `AddGeometryColumn` [spatial function]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) only allows constant arguments. [#49402](https://github.com/cockroachdb/cockroach/issues/49402) -- The `AddGeometryColumn` spatial function only allows the `true` value for its `use_typmod` parameter. [#49448](https://github.com/cockroachdb/cockroach/issues/49448) -- CockroachDB does not support the `@` operator. Instead of using `@` in spatial expressions, we recommend using the inverse, with `~`. For example, instead of `a @ b`, use `b ~ a`. [#56124](https://github.com/cockroachdb/cockroach/issues/56124) -- CockroachDB does not yet support [`INSERT`]({% link {{ page.version.version }}/insert.md %})s into the [`spatial_ref_sys` table]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-system-tables). This limitation also blocks the [`ogr2ogr -f PostgreSQL` file conversion command](https://gdal.org/programs/ogr2ogr.html#cmdoption-ogr2ogr-f). [#55903](https://github.com/cockroachdb/cockroach/issues/55903) -- CockroachDB does not yet support [k-nearest neighbors](https://wikipedia.org/wiki/K-nearest_neighbors_algorithm). 
[#55227](https://github.com/cockroachdb/cockroach/issues/55227) -- CockroachDB does not support using [schema name prefixes]({% link {{ page.version.version }}/sql-name-resolution.md %}#how-name-resolution-works) to refer to [data types]({% link {{ page.version.version }}/data-types.md %}) with type modifiers (e.g., `public.geometry(linestring, 4326)`). Instead, use fully-unqualified names to refer to data types with type modifiers (e.g., `geometry(linestring,4326)`). [#56492](https://github.com/cockroachdb/cockroach/issues/56492) +- Not all [PostGIS spatial functions](https://postgis.net/docs/reference.html) are supported. #49203 +- The `AddGeometryColumn` [spatial function]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) only allows constant arguments. #49402 +- The `AddGeometryColumn` spatial function only allows the `true` value for its `use_typmod` parameter. #49448 +- CockroachDB does not support the `@` operator. Instead of using `@` in spatial expressions, we recommend using the inverse, with `~`. For example, instead of `a @ b`, use `b ~ a`. #56124 +- CockroachDB does not yet support [`INSERT`]({% link {{ page.version.version }}/insert.md %})s into the [`spatial_ref_sys` table]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-system-tables). This limitation also blocks the [`ogr2ogr -f PostgreSQL` file conversion command](https://gdal.org/programs/ogr2ogr.html#cmdoption-ogr2ogr-f). #55903 +- CockroachDB does not yet support [k-nearest neighbors](https://wikipedia.org/wiki/K-nearest_neighbors_algorithm). #55227 +- CockroachDB does not support using [schema name prefixes]({% link {{ page.version.version }}/sql-name-resolution.md %}#how-name-resolution-works) to refer to [data types]({% link {{ page.version.version }}/data-types.md %}) with type modifiers (e.g., `public.geometry(linestring, 4326)`). 
Instead, use fully-unqualified names to refer to data types with type modifiers (e.g., `geometry(linestring,4326)`). #56492 - {% include {{ page.version.version }}/known-limitations/srid-4326-limitations.md %} - {% include {{ page.version.version }}/known-limitations/geospatial-heterogeneous-architectures.md %} -- [Spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled on ARM Macs due to an issue with macOS code signing for the [GEOS](https://libgeos.org/) libraries. Users needing spatial features on an ARM Mac may instead [use Rosetta](https://developer.apple.com/documentation/virtualization/running_intel_binaries_in_linux_vms_with_rosetta) to [run the Intel binary]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#install-binary) or use the [Docker image]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#use-docker) distribution. [#93161](https://github.com/cockroachdb/cockroach/issues/93161) \ No newline at end of file +- [Spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled on ARM Macs due to an issue with macOS code signing for the [GEOS](https://libgeos.org/) libraries. Users needing spatial features on an ARM Mac may instead [use Rosetta](https://developer.apple.com/documentation/virtualization/running_intel_binaries_in_linux_vms_with_rosetta) to [run the Intel binary]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#install-binary) or use the [Docker image]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#use-docker) distribution. 
#93161 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/sql-cursors.md b/src/current/_includes/v26.1/known-limitations/sql-cursors.md index a3e42b9a3ec..d0ce2b3c3af 100644 --- a/src/current/_includes/v26.1/known-limitations/sql-cursors.md +++ b/src/current/_includes/v26.1/known-limitations/sql-cursors.md @@ -1,8 +1,8 @@ CockroachDB implements SQL [cursor]({% link {{ page.version.version }}/cursors.md %}) support with the following limitations: -- `DECLARE` only supports forward cursors. Reverse cursors created with `DECLARE SCROLL` are not supported. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- `FETCH` supports forward, relative, and absolute variants, but only for forward cursors. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- `BINARY CURSOR`, which returns data in the Postgres binary format, is not supported. [#77099](https://github.com/cockroachdb/cockroach/issues/77099) -- Scrollable cursor (also known as reverse `FETCH`) is not supported. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- [`SELECT ... FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) with a cursor is not supported. [#77103](https://github.com/cockroachdb/cockroach/issues/77103) -- Respect for [`SAVEPOINT`s]({% link {{ page.version.version }}/savepoint.md %}) is not supported. Cursor definitions do not disappear properly if rolled back to a `SAVEPOINT` from before they were created. [#77104](https://github.com/cockroachdb/cockroach/issues/77104) +- `DECLARE` only supports forward cursors. Reverse cursors created with `DECLARE SCROLL` are not supported. #77102 +- `FETCH` supports forward, relative, and absolute variants, but only for forward cursors. #77102 +- `BINARY CURSOR`, which returns data in the Postgres binary format, is not supported. #77099 +- Scrollable cursor (also known as reverse `FETCH`) is not supported. #77102 +- [`SELECT ... 
FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) with a cursor is not supported. #77103 +- Respect for [`SAVEPOINT`s]({% link {{ page.version.version }}/savepoint.md %}) is not supported. Cursor definitions do not disappear properly if rolled back to a `SAVEPOINT` from before they were created. #77104 diff --git a/src/current/_includes/v26.1/known-limitations/sql-guardrails-limitations.md b/src/current/_includes/v26.1/known-limitations/sql-guardrails-limitations.md index 6d8d60ac614..02ebf98bcd7 100644 --- a/src/current/_includes/v26.1/known-limitations/sql-guardrails-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/sql-guardrails-limitations.md @@ -1,2 +1,2 @@ -- The `transaction_rows_read_err` and `transaction_rows_written_err` [session settings]({% link {{ page.version.version }}/set-vars.md %}) limit the number of rows read or written by a single [transaction]({% link {{ page.version.version }}/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction). These session settings will fail the transaction with an error, but not until the current query finishes executing and the results have been returned to the client. [#70473](https://github.com/cockroachdb/cockroach/issues/70473) -- The `sql.guardrails.max_row_size_err` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) misses large rows caused by indexed virtual computed columns. This is because the guardrail only checks the size of primary key rows, not secondary index rows. [#69540](https://github.com/cockroachdb/cockroach/issues/69540) +- The `transaction_rows_read_err` and `transaction_rows_written_err` [session settings]({% link {{ page.version.version }}/set-vars.md %}) limit the number of rows read or written by a single [transaction]({% link {{ page.version.version }}/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction). 
These session settings will fail the transaction with an error, but not until the current query finishes executing and the results have been returned to the client. #70473 +- The `sql.guardrails.max_row_size_err` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) misses large rows caused by indexed virtual computed columns. This is because the guardrail only checks the size of primary key rows, not secondary index rows. #69540 diff --git a/src/current/_includes/v26.1/known-limitations/srid-4326-limitations.md b/src/current/_includes/v26.1/known-limitations/srid-4326-limitations.md index b556a9fbecd..294a1a53bd4 100644 --- a/src/current/_includes/v26.1/known-limitations/srid-4326-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/srid-4326-limitations.md @@ -1 +1 @@ -Defining a custom SRID by inserting rows into [`spatial_ref_sys`]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial_ref_sys) is not currently supported. [#55903](https://github.com/cockroachdb/cockroach/issues/55903) \ No newline at end of file +Defining a custom SRID by inserting rows into [`spatial_ref_sys`]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial_ref_sys) is not currently supported. #55903 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/stats-refresh-upgrade.md b/src/current/_includes/v26.1/known-limitations/stats-refresh-upgrade.md index 3d5a8d26325..3325dfaa53b 100644 --- a/src/current/_includes/v26.1/known-limitations/stats-refresh-upgrade.md +++ b/src/current/_includes/v26.1/known-limitations/stats-refresh-upgrade.md @@ -1 +1 @@ -- The [automatic statistics refresher]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-statistics-refresh-rate) automatically checks whether it needs to refresh statistics for every table in the database upon startup of each node in the cluster. 
If statistics for a table have not been refreshed in a while, this will trigger collection of statistics for that table. If statistics have been refreshed recently, it will not force a refresh. As a result, the automatic statistics refresher does not necessarily perform a refresh of statistics after an [upgrade]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}). This could cause a problem, for example, if the upgrade moves from a version without [histograms]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-histogram-collection) to a version with histograms. To refresh statistics manually, use [`CREATE STATISTICS`](create-statistics.html). [#54816](https://github.com/cockroachdb/cockroach/issues/54816) +- The [automatic statistics refresher]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-statistics-refresh-rate) automatically checks whether it needs to refresh statistics for every table in the database upon startup of each node in the cluster. If statistics for a table have not been refreshed in a while, this will trigger collection of statistics for that table. If statistics have been refreshed recently, it will not force a refresh. As a result, the automatic statistics refresher does not necessarily perform a refresh of statistics after an [upgrade]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}). This could cause a problem, for example, if the upgrade moves from a version without [histograms]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-histogram-collection) to a version with histograms. To refresh statistics manually, use [`CREATE STATISTICS`](create-statistics.html). 
#54816 diff --git a/src/current/_includes/v26.1/known-limitations/stored-proc-limitations.md b/src/current/_includes/v26.1/known-limitations/stored-proc-limitations.md index b2aae6069c2..74f4d86dfb3 100644 --- a/src/current/_includes/v26.1/known-limitations/stored-proc-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/stored-proc-limitations.md @@ -1,2 +1,2 @@ -- Pausable portals are not supported with `CALL` statements for stored procedures. [#151529](https://github.com/cockroachdb/cockroach/issues/151529) -- `COMMIT` and `ROLLBACK` statements are not supported within nested procedures. [#122266](https://github.com/cockroachdb/cockroach/issues/122266) \ No newline at end of file +- Pausable portals are not supported with `CALL` statements for stored procedures. #151529 +- `COMMIT` and `ROLLBACK` statements are not supported within nested procedures. #122266 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/subquery-mutations-limitations.md b/src/current/_includes/v26.1/known-limitations/subquery-mutations-limitations.md index 771eba80bc6..d4deeaee17c 100644 --- a/src/current/_includes/v26.1/known-limitations/subquery-mutations-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/subquery-mutations-limitations.md @@ -2,4 +2,4 @@ - Set the `sql.multiple_modifications_of_table.enabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) to `true`. - Use the `enable_multiple_modifications_of_table` [session variable]({% link {{ page.version.version }}/set-vars.md %}). - If multiple mutations inside the same statement affect different tables with [`FOREIGN KEY`]({% link {{ page.version.version }}/foreign-key.md %}) relations and `ON CASCADE` clauses between them, the results will be different from what is expected in PostgreSQL. 
[#70731](https://github.com/cockroachdb/cockroach/issues/70731) + If multiple mutations inside the same statement affect different tables with [`FOREIGN KEY`]({% link {{ page.version.version }}/foreign-key.md %}) relations and `ON CASCADE` clauses between them, the results will be different from what is expected in PostgreSQL. #70731 diff --git a/src/current/_includes/v26.1/known-limitations/transaction-row-count-limitations.md b/src/current/_includes/v26.1/known-limitations/transaction-row-count-limitations.md index ded62d6d9be..a54f15d61a7 100644 --- a/src/current/_includes/v26.1/known-limitations/transaction-row-count-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/transaction-row-count-limitations.md @@ -1 +1 @@ -- The `transaction_rows_read_err` and `transaction_rows_written_err` [session settings]({% link {{ page.version.version }}/set-vars.md %}) limit the number of rows read or written by a single [transaction]({% link {{ page.version.version }}/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction). These session settings will fail the transaction with an error, but not until the current query finishes executing and the results have been returned to the client. [#70473](https://github.com/cockroachdb/cockroach/issues/70473) +- The `transaction_rows_read_err` and `transaction_rows_written_err` [session settings]({% link {{ page.version.version }}/set-vars.md %}) limit the number of rows read or written by a single [transaction]({% link {{ page.version.version }}/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction). These session settings will fail the transaction with an error, but not until the current query finishes executing and the results have been returned to the client. 
#70473 diff --git a/src/current/_includes/v26.1/known-limitations/trigger-limitations.md b/src/current/_includes/v26.1/known-limitations/trigger-limitations.md index 66fcd0018ed..aaf49607557 100644 --- a/src/current/_includes/v26.1/known-limitations/trigger-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/trigger-limitations.md @@ -1,10 +1,10 @@ -- `CREATE OR REPLACE TRIGGER` is not supported. [#128422](https://github.com/cockroachdb/cockroach/issues/128422) -- Statement-level triggers are not supported. [#126362](https://github.com/cockroachdb/cockroach/issues/126362) -- `INSTEAD OF` triggers are not supported. [#126363](https://github.com/cockroachdb/cockroach/issues/126363) -- A [trigger function]({% link {{ page.version.version }}/triggers.md %}#trigger-function) that is used in an existing trigger cannot be replaced with `CREATE OR REPLACE` syntax. To use `CREATE OR REPLACE`, first [drop any triggers]({% link {{ page.version.version }}/drop-trigger.md %}) that are using the function. [#134555](https://github.com/cockroachdb/cockroach/issues/134555) -- Hidden columns are not visible to triggers. [#133331](https://github.com/cockroachdb/cockroach/issues/133331) -- The `REFERENCING` clause for `CREATE TRIGGER` is not supported. [#135655](https://github.com/cockroachdb/cockroach/issues/135655) -- CockroachDB uses one-based indexing for the `TG_ARGV` array to maintain consistency with its array indexing system. This differs from PostgreSQL, where `TG_ARGV` uses zero-based indexing, unlike other PostgreSQL arrays. Trigger functions that reference `TG_ARGV` need to be adjusted when migrating from PostgreSQL. [#135311](https://github.com/cockroachdb/cockroach/issues/135311) -- `UPDATE` triggers with a column list (using `UPDATE OF column_name` syntax) are not supported. [#135656](https://github.com/cockroachdb/cockroach/issues/135656) -- Statement-level triggers for `TRUNCATE` events are not supported. 
[#135657](https://github.com/cockroachdb/cockroach/issues/135657) +- `CREATE OR REPLACE TRIGGER` is not supported. #128422 +- Statement-level triggers are not supported. #126362 +- `INSTEAD OF` triggers are not supported. #126363 +- A [trigger function]({% link {{ page.version.version }}/triggers.md %}#trigger-function) that is used in an existing trigger cannot be replaced with `CREATE OR REPLACE` syntax. To use `CREATE OR REPLACE`, first [drop any triggers]({% link {{ page.version.version }}/drop-trigger.md %}) that are using the function. #134555 +- Hidden columns are not visible to triggers. #133331 +- The `REFERENCING` clause for `CREATE TRIGGER` is not supported. #135655 +- CockroachDB uses one-based indexing for the `TG_ARGV` array to maintain consistency with its array indexing system. This differs from PostgreSQL, where `TG_ARGV` uses zero-based indexing, unlike other PostgreSQL arrays. Trigger functions that reference `TG_ARGV` need to be adjusted when migrating from PostgreSQL. #135311 +- `UPDATE` triggers with a column list (using `UPDATE OF column_name` syntax) are not supported. #135656 +- Statement-level triggers for `TRUNCATE` events are not supported. #135657 - {% include {{ page.version.version }}/known-limitations/drop-trigger-limitations.md %} \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/trigram-unsupported-syntax.md b/src/current/_includes/v26.1/known-limitations/trigram-unsupported-syntax.md index 494730c7ae8..a981c4915b0 100644 --- a/src/current/_includes/v26.1/known-limitations/trigram-unsupported-syntax.md +++ b/src/current/_includes/v26.1/known-limitations/trigram-unsupported-syntax.md @@ -6,4 +6,4 @@ - Acceleration on [regex string matching]({% link {{ page.version.version }}/scalar-expressions.md %}#string-matching-using-posix-regular-expressions). - `%` comparisons, `show_trgm`, and trigram index creation on [collated strings]({% link {{ page.version.version }}/collate.md %}). 
-[#41285](https://github.com/cockroachdb/cockroach/issues/41285) \ No newline at end of file +#41285 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/udf-limitations.md b/src/current/_includes/v26.1/known-limitations/udf-limitations.md index 0f44ac320f5..c212736717b 100644 --- a/src/current/_includes/v26.1/known-limitations/udf-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/udf-limitations.md @@ -1,9 +1,9 @@ -- User-defined functions are not supported in partial index predicates. [#155488](https://github.com/cockroachdb/cockroach/issues/155488) -- Views cannot reference a UDF that contains mutation statements (`INSERT`, `UPDATE`, `UPSERT`, `DELETE`). [#151686](https://github.com/cockroachdb/cockroach/issues/151686) -- A `RECORD`-returning UDF cannot be created without a `RETURN` statement in the root block, which would restrict the wildcard type to a concrete one. [#122945](https://github.com/cockroachdb/cockroach/issues/122945) +- User-defined functions are not supported in partial index predicates. #155488 +- Views cannot reference a UDF that contains mutation statements (`INSERT`, `UPDATE`, `UPSERT`, `DELETE`). #151686 +- A `RECORD`-returning UDF cannot be created without a `RETURN` statement in the root block, which would restrict the wildcard type to a concrete one. #122945 - User-defined functions are not currently supported in: - - Expressions (column, index, constraint) in tables. [#87699](https://github.com/cockroachdb/cockroach/issues/87699) - - Partial index predicates. [#155488](https://github.com/cockroachdb/cockroach/issues/155488) -- User-defined functions cannot call themselves recursively. [#93049](https://github.com/cockroachdb/cockroach/issues/93049) -- The `setval` function cannot be resolved when used inside UDF bodies. 
[#110860](https://github.com/cockroachdb/cockroach/issues/110860) -- Casting subqueries to [user-defined types]({% link {{ page.version.version }}/create-type.md %}) in UDFs is not supported. [#108184](https://github.com/cockroachdb/cockroach/issues/108184) \ No newline at end of file + - Expressions (column, index, constraint) in tables. #87699 + - Partial index predicates. #155488 +- User-defined functions cannot call themselves recursively. #93049 +- The `setval` function cannot be resolved when used inside UDF bodies. #110860 +- Casting subqueries to [user-defined types]({% link {{ page.version.version }}/create-type.md %}) in UDFs is not supported. #108184 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/vector-limitations.md b/src/current/_includes/v26.1/known-limitations/vector-limitations.md index 97ed7c47599..053087efe7e 100644 --- a/src/current/_includes/v26.1/known-limitations/vector-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/vector-limitations.md @@ -1,5 +1,5 @@ - {% include {{ page.version.version }}/sql/vector-batch-inserts.md %} -- `IMPORT INTO` is not supported on tables with vector indexes. You can import the vectors first and create the index after import is complete. [#145227](https://github.com/cockroachdb/cockroach/issues/145227) -- The distance functions `vector_l1_ops`, `bit_hamming_ops`, and `bit_jaccard_ops` are not implemented. [#147839](https://github.com/cockroachdb/cockroach/issues/147839) -- Index acceleration with filters is only supported if the filters match prefix columns. [#146145](https://github.com/cockroachdb/cockroach/issues/146145) -- Index recommendations are not provided for vector indexes. [#146146](https://github.com/cockroachdb/cockroach/issues/146146) \ No newline at end of file +- `IMPORT INTO` is not supported on tables with vector indexes. You can import the vectors first and create the index after import is complete. 
#145227 +- The distance functions `vector_l1_ops`, `bit_hamming_ops`, and `bit_jaccard_ops` are not implemented. #147839 +- Index acceleration with filters is only supported if the filters match prefix columns. #146145 +- Index recommendations are not provided for vector indexes. #146146 \ No newline at end of file diff --git a/src/current/_includes/v26.1/known-limitations/vectorized-engine-limitations.md b/src/current/_includes/v26.1/known-limitations/vectorized-engine-limitations.md index 111226546cf..250e4b5ee28 100644 --- a/src/current/_includes/v26.1/known-limitations/vectorized-engine-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/vectorized-engine-limitations.md @@ -1,2 +1,2 @@ -- The vectorized engine does not support queries containing a join filtered with an [`ON` expression]({% link {{ page.version.version }}/joins.md %}#supported-join-conditions). [#38018](https://github.com/cockroachdb/cockroach/issues/38018) +- The vectorized engine does not support queries containing a join filtered with an [`ON` expression]({% link {{ page.version.version }}/joins.md %}#supported-join-conditions). #38018 - The vectorized engine does not support [working with spatial data]({% link {{ page.version.version }}/query-spatial-data.md %}). Queries with [geospatial functions]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) or [spatial data]({% link {{ page.version.version }}/query-spatial-data.md %}) will revert to the row-oriented execution engine. diff --git a/src/current/_includes/v26.1/known-limitations/view-limitations.md b/src/current/_includes/v26.1/known-limitations/view-limitations.md index 02d10afc40b..165b2e475c5 100644 --- a/src/current/_includes/v26.1/known-limitations/view-limitations.md +++ b/src/current/_includes/v26.1/known-limitations/view-limitations.md @@ -1,3 +1,3 @@ -- The `security_invoker` attribute for views is not supported. 
Views always use the view definer's privileges when checking permissions. [#138918](https://github.com/cockroachdb/cockroach/issues/138918) -- Casting subqueries to [user-defined types]({% link {{ page.version.version }}/create-type.md %}) (including ENUMs) in views is not supported. [#108184](https://github.com/cockroachdb/cockroach/issues/108184) -- Statements within views do not currently respect hint injections. The workaround is to modify the inline hints directly in the body by replacing the view. [#166782](https://github.com/cockroachdb/cockroach/issues/166782) \ No newline at end of file +- The `security_invoker` attribute for views is not supported. Views always use the view definer's privileges when checking permissions. #138918 +- Casting subqueries to [user-defined types]({% link {{ page.version.version }}/create-type.md %}) (including ENUMs) in views is not supported. #108184 +- Statements within views do not currently respect hint injections. The workaround is to modify the inline hints directly in the body by replacing the view. #166782 \ No newline at end of file diff --git a/src/current/_includes/v26.1/misc/tooling.md b/src/current/_includes/v26.1/misc/tooling.md index d5f69bf12a9..072d2cd6a73 100644 --- a/src/current/_includes/v26.1/misc/tooling.md +++ b/src/current/_includes/v26.1/misc/tooling.md @@ -23,7 +23,7 @@ Customers should contact their account team before moving production workloads t Unless explicitly stated, support for a [driver](#drivers) or [data access framework](#data-access-frameworks-e-g-orms) does not include [automatic, client-side transaction retry handling]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#client-side-retry-handling). For client-side transaction retry handling samples, see [Develop with CockroachDB]({% link {{ page.version.version }}/developer-guide-overview.md %}). 
{{site.data.alerts.end}} -If you encounter problems using CockroachDB with any of the tools listed on this page, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward better support. +If you encounter problems using CockroachDB with any of the tools listed on this page, please open an issue with details to help us make progress toward better support. For a list of tools supported by the CockroachDB community, see [Third-Party Tools Supported by the Community]({% link {{ page.version.version }}/community-tooling.md %}). @@ -32,23 +32,23 @@ For a list of tools supported by the CockroachDB community, see [Third-Party Too | Language | Driver | Latest tested version | Support level | CockroachDB adapter | Tutorial | |----------+--------+-----------------------+---------------------+---------------------+----------| | C | [libpq](http://www.postgresql.org/docs/13/static/libpq.html)| PostgreSQL 13 | Partial | N/A | N/A | -| C# (.NET) | [Npgsql](https://www.nuget.org/packages/Npgsql/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/npgsql.go ||var npgsqlSupportedTag = "v||"\n\n %} | Full | N/A | [Build a C# App with CockroachDB (Npgsql)](build-a-csharp-app-with-cockroachdb.html) | -| Go | [pgx](https://github.com/jackc/pgx/releases)


[pq](https://github.com/lib/pq) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgx.go ||var supportedPGXTag = "||"\n\n %}
(use latest version of CockroachDB adapter)
{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/libpq.go ||var libPQSupportedTag = "||"\n\n %} | Full


Full | [`crdbpgx`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbpgx)
(includes client-side transaction retry handling)
N/A | [Build a Go App with CockroachDB (pgx)](build-a-go-app-with-cockroachdb.html)


[Build a Go App with CockroachDB (pq)](build-a-go-app-with-cockroachdb-pq.html) | -| Java | [JDBC](https://jdbc.postgresql.org/download/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgjdbc.go ||var supportedPGJDBCTag = "||"\n\n %} | Full | N/A | [Build a Java App with CockroachDB (JDBC)](build-a-java-app-with-cockroachdb.html) | +| C# (.NET) | [Npgsql](https://www.nuget.org/packages/Npgsql/) | 7.0.2 | Full | N/A | [Build a C# App with CockroachDB (Npgsql)](build-a-csharp-app-with-cockroachdb.html) | +| Go | [pgx](https://github.com/jackc/pgx/releases)


[pq](https://github.com/lib/pq) | v5.3.1
(use latest version of CockroachDB adapter)
v1.10.5 | Full


Full | [`crdbpgx`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbpgx)
(includes client-side transaction retry handling)
N/A | [Build a Go App with CockroachDB (pgx)](build-a-go-app-with-cockroachdb.html)


[Build a Go App with CockroachDB (pq)](build-a-go-app-with-cockroachdb-pq.html) | +| Java | [JDBC](https://jdbc.postgresql.org/download/) | REL42.7.3 | Full | N/A | [Build a Java App with CockroachDB (JDBC)](build-a-java-app-with-cockroachdb.html) | | JavaScript | [pg](https://www.npmjs.com/package/pg) | 8.2.1 | Full | N/A | [Build a Node.js App with CockroachDB (pg)](build-a-nodejs-app-with-cockroachdb.html) | -| Python | [psycopg3](https://www.psycopg.org/psycopg3/docs/)


[psycopg2](https://www.psycopg.org/docs/install.html)


[asyncpg](https://magicstack.github.io/asyncpg/current/index.html) | 3.0.16


2.8.6


{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/asyncpg.go || var asyncpgSupportedTag = "||"\n\n %} | Full


Full


Partial | N/A


N/A


N/A | [Build a Python App with CockroachDB (psycopg3)](build-a-python-app-with-cockroachdb-psycopg3.html)


[Build a Python App with CockroachDB (psycopg2)](build-a-python-app-with-cockroachdb.html)


[Build a Python App with CockroachDB (asyncpg)](build-a-python-app-with-cockroachdb-asyncpg.html) | -| Ruby | [pg](https://rubygems.org/gems/pg) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/ruby_pg.go ||var rubyPGVersion = "||"\n\n %} | Full | N/A | [Build a Ruby App with CockroachDB (pg)](build-a-ruby-app-with-cockroachdb.html) | +| Python | [psycopg3](https://www.psycopg.org/psycopg3/docs/)


[psycopg2](https://www.psycopg.org/docs/install.html)


[asyncpg](https://magicstack.github.io/asyncpg/current/index.html) | 3.0.16


2.8.6


v0.24.0 | Full


Full


Partial | N/A


N/A


N/A | [Build a Python App with CockroachDB (psycopg3)](build-a-python-app-with-cockroachdb-psycopg3.html)


[Build a Python App with CockroachDB (psycopg2)](build-a-python-app-with-cockroachdb.html)


[Build a Python App with CockroachDB (asyncpg)](build-a-python-app-with-cockroachdb-asyncpg.html) | +| Ruby | [pg](https://rubygems.org/gems/pg) | v1.4.6 | Full | N/A | [Build a Ruby App with CockroachDB (pg)](build-a-ruby-app-with-cockroachdb.html) | | Rust | [rust-postgres](https://github.com/sfackler/rust-postgres) | 0.19.2 | Partial | N/A | [Build a Rust App with CockroachDB]({% link {{ page.version.version }}/build-a-rust-app-with-cockroachdb.md %}) | ## Data access frameworks (e.g., ORMs) | Language | Framework | Latest tested version | Support level | CockroachDB adapter | Tutorial | |----------+-----------+-----------------------+---------------+---------------------+----------| -| Go | [GORM](https://github.com/jinzhu/gorm/releases)


[go-pg](https://github.com/go-pg/pg)
[upper/db](https://github.com/upper/db) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gorm.go ||var gormSupportedTag = "||"\n\n %}


{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gopg.go ||var gopgSupportedTag = "||"\n\n %}
v4 | Full


Full
Full | [`crdbgorm`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbgorm)
(includes client-side transaction retry handling)
N/A
N/A | [Build a Go App with CockroachDB (GORM)](build-a-go-app-with-cockroachdb-gorm.html)


N/A
[Build a Go App with CockroachDB (upper/db)](build-a-go-app-with-cockroachdb-upperdb.html) | -| Java | [Hibernate](https://hibernate.org/orm/)
(including [Hibernate Spatial](https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html#spatial))
[jOOQ](https://www.jooq.org/)
[MyBatis](https://mybatis.org/mybatis-3/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/hibernate.go ||var supportedHibernateTag = "||"\n\n %} (must be at least 5.4.19)


3.13.2 (must be at least 3.13.0)
3.5.5| Full


Full
Full | N/A


N/A
N/A | [Build a Java App with CockroachDB (Hibernate)](build-a-java-app-with-cockroachdb-hibernate.html)


[Build a Java App with CockroachDB (jOOQ)](build-a-java-app-with-cockroachdb-jooq.html)
[Build a Spring App with CockroachDB (MyBatis)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) | -| JavaScript/TypeScript | [Sequelize](https://www.npmjs.com/package/sequelize)


[Knex.js](https://knexjs.org/)
[Prisma](https://prisma.io)
[TypeORM](https://www.npmjs.com/package/typeorm) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/sequelize.go ||var supportedSequelizeCockroachDBRelease = "||"\n\n %}
(use latest version of CockroachDB adapter)
{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/knex.go ||const supportedKnexTag = "||"\n\n %}
3.14.0
0.3.17 {% comment %}{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/typeorm.go ||const supportedTypeORMRelease = "||"\n %}{% endcomment %} | Full


Full
Full
Full | [`sequelize-cockroachdb`](https://www.npmjs.com/package/sequelize-cockroachdb)


N/A
N/A
N/A | [Build a Node.js App with CockroachDB (Sequelize)](build-a-nodejs-app-with-cockroachdb-sequelize.html)


[Build a Node.js App with CockroachDB (Knex.js)](build-a-nodejs-app-with-cockroachdb-knexjs.html)
[Build a Node.js App with CockroachDB (Prisma)](build-a-nodejs-app-with-cockroachdb-prisma.html)
[Build a TypeScript App with CockroachDB (TypeORM)](build-a-typescript-app-with-cockroachdb.html) | -| Ruby | [ActiveRecord](https://rubygems.org/gems/activerecord)
[RGeo/RGeo-ActiveRecord](https://github.com/cockroachdb/activerecord-cockroachdb-adapter#working-with-spatial-data) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/activerecord.go ||var supportedRailsVersion = "||"\nvar %}
(use latest version of CockroachDB adapter) | Full | [`activerecord-cockroachdb-adapter`](https://rubygems.org/gems/activerecord-cockroachdb-adapter)
(includes client-side transaction retry handling) | [Build a Ruby App with CockroachDB (ActiveRecord)](build-a-ruby-app-with-cockroachdb-activerecord.html) | -| Python | [Django](https://pypi.org/project/Django/)
(including [GeoDjango](https://docs.djangoproject.com/en/3.1/ref/contrib/gis/))
[peewee](https://github.com/coleifer/peewee/)
[SQLAlchemy](https://www.sqlalchemy.org/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/django.go ||var djangoSupportedTag = "cockroach-||"\nvar %}
(use latest version of CockroachDB adapter)

3.13.3
0.7.13
1.4.17
(use latest version of CockroachDB adapter) | Full


Full
Full
Full | [`django-cockroachdb`](https://pypi.org/project/django-cockroachdb/)


N/A
N/A
[`sqlalchemy-cockroachdb`](https://pypi.org/project/sqlalchemy-cockroachdb)
(includes client-side transaction retry handling) | [Build a Python App with CockroachDB (Django)](build-a-python-app-with-cockroachdb-django.html)


N/A (See [peewee docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cockroach-database).)
[Build a Python App with CockroachDB (SQLAlchemy)](build-a-python-app-with-cockroachdb-sqlalchemy.html) | +| Go | [GORM](https://github.com/jinzhu/gorm/releases)


[go-pg](https://github.com/go-pg/pg)
[upper/db](https://github.com/upper/db) | v1.24.1


v10.9.0
v4 | Full


Full
Full | [`crdbgorm`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbgorm)
(includes client-side transaction retry handling)
N/A
N/A | [Build a Go App with CockroachDB (GORM)](build-a-go-app-with-cockroachdb-gorm.html)


N/A
[Build a Go App with CockroachDB (upper/db)](build-a-go-app-with-cockroachdb-upperdb.html) | +| Java | [Hibernate](https://hibernate.org/orm/)
(including [Hibernate Spatial](https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html#spatial))
[jOOQ](https://www.jooq.org/)
[MyBatis](https://mybatis.org/mybatis-3/) | 6.6.20 (must be at least 5.4.19)


3.13.2 (must be at least 3.13.0)
3.5.5| Full


Full
Full | N/A


N/A
N/A | [Build a Java App with CockroachDB (Hibernate)](build-a-java-app-with-cockroachdb-hibernate.html)


[Build a Java App with CockroachDB (jOOQ)](build-a-java-app-with-cockroachdb-jooq.html)
[Build a Spring App with CockroachDB (MyBatis)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) | +| JavaScript/TypeScript | [Sequelize](https://www.npmjs.com/package/sequelize)


[Knex.js](https://knexjs.org/)
[Prisma](https://prisma.io)
[TypeORM](https://www.npmjs.com/package/typeorm) | v6.0.5
(use latest version of CockroachDB adapter)
2.5.1
3.14.0
0.3.17 {% comment %}remove-unsafe-crdb-setting{% endcomment %} | Full


Full
Full
Full | [`sequelize-cockroachdb`](https://www.npmjs.com/package/sequelize-cockroachdb)


N/A
N/A
N/A | [Build a Node.js App with CockroachDB (Sequelize)](build-a-nodejs-app-with-cockroachdb-sequelize.html)


[Build a Node.js App with CockroachDB (Knex.js)](build-a-nodejs-app-with-cockroachdb-knexjs.html)
[Build a Node.js App with CockroachDB (Prisma)](build-a-nodejs-app-with-cockroachdb-prisma.html)
[Build a TypeScript App with CockroachDB (TypeORM)](build-a-typescript-app-with-cockroachdb.html) | +| Ruby | [ActiveRecord](https://rubygems.org/gems/activerecord)
[RGeo/RGeo-ActiveRecord](https://github.com/cockroachdb/activerecord-cockroachdb-adapter#working-with-spatial-data) | 8.1.0
(use latest version of CockroachDB adapter) | Full | [`activerecord-cockroachdb-adapter`](https://rubygems.org/gems/activerecord-cockroachdb-adapter)
(includes client-side transaction retry handling) | [Build a Ruby App with CockroachDB (ActiveRecord)](build-a-ruby-app-with-cockroachdb-activerecord.html) | +| Python | [Django](https://pypi.org/project/Django/)
(including [GeoDjango](https://docs.djangoproject.com/en/3.1/ref/contrib/gis/))
[peewee](https://github.com/coleifer/peewee/)
[SQLAlchemy](https://www.sqlalchemy.org/) | 4.1.x
(use latest version of CockroachDB adapter)

3.13.3
0.7.13
1.4.17
(use latest version of CockroachDB adapter) | Full


Full
Full
Full | [`django-cockroachdb`](https://pypi.org/project/django-cockroachdb/)


N/A
N/A
[`sqlalchemy-cockroachdb`](https://pypi.org/project/sqlalchemy-cockroachdb)
(includes client-side transaction retry handling) | [Build a Python App with CockroachDB (Django)](build-a-python-app-with-cockroachdb-django.html)


N/A (See [peewee docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cockroach-database).)
[Build a Python App with CockroachDB (SQLAlchemy)](build-a-python-app-with-cockroachdb-sqlalchemy.html) | ## Graphical user interfaces (GUIs) diff --git a/src/current/_includes/v26.1/orchestration/start-cockroachdb-insecure.md b/src/current/_includes/v26.1/orchestration/start-cockroachdb-insecure.md index 3406d48edbb..c9af5cc0267 100644 --- a/src/current/_includes/v26.1/orchestration/start-cockroachdb-insecure.md +++ b/src/current/_includes/v26.1/orchestration/start-cockroachdb-insecure.md @@ -1,10 +1,10 @@ -1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it. +1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it. - Download [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml): + Download [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml ~~~ {{site.data.alerts.callout_info}} @@ -27,11 +27,11 @@ Alternatively, if you'd rather start with a configuration file that has been customized for performance: - 1. 
Download our [performance version of `cockroachdb-statefulset-insecure.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml): + 1. Download our [performance version of `cockroachdb-statefulset-insecure.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml ~~~ 1. Modify the file wherever there is a `TODO` comment. @@ -72,12 +72,12 @@ pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s ~~~ -1. Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: +1. 
Use our [`cluster-init.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml ~~~ ~~~ diff --git a/src/current/_includes/v26.1/orchestration/start-cockroachdb-local-insecure.md b/src/current/_includes/v26.1/orchestration/start-cockroachdb-local-insecure.md index 552cb3cd25f..196a53bce4c 100644 --- a/src/current/_includes/v26.1/orchestration/start-cockroachdb-local-insecure.md +++ b/src/current/_includes/v26.1/orchestration/start-cockroachdb-local-insecure.md @@ -1,8 +1,8 @@ -1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it: +1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it: {% include_cached copy-clipboard.html %} ~~~ shell - $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml + $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml ~~~ ~~~ @@ -41,12 +41,12 @@ pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s ~~~ -1. 
Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: +1. Use our [`cluster-init.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml ~~~ ~~~ diff --git a/src/current/_includes/v26.1/orchestration/start-cockroachdb-secure.md b/src/current/_includes/v26.1/orchestration/start-cockroachdb-secure.md index 972cabc2d8e..a7299e1aa25 100644 --- a/src/current/_includes/v26.1/orchestration/start-cockroachdb-secure.md +++ b/src/current/_includes/v26.1/orchestration/start-cockroachdb-secure.md @@ -1,10 +1,10 @@ ### Configure the cluster -1. Download and modify our [StatefulSet configuration](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml): +1. Download and modify our [StatefulSet configuration](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml ~~~ 1. Update `secretName` with the name of the corresponding node secret. 
diff --git a/src/current/_includes/v26.1/orchestration/test-cluster-secure.md b/src/current/_includes/v26.1/orchestration/test-cluster-secure.md index f255d8d62fc..c146d634626 100644 --- a/src/current/_includes/v26.1/orchestration/test-cluster-secure.md +++ b/src/current/_includes/v26.1/orchestration/test-cluster-secure.md @@ -42,7 +42,7 @@ $ kubectl create \ {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ --f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/client.yaml +-f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/client.yaml ~~~ ~~~ diff --git a/src/current/_includes/v26.1/prod-deployment/decommission-pre-flight-checks.md b/src/current/_includes/v26.1/prod-deployment/decommission-pre-flight-checks.md index b267379384b..70e35e4b72e 100644 --- a/src/current/_includes/v26.1/prod-deployment/decommission-pre-flight-checks.md +++ b/src/current/_includes/v26.1/prod-deployment/decommission-pre-flight-checks.md @@ -14,5 +14,5 @@ Failed running "node decommission" These checks can be skipped by [passing the flag `--checks=skip` to `cockroach node decommission`]({% link {{ page.version.version }}/cockroach-node.md %}#decommission-checks). {{site.data.alerts.callout_info}} -The amount of remaining disk space on other nodes in the cluster is not yet considered as part of the decommissioning pre-flight checks. For more information, see [cockroachdb/cockroach#71757](https://github.com/cockroachdb/cockroach/issues/71757) +The amount of remaining disk space on other nodes in the cluster is not yet considered as part of the decommissioning pre-flight checks. 
For more information, see cockroachdb/cockroach#71757 {{site.data.alerts.end}} diff --git a/src/current/_includes/v26.1/sql/savepoints-and-high-priority-transactions.md b/src/current/_includes/v26.1/sql/savepoints-and-high-priority-transactions.md index c6de489e641..43f09ea415f 100644 --- a/src/current/_includes/v26.1/sql/savepoints-and-high-priority-transactions.md +++ b/src/current/_includes/v26.1/sql/savepoints-and-high-priority-transactions.md @@ -1 +1 @@ -[`ROLLBACK TO SAVEPOINT`]({% link {{ page.version.version }}/rollback-transaction.md %}#rollback-a-nested-transaction) (for either regular savepoints or "restart savepoints" defined with `cockroach_restart`) causes a "feature not supported" error after a DDL statement in a [`HIGH PRIORITY` transaction]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities), in order to avoid a transaction deadlock. For more information, see GitHub issue [#46414](https://www.github.com/cockroachdb/cockroach/issues/46414). +[`ROLLBACK TO SAVEPOINT`]({% link {{ page.version.version }}/rollback-transaction.md %}#rollback-a-nested-transaction) (for either regular savepoints or "restart savepoints" defined with `cockroach_restart`) causes a "feature not supported" error after a DDL statement in a [`HIGH PRIORITY` transaction]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities), in order to avoid a transaction deadlock. For more information, see GitHub issue #46414. diff --git a/src/current/_includes/v26.1/sql/unsupported-postgres-features.md b/src/current/_includes/v26.1/sql/unsupported-postgres-features.md index a89650e38e9..9ad89047189 100644 --- a/src/current/_includes/v26.1/sql/unsupported-postgres-features.md +++ b/src/current/_includes/v26.1/sql/unsupported-postgres-features.md @@ -1,10 +1,10 @@ ### `CREATE DOMAIN` -CockroachDB does not support `CREATE DOMAIN`. Tracking issue: [cockroachdb/cockroach#108659](https://github.com/cockroachdb/cockroach/issues/108659). 
+CockroachDB does not support `CREATE DOMAIN`. Tracking issue: cockroachdb/cockroach#108659. ### PostgreSQL range types -CockroachDB does not support PostgreSQL range types. Tracking issue: [cockroachdb/cockroach#128638](https://github.com/cockroachdb/cockroach/issues/128638). +CockroachDB does not support PostgreSQL range types. Tracking issue: cockroachdb/cockroach#128638. ### Other unsupported features diff --git a/src/current/_includes/v26.2/backward-incompatible/alpha.1.md b/src/current/_includes/v26.2/backward-incompatible/alpha.1.md index 8d4004422ad..f10881f0418 100644 --- a/src/current/_includes/v26.2/backward-incompatible/alpha.1.md +++ b/src/current/_includes/v26.2/backward-incompatible/alpha.1.md @@ -1,15 +1,15 @@ -- CockroachDB no longer performs environment variable expansion in the parameter `--certs-dir`. Uses like `--certs-dir='$HOME/path'` (expansion by CockroachDB) can be replaced by `--certs-dir="$HOME/path"` (expansion by the Unix shell). [#81298][#81298] -- In the Cockroach CLI, [`BOOL` values]({% link {{ page.version.version }}/bool.md %}) are now formatted as `t` or `f` instead of `True` or `False`. [#81943][#81943] -- Removed the `cockroach quit` command. It has been deprecated since v20.1. To [shut down a node]({% link {{ page.version.version }}/node-shutdown.md %}) gracefully, send a `SIGTERM` signal to it. [#82988][#82988] -- Added a cluster version to allow the [Pebble storage engine]({% link {{ page.version.version }}/architecture/storage-layer.md#pebble %}) to recombine certain SSTables (specifically, user keys that are split across multiple files in a level of the [log-structured merge-tree]({% link {{ page.version.version }}/architecture/storage-layer.md#log-structured-merge-trees %})). Recombining the split user keys is required for supporting the range keys feature. 
The migration to recombine the SSTables is expected to be short (split user keys are rare in practice), but will block subsequent migrations until all tables have been recombined. The `storage.marked-for-compaction-files` time series metric can show the progress of the migration. [#84887][#84887] +- CockroachDB no longer performs environment variable expansion in the parameter `--certs-dir`. Uses like `--certs-dir='$HOME/path'` (expansion by CockroachDB) can be replaced by `--certs-dir="$HOME/path"` (expansion by the Unix shell). #81298 +- In the Cockroach CLI, [`BOOL` values]({% link {{ page.version.version }}/bool.md %}) are now formatted as `t` or `f` instead of `True` or `False`. #81943 +- Removed the `cockroach quit` command. It has been deprecated since v20.1. To [shut down a node]({% link {{ page.version.version }}/node-shutdown.md %}) gracefully, send a `SIGTERM` signal to it. #82988 +- Added a cluster version to allow the [Pebble storage engine]({% link {{ page.version.version }}/architecture/storage-layer.md#pebble %}) to recombine certain SSTables (specifically, user keys that are split across multiple files in a level of the [log-structured merge-tree]({% link {{ page.version.version }}/architecture/storage-layer.md#log-structured-merge-trees %})). Recombining the split user keys is required for supporting the range keys feature. The migration to recombine the SSTables is expected to be short (split user keys are rare in practice), but will block subsequent migrations until all tables have been recombined. The `storage.marked-for-compaction-files` time series metric can show the progress of the migration. #84887 - Using a single TCP port listener for both RPC (node-node) and SQL client connections is now deprecated. This capability **will be removed** in the next version of CockroachDB. 
Instead, make one of the following configuration changes to your CockroachDB deployment: - Preferred: keep port `26257` for SQL, and allocate a new port, e.g., `26357`, for node-node RPC connections. For example, you might configure a node with the flags `--listen-addr=:26357 --sql-addr=:26257`, where subsequent nodes seeking to join would then use the flag `--join=othernode:26357,othernode:26257`. This will become the default configuration in the next version of CockroachDB. When using this mode of operation, care should be taken to use a `--join` flag that includes both the previous and new port numbers for other nodes, so that no network partition occurs during the upgrade. - - Optional: keep port `26257` for RPC, and allocate a new port, e.g., `26357`, for SQL connections. For example, you might configure a node with the flags `--listen-addr=:26257 --sql-addr=:26357`. When using this mode of operation, the `--join` flags do not need to be modified. However, SQL client apps or the SQL load balancer configuration (when in use) must be updated to use the new SQL port number. [#85671][#85671] -- If no `nullif` option is specified while using [`IMPORT CSV`]({% link {{ page.version.version }}/import.md %}), then a zero-length string in the input is now treated as `NULL`. The quoted empty string in the input is treated as an empty string. Similarly, if `nullif` is specified, then an unquoted value is treated as `NULL`, and a quoted value is treated as that string. These changes were made to make `IMPORT CSV` behave more similarly to `COPY CSV`. If the previous behavior (i.e., treating either quoted or unquoted values that match the `nullif` setting as `NULL`) is desired, you can use the new `allow_quoted_null` option in the `IMPORT` statement. [#84487][#84487] -- [`COPY FROM`]({% link {{ page.version.version }}/copy.md %}) operations are now atomic by default instead of being segmented into 100 row transactions. 
Set the `copy_from_atomic_enabled` session setting to `false` for the previous behavior. [#85986][#85986] -- The `GRANT` privilege has been removed and replaced by the more granular [`WITH GRANT OPTION`]({% link {{ page.version.version }}/grant.md %}#grant-privileges-with-the-option-to-grant-to-others), which provides control over which privileges are allowed to be granted. [#81310][#81310] -- Removed the ability to cast `int`, `int2`, and `int8` to a `0` length `BIT` or `VARBIT`. [#81266][#81266] -- Removed the deprecated `GRANT` privilege. [#81310][#81310] -- Removed the `ttl_automatic_column` storage parameter. The `crdb_internal_expiration` column is created when `ttl_expire_after` is set and removed when `ttl_expire_after` is reset. [#83134][#83134] -- Removed the byte string parameter in the `crdb_internal.schedule_sql_stats_compaction` function. [#82560][#82560] -- Changed the default value of the `enable_implicit_transaction_for_batch_statements` to `true`. This means that a [batch of statements]({% link {{ page.version.version }}/transactions.md %}#batched-statements) sent in one string separated by semicolons is treated as an implicit transaction. [#76834][#76834] + - Optional: keep port `26257` for RPC, and allocate a new port, e.g., `26357`, for SQL connections. For example, you might configure a node with the flags `--listen-addr=:26257 --sql-addr=:26357`. When using this mode of operation, the `--join` flags do not need to be modified. However, SQL client apps or the SQL load balancer configuration (when in use) must be updated to use the new SQL port number. #85671 +- If no `nullif` option is specified while using [`IMPORT CSV`]({% link {{ page.version.version }}/import.md %}), then a zero-length string in the input is now treated as `NULL`. The quoted empty string in the input is treated as an empty string. Similarly, if `nullif` is specified, then an unquoted value is treated as `NULL`, and a quoted value is treated as that string. 
These changes were made to make `IMPORT CSV` behave more similarly to `COPY CSV`. If the previous behavior (i.e., treating either quoted or unquoted values that match the `nullif` setting as `NULL`) is desired, you can use the new `allow_quoted_null` option in the `IMPORT` statement. #84487 +- [`COPY FROM`]({% link {{ page.version.version }}/copy.md %}) operations are now atomic by default instead of being segmented into 100 row transactions. Set the `copy_from_atomic_enabled` session setting to `false` for the previous behavior. #85986 +- The `GRANT` privilege has been removed and replaced by the more granular [`WITH GRANT OPTION`]({% link {{ page.version.version }}/grant.md %}#grant-privileges-with-the-option-to-grant-to-others), which provides control over which privileges are allowed to be granted. #81310 +- Removed the ability to cast `int`, `int2`, and `int8` to a `0` length `BIT` or `VARBIT`. #81266 +- Removed the deprecated `GRANT` privilege. #81310 +- Removed the `ttl_automatic_column` storage parameter. The `crdb_internal_expiration` column is created when `ttl_expire_after` is set and removed when `ttl_expire_after` is reset. #83134 +- Removed the byte string parameter in the `crdb_internal.schedule_sql_stats_compaction` function. #82560 +- Changed the default value of the `enable_implicit_transaction_for_batch_statements` to `true`. This means that a [batch of statements]({% link {{ page.version.version }}/transactions.md %}#batched-statements) sent in one string separated by semicolons is treated as an implicit transaction. 
#76834 diff --git a/src/current/_includes/v26.2/cdc/avro-udt-composite.md b/src/current/_includes/v26.2/cdc/avro-udt-composite.md index 7a34fbd3253..8374c4d2baf 100644 --- a/src/current/_includes/v26.2/cdc/avro-udt-composite.md +++ b/src/current/_includes/v26.2/cdc/avro-udt-composite.md @@ -1 +1 @@ -A changefeed in [Avro format]({% link {{ page.version.version }}/changefeed-messages.md %}#avro) will not be able to serialize [user-defined composite (tuple) types](create-type.html). [#102903](https://github.com/cockroachdb/cockroach/issues/102903) \ No newline at end of file +A changefeed in [Avro format]({% link {{ page.version.version }}/changefeed-messages.md %}#avro) will not be able to serialize [user-defined composite (tuple) types](create-type.html). #102903 \ No newline at end of file diff --git a/src/current/_includes/v26.2/cdc/csv-udt-composite.md b/src/current/_includes/v26.2/cdc/csv-udt-composite.md index 834bddd8366..39c88f3e06a 100644 --- a/src/current/_includes/v26.2/cdc/csv-udt-composite.md +++ b/src/current/_includes/v26.2/cdc/csv-udt-composite.md @@ -1 +1 @@ -A changefeed emitting [CSV]({% link {{ page.version.version }}/changefeed-messages.md %}#csv) will include `AS` labels in the message format when the changefeed serializes a [user-defined composite type]({% link {{ page.version.version }}/create-type.md %}). [#102905](https://github.com/cockroachdb/cockroach/issues/102905) \ No newline at end of file +A changefeed emitting [CSV]({% link {{ page.version.version }}/changefeed-messages.md %}#csv) will include `AS` labels in the message format when the changefeed serializes a [user-defined composite type]({% link {{ page.version.version }}/create-type.md %}). 
#102905 \ No newline at end of file diff --git a/src/current/_includes/v26.2/faq/what-is-crdb.md b/src/current/_includes/v26.2/faq/what-is-crdb.md index 28857ed61fa..ee04ca10506 100644 --- a/src/current/_includes/v26.2/faq/what-is-crdb.md +++ b/src/current/_includes/v26.2/faq/what-is-crdb.md @@ -1,6 +1,6 @@ CockroachDB is a [distributed SQL](https://www.cockroachlabs.com/blog/what-is-distributed-sql/) database built on a transactional and strongly-consistent key-value store. It **scales** horizontally; **survives** disk, machine, rack, and even datacenter failures with minimal latency disruption and no manual intervention; supports **strongly-consistent** ACID transactions; and provides a familiar **SQL** API for structuring, manipulating, and querying data. -CockroachDB is inspired by Google's [Spanner](http://research.google.com/archive/spanner.html) and [F1](http://research.google.com/pubs/pub38125.html) technologies, and the [source code](https://github.com/cockroachdb/cockroach) is freely available. +CockroachDB is inspired by Google's [Spanner](http://research.google.com/archive/spanner.html) and [F1](http://research.google.com/pubs/pub38125.html) technologies, and the source code is freely available. {{site.data.alerts.callout_success}} For a deeper dive into CockroachDB's capabilities and how it fits into the database landscape, take the free [**Intro to Distributed SQL and CockroachDB**](https://university.cockroachlabs.com/courses/course-v1:crl+intro-to-distributed-sql-and-cockroachdb+self-paced/about) course on Cockroach University. 
diff --git a/src/current/_includes/v26.2/finalization-required/119894.md b/src/current/_includes/v26.2/finalization-required/119894.md index f2b393c3c0e..f5177dcd523 100644 --- a/src/current/_includes/v26.2/finalization-required/119894.md +++ b/src/current/_includes/v26.2/finalization-required/119894.md @@ -1 +1 @@ -[Splits](https://cockroachlabs.com/docs/{{ include.version }}/architecture/distribution-layer#range-splits) no longer hold [latches](https://cockroachlabs.com/docs/architecture/distribution-layer.#latch-manager) for time proportional to the range size while computing [MVCC](https://cockroachlabs.com/docs/{{ include.version }}/architecture/storage-layer#mvcc) statistics. Instead, MVCC statistics are pre-computed before the critical section of the split. As a side effect, the resulting statistics are no longer 100% accurate because they may correctly distribute writes concurrent with the split. To mitigate against this potential inaccuracy, and to prevent the statistics from drifting after successive splits, the existing stored statistics are re-computed and corrected if needed during the non-critical section of the split. [#119894](https://github.com/cockroachdb/cockroach/pull/119894) +[Splits](https://cockroachlabs.com/docs/{{ include.version }}/architecture/distribution-layer#range-splits) no longer hold [latches](https://cockroachlabs.com/docs/architecture/distribution-layer#latch-manager) for time proportional to the range size while computing [MVCC](https://cockroachlabs.com/docs/{{ include.version }}/architecture/storage-layer#mvcc) statistics. Instead, MVCC statistics are pre-computed before the critical section of the split. As a side effect, the resulting statistics are no longer 100% accurate because they may not correctly distribute writes concurrent with the split.
To mitigate against this potential inaccuracy, and to prevent the statistics from drifting after successive splits, the existing stored statistics are re-computed and corrected if needed during the non-critical section of the split. #119894 diff --git a/src/current/_includes/v26.2/known-limitations/active-session-history.md b/src/current/_includes/v26.2/known-limitations/active-session-history.md index 1d195fb07cf..bfb76e871a1 100644 --- a/src/current/_includes/v26.2/known-limitations/active-session-history.md +++ b/src/current/_includes/v26.2/known-limitations/active-session-history.md @@ -1,3 +1,3 @@ -- ASH is not recommended for nodes with 64 or more vCPUs, due to degraded performance on those nodes. [#168289](https://github.com/cockroachdb/cockroach/issues/168289) +- ASH is not recommended for nodes with 64 or more vCPUs, due to degraded performance on those nodes. #168289 - On Basic and Standard CockroachDB {{ site.data.products.cloud }} clusters, ASH samples only cover work running on the [SQL]({% link {{ page.version.version }}/architecture/sql-layer.md %}) pod. KV-level work ([storage]({% link {{ page.version.version }}/architecture/storage-layer.md %}) I/O, [lock waits]({% link {{ page.version.version }}/troubleshoot-lock-contention.md %}), [replication]({% link {{ page.version.version }}/architecture/replication-layer.md %}), etc.) is not visible in ASH samples. -- KV work triggered during [COMMIT]({% link {{ page.version.version }}/commit-transaction.md %}) (for example, [intent resolution]({% link {{ page.version.version }}/architecture/transaction-layer.md %}), [Raft]({% link {{ page.version.version }}/architecture/replication-layer.md %}#raft) proposals deferred from earlier statements in an [explicit transaction]({% link {{ page.version.version }}/begin-transaction.md %})) is attributed to the last [statement's fingerprint]({% link {{ page.version.version }}/ui-statements-page.md %}), not the statement that originally caused the work. 
[#165864](https://github.com/cockroachdb/cockroach/issues/165864) \ No newline at end of file +- KV work triggered during [COMMIT]({% link {{ page.version.version }}/commit-transaction.md %}) (for example, [intent resolution]({% link {{ page.version.version }}/architecture/transaction-layer.md %}), [Raft]({% link {{ page.version.version }}/architecture/replication-layer.md %}#raft) proposals deferred from earlier statements in an [explicit transaction]({% link {{ page.version.version }}/begin-transaction.md %})) is attributed to the last [statement's fingerprint]({% link {{ page.version.version }}/ui-statements-page.md %}), not the statement that originally caused the work. #165864 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/alter-changefeed-cdc-queries.md b/src/current/_includes/v26.2/known-limitations/alter-changefeed-cdc-queries.md index 56dd7eeaacd..4134b3c9f86 100644 --- a/src/current/_includes/v26.2/known-limitations/alter-changefeed-cdc-queries.md +++ b/src/current/_includes/v26.2/known-limitations/alter-changefeed-cdc-queries.md @@ -1 +1 @@ -{% if page.name == "alter-changefeed.md" %} `ALTER CHANGEFEED` {% else %} [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) {% endif %} is not fully supported with changefeeds that use {% if page.name == "cdc-queries.md" %} CDC queries. {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}). {% endif %} You can alter the options that a changefeed uses, but you cannot alter the changefeed target tables. [#83033](https://github.com/cockroachdb/cockroach/issues/83033) \ No newline at end of file +{% if page.name == "alter-changefeed.md" %} `ALTER CHANGEFEED` {% else %} [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) {% endif %} is not fully supported with changefeeds that use {% if page.name == "cdc-queries.md" %} CDC queries. 
{% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}). {% endif %} You can alter the options that a changefeed uses, but you cannot alter the changefeed target tables. #83033 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/alter-changefeed-limitations.md b/src/current/_includes/v26.2/known-limitations/alter-changefeed-limitations.md index a183f2964f4..cb75ad71c55 100644 --- a/src/current/_includes/v26.2/known-limitations/alter-changefeed-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/alter-changefeed-limitations.md @@ -1,4 +1,4 @@ -- It is necessary to [`PAUSE`]({% link {{ page.version.version }}/pause-job.md %}) the changefeed before performing any [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) statement. [#77171](https://github.com/cockroachdb/cockroach/issues/77171) +- It is necessary to [`PAUSE`]({% link {{ page.version.version }}/pause-job.md %}) the changefeed before performing any [`ALTER CHANGEFEED`]({% link {{ page.version.version }}/alter-changefeed.md %}) statement. #77171 - CockroachDB does not keep track of the [`initial_scan`]({% link {{ page.version.version }}/create-changefeed.md %}#initial-scan) option applied to tables when it is set to `yes` or `only`. For example: ~~~ sql diff --git a/src/current/_includes/v26.2/known-limitations/alter-sequence-limitations.md b/src/current/_includes/v26.2/known-limitations/alter-sequence-limitations.md index 7343a1d1f1e..ba6bc88aa32 100644 --- a/src/current/_includes/v26.2/known-limitations/alter-sequence-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/alter-sequence-limitations.md @@ -1 +1 @@ -- Altering the minimum or maximum value of a series does not check the current value of a series. This means that it is possible to silently set the maximum to a value less than, or a minimum value greater than, the current value. 
[#23719](https://github.com/cockroachdb/cockroach/issues/23719) +- Altering the minimum or maximum value of a series does not check the current value of a series. This means that it is possible to silently set the maximum to a value less than, or a minimum value greater than, the current value. #23719 diff --git a/src/current/_includes/v26.2/known-limitations/alter-table-add-column-limitations.md b/src/current/_includes/v26.2/known-limitations/alter-table-add-column-limitations.md index a8e71e5e59c..a8249cfb0f8 100644 --- a/src/current/_includes/v26.2/known-limitations/alter-table-add-column-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/alter-table-add-column-limitations.md @@ -2,7 +2,7 @@ - The column uses a [sequence]({% link {{ page.version.version }}/create-sequence.md %}) as the [`DEFAULT`]({% link {{ page.version.version }}/default-value.md %}) value, for example using `nextval()`. - The column uses `GENERATED ALWAYS AS IDENTITY` or `GENERATED BY DEFAULT AS IDENTITY`, unless the table being altered is empty. - This is because CockroachDB does not support back-filling sequential column data. [#42508](https://github.com/cockroachdb/cockroach/issues/42508) + This is because CockroachDB does not support back-filling sequential column data. #42508 - When executing an [`ALTER TABLE ADD COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#add-column) statement with a [`DEFAULT`]({% link {{ page.version.version }}/default-value.md %}) expression, new values generated: - Use the default [search path]({% link {{ page.version.version }}/sql-name-resolution.md %}#search-path) regardless of the search path configured in the current session via `SET SEARCH_PATH`. - Use the UTC time zone regardless of the time zone configured in the current session via [`SET TIME ZONE`]({% link {{ page.version.version }}/set-vars.md %}). 
diff --git a/src/current/_includes/v26.2/known-limitations/alter-view-limitations.md b/src/current/_includes/v26.2/known-limitations/alter-view-limitations.md index 642bed6ce08..e44921629ff 100644 --- a/src/current/_includes/v26.2/known-limitations/alter-view-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/alter-view-limitations.md @@ -1,4 +1,4 @@ `ALTER VIEW` does not currently support: - Changing the [`SELECT`]({% link {{ page.version.version }}/select-clause.md %}) statement executed by a view. Instead, you must drop the existing view and create a new view. -- Renaming a view that other views depend on. This feature may be added in the future. [#10083](https://github.com/cockroachdb/cockroach/issues/10083) \ No newline at end of file +- Renaming a view that other views depend on. This feature may be added in the future. #10083 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/aost-limitations.md b/src/current/_includes/v26.2/known-limitations/aost-limitations.md index 811c884d08d..c42e62800d3 100644 --- a/src/current/_includes/v26.2/known-limitations/aost-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/aost-limitations.md @@ -1 +1 @@ -CockroachDB does not support placeholders in {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %}. The time value must be a constant value embedded in the SQL string. [#30955](https://github.com/cockroachdb/cockroach/issues/30955) \ No newline at end of file +CockroachDB does not support placeholders in {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %}. The time value must be a constant value embedded in the SQL string. 
#30955 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/cannot-refresh-materialized-views-inside-transactions.md b/src/current/_includes/v26.2/known-limitations/cannot-refresh-materialized-views-inside-transactions.md index b0aaf728177..2f6fe466a95 100644 --- a/src/current/_includes/v26.2/known-limitations/cannot-refresh-materialized-views-inside-transactions.md +++ b/src/current/_includes/v26.2/known-limitations/cannot-refresh-materialized-views-inside-transactions.md @@ -24,4 +24,4 @@ SQLSTATE: 25000 ~~~ - [#66008](https://github.com/cockroachdb/cockroach/issues/66008) + #66008 diff --git a/src/current/_includes/v26.2/known-limitations/cdc-queries-column-families.md b/src/current/_includes/v26.2/known-limitations/cdc-queries-column-families.md index 505a8c9700e..52bdabec236 100644 --- a/src/current/_includes/v26.2/known-limitations/cdc-queries-column-families.md +++ b/src/current/_includes/v26.2/known-limitations/cdc-queries-column-families.md @@ -1 +1 @@ -Creating a changefeed with {% if page.name == "cdc-queries.md" %} CDC queries {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}) {% endif %} on tables with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %} is not supported. [#127761](https://github.com/cockroachdb/cockroach/issues/127761) \ No newline at end of file +Creating a changefeed with {% if page.name == "cdc-queries.md" %} CDC queries {% else %} [CDC queries]({% link {{ page.version.version }}/cdc-queries.md %}) {% endif %} on tables with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %} is not supported. 
#127761 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/cdc-queries.md b/src/current/_includes/v26.2/known-limitations/cdc-queries.md index 2839eba5eda..552900531b0 100644 --- a/src/current/_includes/v26.2/known-limitations/cdc-queries.md +++ b/src/current/_includes/v26.2/known-limitations/cdc-queries.md @@ -3,5 +3,5 @@ - The following are not permitted in CDC queries: - [Volatile functions]({% link {{ page.version.version }}/functions-and-operators.md %}#function-volatility). - Sub-select queries. - - [Aggregate]({% link {{ page.version.version }}/functions-and-operators.md %}#aggregate-functions) and [window functions]({% link {{ page.version.version }}/window-functions.md %}) (i.e., functions operating over many rows). [#98237](https://github.com/cockroachdb/cockroach/issues/98237) -- `delete` changefeed events will only contain the [primary key]({% link {{ page.version.version }}/primary-key.md %}). All other columns will emit as `NULL`. See [Capture delete messages]({% link {{ page.version.version }}/cdc-queries.md %}#capture-delete-messages) for detail on running a CDC query that emits the deleted values. [#83835](https://github.com/cockroachdb/cockroach/issues/83835) + - [Aggregate]({% link {{ page.version.version }}/functions-and-operators.md %}#aggregate-functions) and [window functions]({% link {{ page.version.version }}/window-functions.md %}) (i.e., functions operating over many rows). #98237 +- `delete` changefeed events will only contain the [primary key]({% link {{ page.version.version }}/primary-key.md %}). All other columns will emit as `NULL`. See [Capture delete messages]({% link {{ page.version.version }}/cdc-queries.md %}#capture-delete-messages) for detail on running a CDC query that emits the deleted values. 
#83835 diff --git a/src/current/_includes/v26.2/known-limitations/cdc.md b/src/current/_includes/v26.2/known-limitations/cdc.md index a473e94367c..0a3914da8bd 100644 --- a/src/current/_includes/v26.2/known-limitations/cdc.md +++ b/src/current/_includes/v26.2/known-limitations/cdc.md @@ -1,8 +1,8 @@ -- Changefeed target options are limited to tables and [column families]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}). [#73435](https://github.com/cockroachdb/cockroach/issues/73435) +- Changefeed target options are limited to tables and [column families]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}). #73435 - {% include {{page.version.version}}/cdc/kafka-vpc-limitation.md %} -- Webhook sinks only support HTTPS. Use the [`insecure_tls_skip_verify`]({% link {{ page.version.version }}/create-changefeed.md %}#insecure-tls-skip-verify) parameter when testing to disable certificate verification; however, this still requires HTTPS and certificates. [#73431](https://github.com/cockroachdb/cockroach/issues/73431) -- Formats for changefeed messages are not supported by all changefeed sinks. Refer to the [Changefeed Sinks]({% link {{ page.version.version }}/changefeed-sinks.md %}) page for details on compatible formats with each sink and the [`format`]({% link {{ page.version.version }}/create-changefeed.md %}) option to specify a changefeed message format. [#73432](https://github.com/cockroachdb/cockroach/issues/73432) -- Using the [`split_column_families`]({% link {{ page.version.version }}/create-changefeed.md %}#split-column-families) and [`resolved`]({% link {{ page.version.version }}/create-changefeed.md %}#resolved) options on the same changefeed will cause an error when using the following [sinks](changefeed-sinks.html): Kafka and Google Cloud Pub/Sub. Instead, use the individual `FAMILY` keyword to specify column families when creating a changefeed. 
[#79452](https://github.com/cockroachdb/cockroach/issues/79452) +- Webhook sinks only support HTTPS. Use the [`insecure_tls_skip_verify`]({% link {{ page.version.version }}/create-changefeed.md %}#insecure-tls-skip-verify) parameter when testing to disable certificate verification; however, this still requires HTTPS and certificates. #73431 +- Formats for changefeed messages are not supported by all changefeed sinks. Refer to the [Changefeed Sinks]({% link {{ page.version.version }}/changefeed-sinks.md %}) page for details on compatible formats with each sink and the [`format`]({% link {{ page.version.version }}/create-changefeed.md %}) option to specify a changefeed message format. #73432 +- Using the [`split_column_families`]({% link {{ page.version.version }}/create-changefeed.md %}#split-column-families) and [`resolved`]({% link {{ page.version.version }}/create-changefeed.md %}#resolved) options on the same changefeed will cause an error when using the following [sinks](changefeed-sinks.html): Kafka and Google Cloud Pub/Sub. Instead, use the individual `FAMILY` keyword to specify column families when creating a changefeed. 
#79452 - {% include {{page.version.version}}/cdc/types-udt-composite-general.md %} The following limitations apply: - {% include {{page.version.version}}/cdc/avro-udt-composite.md %} - {% include {{page.version.version}}/cdc/csv-udt-composite.md %} \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/changefeed-column-family-message.md b/src/current/_includes/v26.2/known-limitations/changefeed-column-family-message.md index 41744b9b4b4..e77b6cc51e0 100644 --- a/src/current/_includes/v26.2/known-limitations/changefeed-column-family-message.md +++ b/src/current/_includes/v26.2/known-limitations/changefeed-column-family-message.md @@ -1 +1 @@ -When you create a changefeed on a table with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %}, the changefeed will emit messages per column family in separate streams. As a result, [changefeed messages]({% link {{ page.version.version }}/changefeed-messages.md %}) for different column families will arrive at the [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) under separate topics. [#127736](https://github.com/cockroachdb/cockroach/issues/127736) \ No newline at end of file +When you create a changefeed on a table with more than one {% if page.name == "changefeeds-on-tables-with-column-families.md" %} column family {% else %} [column family]({% link {{ page.version.version }}/changefeeds-on-tables-with-column-families.md %}) {% endif %}, the changefeed will emit messages per column family in separate streams. As a result, [changefeed messages]({% link {{ page.version.version }}/changefeed-messages.md %}) for different column families will arrive at the [sink]({% link {{ page.version.version }}/changefeed-sinks.md %}) under separate topics. 
#127736 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/citext-limitations.md b/src/current/_includes/v26.2/known-limitations/citext-limitations.md index df0fbc0ff91..c7e43cd2949 100644 --- a/src/current/_includes/v26.2/known-limitations/citext-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/citext-limitations.md @@ -1 +1 @@ -- `CITEXT` types are not currently compatible with the `LIKE` and `ILIKE` operators. [#149791](https://github.com/cockroachdb/cockroach/issues/149791) \ No newline at end of file +- `CITEXT` types are not currently compatible with the `LIKE` and `ILIKE` operators. #149791 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/collate-limitations.md b/src/current/_includes/v26.2/known-limitations/collate-limitations.md index d3e3e712bb5..7ba94b89053 100644 --- a/src/current/_includes/v26.2/known-limitations/collate-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/collate-limitations.md @@ -1 +1 @@ -- Many string operations are not properly overloaded for [collated strings]({% link {{ page.version.version }}/collate.md %}). For example, the `||` concatenation operator works with regular strings but returns an error with collated strings. [#10679](https://github.com/cockroachdb/cockroach/issues/10679) +- Many string operations are not properly overloaded for [collated strings]({% link {{ page.version.version }}/collate.md %}). For example, the `||` concatenation operator works with regular strings but returns an error with collated strings. 
#10679 diff --git a/src/current/_includes/v26.2/known-limitations/comment-on-limitations.md b/src/current/_includes/v26.2/known-limitations/comment-on-limitations.md index 568b31ec737..757f74e10f0 100644 --- a/src/current/_includes/v26.2/known-limitations/comment-on-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/comment-on-limitations.md @@ -1 +1 @@ -- The [`COMMENT ON`]({% link {{ page.version.version }}/comment-on.md %}) statement associates comments to databases, tables, or columns. However, the internal table (`system.comments`) in which these comments are stored is not captured by a [`BACKUP`]({% link {{ page.version.version }}/backup.md %}) of an individual table or database. As a workaround, take a cluster backup instead, as the `system.comments` table is included in cluster backups. [#44396](https://github.com/cockroachdb/cockroach/issues/44396) +- The [`COMMENT ON`]({% link {{ page.version.version }}/comment-on.md %}) statement associates comments to databases, tables, or columns. However, the internal table (`system.comments`) in which these comments are stored is not captured by a [`BACKUP`]({% link {{ page.version.version }}/backup.md %}) of an individual table or database. As a workaround, take a cluster backup instead, as the `system.comments` table is included in cluster backups. #44396 diff --git a/src/current/_includes/v26.2/known-limitations/composite-type-limitations.md b/src/current/_includes/v26.2/known-limitations/composite-type-limitations.md index 1ceecb5d889..48ce4591246 100644 --- a/src/current/_includes/v26.2/known-limitations/composite-type-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/composite-type-limitations.md @@ -1,2 +1,2 @@ -- Updating subfields of composite types using dot syntax results in a syntax error. 
[#102984](https://github.com/cockroachdb/cockroach/issues/102984) -- Tuple elements cannot be accessed without enclosing the [composite variable]({% link {{ page.version.version }}/create-type.md %}) name in parentheses. For example, `(OLD).column` and `(NEW).column` when used in [triggers]({% link {{ page.version.version }}/triggers.md %}). [#114687](https://github.com/cockroachdb/cockroach/issues/114687) +- Updating subfields of composite types using dot syntax results in a syntax error. #102984 +- Tuple elements cannot be accessed without enclosing the [composite variable]({% link {{ page.version.version }}/create-type.md %}) name in parentheses. For example, `(OLD).column` and `(NEW).column` when used in [triggers]({% link {{ page.version.version }}/triggers.md %}). #114687 diff --git a/src/current/_includes/v26.2/known-limitations/copy-syntax.md b/src/current/_includes/v26.2/known-limitations/copy-syntax.md index e64a075dcac..bd2e56e1c25 100644 --- a/src/current/_includes/v26.2/known-limitations/copy-syntax.md +++ b/src/current/_includes/v26.2/known-limitations/copy-syntax.md @@ -1,5 +1,5 @@ CockroachDB does not yet support the following `COPY` syntax: - - `COPY ... WITH FREEZE`. [#85573](https://github.com/cockroachdb/cockroach/issues/85573) - - `COPY ... WITH QUOTE`. [#85574](https://github.com/cockroachdb/cockroach/issues/85574) - - `COPY ... FROM ... WHERE `. [#54580](https://github.com/cockroachdb/cockroach/issues/54580) + - `COPY ... WITH FREEZE`. #85573 + - `COPY ... WITH QUOTE`. #85574 + - `COPY ... FROM ... WHERE `. 
#54580 diff --git a/src/current/_includes/v26.2/known-limitations/create-statistics-aost-limitation.md b/src/current/_includes/v26.2/known-limitations/create-statistics-aost-limitation.md index 09f86f51c48..d318ce338be 100644 --- a/src/current/_includes/v26.2/known-limitations/create-statistics-aost-limitation.md +++ b/src/current/_includes/v26.2/known-limitations/create-statistics-aost-limitation.md @@ -1 +1 @@ -The `ANALYZE` alias {% if page.name != "create-statistics.md" %}of [`CREATE STATISTICS`]({% link {{ page.version.version }}/create-statistics.md %}){% endif %} does not support specifying an {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %} timestamp. `ANALYZE` statements use `AS OF SYSTEM TIME '-0.001ms'` automatically. For more control over the statistics interval, use the `CREATE STATISTICS` syntax instead. [#96430](https://github.com/cockroachdb/cockroach/issues/96430) \ No newline at end of file +The `ANALYZE` alias {% if page.name != "create-statistics.md" %}of [`CREATE STATISTICS`]({% link {{ page.version.version }}/create-statistics.md %}){% endif %} does not support specifying an {% if page.name == "as-of-system-time.md" %}`AS OF SYSTEM TIME`{% else %}[`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}){% endif %} timestamp. `ANALYZE` statements use `AS OF SYSTEM TIME '-0.001ms'` automatically. For more control over the statistics interval, use the `CREATE STATISTICS` syntax instead. 
#96430 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/data-domiciling-limitations.md b/src/current/_includes/v26.2/known-limitations/data-domiciling-limitations.md index b4e1092fdac..7d8faa3ce97 100644 --- a/src/current/_includes/v26.2/known-limitations/data-domiciling-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/data-domiciling-limitations.md @@ -1,4 +1,4 @@ -- When using the `infer_rbr_region_col_using_constraint` option, inserting rows with `DEFAULT` for the region column uses the database's primary region instead of inferring the region from the parent table via foreign-key constraint. [#150783](https://github.com/cockroachdb/cockroach/issues/150783) +- When using the `infer_rbr_region_col_using_constraint` option, inserting rows with `DEFAULT` for the region column uses the database's primary region instead of inferring the region from the parent table via foreign-key constraint. #150783 - When columns are [indexed]({% link {{ page.version.version }}/indexes.md %}), a subset of data from the indexed columns may appear in [meta ranges]({% link {{ page.version.version }}/architecture/distribution-layer.md %}#meta-ranges) or other system tables. CockroachDB synchronizes these system ranges and system tables across nodes. This synchronization does not respect any multi-region settings applied via either the [multi-region SQL statements]({% link {{ page.version.version }}/multiregion-overview.md %}), or the low-level [zone configs]({% link {{ page.version.version }}/configure-replication-zones.md %}) mechanism. - [Zone configs]({% link {{ page.version.version }}/configure-replication-zones.md %}) can be used for data placement but these features were historically built for performance, not for domiciling. The replication system's top priority is to prevent the loss of data and it may override the zone configurations if necessary to ensure data durability. 
For more information, see [Replication Controls]({% link {{ page.version.version }}/configure-replication-zones.md %}#types-of-constraints). - If your [log files]({% link {{ page.version.version }}/logging-overview.md %}) are kept in the region where they were generated, there is some cross-region leakage (like the system tables described previously), but the majority of user data that makes it into the logs is going to be homed in that region. If that's not strong enough, you can use the [log redaction functionality]({% link {{ page.version.version }}/configure-logs.md %}#redact-logs) to strip all raw data from the logs. You can also limit your log retention entirely. diff --git a/src/current/_includes/v26.2/known-limitations/distsql-heterogeneous-endianness.md b/src/current/_includes/v26.2/known-limitations/distsql-heterogeneous-endianness.md index 9194395dc04..7aa2963023e 100644 --- a/src/current/_includes/v26.2/known-limitations/distsql-heterogeneous-endianness.md +++ b/src/current/_includes/v26.2/known-limitations/distsql-heterogeneous-endianness.md @@ -1 +1 @@ -In clusters that mix big-endian and little-endian architectures, DistSQL may produce incorrect results because hash computations differ between the platforms. As a workaround on heterogeneous clusters, disable DistSQL with `SET CLUSTER SETTING sql.defaults.distsql = off`. [#148773](https://github.com/cockroachdb/cockroach/issues/148773) \ No newline at end of file +In clusters that mix big-endian and little-endian architectures, DistSQL may produce incorrect results because hash computations differ between the platforms. As a workaround on heterogeneous clusters, disable DistSQL with `SET CLUSTER SETTING sql.defaults.distsql = off`. 
#148773 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/drop-column-partial-index.md b/src/current/_includes/v26.2/known-limitations/drop-column-partial-index.md index 9fd1811cc43..fd28f0a96a2 100644 --- a/src/current/_includes/v26.2/known-limitations/drop-column-partial-index.md +++ b/src/current/_includes/v26.2/known-limitations/drop-column-partial-index.md @@ -1 +1 @@ -CockroachDB prevents a column from being dropped using [`ALTER TABLE ... DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) if it is referenced by a partial index predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). [#97813](https://github.com/cockroachdb/cockroach/issues/97813). \ No newline at end of file +CockroachDB prevents a column from being dropped using [`ALTER TABLE ... DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) if it is referenced by a partial index predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). #97813. \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/drop-owned-by-limitations.md b/src/current/_includes/v26.2/known-limitations/drop-owned-by-limitations.md index 95685f6adf1..af99d4b7cfe 100644 --- a/src/current/_includes/v26.2/known-limitations/drop-owned-by-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/drop-owned-by-limitations.md @@ -10,4 +10,4 @@ The phrase "synthetic privileges" in the error message refers to [system-level privileges]({% link {{ page.version.version }}/security-reference/authorization.md %}#supported-privileges). 
- The workaround is to use [`SHOW SYSTEM GRANTS FOR {role}`](show-system-grants.html) and then use [`REVOKE SYSTEM ...`](revoke.html#revoke-system-level-privileges-on-the-entire-cluster) for each privilege in the result. [#88149](https://github.com/cockroachdb/cockroach/issues/88149) \ No newline at end of file + The workaround is to use [`SHOW SYSTEM GRANTS FOR {role}`](show-system-grants.html) and then use [`REVOKE SYSTEM ...`](revoke.html#revoke-system-level-privileges-on-the-entire-cluster) for each privilege in the result. #88149 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/enforce-home-region-limitations.md b/src/current/_includes/v26.2/known-limitations/enforce-home-region-limitations.md index 693829358f7..53ce961b902 100644 --- a/src/current/_includes/v26.2/known-limitations/enforce-home-region-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/enforce-home-region-limitations.md @@ -1 +1 @@ -With `enforce_home_region` enabled, CockroachDB currently validates home-region access during plan build. This can falsely reject queries (e.g., lookup joins) that would only read local data at execution time, returning a `Query has no home region` error. [#148375](https://github.com/cockroachdb/cockroach/issues/148375) \ No newline at end of file +With `enforce_home_region` enabled, CockroachDB currently validates home-region access during plan build. This can falsely reject queries (e.g., lookup joins) that would only read local data at execution time, returning a `Query has no home region` error. 
#148375 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/expression-index-limitations.md b/src/current/_includes/v26.2/known-limitations/expression-index-limitations.md index c0e94185948..9487028791c 100644 --- a/src/current/_includes/v26.2/known-limitations/expression-index-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/expression-index-limitations.md @@ -1,6 +1,6 @@ - The expression cannot reference columns outside the index's table. - Functional expression output must be determined by the input arguments. For example, you can't use the [volatile function]({% link {{ page.version.version }}/functions-and-operators.md %}#function-volatility) `now()` to create an index because its output depends on more than just the function arguments. -- CockroachDB does not allow {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} to reference [computed columns]({% link {{ page.version.version }}/computed-columns.md %}). [#67900](https://github.com/cockroachdb/cockroach/issues/67900) +- CockroachDB does not allow {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} to reference [computed columns]({% link {{ page.version.version }}/computed-columns.md %}). #67900 - CockroachDB does not support expressions as `ON CONFLICT` targets. This means that unique {% if page.name == "expression-indexes.md" %} expression indexes {% else %} [expression indexes]({% link {{ page.version.version }}/expression-indexes.md %}) {% endif %} cannot be selected as arbiters for [`INSERT .. ON CONFLICT`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause) statements. 
For example: {% include_cached copy-clipboard.html %} @@ -40,4 +40,4 @@ HINT: try \h INSERT ~~~ - [#67893](https://github.com/cockroachdb/cockroach/issues/67893) + #67893 diff --git a/src/current/_includes/v26.2/known-limitations/forecasted-stats-limitations.md b/src/current/_includes/v26.2/known-limitations/forecasted-stats-limitations.md index c8753124a96..b5d03e8ffc5 100644 --- a/src/current/_includes/v26.2/known-limitations/forecasted-stats-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/forecasted-stats-limitations.md @@ -6,4 +6,4 @@ Although [`SHOW STATISTICS WITH FORECAST`]({% link {{ page.version.version }}/show-statistics.md %}#display-forecasted-statistics) shows the settings taking effect immediately, they do not actually take effect until new statistics are collected (as can be verified with [`EXPLAIN`]({% link {{ page.version.version }}/explain.md %})). - As a workaround, disable and enable forecasting at the [cluster]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-clusters) or [table]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-tables) level. This will invalidate the statistics cache and cause these settings to take effect immediately. [#123852](https://github.com/cockroachdb/cockroach/issues/123852) \ No newline at end of file + As a workaround, disable and enable forecasting at the [cluster]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-clusters) or [table]({% link {{ page.version.version }}/cost-based-optimizer.md %}#enable-and-disable-automatic-statistics-collection-for-tables) level. This will invalidate the statistics cache and cause these settings to take effect immediately. 
#123852 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/full-text-search-unsupported.md b/src/current/_includes/v26.2/known-limitations/full-text-search-unsupported.md index 7b5a83f2cae..2be666994f1 100644 --- a/src/current/_includes/v26.2/known-limitations/full-text-search-unsupported.md +++ b/src/current/_includes/v26.2/known-limitations/full-text-search-unsupported.md @@ -11,4 +11,4 @@ - `!! tsquery` comparisons. - `tsquery @> tsquery` and `tsquery <@ tsquery` comparisons. -[#41288](https://github.com/cockroachdb/cockroach/issues/41288) \ No newline at end of file +#41288 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/generic-query-plan-limitations.md b/src/current/_includes/v26.2/known-limitations/generic-query-plan-limitations.md index e28e66d5f32..03d72195334 100644 --- a/src/current/_includes/v26.2/known-limitations/generic-query-plan-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/generic-query-plan-limitations.md @@ -1,2 +1,2 @@ -- Because [generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache) use lookup joins instead of the scans and revscans used by custom query plans, generic query plans do not perform as well as custom query plans in some cases. [#128916](https://github.com/cockroachdb/cockroach/issues/128916) -- [Generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-type) are not included in the [plan cache]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache). This means a generic query plan built and optimized for a prepared statement in one session cannot be used by another session. To reuse generic query plans for maximum performance, a prepared statement should be executed multiple times instead of prepared and executed once. 
[#128911](https://github.com/cockroachdb/cockroach/issues/128911) \ No newline at end of file +- Because [generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache) use lookup joins instead of the scans and revscans used by custom query plans, generic query plans do not perform as well as custom query plans in some cases. #128916 +- [Generic query plans]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-type) are not included in the [plan cache]({% link {{ page.version.version }}/cost-based-optimizer.md %}#query-plan-cache). This means a generic query plan built and optimized for a prepared statement in one session cannot be used by another session. To reuse generic query plans for maximum performance, a prepared statement should be executed multiple times instead of prepared and executed once. #128911 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/geospatial-heterogeneous-architectures.md b/src/current/_includes/v26.2/known-limitations/geospatial-heterogeneous-architectures.md index 4bb9633f138..55b706c3d6d 100644 --- a/src/current/_includes/v26.2/known-limitations/geospatial-heterogeneous-architectures.md +++ b/src/current/_includes/v26.2/known-limitations/geospatial-heterogeneous-architectures.md @@ -1 +1 @@ -Clusters that mix `s390x` with other CPU architectures are unsupported for geospatial workloads. Due to differences in how trigonometric functions are computed on `s390x` systems, geospatial queries in heterogeneous clusters with `s390x` are likely to get incorrect results. This can include taking a backup on one architecture and restoring it on another. [#148783](https://github.com/cockroachdb/cockroach/issues/148783) \ No newline at end of file +Clusters that mix `s390x` with other CPU architectures are unsupported for geospatial workloads. 
Due to differences in how trigonometric functions are computed on `s390x` systems, geospatial queries in heterogeneous clusters with `s390x` are likely to get incorrect results. This can include taking a backup on one architecture and restoring it on another. #148783 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/import-into-limitations.md b/src/current/_includes/v26.2/known-limitations/import-into-limitations.md index 6f2c04b65f1..84f99e379af 100644 --- a/src/current/_includes/v26.2/known-limitations/import-into-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/import-into-limitations.md @@ -4,7 +4,7 @@ - After importing into an existing table, [constraints]({% link {{ page.version.version }}/constraints.md %}) will be un-validated and need to be [re-validated]({% link {{ page.version.version }}/alter-table.md %}#validate-constraint). - Imported rows must not conflict with existing rows in the table or any unique secondary indexes. - `IMPORT INTO` works for only a single existing table. -- When `IMPORT INTO` uses distributed merge, it stores intermediate SST files on participating SQL instances' local storage. If one of those SQL instances becomes unavailable during the merge phase, the job waits for that SQL instance to become available again. If the SQL instance does not become available again, the job fails with a permanent error. [#167491](https://github.com/cockroachdb/cockroach/issues/167491) +- When `IMPORT INTO` uses distributed merge, it stores intermediate SST files on participating SQL instances' local storage. If one of those SQL instances becomes unavailable during the merge phase, the job waits for that SQL instance to become available again. If the SQL instance does not become available again, the job fails with a permanent error. #167491 - `IMPORT INTO` can sometimes fail with a "context canceled" error, or can restart itself many times without ever finishing. 
If this is happening, it is likely due to a high amount of disk contention. This can be mitigated by setting the `kv.bulk_io_write.max_rate` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) to a value below your max disk write speed. For example, to set it to 10MB/s, execute: {% include_cached copy-clipboard.html %} ~~~ sql diff --git a/src/current/_includes/v26.2/known-limitations/int-limitations.md b/src/current/_includes/v26.2/known-limitations/int-limitations.md index 6333dbf6ed5..defda16db88 100644 --- a/src/current/_includes/v26.2/known-limitations/int-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/int-limitations.md @@ -1 +1 @@ -- When setting the `default_int_size` [session variable]({% link {{ page.version.version }}/set-vars.md %}) in a batch of statements such as `SET default_int_size='int4'; SELECT 1::INT`, the `default_int_size` variable will not take effect until the next statement. Statement parsing is asynchronous with statement execution. As a workaround, set `default_int_size` via your database driver, or ensure that `SET default_int_size` is in its own statement. [#32846](https://github.com/cockroachdb/cockroach/issues/32846) +- When setting the `default_int_size` [session variable]({% link {{ page.version.version }}/set-vars.md %}) in a batch of statements such as `SET default_int_size='int4'; SELECT 1::INT`, the `default_int_size` variable will not take effect until the next statement. Statement parsing is asynchronous with statement execution. As a workaround, set `default_int_size` via your database driver, or ensure that `SET default_int_size` is in its own statement. 
#32846 diff --git a/src/current/_includes/v26.2/known-limitations/inverted-index-limitations.md b/src/current/_includes/v26.2/known-limitations/inverted-index-limitations.md index a19a10295eb..108d5eaaba3 100644 --- a/src/current/_includes/v26.2/known-limitations/inverted-index-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/inverted-index-limitations.md @@ -1,3 +1,3 @@ -- CockroachDB does not allow inverted indexes with a [`STORING` column]({% link {{ page.version.version }}/create-index.md %}#store-columns). [#88278](https://github.com/cockroachdb/cockroach/issues/88278) -- CockroachDB cannot index-accelerate queries with `@@` predicates when both sides of the operator are variables for `tsvector` and `tsquery` types. [#102731](https://github.com/cockroachdb/cockroach/issues/102731) -- [Left joins]({% link {{ page.version.version }}/joins.md %}#left-outer-joins) and anti joins involving [`JSONB`]({% link {{ page.version.version }}/jsonb.md %}), [`ARRAY`]({% link {{ page.version.version }}/array.md %}), or [spatial-typed]({% link {{ page.version.version }}/query-spatial-data.md %}) columns with a multi-column or [partitioned]({% link {{ page.version.version }}/alter-index.md %}#partition-by) [GIN index](inverted-indexes.html) will not take advantage of the index if the prefix columns of the index are unconstrained, or if they are constrained to multiple, constant values. To work around this limitation, make sure that the prefix columns of the index are either constrained to single constant values, or are part of an equality condition with an input column. [#59649](https://github.com/cockroachdb/cockroach/issues/59649) +- CockroachDB does not allow inverted indexes with a [`STORING` column]({% link {{ page.version.version }}/create-index.md %}#store-columns). #88278 +- CockroachDB cannot index-accelerate queries with `@@` predicates when both sides of the operator are variables for `tsvector` and `tsquery` types. 
#102731 +- [Left joins]({% link {{ page.version.version }}/joins.md %}#left-outer-joins) and anti joins involving [`JSONB`]({% link {{ page.version.version }}/jsonb.md %}), [`ARRAY`]({% link {{ page.version.version }}/array.md %}), or [spatial-typed]({% link {{ page.version.version }}/query-spatial-data.md %}) columns with a multi-column or [partitioned]({% link {{ page.version.version }}/alter-index.md %}#partition-by) [GIN index](inverted-indexes.html) will not take advantage of the index if the prefix columns of the index are unconstrained, or if they are constrained to multiple, constant values. To work around this limitation, make sure that the prefix columns of the index are either constrained to single constant values, or are part of an equality condition with an input column. #59649 diff --git a/src/current/_includes/v26.2/known-limitations/jsonpath-limitations.md b/src/current/_includes/v26.2/known-limitations/jsonpath-limitations.md index 9b51bfb6e87..a74232bd40b 100644 --- a/src/current/_includes/v26.2/known-limitations/jsonpath-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/jsonpath-limitations.md @@ -1,2 +1,2 @@ -- The following keywords are only accepted in lowercase: `strict`, `lax`, `exists`, `like_regex`, `flag`, `is unknown`, `to`, `last`. [#144255](https://github.com/cockroachdb/cockroach/issues/144255) -- Comparisons involving empty arrays (e.g., `SELECT jsonb_path_query('{"a": [1], "b": []}', '$.a == $.b');`) return `null`, rather than `false` as in PostgreSQL. [#145099](https://github.com/cockroachdb/cockroach/issues/145099) \ No newline at end of file +- The following keywords are only accepted in lowercase: `strict`, `lax`, `exists`, `like_regex`, `flag`, `is unknown`, `to`, `last`. #144255 +- Comparisons involving empty arrays (e.g., `SELECT jsonb_path_query('{"a": [1], "b": []}', '$.a == $.b');`) return `null`, rather than `false` as in PostgreSQL. 
#145099 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/ldr-column-families.md b/src/current/_includes/v26.2/known-limitations/ldr-column-families.md index 2a7c3bbba52..0c9dcd74ae4 100644 --- a/src/current/_includes/v26.2/known-limitations/ldr-column-families.md +++ b/src/current/_includes/v26.2/known-limitations/ldr-column-families.md @@ -1 +1 @@ -Replicating tables cannot contain [column families]({% link {{ page.version.version }}/column-families.md %}). [#133562](https://github.com/cockroachdb/cockroach/issues/133562) \ No newline at end of file +Replicating tables cannot contain [column families]({% link {{ page.version.version }}/column-families.md %}). #133562 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/ldr-composite-primary.md b/src/current/_includes/v26.2/known-limitations/ldr-composite-primary.md index ac897af35a7..2a790952946 100644 --- a/src/current/_includes/v26.2/known-limitations/ldr-composite-primary.md +++ b/src/current/_includes/v26.2/known-limitations/ldr-composite-primary.md @@ -1 +1 @@ -The [primary key]({% link {{ page.version.version }}/primary-key.md %}) in replicating tables cannot contain composite types. [#133572](https://github.com/cockroachdb/cockroach/issues/133572) \ No newline at end of file +The [primary key]({% link {{ page.version.version }}/primary-key.md %}) in replicating tables cannot contain composite types. #133572 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/ldr-indexes.md b/src/current/_includes/v26.2/known-limitations/ldr-indexes.md index 0bf7f60c2d4..4936f9868bd 100644 --- a/src/current/_includes/v26.2/known-limitations/ldr-indexes.md +++ b/src/current/_includes/v26.2/known-limitations/ldr-indexes.md @@ -1 +1 @@ -Replicating tables cannot contain an [index]({% link {{ page.version.version }}/indexes.md %}) that requires expression evaluation before insertion. 
[#133560](https://github.com/cockroachdb/cockroach/issues/133560) \ No newline at end of file +Replicating tables cannot contain an [index]({% link {{ page.version.version }}/indexes.md %}) that requires expression evaluation before insertion. #133560 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/ldr-sequences.md b/src/current/_includes/v26.2/known-limitations/ldr-sequences.md index 4e39f3630e3..693c0cc685b 100644 --- a/src/current/_includes/v26.2/known-limitations/ldr-sequences.md +++ b/src/current/_includes/v26.2/known-limitations/ldr-sequences.md @@ -1 +1 @@ -Replicating table cannot reference [sequences]({% link {{ page.version.version }}/create-sequence.md %}). [#132303](https://github.com/cockroachdb/cockroach/issues/132303) \ No newline at end of file +Replicating table cannot reference [sequences]({% link {{ page.version.version }}/create-sequence.md %}). #132303 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/ldr-triggers.md b/src/current/_includes/v26.2/known-limitations/ldr-triggers.md index 55f8e885b97..a587568b69a 100644 --- a/src/current/_includes/v26.2/known-limitations/ldr-triggers.md +++ b/src/current/_includes/v26.2/known-limitations/ldr-triggers.md @@ -1 +1 @@ -Replicating tables cannot reference triggers. [#132301](https://github.com/cockroachdb/cockroach/issues/132301) \ No newline at end of file +Replicating tables cannot reference triggers. #132301 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/ldr-udfs.md b/src/current/_includes/v26.2/known-limitations/ldr-udfs.md index fb642f14751..62eea605b52 100644 --- a/src/current/_includes/v26.2/known-limitations/ldr-udfs.md +++ b/src/current/_includes/v26.2/known-limitations/ldr-udfs.md @@ -1 +1 @@ -Replicating tables cannot reference [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). 
[#132302](https://github.com/cockroachdb/cockroach/issues/132302) \ No newline at end of file +Replicating tables cannot reference [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). #132302 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/like-escape-performance.md b/src/current/_includes/v26.2/known-limitations/like-escape-performance.md index 845fdddeeb9..45d8f380e3d 100644 --- a/src/current/_includes/v26.2/known-limitations/like-escape-performance.md +++ b/src/current/_includes/v26.2/known-limitations/like-escape-performance.md @@ -1 +1 @@ -`LIKE` queries with an `ESCAPE` clause cannot use index acceleration, which can result in significantly slower performance compared to standard `LIKE` queries. [#30192](https://github.com/cockroachdb/cockroach/issues/30192) +`LIKE` queries with an `ESCAPE` clause cannot use index acceleration, which can result in significantly slower performance compared to standard `LIKE` queries. #30192 diff --git a/src/current/_includes/v26.2/known-limitations/locality-optimized-search-virtual-computed-columns.md b/src/current/_includes/v26.2/known-limitations/locality-optimized-search-virtual-computed-columns.md index d6acf418aa8..39b3f2cb00e 100644 --- a/src/current/_includes/v26.2/known-limitations/locality-optimized-search-virtual-computed-columns.md +++ b/src/current/_includes/v26.2/known-limitations/locality-optimized-search-virtual-computed-columns.md @@ -1 +1 @@ -- {% if page.name == "cost-based-optimizer.md" %} Locality optimized search {% else %} [Locality optimized search]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) {% endif %} does not work for queries that use [partitioned unique indexes]({% link {{ page.version.version }}/partitioning.md %}#partition-using-a-secondary-index) on [virtual computed columns](computed-columns.html#virtual-computed-columns). 
A workaround for computed columns is to make the virtual computed column a [stored computed column](computed-columns.html#stored-computed-columns). Locality optimized search does not work for queries that use partitioned unique [expression indexes](expression-indexes.html). [#68129](https://github.com/cockroachdb/cockroach/issues/68129) +- {% if page.name == "cost-based-optimizer.md" %} Locality optimized search {% else %} [Locality optimized search]({% link {{ page.version.version }}/cost-based-optimizer.md %}#locality-optimized-search-in-multi-region-clusters) {% endif %} does not work for queries that use [partitioned unique indexes]({% link {{ page.version.version }}/partitioning.md %}#partition-using-a-secondary-index) on [virtual computed columns](computed-columns.html#virtual-computed-columns). A workaround for computed columns is to make the virtual computed column a [stored computed column](computed-columns.html#stored-computed-columns). Locality optimized search does not work for queries that use partitioned unique [expression indexes](expression-indexes.html). #68129 diff --git a/src/current/_includes/v26.2/known-limitations/materialized-views-no-stats.md b/src/current/_includes/v26.2/known-limitations/materialized-views-no-stats.md index 02f2bd787c4..2bfe00d5307 100644 --- a/src/current/_includes/v26.2/known-limitations/materialized-views-no-stats.md +++ b/src/current/_includes/v26.2/known-limitations/materialized-views-no-stats.md @@ -1 +1 @@ -- The optimizer may not select the most optimal query plan when querying materialized views because CockroachDB does not [collect statistics]({% link {{ page.version.version }}/cost-based-optimizer.md %}#table-statistics) on materialized views. [#78181](https://github.com/cockroachdb/cockroach/issues/78181). 
+- The optimizer may not select the most optimal query plan when querying materialized views because CockroachDB does not [collect statistics]({% link {{ page.version.version }}/cost-based-optimizer.md %}#table-statistics) on materialized views. #78181. diff --git a/src/current/_includes/v26.2/known-limitations/max-row-size-limitations.md b/src/current/_includes/v26.2/known-limitations/max-row-size-limitations.md index 9d4d3e12918..705e6fcc9f7 100644 --- a/src/current/_includes/v26.2/known-limitations/max-row-size-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/max-row-size-limitations.md @@ -1 +1 @@ -- The `sql.guardrails.max_row_size_err` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) misses large rows caused by indexed virtual computed columns. This is because the guardrail only checks the size of primary key rows, not secondary index rows. [#69540](https://github.com/cockroachdb/cockroach/issues/69540) +- The `sql.guardrails.max_row_size_err` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) misses large rows caused by indexed virtual computed columns. This is because the guardrail only checks the size of primary key rows, not secondary index rows. #69540 diff --git a/src/current/_includes/v26.2/known-limitations/multiple-arbiter-indexes.md b/src/current/_includes/v26.2/known-limitations/multiple-arbiter-indexes.md index c9861623314..28b41eca491 100644 --- a/src/current/_includes/v26.2/known-limitations/multiple-arbiter-indexes.md +++ b/src/current/_includes/v26.2/known-limitations/multiple-arbiter-indexes.md @@ -1 +1 @@ -CockroachDB does not currently support multiple arbiter indexes for [`INSERT ON CONFLICT DO UPDATE`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause), and will return an error if there are multiple unique or exclusion constraints matching the `ON CONFLICT DO UPDATE` specification. 
[#53170](https://github.com/cockroachdb/cockroach/issues/53170) \ No newline at end of file +CockroachDB does not currently support multiple arbiter indexes for [`INSERT ON CONFLICT DO UPDATE`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause), and will return an error if there are multiple unique or exclusion constraints matching the `ON CONFLICT DO UPDATE` specification. #53170 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/node-shutdown-limitations.md b/src/current/_includes/v26.2/known-limitations/node-shutdown-limitations.md index f35f858bdc1..d7eb712ee06 100644 --- a/src/current/_includes/v26.2/known-limitations/node-shutdown-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/node-shutdown-limitations.md @@ -2,4 +2,4 @@ - The `cockroach node decommission --wait-all` command was run and then interrupted - The `cockroach node decommission --wait=none` command was run - This is because the state flip is effected by the CLI program at the end. Only the CLI (or its underlying API call) is able to finalize the "decommissioned" state. If the command is interrupted, or `--wait=none` is used, the state will only flip to "decommissioned" when the CLI program is run again after decommissioning has done all its work. [#94430](https://github.com/cockroachdb/cockroach/issues/94430) + This is because the state flip is effected by the CLI program at the end. Only the CLI (or its underlying API call) is able to finalize the "decommissioned" state. If the command is interrupted, or `--wait=none` is used, the state will only flip to "decommissioned" when the CLI program is run again after decommissioning has done all its work. 
#94430 diff --git a/src/current/_includes/v26.2/known-limitations/null-limitations.md b/src/current/_includes/v26.2/known-limitations/null-limitations.md index ad2efce233a..ba230019779 100644 --- a/src/current/_includes/v26.2/known-limitations/null-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/null-limitations.md @@ -1 +1 @@ -- By default, CockroachDB orders `NULL`s before all other values. For compatibility with PostgreSQL, the `null_ordered_last` [session variable]({% link {{ page.version.version }}/set-vars.md %}) was added, which changes the default to order `NULL` values after all other values. This works in most cases, due to some transformations CockroachDB makes in the optimizer to add extra ordering columns. However, it does not work when the ordering column is a tuple. [#93558](https://github.com/cockroachdb/cockroach/issues/93558) +- By default, CockroachDB orders `NULL`s before all other values. For compatibility with PostgreSQL, the `null_ordered_last` [session variable]({% link {{ page.version.version }}/set-vars.md %}) was added, which changes the default to order `NULL` values after all other values. This works in most cases, due to some transformations CockroachDB makes in the optimizer to add extra ordering columns. However, it does not work when the ordering column is a tuple. 
#93558 diff --git a/src/current/_includes/v26.2/known-limitations/online-schema-changes-limitations.md b/src/current/_includes/v26.2/known-limitations/online-schema-changes-limitations.md index be6d00c26ce..8f58936ac08 100644 --- a/src/current/_includes/v26.2/known-limitations/online-schema-changes-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/online-schema-changes-limitations.md @@ -28,9 +28,9 @@ You cannot start an online schema change on a table if a [primary key change]({% Some [schema changes]({% link {{ page.version.version }}/online-schema-changes.md %}) that [drop columns]({% link {{ page.version.version }}/alter-table.md %}#drop-column) cannot be [rolled back]({% link {{ page.version.version }}/rollback-transaction.md %}) properly. -In some cases, the rollback will succeed, but the column data might be partially or totally missing, or stale due to the asynchronous nature of the schema change. [#46541](https://github.com/cockroachdb/cockroach/issues/46541) +In some cases, the rollback will succeed, but the column data might be partially or totally missing, or stale due to the asynchronous nature of the schema change. #46541 -In other cases, the rollback will fail in such a way that will never be cleaned up properly, leaving the table descriptor in a state where no other schema changes can be run successfully. [#47712](https://github.com/cockroachdb/cockroach/issues/47712) +In other cases, the rollback will fail in such a way that will never be cleaned up properly, leaving the table descriptor in a state where no other schema changes can be run successfully. 
#47712 To reduce the chance that a column drop will roll back incorrectly: diff --git a/src/current/_includes/v26.2/known-limitations/partition-limitations.md b/src/current/_includes/v26.2/known-limitations/partition-limitations.md index f8ba5e979fa..b0bcd4b1d51 100644 --- a/src/current/_includes/v26.2/known-limitations/partition-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/partition-limitations.md @@ -1,3 +1,3 @@ - When defining a [table partition]({% link {{ page.version.version }}/partitioning.md %}), either during table creation or table alteration, it is not possible to use placeholders in the `PARTITION BY` clause. - CockroachDB does not currently support dropping a single partition from a table. In order to remove partitions, you can [repartition]({% unless page.name == "partitioning.md" %}{% link {{ page.version.version }}/partitioning.md %}{% endunless %}#repartition-a-table) the table. -- In cases where the partition definition includes a comparison with `NULL` and a query constraint, incorrect query plans are returned. However, this case uses non-standard partitioning which defines partitions which could never hold values, so it is not likely to occur in production environments. [#82774](https://github.com/cockroachdb/cockroach/issues/82774) +- In cases where the partition definition includes a comparison with `NULL` and a query constraint, incorrect query plans are returned. However, this case uses non-standard partitioning which defines partitions which could never hold values, so it is not likely to occur in production environments. 
#82774 diff --git a/src/current/_includes/v26.2/known-limitations/plpgsql-limitations.md b/src/current/_includes/v26.2/known-limitations/plpgsql-limitations.md index 60b3104942e..09fd0183a40 100644 --- a/src/current/_includes/v26.2/known-limitations/plpgsql-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/plpgsql-limitations.md @@ -1,23 +1,23 @@ -- It is not possible to use a variable as a target more than once in the same `INTO` clause. For example, `SELECT 1, 2 INTO x, x;`. [#121605](https://github.com/cockroachdb/cockroach/issues/121605) -- PLpgSQL variable declarations cannot inherit the type of a table row or column using `%TYPE` or `%ROWTYPE` syntax. [#114676](https://github.com/cockroachdb/cockroach/issues/114676) -- PL/pgSQL arguments cannot be referenced with ordinals (e.g., `$1`, `$2`). [#114701](https://github.com/cockroachdb/cockroach/issues/114701) +- It is not possible to use a variable as a target more than once in the same `INTO` clause. For example, `SELECT 1, 2 INTO x, x;`. #121605 +- PLpgSQL variable declarations cannot inherit the type of a table row or column using `%TYPE` or `%ROWTYPE` syntax. #114676 +- PL/pgSQL arguments cannot be referenced with ordinals (e.g., `$1`, `$2`). #114701 - The following statements are not supported: - - `FOR` cursor loops, `FOR` query loops, and `FOREACH` loops. [#105246](https://github.com/cockroachdb/cockroach/issues/105246) - - `PERFORM`, `EXECUTE`, `GET DIAGNOSTICS`, and `CASE`. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- PL/pgSQL exception blocks cannot catch [transaction retry errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}). [#111446](https://github.com/cockroachdb/cockroach/issues/111446) -- `RAISE` statements cannot be annotated with names of schema objects related to the error (i.e., using `COLUMN`, `CONSTRAINT`, `DATATYPE`, `TABLE`, or `SCHEMA`). 
[#106237](https://github.com/cockroachdb/cockroach/issues/106237) -- `RAISE` statements message the client directly, and do not produce log output. [#117750](https://github.com/cockroachdb/cockroach/issues/117750) -- `ASSERT` debugging checks are not supported. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- `RECORD` parameters and variables are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). [#105713](https://github.com/cockroachdb/cockroach/issues/105713) -- Variable shadowing (e.g., declaring a variable with the same name in an inner block) is not supported in PL/pgSQL. [#117508](https://github.com/cockroachdb/cockroach/issues/117508) -- Syntax for accessing members of composite types without parentheses is not supported. [#114687](https://github.com/cockroachdb/cockroach/issues/114687) -- `NOT NULL` variable declarations are not supported. [#105243](https://github.com/cockroachdb/cockroach/issues/105243) -- Cursors opened in PL/pgSQL execute their queries on opening, affecting performance and resource usage. [#111479](https://github.com/cockroachdb/cockroach/issues/111479) -- Cursors in PL/pgSQL cannot be declared with arguments. [#117746](https://github.com/cockroachdb/cockroach/issues/117746) -- `OPEN FOR EXECUTE` is not supported for opening cursors. [#117744](https://github.com/cockroachdb/cockroach/issues/117744) -- The `print_strict_params` option is not supported in PL/pgSQL. [#123671](https://github.com/cockroachdb/cockroach/issues/123671) -- The `FOUND` local variable, which checks whether a statement affected any rows, is not supported in PL/pgSQL. [#122306](https://github.com/cockroachdb/cockroach/issues/122306) -- By default, when a PL/pgSQL variable conflicts with a column name, CockroachDB resolves the ambiguity by treating it as a column reference rather than a variable reference. 
This behavior differs from PostgreSQL, where an ambiguous column error is reported, and it is possible to change the `plpgsql.variable_conflict` setting in order to prefer either columns or variables. [#115680](https://github.com/cockroachdb/cockroach/issues/115680) -- It is not possible to define a `RECORD`-returning PL/pgSQL function that returns different-typed expressions from different `RETURN` statements. CockroachDB requires a consistent return type for `RECORD`-returning functions. [#115384](https://github.com/cockroachdb/cockroach/issues/115384) -- Variables cannot be declared with an associated collation using the `COLLATE` keyword. [#105245](https://github.com/cockroachdb/cockroach/issues/105245) -- Variables cannot be accessed using the `label.var_name` pattern. [#122322](https://github.com/cockroachdb/cockroach/issues/122322) \ No newline at end of file + - `FOR` cursor loops, `FOR` query loops, and `FOREACH` loops. #105246 + - `PERFORM`, `EXECUTE`, `GET DIAGNOSTICS`, and `CASE`. #117744 +- PL/pgSQL exception blocks cannot catch [transaction retry errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}). #111446 +- `RAISE` statements cannot be annotated with names of schema objects related to the error (i.e., using `COLUMN`, `CONSTRAINT`, `DATATYPE`, `TABLE`, or `SCHEMA`). #106237 +- `RAISE` statements message the client directly, and do not produce log output. #117750 +- `ASSERT` debugging checks are not supported. #117744 +- `RECORD` parameters and variables are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}). #105713 +- Variable shadowing (e.g., declaring a variable with the same name in an inner block) is not supported in PL/pgSQL. #117508 +- Syntax for accessing members of composite types without parentheses is not supported. #114687 +- `NOT NULL` variable declarations are not supported. 
#105243 +- Cursors opened in PL/pgSQL execute their queries on opening, affecting performance and resource usage. #111479 +- Cursors in PL/pgSQL cannot be declared with arguments. #117746 +- `OPEN FOR EXECUTE` is not supported for opening cursors. #117744 +- The `print_strict_params` option is not supported in PL/pgSQL. #123671 +- The `FOUND` local variable, which checks whether a statement affected any rows, is not supported in PL/pgSQL. #122306 +- By default, when a PL/pgSQL variable conflicts with a column name, CockroachDB resolves the ambiguity by treating it as a column reference rather than a variable reference. This behavior differs from PostgreSQL, where an ambiguous column error is reported, and it is possible to change the `plpgsql.variable_conflict` setting in order to prefer either columns or variables. #115680 +- It is not possible to define a `RECORD`-returning PL/pgSQL function that returns different-typed expressions from different `RETURN` statements. CockroachDB requires a consistent return type for `RECORD`-returning functions. #115384 +- Variables cannot be declared with an associated collation using the `COLLATE` keyword. #105245 +- Variables cannot be accessed using the `label.var_name` pattern. #122322 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/read-committed-limitations.md b/src/current/_includes/v26.2/known-limitations/read-committed-limitations.md index b4722a0c369..1beeca3c6ce 100644 --- a/src/current/_includes/v26.2/known-limitations/read-committed-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/read-committed-limitations.md @@ -1,7 +1,7 @@ -- Mixed-isolation-level workloads must enable foreign-key check locking for `SERIALIZABLE` transactions to avoid race conditions. 
[#151663](https://github.com/cockroachdb/cockroach/issues/151663#issuecomment-3222083180) -- Schema changes (e.g., [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}), [`CREATE SCHEMA`]({% link {{ page.version.version }}/create-schema.md %}), [`CREATE INDEX`]({% link {{ page.version.version }}/create-index.md %})) cannot be performed within explicit `READ COMMITTED` transactions when the [`autocommit_before_ddl` session setting]({% link {{page.version.version}}/set-vars.md %}#autocommit-before-ddl) is set to `off`, and will cause transactions to abort. As a workaround, [set the transaction's isolation level]({% link {{ page.version.version }}/read-committed.md %}#set-the-current-transaction-to-read-committed) to `SERIALIZABLE`. [#114778](https://github.com/cockroachdb/cockroach/issues/114778) -- Multi-column-family checks during updates are not supported under `READ COMMITTED` isolation. [#112488](https://github.com/cockroachdb/cockroach/issues/112488) +- Mixed-isolation-level workloads must enable foreign-key check locking for `SERIALIZABLE` transactions to avoid race conditions. #151663 +- Schema changes (e.g., [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}), [`CREATE SCHEMA`]({% link {{ page.version.version }}/create-schema.md %}), [`CREATE INDEX`]({% link {{ page.version.version }}/create-index.md %})) cannot be performed within explicit `READ COMMITTED` transactions when the [`autocommit_before_ddl` session setting]({% link {{page.version.version}}/set-vars.md %}#autocommit-before-ddl) is set to `off`, and will cause transactions to abort. As a workaround, [set the transaction's isolation level]({% link {{ page.version.version }}/read-committed.md %}#set-the-current-transaction-to-read-committed) to `SERIALIZABLE`. #114778 +- Multi-column-family checks during updates are not supported under `READ COMMITTED` isolation. 
#112488 - Because locks acquired by [foreign key]({% link {{ page.version.version }}/foreign-key.md %}) checks, [`SELECT FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}), and [`SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) are fully replicated under `READ COMMITTED` isolation, some queries experience a delay for Raft replication. - [Foreign key]({% link {{ page.version.version }}/foreign-key.md %}) checks are not performed in parallel under `READ COMMITTED` isolation. - [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements are less optimized under `READ COMMITTED` isolation than under `SERIALIZABLE` isolation. Under `READ COMMITTED` isolation, `SELECT FOR UPDATE` and `SELECT FOR SHARE` usually perform an extra lookup join for every locked table when compared to the same queries under `SERIALIZABLE`. In addition, some optimization steps (such as de-correlation of correlated [subqueries]({% link {{ page.version.version }}/subqueries.md %})) are not currently performed on these queries. -- Regardless of isolation level, [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements in CockroachDB do not prevent insertion of new rows matching the search condition (i.e., [phantom reads]({% link {{ page.version.version }}/read-committed.md %}#non-repeatable-reads-and-phantom-reads)). This matches PostgreSQL behavior at all isolation levels. [#120673](https://github.com/cockroachdb/cockroach/issues/120673) \ No newline at end of file +- Regardless of isolation level, [`SELECT FOR UPDATE` and `SELECT FOR SHARE`]({% link {{ page.version.version }}/select-for-update.md %}) statements in CockroachDB do not prevent insertion of new rows matching the search condition (i.e., [phantom reads]({% link {{ page.version.version }}/read-committed.md %}#non-repeatable-reads-and-phantom-reads)). 
This matches PostgreSQL behavior at all isolation levels. #120673 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/restore-multiregion-match.md b/src/current/_includes/v26.2/known-limitations/restore-multiregion-match.md index ab2f1029ecd..634d695246e 100644 --- a/src/current/_includes/v26.2/known-limitations/restore-multiregion-match.md +++ b/src/current/_includes/v26.2/known-limitations/restore-multiregion-match.md @@ -47,4 +47,4 @@ ALTER DATABASE destination_database SET PRIMARY REGION "us-east1"; ~~~ - [#71071](https://github.com/cockroachdb/cockroach/issues/71071) \ No newline at end of file + #71071 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/restore-tables-non-multi-reg.md b/src/current/_includes/v26.2/known-limitations/restore-tables-non-multi-reg.md index 5390f2d09ee..e05bc340141 100644 --- a/src/current/_includes/v26.2/known-limitations/restore-tables-non-multi-reg.md +++ b/src/current/_includes/v26.2/known-limitations/restore-tables-non-multi-reg.md @@ -1 +1 @@ -Restoring [`GLOBAL`]({% link {{ page.version.version }}/table-localities.md %}#global-tables) and [`REGIONAL BY TABLE`]({% link {{ page.version.version }}/table-localities.md %}#regional-tables) tables into a **non**-multi-region database is not supported. [#71502](https://github.com/cockroachdb/cockroach/issues/71502) +Restoring [`GLOBAL`]({% link {{ page.version.version }}/table-localities.md %}#global-tables) and [`REGIONAL BY TABLE`]({% link {{ page.version.version }}/table-localities.md %}#regional-tables) tables into a **non**-multi-region database is not supported. 
#71502 diff --git a/src/current/_includes/v26.2/known-limitations/restore-udf.md b/src/current/_includes/v26.2/known-limitations/restore-udf.md index a4a4bc080fe..e4180219a25 100644 --- a/src/current/_includes/v26.2/known-limitations/restore-udf.md +++ b/src/current/_includes/v26.2/known-limitations/restore-udf.md @@ -1 +1 @@ -`RESTORE` will not restore a table that references a [UDF]({% link {{ page.version.version }}/user-defined-functions.md %}), unless you skip restoring the function with the {% if page.name == "restore.md" %} [`skip_missing_udfs`](#skip-missing-udfs) {% else %} [`skip_missing_udfs`]({% link {{ page.version.version }}/restore.md %}#skip-missing-udfs) {% endif %} option. Alternatively, take a [database-level backup]({% link {{ page.version.version }}/backup.md %}#back-up-a-database) to include everything needed to restore the table. [#118195](https://github.com/cockroachdb/cockroach/issues/118195) \ No newline at end of file +`RESTORE` will not restore a table that references a [UDF]({% link {{ page.version.version }}/user-defined-functions.md %}), unless you skip restoring the function with the {% if page.name == "restore.md" %} [`skip_missing_udfs`](#skip-missing-udfs) {% else %} [`skip_missing_udfs`]({% link {{ page.version.version }}/restore.md %}#skip-missing-udfs) {% endif %} option. Alternatively, take a [database-level backup]({% link {{ page.version.version }}/backup.md %}#back-up-a-database) to include everything needed to restore the table. 
#118195 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/restore-zones.md b/src/current/_includes/v26.2/known-limitations/restore-zones.md index 4dfad262e73..d83566ac514 100644 --- a/src/current/_includes/v26.2/known-limitations/restore-zones.md +++ b/src/current/_includes/v26.2/known-limitations/restore-zones.md @@ -1 +1 @@ -When restoring from [non-cluster backups]({% link {{ page.version.version }}/backup.md %}#back-up-a-database), `RESTORE` does not restore [zone configurations]({% link {{ page.version.version }}/configure-replication-zones.md %}) for restored objects. [#167824](https://github.com/cockroachdb/cockroach/issues/167824) \ No newline at end of file +When restoring from [non-cluster backups]({% link {{ page.version.version }}/backup.md %}#back-up-a-database), `RESTORE` does not restore [zone configurations]({% link {{ page.version.version }}/configure-replication-zones.md %}) for restored objects. #167824 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/rls-update-set-where-returning.md b/src/current/_includes/v26.2/known-limitations/rls-update-set-where-returning.md index 450de11683e..e6f2b296ba4 100644 --- a/src/current/_includes/v26.2/known-limitations/rls-update-set-where-returning.md +++ b/src/current/_includes/v26.2/known-limitations/rls-update-set-where-returning.md @@ -1 +1 @@ -`UPDATE` statements whose `SET`, `WHERE`, or `RETURNING` clauses do not read existing column values can be mistakenly filtered by row-level security `SELECT` policies, causing the statement to affect no rows. [#145894](https://github.com/cockroachdb/cockroach/issues/145894) \ No newline at end of file +`UPDATE` statements whose `SET`, `WHERE`, or `RETURNING` clauses do not read existing column values can be mistakenly filtered by row-level security `SELECT` policies, causing the statement to affect no rows. 
#145894 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/rls-values-on-conflict-do-nothing.md b/src/current/_includes/v26.2/known-limitations/rls-values-on-conflict-do-nothing.md index c85dea7987a..d274e573dc9 100644 --- a/src/current/_includes/v26.2/known-limitations/rls-values-on-conflict-do-nothing.md +++ b/src/current/_includes/v26.2/known-limitations/rls-values-on-conflict-do-nothing.md @@ -1 +1 @@ -`ON CONFLICT ... DO NOTHING`: CockroachDB does not run the constraint and row-level policy checks on the `VALUES` clause if the candidate row has a conflict. [#35370](https://github.com/cockroachdb/cockroach/issues/35370). +`ON CONFLICT ... DO NOTHING`: CockroachDB does not run the constraint and row-level policy checks on the `VALUES` clause if the candidate row has a conflict. #35370. diff --git a/src/current/_includes/v26.2/known-limitations/routine-limitations.md b/src/current/_includes/v26.2/known-limitations/routine-limitations.md index 773322e2848..cc19d69e3d6 100644 --- a/src/current/_includes/v26.2/known-limitations/routine-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/routine-limitations.md @@ -1,10 +1,10 @@ -- Routines cannot be invoked with named arguments, e.g., `SELECT foo(a => 1, b => 2);` or `SELECT foo(b := 1, a := 2);`. [#122264](https://github.com/cockroachdb/cockroach/issues/122264) -- Routines cannot be created if they reference temporary tables. [#121375](https://github.com/cockroachdb/cockroach/issues/121375) -- Routines cannot be created with unnamed `INOUT` parameters. For example, `CREATE PROCEDURE p(INOUT INT) AS $$ BEGIN NULL; END; $$ LANGUAGE PLpgSQL;`. [#121251](https://github.com/cockroachdb/cockroach/issues/121251) -- Routines cannot be created if they return fewer columns than declared. For example, `CREATE FUNCTION f(OUT sum INT, INOUT a INT, INOUT b INT) LANGUAGE SQL AS $$ SELECT (a + b, b); $$;`. 
[#121247](https://github.com/cockroachdb/cockroach/issues/121247) -- Routines cannot be created with an `OUT` parameter of type `RECORD`. [#123448](https://github.com/cockroachdb/cockroach/issues/123448) -- DDL statements (e.g., `CREATE TABLE`, `CREATE INDEX`) are not allowed within UDFs or stored procedures. [#110080](https://github.com/cockroachdb/cockroach/issues/110080) -- Polymorphic types cannot be cast to other types (e.g., `TEXT`) within routine parameters. [#123536](https://github.com/cockroachdb/cockroach/issues/123536) -- Routine parameters and return types cannot be declared using the `ANYENUM` polymorphic type, which is able to match any [`ENUM`]({% link {{ page.version.version }}/enum.md %}) type. [#123048](https://github.com/cockroachdb/cockroach/issues/123048) -- [Statement diagnostics]({% link {{ page.version.version }}/explain-analyze.md %}#debug-option) cannot be collected for statements executed inside UDFs or stored procedures. You can request statement diagnostics for the top-level invocation of the function or procedure, and the resulting trace includes spans for each statement executed. However, there is no way to target statements executed inside the function or procedure with a statement diagnostics request. [#159526](https://github.com/cockroachdb/cockroach/issues/159526) -- Statements within routines do not currently respect hint injections. The workaround is to modify the inline hints directly in the body by replacing the routine. [#162627](https://github.com/cockroachdb/cockroach/issues/162627) \ No newline at end of file +- Routines cannot be invoked with named arguments, e.g., `SELECT foo(a => 1, b => 2);` or `SELECT foo(b := 1, a := 2);`. #122264 +- Routines cannot be created if they reference temporary tables. #121375 +- Routines cannot be created with unnamed `INOUT` parameters. For example, `CREATE PROCEDURE p(INOUT INT) AS $$ BEGIN NULL; END; $$ LANGUAGE PLpgSQL;`. 
#121251 +- Routines cannot be created if they return fewer columns than declared. For example, `CREATE FUNCTION f(OUT sum INT, INOUT a INT, INOUT b INT) LANGUAGE SQL AS $$ SELECT (a + b, b); $$;`. #121247 +- Routines cannot be created with an `OUT` parameter of type `RECORD`. #123448 +- DDL statements (e.g., `CREATE TABLE`, `CREATE INDEX`) are not allowed within UDFs or stored procedures. #110080 +- Polymorphic types cannot be cast to other types (e.g., `TEXT`) within routine parameters. #123536 +- Routine parameters and return types cannot be declared using the `ANYENUM` polymorphic type, which is able to match any [`ENUM`]({% link {{ page.version.version }}/enum.md %}) type. #123048 +- [Statement diagnostics]({% link {{ page.version.version }}/explain-analyze.md %}#debug-option) cannot be collected for statements executed inside UDFs or stored procedures. You can request statement diagnostics for the top-level invocation of the function or procedure, and the resulting trace includes spans for each statement executed. However, there is no way to target statements executed inside the function or procedure with a statement diagnostics request. #159526 +- Statements within routines do not currently respect hint injections. The workaround is to modify the inline hints directly in the body by replacing the routine. 
#162627 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/row-level-ttl-limitations.md b/src/current/_includes/v26.2/known-limitations/row-level-ttl-limitations.md index c386ba576d7..4460901566d 100644 --- a/src/current/_includes/v26.2/known-limitations/row-level-ttl-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/row-level-ttl-limitations.md @@ -1,5 +1,5 @@ - Any queries you run against tables with Row-Level TTL enabled (or against tables that have [foreign keys]({% link {{page.version.version}}/foreign-key.md %}) that reference TTL-enabled tables) do not filter out expired rows from the result set (this includes [`UPDATE`s]({% link {{ page.version.version }}/update.md %}) and [`DELETE`s]({% link {{ page.version.version }}/delete.md %})). This feature may be added in a future release. For now, follow the instructions in [Filter out expired rows from a selection query]({% link {{ page.version.version }}/row-level-ttl.md %}#filter-out-expired-rows-from-a-selection-query). -- Tables with Row-Level TTL can be referenced by [foreign keys]({% link {{page.version.version}}/foreign-key.md %}). TTL deletes are issued as regular [`DELETE`]({% link {{ page.version.version }}/delete.md %}) statements, so inbound foreign keys apply. If an inbound foreign key uses `ON DELETE RESTRICT` and referencing rows exist, the TTL job fails with a foreign key violation. [#101372](https://github.com/cockroachdb/cockroach/issues/101372) +- Tables with Row-Level TTL can be referenced by [foreign keys]({% link {{page.version.version}}/foreign-key.md %}). TTL deletes are issued as regular [`DELETE`]({% link {{ page.version.version }}/delete.md %}) statements, so inbound foreign keys apply. If an inbound foreign key uses `ON DELETE RESTRICT` and referencing rows exist, the TTL job fails with a foreign key violation. 
#101372 - Enabling Row-Level TTL on a table with multiple [secondary indexes]({% link {{ page.version.version }}/indexes.md %}) can have negative performance impacts on a cluster, including increased [latency]({% link {{ page.version.version }}/common-issues-to-monitor.md %}#service-latency) and [contention]({% link {{ page.version.version }}/performance-best-practices-overview.md %}#transaction-contention). This is particularly true for large tables with terabytes of data and billions of rows that are split up into multiple ranges across separate nodes. - Increased latency may occur because secondary indexes aren't necessarily stored on the same underlying [ranges]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-range) as a table's [primary indexes]({% link {{ page.version.version }}/indexes.md %}). Further, the secondary indexes' ranges may have [leaseholders]({% link {{ page.version.version }}/architecture/overview.md %}#architecture-leaseholder) located on different nodes than the primary index. - Increased contention may occur because [intents]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#write-intents) must be written as part of performing the deletions. diff --git a/src/current/_includes/v26.2/known-limitations/savepoint-limitations.md b/src/current/_includes/v26.2/known-limitations/savepoint-limitations.md index 58451f4ae28..1bc0bdd8d44 100644 --- a/src/current/_includes/v26.2/known-limitations/savepoint-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/savepoint-limitations.md @@ -1 +1 @@ -- Transactions with [priority `HIGH`]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities) that contain DDL and `ROLLBACK TO SAVEPOINT` are not supported, as they could result in a deadlock. 
[#46414](https://github.com/cockroachdb/cockroach/issues/46414) +- Transactions with [priority `HIGH`]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities) that contain DDL and `ROLLBACK TO SAVEPOINT` are not supported, as they could result in a deadlock. #46414 diff --git a/src/current/_includes/v26.2/known-limitations/select-for-update-limitations.md b/src/current/_includes/v26.2/known-limitations/select-for-update-limitations.md index 9c4ab3fd4a1..89152e6fad0 100644 --- a/src/current/_includes/v26.2/known-limitations/select-for-update-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/select-for-update-limitations.md @@ -1,8 +1,8 @@ -- `SKIP LOCKED` cannot be used for tables with multiple [column families]({% link {{ page.version.version }}/column-families.md %}). [#160961](https://github.com/cockroachdb/cockroach/issues/160961) +- `SKIP LOCKED` cannot be used for tables with multiple [column families]({% link {{ page.version.version }}/column-families.md %}). #160961 - By default under `SERIALIZABLE` isolation, locks acquired using `SELECT ... FOR UPDATE` and `SELECT ... FOR SHARE` are implemented as fast, in-memory [unreplicated locks](architecture/transaction-layer.html#unreplicated-locks). If a [lease transfer]({% link {{ page.version.version }}/architecture/replication-layer.md %}#leases) or [range split/merge]({% link {{ page.version.version }}/architecture/distribution-layer.md %}#range-merges) occurs on a range held by an unreplicated lock, the lock is dropped. The following behaviors can occur: - The desired ordering of concurrent accesses to one or more rows of a table expressed by your use of `SELECT ... FOR UPDATE` may not be preserved (that is, a transaction _B_ against some table _T_ that was supposed to wait behind another transaction _A_ operating on _T_ may not wait for transaction _A_). 
- The transaction that acquired the (now dropped) unreplicated lock may fail to commit, leading to [transaction retry errors with code `40001`]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}) and the [`restart transaction` error message]({% link {{ page.version.version }}/common-errors.md %}#restart-transaction). When running under `SERIALIZABLE` isolation, `SELECT ... FOR UPDATE` and `SELECT ... FOR SHARE` locks should be thought of as best-effort, and should not be relied upon for correctness. Note that [serialization]({% link {{ page.version.version }}/demo-serializable.md %}) is preserved despite this limitation. This limitation is fixed when the `enable_durable_locking_for_serializable` [session setting]({% link {{ page.version.version }}/session-variables.md %}#enable-durable-locking-for-serializable) is set to `true`. This limitation does **not** apply to [`READ COMMITTED`]({% link {{ page.version.version }}/read-committed.md %}) transactions. -- The new implementation of `SELECT FOR UPDATE` is not yet the default setting under `SERIALIZABLE` isolation. It can be used under `SERIALIZABLE` isolation by setting the `optimizer_use_lock_op_for_serializable` [session setting]({% link {{ page.version.version }}/session-variables.md %}) to `true`. [#114737](https://github.com/cockroachdb/cockroachdb/issues/114737) +- The new implementation of `SELECT FOR UPDATE` is not yet the default setting under `SERIALIZABLE` isolation. It can be used under `SERIALIZABLE` isolation by setting the `optimizer_use_lock_op_for_serializable` [session setting]({% link {{ page.version.version }}/session-variables.md %}) to `true`. #114737 -- `SELECT FOR UPDATE` does not lock completely-`NULL` column families in multi-column-family tables. [#116836](https://github.com/cockroachdb/cockroach/issues/116836) \ No newline at end of file +- `SELECT FOR UPDATE` does not lock completely-`NULL` column families in multi-column-family tables.
#116836 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/set-transaction-no-rollback.md b/src/current/_includes/v26.2/known-limitations/set-transaction-no-rollback.md index 4f4e5af2aab..966dcd82d48 100644 --- a/src/current/_includes/v26.2/known-limitations/set-transaction-no-rollback.md +++ b/src/current/_includes/v26.2/known-limitations/set-transaction-no-rollback.md @@ -30,4 +30,4 @@ UTC ~~~ - [#69396](https://github.com/cockroachdb/cockroach/issues/69396), [#148766](https://github.com/cockroachdb/cockroach/issues/148766) \ No newline at end of file + #69396, #148766 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/show-backup-symlink.md b/src/current/_includes/v26.2/known-limitations/show-backup-symlink.md index 38ba86fb28f..1c7a5612242 100644 --- a/src/current/_includes/v26.2/known-limitations/show-backup-symlink.md +++ b/src/current/_includes/v26.2/known-limitations/show-backup-symlink.md @@ -1 +1 @@ -[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}) does not support listing backups if the [`nodelocal`]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}) storage location is a symlink. [#70260](https://github.com/cockroachdb/cockroach/issues/70260) \ No newline at end of file +[`SHOW BACKUP`]({% link {{ page.version.version }}/show-backup.md %}) does not support listing backups if the [`nodelocal`]({% link {{ page.version.version }}/cockroach-nodelocal-upload.md %}) storage location is a symlink. 
#70260 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/spatial-limitations.md b/src/current/_includes/v26.2/known-limitations/spatial-limitations.md index 5bc25717912..2fe9152f2cd 100644 --- a/src/current/_includes/v26.2/known-limitations/spatial-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/spatial-limitations.md @@ -1,10 +1,10 @@ -- Not all [PostGIS spatial functions](https://postgis.net/docs/reference.html) are supported. [#49203](https://github.com/cockroachdb/cockroach/issues/49203) -- The `AddGeometryColumn` [spatial function]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) only allows constant arguments. [#49402](https://github.com/cockroachdb/cockroach/issues/49402) -- The `AddGeometryColumn` spatial function only allows the `true` value for its `use_typmod` parameter. [#49448](https://github.com/cockroachdb/cockroach/issues/49448) -- CockroachDB does not support the `@` operator. Instead of using `@` in spatial expressions, we recommend using the inverse, with `~`. For example, instead of `a @ b`, use `b ~ a`. [#56124](https://github.com/cockroachdb/cockroach/issues/56124) -- CockroachDB does not yet support [`INSERT`]({% link {{ page.version.version }}/insert.md %})s into the [`spatial_ref_sys` table]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-system-tables). This limitation also blocks the [`ogr2ogr -f PostgreSQL` file conversion command](https://gdal.org/programs/ogr2ogr.html#cmdoption-ogr2ogr-f). [#55903](https://github.com/cockroachdb/cockroach/issues/55903) -- CockroachDB does not yet support [k-nearest neighbors](https://wikipedia.org/wiki/K-nearest_neighbors_algorithm). 
[#55227](https://github.com/cockroachdb/cockroach/issues/55227) -- CockroachDB does not support using [schema name prefixes]({% link {{ page.version.version }}/sql-name-resolution.md %}#how-name-resolution-works) to refer to [data types]({% link {{ page.version.version }}/data-types.md %}) with type modifiers (e.g., `public.geometry(linestring, 4326)`). Instead, use fully-unqualified names to refer to data types with type modifiers (e.g., `geometry(linestring,4326)`). [#56492](https://github.com/cockroachdb/cockroach/issues/56492) +- Not all [PostGIS spatial functions](https://postgis.net/docs/reference.html) are supported. #49203 +- The `AddGeometryColumn` [spatial function]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) only allows constant arguments. #49402 +- The `AddGeometryColumn` spatial function only allows the `true` value for its `use_typmod` parameter. #49448 +- CockroachDB does not support the `@` operator. Instead of using `@` in spatial expressions, we recommend using the inverse, with `~`. For example, instead of `a @ b`, use `b ~ a`. #56124 +- CockroachDB does not yet support [`INSERT`]({% link {{ page.version.version }}/insert.md %})s into the [`spatial_ref_sys` table]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-system-tables). This limitation also blocks the [`ogr2ogr -f PostgreSQL` file conversion command](https://gdal.org/programs/ogr2ogr.html#cmdoption-ogr2ogr-f). #55903 +- CockroachDB does not yet support [k-nearest neighbors](https://wikipedia.org/wiki/K-nearest_neighbors_algorithm). #55227 +- CockroachDB does not support using [schema name prefixes]({% link {{ page.version.version }}/sql-name-resolution.md %}#how-name-resolution-works) to refer to [data types]({% link {{ page.version.version }}/data-types.md %}) with type modifiers (e.g., `public.geometry(linestring, 4326)`). 
Instead, use fully-unqualified names to refer to data types with type modifiers (e.g., `geometry(linestring,4326)`). #56492 - {% include {{ page.version.version }}/known-limitations/srid-4326-limitations.md %} - {% include {{ page.version.version }}/known-limitations/geospatial-heterogeneous-architectures.md %} -- [Spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled on ARM Macs due to an issue with macOS code signing for the [GEOS](https://libgeos.org/) libraries. Users needing spatial features on an ARM Mac may instead [use Rosetta](https://developer.apple.com/documentation/virtualization/running_intel_binaries_in_linux_vms_with_rosetta) to [run the Intel binary]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#install-binary) or use the [Docker image]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#use-docker) distribution. [#93161](https://github.com/cockroachdb/cockroach/issues/93161) \ No newline at end of file +- [Spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled on ARM Macs due to an issue with macOS code signing for the [GEOS](https://libgeos.org/) libraries. Users needing spatial features on an ARM Mac may instead [use Rosetta](https://developer.apple.com/documentation/virtualization/running_intel_binaries_in_linux_vms_with_rosetta) to [run the Intel binary]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#install-binary) or use the [Docker image]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#use-docker) distribution. 
#93161 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/sql-cursors.md b/src/current/_includes/v26.2/known-limitations/sql-cursors.md index a3e42b9a3ec..d0ce2b3c3af 100644 --- a/src/current/_includes/v26.2/known-limitations/sql-cursors.md +++ b/src/current/_includes/v26.2/known-limitations/sql-cursors.md @@ -1,8 +1,8 @@ CockroachDB implements SQL [cursor]({% link {{ page.version.version }}/cursors.md %}) support with the following limitations: -- `DECLARE` only supports forward cursors. Reverse cursors created with `DECLARE SCROLL` are not supported. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- `FETCH` supports forward, relative, and absolute variants, but only for forward cursors. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- `BINARY CURSOR`, which returns data in the Postgres binary format, is not supported. [#77099](https://github.com/cockroachdb/cockroach/issues/77099) -- Scrollable cursor (also known as reverse `FETCH`) is not supported. [#77102](https://github.com/cockroachdb/cockroach/issues/77102) -- [`SELECT ... FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) with a cursor is not supported. [#77103](https://github.com/cockroachdb/cockroach/issues/77103) -- Respect for [`SAVEPOINT`s]({% link {{ page.version.version }}/savepoint.md %}) is not supported. Cursor definitions do not disappear properly if rolled back to a `SAVEPOINT` from before they were created. [#77104](https://github.com/cockroachdb/cockroach/issues/77104) +- `DECLARE` only supports forward cursors. Reverse cursors created with `DECLARE SCROLL` are not supported. #77102 +- `FETCH` supports forward, relative, and absolute variants, but only for forward cursors. #77102 +- `BINARY CURSOR`, which returns data in the Postgres binary format, is not supported. #77099 +- Scrollable cursor (also known as reverse `FETCH`) is not supported. #77102 +- [`SELECT ... 
FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) with a cursor is not supported. #77103 +- Respect for [`SAVEPOINT`s]({% link {{ page.version.version }}/savepoint.md %}) is not supported. Cursor definitions do not disappear properly if rolled back to a `SAVEPOINT` from before they were created. #77104 diff --git a/src/current/_includes/v26.2/known-limitations/sql-guardrails-limitations.md b/src/current/_includes/v26.2/known-limitations/sql-guardrails-limitations.md index 6d8d60ac614..02ebf98bcd7 100644 --- a/src/current/_includes/v26.2/known-limitations/sql-guardrails-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/sql-guardrails-limitations.md @@ -1,2 +1,2 @@ -- The `transaction_rows_read_err` and `transaction_rows_written_err` [session settings]({% link {{ page.version.version }}/set-vars.md %}) limit the number of rows read or written by a single [transaction]({% link {{ page.version.version }}/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction). These session settings will fail the transaction with an error, but not until the current query finishes executing and the results have been returned to the client. [#70473](https://github.com/cockroachdb/cockroach/issues/70473) -- The `sql.guardrails.max_row_size_err` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) misses large rows caused by indexed virtual computed columns. This is because the guardrail only checks the size of primary key rows, not secondary index rows. [#69540](https://github.com/cockroachdb/cockroach/issues/69540) +- The `transaction_rows_read_err` and `transaction_rows_written_err` [session settings]({% link {{ page.version.version }}/set-vars.md %}) limit the number of rows read or written by a single [transaction]({% link {{ page.version.version }}/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction). 
These session settings will fail the transaction with an error, but not until the current query finishes executing and the results have been returned to the client. #70473 +- The `sql.guardrails.max_row_size_err` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) misses large rows caused by indexed virtual computed columns. This is because the guardrail only checks the size of primary key rows, not secondary index rows. #69540 diff --git a/src/current/_includes/v26.2/known-limitations/srid-4326-limitations.md b/src/current/_includes/v26.2/known-limitations/srid-4326-limitations.md index b556a9fbecd..294a1a53bd4 100644 --- a/src/current/_includes/v26.2/known-limitations/srid-4326-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/srid-4326-limitations.md @@ -1 +1 @@ -Defining a custom SRID by inserting rows into [`spatial_ref_sys`]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial_ref_sys) is not currently supported. [#55903](https://github.com/cockroachdb/cockroach/issues/55903) \ No newline at end of file +Defining a custom SRID by inserting rows into [`spatial_ref_sys`]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial_ref_sys) is not currently supported. #55903 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/statement-hints-limitations.md b/src/current/_includes/v26.2/known-limitations/statement-hints-limitations.md index 45788512c41..1ada3101f4f 100644 --- a/src/current/_includes/v26.2/known-limitations/statement-hints-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/statement-hints-limitations.md @@ -1,2 +1,2 @@ -- [Statement hints]({% link {{ page.version.version }}/cost-based-optimizer.md %}#statement-hints) do not apply to statements within views. The workaround for `REWRITE INLINE HINTS` is to modify the inline hints directly in the body by replacing the view. There is no workaround for `SET VARIABLE` hints. 
[#166782](https://github.com/cockroachdb/cockroach/issues/166782) -- [Statement hints]({% link {{ page.version.version }}/cost-based-optimizer.md %}#statement-hints) do not apply to statements within routines. The workaround for `REWRITE INLINE HINTS` is to modify the inline hints directly in the body by replacing the routine. There is no workaround for `SET VARIABLE` hints. [#162627](https://github.com/cockroachdb/cockroach/issues/162627) \ No newline at end of file +- [Statement hints]({% link {{ page.version.version }}/cost-based-optimizer.md %}#statement-hints) do not apply to statements within views. The workaround for `REWRITE INLINE HINTS` is to modify the inline hints directly in the body by replacing the view. There is no workaround for `SET VARIABLE` hints. #166782 +- [Statement hints]({% link {{ page.version.version }}/cost-based-optimizer.md %}#statement-hints) do not apply to statements within routines. The workaround for `REWRITE INLINE HINTS` is to modify the inline hints directly in the body by replacing the routine. There is no workaround for `SET VARIABLE` hints. #162627 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/stats-refresh-upgrade.md b/src/current/_includes/v26.2/known-limitations/stats-refresh-upgrade.md index 3d5a8d26325..3325dfaa53b 100644 --- a/src/current/_includes/v26.2/known-limitations/stats-refresh-upgrade.md +++ b/src/current/_includes/v26.2/known-limitations/stats-refresh-upgrade.md @@ -1 +1 @@ -- The [automatic statistics refresher]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-statistics-refresh-rate) automatically checks whether it needs to refresh statistics for every table in the database upon startup of each node in the cluster. If statistics for a table have not been refreshed in a while, this will trigger collection of statistics for that table. If statistics have been refreshed recently, it will not force a refresh. 
As a result, the automatic statistics refresher does not necessarily perform a refresh of statistics after an [upgrade]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}). This could cause a problem, for example, if the upgrade moves from a version without [histograms]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-histogram-collection) to a version with histograms. To refresh statistics manually, use [`CREATE STATISTICS`](create-statistics.html). [#54816](https://github.com/cockroachdb/cockroach/issues/54816) +- The [automatic statistics refresher]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-statistics-refresh-rate) automatically checks whether it needs to refresh statistics for every table in the database upon startup of each node in the cluster. If statistics for a table have not been refreshed in a while, this will trigger collection of statistics for that table. If statistics have been refreshed recently, it will not force a refresh. As a result, the automatic statistics refresher does not necessarily perform a refresh of statistics after an [upgrade]({% link {{ page.version.version }}/upgrade-cockroach-version.md %}). This could cause a problem, for example, if the upgrade moves from a version without [histograms]({% link {{ page.version.version }}/cost-based-optimizer.md %}#control-histogram-collection) to a version with histograms. To refresh statistics manually, use [`CREATE STATISTICS`](create-statistics.html). #54816 diff --git a/src/current/_includes/v26.2/known-limitations/stored-proc-limitations.md b/src/current/_includes/v26.2/known-limitations/stored-proc-limitations.md index b2aae6069c2..74f4d86dfb3 100644 --- a/src/current/_includes/v26.2/known-limitations/stored-proc-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/stored-proc-limitations.md @@ -1,2 +1,2 @@ -- Pausable portals are not supported with `CALL` statements for stored procedures. 
[#151529](https://github.com/cockroachdb/cockroach/issues/151529) -- `COMMIT` and `ROLLBACK` statements are not supported within nested procedures. [#122266](https://github.com/cockroachdb/cockroach/issues/122266) \ No newline at end of file +- Pausable portals are not supported with `CALL` statements for stored procedures. #151529 +- `COMMIT` and `ROLLBACK` statements are not supported within nested procedures. #122266 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/subquery-mutations-limitations.md b/src/current/_includes/v26.2/known-limitations/subquery-mutations-limitations.md index 771eba80bc6..d4deeaee17c 100644 --- a/src/current/_includes/v26.2/known-limitations/subquery-mutations-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/subquery-mutations-limitations.md @@ -2,4 +2,4 @@ - Set the `sql.multiple_modifications_of_table.enabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) to `true`. - Use the `enable_multiple_modifications_of_table` [session variable]({% link {{ page.version.version }}/set-vars.md %}). - If multiple mutations inside the same statement affect different tables with [`FOREIGN KEY`]({% link {{ page.version.version }}/foreign-key.md %}) relations and `ON CASCADE` clauses between them, the results will be different from what is expected in PostgreSQL. [#70731](https://github.com/cockroachdb/cockroach/issues/70731) + If multiple mutations inside the same statement affect different tables with [`FOREIGN KEY`]({% link {{ page.version.version }}/foreign-key.md %}) relations and `ON CASCADE` clauses between them, the results will be different from what is expected in PostgreSQL. 
#70731 diff --git a/src/current/_includes/v26.2/known-limitations/transaction-row-count-limitations.md b/src/current/_includes/v26.2/known-limitations/transaction-row-count-limitations.md index ded62d6d9be..a54f15d61a7 100644 --- a/src/current/_includes/v26.2/known-limitations/transaction-row-count-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/transaction-row-count-limitations.md @@ -1 +1 @@ -- The `transaction_rows_read_err` and `transaction_rows_written_err` [session settings]({% link {{ page.version.version }}/set-vars.md %}) limit the number of rows read or written by a single [transaction]({% link {{ page.version.version }}/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction). These session settings will fail the transaction with an error, but not until the current query finishes executing and the results have been returned to the client. [#70473](https://github.com/cockroachdb/cockroach/issues/70473) +- The `transaction_rows_read_err` and `transaction_rows_written_err` [session settings]({% link {{ page.version.version }}/set-vars.md %}) limit the number of rows read or written by a single [transaction]({% link {{ page.version.version }}/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction). These session settings will fail the transaction with an error, but not until the current query finishes executing and the results have been returned to the client. #70473 diff --git a/src/current/_includes/v26.2/known-limitations/trigger-limitations.md b/src/current/_includes/v26.2/known-limitations/trigger-limitations.md index 9e323abfe9c..dd8b9c74f76 100644 --- a/src/current/_includes/v26.2/known-limitations/trigger-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/trigger-limitations.md @@ -1,6 +1,6 @@ -- Statement-level triggers are not supported. [#126362](https://github.com/cockroachdb/cockroach/issues/126362) -- `INSTEAD OF` triggers are not supported. 
[#126363](https://github.com/cockroachdb/cockroach/issues/126363) -- Hidden columns are not visible to triggers. [#133331](https://github.com/cockroachdb/cockroach/issues/133331) -- The `REFERENCING` clause for `CREATE TRIGGER` is not supported. [#135655](https://github.com/cockroachdb/cockroach/issues/135655) -- `UPDATE` triggers with a column list (using `UPDATE OF column_name` syntax) are not supported. [#135656](https://github.com/cockroachdb/cockroach/issues/135656) -- Statement-level triggers for `TRUNCATE` events are not supported. [#135657](https://github.com/cockroachdb/cockroach/issues/135657) +- Statement-level triggers are not supported. #126362 +- `INSTEAD OF` triggers are not supported. #126363 +- Hidden columns are not visible to triggers. #133331 +- The `REFERENCING` clause for `CREATE TRIGGER` is not supported. #135655 +- `UPDATE` triggers with a column list (using `UPDATE OF column_name` syntax) are not supported. #135656 +- Statement-level triggers for `TRUNCATE` events are not supported. #135657 diff --git a/src/current/_includes/v26.2/known-limitations/trigram-unsupported-syntax.md b/src/current/_includes/v26.2/known-limitations/trigram-unsupported-syntax.md index 494730c7ae8..a981c4915b0 100644 --- a/src/current/_includes/v26.2/known-limitations/trigram-unsupported-syntax.md +++ b/src/current/_includes/v26.2/known-limitations/trigram-unsupported-syntax.md @@ -6,4 +6,4 @@ - Acceleration on [regex string matching]({% link {{ page.version.version }}/scalar-expressions.md %}#string-matching-using-posix-regular-expressions). - `%` comparisons, `show_trgm`, and trigram index creation on [collated strings]({% link {{ page.version.version }}/collate.md %}). 
-[#41285](https://github.com/cockroachdb/cockroach/issues/41285) \ No newline at end of file +#41285 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/udf-limitations.md b/src/current/_includes/v26.2/known-limitations/udf-limitations.md index 0f44ac320f5..c212736717b 100644 --- a/src/current/_includes/v26.2/known-limitations/udf-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/udf-limitations.md @@ -1,9 +1,9 @@ -- User-defined functions are not supported in partial index predicates. [#155488](https://github.com/cockroachdb/cockroach/issues/155488) -- Views cannot reference a UDF that contains mutation statements (`INSERT`, `UPDATE`, `UPSERT`, `DELETE`). [#151686](https://github.com/cockroachdb/cockroach/issues/151686) -- A `RECORD`-returning UDF cannot be created without a `RETURN` statement in the root block, which would restrict the wildcard type to a concrete one. [#122945](https://github.com/cockroachdb/cockroach/issues/122945) +- User-defined functions are not supported in partial index predicates. #155488 +- Views cannot reference a UDF that contains mutation statements (`INSERT`, `UPDATE`, `UPSERT`, `DELETE`). #151686 +- A `RECORD`-returning UDF cannot be created without a `RETURN` statement in the root block, which would restrict the wildcard type to a concrete one. #122945 - User-defined functions are not currently supported in: - - Expressions (column, index, constraint) in tables. [#87699](https://github.com/cockroachdb/cockroach/issues/87699) - - Partial index predicates. [#155488](https://github.com/cockroachdb/cockroach/issues/155488) -- User-defined functions cannot call themselves recursively. [#93049](https://github.com/cockroachdb/cockroach/issues/93049) -- The `setval` function cannot be resolved when used inside UDF bodies. 
[#110860](https://github.com/cockroachdb/cockroach/issues/110860) -- Casting subqueries to [user-defined types]({% link {{ page.version.version }}/create-type.md %}) in UDFs is not supported. [#108184](https://github.com/cockroachdb/cockroach/issues/108184) \ No newline at end of file + - Expressions (column, index, constraint) in tables. #87699 + - Partial index predicates. #155488 +- User-defined functions cannot call themselves recursively. #93049 +- The `setval` function cannot be resolved when used inside UDF bodies. #110860 +- Casting subqueries to [user-defined types]({% link {{ page.version.version }}/create-type.md %}) in UDFs is not supported. #108184 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/vector-limitations.md b/src/current/_includes/v26.2/known-limitations/vector-limitations.md index 97ed7c47599..053087efe7e 100644 --- a/src/current/_includes/v26.2/known-limitations/vector-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/vector-limitations.md @@ -1,5 +1,5 @@ - {% include {{ page.version.version }}/sql/vector-batch-inserts.md %} -- `IMPORT INTO` is not supported on tables with vector indexes. You can import the vectors first and create the index after import is complete. [#145227](https://github.com/cockroachdb/cockroach/issues/145227) -- The distance functions `vector_l1_ops`, `bit_hamming_ops`, and `bit_jaccard_ops` are not implemented. [#147839](https://github.com/cockroachdb/cockroach/issues/147839) -- Index acceleration with filters is only supported if the filters match prefix columns. [#146145](https://github.com/cockroachdb/cockroach/issues/146145) -- Index recommendations are not provided for vector indexes. [#146146](https://github.com/cockroachdb/cockroach/issues/146146) \ No newline at end of file +- `IMPORT INTO` is not supported on tables with vector indexes. You can import the vectors first and create the index after import is complete. 
#145227 +- The distance functions `vector_l1_ops`, `bit_hamming_ops`, and `bit_jaccard_ops` are not implemented. #147839 +- Index acceleration with filters is only supported if the filters match prefix columns. #146145 +- Index recommendations are not provided for vector indexes. #146146 \ No newline at end of file diff --git a/src/current/_includes/v26.2/known-limitations/vectorized-engine-limitations.md b/src/current/_includes/v26.2/known-limitations/vectorized-engine-limitations.md index 111226546cf..250e4b5ee28 100644 --- a/src/current/_includes/v26.2/known-limitations/vectorized-engine-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/vectorized-engine-limitations.md @@ -1,2 +1,2 @@ -- The vectorized engine does not support queries containing a join filtered with an [`ON` expression]({% link {{ page.version.version }}/joins.md %}#supported-join-conditions). [#38018](https://github.com/cockroachdb/cockroach/issues/38018) +- The vectorized engine does not support queries containing a join filtered with an [`ON` expression]({% link {{ page.version.version }}/joins.md %}#supported-join-conditions). #38018 - The vectorized engine does not support [working with spatial data]({% link {{ page.version.version }}/query-spatial-data.md %}). Queries with [geospatial functions]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) or [spatial data]({% link {{ page.version.version }}/query-spatial-data.md %}) will revert to the row-oriented execution engine. 
diff --git a/src/current/_includes/v26.2/known-limitations/view-limitations.md b/src/current/_includes/v26.2/known-limitations/view-limitations.md index 946f885d25a..e7c77feb0a5 100644 --- a/src/current/_includes/v26.2/known-limitations/view-limitations.md +++ b/src/current/_includes/v26.2/known-limitations/view-limitations.md @@ -1,2 +1,2 @@ -- Casting subqueries to [user-defined types]({% link {{ page.version.version }}/create-type.md %}) (including ENUMs) in views is not supported. [#108184](https://github.com/cockroachdb/cockroach/issues/108184) -- Statements within views do not currently respect hint injections. The workaround is to modify the inline hints directly in the body by replacing the view. [#166782](https://github.com/cockroachdb/cockroach/issues/166782) \ No newline at end of file +- Casting subqueries to [user-defined types]({% link {{ page.version.version }}/create-type.md %}) (including ENUMs) in views is not supported. #108184 +- Statements within views do not currently respect hint injections. The workaround is to modify the inline hints directly in the body by replacing the view. #166782 \ No newline at end of file diff --git a/src/current/_includes/v26.2/misc/tooling.md b/src/current/_includes/v26.2/misc/tooling.md index 254a6425840..6981a90f68f 100644 --- a/src/current/_includes/v26.2/misc/tooling.md +++ b/src/current/_includes/v26.2/misc/tooling.md @@ -23,7 +23,7 @@ Customers should contact their account team before moving production workloads t Unless explicitly stated, support for a [driver](#drivers) or [data access framework](#data-access-frameworks-e-g-orms) does not include [automatic, client-side transaction retry handling]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}#client-side-retry-handling). For client-side transaction retry handling samples, see [Develop with CockroachDB]({% link {{ page.version.version }}/developer-guide-overview.md %}). 
{{site.data.alerts.end}} -If you encounter problems using CockroachDB with any of the tools listed on this page, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward better support. +If you encounter problems using CockroachDB with any of the tools listed on this page, please open an issue with details to help us make progress toward better support. For a list of tools supported by the CockroachDB community, see [Third-Party Tools Supported by the Community]({% link {{ page.version.version }}/community-tooling.md %}). @@ -32,23 +32,23 @@ For a list of tools supported by the CockroachDB community, see [Third-Party Too | Language | Driver | Latest tested version | Support level | CockroachDB adapter | Tutorial | |----------+--------+-----------------------+---------------------+---------------------+----------| | C | [libpq](http://www.postgresql.org/docs/13/static/libpq.html)| PostgreSQL 13 | Partial | N/A | N/A | -| C# (.NET) | [Npgsql](https://www.nuget.org/packages/Npgsql/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/npgsql.go ||var npgsqlSupportedTag = "v||"\n\n %} | Full | N/A | [Build a C# App with CockroachDB (Npgsql)](build-a-csharp-app-with-cockroachdb.html) | -| Go | [pgx](https://github.com/jackc/pgx/releases)


[pq](https://github.com/lib/pq) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgx.go ||var supportedPGXTag = "||"\n\n %}
(use latest version of CockroachDB adapter)
{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/libpq.go ||var libPQSupportedTag = "||"\n\n %} | Full


Full | [`crdbpgx`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbpgx)
(includes client-side transaction retry handling)
N/A | [Build a Go App with CockroachDB (pgx)](build-a-go-app-with-cockroachdb.html)


[Build a Go App with CockroachDB (pq)](build-a-go-app-with-cockroachdb-pq.html) | -| Java | [JDBC](https://jdbc.postgresql.org/download/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/pgjdbc.go ||var supportedPGJDBCTag = "||"\n\n %} | Full | N/A | [Build a Java App with CockroachDB (JDBC)](build-a-java-app-with-cockroachdb.html) | +| C# (.NET) | [Npgsql](https://www.nuget.org/packages/Npgsql/) | 7.0.2 | Full | N/A | [Build a C# App with CockroachDB (Npgsql)](build-a-csharp-app-with-cockroachdb.html) | +| Go | [pgx](https://github.com/jackc/pgx/releases)


[pq](https://github.com/lib/pq) | v5.3.1
(use latest version of CockroachDB adapter)
v1.10.5 | Full


Full | [`crdbpgx`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbpgx)
(includes client-side transaction retry handling)
N/A | [Build a Go App with CockroachDB (pgx)](build-a-go-app-with-cockroachdb.html)


[Build a Go App with CockroachDB (pq)](build-a-go-app-with-cockroachdb-pq.html) | +| Java | [JDBC](https://jdbc.postgresql.org/download/) | REL42.7.3 | Full | N/A | [Build a Java App with CockroachDB (JDBC)](build-a-java-app-with-cockroachdb.html) | | JavaScript | [pg](https://www.npmjs.com/package/pg) | 8.2.1 | Full | N/A | [Build a Node.js App with CockroachDB (pg)](build-a-nodejs-app-with-cockroachdb.html) | -| Python | [psycopg3](https://www.psycopg.org/psycopg3/docs/)


[psycopg2](https://www.psycopg.org/docs/install.html)


[asyncpg](https://magicstack.github.io/asyncpg/current/index.html) | 3.0.16


2.8.6


{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/asyncpg.go || var asyncpgSupportedTag = "||"\n\n %} | Full


Full


Partial | N/A


N/A


N/A | [Build a Python App with CockroachDB (psycopg3)](build-a-python-app-with-cockroachdb-psycopg3.html)


[Build a Python App with CockroachDB (psycopg2)](build-a-python-app-with-cockroachdb.html)


[Build a Python App with CockroachDB (asyncpg)](build-a-python-app-with-cockroachdb-asyncpg.html) | -| Ruby | [pg](https://rubygems.org/gems/pg) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/ruby_pg.go ||var rubyPGVersion = "||"\n\n %} | Full | N/A | [Build a Ruby App with CockroachDB (pg)](build-a-ruby-app-with-cockroachdb.html) | +| Python | [psycopg3](https://www.psycopg.org/psycopg3/docs/)


[psycopg2](https://www.psycopg.org/docs/install.html)


[asyncpg](https://magicstack.github.io/asyncpg/current/index.html) | 3.0.16


2.8.6


v0.24.0 | Full


Full


Partial | N/A


N/A


N/A | [Build a Python App with CockroachDB (psycopg3)](build-a-python-app-with-cockroachdb-psycopg3.html)


[Build a Python App with CockroachDB (psycopg2)](build-a-python-app-with-cockroachdb.html)


[Build a Python App with CockroachDB (asyncpg)](build-a-python-app-with-cockroachdb-asyncpg.html) | +| Ruby | [pg](https://rubygems.org/gems/pg) | v1.4.6 | Full | N/A | [Build a Ruby App with CockroachDB (pg)](build-a-ruby-app-with-cockroachdb.html) | | Rust | [rust-postgres](https://github.com/sfackler/rust-postgres)


[sqlx](https://github.com/launchbadge/sqlx) | 0.19.2


0.8.6 | Partial


Partial | N/A


N/A | [Build a Rust App with CockroachDB]({% link {{ page.version.version }}/build-a-rust-app-with-cockroachdb.md %})


N/A | ## Data access frameworks (e.g., ORMs) | Language | Framework | Latest tested version | Support level | CockroachDB adapter | Tutorial | |----------+-----------+-----------------------+---------------+---------------------+----------| -| Go | [GORM](https://github.com/jinzhu/gorm/releases)


[go-pg](https://github.com/go-pg/pg)
[upper/db](https://github.com/upper/db) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gorm.go ||var gormSupportedTag = "||"\n\n %}


{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/gopg.go ||var gopgSupportedTag = "||"\n\n %}
v4 | Full


Full
Full | [`crdbgorm`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbgorm)
(includes client-side transaction retry handling)
N/A
N/A | [Build a Go App with CockroachDB (GORM)](build-a-go-app-with-cockroachdb-gorm.html)


N/A
[Build a Go App with CockroachDB (upper/db)](build-a-go-app-with-cockroachdb-upperdb.html) | -| Java | [Hibernate](https://hibernate.org/orm/)
(including [Hibernate Spatial](https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html#spatial))
[jOOQ](https://www.jooq.org/)
[MyBatis](https://mybatis.org/mybatis-3/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/hibernate.go ||var supportedHibernateTag = "||"\n\n %} (must be at least 5.4.19)


3.13.2 (must be at least 3.13.0)
3.5.5| Full


Full
Full | N/A


N/A
N/A | [Build a Java App with CockroachDB (Hibernate)](build-a-java-app-with-cockroachdb-hibernate.html)


[Build a Java App with CockroachDB (jOOQ)](build-a-java-app-with-cockroachdb-jooq.html)
[Build a Spring App with CockroachDB (MyBatis)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) | -| JavaScript/TypeScript | [Sequelize](https://www.npmjs.com/package/sequelize)


[Knex.js](https://knexjs.org/)
[Prisma](https://prisma.io)
[TypeORM](https://www.npmjs.com/package/typeorm) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/sequelize.go ||var supportedSequelizeCockroachDBRelease = "||"\n\n %}
(use latest version of CockroachDB adapter)
{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/knex.go ||const supportedKnexTag = "||"\n\n %}
3.14.0
0.3.17 {% comment %}{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/typeorm.go ||const supportedTypeORMRelease = "||"\n %}{% endcomment %} | Full


Full
Full
Full | [`sequelize-cockroachdb`](https://www.npmjs.com/package/sequelize-cockroachdb)


N/A
N/A
N/A | [Build a Node.js App with CockroachDB (Sequelize)](build-a-nodejs-app-with-cockroachdb-sequelize.html)


[Build a Node.js App with CockroachDB (Knex.js)](build-a-nodejs-app-with-cockroachdb-knexjs.html)
[Build a Node.js App with CockroachDB (Prisma)](build-a-nodejs-app-with-cockroachdb-prisma.html)
[Build a TypeScript App with CockroachDB (TypeORM)](build-a-typescript-app-with-cockroachdb.html) | -| Ruby | [ActiveRecord](https://rubygems.org/gems/activerecord)
[RGeo/RGeo-ActiveRecord](https://github.com/cockroachdb/activerecord-cockroachdb-adapter#working-with-spatial-data) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/activerecord.go ||var supportedRailsVersion = "||"\nvar %}
(use latest version of CockroachDB adapter) | Full | [`activerecord-cockroachdb-adapter`](https://rubygems.org/gems/activerecord-cockroachdb-adapter)
(includes client-side transaction retry handling) | [Build a Ruby App with CockroachDB (ActiveRecord)](build-a-ruby-app-with-cockroachdb-activerecord.html) | -| Python | [Django](https://pypi.org/project/Django/)
(including [GeoDjango](https://docs.djangoproject.com/en/3.1/ref/contrib/gis/))
[peewee](https://github.com/coleifer/peewee/)
[SQLAlchemy](https://www.sqlalchemy.org/) | {% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/cmd/roachtest/tests/django.go ||var djangoSupportedTag = "cockroach-||"\nvar %}
(use latest version of CockroachDB adapter)

3.13.3
0.7.13
1.4.17
(use latest version of CockroachDB adapter) | Full


Full
Full
Full | [`django-cockroachdb`](https://pypi.org/project/django-cockroachdb/)


N/A
N/A
[`sqlalchemy-cockroachdb`](https://pypi.org/project/sqlalchemy-cockroachdb)
(includes client-side transaction retry handling) | [Build a Python App with CockroachDB (Django)](build-a-python-app-with-cockroachdb-django.html)


N/A (See [peewee docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cockroach-database).)
[Build a Python App with CockroachDB (SQLAlchemy)](build-a-python-app-with-cockroachdb-sqlalchemy.html) | +| Go | [GORM](https://github.com/jinzhu/gorm/releases)


[go-pg](https://github.com/go-pg/pg)
[upper/db](https://github.com/upper/db) | v1.24.1


v10.9.0
v4 | Full


Full
Full | [`crdbgorm`](https://pkg.go.dev/github.com/cockroachdb/cockroach-go/crdb/crdbgorm)
(includes client-side transaction retry handling)
N/A
N/A | [Build a Go App with CockroachDB (GORM)](build-a-go-app-with-cockroachdb-gorm.html)


N/A
[Build a Go App with CockroachDB (upper/db)](build-a-go-app-with-cockroachdb-upperdb.html) | +| Java | [Hibernate](https://hibernate.org/orm/)
(including [Hibernate Spatial](https://docs.jboss.org/hibernate/orm/current/userguide/html_single/Hibernate_User_Guide.html#spatial))
[jOOQ](https://www.jooq.org/)
[MyBatis](https://mybatis.org/mybatis-3/) | 6.6.20 (must be at least 5.4.19)


3.13.2 (must be at least 3.13.0)
3.5.5| Full


Full
Full | N/A


N/A
N/A | [Build a Java App with CockroachDB (Hibernate)](build-a-java-app-with-cockroachdb-hibernate.html)


[Build a Java App with CockroachDB (jOOQ)](build-a-java-app-with-cockroachdb-jooq.html)
[Build a Spring App with CockroachDB (MyBatis)]({% link {{ page.version.version }}/build-a-spring-app-with-cockroachdb-mybatis.md %}) | +| JavaScript/TypeScript | [Sequelize](https://www.npmjs.com/package/sequelize)


[Knex.js](https://knexjs.org/)
[Prisma](https://prisma.io)
[TypeORM](https://www.npmjs.com/package/typeorm) | v6.0.5
(use latest version of CockroachDB adapter)
2.5.1
3.14.0
0.3.17 {% comment %}remove-unsafe-crdb-setting{% endcomment %} | Full


Full
Full
Full | [`sequelize-cockroachdb`](https://www.npmjs.com/package/sequelize-cockroachdb)


N/A
N/A
N/A | [Build a Node.js App with CockroachDB (Sequelize)](build-a-nodejs-app-with-cockroachdb-sequelize.html)


[Build a Node.js App with CockroachDB (Knex.js)](build-a-nodejs-app-with-cockroachdb-knexjs.html)
[Build a Node.js App with CockroachDB (Prisma)](build-a-nodejs-app-with-cockroachdb-prisma.html)
[Build a TypeScript App with CockroachDB (TypeORM)](build-a-typescript-app-with-cockroachdb.html) | +| Ruby | [ActiveRecord](https://rubygems.org/gems/activerecord)
[RGeo/RGeo-ActiveRecord](https://github.com/cockroachdb/activerecord-cockroachdb-adapter#working-with-spatial-data) | 8.1.0
(use latest version of CockroachDB adapter) | Full | [`activerecord-cockroachdb-adapter`](https://rubygems.org/gems/activerecord-cockroachdb-adapter)
(includes client-side transaction retry handling) | [Build a Ruby App with CockroachDB (ActiveRecord)](build-a-ruby-app-with-cockroachdb-activerecord.html) | +| Python | [Django](https://pypi.org/project/Django/)
(including [GeoDjango](https://docs.djangoproject.com/en/3.1/ref/contrib/gis/))
[peewee](https://github.com/coleifer/peewee/)
[SQLAlchemy](https://www.sqlalchemy.org/) | 4.1.x
(use latest version of CockroachDB adapter)

3.13.3
0.7.13
1.4.17
(use latest version of CockroachDB adapter) | Full


Full
Full
Full | [`django-cockroachdb`](https://pypi.org/project/django-cockroachdb/)


N/A
N/A
[`sqlalchemy-cockroachdb`](https://pypi.org/project/sqlalchemy-cockroachdb)
(includes client-side transaction retry handling) | [Build a Python App with CockroachDB (Django)](build-a-python-app-with-cockroachdb-django.html)


N/A (See [peewee docs](http://docs.peewee-orm.com/en/latest/peewee/playhouse.html#cockroach-database).)
[Build a Python App with CockroachDB (SQLAlchemy)](build-a-python-app-with-cockroachdb-sqlalchemy.html) | ## Graphical user interfaces (GUIs) diff --git a/src/current/_includes/v26.2/orchestration/start-cockroachdb-insecure.md b/src/current/_includes/v26.2/orchestration/start-cockroachdb-insecure.md index 3406d48edbb..c9af5cc0267 100644 --- a/src/current/_includes/v26.2/orchestration/start-cockroachdb-insecure.md +++ b/src/current/_includes/v26.2/orchestration/start-cockroachdb-insecure.md @@ -1,10 +1,10 @@ -1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it. +1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it. - Download [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml): + Download [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml ~~~ {{site.data.alerts.callout_info}} @@ -27,11 +27,11 @@ Alternatively, if you'd rather start with a configuration file that has been customized for performance: - 1. 
Download our [performance version of `cockroachdb-statefulset-insecure.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml): + 1. Download our [performance version of `cockroachdb-statefulset-insecure.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml ~~~ 1. Modify the file wherever there is a `TODO` comment. @@ -72,12 +72,12 @@ pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s ~~~ -1. Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: +1. 
Use our [`cluster-init.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml ~~~ ~~~ diff --git a/src/current/_includes/v26.2/orchestration/start-cockroachdb-local-insecure.md b/src/current/_includes/v26.2/orchestration/start-cockroachdb-local-insecure.md index 552cb3cd25f..196a53bce4c 100644 --- a/src/current/_includes/v26.2/orchestration/start-cockroachdb-local-insecure.md +++ b/src/current/_includes/v26.2/orchestration/start-cockroachdb-local-insecure.md @@ -1,8 +1,8 @@ -1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it: +1. From your local workstation, use our [`cockroachdb-statefulset.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml) file to create the StatefulSet that automatically creates 3 pods, each with a CockroachDB node running inside it: {% include_cached copy-clipboard.html %} ~~~ shell - $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cockroachdb-statefulset.yaml + $ kubectl create -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml ~~~ ~~~ @@ -41,12 +41,12 @@ pvc-5315efda-8bd5-11e6-a4f4-42010a800002 1Gi RWO Delete Bound default/datadir-cockroachdb-2 27s ~~~ -1. 
Use our [`cluster-init.yaml`](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: +1. Use our [`cluster-init.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml) file to perform a one-time initialization that joins the CockroachDB nodes into a single cluster: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/cluster-init.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml ~~~ ~~~ diff --git a/src/current/_includes/v26.2/orchestration/start-cockroachdb-secure.md b/src/current/_includes/v26.2/orchestration/start-cockroachdb-secure.md index 972cabc2d8e..a7299e1aa25 100644 --- a/src/current/_includes/v26.2/orchestration/start-cockroachdb-secure.md +++ b/src/current/_includes/v26.2/orchestration/start-cockroachdb-secure.md @@ -1,10 +1,10 @@ ### Configure the cluster -1. Download and modify our [StatefulSet configuration](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml): +1. Download and modify our [StatefulSet configuration](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/cockroachdb-statefulset.yaml ~~~ 1. Update `secretName` with the name of the corresponding node secret. 
diff --git a/src/current/_includes/v26.2/orchestration/test-cluster-secure.md b/src/current/_includes/v26.2/orchestration/test-cluster-secure.md index f255d8d62fc..c146d634626 100644 --- a/src/current/_includes/v26.2/orchestration/test-cluster-secure.md +++ b/src/current/_includes/v26.2/orchestration/test-cluster-secure.md @@ -42,7 +42,7 @@ $ kubectl create \ {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl create \ --f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/bring-your-own-certs/client.yaml +-f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/bring-your-own-certs/client.yaml ~~~ ~~~ diff --git a/src/current/_includes/v26.2/prod-deployment/decommission-pre-flight-checks.md b/src/current/_includes/v26.2/prod-deployment/decommission-pre-flight-checks.md index b267379384b..70e35e4b72e 100644 --- a/src/current/_includes/v26.2/prod-deployment/decommission-pre-flight-checks.md +++ b/src/current/_includes/v26.2/prod-deployment/decommission-pre-flight-checks.md @@ -14,5 +14,5 @@ Failed running "node decommission" These checks can be skipped by [passing the flag `--checks=skip` to `cockroach node decommission`]({% link {{ page.version.version }}/cockroach-node.md %}#decommission-checks). {{site.data.alerts.callout_info}} -The amount of remaining disk space on other nodes in the cluster is not yet considered as part of the decommissioning pre-flight checks. For more information, see [cockroachdb/cockroach#71757](https://github.com/cockroachdb/cockroach/issues/71757) +The amount of remaining disk space on other nodes in the cluster is not yet considered as part of the decommissioning pre-flight checks. 
For more information, see cockroachdb/cockroach#71757 {{site.data.alerts.end}} diff --git a/src/current/_includes/v26.2/sql/savepoints-and-high-priority-transactions.md b/src/current/_includes/v26.2/sql/savepoints-and-high-priority-transactions.md index c6de489e641..43f09ea415f 100644 --- a/src/current/_includes/v26.2/sql/savepoints-and-high-priority-transactions.md +++ b/src/current/_includes/v26.2/sql/savepoints-and-high-priority-transactions.md @@ -1 +1 @@ -[`ROLLBACK TO SAVEPOINT`]({% link {{ page.version.version }}/rollback-transaction.md %}#rollback-a-nested-transaction) (for either regular savepoints or "restart savepoints" defined with `cockroach_restart`) causes a "feature not supported" error after a DDL statement in a [`HIGH PRIORITY` transaction]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities), in order to avoid a transaction deadlock. For more information, see GitHub issue [#46414](https://www.github.com/cockroachdb/cockroach/issues/46414). +[`ROLLBACK TO SAVEPOINT`]({% link {{ page.version.version }}/rollback-transaction.md %}#rollback-a-nested-transaction) (for either regular savepoints or "restart savepoints" defined with `cockroach_restart`) causes a "feature not supported" error after a DDL statement in a [`HIGH PRIORITY` transaction]({% link {{ page.version.version }}/transactions.md %}#transaction-priorities), in order to avoid a transaction deadlock. For more information, see GitHub issue #46414. diff --git a/src/current/_includes/v26.2/sql/unsupported-postgres-features.md b/src/current/_includes/v26.2/sql/unsupported-postgres-features.md index a89650e38e9..9ad89047189 100644 --- a/src/current/_includes/v26.2/sql/unsupported-postgres-features.md +++ b/src/current/_includes/v26.2/sql/unsupported-postgres-features.md @@ -1,10 +1,10 @@ ### `CREATE DOMAIN` -CockroachDB does not support `CREATE DOMAIN`. Tracking issue: [cockroachdb/cockroach#108659](https://github.com/cockroachdb/cockroach/issues/108659). 
+CockroachDB does not support `CREATE DOMAIN`. Tracking issue: cockroachdb/cockroach#108659. ### PostgreSQL range types -CockroachDB does not support PostgreSQL range types. Tracking issue: [cockroachdb/cockroach#128638](https://github.com/cockroachdb/cockroach/issues/128638). +CockroachDB does not support PostgreSQL range types. Tracking issue: cockroachdb/cockroach#128638. ### Other unsupported features diff --git a/src/current/advisories/a101963.md b/src/current/advisories/a101963.md index f404a8a28d1..75a292f38bc 100644 --- a/src/current/advisories/a101963.md +++ b/src/current/advisories/a101963.md @@ -19,13 +19,13 @@ In the following circumstances, backups that include [revision history](https:// ## Statement -This is resolved in CockroachDB by [#102179](https://github.com/cockroachdb/cockroach/pull/102179), which ensures that `RESTORE` operations can correctly restore from backups that include this incorrect metadata. +This is resolved in CockroachDB by #102179, which ensures that `RESTORE` operations can correctly restore from backups that include this incorrect metadata. The fix has been applied to maintenance releases of CockroachDB: [v22.2.9](https://www.cockroachlabs.com/docs/releases/v22.2#v22-2-9). This fix will be applied to maintenance releases of CockroachDB: v22.1.20. Until that version is available, refer to [Mitigation](#mitigation) for a suggested workaround. -This public issue is tracked by [#101963](https://github.com/cockroachdb/cockroach/issues/101963). +This public issue is tracked by #101963. 
## Mitigation diff --git a/src/current/advisories/a102375.md b/src/current/advisories/a102375.md index ec031327deb..1ac6d7165d3 100644 --- a/src/current/advisories/a102375.md +++ b/src/current/advisories/a102375.md @@ -16,13 +16,13 @@ In CockroachDB versions v22.1.19 and v22.2.8, some customers may experience spur ## Statement -This is resolved in CockroachDB by PR [#102405](https://github.com/cockroachdb/cockroach/issues/102405) which ensures that privilege checks happen after staleness checks when attempting to use the query cache. +This is resolved in CockroachDB by PR #102405 which ensures that privilege checks happen after staleness checks when attempting to use the query cache. The fix has been applied to the maintenance release of CockroachDB [v22.2.9](https://www.cockroachlabs.com/docs/releases/v22.2#v22-2-9). This fix will be applied to the maintenance release of CockroachDB v22.1.20. -This public issue is tracked by [#102375](https://github.com/cockroachdb/cockroach/issues/102375). +This public issue is tracked by #102375. ## Mitigation diff --git a/src/current/advisories/a103220.md b/src/current/advisories/a103220.md index 877b5b6695f..b7a59712884 100644 --- a/src/current/advisories/a103220.md +++ b/src/current/advisories/a103220.md @@ -15,11 +15,11 @@ In CockroachDB v23.1.0 and its testing versions, inserting rows into a multi-[co ## Statement -This is resolved in CockroachDB by PR [103323](https://github.com/cockroachdb/cockroach/pull/103323). +This is resolved in CockroachDB by PR 103323. This fix has been applied to maintenance releases of CockroachDB [v23.1.1](https://www.cockroachlabs.com/docs/releases/v23.1#v23-1-1) and later. -This public issue is tracked by [#103220](https://github.com/cockroachdb/cockroach/issues/103220). +This public issue is tracked by #103220. 
## Mitigation diff --git a/src/current/advisories/a104309.md b/src/current/advisories/a104309.md index 7fd8b7b5feb..e5ba1bf5c4a 100644 --- a/src/current/advisories/a104309.md +++ b/src/current/advisories/a104309.md @@ -47,7 +47,7 @@ Versions prior to 22.2 are no longer eligible for [maintenance support](https:// ## Statement -This is resolved in CockroachDB by [PR #117612](https://github.com/cockroachdb/cockroach/pull/117612), which uses a barrier command to ensure that all historical and ongoing range writes have been applied to the local replica and emitted before the resolved timestamp is advanced and a checkpoint is emitted. +This is resolved in CockroachDB by PR #117612, which uses a barrier command to ensure that all historical and ongoing range writes have been applied to the local replica and emitted before the resolved timestamp is advanced and a checkpoint is emitted. The fix has been applied to maintenance releases of CockroachDB: @@ -63,7 +63,7 @@ However, the initial fix introduced a bug that could cause a rangefeed’s resol Users are encouraged to upgrade to a version that contains both fixes. -This public issue is tracked by [Issue #104309](https://github.com/cockroachdb/cockroach/issues/104309). +This public issue is tracked by Issue #104309. ## Mitigation @@ -90,7 +90,7 @@ If data is found, it can be re-emitted in two ways: {{site.data.alerts.callout_info}} -This fix introduces a [bug](https://github.com/cockroachdb/cockroach/issues/119536) that could cause a rangefeed’s resolved timestamp to stop advancing. The corresponding changefeed will appear to be stalled in `RUNNING` state in certain conditions: If a rangefeed is running on a follower on a recently-merged range, and the rangefeed encounters an aborted transaction, then the resolved timestamp may stall. Events such as row updates will still be emitted as normal, but new checkpoints will not be emitted. 
+This fix introduces a bug that could cause a rangefeed’s resolved timestamp to stop advancing. The corresponding changefeed will appear to be stalled in `RUNNING` state in certain conditions: If a rangefeed is running on a follower on a recently-merged range, and the rangefeed encounters an aborted transaction, then the resolved timestamp may stall. Events such as row updates will still be emitted as normal, but new checkpoints will not be emitted. That bug is fixed in the following versions: @@ -120,7 +120,7 @@ If your cluster experiences a stalled rangefeed or changefeed after upgrading, y As an alternative to avoid disruption to the changefeed, you can temporarily disable `kv.rangefeed.push_txns.barrier.enabled` to disable the fix to this advisory, **a104309**, until a fix to the stalled rangefeed bug is available. -This issue is tracked by [Issue #119536](https://github.com/cockroachdb/cockroach/issues/119536). +This issue is tracked by Issue #119536. {{site.data.alerts.end}} ## Impact diff --git a/src/current/advisories/a106617.md b/src/current/advisories/a106617.md index 43ce154346e..5eb69e00da5 100644 --- a/src/current/advisories/a106617.md +++ b/src/current/advisories/a106617.md @@ -20,11 +20,11 @@ The corruption is limited to a single store, allowing recovery through [decommis ## Statement -This is resolved in CockroachDB by [#107249](https://github.com/cockroachdb/cockroach/pull/107249), which fixes the ordering of steps during encryption-at-rest log rotation. +This is resolved in CockroachDB by #107249, which fixes the ordering of steps during encryption-at-rest log rotation. The fix has been applied to maintenance releases of CockroachDB [v23.1.8](../releases/v23.1.html#v23-1-8), [v22.2.13](../releases/v22.2.html#v22-2-13), v22.1.22. -This public issue is tracked by [#106617](https://github.com/cockroachdb/cockroach/issues/106617). +This public issue is tracked by #106617. 
## Mitigation diff --git a/src/current/advisories/a110363.md b/src/current/advisories/a110363.md index 0d3e04692c2..bf4450a9c3d 100644 --- a/src/current/advisories/a110363.md +++ b/src/current/advisories/a110363.md @@ -22,11 +22,11 @@ This issue affects clusters that meet all of the following conditions: ## Statement -This is resolved in CockroachDB by [#110364](https://github.com/cockroachdb/cockroach/pull/110364), which prevents deletion of the TTL field upon upgrade. +This is resolved in CockroachDB by #110364, which prevents deletion of the TTL field upon upgrade. The fix will be available in CockroachDB v23.1.10. -This public issue is tracked by [#110363](https://github.com/cockroachdb/cockroach/issues/110363). +This public issue is tracked by #110363. ## Mitigation diff --git a/src/current/advisories/a114393.md b/src/current/advisories/a114393.md index 139cb8a57d6..adbda2e6b4c 100644 --- a/src/current/advisories/a114393.md +++ b/src/current/advisories/a114393.md @@ -20,11 +20,11 @@ A critical bug has been identified in the [optimizer](../v23.1/cost-based-optimi ## Statement -This is resolved in CockroachDB by [#114394](https://github.com/cockroachdb/cockroach/pull/114394). +This is resolved in CockroachDB by #114394. This fix has been applied to maintenance release of CockroachDB [v23.1.13](../releases/v23.1.html#v23-1-13), which fixes a bug that could cause a query plan to skip scanning rows from the local region when performing a lookup join with a `REGIONAL BY ROW` table as the input. -This public issue is tracked by [#114393](https://github.com/cockroachdb/cockroach/issues/114393). +This public issue is tracked by #114393. 
## Mitigation diff --git a/src/current/advisories/a122372.md b/src/current/advisories/a122372.md index 4bc217b89c4..e4c677c3a63 100644 --- a/src/current/advisories/a122372.md +++ b/src/current/advisories/a122372.md @@ -14,15 +14,15 @@ Publication date: {{ page.advisory_date | date: "%B %e, %Y" }} In all versions of CockroachDB v23.1, v23.2.0 to v23.2.9, v24.1.0 to v24.1.3, and testing versions of v24.2 through v24.2.0-beta.3, [changefeeds]({% link v24.2/change-data-capture-overview.md %}) could emit events on the same row out of order in some cases, which violates [changefeed ordering guarantees]({% link v24.2/changefeed-messages.md %}#per-key-ordering). This issue was caused by a [bug in the sarama Kafka client library](https://github.com/IBM/sarama/issues/2619), a third-party library that CockroachDB uses to talk to Kafka clusters. The bug manifested when a workload had mutations to the same key in rapid succession, because it was possible for [sarama](https://github.com/IBM/sarama) to re-order the messages if it encountered a retryable Kafka error. -To resolve this issue, CockroachDB now uses a new version of the Kafka sink that uses [franz-go](https://github.com/twmb/franz-go), a different third-party Kafka client library that does not have this bug. The new Kafka sink can be enabled using the cluster setting [`changefeed.new_kafka_sink.enabled`]({% link v24.2/show-cluster-setting.md %}). The new Kafka sink was added in PR [#126213](https://github.com/cockroachdb/cockroach/pull/126213). +To resolve this issue, CockroachDB now uses a new version of the Kafka sink that uses [franz-go](https://github.com/twmb/franz-go), a different third-party Kafka client library that does not have this bug. The new Kafka sink can be enabled using the cluster setting [`changefeed.new_kafka_sink.enabled`]({% link v24.2/show-cluster-setting.md %}). The new Kafka sink was added in PR #126213. 
## Statement -This is resolved in CockroachDB by PR [#126213](https://github.com/cockroachdb/cockroach/pull/126213), which replaces the Kafka client library [sarama](https://github.com/IBM/sarama) with [franz-go](https://github.com/twmb/franz-go). +This is resolved in CockroachDB by PR #126213, which replaces the Kafka client library [sarama](https://github.com/IBM/sarama) with [franz-go](https://github.com/twmb/franz-go). The fix has been applied to maintenance releases of CockroachDB v23.2.10, v24.1.4, and v24.2.0-rc.1. -This public issue is tracked by issue [#122372](https://github.com/cockroachdb/cockroach/issues/122372). +This public issue is tracked by issue #122372. ## Mitigation diff --git a/src/current/advisories/a123371.md b/src/current/advisories/a123371.md index ee5c9ad2a14..af0e353bd4e 100644 --- a/src/current/advisories/a123371.md +++ b/src/current/advisories/a123371.md @@ -12,7 +12,7 @@ Publication date: {{ page.advisory_date | date: "%B %e, %Y" }} ## Description -In all versions of CockroachDB v22.2, v23.1.0 to v23.1.21, v23.2.0 to v23.2.5, and testing versions of v24.1 through v24.1.0-rc.1, changefeeds could drop events during the initial scan in some cases, causing changefeed consumers to receive incomplete data. This bug was caused by a [code change](https://github.com/cockroachdb/cockroach/commit/0eda54018b9676f855efcd90bfdd0c486c97bfdd) to reduce the number of duplicates sent during an initial scan if the changefeed needed to restart by using the checkpoint to determine which spans could be skipped when the job resumed. This change led to some non-determinism in another part of the codebase that would sometimes incorrectly forward the progress of every span a node was tracking to the lowest checkpoint timestamp when some spans may not have been scanned yet. This bug is now fixed by [PR #123625](https://github.com/cockroachdb/cockroach/pull/123625). 
+In all versions of CockroachDB v22.2, v23.1.0 to v23.1.21, v23.2.0 to v23.2.5, and testing versions of v24.1 through v24.1.0-rc.1, changefeeds could drop events during the initial scan in some cases, causing changefeed consumers to receive incomplete data. This bug was caused by a code change to reduce the number of duplicates sent during an initial scan if the changefeed needed to restart by using the checkpoint to determine which spans could be skipped when the job resumed. This change led to some non-determinism in another part of the codebase that would sometimes incorrectly forward the progress of every span a node was tracking to the lowest checkpoint timestamp when some spans may not have been scanned yet. This bug is now fixed by PR #123625. Symptoms of the bug: @@ -35,9 +35,9 @@ Factors that increase the likelihood the bug occurring: ## Statement -This is resolved in CockroachDB by [PR #123625](https://github.com/cockroachdb/cockroach/pull/123625) which prevents incorrect forwarding of progress for spans that have not been scanned yet by the initial scan. +This is resolved in CockroachDB by PR #123625 which prevents incorrect forwarding of progress for spans that have not been scanned yet by the initial scan. The fix has been applied to maintenance releases of CockroachDB v23.1.22, v23.2.6, and v24.1.0-rc.2. -This public issue is tracked by [#123371](https://github.com/cockroachdb/cockroach/issues/123371). +This public issue is tracked by #123371. 
## Mitigation diff --git a/src/current/advisories/a131639.md b/src/current/advisories/a131639.md index 6564386f581..0550531bcb5 100644 --- a/src/current/advisories/a131639.md +++ b/src/current/advisories/a131639.md @@ -35,7 +35,7 @@ Versions prior to 23.1 are no longer eligible for [maintenance support]({% link ## Statement -[In #123442](https://github.com/cockroachdb/cockroach/commit/6dd54b46cc56b7d2b302e0d5ec1509658a1c86f7), we resolved an issue with CockroachDB in the expiration-to-epoch lease promotion transition process, where a lease's effective expiration could be allowed to regress, resulting in two nodes believing they are the leaseholder for a range. +In #123442, we resolved an issue with CockroachDB in the expiration-to-epoch lease promotion transition process, where a lease's effective expiration could be allowed to regress, resulting in two nodes believing they are the leaseholder for a range. The patch has been applied to maintenance releases of CockroachDB: @@ -43,7 +43,7 @@ The patch has been applied to maintenance releases of CockroachDB: - [v23.2.11]({% link releases/v23.2.md%}#v23-2-11) - [v24.1.1]({% link releases/v24.1.md%}#v24-1-1) -This public issue is tracked by [131639](https://github.com/cockroachdb/cockroach/issues/131639). +This public issue is tracked by 131639. ## Mitigation diff --git a/src/current/advisories/a144650.md b/src/current/advisories/a144650.md index 969f5c24488..0c79fd10c01 100644 --- a/src/current/advisories/a144650.md +++ b/src/current/advisories/a144650.md @@ -28,9 +28,9 @@ For steps to identify and mitigate affected operations, refer to [Mitigation](#m ## Statement -This issue is resolved in CockroachDB by [#144646](https://github.com/cockroachdb/cockroach/pull/144646), which disables the rare (async flush) code path for bulk write operations. The fix has been applied to the v23.2.24, v24.1.17, v24.3.11, and v25.1.5 production releases, and to the v25.2.0-beta.2 testing release. 
+This issue is resolved in CockroachDB by #144646, which disables the rare (async flush) code path for bulk write operations. The fix has been applied to the v23.2.24, v24.1.17, v24.3.11, and v25.1.5 production releases, and to the v25.2.0-beta.2 testing release. -The issue is tracked publicly by [#144650](https://github.com/cockroachdb/cockroach/issues/144650). +The issue is tracked publicly by #144650. ## Mitigation diff --git a/src/current/advisories/a151050.md b/src/current/advisories/a151050.md index e259bbad3c3..d737bdde87d 100644 --- a/src/current/advisories/a151050.md +++ b/src/current/advisories/a151050.md @@ -20,11 +20,11 @@ While this issue is expected to occur rarely, we recommend upgrading to the next ## Statement -This issue is resolved in CockroachDB by PR [#151058](https://github.com/cockroachdb/cockroach/pull/151058) which ensures errors encountered during all buffer flushes - even those caused by intermittent object storage provider errors - are correctly reported to the backup job. +This issue is resolved in CockroachDB by PR #151058 which ensures errors encountered during all buffer flushes - even those caused by intermittent object storage provider errors - are correctly reported to the backup job. The fix has been applied to the following versions of CockroachDB: v24.1.22, v24.3.17, v25.1.10, v25.2.4, and the testing version of v25.3, v25.3.0-rc.2. -This issue is tracked publicly by [#151050](https://github.com/cockroachdb/cockroach/issues/151050). +This issue is tracked publicly by #151050. 
## Mitigation diff --git a/src/current/advisories/a161317.md b/src/current/advisories/a161317.md index f39aeb1e4c9..2a2990ad293 100644 --- a/src/current/advisories/a161317.md +++ b/src/current/advisories/a161317.md @@ -26,7 +26,7 @@ This advisory applies to the following versions of CockroachDB: ## Statement -This is resolved in CockroachDB by PR [#161318](https://github.com/cockroachdb/cockroach/pull/161318) which fixes the improper error handling in the Avro OCF reader implementation. +This is resolved in CockroachDB by PR #161318 which fixes the improper error handling in the Avro OCF reader implementation. This fix has been applied to the following maintenance releases of CockroachDB: @@ -35,7 +35,7 @@ This fix has been applied to the following maintenance releases of CockroachDB: - v25.4.4 - Testing release v26.1.0-rc.1 -This public issue is tracked in [#161317](https://github.com/cockroachdb/cockroach/issues/161317). +This public issue is tracked in #161317. ## Mitigation diff --git a/src/current/advisories/a162085.md b/src/current/advisories/a162085.md index 35f6e48093d..6d32870ffad 100644 --- a/src/current/advisories/a162085.md +++ b/src/current/advisories/a162085.md @@ -16,7 +16,7 @@ A race condition between MVCC garbage collection and range splits in CockroachDB Most workloads are unlikely to be affected by this issue. The conditions that precipitate data loss are narrow and require a specific combination of data lifecycle patterns and timing. Details on how to assess your risk are provided in the *Am I Affected?* section below. -This public issue is tracked by [#162085](https://github.com/cockroachdb/cockroach/issues/162085). +This public issue is tracked by #162085. ## Action Required @@ -140,7 +140,7 @@ This section provides a detailed technical explanation of the underlying issue f ### Background -CockroachDB v23.1 (PR [#90830](https://github.com/cockroachdb/cockroach/pull/90830)) added an optimization to the garbage collection process. 
When there is a contiguous span of at least 2,000 non-live (obsolete) keys in a range, the GC process writes a Pebble range tombstone using a ClearRange request to efficiently delete this span of data, rather than deleting each key individually. +CockroachDB v23.1 (PR #90830) added an optimization to the garbage collection process. When there is a contiguous span of at least 2,000 non-live (obsolete) keys in a range, the GC process writes a Pebble range tombstone using a ClearRange request to efficiently delete this span of data, rather than deleting each key individually. ### The race condition diff --git a/src/current/advisories/a166122.md b/src/current/advisories/a166122.md index 45100e6e961..60be0858600 100644 --- a/src/current/advisories/a166122.md +++ b/src/current/advisories/a166122.md @@ -37,7 +37,7 @@ The root cause is in the SQL optimizer, which incorrectly applies two optimizati ## Statement -This is resolved in CockroachDB by [PR #166123](https://github.com/cockroachdb/cockroach/pull/166123), which fixes the optimizer to always fetch the required columns and avoid simplifying predicate evaluation for mutation indexes. +This is resolved in CockroachDB by PR #166123, which fixes the optimizer to always fetch the required columns and avoid simplifying predicate evaluation for mutation indexes. A fix has been applied to the following maintenance releases of CockroachDB: @@ -47,7 +47,7 @@ A fix has been applied to the following maintenance releases of CockroachDB: - v26.1.3 - Testing release v26.2.0-beta.2 -This public issue is tracked by [#166122](https://github.com/cockroachdb/cockroach/issues/166122). +This public issue is tracked by #166122. ## Mitigation diff --git a/src/current/advisories/a190483.md b/src/current/advisories/a190483.md index 9aaf0412c90..70eb4132a29 100644 --- a/src/current/advisories/a190483.md +++ b/src/current/advisories/a190483.md @@ -34,11 +34,11 @@ If the result set is empty, no further action is required. 
If the result set is ## Statement -This is resolved in CockroachDB by PR [#109713](https://github.com/cockroachdb/cockroach/pull/109713) for v23.1 and PR [#109750](https://github.com/cockroachdb/cockroach/pull/109750) for v22.2. The PRs update the method used to select the boundaries at which ranges of data are split during `RESTORE` to avoid splitting between two column families of the same row. +This is resolved in CockroachDB by PR #109713 for v23.1 and PR #109750 for v22.2. The PRs update the method used to select the boundaries at which ranges of data are split during `RESTORE` to avoid splitting between two column families of the same row. These fixes are available in CockroachDB maintenance releases v23.1.9 and v22.2.14. -This public issue is tracked in [109483](https://github.com/cockroachdb/cockroach/issues/109483). +This public issue is tracked in 109483. ## Mitigation diff --git a/src/current/advisories/a30821.md b/src/current/advisories/a30821.md index cce37051a4b..b5aaab62464 100644 --- a/src/current/advisories/a30821.md +++ b/src/current/advisories/a30821.md @@ -20,7 +20,7 @@ On October 23rd, 2018, an internal review uncovered a security vulnerability wit Registered users were informed privately of the issue and provided with early access to the fix, ahead of the public advisory. -This issue is tracked internally as [#30821](https://github.com/cockroachdb/cockroach/issues/30821). +This issue is tracked internally as #30821. ## Mitigation diff --git a/src/current/advisories/a42567.md b/src/current/advisories/a42567.md index fd2fe281ea0..8d4dad2fb54 100644 --- a/src/current/advisories/a42567.md +++ b/src/current/advisories/a42567.md @@ -26,7 +26,7 @@ This issue was fixed in patch revisions v2.1.10, v19.1.6, and v19.2.2 by requiri The issue also exists in versions v2.0.x and prior. However, up to and including version v2.0.x, the HTTP endpoint was not advertised safe for use on non-privileged networks. 
Additionally, versions v2.0 and prior have reached end-of-life. All users are invited to upgrade to v2.1.10 or, preferably, a later version. This issue is tracked internally as -[#42567](https://github.com/cockroachdb/cockroach/issues/42567). +#42567. ## Mitigation diff --git a/src/current/advisories/a43870.md b/src/current/advisories/a43870.md index c02b470c322..a1c38c3b41c 100644 --- a/src/current/advisories/a43870.md +++ b/src/current/advisories/a43870.md @@ -26,7 +26,7 @@ Additionally, CockroachDB v19.2.3 and v20.1 include a way to create an HTTP auth Starting in v20.1, the `root` user will also be able to use a password and log in via HTTP interactively. -This issue is tracked internally as [#43870](https://github.com/cockroachdb/cockroach/issues/43870). +This issue is tracked internally as #43870. ## Mitigation diff --git a/src/current/advisories/a44166.md b/src/current/advisories/a44166.md index dfb7a7450a0..17b6e1e62c8 100644 --- a/src/current/advisories/a44166.md +++ b/src/current/advisories/a44166.md @@ -22,7 +22,7 @@ Under reasonable production settings, a single backup job payload may exceed 5MB Starting in CockroachDB v19.2.3, new jobs payloads are reduced in size. A later version will also avoid loading old job entries in memory when viewing recent jobs. -This public issue is tracked as [#44166](https://github.com/cockroachdb/cockroach/issues/44166). +This public issue is tracked as #44166. ## Mitigation diff --git a/src/current/advisories/a44299.md b/src/current/advisories/a44299.md index 13e359a70a1..3b7cb3fa5e1 100644 --- a/src/current/advisories/a44299.md +++ b/src/current/advisories/a44299.md @@ -24,7 +24,7 @@ An identifying symptom is frequent log messages of the format: `job with ID + # + # In addition to the client certificate and key, the init-certs entrypoint will symlink + # the cluster CA to the certs directory. 
+ - name: init-certs + image: cockroachdb/cockroach-k8s-request-cert:0.4 + imagePullPolicy: IfNotPresent + command: + - "/bin/ash" + - "-ecx" + - "/request-cert -namespace=${POD_NAMESPACE} -certs-dir=/cockroach-certs -type=client -user=root -symlink-ca-from=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: client-certs + mountPath: /cockroach-certs + containers: + - name: cockroachdb-client + image: cockroachdb/cockroach:latest + imagePullPolicy: IfNotPresent + volumeMounts: + - name: client-certs + mountPath: /cockroach-certs + # Keep a pod open indefinitely so kubectl exec can be used to get a shell to it + # and run cockroach client commands, such as cockroach sql, cockroach node status, etc. + command: + - sleep + - "2147483648" # 2^31 + # This pod isn't doing anything important, so don't bother waiting to terminate it. + terminationGracePeriodSeconds: 0 + volumes: + - name: client-certs + emptyDir: {} diff --git a/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml b/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml new file mode 100644 index 00000000000..779fd77b899 --- /dev/null +++ b/src/current/files/cockroach/cloud/kubernetes/cluster-init.yaml @@ -0,0 +1,19 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: cluster-init + labels: + app: cockroachdb +spec: + template: + spec: + containers: + - name: cluster-init + image: cockroachdb/cockroach:latest + imagePullPolicy: IfNotPresent + command: + - "/cockroach/cockroach" + - "init" + - "--insecure" + - "--host=cockroachdb-0.cockroachdb" + restartPolicy: OnFailure diff --git a/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset-secure.yaml b/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset-secure.yaml new file mode 100644 index 00000000000..891a43aba62 --- /dev/null +++ 
b/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset-secure.yaml @@ -0,0 +1,285 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cockroachdb + labels: + app: cockroachdb +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cockroachdb + labels: + app: cockroachdb +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cockroachdb + labels: + app: cockroachdb +rules: +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - create + - get + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cockroachdb + labels: + app: cockroachdb +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cockroachdb +subjects: +- kind: ServiceAccount + name: cockroachdb + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cockroachdb + labels: + app: cockroachdb +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cockroachdb +subjects: +- kind: ServiceAccount + name: cockroachdb + namespace: default +--- +apiVersion: v1 +kind: Service +metadata: + # This service is meant to be used by clients of the database. It exposes a ClusterIP that will + # automatically load balance connections to the different database pods. + name: cockroachdb-public + labels: + app: cockroachdb +spec: + ports: + # The main port, served by gRPC, serves Postgres-flavor SQL, internode + # traffic and the cli. + - port: 26257 + targetPort: 26257 + name: grpc + # The secondary port serves the UI as well as health and debug endpoints. 
+ - port: 8080 + targetPort: 8080 + name: http + selector: + app: cockroachdb +--- +apiVersion: v1 +kind: Service +metadata: + # This service only exists to create DNS entries for each pod in the stateful + # set such that they can resolve each other's IP addresses. It does not + # create a load-balanced ClusterIP and should not be used directly by clients + # in most circumstances. + name: cockroachdb + labels: + app: cockroachdb + annotations: + # Use this annotation in addition to the actual publishNotReadyAddresses + # field below because the annotation will stop being respected soon but the + # field is broken in some versions of Kubernetes: + # https://github.com/kubernetes/kubernetes/issues/58662 + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" + # Enable automatic monitoring of all instances when Prometheus is running in the cluster. + prometheus.io/scrape: "true" + prometheus.io/path: "_status/vars" + prometheus.io/port: "8080" +spec: + ports: + - port: 26257 + targetPort: 26257 + name: grpc + - port: 8080 + targetPort: 8080 + name: http + # We want all pods in the StatefulSet to have their addresses published for + # the sake of the other CockroachDB pods even before they're ready, since they + # have to be able to talk to each other in order to become ready. + publishNotReadyAddresses: true + clusterIP: None + selector: + app: cockroachdb +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: cockroachdb-budget + labels: + app: cockroachdb +spec: + selector: + matchLabels: + app: cockroachdb + maxUnavailable: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: cockroachdb +spec: + serviceName: "cockroachdb" + replicas: 3 + selector: + matchLabels: + app: cockroachdb + template: + metadata: + labels: + app: cockroachdb + spec: + serviceAccountName: cockroachdb + # Init containers are run only once in the lifetime of a pod, before + # it's started up for the first time. 
It has to exit successfully + # before the pod's main containers are allowed to start. + initContainers: + # The init-certs container sends a certificate signing request to the + # kubernetes cluster. + # You can see pending requests using: kubectl get csr + # CSRs can be approved using: kubectl certificate approve + # + # All addresses used to contact a node must be specified in the --addresses arg. + # + # In addition to the node certificate and key, the init-certs entrypoint will symlink + # the cluster CA to the certs directory. + - name: init-certs + image: cockroachdb/cockroach-k8s-request-cert:0.4 + imagePullPolicy: IfNotPresent + command: + - "/bin/ash" + - "-ecx" + - "/request-cert -namespace=${POD_NAMESPACE} -certs-dir=/cockroach-certs -type=node -addresses=localhost,127.0.0.1,$(hostname -f),$(hostname -f|cut -f 1-2 -d '.'),cockroachdb-public,cockroachdb-public.$(hostname -f|cut -f 3- -d '.'),cockroachdb-public.$(hostname -f|cut -f 3-4 -d '.'),cockroachdb-public.$(hostname -f|cut -f 3 -d '.') -symlink-ca-from=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: certs + mountPath: /cockroach-certs + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - cockroachdb + topologyKey: kubernetes.io/hostname + containers: + - name: cockroachdb + image: cockroachdb/cockroach:latest + imagePullPolicy: IfNotPresent + # TODO: Change these to appropriate values for the hardware that you're running. You can see + # the resources that can be allocated on each of your Kubernetes nodes by running: + # kubectl describe nodes + # Note that requests and limits should have identical values. 
+ resources: + requests: + cpu: "2" + memory: "8Gi" + limits: + cpu: "2" + memory: "8Gi" + ports: + - containerPort: 26257 + name: grpc + - containerPort: 8080 + name: http +# We recommend that you do not configure a liveness probe on a production environment, as this can impact the availability of production databases. +# livenessProbe: +# httpGet: +# path: "/health" +# port: http +# scheme: HTTPS +# initialDelaySeconds: 30 +# periodSeconds: 5 + readinessProbe: + httpGet: + path: "/health?ready=1" + port: http + scheme: HTTPS + initialDelaySeconds: 10 + periodSeconds: 5 + failureThreshold: 2 + volumeMounts: + - name: datadir + mountPath: /cockroach/cockroach-data + - name: certs + mountPath: /cockroach/cockroach-certs + env: + - name: COCKROACH_CHANNEL + value: kubernetes-secure + - name: GOMAXPROCS + valueFrom: + resourceFieldRef: + resource: limits.cpu + divisor: "1" + - name: MEMORY_LIMIT_MIB + valueFrom: + resourceFieldRef: + resource: limits.memory + divisor: "1Mi" + command: + - "/bin/bash" + - "-ecx" + # The use of qualified `hostname -f` is crucial: + # Other nodes aren't able to look up the unqualified hostname. + # Memory caches are set as a fraction of the pod's memory limit. + - exec + /cockroach/cockroach + start + --logtostderr + --certs-dir /cockroach/cockroach-certs + --advertise-host $(hostname -f) + --http-addr 0.0.0.0 + --join cockroachdb-0.cockroachdb,cockroachdb-1.cockroachdb,cockroachdb-2.cockroachdb + --cache $(expr $MEMORY_LIMIT_MIB / 4)MiB + --max-sql-memory $(expr $MEMORY_LIMIT_MIB / 4)MiB + # No pre-stop hook is required, a SIGTERM plus some time is all that's + # needed for graceful shutdown of a node. 
+ terminationGracePeriodSeconds: 60 + volumes: + - name: datadir + persistentVolumeClaim: + claimName: datadir + - name: certs + emptyDir: {} + podManagementPolicy: Parallel + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - metadata: + name: datadir + spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 100Gi diff --git a/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml b/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml new file mode 100644 index 00000000000..f5623e9f88a --- /dev/null +++ b/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml @@ -0,0 +1,181 @@ +apiVersion: v1 +kind: Service +metadata: + # This service is meant to be used by clients of the database. It exposes a ClusterIP that will + # automatically load balance connections to the different database pods. + name: cockroachdb-public + labels: + app: cockroachdb +spec: + ports: + # The main port, served by gRPC, serves Postgres-flavor SQL, internode + # traffic and the cli. + - port: 26257 + targetPort: 26257 + name: grpc + # The secondary port serves the UI as well as health and debug endpoints. + - port: 8080 + targetPort: 8080 + name: http + selector: + app: cockroachdb +--- +apiVersion: v1 +kind: Service +metadata: + # This service only exists to create DNS entries for each pod in the stateful + # set such that they can resolve each other's IP addresses. It does not + # create a load-balanced ClusterIP and should not be used directly by clients + # in most circumstances. 
+ name: cockroachdb + labels: + app: cockroachdb + annotations: + # Use this annotation in addition to the actual publishNotReadyAddresses + # field below because the annotation will stop being respected soon but the + # field is broken in some versions of Kubernetes: + # https://github.com/kubernetes/kubernetes/issues/58662 + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" + # Enable automatic monitoring of all instances when Prometheus is running in the cluster. + prometheus.io/scrape: "true" + prometheus.io/path: "_status/vars" + prometheus.io/port: "8080" +spec: + ports: + - port: 26257 + targetPort: 26257 + name: grpc + - port: 8080 + targetPort: 8080 + name: http + # We want all pods in the StatefulSet to have their addresses published for + # the sake of the other CockroachDB pods even before they're ready, since they + # have to be able to talk to each other in order to become ready. + publishNotReadyAddresses: true + clusterIP: None + selector: + app: cockroachdb +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: cockroachdb-budget + labels: + app: cockroachdb +spec: + selector: + matchLabels: + app: cockroachdb + maxUnavailable: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: cockroachdb +spec: + serviceName: "cockroachdb" + replicas: 3 + selector: + matchLabels: + app: cockroachdb + template: + metadata: + labels: + app: cockroachdb + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - cockroachdb + topologyKey: kubernetes.io/hostname + containers: + - name: cockroachdb + image: cockroachdb/cockroach:latest + imagePullPolicy: IfNotPresent + # TODO: Change these to appropriate values for the hardware that you're running. 
You can see + # the resources that can be allocated on each of your Kubernetes nodes by running: + # kubectl describe nodes + # Note that requests and limits should have identical values. + resources: + requests: + cpu: "2" + memory: "8Gi" + limits: + cpu: "2" + memory: "8Gi" + ports: + - containerPort: 26257 + name: grpc + - containerPort: 8080 + name: http +# We recommend that you do not configure a liveness probe on a production environment, as this can impact the availability of production databases. +# livenessProbe: +# httpGet: +# path: "/health" +# port: http +# initialDelaySeconds: 30 +# periodSeconds: 5 + readinessProbe: + httpGet: + path: "/health?ready=1" + port: http + initialDelaySeconds: 10 + periodSeconds: 5 + failureThreshold: 2 + volumeMounts: + - name: datadir + mountPath: /cockroach/cockroach-data + env: + - name: COCKROACH_CHANNEL + value: kubernetes-insecure + - name: GOMAXPROCS + valueFrom: + resourceFieldRef: + resource: limits.cpu + divisor: "1" + - name: MEMORY_LIMIT_MIB + valueFrom: + resourceFieldRef: + resource: limits.memory + divisor: "1Mi" + command: + - "/bin/bash" + - "-ecx" + # The use of qualified `hostname -f` is crucial: + # Other nodes aren't able to look up the unqualified hostname. + - exec + /cockroach/cockroach + start + --logtostderr + --insecure + --advertise-host $(hostname -f) + --http-addr 0.0.0.0 + --join cockroachdb-0.cockroachdb,cockroachdb-1.cockroachdb,cockroachdb-2.cockroachdb + --cache $(expr $MEMORY_LIMIT_MIB / 4)MiB + --max-sql-memory $(expr $MEMORY_LIMIT_MIB / 4)MiB + # No pre-stop hook is required, a SIGTERM plus some time is all that's + # needed for graceful shutdown of a node. 
+ terminationGracePeriodSeconds: 60 + volumes: + - name: datadir + persistentVolumeClaim: + claimName: datadir + podManagementPolicy: Parallel + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - metadata: + name: datadir + spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 100Gi diff --git a/src/current/files/cockroach/cloud/kubernetes/example-app.yaml b/src/current/files/cockroach/cloud/kubernetes/example-app.yaml new file mode 100644 index 00000000000..1c358d5eded --- /dev/null +++ b/src/current/files/cockroach/cloud/kubernetes/example-app.yaml @@ -0,0 +1,21 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: example +spec: + replicas: 1 + selector: + matchLabels: + app: loadgen + template: + metadata: + labels: + app: loadgen + spec: + containers: + - name: loadgen + image: cockroachdb/loadgen-kv:0.1 + imagePullPolicy: IfNotPresent + command: + - "/kv" + - "postgres://root@cockroachdb-public:26257/kv?sslmode=disable" diff --git a/src/current/files/cockroach/cloud/kubernetes/multiregion/README.md b/src/current/files/cockroach/cloud/kubernetes/multiregion/README.md new file mode 100644 index 00000000000..57b1fd0b9b9 --- /dev/null +++ b/src/current/files/cockroach/cloud/kubernetes/multiregion/README.md @@ -0,0 +1,86 @@ +# Running CockroachDB across multiple Kubernetes clusters (GKE) + +The script and configuration files in this directory enable deploying +CockroachDB across multiple Kubernetes clusters that are spread across different +geographic regions and hosted on [GKE](https://cloud.google.com/kubernetes-engine). It deploys a CockroachDB +[StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) +into each separate cluster, and links them together using DNS. 
+ +To use the configuration provided here, check out this repository (or otherwise +download a copy of this directory), fill in the constants at the top of +[setup.py](setup.py) with the relevant information about your Kubernetes +clusters, optionally make any desired modifications to +[cockroachdb-statefulset-secure.yaml](cockroachdb-statefulset-secure.yaml) as +explained in [our Kubernetes performance tuning +guide](https://www.cockroachlabs.com/docs/stable/kubernetes-performance.html), +then finally run [setup.py](setup.py). + +You should see a lot of output as it does its thing, hopefully ending after +printing out `job "cluster-init-secure" created`. This implies that everything +was created successfully, and you should soon see the CockroachDB cluster +initialized with 3 pods in the "READY" state in each Kubernetes cluster. At this +point you can manage the StatefulSet in each cluster independently if you so +desire, scaling up the number of replicas, changing their resource requests, or +making other modifications as you please. + +If anything goes wrong along the way, please let us know via any of the [normal +troubleshooting +channels](https://www.cockroachlabs.com/docs/stable/support-resources.html). +While we believe this creates a highly available, maintainable multi-region +deployment, it is still pushing the boundaries of how Kubernetes is typically +used, so feedback and issue reports are very appreciated. + +## Limitations + +### Pod-to-pod connectivity + +The deployment outlined in this directory relies on pod IP addresses being +routable even across Kubernetes clusters and regions. This achieves optimal +performance, particularly when compared to alternative solutions that route all packets between clusters through load balancers, but means that it won't work in certain environments. 
+ +This requirement is satisfied by clusters deployed in cloud environments such as Google Kubernetes Engine, and +can also be satisfied by on-prem environments depending on the [Kubernetes networking setup](https://kubernetes.io/docs/concepts/cluster-administration/networking/) used. If you want to test whether your cluster will work, you can run this basic network test: + +```shell +$ kubectl run network-test --image=alpine --restart=Never -- sleep 999999 +pod "network-test" created +$ kubectl describe pod network-test | grep IP +IP: THAT-PODS-IP-ADDRESS +$ kubectl config use-context YOUR-OTHER-CLUSTERS-CONTEXT-HERE +$ kubectl run -it network-test --image=alpine --restart=Never -- ping THAT-PODS-IP-ADDRESS +If you don't see a command prompt, try pressing enter. +64 bytes from 10.12.14.10: seq=1 ttl=62 time=0.570 ms +64 bytes from 10.12.14.10: seq=2 ttl=62 time=0.449 ms +64 bytes from 10.12.14.10: seq=3 ttl=62 time=0.635 ms +64 bytes from 10.12.14.10: seq=4 ttl=62 time=0.722 ms +64 bytes from 10.12.14.10: seq=5 ttl=62 time=0.504 ms +... +``` + +If the pods can directly connect, you should see successful ping output like the +above. If they can't, you won't see any successful ping responses. Make sure to +delete the `network-test` pod in each cluster when you're done! + +### Exposing DNS servers to the Internet + +As currently configured, the way that the DNS servers from each Kubernetes +cluster are hooked together is by exposing them via a load balanced IP address +that's visible to the public Internet. This is because [Google Cloud Platform's Internal Load Balancers do not currently support clients in one region using a load balancer in another region](https://cloud.google.com/compute/docs/load-balancing/internal/#deploying_internal_load_balancing_with_clients_across_vpn_or_interconnect). + +None of the services in your Kubernetes cluster will be made accessible, but +their names could leak out to a motivated attacker. 
If this is unacceptable, +please let us know and we can demonstrate other options. [Your voice could also +help convince Google to allow clients from one region to use an Internal Load +Balancer in another](https://issuetracker.google.com/issues/111021512), +eliminating the problem. + +## Cleaning up + +To remove all the resources created in your clusters by [setup.py](setup.py), +copy the parameters you provided at the top of [setup.py](setup.py) to the top +of [teardown.py](teardown.py) and run [teardown.py](teardown.py). + +## More information + +For more information on running CockroachDB in Kubernetes, please see the [README +in the parent directory](../README.md). diff --git a/src/current/files/cockroach/cloud/kubernetes/multiregion/client-secure.yaml b/src/current/files/cockroach/cloud/kubernetes/multiregion/client-secure.yaml new file mode 100644 index 00000000000..965b5dfcc4e --- /dev/null +++ b/src/current/files/cockroach/cloud/kubernetes/multiregion/client-secure.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: Pod +metadata: + name: cockroachdb-client-secure + labels: + app: cockroachdb-client +spec: + serviceAccountName: cockroachdb + containers: + - name: cockroachdb-client + image: cockroachdb/cockroach:latest + imagePullPolicy: IfNotPresent + volumeMounts: + - name: client-certs + mountPath: /cockroach-certs + # Keep a pod open indefinitely so kubectl exec can be used to get a shell to it + # and run cockroach client commands, such as cockroach sql, cockroach node status, etc. + command: + - sleep + - "2147483648" # 2^31 + # This pod isn't doing anything important, so don't bother waiting to terminate it. 
+ terminationGracePeriodSeconds: 0 + volumes: + - name: client-certs + secret: + secretName: cockroachdb.client.root + defaultMode: 256 diff --git a/src/current/files/cockroach/cloud/kubernetes/multiregion/cluster-init-secure.yaml b/src/current/files/cockroach/cloud/kubernetes/multiregion/cluster-init-secure.yaml new file mode 100644 index 00000000000..a915f2028ec --- /dev/null +++ b/src/current/files/cockroach/cloud/kubernetes/multiregion/cluster-init-secure.yaml @@ -0,0 +1,28 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: cluster-init-secure + labels: + app: cockroachdb +spec: + template: + spec: + serviceAccountName: cockroachdb + containers: + - name: cluster-init + image: cockroachdb/cockroach:latest + imagePullPolicy: IfNotPresent + volumeMounts: + - name: client-certs + mountPath: /cockroach-certs + command: + - "/cockroach/cockroach" + - "init" + - "--certs-dir=/cockroach-certs" + - "--host=cockroachdb-0.cockroachdb" + restartPolicy: OnFailure + volumes: + - name: client-certs + secret: + secretName: cockroachdb.client.root + defaultMode: 256 diff --git a/src/current/files/cockroach/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml b/src/current/files/cockroach/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml new file mode 100644 index 00000000000..c7bde61ef84 --- /dev/null +++ b/src/current/files/cockroach/cloud/kubernetes/multiregion/cockroachdb-statefulset-secure.yaml @@ -0,0 +1,248 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cockroachdb + labels: + app: cockroachdb +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cockroachdb + labels: + app: cockroachdb +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cockroachdb + labels: + app: cockroachdb +rules: +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - create + - 
get + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cockroachdb + labels: + app: cockroachdb +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cockroachdb +subjects: +- kind: ServiceAccount + name: cockroachdb + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cockroachdb + labels: + app: cockroachdb +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cockroachdb +subjects: +- kind: ServiceAccount + name: cockroachdb + namespace: default +--- +apiVersion: v1 +kind: Service +metadata: + # This service is meant to be used by clients of the database. It exposes a ClusterIP that will + # automatically load balance connections to the different database pods. + name: cockroachdb-public + labels: + app: cockroachdb +spec: + ports: + # The main port, served by gRPC, serves Postgres-flavor SQL, internode + # traffic and the cli. + - port: 26257 + targetPort: 26257 + name: grpc + # The secondary port serves the UI as well as health and debug endpoints. + - port: 8080 + targetPort: 8080 + name: http + selector: + app: cockroachdb +--- +apiVersion: v1 +kind: Service +metadata: + # This service only exists to create DNS entries for each pod in the stateful + # set such that they can resolve each other's IP addresses. It does not + # create a load-balanced ClusterIP and should not be used directly by clients + # in most circumstances. + name: cockroachdb + labels: + app: cockroachdb + annotations: + # Use this annotation in addition to the actual publishNotReadyAddresses + # field below because the annotation will stop being respected soon but the + # field is broken in some versions of Kubernetes: + # https://github.com/kubernetes/kubernetes/issues/58662 + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" + # Enable automatic monitoring of all instances when Prometheus is running in the cluster. 
+ prometheus.io/scrape: "true" + prometheus.io/path: "_status/vars" + prometheus.io/port: "8080" +spec: + ports: + - port: 26257 + targetPort: 26257 + name: grpc + - port: 8080 + targetPort: 8080 + name: http + # We want all pods in the StatefulSet to have their addresses published for + # the sake of the other CockroachDB pods even before they're ready, since they + # have to be able to talk to each other in order to become ready. + publishNotReadyAddresses: true + clusterIP: None + selector: + app: cockroachdb +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: cockroachdb-budget + labels: + app: cockroachdb +spec: + selector: + matchLabels: + app: cockroachdb + maxUnavailable: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: cockroachdb +spec: + serviceName: "cockroachdb" + replicas: 3 + selector: + matchLabels: + app: cockroachdb + template: + metadata: + labels: + app: cockroachdb + spec: + serviceAccountName: cockroachdb + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - cockroachdb + topologyKey: kubernetes.io/hostname + containers: + - name: cockroachdb + image: cockroachdb/cockroach:latest + imagePullPolicy: IfNotPresent + ports: + - containerPort: 26257 + name: grpc + - containerPort: 8080 + name: http +# We recommend that you do not configure a liveness probe on a production environment, as this can impact the availability of production databases. 
+# livenessProbe: +# httpGet: +# path: "/health" +# port: http +# scheme: HTTPS +# initialDelaySeconds: 30 +# periodSeconds: 5 + readinessProbe: + httpGet: + path: "/health?ready=1" + port: http + scheme: HTTPS + initialDelaySeconds: 10 + periodSeconds: 5 + failureThreshold: 2 + volumeMounts: + - name: datadir + mountPath: /cockroach/cockroach-data + - name: certs + mountPath: /cockroach/cockroach-certs + env: + - name: COCKROACH_CHANNEL + value: kubernetes-multiregion + - name: GOMAXPROCS + valueFrom: + resourceFieldRef: + resource: limits.cpu + divisor: "1" + - name: MEMORY_LIMIT_MIB + valueFrom: + resourceFieldRef: + resource: limits.memory + divisor: "1Mi" + command: + - "/bin/bash" + - "-ecx" + # The use of qualified `hostname -f` is crucial: + # Other nodes aren't able to look up the unqualified hostname. + - exec + /cockroach/cockroach + start + --logtostderr + --certs-dir /cockroach/cockroach-certs + --advertise-host $(hostname -f) + --http-addr 0.0.0.0 + --join JOINLIST + --locality LOCALITYLIST + --cache $(expr $MEMORY_LIMIT_MIB / 4)MiB + --max-sql-memory $(expr $MEMORY_LIMIT_MIB / 4)MiB + # No pre-stop hook is required, a SIGTERM plus some time is all that's + # needed for graceful shutdown of a node. 
+ terminationGracePeriodSeconds: 60 + volumes: + - name: datadir + persistentVolumeClaim: + claimName: datadir + - name: certs + secret: + secretName: cockroachdb.node + defaultMode: 256 + podManagementPolicy: Parallel + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - metadata: + name: datadir + spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 100Gi diff --git a/src/current/files/cockroach/cloud/kubernetes/multiregion/dns-lb.yaml b/src/current/files/cockroach/cloud/kubernetes/multiregion/dns-lb.yaml new file mode 100644 index 00000000000..b70474cbd8f --- /dev/null +++ b/src/current/files/cockroach/cloud/kubernetes/multiregion/dns-lb.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: + # TODO: Check whether AWS/Azure can use internal load balancers. Google + # can't, unfortunately. + # service.beta.kubernetes.io/aws-load-balancer-internal: "true" + # service.beta.kubernetes.io/azure-load-balancer-internal: "true" + # cloud.google.com/load-balancer-type: "Internal" + labels: + k8s-app: kube-dns + name: kube-dns-lb + namespace: kube-system +spec: + ports: + - name: dns + port: 53 + protocol: UDP + targetPort: 53 + selector: + k8s-app: kube-dns + sessionAffinity: None + type: LoadBalancer diff --git a/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml b/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml new file mode 100644 index 00000000000..2806594ba3a --- /dev/null +++ b/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml @@ -0,0 +1,286 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cockroachdb + labels: + app: cockroachdb +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cockroachdb + labels: + app: cockroachdb +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - get +--- 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cockroachdb + labels: + app: cockroachdb +rules: +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - create + - get + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cockroachdb + labels: + app: cockroachdb +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cockroachdb +subjects: +- kind: ServiceAccount + name: cockroachdb + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cockroachdb + labels: + app: cockroachdb +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cockroachdb +subjects: +- kind: ServiceAccount + name: cockroachdb + namespace: default +--- +apiVersion: v1 +kind: Service +metadata: + # This service is meant to be used by clients of the database. It exposes a ClusterIP that will + # automatically load balance connections to the different database pods. + name: cockroachdb-public + labels: + app: cockroachdb +spec: + ports: + # The main port, served by gRPC, serves Postgres-flavor SQL, internode + # traffic and the cli. + - port: 26257 + targetPort: 26257 + name: grpc + # The secondary port serves the UI as well as health and debug endpoints. + - port: 8080 + targetPort: 8080 + name: http + selector: + app: cockroachdb +--- +apiVersion: v1 +kind: Service +metadata: + # This service only exists to create DNS entries for each pod in the stateful + # set such that they can resolve each other's IP addresses. It does not + # create a load-balanced ClusterIP and should not be used directly by clients + # in most circumstances. 
+ name: cockroachdb + labels: + app: cockroachdb + annotations: + # Use this annotation in addition to the actual publishNotReadyAddresses + # field below because the annotation will stop being respected soon but the + # field is broken in some versions of Kubernetes: + # https://github.com/kubernetes/kubernetes/issues/58662 + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" + # Enable automatic monitoring of all instances when Prometheus is running in the cluster. + prometheus.io/scrape: "true" + prometheus.io/path: "_status/vars" + prometheus.io/port: "8080" +spec: + ports: + - port: 26257 + targetPort: 26257 + name: grpc + - port: 8080 + targetPort: 8080 + name: http + # We want all pods in the StatefulSet to have their addresses published for + # the sake of the other CockroachDB pods even before they're ready, since they + # have to be able to talk to each other in order to become ready. + publishNotReadyAddresses: true + clusterIP: None + selector: + app: cockroachdb +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: cockroachdb-budget + labels: + app: cockroachdb +spec: + selector: + matchLabels: + app: cockroachdb + maxUnavailable: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: cockroachdb + # TODO: Use this field to specify a namespace other than "default" in which to deploy CockroachDB (e.g., us-east-1). + # namespace: +spec: + serviceName: "cockroachdb" + replicas: 3 + selector: + matchLabels: + app: cockroachdb + template: + metadata: + labels: + app: cockroachdb + spec: + serviceAccountName: cockroachdb + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - cockroachdb + topologyKey: kubernetes.io/hostname + # This init container is used to determine the availability zones of the Cockroach pods. 
The AZs are used to define --locality when starting Cockroach nodes. + initContainers: + - command: + - sh + - -ecx + - | + TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" \ + -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") + echo "aws-$(curl -H "X-aws-ec2-metadata-token: $TOKEN" \ + http://169.254.169.254/latest/meta-data/placement/availability-zone/)" \ + > /etc/cockroach-env/zone + image: byrnedo/alpine-curl:3.20 + imagePullPolicy: IfNotPresent + name: locality-container + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /etc/cockroach-env + name: cockroach-env + containers: + - name: cockroachdb + image: cockroachdb/cockroach:latest + imagePullPolicy: IfNotPresent + # TODO: Change these to appropriate values for the hardware that you're running. You can see + # the resources that can be allocated on each of your Kubernetes nodes by running: + # kubectl describe nodes + # Note that requests and limits should have identical values. + resources: + requests: + cpu: "2" + memory: "8Gi" + limits: + cpu: "2" + memory: "8Gi" + ports: + - containerPort: 26257 + name: grpc + - containerPort: 8080 + name: http +# We recommend that you do not configure a liveness probe on a production environment, as this can impact the availability of production databases. 
+# livenessProbe: +# httpGet: +# path: "/health" +# port: http +# scheme: HTTPS +# initialDelaySeconds: 30 +# periodSeconds: 5 + readinessProbe: + httpGet: + path: "/health?ready=1" + port: http + scheme: HTTPS + initialDelaySeconds: 10 + periodSeconds: 5 + failureThreshold: 2 + volumeMounts: + - name: datadir + mountPath: /cockroach/cockroach-data + - name: certs + mountPath: /cockroach/cockroach-certs + - name: cockroach-env + mountPath: /etc/cockroach-env + env: + - name: COCKROACH_CHANNEL + value: kubernetes-multiregion + - name: GOMAXPROCS + valueFrom: + resourceFieldRef: + resource: limits.cpu + divisor: "1" + - name: MEMORY_LIMIT_MIB + valueFrom: + resourceFieldRef: + resource: limits.memory + divisor: "1Mi" + command: + - "/bin/bash" + - "-ecx" + # The use of qualified `hostname -f` is crucial: + # Other nodes aren't able to look up the unqualified hostname. + - exec + /cockroach/cockroach + start + --logtostderr + --certs-dir /cockroach/cockroach-certs + --advertise-host $(hostname -f) + --http-addr 0.0.0.0 + # TODO: Replace the placeholder values in --join and --locality with the namespace of the CockroachDB cluster in each region (e.g., us-east-1). + # --join cockroachdb-0.cockroachdb.,cockroachdb-1.cockroachdb.,cockroachdb-2.cockroachdb.,cockroachdb-0.cockroachdb.,cockroachdb-1.cockroachdb.,cockroachdb-2.cockroachdb.,cockroachdb-0.cockroachdb.,cockroachdb-1.cockroachdb.,cockroachdb-2.cockroachdb. + # --locality=region=,az=$(cat /etc/cockroach-env/zone),dns=$(hostname -f) + --cache $(expr $MEMORY_LIMIT_MIB / 4)MiB + --max-sql-memory $(expr $MEMORY_LIMIT_MIB / 4)MiB + # No pre-stop hook is required, a SIGTERM plus some time is all that's + # needed for graceful shutdown of a node. 
+ terminationGracePeriodSeconds: 60 + volumes: + - name: datadir + persistentVolumeClaim: + claimName: datadir + - name: certs + secret: + secretName: cockroachdb.node + defaultMode: 256 + - name: cockroach-env + emptyDir: {} + podManagementPolicy: Parallel + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - metadata: + name: datadir + spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 100Gi diff --git a/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/configmap.yaml b/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/configmap.yaml new file mode 100644 index 00000000000..cb276be2efa --- /dev/null +++ b/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/configmap.yaml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: coredns + namespace: kube-system +data: + Corefile: | + .:53 { + errors + ready + health + kubernetes cluster.local in-addr.arpa ip6.arpa { + pods insecure + upstream + fallthrough in-addr.arpa ip6.arpa + } + prometheus :9153 + forward . /etc/resolv.conf + cache 10 + loop + reload + loadbalance + } + .svc.cluster.local:53 { # <---- Modify + log + errors + ready + cache 10 + forward . { # <---- Modify + force_tcp # <---- Modify + } + } + .svc.cluster.local:53 { # <---- Modify + log + errors + ready + cache 10 + forward . 
{ # <---- Modify + force_tcp # <---- Modify + } + } diff --git a/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml b/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml new file mode 100644 index 00000000000..e80c63f29d3 --- /dev/null +++ b/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + k8s-app: kube-dns + name: cockroachdb-dns-external + namespace: kube-system + annotations: + service.beta.kubernetes.io/aws-load-balancer-type: "nlb" +spec: + ports: + - name: dns + port: 53 + protocol: TCP + targetPort: 53 + selector: + k8s-app: kube-dns + type: LoadBalancer + loadBalancerSourceRanges: ["0.0.0.0/0"] diff --git a/src/current/files/cockroach/cloud/kubernetes/multiregion/example-app-secure.yaml b/src/current/files/cockroach/cloud/kubernetes/multiregion/example-app-secure.yaml new file mode 100644 index 00000000000..0925dc1b800 --- /dev/null +++ b/src/current/files/cockroach/cloud/kubernetes/multiregion/example-app-secure.yaml @@ -0,0 +1,30 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: example-secure +spec: + replicas: 1 + selector: + matchLabels: + app: loadgen + template: + metadata: + labels: + app: loadgen + spec: + serviceAccountName: cockroachdb + volumes: + - name: client-certs + secret: + secretName: cockroachdb.client.root + defaultMode: 256 + containers: + - name: loadgen + image: cockroachdb/loadgen-kv:0.1 + imagePullPolicy: IfNotPresent + volumeMounts: + - name: client-certs + mountPath: /cockroach-certs + command: + - "/kv" + - "postgres://root@cockroachdb-public:26257/kv?sslmode=verify-full&sslcert=/cockroach-certs/client.root.crt&sslkey=/cockroach-certs/client.root.key&sslrootcert=/cockroach-certs/ca.crt" diff --git a/src/current/files/cockroach/cloud/kubernetes/multiregion/external-name-svc.yaml b/src/current/files/cockroach/cloud/kubernetes/multiregion/external-name-svc.yaml new file 
mode 100644 index 00000000000..d45b53b8acc --- /dev/null +++ b/src/current/files/cockroach/cloud/kubernetes/multiregion/external-name-svc.yaml @@ -0,0 +1,64 @@ +# This file contains the definitions needed to expose cockroachdb in a namespace +# other than the one it's running in. +# To use this file: +# 1. Replace "YOUR_ZONE_HERE" in this file with the name of the namespace that +# cockroachdb is running in in the given cluster. +# 2. Create a secret containing the certificates in the namespace that you want +# to expose the service in (the "default" namespace is assumed by the +# certificate creation commands in setup.py): +# kubectl create secret generic cockroachdb.client.root --namespace=YOUR_ZONE_HERE --from-file=certs +# 3. Create the resources in this cluster: +# kubectl apply -f external-name-svc.yaml +# +# After completing these steps, you should be able to access the cockroachdb +# cluster at the name `cockroachdb-public` in the default Kubernetes namespace +# (or at the name `cockroachdb-public.default` from any namespace). +# +# Note that the ServiceAccount and roles defined below are only needed for +# accessing the Secret containing the root client certificate. If you are +# managing client certificates (or passwords) some other way, you can do away +# with everything in this file other than the Service. 
+kind: Service +apiVersion: v1 +metadata: + name: cockroachdb-public +spec: + type: ExternalName + externalName: cockroachdb-public.YOUR_ZONE_HERE.svc.cluster.local +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cockroachdb + labels: + app: cockroachdb +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cockroachdb + labels: + app: cockroachdb +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cockroachdb + labels: + app: cockroachdb +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cockroachdb +subjects: +- kind: ServiceAccount + name: cockroachdb + namespace: default diff --git a/src/current/files/cockroach/cloud/kubernetes/multiregion/setup.py b/src/current/files/cockroach/cloud/kubernetes/multiregion/setup.py new file mode 100644 index 00000000000..0531d47736b --- /dev/null +++ b/src/current/files/cockroach/cloud/kubernetes/multiregion/setup.py @@ -0,0 +1,183 @@ +#!/usr/bin/env python + +# Copyright 2018 The Cockroach Authors. +# +# Use of this software is governed by the CockroachDB Software License +# included in the /LICENSE file. + + +import json +import os +from subprocess import check_call,check_output +from sys import exit +from time import sleep + +# Before running the script, fill in appropriate values for all the parameters +# above the dashed line. + +# Fill in the `contexts` map with the zones of your clusters and their +# corresponding kubectl context names. 
+# +# To get the names of your kubectl "contexts" for each of your clusters, run: +# kubectl config get-contexts +# +# example: +# contexts = { +# 'us-central1-a': 'gke_cockroach-alex_us-central1-a_my-cluster', +# 'us-central1-b': 'gke_cockroach-alex_us-central1-b_my-cluster', +# 'us-west1-b': 'gke_cockroach-alex_us-west1-b_my-cluster', +# } +contexts = { +} + +# Fill in the `regions` map with the zones and corresponding regions of your +# clusters. +# +# Setting regions is optional, but recommended, because it improves cockroach's +# ability to diversify data placement if you use more than one zone in the same +# region. If you aren't specifying regions, just leave the map empty. +# +# example: +# regions = { +# 'us-central1-a': 'us-central1', +# 'us-central1-b': 'us-central1', +# 'us-west1-b': 'us-west1', +# } +regions = { +} + +# Paths to directories in which to store certificates and generated YAML files. +certs_dir = './certs' +ca_key_dir = './my-safe-directory' +generated_files_dir = './generated' + +# Path to the cockroach binary on your local machine that you want to use +# generate certificates. Defaults to trying to find cockroach in your PATH. +cockroach_path = 'cockroach' + +# ------------------------------------------------------------------------------ + +# First, do some basic input validation. 
+if len(contexts) == 0: + exit("must provide at least one Kubernetes cluster in the `contexts` map at the top of the script") + +if len(regions) != 0 and len(regions) != len(contexts): + exit("regions not specified for all kubectl contexts (%d regions, %d contexts)" % (len(regions), len(contexts))) + +try: + check_call(["which", cockroach_path]) +except: + exit("no binary found at provided path '" + cockroach_path + "'; please put a cockroach binary in your path or change the cockroach_path variable") + +for zone, context in contexts.items(): + try: + check_call(['kubectl', 'get', 'pods', '--context', context]) + except: + exit("unable to make basic API call using kubectl context '%s' for cluster in zone '%s'; please check if the context is correct and your Kubernetes cluster is working" % (context, zone)) + +# Set up the necessary directories and certificates. Ignore errors because they may already exist. +try: + os.mkdir(certs_dir) +except OSError: + pass +try: + os.mkdir(ca_key_dir) +except OSError: + pass +try: + os.mkdir(generated_files_dir) +except OSError: + pass + +check_call([cockroach_path, 'cert', 'create-ca', '--certs-dir', certs_dir, '--ca-key', ca_key_dir+'/ca.key']) +check_call([cockroach_path, 'cert', 'create-client', 'root', '--certs-dir', certs_dir, '--ca-key', ca_key_dir+'/ca.key']) + +# For each cluster, create secrets containing the node and client certificates. +# Note that we create the root client certificate in both the zone namespace +# and the default namespace so that it's easier for clients in the default +# namespace to use without additional steps. +# +# Also create a load balancer to each cluster's DNS pods. 
+for zone, context in contexts.items(): + check_call(['kubectl', 'create', 'namespace', zone, '--context', context]) + check_call(['kubectl', 'create', 'secret', 'generic', 'cockroachdb.client.root', '--from-file', certs_dir, '--context', context]) + check_call(['kubectl', 'create', 'secret', 'generic', 'cockroachdb.client.root', '--namespace', zone, '--from-file', certs_dir, '--context', context]) + check_call([cockroach_path, 'cert', 'create-node', '--certs-dir', certs_dir, '--ca-key', ca_key_dir+'/ca.key', 'localhost', '127.0.0.1', 'cockroachdb-public', 'cockroachdb-public.default', 'cockroachdb-public.'+zone, 'cockroachdb-public.%s.svc.cluster.local' % (zone), '*.cockroachdb', '*.cockroachdb.'+zone, '*.cockroachdb.%s.svc.cluster.local' % (zone)]) + check_call(['kubectl', 'create', 'secret', 'generic', 'cockroachdb.node', '--namespace', zone, '--from-file', certs_dir, '--context', context]) + check_call('rm %s/node.*' % (certs_dir), shell=True) + + check_call(['kubectl', 'apply', '-f', 'dns-lb.yaml', '--context', context]) + +# Set up each cluster to forward DNS requests for zone-scoped namespaces to the +# relevant cluster's DNS server, using load balancers in order to create a +# static IP for each cluster's DNS endpoint. +dns_ips = dict() +for zone, context in contexts.items(): + external_ip = '' + while True: + external_ip = check_output(['kubectl', 'get', 'svc', 'kube-dns-lb', '--namespace', 'kube-system', '--context', context, '--template', '{{range .status.loadBalancer.ingress}}{{.ip}}{{end}}']).decode('utf-8') + if external_ip: + break + print('Waiting for DNS load balancer IP in %s...' % (zone)) + sleep(10) + print('DNS endpoint for zone %s: %s' % (zone, external_ip)) + dns_ips[zone] = external_ip + +# Update each cluster's DNS configuration with an appropriate configmap. Note +# that we have to leave the local cluster out of its own configmap to avoid +# infinite recursion through the load balancer IP. 
We then have to delete the +# existing DNS pods in order for the new configuration to take effect. +for zone, context in contexts.items(): + remote_dns_ips = dict() + for z, ip in dns_ips.items(): + if z == zone: + continue + remote_dns_ips[z+'.svc.cluster.local'] = [ip] + config_filename = '%s/dns-configmap-%s.yaml' % (generated_files_dir, zone) + with open(config_filename, 'w') as f: + f.write("""\ +apiVersion: v1 +kind: ConfigMap +metadata: + name: kube-dns + namespace: kube-system +data: + stubDomains: | + %s +""" % (json.dumps(remote_dns_ips))) + check_call(['kubectl', 'apply', '-f', config_filename, '--namespace', 'kube-system', '--context', context]) + check_call(['kubectl', 'delete', 'pods', '-l', 'k8s-app=kube-dns', '--namespace', 'kube-system', '--context', context]) + +# Create a cockroachdb-public service in the default namespace in each cluster. +for zone, context in contexts.items(): + yaml_file = '%s/external-name-svc-%s.yaml' % (generated_files_dir, zone) + with open(yaml_file, 'w') as f: + check_call(['sed', 's/YOUR_ZONE_HERE/%s/g' % (zone), 'external-name-svc.yaml'], stdout=f) + check_call(['kubectl', 'apply', '-f', yaml_file, '--context', context]) + +# Generate the join string to be used. +join_addrs = [] +for zone in contexts: + for i in range(3): + join_addrs.append('cockroachdb-%d.cockroachdb.%s' % (i, zone)) +join_str = ','.join(join_addrs) + +# Create the cockroach resources in each cluster. +for zone, context in contexts.items(): + if zone in regions: + locality = 'region=%s,zone=%s' % (regions[zone], zone) + else: + locality = 'zone=%s' % (zone) + yaml_file = '%s/cockroachdb-statefulset-%s.yaml' % (generated_files_dir, zone) + with open(yaml_file, 'w') as f: + check_call(['sed', 's/JOINLIST/%s/g;s/LOCALITYLIST/%s/g' % (join_str, locality), 'cockroachdb-statefulset-secure.yaml'], stdout=f) + check_call(['kubectl', 'apply', '-f', yaml_file, '--namespace', zone, '--context', context]) + +# Finally, initialize the cluster. 
+print('Sleeping 30 seconds before attempting to initialize cluster to give time for volumes to be created and pods started.') +sleep(30) +for zone, context in contexts.items(): + check_call(['kubectl', 'create', '-f', 'cluster-init-secure.yaml', '--namespace', zone, '--context', context]) + # We only need run the init command in one zone given that all the zones are + # joined together as one cluster. + break diff --git a/src/current/files/cockroach/cloud/kubernetes/multiregion/teardown.py b/src/current/files/cockroach/cloud/kubernetes/multiregion/teardown.py new file mode 100644 index 00000000000..765ca3e9bbb --- /dev/null +++ b/src/current/files/cockroach/cloud/kubernetes/multiregion/teardown.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python + +# Copyright 2018 The Cockroach Authors. +# +# Use of this software is governed by the CockroachDB Software License +# included in the /LICENSE file. + + +from shutil import rmtree +from subprocess import call + +# Before running the script, fill in appropriate values for all the parameters +# above the dashed line. You should use the same values when tearing down a +# cluster that you used when setting it up. 
+ +# To get the names of your kubectl "contexts" for each of your clusters, run: +# kubectl config get-contexts +contexts = { + 'us-central1-a': 'gke_cockroach-alex_us-central1-a_dns', + 'us-central1-b': 'gke_cockroach-alex_us-central1-b_dns', + 'us-west1-b': 'gke_cockroach-alex_us-west1-b_dns', +} + +certs_dir = './certs' +ca_key_dir = './my-safe-directory' +generated_files_dir = './generated' + +# ------------------------------------------------------------------------------ + +# Delete each cluster's special zone-scoped namespace, which transitively +# deletes all resources that were created in the namespace, along with the few +# other resources we created that weren't in that namespace +for zone, context in contexts.items(): + call(['kubectl', 'delete', 'namespace', zone, '--context', context]) + call(['kubectl', 'delete', 'secret', 'cockroachdb.client.root', '--context', context]) + call(['kubectl', 'delete', '-f', 'external-name-svc.yaml', '--context', context]) + call(['kubectl', 'delete', '-f', 'dns-lb.yaml', '--context', context]) + call(['kubectl', 'delete', 'configmap', 'kube-dns', '--namespace', 'kube-system', '--context', context]) + # Restart the DNS pods to clear out our stub-domains configuration. 
+ call(['kubectl', 'delete', 'pods', '-l', 'k8s-app=kube-dns', '--namespace', 'kube-system', '--context', context]) + +try: + rmtree(certs_dir) +except OSError: + pass +try: + rmtree(ca_key_dir) +except OSError: + pass +try: + rmtree(generated_files_dir) +except OSError: + pass diff --git a/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml b/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml new file mode 100644 index 00000000000..1a678f19daf --- /dev/null +++ b/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml @@ -0,0 +1,215 @@ +# This configuration file sets up an insecure StatefulSet running CockroachDB with +# tweaks to make it more performant than our default configuration files. All +# changes from the default insecure configuration have been marked with a comment +# starting with "NOTE" or "TODO". +# +# Beware that this configuration is quite insecure. By default, it will make +# CockroachDB accessible on port 26257 on your Kubernetes nodes' network +# interfaces, meaning that if your nodes are reachable from the Internet, then +# this CockroachDB cluster will be too. To disable this behavior, remove the +# `hostNetwork` configuration field below. +# +# To use this file, customize all the parts labeled "TODO" before running: +# kubectl create -f cockroachdb-statefulset-insecure.yaml +# +# You will then have to initialize the cluster as described in the parent +# directory's README.md file. +# +# If you don't see any pods being created, it's possible that your cluster was +# not able to meet the resource requests asked for, whether it was the amount +# of CPU, memory, or disk or the disk type. 
To find information about why pods +# haven't been created, you can run: +# kubectl get events +# +# For more information on improving CockroachDB performance in Kubernetes, see +# our docs: +# https://www.cockroachlabs.com/docs/stable/kubernetes-performance.html +apiVersion: v1 +kind: Service +metadata: + # This service is meant to be used by clients of the database. It exposes a ClusterIP that will + # automatically load balance connections to the different database pods. + name: cockroachdb-public + labels: + app: cockroachdb +spec: + ports: + # The main port, served by gRPC, serves Postgres-flavor SQL, internode + # traffic and the cli. + - port: 26257 + targetPort: 26257 + name: grpc + # The secondary port serves the UI as well as health and debug endpoints. + - port: 8080 + targetPort: 8080 + name: http + selector: + app: cockroachdb +--- +apiVersion: v1 +kind: Service +metadata: + # This service only exists to create DNS entries for each pod in the stateful + # set such that they can resolve each other's IP addresses. It does not + # create a load-balanced ClusterIP and should not be used directly by clients + # in most circumstances. + name: cockroachdb + labels: + app: cockroachdb + annotations: + # Use this annotation in addition to the actual publishNotReadyAddresses + # field below because the annotation will stop being respected soon but the + # field is broken in some versions of Kubernetes: + # https://github.com/kubernetes/kubernetes/issues/58662 + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" + # Enable automatic monitoring of all instances when Prometheus is running in the cluster. 
+ prometheus.io/scrape: "true" + prometheus.io/path: "_status/vars" + prometheus.io/port: "8080" +spec: + ports: + - port: 26257 + targetPort: 26257 + name: grpc + - port: 8080 + targetPort: 8080 + name: http + # We want all pods in the StatefulSet to have their addresses published for + # the sake of the other CockroachDB pods even before they're ready, since they + # have to be able to talk to each other in order to become ready. + publishNotReadyAddresses: true + clusterIP: None + selector: + app: cockroachdb +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: cockroachdb-budget + labels: + app: cockroachdb +spec: + selector: + matchLabels: + app: cockroachdb + maxUnavailable: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: cockroachdb +spec: + serviceName: "cockroachdb" + replicas: 3 + selector: + matchLabels: + app: cockroachdb + template: + metadata: + labels: + app: cockroachdb + spec: + # NOTE: Running with `hostNetwork: true` means that CockroachDB will use + # the host machines' IP address and hostname, and that nothing else on + # the machines will be able to use the same ports. This means that only 1 + # CockroachDB pod will ever be schedulable on the same machine, because + # otherwise their ports would conflict. + # + # If your client pods generate a lot of network traffic to and from the + # CockroachDB cluster, you may see a benefit to doing the same thing in + # their configurations. + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + # NOTE: If you are running clients that generate heavy load, you may find + # it useful to copy this anti-affinity policy into the client pods' + # configurations as well to avoid running them on the same machines as + # CockroachDB and interfering with each other's performance. 
+ affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - cockroachdb + topologyKey: kubernetes.io/hostname + containers: + - name: cockroachdb + # NOTE: Always use the most recent version of CockroachDB for the best + # performance and reliability. + image: cockroachdb/cockroach:latest + imagePullPolicy: IfNotPresent + # TODO: Change these to appropriate values for the hardware that you're running. You can see + # the resources that can be allocated on each of your Kubernetes nodes by running: + # kubectl describe nodes + # Note that requests and limits should have identical values. + resources: + requests: + cpu: "2" + memory: "8Gi" + limits: + cpu: "2" + memory: "8Gi" + ports: + - containerPort: 26257 + name: grpc + - containerPort: 8080 + name: http +# We recommend that you do not configure a liveness probe on a production environment, as this can impact the availability of production databases. +# livenessProbe: +# httpGet: +# path: "/health" +# port: http +# initialDelaySeconds: 30 +# periodSeconds: 5 + readinessProbe: + httpGet: + path: "/health?ready=1" + port: http + initialDelaySeconds: 10 + periodSeconds: 5 + failureThreshold: 2 + volumeMounts: + - name: datadir + mountPath: /cockroach/cockroach-data + env: + - name: COCKROACH_CHANNEL + value: kubernetes-insecure + command: + - "/bin/bash" + - "-ecx" + # The use of qualified `hostname -f` is crucial: + # Other nodes aren't able to look up the unqualified hostname. + - "exec /cockroach/cockroach start --logtostderr --insecure --advertise-host $(hostname -f) --http-addr 0.0.0.0 --join cockroachdb-0.cockroachdb,cockroachdb-1.cockroachdb,cockroachdb-2.cockroachdb --cache 25% --max-sql-memory 25%" + # No pre-stop hook is required, a SIGTERM plus some time is all that's + # needed for graceful shutdown of a node. 
+ terminationGracePeriodSeconds: 60 + volumes: + - name: datadir + persistentVolumeClaim: + claimName: datadir + podManagementPolicy: Parallel + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - metadata: + name: datadir + spec: + accessModes: + - "ReadWriteOnce" + # TODO: This specifically asks for a storage class with the name "ssd". A + # storage class of this name doesn't exist by default. See our docs for + # more information on how to create an optimized storage class for use here: + # https://www.cockroachlabs.com/docs/stable/kubernetes-performance.html#disk-type + storageClassName: ssd + resources: + requests: + # TODO: This asks for a fairly large disk by default because on + # certain popular clouds there is a direct correlation between disk + # size and the IOPS provisioned to the disk. Change this as necessary + # to suit your needs, but be aware that smaller disks will typically + # mean worse performance. + storage: 1024Gi diff --git a/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml b/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml new file mode 100644 index 00000000000..44f48abbf36 --- /dev/null +++ b/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml @@ -0,0 +1,312 @@ +# This configuration file sets up a secure StatefulSet running CockroachDB with +# tweaks to make it more performant than our default configuration files. All +# changes from the default secure configuration have been marked with a comment +# starting with "NOTE" or "TODO". +# +# To use it, customize all the parts of the file labeled "TODO" before running: +# kubectl create -f cockroachdb-statefulset-secure.yaml +# +# You will then have to approve certificate-signing requests and initialize the +# cluster as described in the parent directory's README.md file. 
+# +# If you don't see any pods being created, it's possible that your cluster was +# not able to meet the resource requests asked for, whether it was the amount +# of CPU, memory, or disk or the disk type. To find information about why pods +# haven't been created, you can run: +# kubectl get events +# +# For more information on improving CockroachDB performance in Kubernetes, see +# our docs: +# https://www.cockroachlabs.com/docs/stable/kubernetes-performance.html +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cockroachdb + labels: + app: cockroachdb +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cockroachdb + labels: + app: cockroachdb +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cockroachdb + labels: + app: cockroachdb +rules: +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - create + - get + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cockroachdb + labels: + app: cockroachdb +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cockroachdb +subjects: +- kind: ServiceAccount + name: cockroachdb + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cockroachdb + labels: + app: cockroachdb +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cockroachdb +subjects: +- kind: ServiceAccount + name: cockroachdb + namespace: default +--- +apiVersion: v1 +kind: Service +metadata: + # This service is meant to be used by clients of the database. It exposes a ClusterIP that will + # automatically load balance connections to the different database pods. + name: cockroachdb-public + labels: + app: cockroachdb +spec: + ports: + # The main port, served by gRPC, serves Postgres-flavor SQL, internode + # traffic and the cli. 
+ - port: 26257 + targetPort: 26257 + name: grpc + # The secondary port serves the UI as well as health and debug endpoints. + - port: 8080 + targetPort: 8080 + name: http + selector: + app: cockroachdb +--- +apiVersion: v1 +kind: Service +metadata: + # This service only exists to create DNS entries for each pod in the stateful + # set such that they can resolve each other's IP addresses. It does not + # create a load-balanced ClusterIP and should not be used directly by clients + # in most circumstances. + name: cockroachdb + labels: + app: cockroachdb + annotations: + # Use this annotation in addition to the actual publishNotReadyAddresses + # field below because the annotation will stop being respected soon but the + # field is broken in some versions of Kubernetes: + # https://github.com/kubernetes/kubernetes/issues/58662 + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" + # Enable automatic monitoring of all instances when Prometheus is running in the cluster. + prometheus.io/scrape: "true" + prometheus.io/path: "_status/vars" + prometheus.io/port: "8080" +spec: + ports: + - port: 26257 + targetPort: 26257 + name: grpc + - port: 8080 + targetPort: 8080 + name: http + # We want all pods in the StatefulSet to have their addresses published for + # the sake of the other CockroachDB pods even before they're ready, since they + # have to be able to talk to each other in order to become ready. 
+ publishNotReadyAddresses: true + clusterIP: None + selector: + app: cockroachdb +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: cockroachdb-budget + labels: + app: cockroachdb +spec: + selector: + matchLabels: + app: cockroachdb + maxUnavailable: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: cockroachdb +spec: + serviceName: "cockroachdb" + replicas: 3 + selector: + matchLabels: + app: cockroachdb + template: + metadata: + labels: + app: cockroachdb + spec: + serviceAccountName: cockroachdb + # NOTE: Running with `hostNetwork: true` means that CockroachDB will use + # the host machines' IP address and hostname, and that nothing else on + # the machines will be able to use the same ports. This means that only 1 + # CockroachDB pod will ever be schedulable on the same machine, because + # otherwise their ports would conflict. + # + # If your client pods generate a lot of network traffic to and from the + # CockroachDB cluster, you may see a benefit to doing the same thing in + # their configurations. + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + # Init containers are run only once in the lifetime of a pod, before + # it's started up for the first time. It has to exit successfully + # before the pod's main containers are allowed to start. + initContainers: + # The init-certs container sends a certificate signing request to the + # kubernetes cluster. + # You can see pending requests using: kubectl get csr + # CSRs can be approved using: kubectl certificate approve + # + # All addresses used to contact a node must be specified in the --addresses arg. + # + # In addition to the node certificate and key, the init-certs entrypoint will symlink + # the cluster CA to the certs directory. 
+ - name: init-certs + image: cockroachdb/cockroach-k8s-request-cert:0.4 + imagePullPolicy: IfNotPresent + command: + - "/bin/ash" + - "-ecx" + - "/request-cert -namespace=${POD_NAMESPACE} -certs-dir=/cockroach-certs -type=node -addresses=localhost,127.0.0.1,$(hostname -f),$(hostname -f|cut -f 1-2 -d '.'),cockroachdb-public,cockroachdb-public.$(hostname -f|cut -f 3- -d '.'),cockroachdb-public.$(hostname -f|cut -f 3-4 -d '.'),cockroachdb-public.$(hostname -f|cut -f 3 -d '.') -symlink-ca-from=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: certs + mountPath: /cockroach-certs + # NOTE: If you are running clients that generate heavy load, you may find + # it useful to copy this anti-affinity policy into the client pods' + # configurations as well to avoid running them on the same machines as + # CockroachDB and interfering with each other's performance. + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - cockroachdb + topologyKey: kubernetes.io/hostname + containers: + - name: cockroachdb + # NOTE: Always use the most recent version of CockroachDB for the best + # performance and reliability. + image: cockroachdb/cockroach:latest + imagePullPolicy: IfNotPresent + # TODO: Change these to appropriate values for the hardware that you're running. You can see + # the resources that can be allocated on each of your Kubernetes nodes by running: + # kubectl describe nodes + # Note that requests and limits should have identical values. 
+ resources: + requests: + cpu: "2" + memory: "8Gi" + limits: + cpu: "2" + memory: "8Gi" + ports: + - containerPort: 26257 + name: grpc + - containerPort: 8080 + name: http +# We recommend that you do not configure a liveness probe on a production environment, as this can impact the availability of production databases. +# livenessProbe: +# httpGet: +# path: "/health" +# port: http +# scheme: HTTPS +# initialDelaySeconds: 30 +# periodSeconds: 5 + readinessProbe: + httpGet: + path: "/health?ready=1" + port: http + scheme: HTTPS + initialDelaySeconds: 10 + periodSeconds: 5 + failureThreshold: 2 + volumeMounts: + - name: datadir + mountPath: /cockroach/cockroach-data + - name: certs + mountPath: /cockroach/cockroach-certs + env: + - name: COCKROACH_CHANNEL + value: kubernetes-secure + command: + - "/bin/bash" + - "-ecx" + # The use of qualified `hostname -f` is crucial: + # Other nodes aren't able to look up the unqualified hostname. + - "exec /cockroach/cockroach start --logtostderr --certs-dir /cockroach/cockroach-certs --advertise-host $(hostname -f) --http-addr 0.0.0.0 --join cockroachdb-0.cockroachdb,cockroachdb-1.cockroachdb,cockroachdb-2.cockroachdb --cache 25% --max-sql-memory 25%" + # No pre-stop hook is required, a SIGTERM plus some time is all that's + # needed for graceful shutdown of a node. + terminationGracePeriodSeconds: 60 + volumes: + - name: datadir + persistentVolumeClaim: + claimName: datadir + - name: certs + emptyDir: {} + podManagementPolicy: Parallel + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - metadata: + name: datadir + spec: + accessModes: + - "ReadWriteOnce" + # TODO: This specifically asks for a storage class with the name "ssd". A + # storage class of this name doesn't exist by default. 
See our docs for + # more information on how to create an optimized storage class for use here: + # https://www.cockroachlabs.com/docs/stable/kubernetes-performance.html#disk-type + storageClassName: ssd + resources: + requests: + # TODO: This asks for a fairly large disk by default because on + # certain popular clouds there is a direct correlation between disk + # size and the IOPS provisioned to the disk. Change this as necessary + # to suit your needs, but be aware that smaller disks will typically + # mean worse performance. + storage: 1024Gi diff --git a/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml b/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml new file mode 100644 index 00000000000..275dbe1b81b --- /dev/null +++ b/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml @@ -0,0 +1,205 @@ +# GENERATED FILE - DO NOT EDIT +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + labels: + app: cockroachdb + prometheus: cockroachdb + role: alert-rules + name: prometheus-cockroachdb-rules +spec: + groups: + - name: rules/dummy.rules + rules: + - alert: TestAlertManager + expr: vector(1) + - name: rules/aggregation.rules + rules: + - expr: sum without(store) (capacity{job="cockroachdb"}) + record: node:capacity + - expr: sum without(instance) (node:capacity{job="cockroachdb"}) + record: cluster:capacity + - expr: sum without(store) (capacity_available{job="cockroachdb"}) + record: node:capacity_available + - expr: sum without(instance) (node:capacity_available{job="cockroachdb"}) + record: cluster:capacity_available + - expr: capacity_available{job="cockroachdb"} / capacity{job="cockroachdb"} + record: capacity_available:ratio + - expr: node:capacity_available{job="cockroachdb"} / node:capacity{job="cockroachdb"} + record: node:capacity_available:ratio + - expr: cluster:capacity_available{job="cockroachdb"} / cluster:capacity{job="cockroachdb"} + record: 
cluster:capacity_available:ratio + - expr: rate(txn_durations_bucket{job="cockroachdb"}[1m]) + record: txn_durations_bucket:rate1m + - expr: histogram_quantile(0.5, txn_durations_bucket:rate1m) + record: txn_durations:rate1m:quantile_50 + - expr: histogram_quantile(0.75, txn_durations_bucket:rate1m) + record: txn_durations:rate1m:quantile_75 + - expr: histogram_quantile(0.9, txn_durations_bucket:rate1m) + record: txn_durations:rate1m:quantile_90 + - expr: histogram_quantile(0.95, txn_durations_bucket:rate1m) + record: txn_durations:rate1m:quantile_95 + - expr: histogram_quantile(0.99, txn_durations_bucket:rate1m) + record: txn_durations:rate1m:quantile_99 + - expr: rate(exec_latency_bucket{job="cockroachdb"}[1m]) + record: exec_latency_bucket:rate1m + - expr: histogram_quantile(0.5, exec_latency_bucket:rate1m) + record: exec_latency:rate1m:quantile_50 + - expr: histogram_quantile(0.75, exec_latency_bucket:rate1m) + record: exec_latency:rate1m:quantile_75 + - expr: histogram_quantile(0.9, exec_latency_bucket:rate1m) + record: exec_latency:rate1m:quantile_90 + - expr: histogram_quantile(0.95, exec_latency_bucket:rate1m) + record: exec_latency:rate1m:quantile_95 + - expr: histogram_quantile(0.99, exec_latency_bucket:rate1m) + record: exec_latency:rate1m:quantile_99 + - expr: rate(round_trip_latency_bucket{job="cockroachdb"}[1m]) + record: round_trip_latency_bucket:rate1m + - expr: histogram_quantile(0.5, round_trip_latency_bucket:rate1m) + record: round_trip_latency:rate1m:quantile_50 + - expr: histogram_quantile(0.75, round_trip_latency_bucket:rate1m) + record: round_trip_latency:rate1m:quantile_75 + - expr: histogram_quantile(0.9, round_trip_latency_bucket:rate1m) + record: round_trip_latency:rate1m:quantile_90 + - expr: histogram_quantile(0.95, round_trip_latency_bucket:rate1m) + record: round_trip_latency:rate1m:quantile_95 + - expr: histogram_quantile(0.99, round_trip_latency_bucket:rate1m) + record: round_trip_latency:rate1m:quantile_99 + - expr: 
rate(sql_exec_latency_bucket{job="cockroachdb"}[1m]) + record: sql_exec_latency_bucket:rate1m + - expr: histogram_quantile(0.5, sql_exec_latency_bucket:rate1m) + record: sql_exec_latency:rate1m:quantile_50 + - expr: histogram_quantile(0.75, sql_exec_latency_bucket:rate1m) + record: sql_exec_latency:rate1m:quantile_75 + - expr: histogram_quantile(0.9, sql_exec_latency_bucket:rate1m) + record: sql_exec_latency:rate1m:quantile_90 + - expr: histogram_quantile(0.95, sql_exec_latency_bucket:rate1m) + record: sql_exec_latency:rate1m:quantile_95 + - expr: histogram_quantile(0.99, sql_exec_latency_bucket:rate1m) + record: sql_exec_latency:rate1m:quantile_99 + - expr: rate(raft_process_logcommit_latency_bucket{job="cockroachdb"}[1m]) + record: raft_process_logcommit_latency_bucket:rate1m + - expr: histogram_quantile(0.5, raft_process_logcommit_latency_bucket:rate1m) + record: raft_process_logcommit_latency:rate1m:quantile_50 + - expr: histogram_quantile(0.75, raft_process_logcommit_latency_bucket:rate1m) + record: raft_process_logcommit_latency:rate1m:quantile_75 + - expr: histogram_quantile(0.9, raft_process_logcommit_latency_bucket:rate1m) + record: raft_process_logcommit_latency:rate1m:quantile_90 + - expr: histogram_quantile(0.95, raft_process_logcommit_latency_bucket:rate1m) + record: raft_process_logcommit_latency:rate1m:quantile_95 + - expr: histogram_quantile(0.99, raft_process_logcommit_latency_bucket:rate1m) + record: raft_process_logcommit_latency:rate1m:quantile_99 + - expr: rate(raft_process_commandcommit_latency_bucket{job="cockroachdb"}[1m]) + record: raft_process_commandcommit_latency_bucket:rate1m + - expr: histogram_quantile(0.5, raft_process_commandcommit_latency_bucket:rate1m) + record: raft_process_commandcommit_latency:rate1m:quantile_50 + - expr: histogram_quantile(0.75, raft_process_commandcommit_latency_bucket:rate1m) + record: raft_process_commandcommit_latency:rate1m:quantile_75 + - expr: histogram_quantile(0.9, 
raft_process_commandcommit_latency_bucket:rate1m) + record: raft_process_commandcommit_latency:rate1m:quantile_90 + - expr: histogram_quantile(0.95, raft_process_commandcommit_latency_bucket:rate1m) + record: raft_process_commandcommit_latency:rate1m:quantile_95 + - expr: histogram_quantile(0.99, raft_process_commandcommit_latency_bucket:rate1m) + record: raft_process_commandcommit_latency:rate1m:quantile_99 + - name: rules/alerts.rules + rules: + - alert: InstanceDown + annotations: + description: '{{ $labels.instance }} for cluster {{ $labels.cluster }} has + been down for more than 5 minutes.' + summary: Instance {{ $labels.instance }} down + expr: up{job="cockroachdb"} == 0 + for: 5m + - alert: InstanceDead + annotations: + description: '{{ $labels.instance }} for cluster {{ $labels.cluster }} has + been down for more than 15 minutes.' + summary: Instance {{ $labels.instance }} dead + expr: up{job="cockroachdb"} == 0 + for: 15m + - alert: InstanceRestart + annotations: + description: '{{ $labels.instance }} for cluster {{ $labels.cluster }} restarted + {{ $value }} time(s) in 10m' + summary: Instance {{ $labels.instance }} restarted + expr: resets(sys_uptime{job="cockroachdb"}[10m]) > 0 and resets(sys_uptime{job="cockroachdb"}[10m]) + < 5 + - alert: InstanceFlapping + annotations: + description: '{{ $labels.instance }} for cluster {{ $labels.cluster }} restarted + {{ $value }} time(s) in 10m' + summary: Instance {{ $labels.instance }} flapping + expr: resets(sys_uptime{job="cockroachdb"}[10m]) > 5 + - alert: LivenessMismatch + annotations: + description: Prometheus and {{ $labels.instance }} disagree on liveness + summary: Liveness mismatch for {{ $labels.instance }} + expr: (liveness_livenodes{job="cockroachdb"}) != ignoring(instance) group_left() + (count by(cluster, job) (up{job="cockroachdb"} == 1)) + for: 5m + labels: + severity: testing + - alert: VersionMismatch + annotations: + description: Cluster {{ $labels.cluster }} running {{ $value }} different + 
versions + summary: Binary version mismatch on {{ $labels.cluster }} + expr: count by(cluster) (count_values by(tag, cluster) ("version", build_timestamp{job="cockroachdb"})) + > 1 + for: 30m + - alert: StoreDiskLow + annotations: + summary: Store {{ $labels.store }} on node {{ $labels.instance }} at {{ $value + }} available disk fraction + expr: capacity_available:ratio{job="cockroachdb"} < 0.15 + - alert: ClusterDiskLow + annotations: + summary: Cluster {{ $labels.cluster }} at {{ $value }} available disk fraction + expr: cluster:capacity_available:ratio{job="cockroachdb"} < 0.2 + - alert: ZeroSQLQps + annotations: + summary: Instance {{ $labels.instance }} has SQL connections but no queries + expr: sql_conns{job="cockroachdb"} > 0 and rate(sql_query_count{job="cockroachdb"}[5m]) + == 0 + for: 10m + - alert: UnavailableRanges + annotations: + summary: Instance {{ $labels.instance }} has {{ $value }} unavailable ranges + expr: (sum by(instance, cluster) (ranges_unavailable{job="cockroachdb"})) > + 0 + for: 10m + labels: + severity: testing + - alert: NoLeaseRanges + annotations: + summary: Instance {{ $labels.instance }} has {{ $value }} ranges without leases + expr: (sum by(instance, cluster) (replicas_leaders_not_leaseholders{job="cockroachdb"})) + > 0 + for: 10m + labels: + severity: testing + - alert: CACertificateExpiresSoon + annotations: + summary: CA certificate for {{ $labels.instance }} expires in less than a + year + expr: (security_certificate_expiration_ca{job="cockroachdb"} > 0) and (security_certificate_expiration_ca{job="cockroachdb"} + - time()) < 86400 * 366 + labels: + frequency: daily + - alert: NodeCertificateExpiresSoon + annotations: + summary: Node certificate for {{ $labels.instance }} expires in less than + six months + expr: (security_certificate_expiration_node{job="cockroachdb"} > 0) and (security_certificate_expiration_node{job="cockroachdb"} + - time()) < 86400 * 183 + labels: + frequency: daily + - alert: HighOpenFDCount + 
annotations: + summary: 'Too many open file descriptors on {{ $labels.instance }}: {{ $value + }} fraction used' + expr: sys_fd_open{job="cockroachdb"} / sys_fd_softlimit{job="cockroachdb"} > + 0.8 + for: 10m + labels: + severity: testing diff --git a/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager-config.yaml b/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager-config.yaml new file mode 100644 index 00000000000..b558d5f6f6d --- /dev/null +++ b/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager-config.yaml @@ -0,0 +1,14 @@ +global: + resolve_timeout: 5m +route: + group_by: ['job'] + group_wait: 30s + group_interval: 5m + repeat_interval: 12h + receiver: 'webhook' +receivers: +# Note that this is a dummy configuration just to allow AlertManager to start +- name: 'webhook' + webhook_configs: + - url: 'http://alertmanagerwh:30500/' + diff --git a/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml b/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml new file mode 100644 index 00000000000..5c451a8557f --- /dev/null +++ b/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml @@ -0,0 +1,27 @@ +# Have prometheus-operator run an AlertManager cluster +apiVersion: monitoring.coreos.com/v1 +kind: Alertmanager +metadata: + name: cockroachdb + labels: + app: cockroachdb +spec: + replicas: 3 +--- +# Create a Service to allow Prometheus to talk to AlertManager +apiVersion: v1 +kind: Service +metadata: + name: alertmanager-cockroachdb + labels: + app: cockroachdb +spec: + type: ClusterIP + ports: + - name: web + port: 9093 + protocol: TCP + targetPort: web + selector: + alertmanager: cockroachdb + diff --git a/src/current/files/cockroach/cloud/kubernetes/prometheus/prometheus.yaml b/src/current/files/cockroach/cloud/kubernetes/prometheus/prometheus.yaml new file mode 100644 index 00000000000..c2073a9cbce --- /dev/null +++ 
b/src/current/files/cockroach/cloud/kubernetes/prometheus/prometheus.yaml @@ -0,0 +1,94 @@ +# Create a service account for prometheus to run under +apiVersion: v1 +kind: ServiceAccount +metadata: + name: prometheus + labels: + app: cockroachdb +--- +# Define the access permissions that prometheus will run with +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: prometheus + labels: + app: cockroachdb +rules: +- apiGroups: [""] + resources: + - nodes + - services + - endpoints + - pods + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: + - configmaps + verbs: ["get"] +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +--- +# Associate the service account with the role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: prometheus + labels: + app: cockroachdb +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: prometheus +subjects: +- kind: ServiceAccount + name: prometheus + namespace: default +--- +# Select any services with the prometheus:cockroachdb label +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: cockroachdb + labels: + app: cockroachdb + prometheus: cockroachdb +spec: + selector: + matchLabels: + prometheus: cockroachdb + endpoints: + - port: http + path: /_status/vars + tlsConfig: + ca: + secret: + key: ca.crt + # This is the secret name used by the CockroachDB Kubernetes Operator. 
+ # When using a custom CA, replace this with your secret name + name: cockroachdb-node + serverName: "127.0.0.1" +--- +# Have prometheus-operator run a replicated Prometheus cluster +apiVersion: monitoring.coreos.com/v1 +kind: Prometheus +metadata: + name: cockroachdb + labels: + app: cockroachdb +spec: + serviceAccountName: prometheus + alerting: + alertmanagers: + - namespace: default + name: alertmanager-cockroachdb + port: web + serviceMonitorSelector: + matchLabels: + prometheus: cockroachdb + resources: + requests: + memory: 400Mi + ruleSelector: + matchLabels: + role: alert-rules + prometheus: cockroachdb diff --git a/src/current/files/cockroach/docs/RFCS/20160421_distributed_sql.md b/src/current/files/cockroach/docs/RFCS/20160421_distributed_sql.md new file mode 100644 index 00000000000..1e48ea2bb8e --- /dev/null +++ b/src/current/files/cockroach/docs/RFCS/20160421_distributed_sql.md @@ -0,0 +1,1517 @@ +- Feature Name: Distributing SQL queries +- Status: completed +- Start Date: 2015/02/12 +- Authors: andreimatei, knz, RaduBerinde +- RFC PR: [#6067](https://github.com/cockroachdb/cockroach/pull/6067) +- Cockroach Issue: + +# Table of Contents + + + * [Table of Contents](#table-of-contents) + * [Summary](#summary) + * [Vocabulary](#vocabulary) + * [Motivation](#motivation) + * [Detailed design](#detailed-design) + * [Overview](#overview) + * [Logical model and logical plans](#logical-model-and-logical-plans) + * [Example 1](#example-1) + * [Example 2](#example-2) + * [Back propagation of ordering requirements](#back-propagation-of-ordering-requirements) + * [Example 3](#example-3) + * [Types of aggregators](#types-of-aggregators) + * [From logical to physical](#from-logical-to-physical) + * [Processors](#processors) + * [Joins](#joins) + * [Join-by-lookup](#join-by-lookup) + * [Stream joins](#stream-joins) + * [Inter-stream ordering](#inter-stream-ordering) + * [Execution infrastructure](#execution-infrastructure) + * [Creating a local plan: the 
ScheduleFlows RPC](#creating-a-local-plan-the-scheduleflows-rpc) + * [Local scheduling of flows](#local-scheduling-of-flows) + * [Mailboxes](#mailboxes) + * [On-the-fly flows setup](#on-the-fly-flows-setup) + * [Retiring flows](#retiring-flows) + * [Error handling](#error-handling) + * [A more complex example: Daily Promotion](#a-more-complex-example-daily-promotion) + * [Implementation strategy](#implementation-strategy) + * [Logical planning](#logical-planning) + * [Physical planning](#physical-planning) + * [Processor infrastructure and implementing processors](#processor-infrastructure-and-implementing-processors) + * [Joins](#joins-1) + * [Scheduling](#scheduling) + * [KV integration](#kv-integration) + * [Implementation notes](#implementation-notes) + * [Test infrastructure](#test-infrastructure) + * [Visualisation/tracing](#visualisationtracing) + * [Alternate approaches considered (and rejected)](#alternate-approaches-considered-and-rejected) + * [More logic in the KV layer](#more-logic-in-the-kv-layer) + * [Complexity](#complexity) + * [Applicability](#applicability) + * [SQL2SQL: Distributed SQL layer](#sql2sql-distributed-sql-layer) + * [Sample high-level query flows](#sample-high-level-query-flows) + * [Complexity](#complexity-1) + * [Applicability](#applicability-1) + * [Spark: Compiling SQL into a data-parallel language running on top of a distributed-execution runtime](#spark-compiling-sql-into-a-data-parallel-language-running-on-top-of-a-distributed-execution-runtime) + * [Sample program](#sample-program) + * [Complexity](#complexity-2) + * [Unresolved questions](#unresolved-questions) + + +# Summary + +In this RFC we propose a general approach for distributing SQL processing and +moving computation closer to the data. The goal is to trigger an initial +discussion and not a complete detailed design. 
+ +## Vocabulary + +- KV - the KV system in cockroach, defined by its key-value, range and batch API +- k/v - a key-value pair, usually used to refer to an entry in KV +- Node - machine in the cluster +- Client / Client-side - the SQL client +- Gateway node / Gateway-side - the cluster node to which the client SQL query is delivered first +- Leader node / Leader-side - the cluster node which resolves a KV operation and + has local access to the respective KV data + +Most of the following text reads from the gateway-side perspective, where the query parsing and planning currently runs. + +# Motivation + +The desired improvements are listed below. + +1. Remote-side filtering + + When querying for a set of rows that match a filtering expression, we + currently query all the keys in certain ranges and process the filters after + receiving the data on the gateway node over the network. Instead, we want the + filtering expression to be processed by the lease holder or remote node, saving on + network traffic and related processing. + + The remote-side filtering does not need to support full SQL expressions - it + can support a subset that includes common expressions (e.g. everything that + can be translated into expressions operating on strings) with the requesting + node applying a "final" filter. + +2. Remote-side updates and deletes + + For statements like `UPDATE .. WHERE` and `DELETE .. WHERE` we currently + perform a query, receive results at the gateway over the network, and then + perform the update or deletion there. This involves too many round-trips; + instead, we want the query and updates to happen on the node which has access + to the data. + + Again, this does not need to cover all possible SQL expressions (we can keep + a working "slow" path for some cases). However, to cover the most important + queries we still need more than simple filtering expressions (`UPDATE` + commonly uses expressions and functions for calculating new values). + +3. 
Distributed SQL operations + + Currently SQL operations are processed by the entry node and thus their +performance does not scale with the size of the cluster. We want to be able to +distribute the processing on multiple nodes (parallelization for performance). + + 1. Distributed joins + + In joins, we produce results by matching the values of certain columns + among multiple tables. One strategy for distributing this computation is + based on hashing: `K` nodes are chosen and each of the nodes with fast + access to the table data sends the results to the `K` nodes according to a + hash function computed on the join columns (guaranteeing that all results + with the same values on these columns go to the same node). Hash-joins are + employed e.g. by F1. + + + Distributed joins and remote-side filtering can be needed together: + ```sql + -- find all orders placed around the customer's birthday. Notice the + -- filtering needs to happen on the results. I've complicated the filtering + -- condition because a simple equality check could have been made part of + -- the join. + SELECT * FROM Customers c INNER JOIN Orders o ON c.ID = i.CustomerID + WHERE DayOfYear(c.birthday) - DayOfYear(o.date) < 7 + ``` + + 2. Distributed aggregation + + When using `GROUP BY` we aggregate results according to a set of columns or + expressions and compute a function on each group of results. A strategy + similar to hash-joins can be employed to distribute the aggregation. + + 3. Distributed sorting + + When ordering results, we want to be able to distribute the sorting effort. + Nodes would sort their own data sets and one or more nodes would merge the + results. + +# Detailed design + +## Overview + +The proposed approach was originally inspired by [Sawzall][1] - a project by +Rob Pike et al. at Google that proposes a "shell" (high-level language +interpreter) to ease the exploitation of MapReduce. 
Its main innovation is a +concise syntax to define “local” processes that take a piece of local data and +emit zero or more results (these get translated to Map logic); then another +syntax which takes results from the local transformations and aggregates them +in different ways (this gets translated to Reduce logic). In a nutshell: +Sawzall = MapReduce + high-level syntax + new terminology (conveniently hiding +distributed computations behind a simple set of conceptual constructs). + +We propose something somewhat similar, but targeting a different execution +model than MapReduce. + +1. A predefined set of *aggregators*, performing functionality required by SQL. + Most aggregators are configurable, but not fully programmable. +2. One special aggregator, the 'evaluator', is programmable using a very simple + language, but is restricted to operating on one row of data at + a time. +3. A routing of the results of an aggregator to the next aggregator in the + query pipeline. +4. A logical model that allows for SQL to be compiled in a data-location-agnostic + way, but that captures enough information so that we can distribute the + computation. + +Besides accumulating or aggregating data, the aggregators can feed their results +to another node or set of nodes, possibly as input for other programs. Finally, +aggregators with special functionality for batching up results and performing KV +commands are used to read data or make updates to the database. + +The key idea is that we can map SQL to a well-defined logical model which we +then transform into a distributed execution plan. + +[1]: http://research.google.com/archive/sawzall.html + +## Logical model and logical plans + +We compile SQL into a *logical plan* (similar on the surface to the current +`planNode` tree) which represents the abstract data flow through computation +stages. 
The logical plan is agnostic to the way data is partitioned and +distributed in the cluster; however, it contains enough information about the +structure of the planned computation to allow us to exploit data parallelism +later - in a subsequent phase, the logical plan will be converted into a +*physical plan*, which maps the abstract computation and data flow to concrete +data processors and communication channels between them. + +The logical plan is made up of **aggregators**. Each aggregator consumes an **input +stream** of rows (or more streams for joins, but let's leave that aside for now) +and produces an **output stream** of rows. Each row is a tuple of column values; +both the input and the output streams have a set schema. The schema is a set of +columns and types, with each row having a datum for each column. Again, we +emphasize that the streams are a logical concept and might not map to a single +data stream in the actual computation. + +We introduce the concept of **grouping** to characterize a specific aspect of +the computation that happens inside an aggregator. The groups are defined based +on a **group key**, which is a subset of the columns in the input stream schema. +The computation that happens for each group is independent of the data in the +other groups, and the aggregator emits a concatenation of the results for all +the groups. The ordering between group results in the output stream is not +fixed - some aggregators may guarantee a certain ordering, others may not. + +More precisely, we can define the computation in an aggregator using a function +`agg` that takes a sequence of input rows that are in a single group (same group +key) and produces a set of output rows. The output of an aggregator is +the concatenation of the outputs of `agg` on all the groups, in some order. 
+ +The grouping characteristic will be useful when we later decide how to +distribute the computation that is represented by an aggregator: since results +for each group are independent, different groups can be processed on different +nodes. The more groups we have, the better. At one end of the spectrum there are +single-group aggregators (group key is the empty set of columns - `Group key: +[]`, meaning everything is in the same group) which cannot be distributed. At +the other end there are no-grouping aggregators which can be parallelized +arbitrarily. Note that no-grouping aggregators are different than aggregators +where the group key is the full set of columns - the latter still requires rows +that are equal to be processed on a single node (this would be useful for an +aggregator implementing `DISTINCT` for example). An aggregator with no grouping +is a special but important case in which we are not aggregating multiple pieces +of data, but we may be filtering, transforming, or reordering individual pieces +of data. + +Aggregators can make use of SQL expressions, evaluating them with various inputs +as part of their work. In particular, all aggregators can optionally use an +**output filter** expression - a boolean function that is used to discard +elements that would have otherwise been part of the output stream. + +(Note: the alternative of restricting use of SQL expressions to only certain +aggregators was considered; that approach makes it much harder to support outer +joins, where the `ON` expression evaluation must be part of the internal join +logic and not just a filter on the output.) + +A special type of aggregator is the **evaluator** aggregator which is a +"programmable" aggregator which processes the input stream sequentially (one +element at a time), potentially emitting output elements. This is an aggregator +with no grouping (group key is the full set of columns); the processing of each +row is independent. 
An evaluator can be used, for example, to generate new values from +arbitrary expressions (like the `a+b` in `SELECT a+b FROM ..`); or to filter +rows according to a predicate. + +Special **table reader** aggregators with no inputs are used as data sources; a +table reader can be configured to output only certain columns, as needed. +A special **final** aggregator with no outputs is used for the results of the +query/statement. + +Some aggregators (final, limit) have an **ordering requirement** on the input +stream (a list of columns with corresponding ascending/descending requirements). +Some aggregators (like table readers) can guarantee a certain ordering on their +output stream, called an **ordering guarantee** (same as the `orderingInfo` in +the current code). All aggregators have an associated **ordering +characterization** function `ord(input_order) -> output_order` that maps +`input_order` (an ordering guarantee on the input stream) into `output_order` +(an ordering guarantee for the output stream) - meaning that if the rows in the +input stream are ordered according to `input_order`, then the rows in the output +stream will be ordered according to `output_order`. + +The ordering guarantee of the table readers along with the characterization +functions can be used to propagate ordering information across the logical plan. +When there is a mismatch (an aggregator has an ordering requirement that is not +matched by a guarantee), we insert a **sorting aggregator** - this is a +non-grouping aggregator with output schema identical to the input schema that +reorders the elements in the input stream providing a certain output order +guarantee regardless of the input ordering. We can perform optimizations wrt +sorting at the logical plan level - we could potentially put the sorting +aggregator earlier in the pipeline, or split it into multiple nodes (one of +which performs preliminary sorting in an earlier stage). 
+ +To introduce the main types of aggregators we use of a simple query. + +### Example 1 + +```sql +TABLE Orders (OId INT PRIMARY KEY, CId INT, Value DECIMAL, Date DATE) + +SELECT CID, SUM(VALUE) FROM Orders + WHERE DATE > 2015 + GROUP BY CID + ORDER BY 1 - SUM(Value) +``` + +This is a potential description of the aggregators and streams: + +``` +TABLE-READER src + Table: Orders + Table schema: Oid:INT, Cid:INT, Value:DECIMAL, Date:DATE + Output filter: (Date > 2015) + Output schema: Cid:INT, Value:DECIMAL + Ordering guarantee: Oid + +AGGREGATOR summer + Input schema: Cid:INT, Value:DECIMAL + Output schema: Cid:INT, ValueSum:DECIMAL + Group Key: Cid + Ordering characterization: if input ordered by Cid, output ordered by Cid + +EVALUATOR sortval + Input schema: Cid:INT, ValueSum:DECIMAL + Output schema: SortVal:DECIMAL, Cid:INT, ValueSum:DECIMAL + Ordering characterization: + ValueSum -> ValueSum and -SortVal + Cid,ValueSum -> Cid,ValueSum and Cid,-SortVal + ValueSum,Cid -> ValueSum,Cid and -SortVal,Cid + SQL Expressions: E(x:INT) INT = (1 - x) + Code { + EMIT E(ValueSum), CId, ValueSum + } + +AGGREGATOR final: + Input schema: SortVal:DECIMAL, Cid:INT, ValueSum:DECIMAL + Input ordering requirement: SortVal + Group Key: [] + +Composition: src -> summer -> sortval -> final +``` + +Note that the logical description does not include sorting aggregators. This +preliminary plan will lead to a full logical plan when we propagate ordering +information. We will have to insert a sorting aggregator before `final`: +``` +src -> summer -> sortval -> sort(OrderSum) -> final +``` +Each arrow is a logical stream. This is the complete logical plan. + +In this example we only had one option for the sorting aggregator. Let's look at +another example. + + +### Example 2 + +```sql +TABLE People (Age INT, NetWorth DECIMAL, ...) 
+ +SELECT Age, Sum(NetWorth) FROM v GROUP BY AGE ORDER BY AGE +``` + +Preliminary logical plan description: +``` +TABLE-READER src + Table: People + Table schema: Age:INT, NetWorth:DECIMAL + Output schema: Age:INT, NetWorth:DECIMAL + Ordering guarantee: XXX // will consider different cases later + +AGGREGATOR summer + Input schema: Age:INT, NetWorth:DECIMAL + Output schema: Age:INT, NetWorthSum:DECIMAL + Group Key: Age + Ordering characterization: if input ordered by Age, output ordered by Age + +AGGREGATOR final: + Input schema: Age:INT, NetWorthSum:DECIMAL + Input ordering requirement: Age + Group Key: [] + +Composition: src -> summer -> final +``` + +The `summer` aggregator can perform aggregation in two ways - if the input is +not ordered by Age it will use an unordered map with one entry per `Age` and the +results will be output in arbitrary order; if the input is ordered by `Age` it can +aggregate on one age at a time and it will emit the results in age order. + +Let's take two cases: + +1. src is ordered by `Age` (we use a covering index on `Age`) + + In this case, when we propagate the ordering + information we will notice that `summer` preserves ordering by age and we + won't need to add sorting aggregators. + +2. src is not ordered by anything + + In this case, summer will not have any output ordering guarantees and we will + need to add a sorting aggregator before `final`: + ``` + src -> summer -> sort(Age) -> final + ``` + We could also use the fact that `summer` would preserve the order by `Age` + and put the sorting aggregator before `summer`: + ``` + src -> sort(Age) -> summer -> final + ``` + We would choose between these two logical plans. + +There is also the possibility that `summer` uses an ordered map, in which case +it will always output the results in age order; that would mean we are always in +case 1 above, regardless of the ordering of `src`. 
+ +### Back propagation of ordering requirements + +In the previous example we saw how we could use an ordering on a table reader +stream along with an order preservation guarantee to avoid sorting. The +preliminary logical plan will try to preserve ordering as much as possible to +minimize any additional sorting. + +However, in some cases preserving ordering might come with some cost; some +aggregators could be configured to either preserve ordering or not. To avoid +preserving ordering unnecessarily, after the sorting aggregators are put in +place we post-process the logical plan to relax the ordering on the streams +wherever possible. Specifically, we inspect each logical stream (in reverse +topological order) and check if removing its ordering still yields a correct +plan; this results in a back-propagation of the ordering requirements. + +To recap, the logical planning has three stages: + 1. preliminary logical plan, with ordering preserved as much as possible and no + sort nodes, + 2. order-satisfying logical plan, with sort nodes added as necessary, + 3. final logical plan, with ordering requirements relaxed where possible. 
+ +### Example 3 + +```sql +TABLE v (Name STRING, Age INT, Account INT) + +SELECT COUNT(DISTINCT(account)) FROM v + WHERE age > 10 and age < 30 + GROUP BY age HAVING MIN(Name) > 'k' +``` + +``` +TABLE-READER src + Table: v + Table schema: Name:STRING, Age:INT, Account:INT + Filter: (Age > 10 AND Age < 30) + Output schema: Name:STRING, Age:INT, Account:INT + Ordering guarantee: Name + +AGGREGATOR countdistinctmin + Input schema: Name:String, Age:INT, Account:INT + Group Key: Age + Group results: distinct count as AcctCount:INT + MIN(Name) as MinName:STRING + Output filter: (MinName > 'k') + Output schema: AcctCount:INT + Ordering characterization: if input ordered by Age, output ordered by Age + +AGGREGATOR final: + Input schema: AcctCount:INT + Input ordering requirement: none + Group Key: [] + +Composition: src -> countdistinctmin -> final +``` + +### Types of aggregators + +- `TABLE READER` is a special aggregator, with no input stream. It's configured + with spans of a table or index and the schema that it needs to read. + Like every other aggregator, it can be configured with a programmable output + filter. +- `EVALUATOR` is a fully programmable no-grouping aggregator. It runs a "program" + on each individual row. The evaluator can drop the row, or modify it + arbitrarily. +- `JOIN` performs a join on two streams, with equality constraints between + certain columns. The aggregator is grouped on the columns that are + constrained to be equal. See [Stream joins](#stream-joins). +- `JOIN READER` performs point-lookups for rows with the keys indicated by the + input stream. It can do so by performing (potentially remote) KV reads, or by + setting up remote flows. See [Join-by-lookup](#join-by-lookup) and + [On-the-fly flows setup](#on-the-fly-flows-setup). +- `MUTATE` performs insertions/deletions/updates to KV. See section TODO. +- `SET OPERATION` takes several inputs and performs set arithmetic on them + (union, difference). 
+- `AGGREGATOR` is the one that does "aggregation" in the SQL sense. It groups
+  rows and computes an aggregate for each group. The group is configured using
+  the group key. `AGGREGATOR` can be configured with one or more aggregation
+  functions:
+  - `SUM`
+  - `COUNT`
+  - `COUNT DISTINCT`
+  - `DISTINCT`
+
+  `AGGREGATOR`'s output schema consists of the group key, plus a configurable
+  subset of the generated aggregated values. The optional output filter has
+  access to the group key and all the aggregated values (i.e. it can use even
+  values that are not ultimately outputted).
+- `SORT` sorts the input according to a configurable set of columns. Note that
+  this is a no-grouping aggregator, hence it can be distributed arbitrarily to
+  the data producers. This means, of course, that it doesn't produce a global
+  ordering, instead it just guarantees an intra-stream ordering on each
+  physical output stream. The global ordering, when needed, is achieved by an
+  input synchronizer of a grouped processor (such as `LIMIT` or `FINAL`).
+- `LIMIT` is a single-group aggregator that stops after reading so many input
+  rows.
+- `INTENT-COLLECTOR` is a single-group aggregator, scheduled on the gateway,
+  that receives all the intents generated by a `MUTATE` and keeps track of them
+  in memory until the transaction is committed.
+- `FINAL` is a single-group aggregator, scheduled on the gateway, that collects
+  the results of the query. This aggregator will be hooked up to the pgwire
+  connection to the client.
+
+## From logical to physical
+
+To distribute the computation that was described in terms of aggregators and
+logical streams, we use the following facts:
+
+ - for any aggregator, groups can be partitioned into subsets and processed in
+   parallel, as long as all processing for a group happens on a single node.
+ + - the ordering characterization of an aggregator applies to *any* input stream + with a certain ordering; it is useful even when we have multiple parallel + instances of computation for that logical node: if the physical input streams + in all the parallel instances are ordered according to the logical input + stream guarantee (in the logical plan), the physical output streams in all + instances will have the output guarantee of the logical output stream. If at + some later stage these streams are merged into a single stream (merge-sorted, + i.e. with the ordering properly maintained), that physical stream will have + the correct ordering - that of the corresponding logical stream. + + - aggregators with empty group keys (`limit`, `final`) must have their final + processing on a single node (they can however have preliminary distributed + stages). + +So each logical aggregator can correspond to multiple distributed instances, and +each logical stream can correspond to multiple physical streams **with the same +ordering guarantees**. + +We can distribute using a few simple rules: + + - table readers have multiple instances, split according to the ranges; each + instance is processed by the raft leader of the relevant ranges and is the + start of a physical stream. + + - streams continue in parallel through programs. When an aggregator is reached, + the streams can be redistributed to an arbitrary number of instances using + hashing on the group key. Aggregators with empty group keys will have a + single physical instance, and the input streams are merged according to the + desired ordering. As mentioned above, each physical stream will be already + ordered (because they all correspond to an ordered logical stream). + + - sorting aggregators apply to each physical stream corresponding to the + logical stream it is sorting. A sort aggregator by itself will *not* result + in coalescing results into a single node. 
This is implicit from the fact that
+(like evaluators) it requires no grouping.
+
+It is important to note that correctly distributing the work along range
+boundaries is not necessary for correctness - if a range gets split or moved
+while we are planning the query, it will not cause incorrect results. Some key
+reads might be slower because they actually happen remotely, but as long as
+*most of the time, most of the keys* are read locally this should not be a
+problem.
+
+Assume that we run the Example 1 query on a **Gateway** node and the table has
+data on two nodes **A** and **B** (i.e. these two nodes are masters for all
+the relevant ranges). The logical plan is:
+
+```
+TABLE-READER src
+  Table: Orders
+  Table schema: Oid:INT, Cid:INT, Value:DECIMAL, Date:DATE
+  Output filter: (Date > 2015)
+  Output schema: Cid:INT, Value:DECIMAL
+  Ordering guarantee: Oid
+
+AGGREGATOR summer
+  Input schema: Cid:INT, Value:DECIMAL
+  Output schema: Cid:INT, ValueSum:DECIMAL
+  Group Key: Cid
+  Ordering characterization: if input ordered by Cid, output ordered by Cid
+
+EVALUATOR sortval
+  Input schema: Cid:INT, ValueSum:DECIMAL
+  Output schema: SortVal:DECIMAL, Cid:INT, ValueSum:DECIMAL
+  Ordering characterization: if input ordered by [Cid,]ValueSum[,Cid], output ordered by [Cid,]-ValueSum[,Cid]
+  SQL Expressions: E(x:INT) INT = (1 - x)
+  Code {
+    EMIT E(ValueSum), CId, ValueSum
+  }
+```
+
+![Logical plan](images/distributed_sql_logical_plan.png?raw=true "Logical Plan")
+
+This logical plan above could be instantiated as the following physical plan:
+
+![Physical plan](images/distributed_sql_physical_plan.png?raw=true "Physical Plan")
+
+Each box in the physical plan is a *processor*:
+ - `src` is a table reader and performs KV Get operations and forms rows; it is
+   programmed to read the spans that belong to the respective node. It evaluates
+   the `Date > 2015` filter before outputting rows.
+ - `summer-stage1` is the first stage of the `summer` aggregator; its purpose is + to do the aggregation it can do locally and distribute the partial results to + the `summer-stage2` processes, such that all values for a certain group key + (`CId`) reach the same process (by hashing `CId` to one of two "buckets"). + - `summer-stage2` performs the actual sum and outputs the index (`CId`) and + corresponding sum. + - `sortval` calculates and emits the additional `SortVal` value, along with the + `CId` and `ValueSum` + - `sort` sorts the stream according to `SortVal` + - `final` merges the two input streams of data to produce the final sorted + result. + +Note that the second stage of the `summer` aggregator doesn't need to run on the +same nodes; for example, an alternate physical plan could use a single stage 2 +processor: + +![Alternate physical plan](images/distributed_sql_physical_plan_2.png?raw=true "Alternate physical Plan") + +The processors always form a directed acyclic graph. + +### Processors + +Processors are generally made up of three components: + +![Processor](images/distributed_sql_processor.png?raw=true "Processor") + +1. The *input synchronizer* merges the input streams into a single stream of + data. Types: + * single-input (pass-through) + * unsynchronized: passes rows from all input streams, arbitrarily + interleaved. + * ordered: the input physical streams have an ordering guarantee (namely the + guarantee of the corresponding logical stream); the synchronizer is careful + to interleave the streams so that the merged stream has the same guarantee. + +2. The *data processor* core implements the data transformation or aggregation + logic (and in some cases performs KV operations). + +3. 
The *output router* splits the data processor's output to multiple streams; + types: + * single-output (pass-through) + * mirror: every row is sent to all output streams + * hashing: each row goes to a single output stream, chosen according + to a hash function applied on certain elements of the data tuples. + * by range: the router is configured with range information (relating to a + certain table) and is able to send rows to the nodes that are lease holders for + the respective ranges (useful for `JoinReader` nodes (taking index values + to the node responsible for the PK) and `INSERT` (taking new rows to their + lease holder-to-be)). + +## Joins + +### Join-by-lookup + +The join-by-lookup method involves receiving data from one table and looking up +corresponding rows from another table. It is typically used for joining an index +with the table, but they can be used for any join in the right circumstances, +e.g. joining a small number of rows from one table ON the primary key of another +table. We introduce a variant of `TABLE-READER` which has an input stream. Each +element of the input stream drives a point-lookup in another table or index, +with a corresponding value in the output. Internally the aggregator performs +lookups in batches, the way we already do it today. 
Example:
+
+```sql
+TABLE t (k INT PRIMARY KEY, u INT, v INT, INDEX(u))
+SELECT k, u, v FROM t WHERE u >= 1 AND u <= 5
+```
+Logical plan:
+```
+TABLE-READER indexsrc
+Table: t@u, span /1-/6
+Output schema: k:INT, u:INT
+Output ordering: u
+
+JOIN-READER pksrc
+Table: t
+Input schema: k:INT, u:INT
+Output schema: k:INT, u:INT, v:INT
+Ordering characterization: preserves any ordering on k/u
+
+AGGREGATOR final
+Input schema: k:INT, u:INT, v:INT
+
+indexsrc -> pksrc -> final
+```
+
+Example of when this can be used for a join:
+```
+TABLE t1 (k INT PRIMARY KEY, v INT, INDEX(v))
+TABLE t2 (k INT PRIMARY KEY, w INT)
+SELECT t1.k, t1.v, t2.w FROM t1 INNER JOIN t2 ON t1.k = t2.k WHERE t1.v >= 1 AND t1.v <= 5
+```
+
+Logical plan:
+```
+TABLE-READER t1src
+Table: t1@v, span /1-/6
+Output schema: k:INT, v:INT
+Output ordering: v
+
+JOIN-READER t2src
+Table: t2
+Input schema: k:INT, v:INT
+Output schema: k:INT, v:INT, w:INT
+Ordering characterization: preserves any ordering on k
+
+AGGREGATOR final
+Input schema: k:INT, v:INT, w:INT
+
+t1src -> t2src -> final
+```
+
+Note that `JOIN-READER` has the capability of "plumbing" through an input column
+to the output (`v` in this case). In the case of index joins, this is only
+useful to skip reading or decoding the values for `v`; but in the general case
+it is necessary to pass through columns from the first table.
+
+In terms of the physical implementation of `JOIN-READER`, there are two possibilities:
+
+ 1. it can perform the KV queries (in batches) from the node it is receiving the
+    physical input stream from; the output stream continues on the same node.
+
+    This is simple but involves round-trips between the node and the range
+    lease holders. We will probably use this strategy for the first implementation.
+
+ 2. it can use routers-by-range to route each input to an instance of
+    `JOIN-READER` on the node for the respective range of `t2`; the flow of data
+    continues on that node.
+ + This avoids the round-trip but is problematic because we may be setting up + flows on too many nodes (for a large table, many/all nodes in the cluster + could hold ranges). To implement this effectively, we require the ability to + set up the flows "lazily" (as needed), only when we actually find a row that + needs to go through a certain flow. This strategy can be particularly useful + when the ordering of `t1` and `t2` are correlated (e.g. t1 could be ordered + by a date, `t2` could be ordered by an implicit primary key). + + + Even with this optimization, it would be wasteful if we involve too many + remote nodes but we only end up doing a handful of queries. We can + investigate a hybrid approach where we batch some results and depending on + how many we have and how many different ranges/nodes they span, we choose + between the two strategies. + + +### Stream joins + +The join aggregator performs a join on two streams, with equality constraints +between certain columns. The aggregator is grouped on the columns that are +constrained to be equal. 
+ +``` +TABLE People (First STRING, Last STRING, Age INT) +TABLE Applications (College STRING PRIMARY KEY, First STRING, Last STRING) +SELECT College, Last, First, Age FROM People INNER JOIN Applications ON First, Last + +TABLE-READER src1 +Table: People +Output Schema: First:STRING, Last:STRING, Age:INT +Output Ordering: none + +TABLE_READER src2 +Table: Applications +Output Schema: College:STRING, First:STRING, Last:STRING +Output Ordering: none + +JOIN AGGREGATOR join +Input schemas: + 1: First:STRING, Last:STRING, Age:INT + 2: College:STRING, First:STRING, Last:STRING +Output schema: First:STRING, Last:STRING, Age:INT, College:STRING +Group key: (1.First, 1.Last) = (2.First, 2.Last) // we need to get the group key from either stream +Order characterization: no order preserved // could also preserve the order of one of the streams + +AGGREGATOR final + Ordering requirement: none + Input schema: First:STRING, Last:STRING, Age:INT, College:STRING +``` + +![Logical plan for join](images/distributed_sql_join_logical.png?raw=true "Logical plan for join") + +At the heart of the physical implementation of the stream join aggregators sits +the join processor which (in general) puts all the rows from one stream in a +hash map and then processes the other stream. If both streams are ordered by the +group key, it can perform a merge-join which requires less memory. + + +Using the same join processor implementation, we can implement different +distribution strategies depending how we set up the physical streams and +routers: + + - the routers can distribute each row to one of multiple join processors based + on a hash on the elements for the group key; this ensures that all elements + in a group reach the same instance, achieving a hash-join. 
An example + physical plan: + + ![Physical plan for hash join](images/distributed_sql_join_physical.png?raw=true "Physical plan for hash join") + + - the routers can *duplicate* all rows from the physical streams for one table + and distribute copies to all processor instances; the streams for the other + table get processed on their respective nodes. This strategy is useful when + we are joining a big table with a small table, and can be particularly useful + for subqueries, e.g. `SELECT ... WHERE ... AND x IN (SELECT ...)`. + + For the query above, if we expect few results from `src2`, this plan would + be more efficient: + + ![Physical plan for dup join](images/distributed_sql_join_physical2.png?raw=true "Physical plan for dup join") + + The difference in this case is that the streams for the first table stay on + the same node, and the routers after the `src2` table readers are configured + to mirror the results (instead of distributing by hash in the previous case). + + +## Inter-stream ordering + +**This is a feature that relates to implementing certain optimizations, but does +not alter the structure of logical or physical plans. It will not be part of the +initial implementation but we keep it in mind for potential use at a later +point.** + +Consider this example: +```sql +TABLE t (k INT PRIMARY KEY, v INT) +SELECT k, v FROM t WHERE k + v > 10 ORDER BY k +``` + +This is a simple plan: + +``` +READER src + Table: t + Output filter: (k + v > 10) + Output schema: k:INT, v:INT + Ordering guarantee: k + +AGGREGATOR final: + Input schema: k:INT, v:INT + Input ordering requirement: k + Group Key: [] + +Composition: src -> final +``` + +Now let's say that the table spans two ranges on two different nodes - one range +for keys `k <= 10` and one range for keys `k > 10`. In the physical plan we +would have two streams starting from two readers; the streams get merged into a +single stream before `final`. 
But in this case, we know that *all* elements in +one stream are ordered before *all* elements in the other stream - we say that +we have an **inter-stream ordering**. We can be more efficient when merging +(before `final`): we simply read all elements from the first stream and then all +elements from the second stream. Moreover, we would also know that the reader +and other processors for the second stream don't need to be scheduled until the +first stream is consumed, which is useful information for scheduling the query. +In particular, this is important when we have a query with `ORDER BY` and +`LIMIT`: the limit would be represented by an aggregator with a single group, +with physical streams merging at that point; knowledge of the inter-stream +ordering would allow us to potentially satisfy the limit by only reading from +one range. + +We add the concept of inter-physical-stream ordering to the logical plan - it is +a property of a logical stream (even though it refers to multiple physical +streams that could be associated with that logical stream). We annotate all +aggregators with an **inter-stream ordering characterization function** (similar +to the ordering characterization described above, which can be thought of as +"intra-stream" ordering). The inter-stream ordering function maps an input +ordering to an output ordering, with the meaning that if the physical streams +that are inputs to distributed instances of that aggregator have the +inter-stream ordering described by input, then the output streams have the given +output ordering. + +Like the intra-stream ordering information, we can propagate the inter-stream +ordering information starting from the table readers onward. The streams coming +out of a table reader have an inter-stream order if the spans each reader "works +on" have a total order (this is always the case if each table reader is +associated to a separate range). 
+ +The information can be used to apply the optimization above - if a logical +stream has an appropriate associated inter-stream ordering, the merging of the +physical streams can happen by reading the streams sequentially. The same +information can be used for scheduling optimizations (such as scheduling table +readers that eventually feed into a limit sequentially instead of +concurrently). + +## Execution infrastructure + +Once a physical plan has been generated, the system needs to divvy it up +between the nodes and send it around for execution. Each node is responsible +for locally scheduling data processors and input synchronizers. Nodes also need +to be able to communicate with each other for connecting output routers to +input synchronizers. In particular, a streaming interface is needed for +connecting these actors. To avoid paying extra synchronization costs, the +execution environment providing all these needs to be flexible enough so that +different nodes can start executing their pieces in isolation, without any +orchestration from the gateway besides the initial request to schedule a part +of the plan. + +### Creating a local plan: the `ScheduleFlows` RPC + +Distributed execution starts with the gateway making a request to every node +that's supposed to execute part of the plan asking the node to schedule the +sub-plan(s) it's responsible for (modulo "on-the-fly" flows, see below). A node +might be responsible for multiple disparate pieces of the overall DAG. Let's +call each of them a *flow*. A flow is described by the sequence of physical +plan nodes in it, the connections between them (input synchronizers, output +routers) plus identifiers for the input streams of the top node in the plan and +the output streams of the (possibly multiple) bottom nodes. A node might be +responsible for multiple heterogeneous flows. 
More commonly, when a node is the +lease holder for multiple ranges from the same table involved in the query, it will +be responsible for a set of homogeneous flows, one per range, all starting with +a `TableReader` processor. In the beginning, we'll coalesce all these +`TableReader`s into one, configured with all the spans to be read across all +the ranges local to the node. This means that we'll lose the inter-stream +ordering (since we've turned everything into a single stream). Later on we +might move to having one `TableReader` per range, so that we can schedule +multiple of them to run in parallel. + +A node therefore implements a `ScheduleFlows` RPC which takes a set of flows, +sets up the input and output mailboxes (see below), creates the local +processors and starts their execution. We might imagine at some point +implementing admission control for flows at the node boundary, in which case +the RPC response would have the option to push back on the volume of work +that's being requested. + +### Local scheduling of flows + +The simplest way to schedule the different processors locally on a node is +concurrently: each data processor, synchronizer and router can be run as a +goroutine, with channels between them. The channels can be buffered to +synchronize producers and consumers to a controllable degree. + +### Mailboxes + +Flows on different nodes communicate with each other over GRPC streams. To +allow the producer and the consumer to start at different times, +`ScheduleFlows` creates named mailboxes for all the input and output streams. +These message boxes will hold some number of tuples in an internal queue until +a GRPC stream is established for transporting them. From that moment on, GRPC +flow control is used to synchronize the producer and consumer. +A GRPC stream is established by the consumer using the `StreamMailbox` RPC, +taking a mailbox id (the same one that's been already used in the flows passed +to `ScheduleFlows`). 
This call might arrive to a node even before the
+corresponding `ScheduleFlows` arrives. In this case, the mailbox is created on
+the fly, in the hope that the `ScheduleFlows` will follow soon. If that doesn't
+happen within a timeout, the mailbox is retired.
+Mailboxes present a channel interface to the local processors.
+If we move to multiple `TableReader`s/flows per node, we'd still want one
+single output mailbox for all the homogeneous flows (if a node has 1mil ranges,
+we don't want 1mil mailboxes/streams). At that point we might want to add
+tagging to the different streams entering the mailbox, so that the inter-stream
+ordering property can still be used by the consumer.
+
+A diagram of a simple query using mailboxes for its execution:
+![Mailboxes](images/distributed_sql_mailboxes.png?raw=true)
+
+### On-the-fly flows setup
+
+In a couple of cases, we don't want all the flows to be setup from the get-go.
+`PointLookup` and `Mutate` generally start on a few ranges and then send data
+to arbitrary nodes. The amount of data to be sent to each node will often be
+very small (e.g. `PointLookup` might perform a handful of lookups in total on
+table *A*, so we don't want to set up receivers for those lookups on all nodes
+containing ranges for table *A*). Instead, the physical plan will contain just
+one processor, making the `PointLookup` aggregator single-stage; this node can
+choose whether to perform KV operations directly to do the lookups (for ranges
+with few lookups), or setup remote flows on the fly using the `ScheduleFlows`
+RPC for ranges with tons of lookups. In this case, the idea is to push a bunch
+of the computation to the data, so the flow passed to `ScheduleFlows` will be a
+copy of the physical nodes downstream of the aggregator, including filtering
+and aggregation. We imagine the processor will take the decision to move to
+this heavyweight process once it sees that it's batching a lot of lookups for
+the same range.
+
+## Retiring flows
+
+Processors and mailboxes need to be destroyed at some point. This happens
+under a number of circumstances:
+
+1. A processor retires when it receives a sentinel on all of its input streams
+   and has outputted the last tuple (+ a sentinel) on all of its output
+   streams.
+2. A processor retires once either one of its input or output streams is
+   closed. This can be used by a consumer to inform its producers that it's
+   gotten all the data it needs.
+3. An input mailbox retires once it has put the sentinel on the wire or once
+   its GRPC stream is closed remotely.
+4. An output mailbox retires once it has passed on the sentinel to the reader,
+   which it does once all of its input channels are closed (remember that an
+   output mailbox may receive input from many channels, one per member of a
+   homogeneous flow family). It also retires if its GRPC stream is closed
+   remotely.
+5. `TableReader` retires once it has delivered the last tuple in its range (+ a
+   sentinel).
+
+### Error handling
+
+At least initially, the plan is to have no error recovery (if anything goes wrong
+during execution, the query fails and the transaction is rolled back).
+The only concern is releasing all resources taken by the plan nodes.
+This can be done by propagating an error signal when any GRPC stream is
+closed abruptly.
+Similarly, cancelling a running query can be done by asking the `FINAL` processor
+to close its input channel. This close will propagate backwards to all plan nodes.
+
+
+# A more complex example: Daily Promotion
+
+Let's draw a possible logical and physical plan for a more complex query. The
+point of the query is to help with a promotion that goes out daily, targeting
+customers that have spent over $1000 in the last year. We'll insert into the
+`DailyPromotion` table rows representing each such customer and the sum of her
+recent orders.
+
+```SQL
+TABLE DailyPromotion (
+  Email TEXT,
+  Name TEXT,
+  OrderCount INT
+)
+
+TABLE Customers (
+  CustomerID INT PRIMARY KEY,
+  Email TEXT,
+  Name TEXT
+)
+
+TABLE Orders (
+  CustomerID INT,
+  Date DATETIME,
+  Value INT,
+
+  PRIMARY KEY (CustomerID, Date),
+  INDEX date (Date)
+)
+
+INSERT INTO DailyPromotion
+(SELECT c.Email, c.Name, os.OrderCount FROM
+  Customers AS c
+  INNER JOIN
+  (SELECT CustomerID, COUNT(*) as OrderCount FROM Orders
+    WHERE Date >= '2015-01-01'
+    GROUP BY CustomerID HAVING SUM(Value) >= 1000) AS os
+  ON c.CustomerID = os.CustomerID)
+```
+
+Logical plan:
+
+```
+TABLE-READER orders-by-date
+  Table: Orders@OrderByDate /2015-01-01 -
+  Input schema: Date: Datetime, OrderID: INT
+  Output schema: Oid:INT, Date:DATETIME
+  Output filter: None (the filter has been turned into a scan range)
+  Intra-stream ordering characterization: Date
+  Inter-stream ordering characterization: Date
+
+JOIN-READER orders
+  Table: Orders
+  Input schema: Oid:INT, Date:DATETIME
+  Output filter: None
+  Output schema: Cid:INT, Date:DATETIME, Value:INT
+  // TODO: The ordering characterizations aren't necessary in this example
+  // and we might get better performance if we remove it and let the aggregator
+  // emit results out of order. Update after the section on backpropagation of
+  // ordering requirements.
+
+  Intra-stream ordering characterization: same as input
+  Inter-stream ordering characterization: Oid
+
+AGGREGATOR count-and-sum
+  Input schema: CustomerID:INT, Value:INT
+  Aggregation: SUM(Value) as sumval:INT
+               COUNT(*) as OrderCount:INT
+  Group key: CustomerID
+  Output schema: CustomerID:INT, OrderCount:INT
+  Output filter: sumval >= 1000
+  Intra-stream ordering characterization: None
+  Inter-stream ordering characterization: None
+
+JOIN-READER customers
+  Table: Customers
+  Input schema: CustomerID:INT, OrderCount: INT
+  Output schema: e-mail: TEXT, Name: TEXT, OrderCount: INT
+  Output filter: None
+  // TODO: The ordering characterizations aren't necessary in this example
+  // and we might get better performance if we remove it and let the aggregator
+  // emit results out of order. Update after the section on backpropagation of
+  // ordering requirements.
+  Intra-stream ordering characterization: same as input
+  Inter-stream ordering characterization: same as input
+
+INSERT inserter
+  Table: DailyPromotion
+  Input schema: email: TEXT, name: TEXT, OrderCount: INT
+  Table schema: email: TEXT, name: TEXT, OrderCount: INT
+
+INTENT-COLLECTOR intent-collector
+  Group key: []
+  Input schema: k: TEXT, v: TEXT
+
+AGGREGATOR final:
+  Input schema: rows-inserted:INT
+  Aggregation: SUM(rows-inserted) as rows-inserted:INT
+  Group Key: []
+
+Composition:
+orders-by-date -> orders -> count-and-sum -> customers -> inserter -> intent-collector
+                                                          \-> final (sum)
+```
+
+A possible physical plan:
+![Physical plan](images/distributed_sql_daily_promotion_physical_plan.png?raw=true)
+
+# Implementation strategy
+
+There are five streams of work.
We keep in mind two initial milestones to track +the extent of progress we must achieve within each stream: +- Milestone M1: offloading filters to remote side +- Milestone M2: offloading updates to remote side + +### Logical planning + +Building a logical plan for a statement involves many aspects: + - index selection + - query optimization + - choosing between various strategies for sorting, aggregation + - choosing between join strategies + +This is a very big area where we will see a long tail of improvements over time. +However, we can start with a basic implementation based on the existing code. +For a while we can use a hybrid approach where we implement table reading and +filtering using the new framework and make the results accessible via a +`planNode` so we can use the existing code for everything else. This would be +sufficient for M1. The next stage would be implementing the mutation aggregators +and refactoring the existing code to allow using them (enabling M2). + +### Physical planning + +A lot of the decisions in the physical planning stage are "forced" - table +readers are distributed according to ranges, and much of the physical planning +follows from that. + +One place where physical planning involves difficult decisions is when +distributing the second stage of an aggregation or join - we could set up any +number of "buckets" (and subsequent flows) on any nodes. E.g. see the `summer` +example. Fortunately there is a simple strategy we can start with - use as many +buckets as input flows and distribute them among the same nodes. This strategy +scales well with the query size: if a query draws data from a single node, we +will do all the aggregation on that node; if a query draws data from many nodes, +we will distribute the aggregation among those nodes. + +We will also support configuring things to minimize the distribution - getting +everything back on the single gateway node as quickly as possible. 
This will be +useful to compare with the current "everything on the gateway" approach; it is +also a conservative step that might avoid some problems when distributing +queries between too many nodes. + +A "stage 2" refinement would be detecting when a computation (and subsequent +stages) might be cheap enough to not need distribution and automatically switch +to performing the aggregation on the gateway node. Further improvements +(statistics based) can be investigated later. + +We should add extended SQL syntax to allow the query author to control some of +these parameters, even if only as a development/testing facility that we don't +advertise. + +### Processor infrastructure and implementing processors + +This involves building the framework within which we can run processors and +implementing the various processors we need. We can start with the table readers +(enough for M1). If necessary, this work stream can advance faster than the +logical planning stream - we can build processors even if the logical plan isn't +using them yet (as long as there is a good testing framework in place); we can +also potentially use the implementations internally, hidden behind a `planNode` +interface and running non-distributed. + +##### Joins + +A tangential work stream is to make progress toward supporting joins (initially +non-distributed). This involves building the processor that will be at the heart +of the hash join implementation and integrating that code with the current +`planNode` tree. + +### Scheduling + +The problem of efficiently queuing and scheduling processors will also involve a +long tail of improvements. 
But we can start with a basic implementation using +simple strategies: + - the queue ordering is by transactions; we don't order individual processors + - limit the number of transactions for which we run processors at any one time; + we can also limit the total number of processors running at any one time, as + long as we allow all the processors needed for at least one txn + - the txn queuing order is a function of the txn timestamp and its priority, + allowing the nodes to automatically agree on the relative ordering of + transactions, eliminating deadlock scenarios (example of a deadlock: txn A + has some processors running on node 1, and some processors on node 2 that are + queued behind running processors of txn B; and txn B also has some processors + that are queued behind txn A on node 1) + +We won't need anything fancier in this area to reach M1 and M2. + +### KV integration + +We do not propose introducing any new KV Get/Put APIs. The current APIs are to +be used; we simply rely on the fact that when run from the lease holder node they will +be faster as the work they do is local. + +However, we still require some integration with the KV layer: + +1. Range information lookup + + At the physical planning stage we need to break up key spans into ranges and + determine who is the lease holder for each range. We may also use range info at the + logical planning phase to help estimate table sizes (for index selection, + join order, etc). The KV layer already has a range cache that maintains this + information, but we will need to make changes to be more aggressive in terms + of how much information we maintain, and how we invalidate/update it. + +2. Distributed reads + + There is very little in the KV layer that needs to change to allow + distributed reads - they are currently prevented only because of a fix + involving detecting aborted transactions (PR #5323). + +3. 
Distributed writes + + The txn coordinator currently keeps track of all the modified keys or key + ranges. The new sql distribution layer is designed to allow funneling the + modified key information back to the gateway node (which acts as the txn + coordinator). There will need to be some integration here, to allow us to + pass this information to the KV layer. There are also likely various cases + where checking for error cases must be relaxed. + +The details of all these need to be further investigated. Only 1 and 2 are +required for M1; 3 is required for M2. + +Another potential improvement is fixing the impedance mismatch between the +`TableReader`, which produces a stream, and the underlying KV range reads, +which do batch reading. Eventually we'll need a streaming reading interface for +range reads, although in the beginning we can use what we have. + +## Implementation notes + +Capturing various notes and suggestions. + +#### Test infrastructure + +Either extend the logictest framework to allow specification of additional +system attributes, or create new framework. We must have tests that can control +various settings (number of nodes, range distribution etc) and examine the +resulting query plans. + +#### Visualisation/tracing + +Detailed information about logical and physical plans must be available, as well +as detailed traces for all phases of queries, including execution timings, +stats, etc. + +At the SQL we will have to present data, plans in textual representations. One +idea to help with visualisation is to build a small web page where we can paste +a textual representation to get a graphical display. + +# Alternate approaches considered (and rejected) + +We outline a few different approaches we considered but eventually decided +against. + +## More logic in the KV layer + +In this approach we would build more intelligence in the KV layer. 
It would +understand rows, and it would be able to process expressions (either SQL +expressions, or some kind of simplified language, e.g. string based). + +### Complexity + +Most of the complexity of this approach is around building APIs and support for +expressions. For full SQL expressions, we would need a KV-level language that +is able to read and modify SQL values without being part of the SQL layer. This +would mean a compiler able to translate SQL expressions to programs in a +KV-level VM that perform the SQL-to-bytes and bytes-to-SQL translations +explicitly (i.e. translate/migrate our data encoding routines from Go to that +KV-level VM's instructions). + +### Applicability + +The applicability of this technique is limited: it would work well for +filtering and possibly for remote-side updates, but it is hard to imagine +building the logic necessary for distributed SQL operations (joins, +aggregation) into the KV layer. + +It seems that if we want to meet all described goals, we need to make use of a +smarter approach. With this in mind, expending any effort toward this approach +seems wasteful at this point in time. We may want to implement some of these +ideas in the future if it helps make things more efficient, but for now we +should focus on initial steps towards a more encompassing solution. + + +## SQL2SQL: Distributed SQL layer + +In this approach we would build a distributed SQL layer, where the SQL layer of +a node can make requests to the SQL layer of any other node. The SQL layer +would "peek" into the range information in the KV layer to decide how to split +the workload so that data is processed by the respective raft range lease holders. +Achieving a correct distribution to range lease holders would not be necessary for +correctness; thus we wouldn't need to build extra coordination with the KV +layer to synchronize with range splits/merges or lease holdership changes during an +SQL operation. 
+ + +### Sample high-level query flows + +Sample flow of a “simple” query (select or update with filter): + +| **Node A** | **Node B** | **Node C** | **Node D** | +|--------------------------------------------|--------------|------------|------------| +| Receives statement | | | | +| Finds that the table data spans three ranges on **B**, **C**, **D** | | | +| Sends scan requests to **B**, **C**, **D** | | | | +| | Starts scan (w/filtering, updates) | Starts scan (w/filtering, updates) | Starts scan (w/filtering, updates) | +| | Sends results back to **A** | Sends results back to **A** | Sends results back to **A** | +| Aggregates and returns results. | | | | + +Sample flow for a hash-join: + +| **Node A** | **Node B** | **Node C** | **Node D** | +|--------------------------------------------|--------------|------------|------------| +| Receives statement | | | | +| Finds that the table data spans three ranges on **B**, **C**, **D** | | | +| Sets up 3 join buckets on B, C, D | | | | +| | Expects join data for bucket 0 | Expects join data for bucket 1 | Expects join data for bucket 2 | +| Sends scan requests to **B**, **C**, **D** | | | | +| | Starts scan (w/ filtering). Results are sent to the three buckets in batches | Starts scan (w/ filtering) Results are sent to the three buckets in batches | Starts scan (w/ filtering). Results are sent to the three buckets in batches | +| | Tells **A** scan is finished | Tells **A** scan is finished | Tells **A** scan is finished | +| Sends finalize requests to the buckets | | | | +| | Sends bucket data to **A** | Sends bucket data to **A** | Sends bucket data to **A** | +| Returns results | | | | + +### Complexity + +We would need to build new infrastructure and APIs for SQL-to-SQL. The APIs +would need to support SQL expressions, either as SQL strings (which requires +each node to re-parse expressions) or a more efficient serialization of ASTs. 
+ +The APIs also need to include information about what key ranges the request +should be restricted to (so that a node processes the keys that it is lease holder +for - or at least was, at the time when we started the operation). Since tables +can span many raft ranges, this information can include a large number of +disjoint key ranges. + +The design should not be rigid on the assumption that for any key there is a +single node with "fast" access to that key. In the future we may implement +consensus algorithms like EPaxos which allow operations to happen directly on +the replicas, giving us multiple choices for how to distribute an operation. + +Finally, the APIs must be designed to allow overlap between processing, network +transfer, and storage operations - it should be possible to stream results +before all of them are available (F1 goes as far as streaming results +out-of-order as they become available from storage). + +### Applicability + +This general approach can be used for distributed SQL operations as well as +remote-side filtering and updates. The main drawback of this approach is that it +is very general and not prescriptive on how to build reusable pieces of +functionality. It is not clear how we could break apart the work in modular +pieces, and it has the potential of evolving into a monster of unmanageable +complexity. + + +## Spark: Compiling SQL into a data-parallel language running on top of a distributed-execution runtime + +The idea here is to introduce a new system - an execution environment for +distributed computation. The computations use a programming model like M/R, or +more pipeline stuff - Spark, or Google's [Dataflow][1] (parts of it are an +Apache project that can run on top of other execution environments - e.g. +Spark). + +In these models, you think about arrays of data, or maps on which you can +operate in parallel. The storage for these is distributed. 
And all you do is
+operate on these arrays or maps - sort them, group them by key, transform
+them, filter them. You can also operate on pairs of these datasets to do joins.
+
+These models try to have *a)* smart compilers that do symbolic execution, e.g.
+fuse as many operations together as possible - `map(f, map(g, dataset)) == map(f
+● g, dataset)` and *b)* dynamic runtimes. The runtimes probably look at operations
+after their inputs have been at least partially computed and decide which nodes
+participate in this current operation based on who has the input and who needs
+the output. And maybe some of this work has already been done for us in one of
+these open source projects.
+
+The idea would be to compile SQL into this sort of language, considering that we
+start execution with one big sorted map as a dataset, and run it. If the
+execution environment is good, it takes advantage of the data topology. This is
+different from "distributed sql" because *a)* the execution environment is
+dynamic, so you don't need to come up with an execution plan up front that says
+what node is gonna issue what command to what other node and *b)* data can be
+pushed from one node to another, not just pulled.
+
+We can start small - no distributed runtime, just filtering for `SELECTS` and
+filtering with side effects for `UPDATE, DELETE, INSERT FROM SELECT`. But we
+build this outside of KV; we build it on top of KV (these programs call into KV,
+as opposed to KV calling a filtering callback for every k/v or row).
+
+[1]: https://cloud.google.com/dataflow/model/programming-model
+
+### Sample program
+
+Here's a quick sketch of a program that does remote-side filtering and deletion
+for a table with an index.
+
+Written in a language for (what I imagine to be) Spark-like parallel operations.
+The code is pretty tailored to this particular table and this particular query
+(which is a good thing). 
The idea of the exercise is to see if it'd be feasible +at all to generate such a thing, assuming we had a way to execute it. + +The language has some data types, notably maps and tuples, besides the +distributed maps that the computation is based on. It interfaces with KV through +some `builtin::` functions. + +It starts with a map corresponding to the KV map, and then munges and aggregates +the keys to form a map of "rows", and then generates the delete KV commands. + +The structure of the computation would stay the same and the code would be a lot +shorter if it weren't tailored to this particular table, and instead it used +generic built-in functions to encode and decode primary key keys and index keys. + +```sql +TABLE t ( + id1 int + id2 string + a string + b int DEFAULT NULL + + PRIMARY KEY id1, id2 + INDEX foo (id1, b) +) + +DELETE FROM t WHERE id1 >= 100 AND id2 < 200 AND len(id2) == 5 and b == 77 +``` + +```go +func runQuery() { + // raw => Map. The key is a primary key string - table id, id1, + // id2, col id. This map is also sorted. + raw = builtin::readRange("t/primary_key/100", "t/primary_key/200") + + // m1 => Map<(int, string), (int, string)>. This map is also sorted because the + // input is sorted and the function maintains sorting. + m1 = Map(raw, transformPK). + + // Now build something resembling SQL rows. Since m1 is sorted, ReduceByKey is + // a simple sequential scan of m1. + // m2 => Map<(int, string), Map>. These are the rows. + m2 = ReduceByKey(m1, buildColMap) + + // afterFilter => Map<(int, string), Map>. 
Like m2, but only the rows that passed the filter
+ afterFilter = Map(m2, filter)
+
+ // now we batch up all delete commands, for the primary key (one KV command
+ // per SQL column), and for the indexes (one KV command per SQL row)
+ Map(afterFilter, deletePK)
+ Map(afterFilter, deleteIndexFoo)
+
+ // return the number of rows affected
+ return len(afterFilter)
+}
+
+func transformPK(k, v) {
+ #pragma maintainsSort // important, keys remain sorted. So future
+ // reduce-by-key operations are efficient
+ id1, id2, colId = breakPrimaryKey(k)
+ return (key: {id1, id2}, value: (colId, v))
+}
+
+func breakPrimaryKey(k) {
+ // remove table id and the col_id
+ tableId, remaining = consumeInt(k)
+ id1, remaining = consumeInt(remaining)
+ id2, remaining = consumeInt(remaining)
+ colId = consumeInt(remaining)
+ return (id1, id2, colId)
+}
+
+func buildColMap(k, val) {
+ colId, originalVal = val // unpack
+ a, remaining = consumeInt(originalVal)
+ b, remaining = consumeString(remaining)
+ // output produces a result. Can appear 0, 1 or more times.
+ output (k, {'colId': colId, 'a': a, 'b': b})
+}
+
+func filter(k, v) {
+ // id1 >= 100 AND id2 < 200 AND len(id2) == 5 and b == 77
+ id1, id2 = k
+ if len(id2) == 5 && v.getWithDefault('b', NULL) == 77 {
+ output (k, v)
+ }
+ // if filter doesn't pass, we don't output anything
+}
+
+
+func deletePK(k, v) {
+ id1, id2 = k
+ // delete KV row for column a
+ builtIn::delete(makePK(id1, id2, 'a'))
+ // delete KV row for column b, if it exists
+ if v.hasKey('b') {
+ builtIn::delete(makePK(id1, id2, 'b'))
+ }
+}
+
+func deleteIndexFoo(k, v) {
+ id1, id2 = k
+ b = v.getWithDefault('b', NULL)
+
+ builtIn::delete(makeIndex(id1, b))
+}
+```
+
+### Complexity
+
+This approach involves building the most machinery; it is probably overkill
+unless we want to use that machinery in other ways than SQL.
+
+# Unresolved questions
+
+The question of what unresolved questions there are is, as of yet, unresolved. 
diff --git a/src/current/files/cockroach/docs/RFCS/20160706_expressive_zone_config.md b/src/current/files/cockroach/docs/RFCS/20160706_expressive_zone_config.md new file mode 100644 index 00000000000..95abb4b5d19 --- /dev/null +++ b/src/current/files/cockroach/docs/RFCS/20160706_expressive_zone_config.md @@ -0,0 +1,241 @@ +- Feature Name: Expressive ZoneConfig +- Status: obsolete +- Start Date: 2016-07-06 +- Authors: @d4l3k +- RFC PR: [#7660](https://github.com/cockroachdb/cockroach/pull/7660) +- Cockroach Issue: [#4868](https://github.com/cockroachdb/cockroach/issues/4868) + +# Summary + +This document has been made partially obsolete by more recent changes to +ZoneConfig constraints. For the latest information, please see [the +docs](https://www.cockroachlabs.com/docs/stable/configure-replication-zones.html). + +# Motivation + +The current ZoneConfig format has a number of drawbacks that make it +hard to specify the number of replicas and more complicated constraints. + +Current ZoneConfig format: +```yaml +replicas: +- attrs: [comma-separated attribute list] +- attrs: [comma-separated attribute list] +- attrs: [comma-separated attribute list] +range_min_bytes: +range_max_bytes: +gc: + ttlseconds: +``` +https://www.cockroachlabs.com/docs/v2.0/configure-replication-zones#replication-zone-format + +## Number of replicas + +Currently, the number of replicas is controlled by adding extra `- attrs: []` +elements to the replicas array. + +Comment from @bdarnell on #4866: + +> I'd be in favor of introducing a num_replicas field which would be used in +> place of len(attrs). If attrs are present they would be used to constrain the +> replica selection in the way that they are today, otherwise it is as if there +> are num_replicas empty attr entries. No need to actually address the issues +> around negative or diversity constraints at this point. 
+ +## Replication Constraints + +Currently it's only possible to specify positive constraints, such as a replica +must be placed on a store with an SSD or in `us-west-1`. Each node/store must +have the list of tags provided in the command line arguments to the server. +There's also no way to do wild card matches such as `us-.*`. + +### Types of constraints + +* Hardware + * What type of backing store `hdd`, `ssd`, `mem`. +* Location + * Cloud provider `gce`, `aws` + * Data center `us-west-1` + * Sub data center `rack-12` + * These could also all be combined into one `gce-us-west-1-rack-12`. + +### Potentially desired constraints + +* Positive (must match) + * on `ssd` + * in `us-.*`. +* Negative (must not match) + * not on `hdd` + * not in `us-.*` +* Diversity (replicas distributed to minimize risk) + * 3 replicas with no 2 replicas on the same rack unless there are less than 3 + racks +* Compound + * in `us` or `canada` + * not in `us` or `canada` + * in `us` and on `ssd` + * in `us` and not on `ssd` + +# Detailed design + +Note: This is one potential design. See [Alternatives](#alternatives) and +[Unresolved Questions](#unresolved-questions). + +## New ZoneConfig Format +```yaml +num_replicas: +constraints: [comma-separated constraint list] +range_min_bytes: +range_max_bytes: +gc: + ttlseconds: +``` + +## Constraint system + +This approach would be to extend the current simple tag matching with modifiers +and the ability to do regex matches. + +Comment from @petermattis on #4868: + +> For example, instead of a single expression we could imagine there are a +> series of expressions. `diversity(us-.*), require(hdd), prohibit(eu-.*)` would +> provide diversity across nodes containing an attribute matching `us-.*`, +> require nodes to have the attribute `hdd` and prohibit nodes from containing +> the attribute `eu-.*`. To make this more terse we could provide short-hand +> operators: `~us-.*, +hdd, !eu.*`. 
We'd want some rule about how to drop +> constraints when they can't be achieved. + +This would be implemented as a simple array of string tags (similar to the +current implementation) with three types of constraints. + +### Positive constraints `us-.*`. + +Positive constraints are sufficient for most use cases. The allocator would +take care of automatically maximizing diversity of these constraints so the +user doesn't have to worry about having duplicate replicas on the same rack +unless it's unavoidable. If these constraints can't be matched, the allocator +will fall back to the next closest match. + +Since the allocator will maximize diversity, this allows us to have `or` +statements. Specifying `[ssd, mem]`, will have all replicas put on an SSD or +in-memory store since there won't be any stores that are both. Likewise, +specifying `[us-.*, ssd]` will have all replicas in the US and on SSDs. + +### Required constraints `+us-.*` / Prohibited constraints `-us-.*` + +Required and Prohibited constraints are useful for legal situations where you +need a certain data restriction. This may be used for cases where data has to be +local to a country, or shouldn't be in certain ones. The failure mode would be +blocking new replicas from being added while there isn't enough capacity that +matches the constraints. + +### Node Locality + +The suggested format for locality would be a set of keys and values that would +then be diversified across. This could look like +`--locality cloud=gce,country=us,region=west,datacenter=us-west-1,rack=12`. +Constraints would be applied like `[cloud=gce, country=us]`. + +We would try to maximize diversity within each KV tag. This would make it +easier to tell which tags match each other instead of just the index in the +hierarchy list. + +We would use the order the tags were defined in as a hierarchy for which levels to +prioritize hierarchy for. If the order of tags on one node doesn't match those +on others, we would warn the user. 
In such a case, we will use the order that the
+majority of nodes have.
+
+If we have three datacenters and each datacenter has 3 racks
+`datacenter=us-{1,2,3},rack={{1,2,3},{1,2,3},{1,2,3}}` there's no way to
+distinguish the primary diversity factor automatically, but we can maximize
+diversity on both levels. In this situation, the best thing to do would be to
+have 3 replicas
+`datacenter=us-1,rack=1`, `datacenter=us-2,rack=2` and `datacenter=us-3,rack=3`.
+
+While maximizing diversity in a mostly empty cluster is easy to do, as servers
+fill up we might be in the situation of putting two replicas on the same rack or
+two replicas in the same datacenter. This means that an ordering is needed to
+know which tag diversity is more important.
+
+### Examples
+
+Prefer SSDs and do not put on in-memory stores.
+```yaml
+constraints: [ssd, -mem]
+```
+
+Require data to be stored in the US.
+```yaml
+constraints: [+country=us]
+```
+
+In-memory store, and not in aws.
+```yaml
+constraints: [mem, -cloud=aws]
+```
+
+## Remove per replica attributes
+
+Remove the ability to specify per replica attributes as they're a poor stand in
+for proper diversity constraints.
+
+With a first class diversity system we can get rid of the attributes arrays.
+This also allows for specifying candidate sets that are bigger than the exact
+number of replicas.
+
+### Old format
+
+```yaml
+replicas:
+- attrs: [us-1]
+- attrs: [eur-2]
+- attrs: [asia-3]
+```
+
+### New Format
+
+```yaml
+num_replicas: 3
+constraints: [us-1, eur-2, asia-3]
+```
+
+## Constraint failure modes
+
+By default, the allocator will try to maximize the number of constraints it
+satisfies. If it can't find a perfect match it'll find the best matching store
+and use that one instead.
+
+The only exception is that for hard constraints and prohibited constraints,
+replication will fail and an alert will show up in the admin UI. 
In the future, +we could try and migrate other ranges off of matching stores to free space and +allow for replication to continue. + +## How do you notify users of constraint failures or issues? + +Any issues with constraints will show up in logs and in the admin UI. While +typically a constraint issue shouldn't break the system, it could operate in a +manner that a user doesn't expect. + +## Debugging + +A simple tool that will allow users to specify constraints and view the matching +stores will be added to the web UI. This will allow users to vet any changes +before actually using them. In addition, it will also highlight any existing +invalid configurations such as out of order locality tags, and failing +constraints. + +# Drawbacks + +One drawback is that the new system prevents explicit control over the diversity +of a replica set. There's no way to say "one ssd and two hdds". + +# Alternatives + +# Unresolved questions + +## Weak negative constraints? + +Might be useful to have weak negative constraints such as `!ssd` that won't stop +allocation such as how a prohibited constraint would. There's currently not a +strong use case for it, but should be fairly easy to add later on. diff --git a/src/current/files/cockroach/docs/RFCS/20200331_enums.md b/src/current/files/cockroach/docs/RFCS/20200331_enums.md new file mode 100644 index 00000000000..e894bdd0620 --- /dev/null +++ b/src/current/files/cockroach/docs/RFCS/20200331_enums.md @@ -0,0 +1,398 @@ +- Feature Name: Enum Data Types in CockroachDB +- Status: accepted +- Start Date: 2020-03-31 +- Authors: Rohan Yadav, Lucy Zhang, Andrew Werner, Jordan Lewis +- RFC PR: #47070 +- Cockroach Issue: #24873 + +# Summary + +This RFC proposes adding enum types to CockroachDB. + +# Background + +Enum types are a class of user defined types where the values in +the type are constrained to a fixed set +of user specified values. The system then ensures type safety over operations +on this type. 
This includes ensuring that only values that are members of the +enum can be inserted into a column of the enum type, and that enums can only +be compared to other values of the same enum type. For example, consider an +application that needs to store events and the days of the week that they happen. +This application could use an enum to represent the days of the week. + +```sql +CREATE TYPE day ENUM AS ('monday', 'tuesday', 'wednesday'...); +CREATE TABLE events (id INT, dayofweek day); +INSERT INTO events VALUES (1, 'monday'); +``` + + +# Overview + +To implement enum types in CockroachDB, we have to touch many layers +of the system. In particular, we need to introduce a way of storing +metadata about enums durably in the database. We then need a way to +cache this metadata so that lookups on this metadata is fast, as well +as a way to invalidate this cache when enum metadata changes. When +enum metadata changes, we need to ensure that these changes do not +result in some nodes in the cluster entering a situation where +they are unable to process enum values they find. Lastly, we need +to define a physical layout for enums and integrate enums within +the type system and SQL execution stack. + +# Detailed Explanation + +## Metadata Storage + +Enums themselves are a special case of user-defined types. In order +to lay the groundwork for future work in this area, we propose storing +metadata about an enum in a new descriptor called a `TypeDescriptor`. +This descriptor will be added to the descriptor union alongside table and +database descriptors. The descriptor will store metadata about the type, +including the parent database and schema IDs, a unique ID for the type, and +the name of the type. The descriptor will also include specific information +for the kind of type being stored in the descriptor (as of now there +would only be enums). For enums, this information would include the mapping +of the enum's values to their physical representations. 
A proposal of the +descriptor's contents is below: + +```proto +message TypeDescriptor { + // Used by all kinds of user-defined types. + // Parent database and schema. + uint32 parent_id; + uint32 parent_schema_id; + // ID and Postgres compatible OID of the type. + uint32 id; + uint32 oid; + // Visible name of the type. + string name; + + // Enum specific fields. + message enum_members { + byte[] physical_encoding; + string name; + }; + enum_members[] members; +} +``` + +These descriptors +will be stored in the `system.descriptor` table and will use the leasing +and versioning system being built. There is ongoing work on unifying +the leasing interface so that components are easily shared across +different descriptor types, and we will take advantage of these +systems once they are available. The leasing system will enable caching +and cache invalidation of type descriptors. Until the leasing system +is ready for integration, we will first implement a prototype +that either doesn't use a cache or uses a simple incoherent cache for +`TypeDescriptor` access. + +## Name Resolution + +Enums are scoped within a database and a schema. In Postgres, enums +cannot be accessed from other databases -- they can only be accessed from +different schemas in the same database. However, there is no core reason +that CockroachDB cannot support this. In fact, we might need to support +references of types across databases to be in line with other cross +database references that we currently support. The topic of cross database +references has come up in discussion about +[user defined schemas](https://github.com/cockroachdb/cockroach/pull/48276) +as well. The direction that we take in allowing cross database references +vs allowing only cross schema references will follow what has been decided +in that context. + +Table and type names exist within the same namespace in Postgres. This means +that it is possible to create a type and table of the same name within +the same schema. 
Additionally, tables in Postgres are types themselves
+as a record type where each field is typed like the table's columns. Therefore,
+we will store type namespace entries along with table namespace entries
+in the `system.namespace` table. This allows namespace conflicts between
+types and tables to be properly detected, as well as allowing us to reuse
+a large amount of name resolution logic that exists for table name lookup.
+This strategy also will allow the user defined types implementation to
+adapt to new features like user defined schemas without extra work.
+
+## ID's and OID's
+
+All user defined types will need a stable ID that they are uniquely addressable
+by from within CockroachDB, as well as an OID that can be used for Postgres
+compliant operations. Importantly, the OIDs cannot conflict
+with existing type OIDs. Our strategy is to construct OIDs from the stable ID.
+In particular, the OID of a user defined type is equal to
+`ID + oidext.CockroachPredefinedOIDMax`. This strategy allows us to easily
+map back and forth between OIDs and IDs, and avoid using multiple counters for
+essentially the same information. The offset ensures that no user defined
+types have OIDs that conflict with any preexisting OIDs. This approach will
+naturally extend when we allow treating tables as types.
+
+## Changing Enum Definitions
+
+There are a few ways that enums can change over time.
+* The name can change.
+* The schema the enum is in can change.
+* A new enum member can be added to the set of values.
+* A member in the enum can be renamed.
+* The enum can be dropped.
+
+Renaming an enum or a value in an enum can be done with a write
+to the enum descriptor and then waiting for all nodes to agree on the new value.
+There are plans to lift operations on descriptor names off of the individual
+descriptors, because such operations are common to all of them. This work
+would involve moving the draining names off of descriptors as well. 
It's +possible that this work would be part of or take advantage of this effort. + +The case of adding a new enum element is more difficult. The key difficulty comes +from ensuring that a node does not attempt to translate a physical layout that it +does not know about yet into a user facing representation of the enum. If we naively +just add the new enum value to the enum metadata, it is possible that another node +reads a newly written enum from disk and is unsure how to decode it. Consider the +following sequence of events: +* Node 1 receives a new enum element `foo` to its enum descriptor and blocks on + `WaitForOneVersion` +* Node 2 receives the new enum descriptor update and writes a value with `foo` +* Node 3 tries to read the value of `foo` before receiving the update to + its enum descriptor. + +In order to avoid these situations, we propose an extension of the strategy +used for performing online schema changes. As a reminder, when we add a new +schema object to a table, it moves through a series of states before becoming +usable. As the object moves through these states, the types of operations +that are allowed upon the object change. Between each state, we require that +all nodes in the cluster agree on the new version of the schema object. +For more details, refer to the +[online schema changes RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20151014_online_schema_change.md). +We propose a similar state +progression to adding new elements to an enum type. +1. When a new value is added +to an enum, it is instead placed into a "read only" state. +2. After all nodes agree on the "read only" state, the new enum value +is promoted into the set of writeable values in the enum. + +This process ensures that all nodes know +about all potential enum values before they have a chance to be written. +This approach has the drawback of not being able to add an enum value and +then insert that value in the same transaction. 
This drawback is similar +to our existing limitation of not being able to add a column and insert +into it in the same transaction. + +This enum schema change will be implemented with a new job, rather than +trying to build off of the existing table schema changer. While conceptually +similar to a table schema change, there is not much implementation to share. +This new job will +1. Collect all "read only" enum values and wait for one version in the cluster. +2. Transition these values to "public", and then wait for one version in the cluster. + +A rollback of this job can just remove the "read-only" values. +Additionally, enums don't really need a concept of mutations like tables. The +members of an enum in the enum's `TypeDescriptor` can be tagged with whether +the member is "read only" or public. + +In Postgres, if an enum is dropped without `CASCADE`, the operation will not succeed +if there are any tables that use the enum. If an enum is dropped with +`CASCADE`, all dependent columns are dropped as well. If the database +that an enum is created within is dropped, then the enum +is dropped as well. In order to maintain this information, the +descriptors that represent an enum need to hold back-references to +the tables and columns that use them. We expect the descriptor leasing +system being developed to manage invalidation of cached enums when enums +are destroyed in these cases. + +## Physical Layout + +At first, it may seem that a valid implementation of enum values is +to map each to an integer, and then store these integers on disk. +This implementation seems like it would supply all the ordering +guarantees needed of enums. However, Postgres allows for adding +new enums and specifying the order of the newly created enum +with respect to an existing value of the enum. 
This looks like: +```sql +CREATE TYPE t AS ENUM ('v1', 'v2'); +ALTER TYPE t ADD VALUE 'v1.5' AFTER 'v1' +``` +This means add the value `v1.5` to the enum `t` and order it +after the value `v1`. Using just integers as the backing value +for enums would not allow us to handle this sort of case. +Postgres implements this feature on enums by storing a sorting +order for enums as a float. When a new value is added like this, +Postgres takes the sort orders of the enums that the new enum is +being inserted in between, and creates a float that bisects the +range between the two orders. Concretely, if `v1` had a sort order +of `1.5` and `v2` had a sort order of `2.0`, then `v1.5` would be +inserted with a sort order of `1.75`. However, once the floating +point precision limit has been reached, Postgres rewrites all +sort orders to integral values. Postgres can do this because it +doesn't require a stable disk encoding for enums. In our case, +we need to have a stable encoding to store data on disk if an enum +is used in an index, and cannot afford to rewrite all tables using an +enum if the enum sort order has changed. + +We propose a different strategy that is related to this idea of +bisecting ranges, but doesn't suffer from problems due to floating +point arithmetic precision. The general idea is to use byte arrays +to hold the sort order of our enums, and reserve some bytes in the +arrays to create the ordering that we need. In particular we reserve +the minimum byte (`0`) and have a maximum allowed byte. In practice +this will be `255`. An example of the encoding scheme is below. + +Assume we started with 3 elements (`a`, `b`, `c`), and let the maximum byte value be 3. +The sort order byte arrays for each element would be: +``` +a 1/ +b 2/ +c 3/ +``` +To add an element after `b` we can create a new key that sits in the middle of the range +between `b` and `c`. +``` +a 1/ +b 2/ +d 2/2/ +c 3/ +``` +Now let's add more values before `d`. 
The first one is easy: +``` +a 1/ +b 2/ +e 2/1/ +d 2/2/ +c 3/ +``` +The tricky case is adding a value before `e`. Because we reserved the minimum byte, we can +append it and then bisect the range again. +``` +a 1/ +b 2/ +f 2/0/2 +e 2/1/ +d 2/2/ +c 3/ +``` +This strategy can be extended indefinitely as long as this pattern is followed to reserve +the minimum byte. A prototype of the exact algorithm is included as part of the RFC PR. + +This sort order byte array will be the physical layout and identifier of the enum. We expect +that for small enums only a byte or two will be used to hold all the values, and that our +compression strategies at the storage layer will compress this data well. + +Since the common case of adding members to an enum is to add a member at the beginning +or end of the set of values, we can adjust the algorithm slightly to better +handle this case. When generating a new key byte where one of the endpoints is +the min or max element, the algorithm can add or subtract a small constant from +the existing key rather than bisecting the range. This allows for adding many +more elements to the beginning or end of the range without increasing the +number of bytes used to store the enum. The algorithm can be found implemented in +[this PR](https://github.com/cockroachdb/cockroach/pull/47939). + +## Parsing + +Currently, the CockroachDB grammar is not equipped to handle type names +that are qualified due to changes made in the past that separated parsing of +object and type identifiers. Some of these changes will have to be +reverted/adapted in order to allow for types to have qualifications again. +The work to allow the parser to recognize qualified names has been done in +[this PR](https://github.com/cockroachdb/cockroach/pull/47216). + +## Type System Changes + +The type system of CockroachDB currently makes an assumption that anywhere +a type is present in an AST, that type is statically known. 
In code, this +means that every AST object that holds a type (like a `CastExpr` or +`ColumnDef`) holds a `*types.T`, which is constructed at parse time. +As part of implementing user defined types, the type system must be taught +that all types are no longer statically known. The general idea is to change +the types in AST nodes to a new interface representing an unresolved type +reference. These type references can then be resolved into `*types.T` through +type resolution. Additionally, we must enforce that types are only attempted +to be accessed after type checking, when all type references have been resolved. +A prototype of this approach can be found in +[this PR](https://github.com/cockroachdb/cockroach/pull/47386). + +After the process of type resolution, enums need a `types.T` for interaction +with other components of the system. We will introduce a new family for enums, +and the `types.T` for an enum will contain the stable ID for the +`TypeDescriptor` that backs the type. The `types.T` will also contain extra +fields for an enum like the mapping of names to values. Importantly, these +extra fields will not be serialized as part of the proto. Instead, when a +type is resolved, the returned `*types.T` will be hydrated to populate these +fields. + +A potential option was to avoid using +a `TypeDescriptor` and instead just extend the `types.T` proto to contain +necessary fields for user defined types. However, this is not feasible because +the `types.T` protos are stored on disk in various descriptors. It is too +expensive to update all descriptors that contain a type every time the type +is altered. + +A new `Datum` `DEnum` will be introduced to represent values of the +enums at runtime. A `DEnum` will store the physical representation of the +enum as well as the hydrated `*types.T` of its type. 
The extra fields in the +`*types.T` that hold information about enum values will be used for datum +operations without the need to thread ID resolution capabilities to evaluation +of operations on datums. + +When a user-defined type is created in Postgres, Postgres will automatically +create an alias for an array of the new type. For example, if a user creates +a type `days`, the system would also create the type `_days` as an alias for +`days[]`. This type tracks changes made to the referenced type as it +moves through schemas and is dropped. + +## Semantic Analysis Changes + +The optimizer will need to be taught about the check constraint implied by +a column being of an enum type. Additionally, it will need to be taught how +to convert enum values from their input string representation into their +`Datum` physical representation. + +The `Catalog` that is used by the optimizer will need to be extended to support +resolution of types. The way that the catalog represents user defined types is +important for invalidation of cached plans. If a type is updated, all plans +containing data sources using the type need to be invalidated. + +## DistSQL +The gateway node that plans a SQL query has access to all resolved type +information for the query. Remote nodes that different parts of the query +are planned on need access to this information in order to correctly execute +the query. In particular, these nodes need to hydrate their `*types.T` +containers with metadata and they need to parse and type check serialized +expressions. The hydration of `*types.T` objects can be done at operator +initialization. The trickier problem is type checking serialized expressions -- +we don't want to pay the cost of name resolution again. Our strategy is to +serialize user defined type references with their OIDs similar to how column +references are serialized. All explicit references to user defined types (i.e. +in casts or user defined type value literals) will be serialized like `@<oid>`. 
+The expression initialization process will resolve these OID references to the +correct `TypeDescriptor`. To actually resolve these references, we access the set +of leased descriptors through a `descs.Collection` that is initialized for each +flow. + +# Alternatives + +## Namespacing and Metadata Storage +During discussion of the RFC, some alternatives were debated. In particular, +the ideas of using a separate namespace table for types and/or a separate +descriptor table for metadata storage. The benefit of a separate namespace +table is that it has the potential of making future work in allowing tables +to be interpreted as types more straightforward. However, using a separate +namespace table complicates existing name resolution and conflict detection +strategies. A separate descriptor table allows for scans over all tables or +types to not have to touch descriptors of different types, which is a +performance improvement for catalog table operations. However, this problem +is somewhat orthogonal to this work, and would be better solved by building +some sort of indexing structure on the `system.descriptor` table. +Using the existing namespace table allows most of the existing name resolution +code to be used directly, and using the same descriptor table allows for +leasing primitives to be built on only one system table. + +## Overall Alternative +One alternative approach to this physical layout was to store just an +enum ID on disk, and store ordering and representation information in +a separate lookup table. When operations like on enums would involve +joining or rendering the enums, a join would be produced against this +reference table. This allows for easy changing of enum data, but +results in a variety of complexity during planning. + +# Unresolved questions + +It is unclear what interactions will arise between this work and the +planned/ongoing work with user defined schemas. 
diff --git a/src/current/files/cockroach/docs/RFCS/20200811_non_blocking_txns.md b/src/current/files/cockroach/docs/RFCS/20200811_non_blocking_txns.md new file mode 100644 index 00000000000..fdb5d3841de --- /dev/null +++ b/src/current/files/cockroach/docs/RFCS/20200811_non_blocking_txns.md @@ -0,0 +1,1096 @@ +- Feature Name: Non-Blocking Transactions +- Status: in-progress +- Start Date: 2020-08-11 +- Authors: Nathan VanBenschoten +- RFC PR: #52745 +- Cockroach Issue: None + +# Summary + +Non-blocking transactions are a variant of CockroachDB's standard read-write +transaction protocol that permit low-latency, global reads of read-mostly and +read-only (excluding maintenance events) data. The transaction protocol and the +replication schema that it is paired with differ from standard read-write +transactions in two important ways: +- non-blocking transactions support a replication scheme over Ranges that they + operate on which allows all followers in these Ranges to serve **consistent** + (non-stale) follower reads. +- non-blocking transactions are **minimally disruptive** to reads over the data + that they modify, even in the presence of read/write contention. + +The ability to serve reads from follower and/or learner replicas is beneficial +both because it can reduce read latency in geo-distributed deployments and +because it can serve as a form of load-balancing for concentrated read traffic +in order to reduce tail latencies. The ability to serve **consistent** +(non-stale) reads from any replica in a Range makes the functionality accessible +to a larger class of read-only transactions and accessible for the first time to +read-write transactions. + +The ability to perform writes on read-heavy data without causing conflicting +reads to block is beneficial for providing predictable read latency. 
Such +predictability is doubly important in global deployments, where the cost of +read/write contention can delay reads for 100's of ms as they are forced to +navigate wide-area network latencies in order to resolve conflicts. + +These properties combine to prioritize read latency over write latency for some +configurable subset of data, recognizing that there exists a sizable class of +data which is heavily skewed towards read traffic. + +Non-blocking transactions are provided through extensions to existing concepts +in the CockroachDB architecture (i.e. uncertainty intervals, read refreshes, +closed timestamps, learner replicas) and compose with CockroachDB's standard +transaction protocol intuitively and effectively. + +This proposal serves as an alternative to the [Consistent Read Replicas +proposal](https://github.com/cockroachdb/cockroach/pull/39758). Whereas the +Consistent Read Replicas proposal enforces consistency through communication, +this proposal enforces consistency through semi-synchronized clocks with bounded +uncertainty. + +# Motivation / Making Global Easy + +Various efforts over the past two years have recognized the need for a +simplified and more prescriptive approach towards global deployment of +CockroachDB. These efforts have called out a lack of high-level abstractions, +incomplete topology patterns, and missing architectural support for read-heavy +global (non-localized) data as blockers towards this goal. + +Working within the framework discussed in the [CockroachDB Multi-Region +Abstractions](https://docs.google.com/document/d/16ON-HPv5qFjK4sXuJQRqI1prfZYh4sBBAa2F0gkPkwQ/edit?usp=sharing) +document, this proposal identifies non-blocking transactions as the answer to +this missing architectural component and a foundation upon which we can build +upon to define sensible topology patterns and provide much needed high-level +abstractions. 
+ +The CockroachDB Multi-Region Abstractions doc suggested that SQL tables are +split into two categories: "geo-partitioned" and "reference". It then discussed +a "hub-and-spokes" replication topology, wherein each Range in the system is +composed of a limited set of nearby voter replicas, whose diameter is based on +the desired failure tolerance (e.g. zone or region failure tolerance), combined +with one or more learner replicas in every other region. This topology minimizes +consensus latency while establishing a covering of data across all regions to +minimize read latency, subject to transaction constraints. Working within this +model, this RFC considers non-blocking transactions to be the key architectural +advancement needed to support "reference" tables. + +With non-blocking transactions and a hub-and-spokes replication topology, the two +categories of SQL tables have the following behavior: + +| | geo-partitioned | reference | +|--------------------------------------|-------------------------|---------------------------| +| data locality | local | global | +| data access | read-often, write-often | read-mostly, write-rarely | +| local read latency | fast | N/A | +| local write latency | fast | N/A | +| remote/global read latency | slow, fast if stale | fast | +| remote/global write latency | slow | slow | +| reads block on local writes | yes, fast | N/A | +| writes block on local writes | yes, fast | N/A | +| reads block on remote/global writes | yes, slow | no, fast | +| writes block on remote/global writes | yes, slow | yes, slow | + +_where: fast < 5ms, slow > 100ms_ + +We can see that data within geo-partitioned tables remains fast to read and +write within its local region, but slow to access from remote regions. Remote +reads in read-only transactions have the opportunity to downgrade to a lower +consistency level (i.e. exact or bounded staleness reads) to improve latency, +but read-write transactions that read from or write to remote data are slow. 
+Meanwhile, data within reference tables is fast to read from any region, even +within read-write transactions and even with read/write contention. However, +data within reference tables is consistently slow to write to from any region. + +This structure makes it easy to identify which category a given table falls +into. If its data has geographic access locality then it should be set to +"geo-partitioned". If not, then it should be set to "reference". + +This effectively handles all forms of data except that which has no access +locality and is also write-heavy. Such data is fundamentally incompatible with +low-latency modifications under linearizability across large geographic +distances. These access patterns require either synchronous coordination or +weakened [consistency levels](https://en.wikipedia.org/wiki/Eventual_consistency) and limited +[operational generality](https://en.wikipedia.org/wiki/Conflict-free_replicated_data_type). +Both of these compromises make global data harder to work with and appear +antithetical to the goal of "making global easy", so we deem the communication +latency necessary for linearizable writes to global data to be an acceptable cost. + +To summarize, this proposal considers non-blocking transactions to be a key +architectural advancement necessary to support low-latency reads of global data, +which itself is a must-have for many, if not most, global deployments. + +## What's wrong with follower reads? + +Closed timestamps and [follower +reads](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20180603_follower_reads.md) +provide a mechanism to serve *consistent stale reads* from follower replicas of +a Range without needing to interact with the leaseholder of that Range. There +are two primary reasons why a user of CockroachDB may want to use follower +reads: +1. 
to avoid wide-area network hops: If a follower for a Range is in the same + region as a gateway and the leaseholder for that Range is in a separate + region as the gateway, follower reads provides the ability to avoid an + expensive wide-area network jump on each read. This can dramatically reduce + the latency of these reads. +2. to distribute concentrated read traffic: Range splitting provides the ability to + distribute heavy read and write traffic over multiple machines. However, Range + splitting cannot be used to spread out concentrated load on individual keys. + Follower reads provides a solution to the problem of concentrated read + traffic by allowing the followers of a Range, in addition to its leaseholder, + to serve reads for its data. + +However, this capability comes with a large asterisk. Follower reads are only +suitable for serving _**historical**_ reads from followers. They have no ability +to serve consistent reads at the current time from followers. Even with +[attempts](https://github.com/cockroachdb/cockroach/pull/39643) to reduce the +staleness of follower reads, their historical nature will always necessarily +come with large UX hurdles that limit the situations in which they can be used. + +The most damaging of these hurdles is that, for all intents and purposes, follower +reads cannot be used in any read-write transaction - their use is limited to read-only +transactions. This dramatically reduces their usefulness, which has caused us to +look for other solutions to this problem, such as [duplicated indexes](https://www.cockroachlabs.com/docs/stable/topology-duplicate-indexes.html) +to avoid WAN hops on foreign key checks. + +Another hurdle is that the staleness they permit requires buy-in, so accessing +them from SQL [requires application-level changes](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20181227_follower_reads_implementation.md). 
+Users need to specify an `AS OF SYSTEM TIME` clause on either their statements +or on their transaction declarations. This can be awkward to get right and +imposes a large cost on the use of follower reads. Furthermore, because a +statement is the smallest granularity that a user can buy-in to follower reads +at, there is a strong desire to support [mixed timestamp statements](https://github.com/cockroachdb/cockroach/issues/35712), +which CockroachDB does not currently support. + +Because of all of these reasons, follower reads in its current form remains a +specialized feature that, while powerful, is also not usable in a lot of +important cases. It's becoming increasingly clear that to continue improving +upon our global offering, we need a solution that solves the same kinds of +problems that follower reads solves, but for all forms of transaction and +without extensive client buy-in. This RFC presents a proposal that fits within a +spectrum of possible solutions to address these shortcomings. + +## What's wrong with the "duplicate index" pattern? + +CRDB's current "solution" for people wanting local consistent reads is the +[duplicated index pattern](https://www.cockroachlabs.com/docs/stable/topology-duplicate-indexes.html). +This RFC proposes replacing that pattern with capabilities built into the lower +layers of the system, on the argument that it will result in a simpler, cheaper +and more reliable system. This section discusses the current pattern (in +particular, its problems). + +The duplicate indexes idea is that you create a bunch of indexes on your table, +one per region, with each index containing all the columns in the table (using +the `STORING (...)` syntax). Then you create a zone config per region, +pinning the leaseholders of each respective index to a different region. Our query +optimizer is locality-aware, and so, for each query trying to access the table +in question, it selects an index collocated with the gateway in the same +locality. 
We're thus taking advantage of our transactional update semantics to +keep all the indexes in sync and get the desired read latencies without any help +from the KV layer. + +While neat, there's some major problems with this scheme which make it a tough +proposition for many applications. Some of them are fixable with more work, at +an engineering cost that seems greater than the alternative in this RFC, others +seem more fundamental assuming no cooperation from KV. + +### Problem 1: Read Latency under Contention + +Without contention, reads using the duplicate index pattern are served locally +at local latencies. With an index + pinned leaseholder in each region, any read +on a "duplicate index" table can be served in a few milliseconds without a WAN +hop. Better yet, these reads are not stale, so they can be comfortably used in +read-only and read-write transactions (often during foreign key checks). + +But these local read latencies can provide a false sense of performance +predictability. While the duplicate index pattern optimizes for read latency, it +badly pessimizes write latency. On the surface, this seems to be the expected +tradeoff. The problem is that when reads and writes contend, reads can get +blocked on writes and have to wait for writing transactions to complete. If the +writing transaction is implicit, this results in at least 1 WAN RTT of blocking. +If the writing transaction is explicit, this results in at least 2 WAN RTTs of +blocking. + +Reads that users expect to be fast (1ms-3ms) can quickly get two orders of +magnitude slower (120ms-240ms or more) due to any read/write contention. As +we've seen, this can be a shocking violation of expectations for users. + +Over the past few months, we've seen users recognize this issue, both with the +duplicate index pattern and with standard CockroachDB transactions. The +corresponding ask has often been to support a `READ COMMITTED` isolation level. 
+Our response is to perform stale reads using `AS OF SYSTEM TIME` to avoid +contention, but this pushes the burden onto the readers to accept stale data in +exchange for more predictable read latency. This seems backwards, as avoiding +blocking should really be the responsibility of the infrequent writers. It also +only works if the readers are read-only transactions, as read-write transactions +cannot use `AS OF SYSTEM TIME`, which removes a large class of use cases like +foreign key checks in read-write transactions. + +### Problem 2: Ergonomics + +So you’ve got 15 regions and 20 of these global tables. You’ve created 15x20=300 +indexes on them. And now you want to take a region away, or add a new one. You +have to remember to delete or add 20 indexes. Tall order. And it gets better: +say you want to add a column to one of the tables. If, naively, you just do your +`ALTER TABLE` and add that column to the primary key, then, if you had any +`SELECT *` queries (or queries autogenerated by an ORM) which used to be served +locally from all regions, now all of a sudden all those indexes we’ve created +are useless. Your application falls off a cliff because, until you fix all the +indexes, all your queries travel to one central location. The scheme proves to +be very fragile, breaking surprisingly and spectacularly. + +What would it take to improve this? Focusing just on making the column +components of each index eventually consistent, I guess we'd need a system that +automatically performs individual schema changes on all the indexes to bring +them up to date, responding to table schema changes and regions coming and +going. Tracking these schema changes, which essentially can't be allowed to +fail, seems like a big burden; we have enough trouble with sporadic +user-generated schema changes without a colossal amplification of their number +by the system. 
We'd also presumably need to hide these schema changes from the +user (in fact, we need to hide all these indexes too), and so a lot of parts of +the system would need to understand about regular vs hidden indexes/schema +changes. That alone seems like major unwanted pollution. And then there's the +issue of wanting stronger consistency between the indexes, not just eventual +consistency; I'm not sure how achievable that is. + +### Problem 3: Fault tolerance + +You’ve got 15 regions - so 15 indexes for each reference table - and you’ve used +replication settings to “place each index in a region”. How exactly do you do that? +You've got two options: +1. Set a constraint on each index saying `+region=`. The problems with this +is that when one region goes down (you’ve got 15 of them, they can’t all be +good) you lose write-availability for the table (because any writes needs to +update all the indexes). So that's no good. + +2. Instead of setting a constraint which keeps all of an index’s replicas in one +region, one can let the system diversify the replicas but set a “leaseholder +preference” such that reads of the index from that region are fast, but the +replicas of the index are more diverse. You got to be careful to also constrain +one replica to the desired leaseholder region, otherwise the leaseholder +preference by itself doesn't do anything. Of course, as things currently stand, +you have to do this 15 times. This configuration is resilient to region failure, +but there's still a problem: any leaseholder going away means the lease goes out +of region - thus making reads in the now lease-less region non-local (because we +only set up one replica per region). + +So, all replicas in one region is no good, a single replica in one region is not +good, and, if the replication factor is 3, having 2 replicas in one region is +also not good because it’s equivalent to having all of them in one region. 
What +you have to do, I guess, is set a replication factor of at least 5 *for each +index* and then configure two replicas in the reader region (that’s 3x5=15 +replicas for every index for a 3-region configuration; 10x5=50 replicas in a +10-region configuration). In contrast, with this RFC's proposal, you'd have 2 +replicas per region (so 2 per region vs 5 per region). Our documentation doesn't +talk about replication factors and fault tolerance. Probably for the better, +because these issues are hard to reason about if you don't spend your days +counting quorums. This point shows the problem with doing such replication +schemes at the SQL level: we hide from the system the redundancy between the +indexes (and between the replicas of different indexes), and so then we need to +pay a lot to duplicate storage and replication work just to maintain the +independent availability of each index. + +# Working Assumptions + +The bulk of this proposal is founded on the idea that at some time in the +future, we will be able to dramatically reduce the clock uncertainty that we +feel comfortable running with, either across the board or at least within +CockroachCloud. + +Wide area network (WAN) round-trip time (RTT) latencies can be as large as 200ms +in the global deployment scenarios that we are interested in (ref: +[AWS](https://docs.aviatrix.com/_images/inter_region_latency.png), +[Azure](https://docs.aviatrix.com/_images/arm_inter_region_latency.png), +[GCP](https://docs.aviatrix.com/_images/gcp_inter_region_latency.png)). The +current default clock uncertainty offset is 500ms. This proposal explores the +half of the design space in which clock uncertainty bounds drop below WAN RTT +time. Within this regime, it becomes cheaper to establish causality between +events using clocks and waiting than by communicating with distant nodes. 
+ +As such, the uncertainty bounds necessary for this proposal to make sense are +much lower than what we currently use, but are also well within the realm of +possibility for software-only solutions. Clouds are continuing to invest into +the software-level time synchronization services that they provide. For +instance, Amazon released a [Time Sync +Service](https://aws.amazon.com/about-aws/whats-new/2017/11/introducing-the-amazon-time-sync-service/) +in 2017 that strives to provide reliable time references for EC2 instances. +Similarly, software-level time synchronization has seen interest from the +academic community in recent years [4], with research even leading to +[productionized service offerings](https://www.ticktocknetworks.com/) in some +cases. Lastly, users are already running CockroachDB with battle-tested time +synchronization services like [Kronos](https://github.com/rubrikinc/kronos) +which we may eventually explore using or extending. + +All of this means that dramatically reducing the clock uncertainty bounds that +we run with seems realistic over the next few years. Combined with the fact +that this proposal makes sense even for fairly large clock uncertainty values +(up to 200ms), it is safe to say that atomic clocks and TrueTime are not +prerequisites for this proposal! + +For the rest of this proposal, we will assume that the clock uncertainty offset +is configured to 30ms. We will also assume that the global cluster topologies +that we are interested in have diameters of 120ms RTTs. The proposal will still +work with uncertainty offsets larger than this value, but with a larger offset, +writers and contended readers (those that observe values in their uncertainty +interval) will be forced to wait longer in some situations. So while a low clock +uncertainty offset is not a prerequisite for this proposal, one does more +effectively demonstrate the benefits that the proposal provides. 
+ +# Guide-level explanation + +## Non-Blocking Transactions + +The RFC introduces the term "non-blocking transaction", which is a variant of +the standard read-write transaction that performs locking in a manner such that +contending reads by other transactions can avoid waiting on its locks. + +The RFC then introduces the term "non-blocking Range", which is a Range in which +any transaction that writes to it is turned into a non-blocking transaction, if +it is not one already. In doing so, these non-blocking Ranges are able to +propagate [closed timestamps](20180603_follower_reads.md) in the future of +present time. The effect of this is that all replicas in a non-blocking Range +are expected to be able to serve transactionally-consistent reads at the present +time. In this sense, all followers and all learners (non-voting followers) in a +non-blocking Range implicitly behave as "consistent read replicas". + +In the reference-level explanation, we will explore how non-blocking +transactions are implemented, how their implementation combines with +non-blocking Ranges to provide consistent read replicas, how they interact with +other standard read-only and read-write transactions, and how they interact with +each other. + +For now, it is sufficient to say that "non-blocking transactions" are hidden +from DML statements. Instead, users interact with "non-blocking Ranges" by +choosing which key ranges should consist of non-blocking Ranges. These Ranges +are configured using zone configurations, though we'll likely end up adding a +nicer abstraction on top of this (e.g. `CREATE REFERENCE TABLE ...`). + +In the future, we could explore running non-blocking transactions on standard +Ranges, which would still provide the "non-blocking property" (i.e. less +disruptive to conflicting reads) without providing the "consistent reads from +any follower" property. However, doing so is not explored in this RFC. 
+
+## Example Use Cases
+
+### Reference Tables
+
+It is often the case that a schema contains one or more tables composed of
+immutable or rarely modified data. These tables fall into the category of "read
+always". In a geo-distributed cluster where these tables are read across
+geographical regions, it is highly desirable for reads of the tables to be
+servable locally in all regions, not just in the single region that the tables'
+leaseholders happen to land. A common example of this arises with foreign keys
+on static tables.
+
+Until now, our best solution to this problem has been [duplicated indexes](https://www.cockroachlabs.com/docs/v19.1/topology-duplicate-indexes.html).
+This solution requires users to create a collection of secondary SQL indexes that
+each contain every single column in the table. Users then use zone configurations
+to place a leaseholder for each of these indexes in every geographical locality.
+With buy-in from the SQL optimizer, foreign key checks on these tables are able
+to use the local index to avoid wide-area network hops.
+
+A better solution to this problem would be to use non-blocking Ranges for these
+tables. This would reduce operational burden, reduce write amplification, avoid
+blocking on contention, and increase availability.
+
+### Read-Heavy Tables
+
+In addition to tables that are immutable, it is common for schemas to have some
+tables that are mutable but only rarely mutated. An example of this is a user
+profile table in an application for a global user base. This table may be read
+frequently across the world and yet it is expected to be updated infrequently.
+
+## Transaction Latency Comparison
+
+To get a feel for the interactions between standard and non-blocking
+transactions, we estimate the latency that different transaction types would see
+with and without contention. 
Here, we continue to make the clock synchronization +and geographic latency [assumptions](#Working-Assumptions) we made earlier (30ms +uncertainty, 120ms WAN RTT). + +We compare under two different cluster topologies. The first is a "hub and +spokes" cluster topology that places long-lived learners for every Range in each +region but keeps a Range's write quorum within a region (~5ms replication). The +second is a "global replication" topology that places a voter for every Range in +each region, which increases replication latency to 120ms. We also add a fixed +3ms latency for the evaluation of each transaction. + +We assume the remote reads are able to read from follower replicas, except where +otherwise specified. + +### Hubs and Spokes + +| | without contention | contending with standard txn | read contending with non-blocking txn | write contending with non-blocking txn | +|------------------------------------------------------|-----------------|--------------------|------------------|------------------| +| local read-only txn | 3ms | 21ms (3+8+10) | 33ms (3+30) | N/A | +| remote read-only txn | 3ms | 141ms (3+8+10+120) | 33ms (3+30) | N/A | +| remote read-only txn (no follower reads) | 123ms (3+120) | 141ms (3+8+10+120) | N/A | N/A | +| read-write txn with local reads | 8ms (3+5) | 26ms (8+8+10) | 38ms (8+30) | 138ms (8+130) | +| read-write txn with remote reads | 8ms (3+5) | 146ms (8+8+10+120) | 38ms (8+30) | 138ms (8+130) | +| read-write txn with remote reads (no follower reads) | 128ms (3+5+120) | 146ms (8+8+10+120) | N/A | N/A | +| non-blocking read-write txn | 138ms (3+5+130) | 156ms (138+8+10) | 156ms (138+8+10) | 156ms (138+8+10) | + +NOTE: 8+10 in various places is read-write txn's sync contention footprint plus +its async contention footprint. + +NOTE: assume `non_blocking_duration` is equal to RTT/2 + uncertainty + some padding = 130ms. 
+ +### Global Replication + +| | without contention | contending with standard txn | read contending with non-blocking txn | write contending with non-blocking txn | +|------------------------------------------------------|-------------------|-------------------------|---------------------|---------------------| +| local read-only txn | 3ms | 366ms (3+123+240) | 33ms (3+30) | N/A | +| remote read-only txn | 3ms | 486ms (3+123+240+120) | 33ms (3+30) | N/A | +| remote read-only txn (no follower reads) | 123ms (3+120) | 486ms (3+123+240+120) | N/A | N/A | +| read-write txn with local reads | 123ms (3+120) | 486ms (123+123+240) | 153ms (123+30) | 253ms (123+130) | +| read-write txn with remote reads | 128ms (3+120) | 606ms (123+123+240+120) | 153ms (123+30) | 253ms (123+130) | +| read-write txn with remote reads (no follower reads) | 243ms (3+120+120) | 606ms (123+123+240+120) | N/A | N/A | +| non-blocking read-write txn | 253ms (3+120+130) | 501ms (138+123+240) | 501ms (138+123+240) | 501ms (138+123+240) | + +NOTE: 123+240 in various places is read-write txn's sync contention footprint plus +its async contention footprint. + +# Reference-level explanation + +## Detailed design + +High-level overview: + +- Non-blocking transactions read and write at `non_blocking_duration` in the + future of present time. +- After committing, they wait out the `non_blocking_duration` before + acknowledging the commit to the client. +- Conflicting non-stale readers ignore future writes until they enter their + uncertainty interval, after which they wait until the write's timestamp is + above the reader's clock before reading the value (maximum wait of max clock + offset = 30ms). +- non-blocking Range leaseholders close out timestamps in the future of present + time. +- non-blocking Range followers receive these future-time closed timestamps and + can serve present time follower reads. +- KV clients are aware that they can route read requests to any replica in a + non-blocking Range. 
+
+### Aside: "Present Time" and Uncertainty
+
+Today, all transactions in CockroachDB pick a provisional commit timestamp from
+their gateway's local HLC when starting. This provisional commit timestamp is
+where the transaction performs its reads and where it initially performs its
+writes. For various reasons, a transaction may move its timestamp forward, which
+eventually dictates its final commit timestamp. However, a standard transaction
+maintains the invariant that its commit timestamp is always lagging "present"
+time.
+
+The meaning of present time is somewhat ambiguous in a system with
+semi-synchronized clocks. For the purpose of this document, we can define it as
+the time observed on the node in the system with the fastest clock. In a well
+behaved cluster that respects its configured maximum clock offset bounds,
+present time must be no more than `max_offset` in the future of the time
+observed on any other node in the system. This guarantee is the foundation upon
+which uncertainty intervals enforce single key linearizability within
+CockroachDB. If a write were to commit from the fastest node in the cluster at
+"present time", a causally dependent read from the slowest node would be
+guaranteed to see the write either at a lower timestamp than its provisional
+timestamp or at least in its uncertainty interval. Therefore, if reading
+transactions make sure to observe all values in their uncertainty interval,
+stale reads are not possible and linearizability is preserved.
+
+### Writing in the Future
+
+As defined above, "non-blocking transactions" perform locking in such a way that
+contending reads do not need to wait on their locks. In practice, this means
+that non-blocking transactions **perform their writes at a timestamp in advance
+of present time**. In a sense, we can view this as scheduling a change to happen
+at some time in the future. 
+ +The result of this is that the values written by a non-blocking transaction are +committed and their locks removed (i.e. intents resolved) by the time that a +read of conflicting keys needs to observe the effect of a non-blocking +transaction. If the locks protecting a non-blocking transaction are removed by +the time that its effects drop below the read timestamp of a conflicting read +then the read can avoid the questions: "Did this conflicting transaction commit? +Should I trust its effects?". Instead, it can simply go ahead and read the value +(\*) because it knows that the transaction committed, no coordination required. + +This need for coordination to determine whether a read should observe the +effects of a conflicting write is the fundamental reason why writes can be so +disruptive to conflicting reads. We saw that in [Problem 1: Read Latency under +Contention](#Problem-1:-Read-Latency-under-Contention). A read that conflicts +with the lock and the provisional value of a write cannot determine locally +whether that lock and value are pending, committed, or aborted. Instead, it +needs to reach out to that transaction's record. Transaction priorities dictate +whether the read waits or whether it can force a pending writer out of its way. +Either way, coordination is required, and as a result, the read may need to +perform a remote network hop (or several). Depending on the location of the +conflicting write's transaction record, this network hop may be over the WAN and +may cost 100's of ms. + +By scheduling the writes sufficiently far in the future, we ensure that reads +never observe locks or provisional values, only committed values that have been +scheduled to go into effect at a specific point in time. Coordination on the +part of the read is no longer required. As a result, non-blocking writes are no +longer disruptive to conflicting reads. + +\* Things are never quite this easy. 
See [Uncertainty Intervals: To Wait or Not to Wait](#Uncertainty-Intervals:-To-Wait-or-Not-to-Wait). + +#### Aside: Communication vs. Conflict Boundaries + +It is interesting to note that this approach of moving "communication outside of +conflict boundaries" is present in other attempts to provide serializable, +low-latency, geo-replication transactions. Specifically, it is found in +deterministic database architectures like SLOG, which establish an ordering of +transactions before evaluating their result [5]. In doing so, transactions avoid +wide-area network latencies while holding locks or blocking other transactions, +minimizing their contention footprint and maximizing throughput under +contention. + +Interactive transactions are largely incompatible with deterministic execution, +as they submit transactions to the database in pieces. However, we can still +recover some of the benefits of deterministic execution by writing in the +future. Specifically, writing in the future moves WAN communication outside of +read/write conflict boundaries. However, it does not move the WAN communication +outside of write/write conflict boundaries. + +#### Synthetic Timestamps + +Writing in the future is new for CockroachDB. In fact, talking about time in the +future in any capacity has been traditionally frowned upon. Instead, we try to +only ever pass around HLC timestamps that were pulled from a real HLC clock. +This ensures that if the timestamp is ever used to +[Update](https://github.com/cockroachdb/cockroach/blob/bbbfdbd1919c6de411742c8442cfc3903d33ee86/pkg/util/hlc/hlc.go#L326-L332) +an HLC clock, the resulting clock is guaranteed to still be within the +`max_offset` of all other nodes. + +So we need to be careful with deriving future timestamps from timestamps pulled +from an HLC. To that end, this RFC proposes the introduction of "synthetic +timestamps". 
Synthetic timestamps are identical to standard timestamps (64 bit
+physical, 32 bit logical), except that they make no claim about the value of the
+clock that they came from, or even that they came from a clock at all. As such,
+they are allowed to be arbitrarily disconnected from the clocks in the system.
+
+##### Representation
+
+To distinguish between "synthetic" and "real" timestamps, this RFC proposes that
+we reserve a high-order bit in either the physical or the logical component of
+the existing timestamp structure to denote this fact.
+
+##### Timestamp.Forward
+
+Merging a "synthetic" and "real" timestamp, typically done using a `Forward`
+operation, obeys the following rule: the `synthetic` bit from the larger of the
+two timestamps is carried over to the result. In the case of a tie between a
+"synthetic" and "real" timestamp, the `synthetic` bit is not carried over to the
+result. This is because a timestamp with a "real" bit is a firmer guarantee and
+carries more information than a timestamp with a "synthetic" bit – such a timestamp
+not only describes a time, but also guarantees that the time is less than or
+equal to "present time".
+
+
+Examples:
+```
+Forward({synth, 5}, {synth, 6}) = {synth, 6}
+Forward({real, 5}, {synth, 6}) = {synth, 6}
+Forward({synth, 5}, {real, 6}) = {real, 6}
+Forward({real, 5}, {real, 6}) = {real, 6}
+
+Forward({synth, 6}, {synth, 6}) = {synth, 6}
+Forward({real, 6}, {synth, 6}) = {real, 6}
+Forward({synth, 6}, {real, 6}) = {real, 6}
+Forward({real, 6}, {real, 6}) = {real, 6}
+```
+
+##### hlc.Update
+
+Outside of identifying synthetic vs. real timestamps, we must make one more
+change. The hybrid-logical clock structure exposes a method called `Update` that
+forwards its value to that of a real timestamp received from another member of
+the cluster. 
The best way to understand this method and its usage is that
+`Update` guarantees that after it returns, the local HLC clock will have a value
+equal to or greater than the provided timestamp and that no other node's HLC
+clock can have a value less than `timestamp - max_offset`.
+
+For "real" timestamps, it is cheap to implement this operation by ratcheting a
+few integers within the HLC because we know that the timestamp must have come
+from another node, which itself must have been ahead of the slowest node by less
+than `max_offset`, so the ratcheting operation will not push our HLC further
+than the `max_offset` away from the slowest node.
+
+For "synthetic" timestamps, we have to be more careful. Since the timestamp says
+nothing about the "present time", we can't just ratchet our clock's state.
+Instead, `hlc.Update` with a synthetic timestamp needs to wait until the HLC
+advances past the synthetic timestamp either on its own or due to updates from
+real timestamps. By waiting, we once again ensure that by the time the method
+call returns, no other node's HLC clock can have a value less than
+`timestamp-max_offset`.
+
+#### Commit Wait: Not Just for the Cool Kid With the Fancy Hardware
+
+The original [Spanner paper](https://storage.googleapis.com/pub-tools-public-publication-data/pdf/65b514eda12d025585183a641b5a9e096a3c4be5.pdf)
+discussed how the system added a delay to all read-write transactions before
+returning to the client to ensure that all nodes in the system had clocks in
+excess of the commit timestamp of a transaction before that transaction was
+acknowledged. This was discussed in section 4.1.2 and section 4.2.1 of the
+paper:
+
+> Before allowing any coordinator replica to apply the commit record, the coordinator
+> leader waits until TT.after(s), so as to obey the commit-wait rule described in
+> Section 4.1.2. 
Because the coordinator leader chose s based on TT.now().latest, and
+> now waits until that timestamp is guaranteed to be in the past, the expected wait
+> is at least 2 ∗ ε̄. This wait is typically overlapped with Paxos communication. After
+> commit wait, the coordinator sends the commit timestamp to the client and all other
+> participant leaders.
+
+Up to this point, CockroachDB has avoided this concern because of its use of
+uncertainty intervals. Instead of transactions ensuring that all nodes in the
+system have clocks in excess of the commit timestamp of a transaction before
+acknowledging it to the client, **CockroachDB ensures that all nodes in the
+system have clocks in excess of the commit timestamp of a transaction minus the
+`max_offset` (a weaker guarantee) before acknowledging it to the client**. This
+is satisfied trivially because, to this point, transactions have always written
+at or below "present time", which is by definition no further than `max_offset`
+ahead of the slowest node in the cluster. CockroachDB then implements
+uncertainty intervals on subsequent transactions to make up for this weakened
+guarantee and ensure linearizability just like Spanner.
+
+If we start committing transactions in the future, this guarantee is no longer
+trivially satisfied. If we want to ensure linearizability for the writes of
+these non-blocking transactions, we need to do a little extra work to ensure
+that the clocks on all nodes are sufficiently close to the commit timestamp of
+the transactions before acknowledging their success to clients.
+
+Conveniently, we already have a formalism for this – `hlc.Update`. Commit Wait
+would naturally fall out of calling `hlc.Update(txn.CommitTimestamp)` before
+acknowledging any transaction commit to a client. This would reduce to a no-op
+for standard transactions, and would result in a wait of up to
+`non_blocking_duration` for non-blocking transactions. 
This wait artificially
+increases the latency of non-blocking transactions, but critically, it ensures
+that we continue to preserve linearizability.
+
+It is worth noting that, like Spanner, this CommitTimestamp is chosen before the
+final round of consensus replication. This means that the wait will overlap with
+consensus communication and will therefore be less disruptive to non-blocking
+transactions than it may initially seem. So instead of the
+`non_blocking_duration` adding latency to the end of a non-blocking transaction,
+it will simply hide the rest of the transaction's latency. This means that given
+our estimate for the `non_blocking_duration` of 130ms in a global cluster, we
+would expect non-blocking transactions to have a latency of roughly 130ms.
+
+#### Uncertainty Intervals: To Wait or Not to Wait
+
+Non-blocking transactions also force us to rethink uncertainty intervals and
+question both how and why they work. Uncertainty intervals are timestamp ranges
+within which a transaction is unable to make claims of causality. Given a
+bounded clock offset of `max_offset` between nodes, a transaction cannot
+definitively claim that a write that it observes within a `max_offset` window on
+either side of its original timestamp is causally ordered before it. But of
+course, it would be impossible for the write to be causally ordered after it. So
+to ensure that all possible causal dependencies are captured and linearizability
+is enforced, a transaction will bump its timestamp to ensure that it observes
+all values in its uncertainty interval. This either incurs a transaction refresh
+or a full transaction restart.
+
+The writes performed by non-blocking transactions are no exception. When a read
+observes a write performed by a non-blocking transaction in its uncertainty
+interval, it will need to bump its timestamp so that it observes the write. 
+ +The complication here is that if a reading transaction bumped its timestamp +above a value written by a non-blocking transaction in its uncertainty interval, +it could end up with a timestamp in the future of "present time". This could +risk resulting in a stale read if this read immediately committed and then +triggered a causally dependent read that hit a slower gateway where the written +value was not considered in the second read's uncertainty interval. The following +example demonstrates this hazard: + +``` +params: + clock_offset = 10 + non_blocking_duration = 50 + +clocks: + node A's clock = 100 + node B's clock = 95 + +- txn 1 begins on node A +- txn 1 writes in future @ time 150 +- txn 1 commits +- txn 1 begins waiting out non_blocking_duration before returning to client + +- time advances by 40 +clocks: + node A's clock = 140 + node B's clock = 135 + +- txn 2 begins on node A +- txn 2 reads at 140 and observes write in uncertainty interval +- txn 2 returns write to client +- txn 2 commits and acks client + +- txn 3 is triggered by commit of txn 2 (causal dependency) +- txn 3 begins on node B +- txn 3 reads at 135 and observed no write in uncertainty interval +- txn 3 returns nothing to client +- txn 3 commits and acks client + +- time advances by 10 +clocks: + node A's clock = 150 + node B's clock = 145 + +- txn1 acks client + +HAZARD! txn3 just saw stale read. It was triggered by txn2, which had observed + the value written by txn1, but it did not observe the value. This is a + violation of the monotonic reads property, and therefore a violation of + single-key linearizability. +``` + +Careful readers may notice that the first read-only transaction (txn2) would +have had to bump its timestamp to 150 upon observing a value in its uncertainty +interval. 
So if it had respected the Commit Wait rule then it would have been
+delayed before committing, after which point the stale read would not be
+possible because node B's clock would have advanced far enough that the value
+would have been in the second read-only transaction's (txn3) uncertainty
+interval.
+
+But relying on Commit Wait here violates a desirable property of our transaction
+model – that committing a read-only transaction is a no-op. To recover this
+property, we'll want to wait out the uncertainty before the refresh/retry.
+Again, the easiest way to do this is to `hlc.Update(txn.ReadTimestamp)` before
+allowing a refreshed/retried transaction to read at its new timestamp. This
+again becomes a no-op when the new ReadTimestamp is "real" and not "synthetic".
+
+So while reading transactions which conflict with writes from non-blocking
+transactions never block on locks, because those locks have already been
+released by the time the read conflicts with the write, these reading
+transactions do occasionally need to wait to resolve uncertainty and ensure that
+linearizability is maintained. We present the full picture of how non-blocking
+transactions interact with standard transactions [later
+on](#non-blocking-and-standard-transaction-interactions).
+
+### Future-Time Closed Timestamps
+
+To this point, we've talked about writing in the future as a means to reduce
+contention, but doing so has a second major benefit – it enables non-stale
+follower reads if coupled with a closed timestamp mechanism that closes off
+timestamps in the future.
+
+This RFC proposes that "non-blocking Ranges" use a separate closed timestamp
+tracker that is configured to close time out at least `WAN_RTT / 2 + max_offset`
+in the future. This ensures that all followers in these ranges will hear about
+active closed timestamps above "present time" so that they can serve all
+"present time" reads locally. 
+
+The details of this change are TBD, but all complications here appear to be
+engineering related and not foundational. For instance, there currently exists
+only a single closed timestamp tracker per store. Future-Time Closed Timestamps
+would mandate the existence of at least two closed timestamp trackers.
+
+#### Aside: Writing in Future vs. Closing Time in Future
+
+It may be evident to readers that there is some inherent but fuzzy connection
+between the idea of writing in the future and closing time in the future. It is
+interesting to point out that in systems like Spanner and YB which assign
+monotonically increasing timestamps to all writes within a replication group and
+therefore "instantaneously" close out time, these two operations are one and the
+same.
+
+CockroachDB's transaction and replication models are not quite set up to support
+this. Transactions in CockroachDB are optimized for committing at their original
+read timestamp, so they don't like when their writes get bumped to higher
+timestamps. This allows for optimistic, lockless reads. It also avoids a layer
+of write amplification, wherein provisional values must be moved to a higher
+timestamp during intent resolution. This write amplification is concerning
+enough that systems like Google's Percolator introduce a specialized storage
+component (see the "write metadata" column) to perform the translation without
+needing to rewrite the data [6].
+
+Additionally, replication in CockroachDB is below the level of evaluation (see
+[proposer evaluated kv](20160420_proposer_evaluated_kv.md)), so the timestamp of
+a replicated value needs to be determined before replication-level sequencing.
+This allows for evaluation-level parallelism and less need for determinism
+during evaluation. This evaluation-level parallelism helps avoid the problem of
+the "parallelism gap" seen between compute and/or storage intensive processing
+before replication and after replication [7]. 
+
+For these reasons, the closed timestamp of a Range lags the timestamps of writes
+in a Range and the closed timestamp is more of a Range-level concern and less of
+a Request-level concern in CockroachDB (i.e. a single request cannot dictate the
+closed timestamp for a Range). This is why the closed timestamp dictates the
+time at which requests perform writes (as we'll see in the next section) instead
+of the other way around.
+
+### Becoming a Non-Blocking Transaction
+
+As stated earlier, this proposal does not expose "non-blocking transactions" to
+users directly. Instead, it exposes "non-blocking Ranges" to users. It was
+also mentioned earlier that a "non-blocking Range" will have a closed
+timestamp tracker that is leading "present time" by some duration. As it turns
+out, configuring this closed timestamp tracker to lead "present time" by
+`non_blocking_duration` is enough to implement "non-blocking transactions"
+without any other server-side changes.
+
+Any standard transaction that writes to a "non-blocking Range" will naturally
+get pushed into the future. If it had performed writes in the past, these will
+eventually get moved up to the new commit timestamp. If it had performed reads
+in the past, these will eventually get refreshed up to the new (synthetic)
+commit timestamp before committing. The new Commit Wait logic in the
+`txnCommitter` will ensure that these transactions naturally wait before
+returning to the client.
+
+So in a very real sense, "non-blocking transactions" will not exist in the code,
+although it is still useful to classify any transaction that commits with a
+commit timestamp in the future of "present time" as non-blocking (i.e. those
+with synthetic commit timestamps).
+
+### Non-blocking and Standard Transaction Interactions
+
+There are a number of interesting interactions that can happen if we allow
+non-blocking transactions to touch (read from and/or write to) standard ranges
+and interact with standard transactions. 
These interactions can increase the +latency of conflicting writes made by standard transactions. However, they will +not meaningfully increase the latency of conflicting reads made by standard +transactions. + +first \ second | standard read | standard write | non-blocking txn's read | non-blocking txn's write +-------------------------|---------------|----------------|-------------------------|------------------------- +standard read | both bump ts cache, no interaction | bump ts cache, may bump write ts, but still below present time | both bump ts cache, no interaction | bump ts cache, no interaction +standard write | read ignores write if below, read waits on write if above | 2nd write waits on 1st write, bumps write ts above 1st write | read waits on write | non-blocking write waits on standard write +non-blocking txn's read | both bump ts cache, no interaction | **bump ts cache, bump write ts, standard write becomes non-blocking write, must wait on commit** | both bump ts cache, no interaction | bump ts cache, may bump write ts +non-blocking txn's write | **no interaction if write above uncertainty, read waits up to max_offset if write within uncertainty** | **standard write waits on non-blocking write, bumps write ts, standard write becomes non-blocking write, must wait on commit** | read ignores write if below, read waits on write if above | 2nd write waits on 1st write, bumps write ts above 1st write + +Interesting interactions in bold. + +Most of these interactions go away if we don't allow non-blocking transactions +to touch standard ranges, but such a restriction would be a serious limitation +to their utility. + +It should be noted that the contention footprint of a non-blocking transaction +is only the duration that it holds its locks and does not include its Commit +Wait latency. 
This means that while conflicting non-blocking transactions will +each need to perform a Commit Wait, they need only wait out their own +`non_blocking_duration`, so this latency is not additive. + +It should also be noted that while this "infection" of non-blocking transactions +to other conflicting read-write transactions is undesirable, it is no more +undesirable in practice than read-write transactions that touch global data and +hold locks for long periods of time. This is what we would expect to see with +alternative proposals such as the [Consistent Read Replicas +proposal](https://github.com/cockroachdb/cockroach/pull/39758). Under the same +workloads that would cause an "infection" of synthetic timestamps due to a +non-blocking transaction that also touches contended data on standard Ranges, we +would expect any proposal that uses locks to exhibit buildups of dependent +transaction chains waiting on the global transaction's locks to be released. +This would be similarly disruptive, so in many ways, such workloads would be an +anti-pattern under either proposal. + +### Implementation Touch Points + +* Implement synthetic timestamps + * add bit on hlc.Timestamp + * wait on hlc.Update with synthetic timestamps +* Implement CommitWait + * wait in txnCommitter after transaction commit + * hlc.Update(txn.CommitTimestamp) + * reduces to no-op for standard transactions +* Introduce non-blocking Range concept + * Closed timestamp `non_blocking_duration` in future + * To do this, need to split up per-Store closed timestamp tracker + * Tune, maybe based on a Range's replication latency + * Hook up to cluster setting + * Add to zone configs +* Route present-time reads to followers in non-blocking Ranges + * Similar to existing follower read logic, but Range specific + * Add bit on RangeDescriptor? Or to Lease? Both are cached in client +* Introduce [long-lived learner replicas](https://github.com/cockroachdb/cockroach/issues/51943) + * Biggest work item, but... 
+
+  * Already being worked on and generally useful outside of this proposal
+  * Also, not a hard requirement for the rest of this proposal
+
+#### Tuning non_blocking_duration
+
+The `non_blocking_duration` is the distance in the future at which non-blocking
+transactions acquire locks and write provisional values. The non-blocking
+property of these transactions is provided when this is far enough in the future
+such that a non-blocking transaction is able to navigate its entire operation
+set, commit, and resolve intents before its commit timestamp is passed by
+"present time + max clock offset". Further, the non-blocking property of these
+transactions is provided on followers when all of these effects are replicated
+and applied on followers by the time that the non-blocking transaction's commit
+timestamp is passed by "present time + max clock offset".
+
+As such, some tuning of this value will need to be performed, likely taking into
+account transaction latency and replication latency.
+
+This tuning will also need to take into account the "effective" clock skew
+between nodes to account for a follower that is leading a leaseholder by some
+duration. In this case, the follower's view of "present time" will lead that of
+the leaseholder by this skew. To avoid needing to wait for a sufficiently recent
+closed timestamp on the follower, the leaseholder will need to close time a
+little further in the future. In practice, we'll likely add some small buffer to
+the `non_blocking_duration` to account for effective clock skew.
+
+#### Non-Blocking Transaction Pushing
+
+A difficulty we face with tuning the `non_blocking_duration` is that we don't
+know how long a non-blocking transaction is going to take between the time that
+it writes its first intent until the time that it commits and resolves all of
+its intents. This means that if we tune too low, the transaction's intents may
+not be resolved by the time they drop below "present time" + uncertainty (i.e. 
+become visible to readers) and this could cause unintentional blocking of +readers. A potential mitigation to this is to actively push these intents +forward using a mechanism similar to the `rangefeedTxnPusher`. This mechanism +would monitor active intents on non-blocking Ranges and push them and their +transaction forward if they ever got too close to present time without being +resolved. + +Such a mechanism would also help ensure that even if the coordinator for a +non-blocking transaction died, it would be cleaned up before a read on one of +the followers conflicted with it and was forced to perform a WAN RTT. + +An alternative approach that would have a similar effect is to buffer a +non-blocking transactions writes in its coordinator until it is ready to commit. +This would mean that we would only need to tune `non_blocking_duration` to the +expected commit latency of a transaction, instead of its expected evaluation and +commit latency combined. + +We'll probably want to do one or both of these approaches. + +## Drawbacks + +Complexity. But not overly so, especially compared to Consistent Read Replicas. + +Only effective if we can reduce the clock uncertainty interval dramatically. + +## Alternatives + +### Consistent Read Replicas + +See the [Consistent Read Replicas proposal](https://github.com/cockroachdb/cockroach/pull/39758). + +Consistent Read Replicas provide non-stale reads from followers, which is one of +the two major wins of this proposal. However, they do not provide the +non-blocking property of this proposal. Consistent Read Replicas are still +disruptive to reads when reads and writes contend. + +#### Read and Write Availability + +Another area where the two proposals diverge is in fault tolerance and +availability in the presence of node failure. + +The use of closed timestamps in this proposal means that if a closed timestamp +is delayed, present-time follower reads may not be servable for a period of +time. 
This means that leaseholder failures can delay follower reads. However, +the failure of a follower will never disrupt the ability of another follower to +server follower reads. Similarly, it will never disrupt the ability of the +leaseholder to serve writes (assuming a quorum of replicas is alive). Put +simply, this proposal is only susceptible to leaseholder unavailability, which +is part of why it needs to make no distinction between "consistent read +replicas" and normal "followers/learners". In that sense, the proposals +relationship to unavailability is identical to that of follower reads. + +The Consistent Read Replica proposal reacts differently to various forms of +unavailability. The failure of a leaseholder will delay writes, as usual. Unlike +this proposal, the failure of a consistent read replica will also delay writes, +as the consistent read replica's lease needs to be revoked. However, it seems +possible (but challenging) for consistent read replicas to continue serving +reads in the presence of a follower **or leaseholder** failure if we ensured +that new leaseholders properly adopted the same set of existing read replicas. +So this write-everywhere, read-anywhere approach appears to reduce write +availability but increase read availability. Yet, it's worth noting that the +increased susceptibility to write unavailability can also cause an increased +susceptibility to read unavailability for any reads that contend with blocked +writes. + +#### Load Balancing Applications OR Applicability to Single Region Deployment + +Most of the discussion in both of these proposals was centered around follower +reads to reduce latency. However, follower reads as a means of load balancing is +another valid use case. This is true even if the followers are in the same data +center or region. In these cases, communication once again becomes cheaper than +waiting out clock uncertainty, which changes the comparison. 
+ +For writes, consistent read replicas are now faster because they don't have a +Commit Wait stage. For uncontended reads, the two proposals are still the same – +there is no blocking. For contended reads, reads will still need to wait out the +uncertainty interval under this proposal. Under consistent read replicas, they +will still need to wait out the contention footprint of the contending +transaction, but this is expected to be lower because replication is faster. + +### Optimistic Global Transactions + +Link: https://docs.google.com/document/d/17SC35GR-3G61JCUl-lTnZ4o-UMMEQNMS_p4Ps66EkSI. + +Optimistic Global Transactions was a second proposal that grew organically out +of the Consistent Read Replicas proposal. The ideas were complex but powerful. +As it turns out, there are a large number of parallels that can be drawn to this +proposal if we imagine that all Ranges are placed in this "non-blocking mode". +The most important of these parallels is that reads in transactions begin local +and only need to go global to perform verification at the end. However, the big +improvement we've made here is that we've solved the initially stale reads +problem by adding the Commit Wait at the end of other writing transactions! + +In fact, with just two small extensions to this non-blocking transactions +proposal, we arrive at exactly the same place as the Optimistic Global +Transactions proposal: +- defer writes until commit time, only "upgrade" to a non-blocking transaction + at commit time so that all reads can be local during transaction evaluation. +- adapt commit protocol to acquire read locks after read validation, perform + GlobalParallelCommit to parallelize read validation with writes. + +The "hubs and spokes" architecture proposed in the Optimistic Global +Transactions is just as applicable to this proposal as it was to that one. + +### Non-Monotonic Reads + +The goal of this proposal is to avoid cases where reads need to block on writes. 
+However, as written, reads still do have to wait out an uncertainty interval (up +to the maximum clock offset) when they conflict with writes. This is necessary +in order to preserve single-key linearizability across all reads and writes to +non-blocking ranges. This is explored in [Uncertainty Intervals: To Wait or Not to Wait](#Uncertainty-Intervals:-To-Wait-or-Not-to-Wait). + +If we look at this closely, we see that the waiting that reads perform is not +strictly necessary to coordinate with the corresponding writes. Put differently, +by making the writes wait a little longer before acknowledging the client, we +could still ensure ["read your writes"](http://jepsen.io/consistency/models/read-your-writes). + +Instead, the waiting that reads perform is necessary to coordinate with other +reads. If this waiting was not performed, we could see cases where a read observes +a state after a change and then a causally dependent read observes the state before +the change. This would be a violation of ["monotonic reads"](http://jepsen.io/consistency/models/monotonic-reads). +To this point, we've considered such an anomaly to be disqualifying. + +Maybe that's not the case. If we were ok with violations of monotonic reads on +non-blocking transactions then we could avoid cases where reads need to block on +writes entirely by ignoring writes with synthetic timestamps in a read's +uncertainty interval. + +This doesn't seem like a realistic default, but may be a very useful opt-in +option. + +#### With Effective Uncertainty + +A slight variation of this idea is to split the notion of "max clock skew" +(O(10-100ms)) from "effective clock skew" (O(1ms)) and allow non-monotonic reads +only when clock skew exceeds this "effective clock skew". The benefit of this is +that we would still be able to place some bound on the cases where non-monotonic +read anomalies are possible (i.e. 
only if the effective clock skew is exceeded), +but would be able to drop the maximum uncertainty interval delay down an about +an order of magnitude. + +Again, it's difficult to think that this would be a realistic default because it +complicates our consistency story, but may make a very useful option. + +## References + +[1] Spanner: https://storage.googleapis.com/pub-tools-public-publication-data/pdf/65b514eda12d025585183a641b5a9e096a3c4be5.pdf + * Specifically, see _Section 4.2.3. Schema-Change Transactions_, which briefly mentions a scheme that sounds similar to this, although for the purpose of running large schema changes. + +[2] Logical Physical Clocks and Consistent Snapshots in Globally Distributed Databases: https://cse.buffalo.edu/tech-reports/2014-04.pdf + +[3] Beyond TrueTime: Using AugmentedTime for Improving Spanner: https://cse.buffalo.edu/~demirbas/publications/augmentedTime.pdf + +[4] Exploiting a Natural Network Effect for Scalable, Fine-grained Clock Synchronization: https://www.usenix.org/system/files/conference/nsdi18/nsdi18-geng.pdf + +[5] SLOG: Serializable, Low-latency, Geo-replicated Transactions: https://www.cs.umd.edu/~abadi/papers/1154-Abadi.pdf + +[6] Large-scale Incremental Processing Using Distributed Transactions and Notifications: https://storage.googleapis.com/pub-tools-public-publication-data/pdf/36726.pdf + +[7] KuaFu: Closing the Parallelism Gap in Database Replication: https://www.cs.cmu.edu/afs/cs/Web/People/dongz/papers/KuaFu.pdf diff --git a/src/current/files/cockroach/docs/RFCS/20230122_read_committed_isolation.md b/src/current/files/cockroach/docs/RFCS/20230122_read_committed_isolation.md new file mode 100644 index 00000000000..080e343d48d --- /dev/null +++ b/src/current/files/cockroach/docs/RFCS/20230122_read_committed_isolation.md @@ -0,0 +1,2471 @@ +- Feature Name: Read Committed Isolation +- Status: completed +- Start Date: 2023-01-22 +- Authors: Nathan VanBenschoten, Michael Erickson, Drew Kimball +- RFC PR: 
#100608 +- Cockroach Issue: None + +# Summary + +**Concurrency control** comprises the mechanisms that a database system employs +to guarantee the "correct" execution of concurrent operations. **Isolation +levels** provide concurrency control with the requirements for correctness — +defining how and when the changes made by one transaction become visible to +other transactions. + +Strong isolation levels provide a high degree of isolation between concurrent +transactions. They limit or eliminate the forms of concurrency effects that +transactions may observe. + +Weak isolation levels are more permissive. They trade off isolation guarantees +for improved performance. Transactions run under weaker isolation levels block +less and encounter fewer aborts (retry errors). In some systems, they also +perform less work. + +This RFC proposes the implementation of the **Read Committed** isolation level +in CockroachDB. + +``` +BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED +``` + +Read Committed is a common, relatively weak isolation level found in most legacy +database systems, including PostgreSQL. In fact, it is the default isolation +level in PostgreSQL and most other legacy database systems. + +Critically for this proposal, it is the strongest isolation level present in +PostgreSQL that does not experience [serialization failure +errors](https://www.postgresql.org/docs/current/mvcc-serialization-failure-handling.html) +and require applications to handle these errors using application-side retry +logic. + +By providing users with the option to run transactions under Read Committed, we +provide them with the option to avoid transaction retry logic in their +applications and make migrations to CockroachDB easier. + +# Motivation + +As alluded to above, the main impetus behind the introduction of Read Committed +is that the isolation level is expected to **ease application migrations**. 
+ +Existing applications that were built for Read Committed (the default isolation +level in many legacy databases, including PostgreSQL) often struggle to move to +CockroachDB's Serializable isolation level. Doing so requires the introduction +of transaction retry logic in the application to handle isolation-related +"serialization" errors. These errors are a consequence of strong isolation +guarantees. While adding retry logic is merely inconvenient for new +applications, it is often infeasible for existing applications. Implementing +Read Committed in CockroachDB will allow these applications to migrate to +CockroachDB without also migrating to a stronger isolation level at the same +time. + +For much the same reason, applications also struggle to move to CockroachDB +because they must now tolerate consistency-related "clock uncertainty" errors. +While not a direct consequence of strong isolation, these errors cannot commonly +be handled transparently by CockroachDB because of strong isolation guarantees. +Read Committed's isolation guarantees are weak enough that CockroachDB can +commonly handle consistency-related retry errors internally without involvement +from the application, eliminating the other reason for application-side retry +logic. + +Performance-sensitive applications may also prefer Read Committed over +Serializable. The isolation guarantees made by Read Committed are weak enough +that implementations can prevent readers from blocking writers (or causing them +to retry) and writers from blocking readers (or causing them to retry). This +limits the forms of transaction contention that can cause performance +degradation to just write-write contention. + +Transaction contention is notoriously difficult to understand, predict, and +mitigate. This is because contention is a global property of an entire workload, +not a local property of single transactions. 
It emerges from the conflicts +between two transactions, their relative timing, and the degree to which the +transactions are impacted by contention with other transactions. As a result, it +is often associated with meta-stable failures, where a system behaves well until +contention reaches a tipping point, beyond which throughput collapses. Limiting +contention concerns to write-write contention is therefore a major win for +**performance predictability**. In exchange, Read Committed is permissive of +concurrency anomalies, but this may be the right trade-off for some +applications. + +In addition, while the primary beneficiaries of the new isolation level are +users that run their apps entirely under the weaker isolation level, users +running their apps under Serializable isolation may still benefit from this +work. Read Committed will provide these users with a tool to run **bulk +read-write transactions** alongside their applications without risk of these +bulk transactions being starved due to serialization errors. This has been a +common struggle for users of CockroachDB. + +Arguably, these bulk transactions would be just as well served (or in some cases +better served) by Snapshot isolation, an isolation level between Read Committed +and Serializable. However, CockroachDB does not support this isolation level +either. This reveals the fourth benefit of this work — that it **paves the road +to and then past Snapshot isolation** (Repeatable Read, in PostgreSQL terms). +Almost all engineering work performed in service of Read Committed will also +benefit a potential future Snapshot isolation implementation. At that point, the +work to support Snapshot isolation will be predominantly an exercise in testing, +as many of the pieces inside the database will already be in place. + +# Background + +The following two subsections present the high-level landscape of isolation +levels and then focus on PostgreSQL's location in this landscape. 
This +background helps to contextualize the design choices made later on in this +proposal. However, readers that are comfortable with the topic can feel free to +jump to the [technical design](#technical-design). + +## Isolation Level Theory Primer + +[ANSI SQL](https://www.contrib.andrew.cmu.edu/~shadow/sql/sql1992.txt) (1992) +defined four isolation levels: READ UNCOMMITTED, READ COMMITTED, REPEATABLE +READ, and SERIALIZABLE. The levels were defined in terms of three _phenomena_: +Dirty Reads, Non-Repeatable Reads, and Phantom Reads. Stronger isolation levels +allow fewer phenomena to occur. As a result, they permit less anomalous behavior +(permit fewer anomalies). Weaker isolation levels allow more phenomena to occur. + +[A Critique of ANSI SQL Isolation +Levels](https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/tr-95-51.pdf) +(1995) demonstrated that the ANSI SQL standard definitions of isolation levels +were insufficient. Some phenomena were ambiguous, while others were missing +entirely. The work provided a new characterization of isolation levels, defining +the levels using a set of eight different phenomena. The expanded +characterization also made room for a new isolation level: SNAPSHOT. + +While more complete, these definitions were still based on preventing +conflicting operations that could lead to anomalies from executing concurrently. +Adya's dissertation ["Weak Consistency: A Generalized Theory and Optimistic +Implementations for Distributed +Transactions"](https://pmg.csail.mit.edu/papers/adya-phd.pdf) (1999) argued that +this _preventative_ approach is overly restrictive. The definitions were +"disguised versions of locking" and therefore disallow optimistic and +multi-versioning schemes. Adya's work generalizes existing isolation levels in +terms of conflicts, serialization graphs, and the forms of phenomena allowed in +the serialization graphs of different isolation levels. 
+ +Because Adya's classification of isolation levels and phenomena is general +enough to allow both locking and optimistic transaction implementations (of +which CockroachDB is a hybrid), we use its formalization where appropriate in +this proposal. Readers are encouraged to familiarize themselves with the thesis. +Once they notice the page count, they are encouraged to familiarize themselves +with the [summary paper](https://pmg.csail.mit.edu/papers/icde00.pdf) instead. + +Conveniently, [Elle](https://github.com/jepsen-io/elle), a transactional +consistency checker that we intend to use as part of the [testing for this +work](#testing), also talks in the language of Adya-style transaction histories +and anomalies. + +For a different take on transaction isolation, readers can familiarize +themselves with the work of Crooks et al. in [Seeing is Believing: A +Client-Centric Specification of Database +Isolation](http://www.cs.cornell.edu/lorenzo/papers/Crooks17Seeing.pdf). The +work presents a formalization of isolation guarantees from the perspective of +the users of a database system, instead of from the perspective of the system +itself. Crooks's handling of different variants of Snapshot isolation is +particularly novel. + +## Isolation Levels in PostgreSQL + +Transaction isolation in PostgreSQL is an interesting and relevant topic that is +well documented [here](https://www.postgresql.org/docs/current/transaction-iso.html). + +PostgreSQL provides all four standard transaction isolation levels: READ +UNCOMMITTED, READ COMMITTED, REPEATABLE READ, and SERIALIZABLE. However, READ +UNCOMMITTED is not implemented and maps to READ COMMITTED, so internally the +system only supports three distinct isolation levels. Furthermore, for a variety +of reasons and in a variety of ways, the three isolation levels that are present +deviate from their ANSI SQL definitions. Let's explore those briefly, from +strongest to weakest. 
+ +`SERIALIZABLE` is the strongest isolation level, and PostgreSQL has supported +true Serializable isolation since its 9.1 release (2011). Being the only +unambiguously defined ANSI SQL isolation level, PostgreSQL's implementation of +Serializable is true Serializable (PL-3 in Adya). + +However, PostgreSQL deviates from ANSI SQL's assumed use of [two-phase +locking](https://en.wikipedia.org/wiki/Two-phase_locking) (2PL) to implement +Serializable isolation. Instead, it uses an optimistic concurrency control +protocol called [Serializable Snapshot +Isolation](https://courses.cs.washington.edu/courses/cse444/08au/544M/READING-LIST/fekete-sigmod2008.pdf) +(SSI). SSI extends the multi-versioning present in Snapshot isolation with +additional runtime conflict detection to provide Serializable isolation. The +implementation of SSI in PostgreSQL is recounted by Ports and Grittner in +[Serializable Snapshot Isolation in +PostgreSQL](https://drkp.net/papers/ssi-vldb12.pdf). + +Notably, CockroachDB also implements Serializable isolation using SSI. + +`REPEATABLE READ` is the next strongest standard isolation level. Formally, the +isolation level (PL-2.99 in Adya) permits Phantom Reads but does not permit +Write Skew. In its place and under the same name, PostgreSQL implements Snapshot +isolation (PL-SI in Adya). Snapshot isolation does not permit Phantom Reads but +does permit Write Skew. This deviation is not strictly wrong from a standards +conformance perspective due to ANSI SQL's ambiguous definitions (which is what +prompted Berenson, Bernstein et al.'s +[critique](https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/tr-95-51.pdf)), +but it is confusing. + +Architecturally, the deviation is a consequence of PostgreSQL's multi-version +concurrency control scheme and general avoidance of read locks. 
PostgreSQL's +docs explain the [benefit of this +approach](https://www.postgresql.org/docs/current/mvcc-intro.html): + +> The main advantage of using the MVCC model of concurrency control rather than +> locking is that in MVCC locks acquired for querying (reading) data do not +> conflict with locks acquired for writing data, and so reading never blocks +> writing and writing never blocks reading. + +When viewed as traditional Snapshot isolation, PostgreSQL's REPEATABLE READ +isolation level is the most straightforward of the bunch. Transactions establish +a consistent read snapshot of previously committed data when they begin. All +reads across statements in the transaction are served out of this snapshot +(ignoring read-your-writes). Mutation statements evaluate their row search using +the read snapshot. They then lock and modify qualifying rows, aborting if any +locked row has been changed since the transaction began ("first committer +wins"). These locks then block other writers, but not readers, until the locking +transaction commits. + +`READ COMMITTED` is the second weakest standard isolation level, stronger only +than `READ UNCOMMITTED` (which PG does not implement). It is also the focus of +this RFC, so we will give its implementation in PostgreSQL extra attention. + +Formally, the isolation level (PL-2 in Adya) permits Non-Repeatable Reads, +Phantom Reads, Lost Updates, and Write Skew, but does not permit Dirty Reads or +Dirty Writes. PostgreSQL's implementation of Read Committed adheres to the +requirements of the isolation level. It also includes additional monotonicity +and recency properties beyond what is required by Read Committed. As a result, +it may be more accurately characterized as an implementation of [Monotonic +Atomic View](https://www.vldb.org/pvldb/vol7/p181-bailis.pdf). + +Each statement in a Read Committed transaction establishes a consistent read +snapshot of previously committed data when that statement began. 
For SELECT +statements, all reads are served out of this per-statement consistent snapshot +(ignoring read-your-writes). As a result, the statement never sees either +uncommitted data or changes committed by concurrent transactions during +statement execution. However, successive SELECT statements in the same +transaction receive different read snapshots and can see different data. Later +statements always receive newer read snapshots. + +Mutation statements (e.g. `UPDATE` and `DELETE`) evaluate their row search using +the per-statement read snapshot. They then lock qualifying rows, receiving the +latest version of the row. Once locked, rows that were unchanged since the +statement's read snapshot are mutated. Rows that were changed since the +statement's read snapshot are passed back through the mutation's search +predicate to determine whether they still qualify for mutation. This process of +re-evaluating the query predicate after locking is referred to as +`EvalPlanQual`, and is described +[here](https://github.com/postgres/postgres/blob/f03bd5717eaf31569ca797a2f7d65608f88ac2a2/src/backend/executor/README#L350). + +Re-evaluating query predicates when skew is detected between a mutation's read +snapshot and its locked snapshot avoids the need for serialization errors on +write-write conflicts. However, it also permits concurrency anomalies like Lost +Updates. One way to understand this is that anomalies can occur because the +mutation's row search is performed at a different MVCC snapshot than its +mutations, so interleaving mutations from other transactions can be lost. + +An example of this is presented in [PG's +docs](https://www.postgresql.org/docs/current/transaction-iso.html#XACT-READ-COMMITTED:~:text=Because%20of%20the,with%20transactions%20like%3A), +near the text "it is possible for an updating command to see an inconsistent +snapshot". + +Explicit row-level locking statements (e.g. 
`SELECT FOR UPDATE`) behave +similarly to mutations in Read Committed transactions. The initial row search of +the statement is performed using a non-locking read. Qualifying rows are then +locked and their latest version is retrieved. Rows that were changed since the +statement's read snapshot are re-qualified using the same EvalPlanQual approach +and rows that still match the query qualifications are returned. + +An added complexity of `SELECT FOR UPDATE` compared to other mutations is that +multiple tables can be locked at once when the query contains a join. In these +cases, base rows that contribute to the returned join rows in each table are +locked. If re-qualification is needed, individual join rows are passed back +through the join condition to determine whether they satisfy the condition with +the latest versions of the contributing rows. If so, the join row is still +returned. Otherwise, it is discarded. + +For readers that want to learn more about isolation levels in PostgreSQL, +[Jepsen's 2020 analysis of PostgreSQL +12.3](https://jepsen.io/analyses/postgresql-12.3) is an enjoyable read. + +### Data Consistency Checks at the Application Level + +PostgreSQL [documents](https://www.postgresql.org/docs/current/applevel-consistency.html) +approaches that application developers can use to enforce application-level data +integrity rules at the different isolation levels. + +As expected, the rules are trivial for Serializable isolation. By emulating +serial transaction execution, Serializable isolation has the unique property +that any interleaving of correct transactions will preserve application-level +data constraints, even if the database system is not aware of the constraints. +This property is a major simplification for application developers, and it is +why some are willing to pay a performance penalty. 
+ +Repeatable Read and Read Committed are both susceptible to concurrency +anomalies, so the rules and strategies to ensure correctness are more complex. +PostgreSQL recommends the use of explicit row-level (`SELECT FOR UPDATE`, +`SELECT FOR SHARE`) and table-level (`LOCK TABLE`) locks to protect against +concurrent operations that could violate data integrity. + +The use of explicit locking to enforce correctness [has been +demonstrated](http://www.bailis.org/papers/acidrain-sigmod2017.pdf) to be subtle +and error-prone, and yet, it is a task bestowed upon the application developer +by these weaker isolation levels. You get what you pay for. + +Of note is that explicit locking is never necessary to preserve data integrity +under Serializable isolation. However, it is necessary in some cases for weaker +isolation levels. This elevates the importance of a correct implementation of +explicit locking; data integrity depends on it. This will be discussed later in +the RFC. + +### Data Consistency Checks at the System Level + +While not externally documented, PostgreSQL enforces system-level data integrity +rules (foreign key constraints, etc.) under weak isolation levels in the same +manner. Explicit row-level locks are acquired when performing constraint checks +and these locks are held for the remainder of the transaction. + +Foreign key existence checks are an example of such a system-level consistency +check. Under the hood, PostgreSQL runs a `SELECT FOR SHARE` query that [looks +like this](https://github.com/postgres/postgres/blob/75f49221c22286104f032827359783aa5f4e6646/src/backend/utils/adt/ri_triggers.c#L363) +(specifically, the query uses `SELECT FOR KEY SHARE`). + +### Serialization Failure Handling + +PostgreSQL also [documents](https://www.postgresql.org/docs/current/mvcc-serialization-failure-handling.html) +the approaches that application developers can use to handle serialization +failures. 
As discussed earlier, both Repeatable Read and Serializable isolation +can produce serializable errors (`code 40001 - serialization_failure`). +Repeatable Read will raise them on write-write conflicts. Serializable will also +raise them on certain conflict cycles involving only read-write conflicts. + +The documentation recommends that application developers using either of these +isolation levels add retry loops in their application around transactions, and +that these loops should unconditionally retry on `serialization_failure` errors. + +Read Committed transactions are not susceptible to these errors, so such retry +loops are not needed. This again is the [primary motivation](#motivation) for +the introduction of Read Committed into CockroachDB. + +The documentation does mention that all three isolation levels are susceptible +to locking deadlock errors (`code 40P01 - deadlock_detected`). However, it makes +a weaker recommendation about how application developers should handle this +class of error. Retry loops "may be advisable", but in practice, these deadlocks +are typically handled by structuring transaction logic to use a consistent +locking order. + +# Technical Design + +The design of the Read Committed isolation level is sprawling. However, the bulk +of the design is split across three major areas of focus: +- [Transaction Model](#transaction-model) +- [Row-Level Locks](#row-level-locks) +- [Query Planning and Execution](#query-planning-and-execution) + +## Transaction Model + +This RFC proposes a Read Committed implementation in CockroachDB based on a +"Per-Statement Snapshot Isolation" transaction model. To understand this model, +it is helpful to start at CockroachDB's current Serializability model, +resuscitate the corresponding form of Snapshot isolation[^1], and then +generalize this model with the ability to operate over multiple read snapshots +to arrive at Read Committed. + +[^1]: CockroachDB originally supported Snapshot isolation. 
However, the + isolation level was removed in v2.1 (October, 2018) by + [#26475](https://github.com/cockroachdb/cockroach/issues/26475) due to its + multiple bugs. Many of these bugs were a result of CockroachDB's minimal + support for row-level locking at the time. + +To do so, we start by defining two properties: + +**Write skew tolerance**: Does the isolation level permit write skew? In +CockroachDB, this property can be expressed as whether the isolation level +allows transactions to write and commit at an MVCC timestamp above the MVCC +timestamp of its read snapshot(s). + +**Read snapshot scope**: Does the isolation level allow transactions to operate +across multiple read snapshots? If not, a single read snapshot is used for the +entire transaction. If so, what is the scope of each read snapshot? + +With these two properties, we can then construct a unifying framework for the +three isolation levels: + +| Isolation Level | Write Skew Tolerance | Read Snapshot Scope | +|---------------------|----------------------|---------------------| +| Serializable (SSI) | No | Per-Transaction | +| Snapshot (SI) | Yes | Per-Transaction | +| Read Committed (RC) | Yes | Per-Statement | + +Interested readers can find a proof of correctness of this transaction model [in +the appendix](#appendix-proof-of-correctness). + +### Write Skew Tolerance + +The primary difference between a Serializable implementation and a hypothetical +Snapshot implementation is Snapshot's tolerance of write skew. + +Like in other MVCC systems that support Snapshot isolation, Snapshot isolation +in CockroachDB would permit a transaction's read timestamp and its write +timestamp to diverge and still allow that transaction to commit. Reads performed +at the transaction's read timestamp would never be validated through a refresh +at the transaction's eventual commit timestamp. 
+ +Consequently, it would be possible for a key-value that was read at a +transaction's read timestamp to be updated by a second, concurrent transaction +at an MVCC timestamp greater than the first transaction's read timestamp but +less than the first transaction's commit timestamp. The initial read would "no +longer be valid" at the first transaction's commit timestamp. This setup forms +the basis for the write skew anomaly. + +Because skew is permitted between (unlocked) reads at a transaction's read +timestamp and its final commit timestamp, reads do not need to be tracked by a +transaction's coordinator, and read refreshes are not needed. The transaction +coordinator can forgo the use of a `txnSpanRefresher` entirely, which is the +component responsible for read span tracking and refreshing. + +This tolerance to write skew applies to Read Committed transactions as well. + +### Per-Statement Read Snapshots + +Write skew tolerance is the major difference between a Serializable +implementation and a hypothetical Snapshot implementation. The major difference +between a hypothetical Snapshot implementation and a Read Committed +implementation is per-statement read snapshots. + +Like in PostgreSQL, each statement in a Read Committed transaction will +establish a new read snapshot and will use that snapshot (i.e. MVCC timestamp) +for all of its reads. This will be accomplished by forwarding a `kv.Txn`'s +`ReadTimestamp` to `hlc.Now()` on each statement boundary (i.e. each call to +`kv.Txn.Step`). + +By establishing a new read snapshot for each statement, `SELECT` statements and +unlocked portions of mutation statements will observe only data committed before +the statement began. They will not observe uncommitted data or changes committed +concurrently with statement execution. + +From this description, it is evident that Read Committed does not provide +repeatable reads. `SELECT` statements in the same transaction can return +different versions of the same data. 
+ +Statements will see the effects of writes performed by previous statements +within the same transaction. However, they won't see the effects of writes +performed by the same statement in the same transaction. This avoids the +"Halloween Problem"[^2]. There are no differences between the Serializable and +Read Committed isolation levels as they relate to intra-transaction visibility, +so no changes are needed here. + +[^2]: Note that there are currently known bugs in the area of intra-transaction + visibility (e.g. [#70731](https://github.com/cockroachdb/cockroach/issues/70731)) + which this work does not intend to fix. + +#### Monotonicity and Atomicity Guarantees + +The use of a globally consistent per-statement read snapshot that advances +between statements is a stronger consistency model than is strictly required by +Read Committed. Notably, it prevents a transaction from observing some of a +committed transaction's effects and then later failing to observe other effects +from the same committed transaction. + +With these guarantees, the isolation level could be considered an implementation +of [Monotonic Atomic View](https://www.vldb.org/pvldb/vol7/p181-bailis.pdf). +This closely matches the behavior of PostgreSQL, which has also been +[characterized similarly](https://github.com/ept/hermitage). + +#### Recency Guarantees + +The use of a per-statement read snapshot that includes all data committed before +(in real-time) the statement began is also a stronger consistency model than is +required by Read Committed. However, this again would match the behavior of +PostgreSQL and other legacy database systems. + +### Read Uncertainty Intervals + +Providing strong recency guarantees in a distributed system requires more effort +than it does in a single-machine system. + +CockroachDB already provides such real-time constraints across transactions. 
+Formally, the system provides a single-object consistency model of +[linearizability](https://jepsen.io/consistency/models/linearizable). Translated +to a multi-object, multi-operation consistency model, CockroachDB has been +characterized informally as providing [a guarantee of "no stale +reads"](https://www.cockroachlabs.com/blog/consistency-model/). A transaction +observes all data committed before the transaction began. + +To provide this guarantee in a distributed system without perfectly synchronized +clocks but with the ability to place _some_ bound on clock skew between nodes, +the system employs "read uncertainty intervals". Readers unfamiliar with the +concept are encouraged to read [this blog +post](https://www.cockroachlabs.com/blog/living-without-atomic-clocks/) and then +[this tech +note](https://github.com/cockroachdb/cockroach/blob/8e24570fa366ed038c6ae65f50db5d8e22826db0/pkg/kv/kvserver/uncertainty/doc.go). + +Read Committed transactions have the option to provide the same "no stale reads" +guarantee at the level of each individual statement. Doing so would require +transactions to reset their `GlobalUncertaintyLimit` and `ObservedTimestamps` on +each statement boundary, setting their `GlobalUncertaintyLimit` to `hlc.Now() + +hlc.MaxOffset()` and clearing all `ObservedTimestamps`. + +We propose that Read Committed transactions do not do this and instead use the +same uncertainty interval across all statements. The cost of resetting a +transaction's uncertainty interval on each statement boundary is likely greater +than the benefit. Doing so increases the chance that individual statements retry +due to `ReadWithinUncertaintyInterval` errors. In the worst case, each statement +will need to traverse (through retries) an entire uncertainty interval before +converging to a "certain" read snapshot. While these retries will be scoped to a +single statement and [should not escape to the client](#transaction-retries), +they do still have a latency cost. 
+ +We make this decision because we do not expect that applications rely on strong +consistency guarantees between the commit of one transaction and the start of an +individual statement within another in-progress transaction. To rely on such +guarantees would require complex and surprising application-side +synchronization. + +This design decision can be revisited if it proves to be problematic for +applications. We've been surprised before. + +For a discussion of how uncertainty errors are handled, see +[below](#consistency-related-retries). + +### Write-Write Conflict Handling (or Lost Update Intolerance) + +While write skew is permitted under Snapshot isolation, lost updates are not. +Like under Serializable isolation, transactions throw errors on write-write +conflicts. This is commonly referred to as a "first writer wins" or "first +committer wins" conflict resolution policy. + +Protection against lost updates is weaker under Read Committed isolation, where +a limited form of the anomaly is permitted. Updates cannot be lost between a +single statement's read and write timestamp, but can be lost across one +statement's read timestamp and a subsequent statement's write timestamp. This is +a consequence of the use of [per-statement read +snapshots](#per-statement-read-snapshots) in Read Committed. + +To understand this, we first decompose the definition of a write-write conflict +as follows: + +**Write-Write Locking Conflict**: any case where a transaction attempts to lock +or write to a key that is locked with an exclusive lock by a different +transaction, where intents are considered to be a combination of an exclusive +lock and a provisional value. + +**Write-Write Version Conflict**: any case where a transaction attempts to lock +or write to a key that has a _committed version_ with an MVCC timestamp greater +than the locking/writing transaction's current read snapshot. 
+ +We define _write-write version conflict_ in terms of a transaction's "current +read snapshot" (i.e. `txn.ReadTimestamp`) to afford flexibility in the +definition to transactions that change their read snapshot across their +execution. For example, Read Committed transactions advance their read snapshot +on each statement boundary, so a committed version that would cause a +write-write version conflict for one statement may not cause a write-write +version conflict for a later statement in the same transaction. + +Snapshot and Read Committed transactions handle _write-write locking conflicts_ +identically to Serializable transactions. The prospective locker +[waits](#blocking-write-write-conflicts) for the existing exclusive lock to be +released before acquiring it. In cases where multiple transactions wait for +exclusive access to the same key, they form an orderly queue through the +`lockWaitQueue` mechanism. + +Once a transaction (regardless of isolation level) has navigated any potential +_write-write locking conflict_, it may experience a _write-write version +conflict_. This occurs when the holder of the lock performs a write on the +locked row before committing. In such cases, a `WriteTooOld` error is thrown. + +For Snapshot and Serializable transactions, this requires the entire transaction +to retry. We will see below that this is not the case for Read Committed +transactions. A write-write version conflict will raise a `WriteTooOld` error +that will cause the Read Committed transaction to retry the individual statement +that experienced the write-write version conflict at a new read snapshot, but +will not require the entire Read Committed transaction to retry. This +adaptability is possible because Read Committed transactions use per-statement +read snapshots. + +This behavior deviates from PostgreSQL, which does not require statements in +Read Committed to restart on write-write conflicts. 
We [explore an alternative +approach](#appendix-postgres-compatible-intra-mutation-consistency) that behaves +more similarly to PostgreSQL in the appendix, where we also explain why that +approach was rejected in favor of the stronger model presented here. + +### Blocking Behavior + +A transaction's _blocking behavior_ defines the cases in which contention with +other transactions does or does not block. + +Isolation levels do not directly mandate a specific blocking behavior — the same +isolation level can be implemented differently and lead to different blocking +behavior while providing the same isolation guarantees. However, weaker +isolation levels often make it easier to avoid blocking with fewer consequences +(e.g. potential aborts). + +In the following subsections, we outline the blocking behavior of Read Committed +transactions for the four different classes of conflicts composed of read and +write operations performed by two different transactions on the same row(s). + +In all cases, exclusive locking reads (`SELECT FOR UPDATE`) are treated as a +type of write. Shared locking reads complicate the discussion for little +immediate benefit, so [their discussion is deferred](#shared-locks). + +The name of each conflict refers to the order in which operations take place. +For example, a "write-read conflict" refers to a write to a row, followed by a +read of the same row by a different transaction. The later operation is said to +encounter the conflict, so we focus on the blocking behavior of that operation. +The earlier operation never blocks because there was no conflict at the time of +its execution. + +#### Non-Blocking Read-Read Conflicts + +Two reads to the same row from concurrent transactions never block. Furthermore, +in an MVCC system like CockroachDB, the two reads do not interact at all. The +later read is not influenced by the earlier read. + +#### Blocking Write-Write Conflicts + +Two writes to the same row cause blocking. 
The later writer will wait for the +lock acquired by the earlier writer to be released. This will occur when the +earlier writer's transaction is committed or rolled back. + +#### Non-Blocking Read-Write Conflicts + +A read of a row followed by a write to that row by a different transaction does +not block. The writer is allowed to proceed with its write, unencumbered. This +is an [oft-cited benefit of MVCC +systems](https://docs.oracle.com/en/database/oracle/oracle-database/19/cncpt/data-concurrency-and-consistency.html#GUID-1D60EFCC-03F4-4A04-B099-1B4DE5D02C47). + +However, while there is no blocking, the two operations do still interact. The +writer is barred from committing at an MVCC timestamp at or below the timestamp +of the reader. Failure to do so would violate the consistency of the reader's +snapshot. In CockroachDB, this coordination is facilitated through the +[TimestampCache](https://github.com/cockroachdb/cockroach/blob/8e24570fa366ed038c6ae65f50db5d8e22826db0/pkg/kv/kvserver/tscache/cache.go#L31). + +#### Non-Blocking Write-Read Conflicts + +Finally, a write of a row followed by a read of that row by a different +transaction does not block. The reader is allowed to proceed with its read after +determining whether it should observe or ignore the conflicting write. Either +way, it does not block and wait for the writer to commit. This is the other +[oft-cited benefit of MVCC +systems](https://docs.oracle.com/en/database/oracle/oracle-database/19/cncpt/data-concurrency-and-consistency.html#GUID-1D60EFCC-03F4-4A04-B099-1B4DE5D02C47). + +Non-blocking write-read conflicts are the **only blocking behavior interaction +that will change with the Read Committed work**. As such, they deserve focused +attention. + +Currently, a subset of write-read conflicts are blocking in CockroachDB. This +has been a repeated source of confusion and frustration for users of the system. 
+This is especially true for migrations from PostgreSQL, which provides +non-blocking write-read conflicts in all isolation levels. Unexpected blocking +can cause performance problems, unpredictability, and application deadlocks. + +Preliminary discussions with users of CockroachDB revealed that a desire for +this property is one of the underlying reasons why they ask for Read Committed, +alongside the desire for fewer transaction aborts. + +And yet, the property is not strictly an artifact of isolation. As PostgreSQL +demonstrates, it can be provided at all isolation levels. CockroachDB's +engineering team has [previously +explored](https://docs.google.com/document/d/1ji6C0aDI6n61sVKPjf5-YUucbtgBlwfpsrNdcidW5a0/edit?usp=sharing) +what it would take to support non-blocking write-read conflicts under +Serializable isolation. + +That earlier proposal outlined a long-term vision for changes that would +primarily benefit transaction contention, but would also benefit multi-region +transactions, one-phase commits, foreign key lookups, and more. It was ambitious +but difficult to decompose. It also struggled with increasing the risk of +transaction starvation without sufficient recourse for users, at least until +either SHARED locks or weaker isolation levels are introduced. + +This RFC proposes two deviations from that original proposal. First, we will +start by supporting the (non-)blocking behavior **only at the Read Committed +isolation level**, where there are fewer trade-offs. Read Committed transactions +are not subject to the increased risk of starvation in order to provide this +property, so there are fewer risks to the work. Second, we will implement the +(non-)blocking behavior using **a simpler approach** that has fewer auxiliary +benefits and shifts work from writers to readers but still achieves the +sought-after non-blocking property. 
+
+After the mechanisms to support non-blocking write-read conflicts are built as
+part of this work, we can then revisit their use for Serializable transactions.
+At that point, engineering efforts can focus solely on the problem of read
+predictability vs. fairness. If the property is eventually extended to
+Serializable transactions, negatively impacted users will be able to combat
+starvation by using SHARED locks during select reads or by running select
+transactions under Read Committed isolation.
+
+##### Status Check RPC on Intent Conflicts
+
+To implement non-blocking write-read conflicts, the conflict resolution rules
+for non-locking readers that encounter conflicting intents will be adjusted.
+Note here that "conflicting" means that the read timestamp of the reader is
+equal to or greater than the write timestamp of the intent and that the reader
+is attempting to read the intent's key. The reader must determine whether the
+intent should be included in its read snapshot.
+
+In such cases, the presence of the intent alone cannot be used to determine
+whether the intent's writer is still running or whether it has already been
+committed. Distinguishing between these two cases as a reader is necessary to
+ensure both atomicity and (CAP) consistency.
+
+Instead of waiting on these conflicting intents until the intent's transaction
+completes, the non-locking readers will perform a "status check" RPC in the form
+of a `PushTxn(PUSH_TIMESTAMP)` to the transaction record of the encountered
+intent to determine its visibility. If the intent's transaction has already been
+committed or aborted, the reader must initiate and wait for intent resolution
+and then observe the post-resolution state[^3]. However, if the intent's
+transaction is in progress, the reader can [push the minimum commit
+timestamp](https://github.com/cockroachdb/cockroach/pull/95911) of the intent
+holder to prevent it from committing at or before the reader's read timestamp.
+
+The reader can then proceed to ignore the intent and continue with its scan.
+
+[^3]: There are other projects like
+    https://github.com/cockroachdb/cockroach/issues/91848 that aim to reduce
+    blocking between intent resolution and conflicting KV operations. These are
+    out of the scope of this RFC.
+
+To make the performance of this approach reasonable, two changes will be needed.
+
+First, requests must cache status check results for transactions that have been
+pushed, were seen to be `PENDING` after the request began, and are uncommittable
+below the request's read timestamp. This cache can be used to prevent a request
+from needing to perform a status check for the same transaction multiple times.
+
+The cache must be request-scoped and not scoped more broadly (e.g.
+range-scoped). This is because:
+
+- Requests have different read timestamps. The cache can only be used to avoid
+  a PushTxn request if a prior PushTxn advanced the intent holder's minimum
+  commit timestamp above a request's read timestamp.
+
+- Requests originate at different times. A PushTxn that previously observed a
+  PENDING intent holder _before_ a request arrived is no proof that the intent
+  holder has not committed before the request's transaction began. Ignoring
+  such intents because they _used to be_ PENDING (without additional
+  uncertainty interval logic) could lead to violations of real-time ordering.
+
+For ease of implementation, a new request-scoped cache of non-visible
+transactions will be added to `concurrency.Guard`, making it per-request and
+per-range scoped. This is convenient because it avoids wire-protocol changes and
+because `concurrency.Guard` objects are scoped below transaction read_timestamp
+adjustments, so cache eviction logic on transaction refresh will be implicit. As
+a result of this scoping, a given read request may query the status of a given
+pending transaction at most O(ranges) times, which is deemed to be a reasonable
+cost.
+ +Second, readers must be taught to ignore conflicting intents that are known to +be PENDING without traversing consensus to resolve (rewrite) them. This can be +achieved through either an augmentation of the `intentInterleavingIter` or by +providing the MVCC scanner with another view into the `concurrency.Guard`. The +approach is outlined in +[#94730](https://github.com/cockroachdb/cockroach/issues/94730) and has been +prototyped in +[nvanbenschoten/nonBlockingReads](https://github.com/nvanbenschoten/cockroach/commits/nvanbenschoten/nonBlockingReads). + +##### Ignored Exclusive Lock Conflicts + +The conflict resolution rules for non-locking readers that encounter exclusive +locks will also be adjusted. This adjustment is simpler. Exclusive locks that do +not protect provisional values (e.g. those acquired by `SELECT FOR UPDATE`) will +no longer block non-locking readers. Further, because they have no associated +provisional value that may require visibility, readers need not concern +themselves with determining the status of the lock holder's transaction. +Instead, readers can simply ignore these locks. + +This matches the behavior of PostgreSQL, even under Serializable isolation. +However, as with the other half of Non-Blocking Write-Read Conflicts, this +behavior change will only initially apply to Read Committed readers until we can +be sure it does not regress performance for Serializable transactions. + +### Transaction Retries + +CockroachDB's Serializable implementation combines elements of a pessimistic and +an optimistic transaction model. Write-write conflicts are eagerly detected +during statement execution and lead to immediate transaction aborts. However, +"dangerous structures" (see [Serializable Snapshot +Isolation](https://courses.cs.washington.edu/courses/cse444/08au/544M/READING-LIST/fekete-sigmod2008.pdf)) +that could otherwise lead to non-serializable histories are detected at commit +time. 
+ +This optimistic validation is accomplished through the combination of a +commit-time condition that a transaction's `ReadTimestamp` equals its +`WriteTimestamp` +(https://github.com/cockroachdb/cockroach/blob/8e24570fa366ed038c6ae65f50db5d8e22826db0/pkg/kv/kvserver/batcheval/cmd_end_transaction.go#L504) +and a [read +refresh](https://github.com/cockroachdb/cockroach/blob/8e24570fa366ed038c6ae65f50db5d8e22826db0/pkg/kv/kvclient/kvcoord/txn_interceptor_span_refresher.go#L45) +mechanism that advances the transaction's `ReadTimestamp`. The first of these +checks fails if the transaction has established an inbound read-write +anti-dependency (e.g. its `WriteTimestamp` is pushed by timestamp cache). The +second of these checks fails if the transaction has established an outbound +read-write anti-dependency (e.g. its read refresh finds a newer version). If +both fail, the transaction aborts. + +These commit-time isolation checks are not needed for Read Committed +transactions. This is because the "dangerous structures" that can create +non-serializable histories are permitted under the Read Committed isolation +level. In practice, this means that a Read Committed transaction can commit even +if its `ReadTimestamp` is skewed from its `WriteTimestamp`.[^4] + +[^4]: This would be true of a Snapshot isolation implementation as well. + +#### Isolation-Related Retries + +We [alluded earlier](#write-write-conflict-handling-or-lost-update-intolerance) +that write-write version conflicts do not cause Read Committed transactions to +abort. This is despite the fact that write-write version conflicts raise +`WriteTooOld` errors, as they do in other isolation levels. + +When Serializable transactions encounter such situations, they restart from the +beginning to establish a new read snapshot that includes the committed version +that caused the write-write version conflict. 
Read Committed transactions have
+the flexibility to do better, thanks to the use of [per-statement read
+snapshots](#per-statement-read-snapshots). Because each statement in a Read
+Committed transaction can observe a different read snapshot, `WriteTooOld`
+errors can be handled at the statement level and not at the transaction level.
+Critically, this allows the statement to retry at a new read snapshot without
+involving the client.
+
+To facilitate these statement-level retries of write-write conflict errors, each
+SQL statement will be run inside of a retry loop. `WriteTooOld` errors will be
+caught before they escape to the client and the statement will be retried. All
+prior effects of the statement will be rolled back on retry through the use of
+[transaction
+savepoints](https://www.cockroachlabs.com/docs/stable/savepoint.html). A
+savepoint will be created at the beginning of each statement, released on
+success, and rolled back on a retryable error.
+
+However, there is a caveat here. If the statement has already begun streaming a
+partial result set back to the client, it cannot retry transparently. By
+default, the result set will be buffered up to 16KiB before overflowing and
+being streamed to the client. However, this result buffer size can be configured
+using the `sql.defaults.results_buffer.size` cluster setting or the
+`results_buffer_size` session variable. This condition is analogous to the
+[automatic retry
+behavior](https://www.cockroachlabs.com/docs/stable/transactions.html#automatic-retries)
+of implicit transactions.
+
+The only remaining source of isolation-related transaction aborts is locking
+deadlocks. Unlike write-write conflicts, which can be scoped to a single
+statement and retried without client involvement, locking deadlocks will result
+in a transaction abort and an error returned to the client, as [is also the case
+in PostgreSQL](#serialization-failure-handling).
This is because locks acquired
+earlier in the transaction may need to be dropped to break the deadlock.
+
+#### Consistency-Related Retries
+
+While Read Committed is a weak enough isolation level to avoid most
+isolation-related causes of retries, its implementation in a distributed system
+leads to a uniquely distributed concern: consistency-related retries.
+
+As [previously discussed](#read-uncertainty-intervals), Read Committed
+transactions will use uncertainty intervals to provide real-time ordering
+guarantees between transactions. The use of uncertainty intervals implies the
+possibility of `ReadWithinUncertaintyInterval` errors. While reading, a Read
+Committed transaction may observe an MVCC version above its read snapshot but
+within its uncertainty interval. Ignoring such values could violate
+linearizability, so the value must be returned. However, merging the uncertain
+value into the existing read snapshot would create an inconsistent view.
+
+When Serializable transactions encounter such situations, they restart from the
+beginning to establish a new read snapshot that includes the uncertain value.
+Like with `WriteTooOld` errors, Read Committed transactions have the flexibility
+to do better, thanks to the use of [per-statement read
+snapshots](#per-statement-read-snapshots). Because each statement in a Read
+Committed transaction can observe a different read snapshot,
+`ReadWithinUncertaintyInterval` errors can be handled at the statement level and
+not at the transaction level. Like with write-write conflict errors, this allows
+the statement to retry at a new read snapshot without involving the client.
+
+The same per-statement retry loop used to retry write-write conflicts will also
+be used to facilitate statement-level retries of uncertainty errors.
+
+As with transaction-level retries of Serializable transactions, statement-level
+retries of Read Committed transactions will not reset the transaction's
+uncertainty interval.
Even as the read snapshot advances across retries, the +upper bound of the uncertainty interval will remain fixed. This eliminates the +possibility of starvation and bounds the retry duration to the configured +maximum clock offset (i.e. the size of the uncertainty interval). In other +words, even in the worst case, this statement-level retry loop will converge. + +The same caveat about result set streaming that was mentioned in the previous +section applies to uncertainty errors as well. + +#### Retry Avoidance Through Read Refreshes + +While a per-statement retry loop limits the cases where isolation and +consistency-related retry errors escape from the SQL executor to the client, it +is better if these errors never emerge from the key-value layer in the first +place. The avoidance of such retry errors is possible in limited but important +cases through a familiar mechanism: read refreshes. + +This proposal previously mentioned that read refreshes are not needed for +isolation levels that tolerate write skew. While read refreshes are less +important for weak isolation levels, these levels can still benefit from the +mechanism in the limited cases where transactions need to adjust their read +snapshot. Specifically, even under weak isolation levels, read refreshes can be +used to handle write-write version conflicts (`WriteTooOld` errors) and read +uncertainty conflicts (`ReadWithinUncertaintyInterval` errors). Read refreshes +do so by proving that a transaction's current read snapshot is equivalent to a +later read snapshot for the set of key spans previously read by the transaction, +allowing the transaction to dynamically adjust its read snapshot without a full +restart. + +Read refreshes come in two flavors: client-side and server-side. Both forms of +refreshes will be supported for Read Committed through the use of the +`txnSpanRefresher` and the `CanForwardReadTimestamp` protocol. 
+
+The only difference between Read Committed transactions and
+Snapshot/Serializable transactions in this area is that Read Committed
+transactions will clear their refresh spans when establishing new per-statement
+read snapshots. Doing so will increase the success rate of read refreshes for
+Read Committed transactions. Additionally, it will increase the number of cases
+where server-side refreshes are possible — each statement will begin with the
+opportunity to perform server-side refreshes. Consequently, many simple
+statements will never perform per-statement retries, even if they experience
+contention.
+
+#### Structural Commit Conditions
+
+The remaining commit conditions that Read Committed transactions must check are
+structural.
+
+First, Read Committed transactions must ensure that their intent writes have all
+succeeded. If any pipelined intent writes have failed, a
+`RETRY_ASYNC_WRITE_FAILURE` error will be returned.
+
+Second, Read Committed transactions must commit before their schema-imposed
+deadline. If long-running Read Committed transactions fail to update schema
+leases before attempting a commit, a `RETRY_COMMIT_DEADLINE_EXCEEDED` error will
+be returned.
+
+None of these structural errors are user-controllable. They should not be
+produced outside of extraordinary cluster conditions like node failures. Users
+are not expected to handle them gracefully.
+
+#### Parallel Commits Protocol
+
+The [parallel
+commits](https://github.com/cockroachdb/cockroach/blob/8e24570fa366ed038c6ae65f50db5d8e22826db0/pkg/kv/kvclient/kvcoord/txn_interceptor_committer.go#L51)
+atomic commit protocol also deserves discussion.
+
+The protocol extends traditional two-phase commit with a distributed commit
+condition called the "implicit commit" state. This state is a function of the
+staging timestamp of a transaction record and the version timestamp of each of
+the intents listed in the transaction record's in-flight write set.
+ +The condition does not change for Read Committed transactions, and the atomic +commit protocol remains equally applicable to the weaker isolation level. + +## Row-Level Locks + +Serializable transactions in CockroachDB use read refreshes as a form of +optimistic validation to ensure the integrity of reads on commit. As discussed +above, this will not be the case for Read Committed transactions. + +This is a deliberate design decision. By permitting skew between the one or more +read snapshots observed during a transaction and the MVCC snapshot into which +the transaction installs its writes, Read Committed transactions can avoid +retries and blocking. In some sense, this flexibility is what enables the Read +Committed isolation level to accomplish its goals. In exchange, these reads +provide weak isolation guarantees. + +However, there are cases where stronger guarantees are needed for the reads in a +Read Committed transaction to enforce data integrity rules. These situations +arise at both the application level (outside the DB) and the system level +(inside the DB). In these cases, row-level locks serve as an important tool to +"upgrade the isolation" of select read operations in an otherwise weakly +isolated transaction. + +A useful mental model for those familiar with MVCC is that standard reads +performed during a Read Committed transaction are not guaranteed to remain valid +up to the point when the transaction commits. However, by acquiring locks on +specific rows when reading, writes by other transactions that would invalidate +those reads are blocked from completing until the reader commits[^5]. As a +result, stronger guarantees can be made about the relationship between certain +reads in a transaction and that transaction's writes. These guarantees then +serve as the foundation for building higher-level data integrity constraints. + +[^5]: Ignoring phantoms inserted between rows or assuming the use of gap locks. 
+ +While CockroachDB already has limited support for row-level locks (e.g. `SELECT +FOR UPDATE`), the support lacks the expressivity and reliability that users of +Read Committed will demand. New forms of row-level locks must be introduced. +These locks must then be improved to provide strong correctness guarantees. + +### Shared Locks + +CockroachDB added support for `SELECT FOR UPDATE` in v20.1. `SELECT FOR UPDATE` +acquires an exclusive lock on each row returned from a `SELECT` statement. At +the time, the tool was primarily meant to provide fine-grained control over lock +ordering. Users could avoid transaction retries in certain cases by acquiring +locks for rows that they intended to update earlier in their transaction. This +motivation was explored in [this introductory blog +post](https://www.cockroachlabs.com/blog/when-and-why-to-use-select-for-update-in-cockroachdb/). + +`FOR UPDATE` refers to the strength of the lock acquired on rows returned from +the `SELECT` statement. If a transaction intends to later `UPDATE` a row, it +benefits from acquiring an +[Exclusive](https://github.com/cockroachdb/cockroach/blob/e67e3961a8f4314dd7d92a0bcbe0986f3ce8cee9/pkg/kv/kvserver/concurrency/lock/locking.proto#L91) +lock on the row when it initially reads the row's value. + +Under Read Committed, transactions may want to lock a row to prevent concurrent +writes even if they don't intend to `UPDATE` the row themselves. In these cases, +blocking concurrent readers is unnecessary and undesirable, so +[Shared](https://github.com/cockroachdb/cockroach/blob/e67e3961a8f4314dd7d92a0bcbe0986f3ce8cee9/pkg/kv/kvserver/concurrency/lock/locking.proto#L53) +locks are a better alternative. + +SQL provides a tool for these situations in the form of `SELECT FOR SHARE`. 
+`SELECT FOR SHARE` behaves identically to `SELECT FOR UPDATE`, except that it
+acquires a
+[weaker](https://github.com/cockroachdb/cockroach/blob/e67e3961a8f4314dd7d92a0bcbe0986f3ce8cee9/pkg/kv/kvserver/concurrency/lock/locking.proto#L142)
+Shared lock on each of the returned rows.
+
+The design of Shared locks was explored in [#101799](https://github.com/cockroachdb/cockroach/pull/101799).
+
+Read Committed will depend on the implementation of the Shared locking strength
+for two reasons. First, applications that use `SELECT FOR SHARE` will behave
+incorrectly if we continue to treat the locking strength as a no-op. Second,
+CockroachDB's SQL layer will begin to use `SELECT FOR SHARE` internally to
+enforce referential integrity constraints in Read Committed transactions, as we
+will see later on in this proposal.
+
+### Reliability and Enforcement
+
+Row-level locks acquired by `SELECT FOR UPDATE` are currently best-effort.
+
+The locks are maintained only on the leaseholder (they are "unreplicated") and
+are discarded in a handful of circumstances, including lease transfers, range
+splits, range merges, node failures, and memory limits. This has caused user
+confusion, but has not been a correctness concern to date because Serializable
+transactions never rely on these locks to ensure isolation guarantees.
+
+With the introduction of Read Committed, best-effort row-level locks are no
+longer sufficient for `SELECT FOR UPDATE`. We will need these locks to provide
+stronger guarantees.
+
+#### Properties of Reliability
+
+We can split the meaning of "reliable" row-level locks into two properties:
+
+**Isolation**: If a lock is held on a row by a transaction, that row's value
+must not be changed by any other transaction before the lock holder commits.
+Here, "before" is defined in the MVCC timestamp domain.
+ +**Mutual Exclusion**: If a lock is held on a row by a transaction, no other +transaction may acquire a +[conflicting](https://github.com/cockroachdb/cockroach/blob/e67e3961a8f4314dd7d92a0bcbe0986f3ce8cee9/pkg/kv/kvserver/concurrency/lock/locking.proto#L142) +lock on that same row before the original lock holder commits. Here, "before" is +defined in the wall clock domain, as perceived by a client of the database. + +Without additional constraints or assumptions, neither property implies the +other. This is inconsequential today because CockroachDB's row-level locks +currently provide neither property. + +Isolation is enforced in Serializable transactions using read refreshes, +allowing the locks to be best-effort. If a lock is lost and a conflicting +version is installed on a previously locked row before the lock holder is +committed, the lock holder's refresh will fail and the transaction will abort. + +Mutual Exclusion is not provided by any other mechanism. Instead, it is a use +case for row-level locks that CockroachDB has resisted. At a theoretical level, +fault-tolerant mutual exclusion in a distributed system [is +impossible](https://martin.kleppmann.com/2016/02/08/how-to-do-distributed-locking.html). +The best a distributed system like CockroachDB can do is strive to minimize the +cases where mutual exclusion is violated and introduce commit-time validation +that mutual exclusion was not violated (e.g. preventing a transaction from +committing if its lock was removed because the system incorrectly deemed its +coordinator to have failed). + +To support Read Committed, only the Isolation property is necessary. However, +some solutions move us closer to achieving the Mutual Exclusion property as +well. + +#### Reliability Improvement Alternatives + +There are a collection of approaches that we could take to strengthen row-level +locking to a degree that the row-level locks could be used for Read Committed. 
+To compare these alternatives, we first define a set of tests to evaluate each +approach. + +- **(T1) Provides Isolation**: Does the approach provide the **Isolation** + property? + +- **(T2) Provides Mutual Exclusion**: Does the approach provide the **Mutual + Exclusion** property? + +- **(T3) Lock Persistence**: Does the approach require a disk write per + row-level lock? + +- **(T4) Lock Replication**: Does the approach require replication per row-level + lock? + +- **(T5) Commit-Time Validation**: Does the approach require commit-time + validation of lock validity? + +- **(T6) Commit Starvation**: Is the approach susceptible to commit starvation + if a committer is pushed during a commit-time validation phase? + +- **(T7) Completeness**: Will the approach support all transactions? + +- **(T8) Abort Conditions**: What conditions will cause a transaction to abort + and return an error? + +Using these tests, we can then compare the base alternatives and their +specialized variants. + +At a high level, there are three alternative approaches: +- **Refresh Under Locks** +- **Validate Locks** +- **Replicate Locks** + +##### Alternative: Refresh Under Locks + +The **Refresh Under Locks** approach extends the existing Read Refresh +mechanism. While Read Committed transactions no longer refresh all of their +reads, the keys on which they acquired locks will be tracked in their refresh +span set and will be refreshed on commit. + +This approach provides isolation (**T1**) for the set of keys locked by a +transaction in the same way that Serializable transactions provide isolation for +all keys read, using a commit-time refresh (**T5**). However, it makes no +attempt to ensure that locks are still held at commit time, so it does not +provide mutual exclusion (**T2**). + +Because locks can be lost without violating isolation, the most basic version of +this approach does not need lock persistence (**T3**) or lock replication +(**T4**). 
However, a lost lock can lead to a refresh failure and a transaction +abort (**T8**) if the loss of the lock allows for a conflicting write to +succeed. + +Thanks to refresh span condensing, the approach is close to complete (**T7**), +assuming no contention. Even transactions that acquire millions of locks like +`SELECT * FROM billion_rows FOR UPDATE` should be able to commit, though span +condensing does create the opportunity for false positive refresh validation +failures. However, the approach is not complete with contention. A transaction +that acquires millions of locks will quickly exceed the maximum in-memory lock +limit and the locks will be lost. This can allow conflicting writes on +should-be-locked keys, causing the refresh to fail. + +Finally, assuming non-blocking write-read conflicts, the approach is subject to +starvation (**T6**). When a transaction begins to commit, it will refresh to its +current provisional commit timestamp. If its minimum commit timestamp is pushed +by a conflicting reader during this time, the transaction will need to refresh +again. This can starve, as is described in +[#95227](https://github.com/cockroachdb/cockroach/issues/95227). + +While the approach builds upon the existing Read Refresh mechanism, that +mechanism will need generalization to work with Read Committed. This is because +Read Committed transactions operate across many read snapshots. The refresh span +tracking must be augmented with a timestamp-per-span, which complicates the data +structure and the span condensing algorithm. The Read Refresh API already +supports per-span `RefreshFrom` timestamps, so this will be strictly a +client-side bookkeeping change. + +##### Alternative: Validate Locks + +The **Validate Locks** approach is similar in spirit to the **Refresh Under +Locks** approach. Both use a commit-time validation step to verify isolation. +However, the two approaches differ in what they verify. 
While **Refresh Under +Locks** looks for violations of isolation in the MVCC history of keys that were +locked, **Validate Locks** simply verifies that the locks that were acquired are +still held at commit time. + +The approach then provides isolation (**T1**) by construction — if locks protect +against conflicting writes that could violate isolation, then a commit-time +proof (**T5**) that all acquired locks still exist acts as a proof that +isolation has not (yet) been violated. Additionally, this serves as a proof that +mutual exclusion (**T2**) has not been violated, because no conflicting lock +could have been acquired since the committing transaction acquired its locks. + +The assumption with this approach is that neither lock persistence (**T3**) nor +lock replication (**T4**) would be used. As a result, lease transfers, node +crashes, memory limits (**T7**), or other events could lead to lost locks, which +would cause validation to fail and the lock acquirer to abort (**T8**). Unlike +**Refresh Under Locks**, this abort would occur even if there was no real +transaction contention. + +This approach also has a similar downside to **Refresh Under Locks**, which is +that the commit-time validation only provides a proof of isolation up to some +MVCC timestamp. The lock could be lost immediately after validation and the best +the validating transaction could do is bump the timestamp cache below the lock +during validation to its current provisional commit timestamp (in fact, it must +do this). However, if the committing transaction is then pushed, it would need +to re-validate its locks. This means that the approach is also subject to +starvation (**T6**) with non-blocking write-read conflicts. + +##### Alternative: Replicate Locks + +The **Replicate Locks** approach is similar to the **Validate Locks** approach +in that it also uses locks themselves to provide isolation (**T1**) and mutual +exclusion (**T2**) over the keys that it protects. 
However, it does not use a +commit-time validation step (**T5**) to determine whether locks are still held. +Instead, it replicates locks eagerly during transaction execution to eliminate +cases where they could be lost. + +There are a handful of variants of this approach, all of which include some form +of lock persistence (**T3**) and lock replication (**T4**). The simplest +approach would be to synchronously replicate locks during the execution of +`SELECT FOR {UPDATE/SHARE}` statements. A more sophisticated variant would be to +pipeline this lock acquisition like we do with [intent +pipelining](https://www.cockroachlabs.com/blog/transaction-pipelining/), at the +risk of pipelining errors. A third variant would be to eagerly acquire +unreplicated locks during the execution of `SELECT FOR {UPDATE/SHARE}` +statements and then promote these locks to replicated locks at commit-time, at +the risk of the unreplicated locks having been lost by this time. The variants +differ in performance but are otherwise similar. + +Replicating locks and storing them in persistent storage (using the replicated +lock table keyspace which was designed for such storage) prevents the locks from +being lost during lease transfers, node crashes, or memory limits (**T8**). As a +result, this approach is complete (**T7**). It can use client-side span +coalescing to scale to an arbitrary number of locks in roughly the same way that +a transaction can write to an arbitrary number of keys. + +Replicating locks also avoids the risk of commit starvation (**T6**) because it +forgoes commit-time validation entirely. Once a transaction has persisted all +locks, it is free to commit at any MVCC timestamp in the future. It can continue +to be pushed by contending readers without consequence. 
The only requirement is +that when locks are released after a transaction has committed and the locks are +not replaced by a committed version, the timestamp cache must be bumped to the +commit timestamp of the transaction to prevent conflicting writers from +re-writing portions of the "locked MVCC history". + +##### Variants and Optimizations + +As implied above, variants of the approaches exist and the approaches are not, +for lack of a better word, mutually exclusive. Aspects of some approaches could +be used to augment other approaches to arrive at strong alternatives. For +example, the commit-time lock replication variant discussed above can be viewed +as a hybrid of Lock Validation and Lock Replication. Lock Validation schemes +could also employ Lock Replication as a mechanism to spill locks on memory +limits or ship locks during lease transfers. + +One additional hybrid approach that deserves mention is **Lock Validation with +Validity Windows**. This approach exploits the fact that a stable leaseholder +can provide _some_ guarantees on lock validity, even if it can't guarantee lock +validity indefinitely. For instance, a leaseholder could make a guarantee that a +lock will only be lost if the leaseholder crashes. In doing so, it could +guarantee isolation on locked keys up to its lease expiration. As a result, +unreplicated lock acquisition would provide a validity window for each lock, and +a transaction would only need to validate its locks if it ran for long enough to +exceed this validity window. This hybrid approach provides many of the benefits +of each alternative — namely, it avoids synchronous lock replication but it also +avoids commit starvation. However, for a leaseholder to make such a guarantee, +it would likely employ lock replication in certain cases (e.g. memory limits), +so it makes sense to consider this hybrid approach as a future optimization. 
+ +#### Reliability for Preview Release + +For the initial version of Read Committed, we propose a limited form of +synchronous **Lock Replication** as a mechanism to ensure lock reliability. This +is primarily due to the completeness **(T7)** and starvation **(T6)** properties +provided by the alternative. Performance is a secondary consideration, and the +performance of replicated locks can be optimized in future releases using the +suggestions outlined above. + +However, one performance optimization will be incorporated into the initial +design. While `SELECT FOR UPDATE` and `SELECT FOR SHARE` will synchronously +replicate locks, "implicit select for update" performed during the search phase +of certain mutation statements will continue to acquire unreplicated and +unvalidated locks. These locking reads are immediately followed by a write, so +they need not provide isolation on their own. Meanwhile, replicating these locks +would incur a severe latency cost. This is true even if the replication is +pipelined, because the subsequent write would immediately hit a pipeline stall +while waiting for the replication of the lock to complete. + +The decision of whether to replicate locks or not will be expressed from SQL +through the KV API using the existing `lock.Durability` flag. Only replicated +locks will provide isolation guarantees. + +## Query Planning and Execution + +To support the more stringent locking requirements of Read Committed, several +changes must be made to query planning and execution around lock acquisition. + +### Read-Only Queries (`SELECT`) + +Query planning and execution for read-only queries (i.e. `SELECT` statements) +will not change for Read Committed transactions, despite the use of +[per-statement read snapshots](#per-statement-read-snapshots). These queries +will continue to operate on a consistent snapshot of the system, so all planning +optimizations derived from SQL constraints remain valid. 
+ +### Explicit Locking Queries (`SELECT FOR UPDATE`) + +Under our Serializable isolation, locking is not needed for correctness. Because +of this, our current implementation of `SELECT FOR UPDATE` takes some liberties +for better performance. + +- Locks are currently [only placed on the indexes scanned by the + query](https://github.com/cockroachdb/cockroach/issues/57031). If the `SELECT + FOR UPDATE` query never reads from the primary index of the table, it will not + place a lock there. This could prevent a `SELECT FOR UPDATE` query from + correctly blocking an `UPDATE` if the `SELECT FOR UPDATE` and the `UPDATE` + touch disjoint indexes. +- Locks are acquired [during the initial index scans of the + query](https://github.com/cockroachdb/cockroach/issues/75457), even if some + rows are later eliminated (e.g. by a filter or a join). This could cause us to + acquire unnecessary locks, potentially causing artificial contention. +- To avoid this artificial contention, locks are sometimes [not +acquired](https://github.com/cockroachdb/cockroach/blob/48ef0d89e6179c0d348a5236ad308d81fa392f7c/pkg/sql/opt/exec/execbuilder/mutation.go#L987-L1009) + at all. This could prevent `SELECT FOR UPDATE` from working in some cases. +- As described in [Reliability and Enforcement](#reliability-and-enforcement), + locks are best-effort, and may not persist until commit for various reasons. + +Under Read Committed these shortcuts could cause incorrect query execution. To +fix them, when necessary we will add an extra locking join to the top of the +query plan instead of locking during the initial row fetch. This will typically +be an index join or a lookup join to the primary index of the table to lock (or +multiple joins in the case of multiple tables to lock). + +This locking join will acquire fully-replicated locks to ensure the locks +persist until commit, as described in [Reliability for Preview +Release](#reliability-for-preview-release). 
The locking join will return a
+`WriteTooOld` error if there have been any new versions committed to locked rows
+after the statement read snapshot.
+
+#### Optimizer Locking Alternatives
+
+There are several alternative methods the optimizer could use to produce this
+extra locking join for `SELECT FOR UPDATE`:
+1. Add a new `Lock` operator.
+2. Add a locking property to the existing `Select` operator.
+3. Add a new `LockedSelect` operator.
+4. Use a physical property enforcer.
+
+##### Alternative: Lock Operator
+
+TODO(michae2): describe lock operator alternative
+
+##### Alternative: Locking Property in Select
+
+TODO(michae2): describe locking property in select alternative
+
+##### Alternative: LockedSelect Operator
+
+TODO(michae2): describe lockedselect operator alternative
+
+##### Alternative: Physical Property Enforcer
+
+TODO(michae2): describe physical property enforcer alternative
+
+#### Optimizer Change for Preview Release
+
+For the initial version of Read Committed
+
+#### Locking Individual Column Families
+
+Narrowing lock scope to an individual column family of a row can help ensure
+that the performance benefits of multiple column families are realized in
+workloads with contention. For very simple `SELECT FOR UPDATE` queries, our
+current implementation is able to lock an individual column family of a row,
+rather than every column family, depending on how the initial row fetch of the
+`SELECT FOR UPDATE` is constrained.
+
+With the changes for Read Committed, we expect that `SELECT FOR UPDATE` will be
+able to lock only the necessary individual column families in more cases. This
+is because the extra locking join will be able to use column-family-tight spans
+in cases where the initial row fetch cannot. 
+ +#### Write-Write Version Conflicts + +As discussed in [Write-Write Conflict +Handling](#write-write-conflict-handling-or-lost-update-intolerance), `SELECT +FOR UPDATE` statements can experience write-write version conflicts if new +versions of rows are discovered after acquiring locks. PostgreSQL uses a special +`EvalPlanQual` mode to handle these write-write version conflicts, which +re-evaluates some of the query logic on the new version of each locked row. We +will not implement an EPQ mode. Instead, on discovering new committed versions, +the locking join will fail with a `WriteTooOld` error which will cause the +statement to retry. + +### Reading Mutation Statements (`INSERT ON CONFLICT`, `UPDATE`, `DELETE`, etc.) + +"Reading mutation statements" are DML statements that both read from and write +to the database, such as most `UPDATE` statements. Under Serializable isolation +the read sets of these statements are validated at transaction commit time, to +avoid write skew. In our current implementation, reading mutation statements +sometimes acquire implicit row locks to try and avoid retries, but as mentioned +previously these locks are not needed for correctness. + +Surprisingly, query planning and execution for reading mutation statements do +not need to change for Read Committed, despite their potential to incur +write-write conflicts. This is because write-write version conflicts will be +detected when these statements write new versions of each row (lay down +intents). Any committed version newer than the mutation statement's read +snapshot will generate a `WriteTooOld` error, causing at least one of the +conflicting statements to retry, so mutation statement execution can remain +unaware of write-write conflict handling. + +This means that, as for `SELECT FOR UPDATE`, we will not implement EPQ mode for +mutation statements. Instead we will rely on statement retries to handle +write-write conflicts. 
+ +And as described in [Reliability for Preview +Release](#reliability-for-preview-release), mutation statements can continue to +use unreplicated locks during their initial row fetch, because the initial +unreplicated locks do not need to persist until commit time for +correctness. + +FK checks performed at the end of mutation statements, however, will have to use +replicated locks to ensure we maintain FK constraints. (See [system-level SQL +constraints](#system-level-sql-constraints) below.) + +### Blind Mutation Statements (`INSERT`, `UPSERT`, `DELETE`, etc.) + +"Blind mutation statements" are DML statements that write to the database +without reading, such as most `UPSERT` statements. These statements cannot incur +lost updates, so it should almost never be necessary to retry these statements +at the conn_executor level. Fortunately, these statements can benefit from use +of server-side read refreshes, [as outlined +previously](#retry-avoidance-through-read-refreshes). + +### Common Table Expressions and User-Defined Functions + +Planning and execution for CTEs and UDFs does not need to change for Read +Committed isolation. + +CTEs, and both `STABLE` and `IMMUTABLE` UDFs will perform their reads at the +same read snapshot as the main statement. Any locking or mutation statements +they contain will perform in the manner described above. If a CTE or a UDF +encounters a `WriteTooOld` error due to a write-write conflict, the entire main +statement will retry. + +`VOLATILE` UDFs will perform their reads at the same read snapshot as the main +statement, but with [a later sequence +number](https://github.com/cockroachdb/cockroach/blob/08ac8fde23e42cf26677a3dfd1c3a0fb60e40f65/pkg/sql/routine.go#L44-L59), +allowing them to read writes performed by the main statement, but not writes +committed from later transactions. If a `VOLATILE` UDF encounters a +`WriteTooOld` error it will also cause the main statement to retry. 
+ +### System-level SQL Constraints + +System-level constraints must be enforced correctly regardless of isolation +level. Under Read Committed isolation, this will require holding replicated +locks for all constraint checks and cascades (whether executed inline or as +post-queries). + +[Foreign key checks](https://github.com/cockroachdb/cockroach/issues/80683) will +change to use `SELECT FOR SHARE` locks. + +Foreign key cascades will not change. The intents they write will function as +replicated locks for the duration of the transaction. + +`UNIQUE` checks will not change. These checks are always handled through a +combination of `InitPut` or `CPut` commands and careful key encoding when +mutating a row. The intents written by the row mutation will function as +replicated locks. + +`UNIQUE WITHOUT INDEX` checks cannot easily be implemented correctly under Read +Committed using only row locking, because they depend on the non-existence of a +span of rows. Initially we will disallow the enforcement of `UNIQUE WITHOUT +INDEX` checks in transactions run under Read Committed, so these transactions +will be unable to insert into tables with this form of constraint. Consequently, +`REGIONAL BY ROW` tables will be inaccessible to Read Committed transactions in +the initial preview. Eventually we will allow `UNIQUE WITHOUT INDEX` checks if +the check can be built using single-row spans (i.e. if there is an index which +only has enum columns before the `UNIQUE WITHOUT INDEX` columns). This will +require taking `SELECT FOR SHARE` locks on non-existent rows. + +`CHECK` constraint checks will have to acquire `SELECT FOR SHARE` locks on any +unmodified column families if the constraint references multiple column +families. This may negate the benefit of column families in some cases. + +### Transaction Orchestration + +TODO(nvanbenschoten): Work with Rafi to flesh this section out. Mention +connExecutor and changes for: +- Updating txn read snapshot on each statement. 
Done through existing calls to Txn.Step
+- Retrying statements on retry errors. Needs new error handling logic.
+
+## Configuration
+
+Configuration of the Read Committed isolation level will be handled through the
+standard session variable infrastructure.
+
+Individual transactions can be configured to run at the Read Committed isolation
+level using any of the following configuration methods:
+```sql
+-- set isolation in BEGIN statement
+BEGIN TRANSACTION ISOLATION LEVEL READ COMMITTED;
+
+-- set isolation of current transaction (statement style)
+BEGIN; SET TRANSACTION ISOLATION LEVEL READ COMMITTED;
+
+-- set isolation of current transaction (variable style)
+BEGIN; SET transaction_isolation = 'read committed';
+```
+
+The default isolation level for all future transactions can be set to Read
+Committed using any of the following configuration methods:
+```sql
+-- set default isolation level (statement style)
+SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL READ COMMITTED;
+
+-- set default isolation level (variable style)
+SET default_transaction_isolation = 'read committed';
+
+-- set default isolation level (connection parameter style)
+cockroach sql --url='postgres://root@hostname:26257/db?options=-c default_transaction_isolation=read%20committed'
+```
+
+Like in PostgreSQL, READ UNCOMMITTED will also be accepted by all of these
+configurations and will map to READ COMMITTED.
+
+## Configuration Introspection
+
+Observability into the current transaction's isolation level and the session's
+default isolation level will both also be handled through the standard session
+variable infrastructure:
+```sql
+SHOW transaction_isolation;
+
+SHOW default_transaction_isolation;
+```
+
+### Configuration Migration
+
+Currently, CockroachDB accepts many of these configurations. However, it ignores
+them and provides applications with Serializable isolation instead. 
This poses a +risk that applications which use CockroachDB today are unwittingly asking for +Read Committed, either directly or indirectly (e.g. through an ORM). These same +applications may not be prepared to suddenly jump to Read Committed when +upgrading CockroachDB to v23.2. + +To hedge against this risk and provide users that fall into this situation with +quick remediation, a hidden cluster setting will be introduced to disable the +use of Read Committed and retain the existing mappings in all configurations +from "read committed" isolation to "serializable" isolation. The cluster setting +can be removed in a future release. + +## Observability + +Details about transaction isolation will be added to the following three system +tables: +``` +crdb_internal.node_transactions +crdb_internal.cluster_transactions +crdb_internal.cluster_locks +``` + +Surfacing transaction isolation and its impact on transaction contention in the +DB console is important, but it is beyond the scope of this RFC. + +## Miscellaneous Interactions + +### With Serializable Isolation + +Serializable and Read Committed transactions can coexist in relative harmony. As +described earlier, the isolation levels differ primarily in their tolerance to +write skew and in their selection of read snapshot(s). These differences are +internal concerns that do not affect other concurrent transactions, so mixing +transaction isolation levels will not cause problems. Regardless of isolation +level, transactions will continue to read from consistent snapshots of the +system and the commit of any transaction will remain atomic to all other +transactions. + +A serializable history will still be possible to construct from all transactions +running at Serializable isolation. However, it will not always be possible to +place Read Committed transactions into that history. 
+
+The one interaction between the two isolation levels that deserves discussion is
+write-read conflicts where the writer is a Serializable transaction and the
+reader is a Read Committed transaction. We discussed earlier that we intend to
+make write-read conflicts non-blocking for all isolation levels. However, we
+will only initially make that change for Read Committed transactions because
+there are fewer consequences to doing so. During this intermediate period, we
+could either block the Read Committed transaction on the Serializable
+transaction to avoid pushing the Serializable transaction and forcing it to
+refresh, or we could make the conflict non-blocking to avoid unexpectedly
+blocking the Read Committed transaction. We chose the latter option to ensure
+that reads in Read Committed transactions never block, potentially at the cost
+of Serializable transaction retries.
+
+### With Implicit Transactions
+
+Implicit transactions are single-statement transactions without `BEGIN` or
+`COMMIT` statements. These transactions benefit from [automatic
+retries](https://www.cockroachlabs.com/docs/stable/transactions.html#automatic-retries)
+thanks to the CockroachDB SQL gateway hearing about the entire transaction at
+once.
+
+Read Committed's "per-statement Snapshot isolation" transaction model implies
+that these implicit transactions will behave identically to a potential Snapshot
+isolation level. Namely, they will operate at a single read snapshot and will
+permit skew between their commit timestamp and this read snapshot's timestamp.
+
+### With One-Phase Commit
+
+One-Phase Commit is a fast-path for mutations that perform writes to a single
+range and issue these writes in a batch with their EndTxn request. These
+transactions can avoid two-phase commit, bypassing the use of intents or a
+transaction record and writing committed values directly.
+
+Read Committed transactions will have access to this fast-path. 
The only +relevant difference between Read Committed transactions and Serializable +transactions is that Read Committed transactions have a more lenient commit +condition (skew between their read and write timestamps are permitted) and the +one-phase commit fast-path must be made aware of this. + +### With AS OF SYSTEM TIME + +The [`AS OF SYSTEM +TIME`](https://www.cockroachlabs.com/docs/stable/as-of-system-time.html) clause +can be added to individual statements or to entire read-only transactions, +instructing CockroachDB to read at a historical MVCC snapshot. This is an +important feature that underpins follower reads. + +In some regard, such a feature is incompatible with the Read Committed isolation +level, which captures a new MVCC snapshot for each statement in a transaction. +For this reason, PostgreSQL disallows the use of `SET TRANSACTION SNAPSHOT` +(analogous to `AS OF SYSTEM TIME`) in Read Committed transactions. When issued, +an error is returned: +``` +ERROR: a snapshot-importing transaction must have isolation level SERIALIZABLE or REPEATABLE READ +``` + +Proscribing `AS OF SYSTEM TIME` in Read Committed transactions would cause +confusion and inconvenience for users of CockroachDB, especially when attempting +to use follower reads. Instead of banning it, the syntax will be accepted in +`BEGIN` and `SET` statements and the transaction will be promoted to a +read-only, Serializable transaction[^6] with the specified fixed read timestamp +across all statements, which matches the user's intent. Such transactions are +not subject to retry errors. + +[^6]: read-only, Serializable isolation transactions are equivalent to + read-only, Snapshot isolation transactions. + +As an extension, the syntax can also be accepted on individual `SELECT` +statements in a Read Committed transaction. This will instruct the transaction +to run the individual statement at the specified fixed read timestamp. 
The +utility of this may be limited, but the behavior matches expectations of Read +Committed transactions. + +### With Non-Standard SELECT FOR UPDATE Wait Policies + +TODO(michae2): write this after the Explicit Row-Level Locking (SELECT FOR +UPDATE) above. + +#### NOWAIT + +#### SKIP LOCKED + +### With Schema Changes + +The schema change protocol runs partially within user transactions, so special +care must be taken before permitting Read Committed transactions to perform +schema changes. The protocol depends on a serialization of versions for each +individual descriptor and on cross-descriptor consistency. Consequently, lost +updates on a single descriptor and write skew across descriptors must both be +prohibited. + +The use of explicit row-level locking (`SELECT FOR SHARE`) during descriptor +lookups may be sufficient to eliminate these hazards. However, for the preview +release of Read Committed, schema changes in Read Committed transactions will be +disabled. + +### With Column Families + +TODO(nvanbenschoten): prove that writes to disjoint column families cannot cause +index corruption. + +### With CDC + +Read Committed has no meaningful interaction with CDC. Committed values from +Read Committed transactions will be published on rangefeed subscriptions during +Raft log application like in any other transaction. + +### With Multi-Region + +Read Committed has only limited interactions with multi-region. At a high level, +this is because Read Committed weakens isolation guarantees but not consistency +guarantees. Consistency is where much of the cost of multi-region comes from. + +With that said, Read Committed transactions run in a multi-region deployment +will still see some benefit from their weaker isolation guarantees. For example, +Read Committed transactions avoid refreshing reads on commit, which can be +costly in a read-write transaction that reads from a remote leaseholder. 
+ +On the other hand, the high network latencies in a multi-region cluster may also +expand the timing window for isolation-related anomalies to occur in Read +Committed transactions. Anomalies that may be rare in single-region clusters +(because transactions commit with low latency) may become more common in +multi-region clusters. + +TODO(michae2): mention `UNIQUE WITHOUT INDEX` constraints + +### With Multi-Tenancy + +Read Committed has no meaningful interaction with multi-tenancy. Tenants can run +transactions at any isolation level without concern. + +However, the introduction of non-blocking write-read conflicts will simplify the +resolution of [#71946](https://github.com/cockroachdb/cockroach/issues/71946). + +# Testing + +To gain confidence in an implementation of Read Committed, testing will be +needed at multiple levels. As always, individual changes and components will be +validated using targeted unit tests. However, ensuring transaction isolation +guarantees is a cross-cutting concern, so integration testing across the +database stack will be paramount. + +To verify the integrity of changes made for Read Committed to the key-value +layer, [kvnemesis](https://github.com/cockroachdb/cockroach/blob/fadd137a98540a317a379e938a7545fa70590cb4/pkg/kv/kvnemesis/doc.go) +will be enhanced in four ways: +1. The framework will be updated to run transactions at weak isolation levels. + The framework's validator will be taught to permit write skew for + transactions run below Serializable isolation. +2. The framework will be updated to step read committed transactions through + multiple read snapshots, mimicking the style of use expected from SQL when + encountering statement boundaries. +3. The framework's generator will be updated to issue locking reads with a + SHARED lock strength. +4. The framework's generator will be updated to issue locking reads with a + REPLICATED lock durability. 
The framework's validator will then be taught to
+   expect stronger (serializable-like) isolation guarantees from keys locked by
+   a transaction with replicated locks.
+
+Integration testing of Read Committed that exercises SQL will also be
+introduced.
+
+CockroachDB's existing suite of logictests will be extended to run transactions
+under Read Committed isolation. Very few logictests exercise concurrency, so
+they should behave no differently than if run under Serializable isolation. The
+few logictests that do manipulate multiple session concurrency will need to be
+updated with isolation-specific expectations.
+
+[Elle](https://github.com/jepsen-io/elle) is a transactional consistency checker
+for black-box databases, which supports weak isolation levels and has been
+integrated into [Jepsen](https://github.com/jepsen-io/jepsen). CockroachDB
+already integrates Jepsen into its nightly test suite. This testing will be
+expanded to exercise Read Committed and validate correctness.
+
+[Hermitage](https://github.com/ept/hermitage) is a test suite that consists of
+transaction histories that simulate various concurrency issues. The test suite
+will be manually run against CockroachDB's Read Committed isolation level to
+ensure that it is subject to expected concurrency anomalies and not subject to
+unexpected anomalies. It will also be updated to reflect the addition of new
+isolation levels to CockroachDB.
+
+PostgreSQL's [isolation test
+suite](https://github.com/postgres/postgres/blob/36f40ce2dc66f1a36d6a12f7a0352e1c5bf1063e/src/test/isolation/README)
+contains a set of tests for concurrent transaction behaviors running at
+different isolation levels in PostgreSQL. This test suite will be hooked up to
+CockroachDB. While the behavior of transactions differs between CockroachDB and
+PostgreSQL in multiple ways, it will be useful to see how much of the test suite
+passes against CockroachDB and to understand precisely why the tests that fail
+do so. 
+ +Nightly testing of TPC-C will be adapted to run at the Read Committed isolation +level. When doing so, transaction retry loops will be removed from the workload +to validate that retry errors are rare or non-existent under weak isolation. +TPC-C provides a useful sandbox to test weaker isolation levels because it +contains three moderately complex read-write transactions, two read-only +transactions, a diverse schema with referential integrity constraints, and +twelve post-workload consistency checks. + +Finally, existing workloads that run against Read Committed in other DBMS +systems will be solicited from customers. Where possible, these will be run +against CockroachDB's implementation of Read Committed to validate correctness, +sufficient completeness of the Read Committed implementation, and expected +properties like no retry errors and non-blocking reads. + +# Performance + +TODO(nvanbenschoten): from @bdarnell: +> I'd like to see a section on performance. There are a number of ways that this +> proposal affects performance, both good (less wasted work doing retries, no +> pre-commit span refreshes), and bad (lock replication, savepoint overhead, +> explicit locks for FK checks). Aside from the correctness concerns, how can we +> characterize the net expected performance of the two isolation levels? +> +> There are also more subtle performance-related risks: Many applications have +> isolation-related bugs that go undetected because the app simply doesn't get +> enough traffic to hit the necessary race conditions. If your sub-millisecond +> operations suddenly start to take longer because foreign key checks now +> involve multiple RPCs, these latent bugs may be exposed. + +# Variation from PostgreSQL + +The form of the Read Committed isolation level presented here is strictly +stronger than what is found in PostgreSQL. The difference between these two +implementations is in how they handle write-write conflicts during mutation +statements. 
+ +The "Per-Statement Snapshot Isolation" model presented here retries individual +statements on write-write conflicts, ensuring that within a single statement, no +lost updates are permitted. This stronger model avoids certain anomalies that +could allow a mutation statement to perceive non-atomic commits of other +transactions. In exchange, this stronger model is subject to internal +per-statement retries. + +The "Postgres-Compatible Intra-Mutation Consistency" model breaks mutation +statements into a search phase, a locking phase, and a predicate re-evaluation +phase. This decomposition avoids any per-statement retries. In exchange, it can +permit intra-statement lost updates and other anomalous behavior when +write-write conflicts are experienced. + +A more complete comparison between the two models is presented in the +[appendix](#appendix-postgres-compatible-intra-mutation-consistency). This +comparison is accompanied by a discussion of how Postgres-Compatible +Intra-Mutation Consistency might be implemented in CockroachDB. + +# Drawbacks + +The primary drawback of introducing the Read Committed isolation level is that +it provides users with a tool to weaken the correctness guarantees of +CockroachDB. Unwitting users may employ Read Committed for performance reasons +without understanding the trade-offs, leading to unexpected correctness bugs or +data corruption. + +This is a real risk. Yet, the risk of not providing users with this +configuration and failing to support a large class of applications is greater. +Instead, we will combat this concern with ample documentation to help users make +well-informed decisions about the performance/correctness trade-off. + +# Unresolved questions + +## Should we implement Snapshot isolation at the same time? + +As alluded to in this proposal, the changes needed to implement Snapshot +isolation are contained within the changes needed to implement Read Committed. 
+As a result, it will be a small lift for us to expose Snapshot isolation after +making these changes. Conceptually, Snapshot isolation is identical to Read +Committed except that it chooses _not_ to advance a transaction's read snapshot +at each statement boundary. + +We propose not to expose this isolation level immediately to keep engineering +efforts focused and to reduce the scope of testing work needed to gain +confidence in these changes. Still, we note that doing so will be a small lift +if/when we decide that such an isolation level is needed. + +## If we implement Snapshot isolation, should we call it Repeatable Read? + +Strictly speaking, the two isolation levels are not the same. Repeatable Read +(PL-2.99 in Adya) permits Phantom Reads but does not permit Write Skew. Snapshot +isolation (PL-SI in Adya) does not permit Phantom Reads but does permit Write +Skew. + +However, there is ample precedent for conflating the two with minimal concern. +Chiefly, PostgreSQL itself implements a form of Snapshot isolation and calls it +Repeatable Read to remain ANSI SQL compliant. Therefore, if we decide to +implement Snapshot isolation, we propose that we also call it Repeatable Read. + +## Should Read Committed become the new default isolation level in CockroachDB? + +We do not plan to immediately change the default isolation level in CockroachDB. +If such a decision is made at some later point, it will be separate from the +initial design and implementation effort. + +# Appendix: Examples + +In the following examples, consider the schema: +```sql +create table kv (k int primary key, v int); +``` + +### SELECT behavior (without FOR UPDATE) + +```sql +truncate table kv; +insert into kv values (1, 5); +``` + +| Client 1 | Client 2 | +| -------- | -------- | +| `begin transaction isolation level read committed;` | | +| | `begin transaction isolation level read committed;` | +| `select * from kv;`
`k \| v`
`--+---`
`1 \| 5` | | +| | `insert into kv values (2, 6);` | +| `select * from kv;`
`k \| v`
`--+---`
`1 \| 5` | | +| `insert into kv values (3, 7);` | | +| `select * from kv;`
`k \| v`
`--+---`
`1 \| 5`
`3 \| 7` | | +| | `commit;` | +| `select * from kv;`
`k \| v`
`--+---`
`1 \| 5`
`2 \| 6`
`3 \| 7` | | +| `commit;` | | + +### SELECT FOR UPDATE behavior + +```sql +truncate table kv; +insert into kv values (0, 5), (1, 5), (2, 5), (3, 5), (4, 1); +``` + +| Client 1 | Client 2 | +| -------- | -------- | +| `begin transaction isolation level read committed;` | | +| | `begin transaction isolation level read committed;` | +| | `insert into kv values (5, 5);` | +| | `update kv set v = 10 where k = 4;` | +| | `delete from kv where k = 3;` | +| | `update kv set v = 10 where k = 2;` | +| | `update kv set v = 1 where k = 1;` | +| | `update kv set k = 10 where k = 0;` | +| `select * from kv where v >= 5 for update;`
`... waits ...` | | +| | `commit;` | +| `... waiting completes`
`k \| v`
`--+---`
`2 \| 10`
`4 \| 10`
`5 \| 5`
`10 \| 5` | | +| `commit;` | | + +### UPDATE and DELETE behavior + +```sql +truncate table kv; +insert into kv values (0, 5), (1, 5), (2, 5), (3, 5), (4, 1); +``` + +| Client 1 | Client 2 | +| -------- | -------- | +| `begin transaction isolation level read committed;` | | +| | `begin transaction isolation level read committed;` | +| | `insert into kv values (5, 5);` | +| | `update kv set v = 10 where k = 4;` | +| | `delete from kv where k = 3;` | +| | `update kv set v = 10 where k = 2;` | +| | `update kv set v = 1 where k = 1;` | +| | `update kv set k = 10 where k = 0;` | +| `update kv set v = 100 where v >= 5;`
`... waits ...` | | +| | `commit;` | +| `... waiting completes` | | +| `select * from kv`
`k \| v`
`--+---`
`1 \| 1`
`2 \| 100`
`4 \| 100`
`5 \| 100`
`10 \| 100` | | +| `commit;` | | + +### INSERT behavior + +Insert a new key that has just been changed by another transaction: + +```sql +truncate table kv; +insert into kv values (1, 1); +``` + +| Client 1 | Client 2 | +| -------- | -------- | +| `begin transaction isolation level read committed;` | | +| | `begin transaction isolation level read committed;` | +| | `update kv set k = 2 where k = 1;` | +| `insert into kv values (2, 1);`
`... waits ...` | | +| | `commit;` | +| `... waiting completes`
`ERROR: duplicate key value violates unique constraint "kv_pkey"` | | +| `rollback;` | | + +Insert a new key that has just been changed by another transaction, with `ON CONFLICT`: + +```sql +truncate table kv; +insert into kv values (1, 1); +``` + +| Client 1 | Client 2 | +| -------- | -------- | +| `begin transaction isolation level read committed;` | | +| | `begin transaction isolation level read committed;` | +| | `update kv set k = 2 where k = 1;` | +| `insert into kv values (2, 1) on conflict (k) do update set v = 100;`
`... waits ...` | | +| | `commit;` | +| `... waiting completes` | | +| `select * from kv`
`k \| v`
`--+---`
`2 \| 100` | | +| `commit;` | | + +Insert an old key that has been removed by another transaction: + +```sql +truncate table kv; +insert into kv values (1, 1); +``` + +| Client 1 | Client 2 | +| -------- | -------- | +| `begin transaction isolation level read committed;` | | +| | `begin transaction isolation level read committed;` | +| | `update kv set k = 2 where k = 1;` | +| `insert into kv values (1, 1);`
`... waits ...` | | +| | `commit;` | +| `... waiting completes` | | +| `select * from kv`
`k \| v`
`--+---`
`1 \| 1`
`2 \| 1` | | +| `commit;` | | + +Insert an old key that has been removed by another transaction, with `ON CONFLICT`: + +```sql +truncate table kv; +insert into kv values (1, 1); +``` + +| Client 1 | Client 2 | +| -------- | -------- | +| `begin transaction isolation level read committed;` | | +| | `begin transaction isolation level read committed;` | +| | `update kv set k = 2 where k = 1;` | +| `insert into kv values (1, 1) on conflict (k) do update set v = 100;`
`... waits ...` | | +| | `commit;` | +| `... waiting completes` | | +| `select * from kv`
`k \| v`
`--+---`
`1 \| 1`
`2 \| 1` | | +| `commit;` | | + +# Appendix: Proof of Correctness + +We demonstrate that this model for Read Committed is stronger than[^7] +Berenson's characterization of Read Committed and Adya's characterization of +Read Committed (PL-2). We also demonstrate that it is equivalent to[^8] ANSI +SQL's characterization of Read Committed (Degree 2). + +[^7]: Using [Berenson et al.'s + definition](https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/tr-95-51.pdf) + of stronger than, denoted `L1 » L2`. +[^8]: Using [Berenson et al.'s + definition](https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/tr-95-51.pdf) + of equivalent to, denoted `L1 == L2`. + +Berenson defines Read Committed as proscribing Dirty Write (P0) and Dirty Read +(P1) and permitting Non-repeatable Read (P2), Phantom Read (P3), Lost Update +(P4), and Read/Write Skew (A5). Our transaction model does not permit P0 because +transactions are not permitted to overwrite uncommitted writes from other +transactions. Our transaction model also does not permit P1 because all reads +are served out of the per-statement reads snapshots established at the beginning +of each statement. However, P3 and P4 are both allowed because subsequent +statements can use different read snapshots, and so different statements in the +same transaction can see different data. A5 is also permitted, because read +locks are not acquired during reads, allowing both forms of skew to occur. + +| | P0
Dirty Write | P1
Dirty Read | P2
Fuzzy Read | P3
Phantom | P4C
Cursor Lost Update | P4
Lost Update | A5A
Read Skew | A5B
Write Skew |
+| ------------ | ----------------- | ---------------- | ---------------- | ------------- | ------------------------- | ---------------------- | ---------------- | ----------------- |
+| Berenson RC | Not Possible | Not Possible | Possible | Possible | Possible | Possible | Possible | Possible |
+| CRDB RC | Not Possible | Not Possible | Possible* | Possible* | Possible* | Possible* | Possible* | Possible |
+
+While permitted by this formalization, our transaction model only allows some
+forms of P2, P3, P4, P4C, and A5A. Read anomalies are not permitted within a
+statement because each statement operates using a consistent snapshot.
+Similarly, lost updates between the reads and writes in a statement are not
+permitted because a first writer wins conflict resolution policy is applied to
+such conflicts. However, all of these anomalies are permitted across statements.
+
+Adya defines Read Committed (PL-2) as proscribing G0 (Write Cycles), G1a
+(Aborted Reads), G1b (Intermediate Reads), and G1c (Circular Information Flow)
+and permitting G-single (Single Anti-dependency Cycles), G2-item (Item
+Anti-dependency Cycles), and G2 (Anti-dependency Cycles). Our transaction model
+does not permit G0 because transactions are not permitted to overwrite
+uncommitted writes from other transactions. Our transaction model also does not
+permit G1a, G1b, or G1c because all reads are served out of the per-statement
+reads snapshots established at the beginning of each statement. These snapshots
+contain writes from committed transactions, and only the final version of those
+writes on any given row. However, G-single, G2-item, and G2 are all permitted
+because our model allows a Read Committed transaction's read timestamp(s) to
+skew from its commit timestamp ("Write Skew Tolerance").
+
+| | G0
Write Cycles | G1a
Aborted Reads | G1b
Intermediate Reads | G1c
Circular Information Flow | G-single
Single Anti-dependency Cycles | G2-item
Item Anti-dependency Cycles | G2
Anti-dependency Cycles |
+| --------- | ------------------ | ---------------- | ---------------- | ------------- | ------------- | ---------- | ----------- |
+| Adya PL-2 | Not Possible | Not Possible | Not Possible | Not Possible | Possible | Possible | Possible |
+| CRDB RC | Not Possible | Not Possible | Not Possible | Not Possible | Possible* | Possible | Possible |
+
+As with the previous analysis, our transaction model is stronger than required
+by Adya because it does not permit G-single within a single statement.
+
+Finally, ANSI SQL defines Read Committed as proscribing Dirty Read (P1) and
+permitting Non-repeatable Read (P2) and Phantom Read (P3). As demonstrated
+above, our transaction model does not permit P1 but does permit P2 and P3.
+Therefore, it is _equivalent to_ ANSI SQL's definition of Read Committed.
+
+| | P1
Dirty Read | P2
Non-repeatable Read | P3
Phantom Read | +| ------- | ---------------- | ------------------------- | ------------------ | +| ANSI RC | Not Possible | Possible | Possible | +| CRDB RC | Not Possible | Possible | Possible | + +# Appendix: Postgres-Compatible Intra-Mutation Consistency + +This RFC proposed a "Per-Statement Snapshot Isolation" transaction model for +Read Committed. An alternative approach considered was a "Postgres-Compatible +Intra-Mutation Consistency" transaction model. + +## Comparison + +The difference between these two models is how they handle write-write conflicts +during mutation statements. + +The "Per-Statement Snapshot Isolation" model presented here retries individual +statements on write-write conflicts, ensuring that within a single statement, no +lost updates are permitted. This stronger model avoids certain anomalies that +could allow a mutation statement to perceive non-atomic commits of other +transactions. In exchange, this stronger model is subject to internal +per-statement retries. + +The "Postgres-Compatible Intra-Mutation Consistency" model breaks mutation +statements into a search phase, a locking phase, and a predicate re-evaluation +phase. This decomposition avoids any per-statement retries. In exchange, it can +permit intra-statement lost updates and other anomalous behavior when +write-write conflicts are experienced. + +## Write-Write Conflict Handling + +Per-statement read snapshots are one major difference between a Read Committed +implementation and a Serializable (or hypothetical Snapshot) implementation. The +other major difference is in the handling of write-write conflicts. Where a +Serializable transaction would throw a serialization error on a write-write +conflict, Read Committed transactions wait for the conflict to resolve (e.g. the +conflicting transaction to commit or abort and release locks) and then continue +running. 
+ +To understand this, we first decompose the definition of a write-write conflict +as follows: + +**Write-Write Locking Conflict**: any case where a transaction attempts to lock +or write to a key that is locked with an exclusive lock by a different +transaction, where intents are considered to be a combination of an exclusive +lock and a provisional value. + +**Write-Write Version Conflict**: any case where a transaction attempts to lock +or write to a key that has a _committed version_ with an MVCC timestamp greater +than the locking/writing transaction's current read snapshot. + +We define _write-write version conflict_ in terms of a transaction's "current +read snapshot" (i.e. `txn.ReadTimestamp`) to afford flexibility in the +definition to transactions that change their read snapshot across their +execution. For example, Read Committed transactions advance their read snapshot +on each statement boundary, so a committed version that would cause a +write-write version conflict for one statement may not cause a write-write +version conflict for a later statement in the same transaction. + +Read Committed transactions handle _write-write locking conflicts_ identically +to Serializable transactions. The prospective locker +[waits](#blocking-write-write-conflicts) for the existing exclusive lock to be +released before acquiring it. In cases where multiple transactions wait for +exclusive access to the same key, they form an orderly queue through the +`lockWaitQueue` mechanism. + +Once a Read Committed transaction has navigated any potential _write-write +locking conflict_, it may experience a _write-write version conflict_. In such +cases, locking read operations (e.g. `Get(key, Exclusive)`) return the latest +committed version of the key, regardless of the reader's read snapshot. Writing +operations (e.g. `Put(key, value)`) place an intent on the key with a version +timestamp above the latest committed version. 
+ +In both cases, the operations advance the locker/writer's `WriteTimestamp` above +the committed version's timestamp. Recall that while a transaction is running, +the `WriteTimestamp` serves as its provisional commit timestamp, forming a lower +bound on the MVCC timestamp that the transaction can commit at. + +#### Impact of Write-Write Conflict Handling on KV API + +This necessitates a change in the KV API's handling of write-write version +conflicts between transaction isolation levels. This difference is summarized +below: + +| Operation type | SI Read Version | SI WW Version Conflict | RC Read Version | RC WW Version Conflict | +| ---------------- | ----------------- | -------------------------- | ----------------- | -------------------------- | +| Non-locking Read | [txn.ReadTimestamp](https://github.com/cockroachdb/cockroach/blob/8e24570fa366ed038c6ae65f50db5d8e22826db0/pkg/storage/pebble_mvcc_scanner.go#L791) | N/A | txn.ReadTimestamp | N/A | +| Locking Read | [txn.ReadTimestamp](https://github.com/cockroachdb/cockroach/blob/8e24570fa366ed038c6ae65f50db5d8e22826db0/pkg/storage/pebble_mvcc_scanner.go#L791) | [WriteTooOldError](https://github.com/cockroachdb/cockroach/blob/8e24570fa366ed038c6ae65f50db5d8e22826db0/pkg/storage/pebble_mvcc_scanner.go#L832) | latest version | txn.WriteTimestamp.Forward | +| Write-Only | N/A | [txn.WriteTimestamp.Forward](https://github.com/cockroachdb/cockroach/blob/8e24570fa366ed038c6ae65f50db5d8e22826db0/pkg/kv/kvserver/replica_evaluate.go#L371), then [WriteTooOldError](https://github.com/cockroachdb/cockroach/blob/8e24570fa366ed038c6ae65f50db5d8e22826db0/pkg/kv/kvclient/kvcoord/txn_interceptor_span_refresher.go#L259) | N/A | txn.WriteTimestamp.Forward | +| Read-Write | [txn.ReadTimestamp](https://github.com/cockroachdb/cockroach/blob/8e24570fa366ed038c6ae65f50db5d8e22826db0/pkg/storage/mvcc.go#L2050) | 
[WriteTooOldError](https://github.com/cockroachdb/cockroach/blob/8e24570fa366ed038c6ae65f50db5d8e22826db0/pkg/kv/kvserver/replica_evaluate.go#L365) | latest version | txn.WriteTimestamp.Forward | + +Users of the KV API must be aware of this change in write-write version conflict +handling or they will risk lost updates. Specifically, a non-locking read of a +key followed by a write to that key in the same transaction could ignore a +committed version between the transaction's read and write timestamp. + +Users of the KV API must also be aware that this change in write-write version +conflict handling can expose locking read and read-write operations to an +inconsistent snapshot of the system. + +We will later see how [SQL handles these +situations](#mutations-update-delete-etc), exploiting the weakened consistency +guarantees to avoid retries while still providing reasonably sound semantics. +However, in general, we recommend that most users of the raw KV API continue to +use Serializable transactions. + +## Post-Locking Predicate Re-evaluation + +A "Postgres-Compatible Intra-Mutation Consistency" transaction model requires +additional SQL planning and execution logic beyond the "Per-Statement Snapshot +Isolation" model in order to handle the predicate re-evaluation step. The +following proposal was considered as an approach to closely (but not exactly) +match the postgres predicate re-evaluation behavior. The complexity necessary to +fully implement this behavior was a factor in the decision to instead implement +the "Per-Statement Snapshot Isolation" model. + +The proposed design hoped to achieve a few desirable properties: +1. There should not be significant overhead for the happy path (no conflicts). +2. CRDB should not exhibit anomalies that are prevented in Postgres. +3. The solution should be general, and shouldn’t require special execution-time + handling for individual operators (e.g. 
the Postgres
+ [EvalPlanQual](https://github.com/postgres/postgres/blob/0bc726d95a305ca923b1f284159f40f0d5cf5725/src/backend/executor/README#L350-L351)
+ step for scans).
+
+### Motivation
+
+Under the "Postgres-Compatible Intra-Mutation Consistency" model, predicate
+re-evaluation is necessary because a row may be updated between when it is read
+and filtered, and when it is locked. Note that this is not a concern when all
+predicates can be inlined into a locking scan, because locking and filtering
+happen at the same time in this case. Re-evaluation becomes necessary when the
+query scans, filters, and then locks rows in separate steps.
+
+### Requirements
+
+Postgres places the following restrictions on the syntax of a query that
+performs locking:
+
+1. Locking is [not permitted](https://www.postgresql.org/docs/current/sql-select.html) for
+ tables within GROUP BY, HAVING, WINDOW, DISTINCT, UNION, INTERSECT, or EXCEPT
+ clauses.
+2. In addition, postgres [doesn't allow](https://github.com/postgres/postgres/blob/eae0e20deffb0a73f7cb0e94746f94a1347e71b1/src/backend/optimizer/plan/initsplan.c#L1415-L1442)
+ locking to include tables from the null-extended side of an outer join.
+
+Note that this applies both to explicit locking specified using `SELECT FOR
+UPDATE` syntax, as well as the implicit locking added for mutations (applied
+only to the target table). Because of these restrictions, only mutations, WHERE
+clauses, and joins have to be considered for a re-evaluation approach.
+
+### The Approach
+
+Under the re-evaluation approach, a query with locking follows these steps:
+1. Execute the main query, as normal.
+2. Lock the qualifying rows of any locked tables.
+3. Retrieve the most up-to-date values for the locked rows.
+4. Logically re-evaluate the query for the rows that changed.
+
+The following sub-sections will specify how these steps might be implemented.
+
+Note that the examples shown will have some details omitted for clarity.
Some of +the SQL syntax is simplified for the same reason. + +#### Lock Operator + +Given a query that scans and then filters rows, locking can be handled by adding +a lookup join onto the primary key of each locking table, which locks each row +and returns its most recent version. This takes advantage of the aforementioned +KV api changes. Re-evaluation is then performed using these updated values. + +Consider the following example query: +``` +SELECT * FROM customers c +WHERE NOT EXISTS (SELECT * FROM orders o WHERE o.id = c.order_id) +FOR UPDATE OF c; +``` +The rewrite to use a locking lookup join would look approximately like this: +``` +SELECT locked.* FROM +( + SELECT * FROM customers c + WHERE NOT EXISTS (SELECT * FROM orders o WHERE o.id = c.order_id) +) INNER LOOKUP JOIN customers locked ON c.id = locked.id +FOR UPDATE OF locked; +``` +The remainder of the query (including predicate re-evaluation) would proceed +with the updated values returned by the lookup join. This covers steps (1) and +(2) of the approach. + +For convenience, assume that the locking lookup join also projects a boolean +column that indicates whether a given row was updated. + +#### WHERE Clause + +WHERE clause handling is straightforward; given the updated and locked rows +returned by the locking lookup join, we re-evaluate the WHERE clause predicates +using the updated values. + +Consider again the example query: +``` +SELECT * FROM customers c +WHERE NOT EXISTS (SELECT * FROM orders o WHERE c.id = o.cust_id) +FOR UPDATE OF c; +``` +The predicate from the WHERE clause would be duplicated, and made to reference +the updated values returned by the locking lookup join from the previous step: +``` +SELECT locked.* FROM (...) locked +WHERE NOT EXISTS (SELECT * FROM orders o WHERE o.id = locked.order_id) +``` +This would remove any new values of `order_id` that don't pass the predicate. 
+ +#### Joins + +A locking query can perform INNER or LEFT joins between the locked table and +arbitrary subqueries. Note that the FROM and USING clauses of UPDATE and DELETE +statements perform implicit INNER joins. For now, ignore the possibility of +locking multiple tables. + +The difficulty is that the non-locking subqueries can be arbitrarily complex, +and their results can depend on rows from the locking relation for correlated +subqueries. Since the locking relation's rows can observe concurrent updates +during re-evaluation, the result of a correlated subquery can change during +re-evaluation. It may be possible to simplify and decorrelate specific examples, +but (at least currently) we cannot guarantee this in the general case. + +Postgres actually re-executes the correlated subqueries with scan nodes rigged +to return the same (single) row that contributed to the original output row (see +[here](https://github.com/postgres/postgres/blob/eae0e20deffb0a73f7cb0e94746f94a1347e71b1/src/backend/executor/execScan.c#L27-L38)). +This differs subtly from fully re-executing the correlated subqueries with +updated values. The behavior does not fit well with CRDB’s vectorized and +distributed execution model, and an implementation would likely impose a +significant maintenance cost. The proposal departs from Postgres here by fully +re-executing correlated subqueries during re-evaluation. Note that non-locked +tables would still read at the old timestamp, and therefore would not observe +concurrent updates. + +This is implemented by duplicating the subquery, and joining it back with the +locked query. Take the following example: +``` +SELECT * FROM customers c +INNER JOIN orders o +ON c.id = o.cust_id +FOR UPDATE OF c; +``` +The duplicated join would look like this: +``` +SELECT * FROM (...) 
locked INNER JOIN (SELECT * FROM orders) o ON locked.id = o.cust_id; +``` + +##### Left Joins + +Handling a LEFT join in the original query only requires changing the duplicated +join to a LEFT join as well. For the example query: +``` +SELECT * FROM customers c +LEFT JOIN orders o +ON c.id = o.cust_id +FOR UPDATE OF c; +``` +The resulting plan would look like this: +``` +SELECT * FROM (...) locked LEFT JOIN (SELECT * FROM orders) o ON locked.id = o.cust_id; +``` + +##### Multiple Locked Relations + +It is possible to lock multiple tables in a `SELECT FOR UPDATE` query. If this +is the case, the locked relations will be connected by INNER joins, since it is +not possible to lock rows in the right input of a LEFT join. This is implemented +by duplicating the INNER join conditions into a combined WHERE clause that +references the updated values. As always, we can skip evaluating the filters for +non-updated rows using a CASE statement. This WHERE clause will then ensure that +the updated join row passes all the join conditions. + +Example: +``` +SELECT * FROM customers c +INNER JOIN orders o +ON c.id = o.cust_id +INNER JOIN items i +ON i.id = o.item_id +FOR UPDATE; +``` +Note that the `FOR UPDATE` clause here applies to all tables. For this query, +re-evaluation for the join conditions would look like this: +``` +SELECT * FROM (...) locked +WHERE c_id = cust_id AND i_id = item_id; +``` + +Note that this implementation will reproduce Postgres’ behavior where an updated +row will only match with rows that matched before and after the update, only for +the case when multiple relations are locked and joined together. For non-locking +sub-selects/relations, the behavior will still be total re-evaluation instead. + +##### Non-Unique Join Keys + +So far, this discussion has ignored the case when a join is performed on a +non-unique key. 
In this case, the re-evaluation algorithm will duplicate +results, since there may be more than one match for a given locked row during +re-evaluation. This problem can be resolved using a DISTINCT operator, +de-duplicating on a key for each locked table. + +Example: +``` +CREATE TABLE xy (x INT PRIMARY KEY, y INT); +CREATE TABLE ab (a INT PRIMARY KEY, b INT); + +SELECT * FROM xy INNER JOIN ab ON y = b FOR UPDATE OF xy; +``` +In the above example, the DISTINCT would de-duplicate on column `x`, since it +forms a key for the locked relation `xy`: +``` +SELECT DISTINCT ON (x) * FROM (...) locked; +``` + +#### Mutations + +Mutations add implicit locking on updated rows in the target table. This is +logically equivalent to first running a `SELECT FOR UPDATE` in the transaction +before executing the mutation. Therefore, handling mutations can be reduced to +handling the implicit `SELECT FOR UPDATE` that performs the locking. + +#### Optimization + +It is desirable to avoid re-evaluation for rows that weren't updated. For the +filters used in the re-evaluation step, this is simple: wrap each filter in a +CASE statement that checks whether the row was updated. Example: +``` +CASE WHEN updated THEN ELSE True END; +``` +CASE statements guarantee that a branch is not evaluated unless it is taken, so +this method avoids overhead for filter re-evaluation in the happy case. This +optimization applies to the filters for both WHERE clauses and join ON +conditions. + +The other case that must be optimized is re-evaluation of correlated subqueries. +CASE statements cannot be used here, because it is possible for the subquery to +return multiple rows. However, strict UDFs have the required behavior - a +set-returning strict UDF short-circuits and returns no rows when given a NULL +argument. + +We can build a UDF that re-evaluates the correlated subquery, and join its +result back to the main query. 
The UDF parameters will correspond to +outer-column references, and the SQL body to the subquery. We can skip +re-evaluation by adding an extra parameter that is non-NULL when the locked row +changed, and NULL when it did not change. Example: +``` +CREATE FUNCTION re_eval_subquery(val1, val2, ..., null_if_not_updated) RETURNS RECORD AS $$ + +$$ STRICT LANGUAGE SQL; +``` +Note that while the example shows a `CREATE` statement, only the UDF's execution +machinery would be built here; it would not have a descriptor or become visible +to users. + +A few corrections are necessary to make the UDF solution work: +1. The old and new values for the subquery must be combined. This can be done + via another CASE statement. +2. The join must preserve locked (left) rows where the UDF returned no rows, + since this is the case when there were no updates. This is handled by always + using a LEFT join. If the original join was an INNER join, we add an extra + filter to remove rows that were updated and did not pass the join filters. + +Example for correlated subquery re-evaluation: +``` +SELECT * FROM (...) locked +LEFT JOIN LATERAL (SELECT * FROM re_eval_subquery(locked.val, null_if_not_updated)) sub +ON locked.id = sub.cust_id +[WHERE (NOT updated) OR passed_join_condition]; -- Only for INNER joins. +``` + +It's unclear how to avoid the overhead of de-duplication for non-unique joins. + +#### Summary + +Ignoring optimizations, the proposed implementation of the "Postgres-Compatible +Intra-Mutation Consistency" model follows these steps for a query with locking: +1. Execute the original query, taking care to keep the primary key of each + target table. +2. Perform a lookup join into the primary index for each target table, locking + each row and returning the most recent values. +3. For each non-locking subquery joined to the target table(s), duplicate the + subquery, and join it back to the main query. +4. 
Duplicate the query's WHERE clause, remapping it to refer to updated values,
+ and add it to the main query.
+5. Duplicate each join condition that joins target tables to one another, and
+ add it to the main query's WHERE clause.
+6. De-duplicate the result on the key of each target table.
+
+### Limitations
+
+* Complexity - many different steps are required in the algorithm in order to
+ handle the various edge cases around handling joins. The model is also more
+ complex to reason about and test than "Per-Statement Snapshot Isolation".
+* Overhead - in the fast path, the initial scan/filtering must preserve primary
+ keys for the locking tables in order to perform the locking lookup join. This
+ will add some execution-time overhead and possibly restrict the possible query
+ plans. In addition, the slow path is handled via correlated subqueries, which
+ can have very poor performance. This may not be unique to the predicate
+ re-evaluation design, however.
+* Compatibility - Postgres and CRDB handle row identifiers differently - in PG,
+ deleting and inserting a row with the same key and a different value is not the same
+ as updating it. In CRDB, the two scenarios are indistinguishable. This means
+ it would be possible for re-evaluation to read a newly inserted row in CRDB
+ when it wouldn't be possible in PG. In addition, PG would "follow" an update
+ that changes the row's key during re-evaluation, while CRDB would not. In
+ addition, the proposal does not exactly replicate Postgres behavior for
+ correlated joins with non-locking subqueries.
diff --git a/src/current/files/cockroach/docs/tech-notes/admission_control.md b/src/current/files/cockroach/docs/tech-notes/admission_control.md new file mode 100644 index 00000000000..9692bd94a60 --- /dev/null +++ b/src/current/files/cockroach/docs/tech-notes/admission_control.md @@ -0,0 +1,343 @@ +# Admission Control + +Author: Sumeer Bhola + + +## Goals + +Admission control for a system decides when work submitted to that +system begins executing, and is useful when there is some resource +(e.g. CPU) that is saturated. The high-level goals of admission +control in CockroachDB are to control resource overload such that it +(a) does not degrade throughput or cause node failures, (b) achieves +differentiation between work with different levels of importance +submitted by users, and (c) allows for load-balancing among data +replicas (when possible). + +For CockroachDB Serverless, where the shared cluster only runs the KV +layer and below, admission control also encompasses achieving fairness +across tenants (fairness is defined as equally allocating resources, +e.g. CPU, across tenants that are competing for resources). +Multi-tenant isolation for the shared cluster is included in the scope +of admission control since many of the queuing and re-ordering +mechanisms for work prioritization overlap with those for inter-tenant +isolation. + +Even though we do not discuss per-tenant SQL nodes in the remainder of +this document, the overload control mechanisms discussed below could +potentially be applied in that context, though the code for it is +incomplete. The scope of admission control excludes per-tenant cost +controls, since per-tenant cost is not a system resource. + +The current implementation focuses on node-level admission control, +for CPU and storage IO (specifically writes) as the bottleneck +resources. 
The focus on node-level admission control is based on the +observation that large scale systems may be provisioned adequately at +the aggregate level, but since CockroachDB has stateful nodes, +individual node hotspots can develop that can last for some time +(until rebalancing). Such hotspots should not cause failures or +degrade service for important work (or unfairly for tenants that are +not responsible for the hotspot). + +Specifically, for CPU the goal is to shift queueing from inside the +goroutine scheduler, where there is no differentiation, into various +admission queues, where we can differentiate. This must be done while +allowing the system to have high peak CPU utilization. For storage IO, +the goal is to prevent the log-structured merge (LSM) tree based +storage layer (Pebble) from getting into a situation with high read +amplification due to many files/sub-levels in level 0, which slows +down reads. This needs to be done while maintaining the ability to +absorb bursts of writes. Both CPU overload and high read amplification +in the LSM are areas where we have seen problems in real CockroachDB +clusters. + +The notable omission here is memory as a bottleneck resource for +admission control. The difficulty is that memory is non-preemptible +(mostly, ignoring disk spilling), and is used in various layers of the +system, and so slowing down certain activities (like KV processing) +may make things worse by causing the SQL layer to hold onto memory it +has already allocated. We want forward progress so that memory can be +released, and we do not know what should make progress to release the +most memory. We also currently do not have predictions for how much +memory a SQL query will consume, so we cannot make reasonable +reservations to make up for the non-preemptibility. + +## High-level Approach + + +### Ordering Tuple + +Admission queues use the tuple **(tenant, priority, transaction start +time)**, to order items that are waiting to be admitted. 
There is +coarse-grained fair sharing across tenants (for the multi-tenant +shared cluster). Priority is used within a tenant, and allows for +starvation, in that if higher priority work is always consuming all +resources, the lower priority work would wait forever. The transaction +start time is used within a priority, and gives preference to earlier +transactions. We currently do not have a way for end-users to assign +priority to their SQL transactions. We also currently do not support +multi-tenancy in dedicated clusters, even though many such customers +would like to distinguish between different internal tenants. Both +these limitations could be addressed by adding the requisite +plumbing/integration code. + + +### Possible solution for CPU Resource with scheduler change + +Let us consider the case of CPU as a bottleneck resource. If we had +the ability to change the goroutine scheduler we could associate the +above ordering tuple with each goroutine and could allocate the CPU +(P) slots to the runnable goroutine that should be next according to +that tuple. Such a scheme does not need to make any guesses about +whether some admitted work is currently doing useful work or blocked +on IO, since there is visibility into that state inside the +scheduler. And if we are concerned about starting too much work, and +not finishing already started work, one could add a **started** +boolean as the first element of the tuple and first give preference to +goroutines that had already started doing some work (there is a draft +Cockroach Labs [internal +doc](https://docs.google.com/document/d/18S4uE8O1nRxULhSg9Z1Zt4jUPBiLJgMh7X1I6shsbug/edit#heading=h.ssc9exx0epqo) +that provides more details). However, we currently do not have the +ability to make such scheduler changes. So we resort to more indirect +control as outlined in the next section. 
+ +### Admission for CPU Resource + +#### Kinds of Work and Queues + +There are various kinds of work that consume CPU, and we add admission +interception points in various places where we expect the +post-admission CPU consumption to be significant and (somewhat) +bounded. Note that there is still extreme heterogeneity in work size, +that we are not aware of at admission time, and we do not know the +CPU/IO ratio for a work unit. Specifically, the interception points +are: + +- **KV**: KV work admission, specifically the + `roachpb.InternalServer.Batch` API implemented by + [`Node`](https://github.com/cockroachdb/cockroach/blob/d10b3a5badf25c9e19ca84037f2426b03196b2ac/pkg/server/node.go#L938). This + includes work submitted by SQL, and internal operations like + garbage collection and node heartbeats. + +- **SQL-KV**: Admission of SQL processing for a response provided by + KV. For example, consider a distributed SQL scan of a large table + that is being executed at N nodes, where the SQL layer is issuing + local requests to the KV layer. The response from KV is subject to + admission control at each node, before processing by the SQL layer. + +- **SQL-SQL**: Distributed SQL runs as a two-level tree where the + lower level sends back responses to the root for further + processing. Admission control applies to the response processing + at the root. + +Each of these kinds of work has its own admission queue. Work is +queued until admitted or the work deadline is exceeded. Currently +there is no early rejection when encountering long queues. Under +aggressive user-specified deadlines throughput may collapse because +everything exceeds the deadline after doing part of the work. This is +no different than what will likely happen without admission control, +due to undifferentiated queueing inside the goroutine scheduler, but +we note that this is a behavior that is not currently improved by +admission control. 
+ +The above list of kinds are ordered from lower level to higher level, +and also serves as a hard-wired ordering from most important to least +important. The high importance of KV work reduces the likelihood that +non-SQL KV work will be starved. SQL-KV (response) work is prioritized +over SQL-SQL since the former includes leaf DistSQL processing and we +would like to release memory used up in RPC responses at lower layers +of RPC tree. We expect that if SQL-SQL (response) work is delayed, it +will eventually reduce new work being issued, which is a desirable +form of natural backpressure. Note that this hard prioritization +across kinds of work is orthogonal to the priority specified in the +ordering tuple, and would ideally not be needed (one reason for +introducing it is due to our inability to change the goroutine +scheduler). + +Consider the example of a lower priority long-running OLAP query +competing with higher priority small OLTP queries in a single node +setting. Say the OLAP query starts first and uses up all the CPU +resource such that the OLTP queries queue up in the KV work +queue. When the OLAP query's KV work completes, it will queue up for +SQL-KV work, which will not start because the OLTP queries are now +using up all available CPU for KV work. When this OLTP KV work +completes, their SQL-KV work will queue up. The queue for SQL-KV will +first admit those for the higher priority OLTP queries. This will +prevent or slow down admission of further work by the OLAP query. + +One weakness of this prioritization across kinds of work is that it +can result in priority inversion: lower importance KV work, not +derived from SQL, like GC of MVCC versions, will happen before +user-facing SQL-KV work. This is because the backpressure via SQL-SQL, +mentioned earlier, does not apply to work generated from within the KV +layer. This could be addressed by introducing a **KV-background** work +kind and placing it last in the above ordering. 
+ +#### Slots and Tokens + +The above kinds of work behave differently in whether we know a work +unit is completed or not. For KV work we know when the admitted work +completes, but this is not possible to know for SQL-KV and SQL-SQL +work because of the way the execution code is structured. Knowing +about completion is advantageous since it allows for better control +over resource consumption, since we do not know how big each work unit +actually is. + +We model these two different situations with different ways of +granting admission: for KV work we grant a slot that is occupied while +the work executes and becomes available when the work completes, and +for SQL-KV and SQL-SQL we grant a token that is consumed. The slot +terminology is akin to a scheduler, where a scheduling slot must be +free for a thread to run. But unlike a scheduler, we do not have +visibility into the fact that work execution may be blocked on IO. So +a slot can also be viewed as a limit on concurrency of ongoing +work. The token terminology is inspired by token buckets. Unlike a +token bucket, which shapes the rate, the current implementation limits +burstiness and does not do rate shaping -- this is because it is hard +to predict what rate is appropriate given the difference in sizes of +the work. + +#### Slot Adjustment for KV + +The current implementation makes no dynamic adjustments to token burst +sizes since the lack of a completion indicator and heterogeneity in +size makes it hard to figure out how to adjust these tokens. In +contrast, the slots that limit KV work concurrency are adjusted. And +because KV work must be admitted (and have no waiting requests) before +admission of SQL-KV and SQL-SQL work, the slot adjustment also +throttles the latter kinds of work. + +We monitor the following state of the goroutine scheduler: the number +of processors, and the number of goroutines that are runnable (i.e., +they are ready to run, but not running). 
The latter represents +queueing inside the goroutine scheduler, since these goroutines are +waiting to be scheduled. KV work concurrency slots are adjusted by +gradually decreasing or increasing the total slots (additive +decrements or increments), when the runnable count, normalized by the +number of processors, is too high or too low. The adjustment also +takes into account current usage of slots. The exact heuristic can be +found in `admission.kvSlotAdjuster`. It monitors the runnable count at +1ms intervals. The motivation for this high frequency is that sudden +shifts in CPU/IO ratio or lock contention can cause the current slot +count to be inadequate, while leaving the CPU underutilized, which is +undesirable. + + +#### Instantaneous CPU feedback and limiting bursts + +Tokens are granted (up to the burst size) for SQL-KV when the KV work +queue is empty and the CPU is not overloaded. For SQL-SQL the +additional requirement is that the SQL-KV work queue must also be +empty. It turns out that using the runnable goroutine count at 1ms +intervals, as a signal for CPU load, is insufficient time granularity +to properly control token grants. So we use two instantaneous +indicators: + +- CPU is considered overloaded if all the KV slots are utilized. + +- Tokens are not directly granted to waiting requests up to the burst + size. Instead we setup a "grant chaining" system where the goroutine + that is granted a token has to run and grant the next token. This + gives instantaneous feedback into the overload state. In an + experiment, using such grant chains reduced burstiness of grants by + 5x and shifted ~2s of latency (at p99) from the goroutine scheduler + into admission control (which is desirable since the latter is where + we can differentiate between work). + +### Admission for IO Resource + +KV work that involves writing to a store is additionally subject to a +per-store admission queue. 
Admission in this queue uses tokens +(discussed below), and happens before the KV work is subject to the KV +CPU admission queue (which uses slots), so that we do not have a +situation where a KV slot is taken up by work that is now waiting for +IO admission. + +KV work completion is not a good indicator of write work being +complete in the LSM tree. This is because flushes of memtables, and +compactions of sstables, which are the costly side-effect of writes, +happen later. We use "IO tokens" to constrain how many KV work items +are admitted. There is no limit on tokens when the LSM is healthy. Bad +health is indicated using thresholds on two level 0 metrics: the +sub-level count and file count. We do not consider other metrics for +health (compaction backlog, high compaction scores on other levels +etc.) since we are primarily concerned with increasing +read-amplification, which is what will impact user-facing traffic. It +is acceptable for the LSM to deteriorate in terms of compaction scores +etc. as long as the read-amplification does not explode, because +absorbing write bursts is important, and write bursts are often +followed by long enough intervals of low activity that restore the LSM +compaction scores to good health. + +When the LSM is considered overloaded, tokens are calculated by +estimating the average bytes added per KV work, and using the outgoing +compaction bytes to estimate how many KV work items it is acceptable +to admit. This detailed logic can be found in +`admission.ioLoadListener` which generates a new token estimate every +15s. The 15s duration is based on experimental observations of +compaction durations in level 0 when the number of sub-levels +increases beyond the overload threshold. We want a duration that is +larger than most compactions, but not too large (for +responsiveness). These tokens are given out in 1s intervals (for +smoothing). The code has comments with experimental details that +guided some of the settings. 
+ +### Priority adjustments and Bypassing admission control + +KV work that is not directly issued by SQL is never queued, though it +does consume a slot, which means the available slots can become +negative. This is a simple hack to prevent distributed deadlock that +can happen if we queue KV work issued due to other KV work. This will +need to be changed in the future to also queue low priority KV +operations (e.g. GC can be considered lower priority than user facing +work). + +Transactions that are holding locks, or have ongoing requests to +acquire locks, have their subsequent work requests bumped to higher +priority. This is a crude way to limit priority inversion where a +transaction holding locks could be waiting in an admission queue while +admitted requests are waiting in the lock table queues for this +transaction to make progress and release locks. Such prioritization +can also fare better than a system with no admission control, since +work from transactions holding locks will get prioritized, versus no +prioritization in the goroutine scheduler. A TPCC run with 3000 +warehouses showed 2x reduction in lock waiters and 10+% improvement in +transaction throughput with this priority adjustment compared to no +priority adjustment. See +https://github.com/cockroachdb/cockroach/pull/69337#issue-978534871 +for comparison graphs. + +### Tuning Knobs + +Enabling admission control is done via cluster settings. It is +currently disabled by default. There are three boolean settings, +`admission.kv.enabled`, `admission.sql_kv_response.enabled`, +`admission.sql_sql_response.enabled` and we have only experimented +with all of them turned on. + +There are also some advanced tuning cluster settings, that adjust the +CPU overload threshold and the level 0 store overload thresholds. + +### Results + +There are certain TPCC-bench and KV roachtests running regularly with +admission control enabled (they have "admission" in the test name +suffix). 
The TPCC performance is roughly equivalent to admission +control disabled. Certain KV roachtests that used to overload IO, but +were not running long enough to show the bad effects, are worse with +admission control enabled when comparing the mean throughput. However +the runs with admission control enabled are arguably better since they +do not have a steadily worsening throughput over the course of the +experimental run. + +For some graphs showing before and after effects of enabling admission control see: + +- CPU overload: + https://github.com/cockroachdb/cockroach/pull/65614#issue-651424608 + and + https://github.com/cockroachdb/cockroach/pull/66891#issue-930351128 + +- IO overload: + https://github.com/cockroachdb/cockroach/pull/65850#issue-656777155 + and + https://github.com/cockroachdb/cockroach/pull/69311#issue-978297918 diff --git a/src/current/files/cockroach/docs/tech-notes/encoding.md b/src/current/files/cockroach/docs/tech-notes/encoding.md new file mode 100644 index 00000000000..94d55360d25 --- /dev/null +++ b/src/current/files/cockroach/docs/tech-notes/encoding.md @@ -0,0 +1,561 @@ +Structured data encoding in CockroachDB SQL +=========================================== + +Like many databases, CockroachDB (CRDB) encodes SQL data into key-value +(KV) pairs. The format evolves over time, with an eye toward backward +compatibility. This document describes format version 3 in detail except +for how CRDB encodes primitive values ([pkg/util/encoding/encoding.go]). + +The Cockroach Labs blog post [SQL in CockroachDB: Mapping Table Data to +Key-Value Storage] covers format version 1, which predates column +families, interleaving, and composite encoding. Format version 2 +introduced column families, covered in [Implementing Column Families in +CockroachDB]. See also the [column families RFC] and the [interleaving +RFC]. + +This document was originally written by David Eisenstat +<>. 
+ +Tables (primary indexes) +------------------------ + +SQL tables consist of a rectangular array of data and some metadata. The +metadata include a unique table ID; a nonempty list of primary key +columns, each with an ascending/descending designation; and some +information about each column. Each column has a numeric ID that is +unique within the table, a SQL type, and a column family ID. A column +family is a maximal subset of columns with the same column family ID. +For more details, see [pkg/sql/catalog/descpb/structured.proto]. + +Each row of a table gives rise to one or more KV pairs, one per column +family as needed (see subsection NULL below). CRDB stores primary key +data in KV keys and other data in KV values so that it can use the KV +layer to prevent duplicate primary keys. For encoding, see +[pkg/sql/row/writer.go]. For decoding, see +[pkg/sql/row/fetcher.go]. + +### Key encoding + +KV keys consist of several fields: + +1. The table ID +2. The ID of the primary index (see section Indexes below) +3. The primary key of the row, one field per primary key column in list + order +4. The column family ID. +5. When the previous field is nonzero (non-sentinel), its length in + bytes. + +CRDB encodes these fields individually and concatenates the resulting +bytes. The decoder can determine the field boundaries because the field +encoding is [prefix-free]. + +Encoded fields start with a byte that indicates the type of the field. +For primary key fields, this type has a one-to-many relationship with +the SQL datum type. The SQL types `STRING` and `BYTES`, for example, +share an encoding. The relationship will become many-to-many when CRDB +introduces a [new `DECIMAL` encoding], since the old decoder will be +retained for backward compatibility. + +The format of the remaining bytes depends on the field type. The details +(in [pkg/util/encoding/encoding.go]) are irrelevant here except that, +for primary key fields, these bytes have the following order property. 
+Consider a particular primary key column and let enc be the mathematical +function that maps SQL data in that column to bytes. + +- If the column has an ascending designation, then for data *x* and + *y*, enc(*x*) ≤ enc(*y*) if and only if *x* ≤ *y*. +- If the column has a descending designation, then for data *x* and + *y*, enc(*x*) ≤ enc(*y*) if and only if *x* ≥ *y*. + +In conjunction with prefix freedom, the order property ensures that the +SQL layer and the KV layer sort primary keys the same way. + +For more details on primary key encoding, see `EncodeTableKey` +([pkg/sql/rowenc/column\_type\_encoding.go]). See also `EncDatum` +([pkg/sql/rowenc/encoded\_datum.go]). + +### Value encoding + +KV values consist of + +1. A four-byte checksum covering the whole KV pair +2. A one-byte value type (see the enumeration `ValueType` in + [pkg/roachpb/data.proto]) +3. Data from where the row specified in the KV key intersects the + specified column family, including composite encodings of primary + key columns that are members of the specified column family. + +The value type defaults to `TUPLE`, which indicates the following +encoding. (For other values, see subsection Single-column column +families below.) For each column in the column family sorted by column +ID, encode the column ID difference and the datum encoding type +(unrelated to the value type!) jointly, followed by the datum itself. +The column ID difference is the column ID minus the previous column ID +if this column is not the first, else the column ID. The joint encoding +is commonly one byte, which displays conveniently in hexadecimal as the +column ID difference followed by the datum encoding type. + +The Go function that performs the joint encoding is `encodeValueTag` +([pkg/util/encoding/encoding.go]), which emits an unsigned integer with +a variable-length encoding. The low four bits of the integer contain the +datum encoding type. The rest contain the column ID difference. 
As an +alternative for datum encoding types greater than 14, `encodeValueTag` +sets the low four bits to `SentinelType` (15) and emits the actual datum +encoding type next. + +**Note:** Values for sequences are a special case: the sequence value is +encoded as if the sequence were a one-row, one-column table, with the +key structured in the usual way: `/Table/<tableID>/<indexID>/<primaryKey>/<familyID>`. +However, the value is a bare int64; it doesn't use the encoding +specified here. This is because it is incremented using the KV +`Increment` operation so that the increment can be done in one +roundtrip, not a read followed by a write as would be required by a +normal SQL `UPDATE`. + +An alternative design would be to teach the KV Inc operation to +understand SQL value encoding so that the sequence could be encoded +consistently with tables, but that would break the KV/SQL abstraction +barrier. + +The code that performs generation of keys and values for primary indexes +can be found in `prepareInsertOrUpdateBatch`([pkg/sql/row/writer.go]). + +### Sentinel KV pairs + +The column family with ID 0 is special because it contains the primary +key columns. The KV pairs arising from this column family are called +sentinel KV pairs. CRDB emits sentinel KV pairs regardless of whether +the KV value has other data, to guarantee that primary keys appear in at +least one KV pair. (Even if there are other column families, their KV +pairs may be suppressed; see subsection NULL below.) + +Note that in system tables that use multiple column families, such as +system.zones or system.namespace, there may not be any sentinel KV pair at all. +This is because of the fact that the database writes to these system tables +using raw KV puts and does not include the logic to write a sentinel KV. KV +decoding code that needs to understand system tables must be aware of this +possibility. 
+ +### Single-column column families + +Before column families (i.e., in format version 1), non-sentinel KV keys +had a column ID where the column family ID is now. Non-sentinel KV +values contained exactly one datum, whose encoding was indicated by the +one-byte value type (see `MarshalColumnValue` in +[pkg/sql/rowenc/column\_type\_encoding.go]). Unlike the `TUPLE` encoding, this encoding +did not need to be prefix-free, which was a boon for strings. + +On upgrading to format version 2 or higher, CRDB puts each existing +column in a column family whose ID is the same as the column ID. This +allows backward-compatible encoding and decoding. The encoder uses the +old format for single-column column families when the ID of that column +equals the `DefaultColumnID` of the column family +([pkg/sql/catalog/descpb/structured.proto]). + +### NULL + +SQL `NULL` has no explicit encoding in tables (primary indexes). +Instead, CRDB encodes each row as if the columns where that row is null +did not exist. If all of the columns in a column family are null, then +the corresponding KV pair is suppressed. The motivation for this design +is that adding a column does not require existing data to be re-encoded. + +### Example dump + +The commands below create a table and insert some data. An annotated KV +dump follows. + + CREATE TABLE accounts ( + id INT PRIMARY KEY, + owner STRING, + balance DECIMAL, + FAMILY f0 (id, balance), + FAMILY f1 (owner) + ); + + INSERT INTO accounts VALUES + (1, 'Alice', 10000.50), + (2, 'Bob', 25000.00), + (3, 'Carol', NULL), + (4, NULL, 9400.10), + (5, NULL, NULL); + +Here is the relevant output from +`cockroach debug rocksdb scan --value_hex`, with annotations. 
+ + /Table/51/1/1/0/1489427290.811792567,0 : 0xB244BD870A3505348D0F4272 + ^- ^ ^ ^ ^-------^-^^^----------- + | | | | | | ||| + Table ID (accounts) Checksum| ||| + | | | | ||| + Index ID Value type (TUPLE) + | | ||| + Primary key (id = 1) Column ID difference + | || + Column family ID (f0) Datum encoding type (Decimal) + | + Datum encoding (10000.50) + + /Table/51/1/1/1/1/1489427290.811792567,0 : 0x30C8FBD403416C696365 + ^- ^ ^ ^ ^ ^-------^-^--------- + | | | | | | | | + Table ID (accounts) Checksum| | + | | | | | | + Index ID Value type (BYTES) + | | | | + Primary key (id = 1) Datum encoding ('Alice') + | | + Column family ID (f1) + | + Column family ID encoding length + + /Table/51/1/2/0/1489427290.811792567,0 : 0x2C8E35730A3505348D2625A0 + ^ ^----------- + 2 25000.00 + + /Table/51/1/2/1/1/1489427290.811792567,0 : 0xE911770C03426F62 + ^ ^----- + 2 'Bob' + + /Table/51/1/3/0/1489427290.811792567,0 : 0xCF8B38950A + ^ + 3 + + /Table/51/1/3/1/1/1489427290.811792567,0 : 0x538EE3D6034361726F6C + ^ ^--------- + 3 'Carol' + + /Table/51/1/4/0/1489427290.811792567,0 : 0x247286F30A3505348C0E57EA + ^ ^----------- + 4 9400.10 + + /Table/51/1/5/0/1489427290.811792567,0 : 0xCB0644270A + ^ + 5 + +### Composite encoding + +There exist decimal numbers and collated strings that are equal but not +identical, e.g., 1.0 and 1.000. This is problematic because in primary +keys, 1.0 and 1.000 must have the same encoding, which precludes +lossless decoding. Worse, the encoding of collated strings in primary +keys is defined by the [Unicode Collation Algorithm], which may not even +have [an efficient partial inverse]. + +When collated strings and [(soon) decimals][new `DECIMAL` encoding] +appear in primary keys, they have composite encoding. For collated +strings, this means encoding data as both a key and value, with the +latter appearing in the sentinel KV value (naturally, since the column +belongs to the column family with ID 0). 
+ +Example schema and data: + + CREATE TABLE owners ( + owner STRING COLLATE en PRIMARY KEY + ); + + INSERT INTO owners VALUES + ('Bob' COLLATE en), + ('Ted' COLLATE en); + +Example dump: + + /Table/51/1/"\x16\x05\x17q\x16\x05\x00\x00\x00 \x00 \x00 \x00\x00\b\x02\x02"/0/1489502864.477790157,0 : 0xDC5FDAE10A1603426F62 + ^--------------------------------------------------------------- ^------- + Collation key for 'Bob' 'Bob' + + /Table/51/1/"\x18\x16\x16L\x161\x00\x00\x00 \x00 \x00 \x00\x00\b\x02\x02"/0/1489502864.477790157,0 : 0x8B30B9290A1603546564 + ^------------------------------------------------------------ ^------- + Collation key for 'Ted' 'Ted' + +Indexes (secondary indexes) +--------------------------- + +To unify the handling of SQL tables and indexes, CRDB stores the +authoritative table data in what is termed the primary index. SQL +indexes are secondary indexes. All indexes have an ID that is unique +within their table. + +The user-specified metadata for secondary indexes include a nonempty +list of indexed columns, each with an ascending/descending designation, +and a disjoint list of stored columns. The first list determines how the +index is sorted, and columns from both lists can be read directly from +the index. + +Users also specify whether a secondary index should be unique. Unique +secondary indexes constrain the table data not to have two rows where, +for each indexed column, the data therein are non-null and equal. + +As of #42073 (after version 19.2), secondary indexes have been extended to +include support for column families. These families are the same as the ones +defined upon the table. Families will apply to the stored columns in the index. +Like in primary indexes, column family 0 on a secondary index will always be +present for a row so that each row in the index has at least one k/v entry. + +### Key encoding + +The main encoding function for secondary indexes is +`EncodeSecondaryIndex` in [pkg/sql/rowenc/index\_encoding.go]. 
Each row gives +rise to one KV pair per secondary index, whose KV key has fields +mirroring the primary index encoding: + +1. The table ID +2. The index ID +3. Data from where the row intersects the indexed columns +4. If the index is non-unique or the row has a NULL in an indexed + column, data from where the row intersects the non-indexed primary + key (implicit) columns +5. If the index is non-unique or the row has a NULL in an indexed + column, and the index uses the old format for stored columns, data + from where the row intersects the stored columns +6. The column family ID. +7. When the previous field is nonzero (non-sentinel), its length in bytes. + +Unique indexes relegate the data in extra columns to KV values so that +the KV layer detects constraint violations. The special case for an +indexed NULL arises from the fact that NULL does not equal itself, hence +rows with an indexed NULL cannot be involved in a violation. They need a +unique KV key nonetheless, as do rows in non-unique indexes, which is +achieved by including the non-indexed primary key data. For the sake of +simplicity, data in stored columns are also included. + +### Value encoding +KV values for secondary indexes are encoded using the following rules: + +If the value corresponds to column family 0: + +The KV value will have value type bytes, and will consist of +1. If the index is unique, data from where the row intersects the + non-indexed primary key (implicit) columns, encoded as in the KV key +2. If the index is unique, and the index uses the old format for stored + columns, data from where the row intersects the stored columns, + encoded as in the KV key +3. If needed, `TUPLE`-encoded bytes for non-null composite and stored + column data in family 0 (new format). + +Since column family 0 is always included, it contains extra information +that the index stores in the value, such as composite column values and +stored primary key columns. 
Note that this is different than the encoding of +composite indexed columns values in primary key columns, where the composite +value component of an indexed column is placed in the KV pair corresponding +to the column family of the indexed column. All of these fields are optional, +so the `BYTES` value may be empty. Note that, in a unique index, rows with +a NULL in an indexed column have their implicit column data stored in both the +KV key and the KV value. (Ditto for stored column data in the old format.) + +For indexes with more than one column family, the remaining column families' +KV values will have value type `TUPLE` and will consist of all stored +columns in that family in the `TUPLE` encoded format. + +### Backwards Compatibility With Indexes Encoded Without Families + +Index descriptors hold on to a version bit that denotes what encoding +format the descriptor was written in. The default value of the bit denotes +the original secondary index encoding, and indexes created when all +nodes in a cluster are version 20.1 or greater will have the version representing +secondary indexes with column families. + +### Example dump + +Example schema and data: + + CREATE TABLE accounts ( + id INT PRIMARY KEY, + owner STRING, + balance DECIMAL, + UNIQUE INDEX i2 (owner) STORING (balance), + INDEX i3 (owner) STORING (balance) + ); + + INSERT INTO accounts VALUES + (1, 'Alice', 10000.50), + (2, 'Bob', 25000.00), + (3, 'Carol', NULL), + (4, NULL, 9400.10), + (5, NULL, NULL); + +Index ID 1 is the primary index. + + /Table/51/1/1/0/1489504989.617188491,0 : 0x4AAC12300A2605416C6963651505348D0F4272 + /Table/51/1/2/0/1489504989.617188491,0 : 0x148941AD0A2603426F621505348D2625A0 + /Table/51/1/3/0/1489504989.617188491,0 : 0xB1D0B5390A26054361726F6C + /Table/51/1/4/0/1489504989.617188491,0 : 0x247286F30A3505348C0E57EA + /Table/51/1/5/0/1489504989.617188491,0 : 0xCB0644270A + +#### Old STORING format + +Index ID 2 is the unique secondary index `i2`. 
+ + /Table/51/2/NULL/4/9400.1/0/1489504989.617188491,0 : 0x01CF9BB0038C2BBD011400 + ^--- ^ ^----- ^-^-^--------- + Indexed column | Stored column BYTES 4 9400.1 + Implicit column + + /Table/51/2/NULL/5/NULL/0/1489504989.617188491,0 : 0xE86B1271038D00 + ^--- ^ ^--- ^-^-^- + Indexed column | Stored column BYTES 5 NULL + Implicit column + + /Table/51/2/"Alice"/0/1489504989.617188491,0 : 0x285AC6F303892C0301016400 + ^------ ^-^-^----------- + Indexed column BYTES 1 10000.5 + + /Table/51/2/"Bob"/0/1489504989.617188491,0 : 0x23514F1F038A2C056400 + ^---- ^-^-^------- + Indexed column BYTES 2 2.5E+4 + + /Table/51/2/"Carol"/0/1489504989.617188491,0 : 0xE98BFEE6038B00 + ^------ ^-^-^- + Indexed column BYTES 3 NULL + +Index ID 3 is the non-unique secondary index `i3`. + + /Table/51/3/NULL/4/9400.1/0/1489504989.617188491,0 : 0xEEFAED0403 + ^--- ^ ^----- ^- + Indexed column | Stored column BYTES + Implicit column + + /Table/51/3/NULL/5/NULL/0/1489504989.617188491,0 : 0xBE090D2003 + ^--- ^ ^--- ^- + Indexed column | Stored column BYTES + Implicit column + + /Table/51/3/"Alice"/1/10000.5/0/1489504989.617188491,0 : 0x7B4964C303 + ^------ ^ ^------ ^- + Indexed column | Stored column BYTES + Implicit column + + /Table/51/3/"Bob"/2/2.5E+4/0/1489504989.617188491,0 : 0xDF24708303 + ^---- ^ ^----- ^- + Indexed column | Stored column BYTES + Implicit column + + /Table/51/3/"Carol"/3/NULL/0/1489504989.617188491,0 : 0x96CA34AD03 + ^------ ^ ^--- ^- + Indexed column | Stored column BYTES + Implicit column + +#### New STORING format + +Index ID 2 is the unique secondary index `i2`. 
+ + /Table/51/2/NULL/4/0/1492010940.897101344,0 : 0x7F2009CC038C3505348C0E57EA + ^--- ^ ^-^-^------------- + Indexed column Implicit column BYTES 4 9400.10 + + /Table/51/2/NULL/5/0/1492010940.897101344,0 : 0x48047B1A038D + ^--- ^ ^-^- + Indexed column Implicit column BYTES 5 + + /Table/51/2/"Alice"/0/1492010940.897101344,0 : 0x24090BCE03893505348D0F4272 + ^------ ^-^-^------------- + Indexed column BYTES 1 10000.50 + + /Table/51/2/"Bob"/0/1492010940.897101344,0 : 0x54353EB9038A3505348D2625A0 + ^---- ^-^-^------------- + Indexed column BYTES 2 25000.00 + + /Table/51/2/"Carol"/0/1492010940.897101344,0 : 0xE731A320038B + ^------ ^-^- + Indexed column BYTES 3 + +Index ID 3 is the non-unique secondary index `i3`. + + /Table/51/3/NULL/4/0/1492010940.897101344,0 : 0x17C357B0033505348C0E57EA + ^--- ^ ^-^------------- + Indexed column Implicit column BYTES 9400.10 + + /Table/51/3/NULL/5/0/1492010940.897101344,0 : 0x844708BC03 + ^--- ^ ^- + Indexed column Implicit column BYTES + + /Table/51/3/"Alice"/1/0/1492010940.897101344,0 : 0x3AD2E728033505348D0F4272 + ^------ ^ ^-^------------- + Indexed column Implicit column BYTES 10000.50 + + /Table/51/3/"Bob"/2/0/1492010940.897101344,0 : 0x7F1225A4033505348D2625A0 + ^---- ^ ^-^------------- + Indexed column Implicit column BYTES 25000.00 + + /Table/51/3/"Carol"/3/0/1492010940.897101344,0 : 0x45C61B8403 + ^------ ^ ^- + Indexed column Implicit column BYTES + +### Example dump with families +``` +CREATE TABLE t ( + a INT, b INT, c INT, d INT, e INT, f INT, + PRIMARY KEY (a, b), + UNIQUE INDEX i (d, e) STORING (c, f), + FAMILY (a, b, c), FAMILY (d, e), FAMILY (f) +); + +INSERT INTO t VALUES (1, 2, 3, 4, 5, 6); + +/Table/52/2/4/5/0/1572546219.386986000,0 : 0xBDD6D93003898A3306 + ^-- ^ ^_^_______ + Indexed cols Column family 0 BYTES Stored PK cols + column c +// Notice that /Table/52/2/4/5/1/1/ is not present, because these values are already indexed +/Table/52/2/4/5/2/1/1572546219.386986000,0 : 0x46CC99AE0A630C + ^__ ^_^___ + Column 
Family 2 TUPLE column f +``` + +### Composite encoding + +Secondary indexes use key encoding for all indexed columns, implicit +columns, and stored columns in the old format. Every datum whose key +encoding does not suffice for decoding (collated strings, floating-point +and decimal negative zero, decimals with trailing zeros) is encoded +again, in the same `TUPLE` that contains stored column data in the new +format. + +Example schema and data: + + CREATE TABLE owners ( + id INT PRIMARY KEY, + owner STRING COLLATE en, + INDEX i2 (owner) + ); + + INSERT INTO owners VALUES + (1, 'Ted' COLLATE en), + (2, 'Bob' COLLATE en), + (3, NULL); + +Index ID 1 is the primary index. + + /Table/51/1/1/0/1492008659.730236666,0 : 0x6CA87E2B0A2603546564 + /Table/51/1/2/0/1492008659.730236666,0 : 0xE900EBB50A2603426F62 + /Table/51/1/3/0/1492008659.730236666,0 : 0xCF8B38950A + +Index ID 2 is the secondary index `i2`. + + /Table/51/2/NULL/3/0/1492008659.730236666,0 : 0xBDAA5DBE03 + ^--- ^- + Indexed column BYTES + + /Table/51/2/"\x16\x05\x17q\x16\x05\x00\x00\x00 \x00 \x00 \x00\x00\b\x02\x02"/2/0/1492008659.730236666,0 : 0x4A8239F6032603426F62 + ^--------------------------------------------------------------- ^-^--------- + Indexed column: Collation key for 'Bob' BYTES 'Bob' + + /Table/51/2/"\x18\x16\x16L\x161\x00\x00\x00 \x00 \x00 \x00\x00\b\x02\x02"/1/0/1492008659.730236666,0 : 0x747DA39A032603546564 + ^------------------------------------------------------------ ^-^--------- + Indexed column: Collation key for 'Ted' BYTES 'Ted' + + [pkg/util/encoding/encoding.go]: https://github.com/cockroachdb/cockroach/blob/master/pkg/util/encoding/encoding.go + [SQL in CockroachDB: Mapping Table Data to Key-Value Storage]: https://www.cockroachlabs.com/blog/sql-in-cockroachdb-mapping-table-data-to-key-value-storage/ + [Implementing Column Families in CockroachDB]: https://www.cockroachlabs.com/blog/sql-cockroachdb-column-families/ + [column families RFC]: 
https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20151214_sql_column_families.md + [interleaving RFC]: https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20160624_sql_interleaved_tables.md + [pkg/sql/catalog/descpb/structured.proto]: https://github.com/cockroachdb/cockroach/blob/master/pkg/sql/catalog/descpb/structured.proto + [pkg/sql/row/writer.go]: https://github.com/cockroachdb/cockroach/blob/master/pkg/sql/row/writer.go + [pkg/sql/row/fetcher.go]: https://github.com/cockroachdb/cockroach/blob/master/pkg/sql/row/fetcher.go + [prefix-free]: https://en.wikipedia.org/wiki/Prefix_code + [new `DECIMAL` encoding]: https://github.com/cockroachdb/cockroach/issues/13384#issuecomment-277120394 + [pkg/sql/rowenc/column\_type\_encoding.go]: https://github.com/cockroachdb/cockroach/blob/master/pkg/sql/rowenc/column_type_encoding.go + [pkg/sql/rowenc/encoded\_datum.go]: https://github.com/cockroachdb/cockroach/blob/master/pkg/sql/rowenc/encoded_datum.go + [pkg/roachpb/data.proto]: https://github.com/cockroachdb/cockroach/blob/master/pkg/roachpb/data.proto + [Unicode Collation Algorithm]: http://unicode.org/reports/tr10/ + [an efficient partial inverse]: http://stackoverflow.com/q/23609457/2144669 diff --git a/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/replication.json b/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/replication.json new file mode 100644 index 00000000000..862b0efa73f --- /dev/null +++ b/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/replication.json @@ -0,0 +1,1308 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, 
+ "graphTooltip": 0, + "id": 10, + "links": [], + "liveNow": false, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.4.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(sum(ranges{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"})) by (instance)", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "Ranges", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(sum(replicas_leaders{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}) by (instance))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Leaders", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(sum(replicas_leaseholders{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}) by (instance))", + "interval": "", + "legendFormat": "Lease Holders", + "refId": "G" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(sum(replicas_leaders_not_leaseholders{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}) by (instance))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Leaders 
w/o Lease", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(sum(ranges_unavailable{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}) by (instance))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Unavailable", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(sum(ranges_underreplicated{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}) by (instance))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Under-replicated", + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(sum(ranges_overreplicated{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}) by (instance))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Over-replicated", + "refId": "F" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Ranges", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:133", + "format": "short", + "label": "ranges", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:134", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The number of replicas on each store.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 8 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "nullPointMode": "null as zero", + "options": { + 
"alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.4.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(replicas{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}) by (instance)", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Replicas per Store", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:431", + "format": "short", + "label": "replicas", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:432", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The number of leaseholder replicas on each store. 
A leaseholder replica is the one that receives and coordinates all read and write requests for its range.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 16 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.4.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(replicas_leaseholders{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}) by (instance)", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Leaseholders per Store", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:581", + "format": "short", + "label": "leaseholders", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:582", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Exponentially weighted moving average of the number of KV batch requests processed by leaseholder replicas on each store per second. Tracks roughly the last 30 minutes of requests. 
Used for load-based rebalancing decisions.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 24 + }, + "hiddenSeries": false, + "id": 14, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.3.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "rebalancing_queriespersecond{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Average Queries per Store", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:731", + "format": "short", + "label": "queries", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:732", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Number of logical bytes stored in [key-value pairs](https://www.cockroachlabs.com/docs/v21.1/architecture/distribution-layer.html#table-data) on each node.\n\nThis includes historical and deleted data.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 32 + }, + "hiddenSeries": false, + "id": 16, + "legend": { + "avg": false, + "current": false, + 
"max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.3.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "totalbytes{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Logical Bytes per Store", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:807", + "format": "bytes", + "label": "logical store size", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:808", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 40 + }, + "hiddenSeries": false, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.3.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": 
"prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(sum(replicas{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}) by (instance)) ", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Replicas", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(sum(replicas_quiescent{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}) by (instance))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Quiescent", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Replica Quiescence", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:883", + "format": "short", + "label": "replicas", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:884", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 48 + }, + "hiddenSeries": false, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.3.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": 
"sum(sum(rate(range_splits{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}[$rate_interval])) by (instance))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Splits", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(sum(rate(range_merges{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}[$rate_interval])) by (instance))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Merges", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(sum(rate(range_adds{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}[$rate_interval])) by (instance))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Adds", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(sum(rate(range_removes{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}[$rate_interval])) by (instance))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Removes", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(sum(rate(leases_transfers_success{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}[$rate_interval])) by (instance))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Lease Transfers", + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(sum(rate(rebalancing_lease_transfers{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}[$rate_interval])) by (instance))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Load-based Lease Transfers", + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": 
"sum(sum(rate(rebalancing_range_rebalances{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}[$rate_interval])) by (instance))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Load-based Range Rebalances", + "refId": "G" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Range Operations", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:959", + "format": "short", + "label": "ranges", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:960", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 56 + }, + "hiddenSeries": false, + "id": 12, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.3.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(rate(range_snapshots_generated{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}[$rate_interval]))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Generated", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": 
"sum(rate(range_snapshots_applied_voter{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}[$rate_interval]))",
+          "interval": "",
+          "intervalFactor": 2,
+          "legendFormat": "Applied (Voters)",
+          "refId": "B"
+        },
+        {
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
+          "exemplar": true,
+          "expr": "sum(rate(range_snapshots_applied_initial{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}[$rate_interval]))",
+          "interval": "",
+          "intervalFactor": 2,
+          "legendFormat": "Applied (Initial Upreplication)",
+          "refId": "C"
+        },
+        {
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
+          "exemplar": true,
+          "expr": "sum(rate(range_snapshots_applied_non_voter{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}[$rate_interval]))",
+          "hide": false,
+          "interval": "",
+          "intervalFactor": 2,
+          "legendFormat": "Applied (Non-Voters)",
+          "refId": "D"
+        },
+        {
+          "datasource": {
+            "type": "prometheus",
+            "uid": "${DS_PROMETHEUS}"
+          },
+          "expr": "sum(rate(replicas_reserved{cluster=~\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}[$__rate_interval]))",
+          "interval": "",
+          "intervalFactor": 2,
+          "legendFormat": "Reserved Replicas",
+          "refId": "E"
+        }
+      ],
+      "thresholds": [],
+      "timeRegions": [],
+      "title": "Snapshots",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "mode": "time",
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "$$hashKey": "object:1109",
+          "format": "short",
+          "label": "snapshots",
+          "logBase": 1,
+          "min": "0",
+          "show": true
+        },
+        {
+          "$$hashKey": "object:1110",
+          "format": "short",
+          "label": "",
+          "logBase": 1,
+          "min": "0",
+          "show": false
+        }
+      ],
+      "yaxis": {
+        "align": false
+      }
+    },
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": {
+        "type": "prometheus",
+        "uid": "${DS_PROMETHEUS}"
+      },
+      "fill": 1,
+      "fillGradient": 0,
+      "gridPos": {
+        "h": 8,
+        "w": 24,
+        "x": 0,
+ "y": 64 + }, + "hiddenSeries": false, + "id": 18, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.3.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "rate(raft_rcvd_app{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}[$rate_interval])", + "hide": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Msg App", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:959", + "format": "short", + "label": "ranges", + "logBase": 2, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:960", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 72 + }, + "hiddenSeries": false, + "id": 19, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.3.1", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], 
+ "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "rate(raft_rcvd_queued_bytes{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}[$rate_interval])", + "hide": false, + "legendFormat": "__auto", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "", + "hide": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Msg Queued Bytes", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:959", + "format": "short", + "label": "ranges", + "logBase": 2, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:960", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + } + ], + "refresh": "", + "revision": 1, + "schemaVersion": 38, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "Prometheus", + "value": "Prometheus" + }, + "hide": 0, + "includeAll": false, + "label": "datasource", + "multi": false, + "name": "DS_PROMETHEUS", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "current": { + "selected": false, + "text": "drew-demo", + "value": "drew-demo" + }, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "sys_uptime{job=\"cockroachdb\"}", + "hide": 0, + "includeAll": false, + "label": "Cluster", + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": "sys_uptime{job=\"cockroachdb\"}", + "refId": "Prometheus-cluster-Variable-Query" 
+ }, + "refresh": 1, + "regex": "/cluster=\"([^\"]+)\"/", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": "", + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "label_values(sys_uptime{job=\"cockroachdb\",cluster=\"$cluster\"},instance)", + "hide": 0, + "includeAll": true, + "label": "Node", + "multi": false, + "name": "node", + "options": [], + "query": { + "query": "label_values(sys_uptime{job=\"cockroachdb\",cluster=\"$cluster\"},instance)", + "refId": "Prometheus-node-Variable-Query" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "selected": false, + "text": "30s", + "value": "30s" + }, + "hide": 0, + "label": "Rate Interval", + "name": "rate_interval", + "options": [ + { + "selected": true, + "text": "30s", + "value": "30s" + }, + { + "selected": false, + "text": "1m", + "value": "1m" + }, + { + "selected": false, + "text": "5m", + "value": "5m" + }, + { + "selected": false, + "text": "15m", + "value": "15m" + }, + { + "selected": false, + "text": "30m", + "value": "30m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": "2h", + "value": "2h" + }, + { + "selected": false, + "text": "1d", + "value": "1d" + } + ], + "query": "30s,1m,5m,15m,30m,1h,2h,1d", + "queryValue": "", + "refresh": 2, + "skipUrlSync": false, + "type": "interval" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": {}, + "timezone": "America/New_York", + "title": "CRDB Console: Replication ", + "uid": "crdb-console-replications", + "version": 4, + "weekStart": "" +} diff --git 
a/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/runtime.json b/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/runtime.json new file mode 100644 index 00000000000..1bb09fba55f --- /dev/null +++ b/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/runtime.json @@ -0,0 +1,987 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 8, + "links": [], + "liveNow": false, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The number of live nodes in the cluster.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.4.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "min(liveness_livenodes{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"})", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Live Nodes", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Live Node 
Count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:637", + "format": "short", + "label": "nodes", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:638", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Memory in use across all nodes:\nRSS \nTotal memory in use by CockroachDB\n\nGo Allocated \nMemory allocated by the Go layer\n\nGo Total \nTotal memory managed by the Go layer\n\nC Allocated \nMemory allocated by the C layer\n\nC Total \nTotal memory managed by the C layer", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 8 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.4.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(sys_rss{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"})", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Total memory (RSS)", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(sys_cgo_allocbytes{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"})", + "interval": "", + "intervalFactor": 2, 
+ "legendFormat": "Go Allocated", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(sys_go_totalbytes{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"})", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Go Total", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(sys_go_allocbytes{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"})", + "interval": "", + "intervalFactor": 2, + "legendFormat": "CGo Allocated", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(sys_cgo_totalbytes{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"})", + "interval": "", + "intervalFactor": 2, + "legendFormat": "CGo Total", + "refId": "E" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Memory Usage", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:863", + "format": "bytes", + "label": "memory usage", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:864", + "format": "bytes", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The number of Goroutines across all nodes. 
This count should rise and fall based on load.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 16 + }, + "hiddenSeries": false, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.4.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(sys_goroutines{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"})", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Goroutine Count", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Goroutine Count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:1235", + "format": "short", + "label": "goroutines", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:1236", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The number of Goroutines waiting for CPU. 
This count should rise and fall based on load.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 24 + }, + "hiddenSeries": false, + "id": 16, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.0.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sys_runnable_goroutines_per_cpu{job=\"cockroachdb\",cluster=~\"$cluster\",instance=~\"$node\"}", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Runnable Goroutines per CPU", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:391", + "format": "short", + "label": "goroutines", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:392", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The number of times that Go’s garbage collector was invoked per second across all nodes.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 32 + }, + "hiddenSeries": false, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": 
false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(rate(sys_gc_count{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}[$rate_interval]))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "GC Runs", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "GC Runs", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:1311", + "format": "short", + "label": "runs", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:1312", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The amount of processor time used by Go’s garbage collector per second across all nodes. 
During garbage collection, application code execution is paused.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 40 + }, + "hiddenSeries": false, + "id": 12, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(sys_gc_pause_ns{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}[$rate_interval]))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "GC Pause Time", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "GC Pause Time", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:1387", + "format": "ns", + "label": "pause time", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:1388", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 48 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "nullPointMode": "null as zero", + "options": { 
+ "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(sys_cpu_user_ns{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}[$__rate_interval]))", + "instant": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "User CPU Time", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(sys_cpu_sys_ns{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}[$__rate_interval]))", + "instant": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "Sys CPU Time", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "CPU Time", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:1833", + "format": "ns", + "label": "cpu time", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:1834", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Mean clock offset of each node against the rest of the cluster.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 56 + }, + "hiddenSeries": false, + "id": 14, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null as zero", + 
"options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "clock_offset_meannanos{job=\"cockroachdb\",instance=~\"$node\",cluster=~\"$cluster\"}", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Clock Offset", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:787", + "format": "ns", + "label": "offset", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:788", + "format": "short", + "label": "", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + } + ], + "refresh": "30s", + "revision": 1, + "schemaVersion": 38, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "Prometheus", + "value": "Prometheus" + }, + "hide": 0, + "includeAll": false, + "label": "datasource", + "multi": false, + "name": "DS_PROMETHEUS", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "current": { + "selected": false, + "text": "drew-demo", + "value": "drew-demo" + }, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "sys_uptime{job=\"cockroachdb\"}", + "hide": 0, + "includeAll": false, + "label": "Cluster", + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": "sys_uptime{job=\"cockroachdb\"}", + "refId": "Prometheus-cluster-Variable-Query" + }, + "refresh": 1, + "regex": "/cluster=\"([^\"]+)\"/", 
+ "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": "", + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "label_values(sys_uptime{job=\"cockroachdb\",cluster=\"$cluster\"},instance)", + "hide": 0, + "includeAll": true, + "label": "Node", + "multi": false, + "name": "node", + "options": [], + "query": { + "query": "label_values(sys_uptime{job=\"cockroachdb\",cluster=\"$cluster\"},instance)", + "refId": "Prometheus-node-Variable-Query" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "selected": false, + "text": "30s", + "value": "30s" + }, + "hide": 0, + "label": "Rate Interval", + "name": "rate_interval", + "options": [ + { + "selected": true, + "text": "30s", + "value": "30s" + }, + { + "selected": false, + "text": "1m", + "value": "1m" + }, + { + "selected": false, + "text": "5m", + "value": "5m" + }, + { + "selected": false, + "text": "10m", + "value": "10m" + }, + { + "selected": false, + "text": "30m", + "value": "30m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": "6h", + "value": "6h" + }, + { + "selected": false, + "text": "12h", + "value": "12h" + }, + { + "selected": false, + "text": "1d", + "value": "1d" + } + ], + "query": "30s,1m,5m,10m,30m,1h,6h,12h,1d", + "queryValue": "", + "refresh": 2, + "skipUrlSync": false, + "type": "interval" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": {}, + "timezone": "America/New_York", + "title": "CRDB Console: Runtime ", + "uid": "crdb-console-runtime", + "version": 4, + "weekStart": "" +} diff --git 
a/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/sql.json b/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/sql.json new file mode 100644 index 00000000000..b90cbbe26dc --- /dev/null +++ b/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/sql.json @@ -0,0 +1,1922 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 7, + "links": [], + "liveNow": false, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The total number of open SQL Sessions.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.4.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sql_conns{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Open SQL Sessions", + "tooltip": { + "shared": true, + 
"sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:108", + "format": "short", + "label": "connections", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:109", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The total number of SQL transactions currently open.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 8 + }, + "hiddenSeries": false, + "id": 30, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.4.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sql_txns_open{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Open SQL Transactions", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:279", + "format": "short", + "label": "transactions", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:280", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": 
false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The total number of SQL statements currently running.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 16 + }, + "hiddenSeries": false, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.4.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(sql_distsql_queries_active{job=\"cockroachdb\",cluster=~\"$cluster\",instance=~\"$node\"})", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Active Statements", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Active SQL Statements", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:184", + "format": "short", + "label": "queries", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:185", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The total amount of SQL client network traffic in bytes per second.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + 
"y": 24 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.0.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(sql_bytesin{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}[$__rate_interval]))", + "instant": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "Bytes In", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(sql_bytesout{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}[$__rate_interval]))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Bytes Out", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "SQL Byte Traffic", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:260", + "format": "bytes", + "label": "byte traffic", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:261", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "A moving average of the # of SELECT, INSERT, UPDATE, and DELETE statements successfully executed per second.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + 
"h": 8, + "w": 24, + "x": 0, + "y": 32 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.0.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(sql_select_count{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}[$rate_interval]))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Selects", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(sql_update_count{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}[$rate_interval]))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Updates", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(sql_insert_count{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}[$rate_interval]))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Inserts", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(sql_delete_count{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}[$rate_interval]))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Deletes", + "refId": "D" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "SQL Statements", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + 
"values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:336", + "format": "short", + "label": "queries", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:337", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The number of statements which returned a planning or runtime error.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 40 + }, + "hiddenSeries": false, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(sql_failure_count{job=\"cockroachdb\",cluster=~\"$cluster\",instance=~\"$node\"}[$__rate_interval]))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Errors", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "SQL Statement Errors", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:412", + "format": "short", + "label": "errors", + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "$$hashKey": "object:413", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + 
"dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The total number of SQL statements that experienced contention.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 48 + }, + "hiddenSeries": false, + "id": 32, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(sql_distsql_contended_queries_count{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "Contention", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "SQL Statement Contention", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:489", + "format": "short", + "label": "queries", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:490", + "format": "short", + "label": "", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The number of flows on each node contributing to currently running distributed SQL statements.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 56 + }, + 
"hiddenSeries": false, + "id": 12, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "rate(sql_distsql_flows_active{job=\"cockroachdb\",cluster=~\"$cluster\",instance=~\"$node\"}[$__rate_interval])", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Active Flows for Distributed SQL Statements", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:107", + "format": "short", + "label": "flows", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:108", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Over the last minute, this node executed 99% of queries within this time. 
This time does not include network latency between the node and client.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 64 + }, + "hiddenSeries": false, + "id": 14, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "histogram_quantile(0.99, rate(sql_service_latency_bucket{job=\"cockroachdb\",instance=~\"$node\",cluster=\"$cluster\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Service Latency: SQL, 99th percentile", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:710", + "format": "ns", + "label": "latency", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:711", + "format": "short", + "label": "", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Over the last minute, this node executed 90% of queries within this time. 
This time does not include network latency between the node and client.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 72 + }, + "hiddenSeries": false, + "id": 16, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "histogram_quantile(0.90, rate(sql_service_latency_bucket{job=\"cockroachdb\",instance=~\"$node\",cluster=\"$cluster\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Service Latency: SQL, 90th percentile", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:934", + "format": "ns", + "label": "latency", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:935", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The 99th percentile of latency between query requests and responses over a 1 minute period. 
Values are displayed individually for each node.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 80 + }, + "hiddenSeries": false, + "id": 18, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "histogram_quantile(0.99,rate(exec_latency_bucket{job=\"cockroachdb\",instance=~\"$node\"}[$rate_interval]))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "KV Execution Latency: 99th percentile", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:1084", + "format": "µs", + "label": "latency", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:1085", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The 90th percentile of latency between query requests and responses over a 1 minute period. 
Values are displayed individually for each node.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 88 + }, + "hiddenSeries": false, + "id": 20, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "histogram_quantile(0.90, rate(exec_latency_bucket{job=\"cockroachdb\",instance=~\"$node\",cluster=\"$cluster\"}[$rate_interval]))", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "KV Execution Latency: 90th percentile", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:1160", + "format": "ns", + "label": "latency", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:1161", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The total number of transactions initiated, committed, rolled back, or aborted per second.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 96 + }, + "hiddenSeries": false, + "id": 22, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false 
+ }, + "lines": true, + "linewidth": 2, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(rate(sql_txn_begin_count{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}[$rate_interval]))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Begin", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(sql_txn_commit_count{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}[$rate_interval]))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Commits", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate( sql_txn_rollback_count{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}[$rate_interval]))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Rollbacks", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(sql_txn_abort_count{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}[$rate_interval]))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Aborts", + "refId": "C" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Transactions", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:1458", + "format": "short", + "label": "latency", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:1459", + "format": "short", + 
"logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The 99th percentile of total transaction time over a 1 minute period. Values are displayed individually for each node.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 104 + }, + "hiddenSeries": false, + "id": 26, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "histogram_quantile(0.99,rate(sql_txn_latency_bucket{job=\"cockroachdb\",cluster=\"$cluster\"}[5m]))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Transaction Latency: 99th percentile", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:1756", + "format": "µs", + "label": "latency", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:1757", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The 90th percentile of total transaction 
time over a 1 minute period. Values are displayed individually for each node.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 112 + }, + "hiddenSeries": false, + "id": 28, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "histogram_quantile(0.90,rate(sql_txn_latency_bucket{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}[$__rate_interval]))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Transaction Latency: 90th percentile", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:1832", + "format": "ns", + "label": "latency", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:1833", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The current amount of allocated SQL memory. 
This amount is what is compared against the node's --max-sql-memory flag.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 120 + }, + "hiddenSeries": false, + "id": 34, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sql_mem_root_current{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "SQL Memory", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:157", + "format": "bytes", + "label": "allocation bytes", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:158", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The total number of DDL statements per second", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 128 + }, + "hiddenSeries": false, + "id": 24, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "nullPointMode": "null as zero", 
+ "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(sql_ddl_count{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}[$rate_interval])) ", + "interval": "", + "intervalFactor": 2, + "legendFormat": "DDL Statements", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Schema Changes", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:1908", + "format": "short", + "label": "statements", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:1909", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The total number of statements denied per second due to a [cluster setting](https://www.cockroachlabs.com/docs/v21.1/cluster-settings.html) in the format feature.statement_type.enabled = FALSE.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 136 + }, + "hiddenSeries": false, + "id": 36, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": 
false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "rate(sql_feature_flag_denial{cluster=\"$cluster\",job=\"cockroachdb\",instance=~\"$node\"}[$__rate_interval])", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Statement Denials: Cluster Settings", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:214", + "format": "short", + "label": "statements", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:215", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + } + ], + "refresh": "30s", + "revision": 1, + "schemaVersion": 38, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "Prometheus", + "value": "Prometheus" + }, + "hide": 0, + "includeAll": false, + "label": "datasource", + "multi": false, + "name": "DS_PROMETHEUS", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "current": { + "selected": false, + "text": "drew-demo", + "value": "drew-demo" + }, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "sys_uptime{job=\"cockroachdb\"}", + "hide": 0, + "includeAll": false, + "label": "Cluster", + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": "sys_uptime{job=\"cockroachdb\"}", + "refId": "Prometheus-cluster-Variable-Query" + }, + "refresh": 1, + "regex": "/cluster=\"([^\"]+)\"/", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": "", + "current": { + "selected": 
false, + "text": "All", + "value": "$__all" + }, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "label_values(sys_uptime{job=\"cockroachdb\",cluster=\"$cluster\"},instance)", + "hide": 0, + "includeAll": true, + "label": "Node", + "multi": false, + "name": "node", + "options": [], + "query": { + "query": "label_values(sys_uptime{job=\"cockroachdb\",cluster=\"$cluster\"},instance)", + "refId": "Prometheus-node-Variable-Query" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 3, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "selected": false, + "text": "1m", + "value": "1m" + }, + "hide": 0, + "label": "Rate Interval", + "name": "rate_interval", + "options": [ + { + "selected": false, + "text": "30s", + "value": "30s" + }, + { + "selected": true, + "text": "1m", + "value": "1m" + }, + { + "selected": false, + "text": "5m", + "value": "5m" + }, + { + "selected": false, + "text": "10m", + "value": "10m" + }, + { + "selected": false, + "text": "30m", + "value": "30m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": "6h", + "value": "6h" + }, + { + "selected": false, + "text": "12h", + "value": "12h" + }, + { + "selected": false, + "text": "1d", + "value": "1d" + } + ], + "query": "30s,1m,5m,10m,30m,1h,6h,12h,1d", + "queryValue": "", + "refresh": 2, + "skipUrlSync": false, + "type": "interval" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": {}, + "timezone": "America/New_York", + "title": "CRDB Console: SQL ", + "uid": "crdb-console-sql", + "version": 3, + "weekStart": "" +} diff --git a/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/storage.json b/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/storage.json new file mode 100644 index 00000000000..d08cd5e5918 --- 
/dev/null +++ b/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/storage.json @@ -0,0 +1,1347 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 6, + "links": [], + "liveNow": false, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Usage of disk space across all nodes\n\n**Capacity**: Maximum store size across all nodes. This value may be explicitly set per node using [--store](https://www.cockroachlabs.com/docs/v21.1/cockroach-start.html#store). If a store size has not been set, this metric displays the actual disk capacity.\n\n**Available**: Free disk space available to CockroachDB data across all nodes.\n\n**Used**: Disk space in use by CockroachDB data across all nodes. 
This excludes the Cockroach binary, operating system, and other system files.\n\n[How are these metrics calculated?](https://www.cockroachlabs.com/docs/v21.1/ui-storage-dashboard.html#capacity-metrics)", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.4.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(sum(capacity{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}) by (instance))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Max", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": " sum(sum(capacity_available{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}) by (instance))", + "interval": "", + "legendFormat": "Available", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(sum(capacity{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}) by (instance)) - sum(sum(capacity_available{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}) by (instance))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Used", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Capacity", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + 
"$$hashKey": "object:99", + "format": "bytes", + "label": "capacity", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:100", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Amount of data that can be read by applications and CockroachDB.\n\n**Live**: Number of logical bytes stored in live [key-value pairs](https://www.cockroachlabs.com/docs/v21.1/architecture/distribution-layer.html#table-data) across all nodes. Live data excludes historical and deleted data.\n\n**System**: Number of physical bytes stored in [system key-value pairs](https://www.cockroachlabs.com/docs/v21.1/architecture/distribution-layer.html#table-data).", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 8 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.4.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(sum(livebytes{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}) by (instance))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Live", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(sum(sysbytes{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}) by (instance))", + "interval": "", + "intervalFactor": 2, + "legendFormat": 
"System", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Live Bytes", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:323", + "format": "bytes", + "label": "live bytes", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:324", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The 99th %ile latency for commits to the Raft Log. This measures essentially an fdatasync to the storage engine's write-ahead log.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 16 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.4.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "histogram_quantile(0.99,rate(raft_process_logcommit_latency_bucket{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}[$__rate_interval]))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Log Commit Latency: 99th Percentile", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + 
"mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:474", + "format": "ns", + "label": "latency", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:475", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The 50th %ile latency for commits to the Raft Log. This measures essentially an fdatasync to the storage engine's write-ahead log.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 24 + }, + "hiddenSeries": false, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "histogram_quantile(0.50,rate(raft_process_logcommit_latency_bucket{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}[$__rate_interval]))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Log Commit Latency: 50th Percentile", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:550", + "format": "ns", + "label": "latency", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:551", + 
"format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The 99th %ile latency for commits of Raft commands. This measures applying a batch to the storage engine (including writes to the write-ahead log), but no fsync.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 30 + }, + "hiddenSeries": false, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "histogram_quantile(0.99,rate(raft_process_commandcommit_latency_bucket{job=\"cockroachdb\", instance=~\"$node\", cluster=~\"$cluster\"}[$__rate_interval]))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Command Commit Latency: 99th Percentile ", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:774", + "format": "ns", + "label": "latency", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:775", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + 
"datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The 50th %ile latency for commits of Raft commands. This measures applying a batch to the storage engine (including writes to the write-ahead log), but no fsync.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 38 + }, + "hiddenSeries": false, + "id": 12, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "histogram_quantile(0.50,rate(raft_process_commandcommit_latency_bucket{job=\"cockroachdb\", instance=~\"$node\", cluster=~\"$cluster\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Command Commit Latency: 50th percentile ", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:850", + "format": "ns", + "label": "latency", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:851", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The average number of real read operations executed per logical read operation.", + "fill": 1, + "fillGradient": 0, + 
"gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 46 + }, + "hiddenSeries": false, + "id": 20, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "avg(avg(rocksdb_read_amplification{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}) by (instance))", + "interval": "", + "legendFormat": "Read Amplification", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Read Amplification", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:926", + "format": "short", + "label": "factor", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:927", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The number of SSTables in use.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 54 + }, + "hiddenSeries": false, + "id": 16, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "", + "pointradius": 2, + "points": 
false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rocksdb_num_sstables{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"})", + "interval": "", + "legendFormat": "SSTables", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "SSTables", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:1002", + "format": "short", + "label": "sstables", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:1003", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The number of open file descriptors, compared with the file descriptor limit.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 62 + }, + "hiddenSeries": false, + "id": 14, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(sum(sys_fd_open{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}) by (instance))", + "interval": "", + "intervalFactor": 2, + "legendFormat": 
"Open", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(sum(sys_fd_softlimit{job=\"cockroachdb\",cluster=\"$cluster\",instance=~\"$node\"}) by (instance))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "Limit", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "File Descriptors", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:1226", + "format": "short", + "label": "descriptors", + "logBase": 1, + "show": true + }, + { + "$$hashKey": "object:1227", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The number of compactions and memtable flushes per second.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 70 + }, + "hiddenSeries": false, + "id": 18, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "sum(rate(rocksdb_compactions{job=\"cockroachdb\",cluster=\"$cluster\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "Compactions", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": 
"sum(rate(rocksdb_flushes{job=\"cockroachdb\",cluster=\"$cluster\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "Flushes", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Compactions/Flushes", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:1376", + "format": "short", + "label": "count", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:1377", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The number of successfully written time series samples, and number of errors attempting to write time series, per second.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 78 + }, + "hiddenSeries": false, + "id": 24, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(timeseries_write_samples{job=\"cockroachdb\",cluster=~\"$cluster\",instance=~\"$node\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "Samples Written", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": 
"sum(rate(timeseries_write_errors{job=\"cockroachdb\",cluster=~\"$cluster\",instance=~\"$node\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "Errors", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Time Series Writes", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:1452", + "format": "short", + "label": "count", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:1453", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "The number of bytes written by the time series system per second. \nNote that this does not reflect the rate at which disk space is consumed by time series; the data is highly compressed on disk. 
This rate is instead intended to indicate the amount of network traffic and disk activity generated by time series writes.\nSee the \"databases\" tab to find the current disk usage for time series data.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 86 + }, + "hiddenSeries": false, + "id": 22, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum(rate(timeseries_write_bytes{job=\"cockroachdb\",cluster=~\"$cluster\",instance=~\"$node\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "Bytes Written", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Time Series Bytes Written", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:1528", + "format": "bytes", + "label": "bytes", + "logBase": 1, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:1529", + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + } + ], + "refresh": "30s", + "revision": 1, + "schemaVersion": 38, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "Prometheus", + "value": "Prometheus" + }, + "hide": 0, + "includeAll": false, + "label": "datasource", + "multi": false, + "name": "DS_PROMETHEUS", + "options": [], + "query": "prometheus", + "refresh": 1, 
+ "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "current": { + "selected": false, + "text": "drew-demo", + "value": "drew-demo" + }, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "sys_uptime{job=\"cockroachdb\"}", + "hide": 0, + "includeAll": false, + "label": "Cluster", + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": "sys_uptime{job=\"cockroachdb\"}", + "refId": "Prometheus-cluster-Variable-Query" + }, + "refresh": 1, + "regex": "/cluster=\"([^\"]+)\"/", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": "", + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "label_values(sys_uptime{job=\"cockroachdb\",cluster=\"$cluster\"},instance)", + "hide": 0, + "includeAll": true, + "label": "Node", + "multi": false, + "name": "node", + "options": [], + "query": { + "query": "label_values(sys_uptime{job=\"cockroachdb\",cluster=\"$cluster\"},instance)", + "refId": "Prometheus-node-Variable-Query" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "selected": false, + "text": "30s", + "value": "30s" + }, + "hide": 0, + "label": "Rate Interval", + "name": "rate_interval", + "options": [ + { + "selected": true, + "text": "30s", + "value": "30s" + }, + { + "selected": false, + "text": "1m", + "value": "1m" + }, + { + "selected": false, + "text": "5m", + "value": "5m" + }, + { + "selected": false, + "text": "10m", + "value": "10m" + }, + { + "selected": false, + "text": "30m", + "value": "30m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": 
"6h", + "value": "6h" + }, + { + "selected": false, + "text": "12h", + "value": "12h" + }, + { + "selected": false, + "text": "1d", + "value": "1d" + } + ], + "query": "30s,1m,5m,10m,30m,1h,6h,12h,1d", + "queryValue": "", + "refresh": 2, + "skipUrlSync": false, + "type": "interval" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": {}, + "timezone": "America/New_York", + "title": "CRDB Console: Storage ", + "uid": "crdb-console-storage", + "version": 3, + "weekStart": "" +} diff --git a/src/current/files/cockroach/monitoring/prometheus.yml b/src/current/files/cockroach/monitoring/prometheus.yml new file mode 100644 index 00000000000..8e84600d4f2 --- /dev/null +++ b/src/current/files/cockroach/monitoring/prometheus.yml @@ -0,0 +1,35 @@ +# Prometheus configuration for cockroach clusters. +# Requires prometheus 2.X +# +# Run with: +# $ prometheus --config.file=prometheus.yml +global: + scrape_interval: 10s + evaluation_interval: 10s + +rule_files: +- "rules/alerts.rules.yml" +- "rules/aggregation.rules.yml" + +# Alert manager running on the same host: +alerting: + alertmanagers: + - path_prefix: "/alertmanager/" + static_configs: + - targets: + - localhost:9093 + +scrape_configs: + - job_name: 'cockroachdb' + metrics_path: '/_status/vars' + # Insecure mode: + scheme: 'http' + # Secure mode: + # scheme: 'https' + tls_config: + insecure_skip_verify: true + + static_configs: + - targets: ['localhost:8080'] + labels: + cluster: 'my-cockroachdb-cluster' diff --git a/src/current/files/cockroach/monitoring/rules/aggregation.rules.yml b/src/current/files/cockroach/monitoring/rules/aggregation.rules.yml new file mode 100644 index 00000000000..56e0bc8e604 --- /dev/null +++ b/src/current/files/cockroach/monitoring/rules/aggregation.rules.yml @@ -0,0 +1,109 @@ +# This file contains aggregation rules, specifically: +# "node:X" node-level aggregation of a per-store metric X +# "cluster:X" cluster-level aggregation of a per-store or per-node metric X + 
+# Most aggregation rules should use the "without (label1, label2, ...)" keyword +# to keep all labels but the ones specified. + +groups: +- name: rules/aggregation.rules + rules: + - record: node:capacity + expr: sum without(store) (capacity{job="cockroachdb"}) + - record: cluster:capacity + expr: sum without(instance) (node:capacity{job="cockroachdb"}) + - record: node:capacity_available + expr: sum without(store) (capacity_available{job="cockroachdb"}) + - record: cluster:capacity_available + expr: sum without(instance) (node:capacity_available{job="cockroachdb"}) + - record: capacity_available:ratio + expr: capacity_available{job="cockroachdb"} / capacity{job="cockroachdb"} + - record: node:capacity_available:ratio + expr: node:capacity_available{job="cockroachdb"} / node:capacity{job="cockroachdb"} + - record: cluster:capacity_available:ratio + expr: cluster:capacity_available{job="cockroachdb"} / cluster:capacity{job="cockroachdb"} + # Histogram rules: these are fairly expensive to compute live, so we precompute a few percentiles. 
+ - record: txn_durations_bucket:rate1m + expr: rate(txn_durations_bucket{job="cockroachdb"}[1m]) + - record: txn_durations:rate1m:quantile_50 + expr: histogram_quantile(0.5, txn_durations_bucket:rate1m) + - record: txn_durations:rate1m:quantile_75 + expr: histogram_quantile(0.75, txn_durations_bucket:rate1m) + - record: txn_durations:rate1m:quantile_90 + expr: histogram_quantile(0.9, txn_durations_bucket:rate1m) + - record: txn_durations:rate1m:quantile_95 + expr: histogram_quantile(0.95, txn_durations_bucket:rate1m) + - record: txn_durations:rate1m:quantile_99 + expr: histogram_quantile(0.99, txn_durations_bucket:rate1m) + - record: exec_latency_bucket:rate1m + expr: rate(exec_latency_bucket{job="cockroachdb"}[1m]) + - record: exec_latency:rate1m:quantile_50 + expr: histogram_quantile(0.5, exec_latency_bucket:rate1m) + - record: exec_latency:rate1m:quantile_75 + expr: histogram_quantile(0.75, exec_latency_bucket:rate1m) + - record: exec_latency:rate1m:quantile_90 + expr: histogram_quantile(0.9, exec_latency_bucket:rate1m) + - record: exec_latency:rate1m:quantile_95 + expr: histogram_quantile(0.95, exec_latency_bucket:rate1m) + - record: exec_latency:rate1m:quantile_99 + expr: histogram_quantile(0.99, exec_latency_bucket:rate1m) + - record: round_trip_latency_bucket:rate1m + expr: rate(round_trip_latency_bucket{job="cockroachdb"}[1m]) + - record: round_trip_latency:rate1m:quantile_50 + expr: histogram_quantile(0.5, round_trip_latency_bucket:rate1m) + - record: round_trip_latency:rate1m:quantile_75 + expr: histogram_quantile(0.75, round_trip_latency_bucket:rate1m) + - record: round_trip_latency:rate1m:quantile_90 + expr: histogram_quantile(0.9, round_trip_latency_bucket:rate1m) + - record: round_trip_latency:rate1m:quantile_95 + expr: histogram_quantile(0.95, round_trip_latency_bucket:rate1m) + - record: round_trip_latency:rate1m:quantile_99 + expr: histogram_quantile(0.99, round_trip_latency_bucket:rate1m) + - record: sql_exec_latency_bucket:rate1m + expr: 
rate(sql_exec_latency_bucket{job="cockroachdb"}[1m]) + - record: sql_exec_latency:rate1m:quantile_50 + expr: histogram_quantile(0.5, sql_exec_latency_bucket:rate1m) + - record: sql_exec_latency:rate1m:quantile_75 + expr: histogram_quantile(0.75, sql_exec_latency_bucket:rate1m) + - record: sql_exec_latency:rate1m:quantile_90 + expr: histogram_quantile(0.9, sql_exec_latency_bucket:rate1m) + - record: sql_exec_latency:rate1m:quantile_95 + expr: histogram_quantile(0.95, sql_exec_latency_bucket:rate1m) + - record: sql_exec_latency:rate1m:quantile_99 + expr: histogram_quantile(0.99, sql_exec_latency_bucket:rate1m) + - record: raft_process_logcommit_latency_bucket:rate1m + expr: rate(raft_process_logcommit_latency_bucket{job="cockroachdb"}[1m]) + - record: raft_process_logcommit_latency:rate1m:quantile_50 + expr: histogram_quantile(0.5, raft_process_logcommit_latency_bucket:rate1m) + - record: raft_process_logcommit_latency:rate1m:quantile_75 + expr: histogram_quantile(0.75, raft_process_logcommit_latency_bucket:rate1m) + - record: raft_process_logcommit_latency:rate1m:quantile_90 + expr: histogram_quantile(0.9, raft_process_logcommit_latency_bucket:rate1m) + - record: raft_process_logcommit_latency:rate1m:quantile_95 + expr: histogram_quantile(0.95, raft_process_logcommit_latency_bucket:rate1m) + - record: raft_process_logcommit_latency:rate1m:quantile_99 + expr: histogram_quantile(0.99, raft_process_logcommit_latency_bucket:rate1m) + - record: raft_process_commandcommit_latency_bucket:rate1m + expr: rate(raft_process_commandcommit_latency_bucket{job="cockroachdb"}[1m]) + - record: raft_process_commandcommit_latency:rate1m:quantile_50 + expr: histogram_quantile(0.5, raft_process_commandcommit_latency_bucket:rate1m) + - record: raft_process_commandcommit_latency:rate1m:quantile_75 + expr: histogram_quantile(0.75, raft_process_commandcommit_latency_bucket:rate1m) + - record: raft_process_commandcommit_latency:rate1m:quantile_90 + expr: histogram_quantile(0.9, 
raft_process_commandcommit_latency_bucket:rate1m) + - record: raft_process_commandcommit_latency:rate1m:quantile_95 + expr: histogram_quantile(0.95, raft_process_commandcommit_latency_bucket:rate1m) + - record: raft_process_commandcommit_latency:rate1m:quantile_99 + expr: histogram_quantile(0.99, raft_process_commandcommit_latency_bucket:rate1m) + - record: storage_wal_fsync_latency_bucket:rate1m + expr: rate(storage_wal_fsync_latency_bucket{job="cockroachdb"}[1m]) + - record: storage_wal_fsync_latency:rate1m:quantile_50 + expr: histogram_quantile(0.5, storage_wal_fsync_latency_bucket:rate1m) + - record: storage_wal_fsync_latency:rate1m:quantile_75 + expr: histogram_quantile(0.75, storage_wal_fsync_latency_bucket:rate1m) + - record: storage_wal_fsync_latency:rate1m:quantile_90 + expr: histogram_quantile(0.9, storage_wal_fsync_latency_bucket:rate1m) + - record: storage_wal_fsync_latency:rate1m:quantile_95 + expr: histogram_quantile(0.95, storage_wal_fsync_latency_bucket:rate1m) + - record: storage_wal_fsync_latency:rate1m:quantile_99 + expr: histogram_quantile(0.99, storage_wal_fsync_latency_bucket:rate1m) diff --git a/src/current/files/cockroach/monitoring/rules/alerts.rules.yml b/src/current/files/cockroach/monitoring/rules/alerts.rules.yml new file mode 100644 index 00000000000..5d198a762bc --- /dev/null +++ b/src/current/files/cockroach/monitoring/rules/alerts.rules.yml @@ -0,0 +1,157 @@ +groups: +- name: rules/alerts.rules + rules: + # Alert for any instance that is unreachable for >15 minutes. + - alert: InstanceDead + expr: up{job="cockroachdb"} == 0 + for: 15m + annotations: + description: '{{ $labels.instance }} for cluster {{ $labels.cluster }} has been + down for more than 15 minutes.' + summary: Instance {{ $labels.instance }} dead + # Alert for any instance that is not ready for a while. 
+ - alert: InstanceNotReady + # This alert applies only to Kubernetes deployments and requires that you run kube-state-metrics: https://github.com/kubernetes/kube-state-metrics + expr: kube_statefulset_status_replicas_ready{statefulset="cockroachdb"} != kube_statefulset_status_replicas{statefulset="cockroachdb"} + for: 45m + annotations: + description: 'there has been an unready replica for cluster {{ $labels.cluster }} + for more than 45 minutes.' + summary: Instance not ready + # Alert on instance restarts. + - alert: InstanceRestart + expr: resets(sys_uptime{job="cockroachdb"}[24h]) > 1 + annotations: + description: '{{ $labels.instance }} for cluster {{ $labels.cluster }} restarted + {{ $value }} time(s) in 24h' + summary: Instance {{ $labels.instance }} restarted + # Alert on flapping instances (frequent restarts). + - alert: InstancesFlapping + # Aggregated. + # This alert assumes that rolling restarts or rolling upgrades leave at least 3 minutes between each node being updated or restarted. + expr: sum by (cluster)(resets(sys_uptime{job="cockroachdb"}[5m])) > 2 + annotations: + description: 'instances in cluster {{ $labels.cluster }} restarted + {{ $value }} time(s) in 5m' + summary: Instances in {{ $labels.cluster }} flapping + # Alert on flapping instances (frequent restarts). + - alert: InstanceFlapping + # Un-aggregated. + expr: resets(sys_uptime{job="cockroachdb"}[10m]) > 1 + annotations: + description: '{{ $labels.instance }} for cluster {{ $labels.cluster }} restarted + {{ $value }} time(s) in 10m' + summary: Instance {{ $labels.instance }} flapping + # Alert on version mismatch. + # This alert is intentionally loose (4 hours) to allow for rolling upgrades. + # This may need to be adjusted for large clusters. 
+ - alert: VersionMismatch + expr: count by(cluster) (count_values by(tag, cluster) ("version", build_timestamp{job="cockroachdb"})) + > 1 + for: 4h + annotations: + description: Cluster {{ $labels.cluster }} running {{ $value }} different versions + summary: Binary version mismatch on {{ $labels.cluster }} + # Available capacity alerts. + - alert: StoreDiskLow + expr: capacity_available:ratio{job="cockroachdb"} < 0.15 + annotations: + summary: Store {{ $labels.store }} on node {{ $labels.instance }} at {{ $value + }} available disk fraction + - alert: ClusterDiskLow + expr: cluster:capacity_available:ratio{job="cockroachdb"} < 0.2 + annotations: + summary: Cluster {{ $labels.cluster }} at {{ $value }} available disk fraction + # Unavailable ranges. + - alert: UnavailableRanges + expr: (sum by(instance, cluster) (ranges_unavailable{job="cockroachdb"})) > 0 + for: 10m + annotations: + summary: Instance {{ $labels.instance }} has {{ $value }} unavailable ranges + # Cockroach-measured clock offset nearing limit (by default, servers kill themselves at 400ms from the mean, so alert at 300ms) + - alert: ClockOffsetNearMax + expr: clock_offset_meannanos{job="cockroachdb"} > 300 * 1000 * 1000 + for: 5m + annotations: + summary: Clock on {{ $labels.instance }} as measured by cockroach is offset by {{ $value }} nanoseconds from the cluster mean # Certificate expiration. Alerts are per node. 
+ - alert: CACertificateExpiresSoon + expr: (security_certificate_expiration_ca{job="cockroachdb"} > 0) and (security_certificate_expiration_ca{job="cockroachdb"} + - time()) < 86400 * 366 + labels: + frequency: daily + annotations: + summary: CA certificate for {{ $labels.instance }} expires in less than a year + - alert: ClientCACertificateExpiresSoon + expr: (security_certificate_expiration_client_ca{job="cockroachdb"} > 0) and (security_certificate_expiration_client_ca{job="cockroachdb"} + - time()) < 86400 * 366 + labels: + frequency: daily + annotations: + summary: Client CA certificate for {{ $labels.instance }} expires in less than a year + - alert: UICACertificateExpiresSoon + expr: (security_certificate_expiration_ui_ca{job="cockroachdb"} > 0) and (security_certificate_expiration_ui_ca{job="cockroachdb"} + - time()) < 86400 * 366 + labels: + frequency: daily + annotations: + summary: UI CA certificate for {{ $labels.instance }} expires in less than a year + - alert: NodeCertificateExpiresSoon + expr: (security_certificate_expiration_node{job="cockroachdb"} > 0) and (security_certificate_expiration_node{job="cockroachdb"} + - time()) < 86400 * 183 + labels: + frequency: daily + annotations: + summary: Node certificate for {{ $labels.instance }} expires in less than six months + - alert: NodeClientCertificateExpiresSoon + expr: (security_certificate_expiration_node_client{job="cockroachdb"} > 0) and (security_certificate_expiration_node_client{job="cockroachdb"} + - time()) < 86400 * 183 + labels: + frequency: daily + annotations: + summary: Client certificate for {{ $labels.instance }} expires in less than six months + - alert: UICertificateExpiresSoon + expr: (security_certificate_expiration_ui{job="cockroachdb"} > 0) and (security_certificate_expiration_ui{job="cockroachdb"} + - time()) < 86400 * 20 + labels: + frequency: daily + annotations: + summary: UI certificate for {{ $labels.instance }} expires in less than 20 days + # Slow Latch/Lease/Raft 
requests. + - alert: SlowLatchRequest + expr: requests_slow_latch{job="cockroachdb"} > 0 + for: 5m + labels: + severity: testing + annotations: + summary: '{{ $value }} slow latch requests on {{ $labels.instance }}' + - alert: SlowLeaseRequest + expr: requests_slow_lease{job="cockroachdb"} > 0 + for: 5m + labels: + severity: testing + annotations: + summary: '{{ $value }} slow lease requests on {{ $labels.instance }}' + - alert: SlowRaftRequest + expr: requests_slow_raft{job="cockroachdb"} > 0 + for: 5m + labels: + severity: testing + annotations: + summary: '{{ $value }} slow raft requests on {{ $labels.instance }}' + # Getting close to open file descriptor limit. + - alert: HighOpenFDCount + expr: sys_fd_open{job="cockroachdb"} / sys_fd_softlimit{job="cockroachdb"} > 0.8 + for: 10m + annotations: + summary: 'Too many open file descriptors on {{ $labels.instance }}: {{ $value + }} fraction used' + # Prometheus disk getting full. + - alert: PrometheusDiskLow + expr: node_filesystem_free{cluster="prometheus",job="node_exporter_prometheus",mountpoint="/data"} + / node_filesystem_size{cluster="prometheus",job="node_exporter_prometheus",mountpoint="/data"} + < 0.2 + for: 10m + labels: + severity: testing + annotations: + summary: 'Prometheus storage is almost full: {{ $value }} fraction free' \ No newline at end of file diff --git a/src/current/releases/downloads-archive.md b/src/current/releases/downloads-archive.md index 5709dd18e08..2a358586408 100644 --- a/src/current/releases/downloads-archive.md +++ b/src/current/releases/downloads-archive.md @@ -478,7 +478,7 @@ CockroachDB {{ v.major_version }} is partially supported. Pre-LTS patches (befor
-

The source code for CockroachDB is hosted in the cockroachdb/cockroach repository on Github.

+

The source code for CockroachDB is hosted in the cockroachdb/cockroach repository on Github.

@@ -520,7 +520,7 @@ CockroachDB {{ v.major_version }} is partially supported. Pre-LTS patches (befor {% else %} {% comment %} Add download links for all non-withdrawn versions. {% endcomment %}
{% if r.source == true %} - View on Github + View on Github {% else %} N/A {% endif %} diff --git a/src/current/releases/index.md b/src/current/releases/index.md index 0dfd3317042..7f00e7f91fa 100644 --- a/src/current/releases/index.md +++ b/src/current/releases/index.md @@ -92,7 +92,7 @@ All CockroachDB binaries released on or after the day 24.3.0 is released onward, All CockroachDB binaries released prior to the release date of 24.3.0 are variously licensed under the Business Source License 1.1 (BSL), the CockroachDB Community License (CCL), and other licenses specified in the source code. -To review the CCL, refer to the [CockroachDB Community License](https://www.cockroachlabs.com/cockroachdb-community-license) page. You can find the applicable Business Source License or third party licenses by reviewing these in the `licenses` folder for the applicable version of CockroachDB in the GitHub repository [cockroachdb/cockroach](https://github.com/cockroachdb/cockroach). See individual files for details. +To review the CCL, refer to the [CockroachDB Community License](https://www.cockroachlabs.com/cockroachdb-community-license) page. You can find the applicable Business Source License or third party licenses by reviewing these in the `licenses` folder for the applicable version of CockroachDB in the GitHub repository cockroachdb/cockroach. See individual files for details. In late 2024, Cockroach Labs retired its Core offering to consolidate on a single CockroachDB Enterprise offering under the CockroachDB Software License. This license is available at no charge for individual users and small businesses, and offers all users, free and paid, the full breadth of CockroachDB capabilities. For details, refer to the [CockroachDB licensing update](https://www.cockroachlabs.com/enterprise-license-update/) and [Licensing FAQs]({% link {{site.versions["stable"]}}/licensing-faqs.md %}). 
diff --git a/src/current/v23.1/admission-control.md b/src/current/v23.1/admission-control.md index f7c36afea13..c80b3f91157 100644 --- a/src/current/v23.1/admission-control.md +++ b/src/current/v23.1/admission-control.md @@ -97,6 +97,6 @@ The [DB Console Overload dashboard]({% link {{ page.version.version }}/ui-overlo ## See also -The [technical note for admission control](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/admission_control.md) for details on the design of the admission control system. +The [technical note for admission control](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/tech-notes/admission_control.md) for details on the design of the admission control system. {% include {{page.version.version}}/sql/server-side-connection-limit.md %} This may be useful in addition to your admission control settings. diff --git a/src/current/v23.1/alter-changefeed.md b/src/current/v23.1/alter-changefeed.md index 15a860c7010..e18af7d0403 100644 --- a/src/current/v23.1/alter-changefeed.md +++ b/src/current/v23.1/alter-changefeed.md @@ -238,8 +238,8 @@ For further discussion on using the `FAMILY` keyword and `split_column_families` ## Known limitations -- It is necessary to [`PAUSE`]({% link {{ page.version.version }}/pause-job.md %}) the changefeed before performing any `ALTER CHANGEFEED` statement. [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/77171) -- `ALTER CHANGEFEED` will accept duplicate targets without sending an error. [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/78285) +- It is necessary to [`PAUSE`]({% link {{ page.version.version }}/pause-job.md %}) the changefeed before performing any `ALTER CHANGEFEED` statement. Tracking GitHub Issue +- `ALTER CHANGEFEED` will accept duplicate targets without sending an error. 
Tracking GitHub Issue - {% include {{ page.version.version }}/known-limitations/alter-changefeed-cdc-queries.md %} - CockroachDB does not keep track of the [`initial_scan`]({% link {{ page.version.version }}/create-changefeed.md %}#initial-scan) option applied to tables when it is set to `yes` or `only`. For example: diff --git a/src/current/v23.1/alter-table.md b/src/current/v23.1/alter-table.md index c7628b37115..76e54daa286 100644 --- a/src/current/v23.1/alter-table.md +++ b/src/current/v23.1/alter-table.md @@ -264,7 +264,7 @@ For examples, see [Drop columns](#drop-columns). #### Known limitations -- CockroachDB prevents a column from being dropped if it is referenced by a [partial index]({% link {{ page.version.version }}/partial-indexes.md %}) predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). See [tracking issue](https://github.com/cockroachdb/cockroach/issues/97813). +- CockroachDB prevents a column from being dropped if it is referenced by a [partial index]({% link {{ page.version.version }}/partial-indexes.md %}) predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). See tracking issue. #### Required privileges diff --git a/src/current/v23.1/alter-view.md b/src/current/v23.1/alter-view.md index 47de30984d8..5b3768ab8a5 100644 --- a/src/current/v23.1/alter-view.md +++ b/src/current/v23.1/alter-view.md @@ -36,7 +36,7 @@ Parameter | Description CockroachDB does not currently support: - Changing the [`SELECT`]({% link {{ page.version.version }}/select-clause.md %}) statement executed by a view. Instead, you must drop the existing view and create a new view. -- Renaming a view that other views depend on. This feature may be added in the future (see [tracking issue](https://github.com/cockroachdb/cockroach/issues/10083)). 
+- Renaming a view that other views depend on. This feature may be added in the future (see tracking issue). ## Examples diff --git a/src/current/v23.1/architecture/sql-layer.md b/src/current/v23.1/architecture/sql-layer.md index bfb67a48c05..2605879b93d 100644 --- a/src/current/v23.1/architecture/sql-layer.md +++ b/src/current/v23.1/architecture/sql-layer.md @@ -117,7 +117,7 @@ It's also important––for indexed columns––that this byte encoding preser However, for non-indexed columns (e.g., non-`PRIMARY KEY` columns), CockroachDB instead uses an encoding (known as "value encoding") which consumes less space but does not preserve ordering. -You can find more exhaustive detail in the [Encoding Tech Note](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/encoding.md). +You can find more exhaustive detail in [the Encoding Tech Note](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/tech-notes/encoding.md). ### DistSQL @@ -134,7 +134,7 @@ To run SQL statements in a distributed fashion, we introduce a couple of concept - **Logical plan**: Similar to the AST/`planNode` tree described above, it represents the abstract (non-distributed) data flow through computation stages. - **Physical plan**: A physical plan is conceptually a mapping of the logical plan nodes to physical machines running `cockroach`. Logical plan nodes are replicated and specialized depending on the cluster topology. Like `planNodes` above, these components of the physical plan are scheduled and run on the cluster. -You can find much greater detail in the [DistSQL RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20160421_distributed_sql.md). +You can find much greater detail in [the DistSQL RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20160421_distributed_sql.md). 
## Schema changes diff --git a/src/current/v23.1/cluster-settings.md b/src/current/v23.1/cluster-settings.md index 1aa538172f8..97f4ac2141d 100644 --- a/src/current/v23.1/cluster-settings.md +++ b/src/current/v23.1/cluster-settings.md @@ -21,7 +21,7 @@ These cluster settings have a broad impact on CockroachDB internals and affect a {% include {{page.version.version}}/sql/sql-defaults-cluster-settings-deprecation-notice.md %} -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/settings/settings.html %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/settings/settings.html{% endcapture %}{% include {{ cockroach_include }} %} ## View current cluster settings diff --git a/src/current/v23.1/cluster-setup-troubleshooting.md b/src/current/v23.1/cluster-setup-troubleshooting.md index 79d98251f18..c30831bea39 100644 --- a/src/current/v23.1/cluster-setup-troubleshooting.md +++ b/src/current/v23.1/cluster-setup-troubleshooting.md @@ -104,7 +104,7 @@ If you are trying to run all nodes on the same machine, you might get the follow #### Store directory already exists ~~~ -ERROR: could not cleanup temporary directories from record file: could not lock temporary directory /Users/amruta/go/src/github.com/cockroachdb/cockroach/cockroach-data/cockroach-temp301343769, may still be in use: IO error: While lock file: /Users/amruta/go/src/github.com/cockroachdb/cockroach/cockroach-data/cockroach-temp301343769/TEMP_DIR.LOCK: Resource temporarily unavailable +ERROR: could not cleanup temporary directories from record file: could not lock temporary directory /cockroach-data/cockroach-temp301343769, may still be in use: IO error: While lock file: /cockroach-data/cockroach-temp301343769/TEMP_DIR.LOCK: Resource temporarily unavailable ~~~ **Explanation:** When starting a new node on the same machine, the directory you choose to store the data in also contains metadata 
identifying the cluster the data came from. This causes conflicts when you've already started a node on the server and then tried to start another cluster using the same directory. diff --git a/src/current/v23.1/cockroachdb-feature-availability.md b/src/current/v23.1/cockroachdb-feature-availability.md index 53a57a145f3..64828570b42 100644 --- a/src/current/v23.1/cockroachdb-feature-availability.md +++ b/src/current/v23.1/cockroachdb-feature-availability.md @@ -130,7 +130,7 @@ Example: ### Check for constraint violations with `SCRUB` -Checks the consistency of [`UNIQUE`]({% link {{ page.version.version }}/unique.md %}) indexes, [`CHECK`]({% link {{ page.version.version }}/check.md %}) constraints, and more. Partially implemented; see [cockroachdb/cockroach#10425](https://github.com/cockroachdb/cockroach/issues/10425) for details. +Checks the consistency of [`UNIQUE`]({% link {{ page.version.version }}/unique.md %}) indexes, [`CHECK`]({% link {{ page.version.version }}/check.md %}) constraints, and more. Partially implemented; see cockroachdb/cockroach#10425 for details. {{site.data.alerts.callout_info}} This example uses the `users` table from our open-source, fictional peer-to-peer vehicle-sharing application, [MovR]({% link {{ page.version.version }}/movr.md %}). @@ -165,7 +165,7 @@ CockroachDB supports [altering the column types]({% link {{ page.version.version ### Temporary objects -[Temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}), [temporary views]({% link {{ page.version.version }}/views.md %}#temporary-views), and [temporary sequences]({% link {{ page.version.version }}/create-sequence.md %}#temporary-sequences) are in preview in CockroachDB. If you create too many temporary objects in a session, the performance of DDL operations will degrade. Performance limitations could persist long after creating the temporary objects. For more details, see [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). 
+[Temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}), [temporary views]({% link {{ page.version.version }}/views.md %}#temporary-views), and [temporary sequences]({% link {{ page.version.version }}/create-sequence.md %}#temporary-sequences) are in preview in CockroachDB. If you create too many temporary objects in a session, the performance of DDL operations will degrade. Performance limitations could persist long after creating the temporary objects. For more details, see cockroachdb/cockroach#46260. To enable temporary objects, set the `experimental_enable_temp_tables` [session variable]({% link {{ page.version.version }}/show-vars.md %}) to `on`. diff --git a/src/current/v23.1/common-table-expressions.md b/src/current/v23.1/common-table-expressions.md index e9667131380..32f5aa3d158 100644 --- a/src/current/v23.1/common-table-expressions.md +++ b/src/current/v23.1/common-table-expressions.md @@ -444,7 +444,7 @@ SELECT COUNT(*) FROM temp; Because this pattern incurs the overhead of a new scan for each iteration, it is slower per row than a full scan. It is therefore faster than a full scan in cases (such as this one) where many rows are skipped, but is slower if they are not. {{site.data.alerts.callout_info}} -Some recursive CTEs are not not yet optimized. For details, see the [tracking issue](https://github.com/cockroachdb/cockroach/issues/89954). +Some recursive CTEs are not yet optimized. For details, see the tracking issue.
{{site.data.alerts.end}} ## Correlated common table expressions diff --git a/src/current/v23.1/configure-replication-zones.md b/src/current/v23.1/configure-replication-zones.md index cbf8256b2b4..f8a0d38a80c 100644 --- a/src/current/v23.1/configure-replication-zones.md +++ b/src/current/v23.1/configure-replication-zones.md @@ -98,7 +98,7 @@ For more information, see the following subsections: The hierarchy of inheritance for zone configs can be visualized using the following outline-style diagram, in which each level of indentation denotes an inheritance relationship. -The only exception to this simple inheritance relationship is that due to a known limitation, sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information, see [cockroachdb/cockroach#75862](https://github.com/cockroachdb/cockroach/issues/75862). +The only exception to this simple inheritance relationship is that due to a known limitation, sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information, see cockroachdb/cockroach#75862. ``` - default @@ -127,7 +127,7 @@ From the whole-system perspective, the hierarchy of schema object zone configs c The following diagram presents the same set of schema objects as the previous outline-style diagram, but using boxes and lines joined with arrows that represent the "top-down" view. -Each box represents a schema object in the zone configuration inheritance hierarchy. Each solid line ends in an arrow that points from a parent object to its child object, which will inherit the parent's values unless those values are changed at the child level. The dotted lines between partitions and sub-partitions represent the known limitation mentioned previously that sub-partitions do not inherit their values from their parent partitions. 
Instead, sub-partitions inherit their values from the parent table. For more information about this limitation, see [cockroachdb/cockroach#75862](https://github.com/cockroachdb/cockroach/issues/75862). +Each box represents a schema object in the zone configuration inheritance hierarchy. Each solid line ends in an arrow that points from a parent object to its child object, which will inherit the parent's values unless those values are changed at the child level. The dotted lines between partitions and sub-partitions represent the known limitation mentioned previously that sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information about this limitation, see cockroachdb/cockroach#75862. zone config inheritance diagram diff --git a/src/current/v23.1/create-sequence.md b/src/current/v23.1/create-sequence.md index eba6f3a07f7..389d71675ed 100644 --- a/src/current/v23.1/create-sequence.md +++ b/src/current/v23.1/create-sequence.md @@ -58,7 +58,7 @@ CockroachDB supports the following [SQL sequence functions]({% link {{ page.vers CockroachDB supports session-scoped temporary sequences. Unlike persistent sequences, temporary sequences can only be accessed from the session in which they were created, and they are dropped at the end of the session. You can create temporary sequences on both persistent tables and [temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. 
{{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v23.1/create-table.md b/src/current/v23.1/create-table.md index 0976255a0d8..37348651adc 100644 --- a/src/current/v23.1/create-table.md +++ b/src/current/v23.1/create-table.md @@ -155,7 +155,7 @@ If you use `GENERATED BY DEFAULT AS IDENTITY` to define the identity column, any Note the following limitations of identity columns: -- `GENERATED ALWAYS AS IDENTITY`/`GENERATED BY DEFAULT AS IDENTITY` is supported in [`ALTER TABLE ... ADD COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#add-column) statements only when the table being altered is empty, as [CockroachDB does not support back-filling sequential column data]({% link {{ page.version.version }}/known-limitations.md %}#adding-a-column-with-sequence-based-default-values). For more information, see the [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/42508). +- `GENERATED ALWAYS AS IDENTITY`/`GENERATED BY DEFAULT AS IDENTITY` is supported in [`ALTER TABLE ... ADD COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#add-column) statements only when the table being altered is empty, as [CockroachDB does not support back-filling sequential column data]({% link {{ page.version.version }}/known-limitations.md %}#adding-a-column-with-sequence-based-default-values). For more information, see the Tracking GitHub Issue. - Unlike PostgreSQL, CockroachDB does not support using the `OVERRIDING SYSTEM VALUE` clause in `INSERT`/`UPDATE`/`UPSERT` statements to overwrite `GENERATED ALWAYS AS IDENTITY` identity column values. For an example of an identity column, see [Create a table with an identity column](#create-a-table-with-an-identity-column). 
diff --git a/src/current/v23.1/disaster-recovery.md b/src/current/v23.1/disaster-recovery.md index 0b35574d349..2b14e296991 100644 --- a/src/current/v23.1/disaster-recovery.md +++ b/src/current/v23.1/disaster-recovery.md @@ -321,7 +321,7 @@ If your cluster is running, you do not have a backup that encapsulates the time If you have corrupted data in a database or table, [restore]({% link {{ page.version.version }}/restore.md %}) the object from a prior [backup]({% link {{ page.version.version }}/backup.md %}). If revision history is in the backup, you can restore from a [point in time]({% link {{ page.version.version }}/take-backups-with-revision-history-and-restore-from-a-point-in-time.md %}). -Instead of dropping the corrupted table or database, we recommend [renaming the table]({% link {{ page.version.version }}/alter-table.md %}#rename-to) or [renaming the database]({% link {{ page.version.version }}/alter-database.md %}#rename-to) so you have historical data to compare to later. If you drop a database, the database cannot be referenced with `AS OF SYSTEM TIME` queries (see [#51380](https://github.com/cockroachdb/cockroach/issues/51380) for more information), and you will need to take a backup that is backdated to the system time when the database still existed. +Instead of dropping the corrupted table or database, we recommend [renaming the table]({% link {{ page.version.version }}/alter-table.md %}#rename-to) or [renaming the database]({% link {{ page.version.version }}/alter-database.md %}#rename-to) so you have historical data to compare to later. If you drop a database, the database cannot be referenced with `AS OF SYSTEM TIME` queries (see #51380 for more information), and you will need to take a backup that is backdated to the system time when the database still existed. 
{{site.data.alerts.callout_info}} If the table you are restoring has foreign keys, [careful consideration]({% link {{ page.version.version }}/restore.md %}#remove-the-foreign-key-before-restore) should be applied to make sure data integrity is maintained during the restore process. diff --git a/src/current/v23.1/eventlog.md b/src/current/v23.1/eventlog.md index 2dd3c072d82..e7b813ded9d 100644 --- a/src/current/v23.1/eventlog.md +++ b/src/current/v23.1/eventlog.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/eventlog.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/eventlog.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v23.1/example-apps.md b/src/current/v23.1/example-apps.md index bea3cfd1876..0d5ba010933 100644 --- a/src/current/v23.1/example-apps.md +++ b/src/current/v23.1/example-apps.md @@ -14,7 +14,7 @@ Click the links in the tables below to see simple but complete example applicati If you are looking to do a specific task such as connect to the database, insert data, or run multi-statement transactions, see [this list of tasks](#tasks). {{site.data.alerts.callout_info}} -Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM with **partial** support. If you encounter problems, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward full support. +Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM with **partial** support. If you encounter problems, please open an issue with details to help us make progress toward full support. 
Note that tools with [**community-level** support]({% link {{ page.version.version }}/community-tooling.md %}) have been tested or developed by the CockroachDB community, but are not officially supported by Cockroach Labs. If you encounter problems with using these tools, please contact the maintainer of the tool with details. {{site.data.alerts.end}} diff --git a/src/current/v23.1/file-an-issue.md b/src/current/v23.1/file-an-issue.md index 9255e2c08b2..760c5b181db 100644 --- a/src/current/v23.1/file-an-issue.md +++ b/src/current/v23.1/file-an-issue.md @@ -28,7 +28,7 @@ To file an issue in GitHub, we need the following information: ### Template -You can use this as a template for [filing an issue in GitHub](https://github.com/cockroachdb/cockroach/issues/new): +You can use this as a template for filing an issue in GitHub: ~~~ diff --git a/src/current/v23.1/fips.md b/src/current/v23.1/fips.md index e490ddcd296..1ca62a6fa8e 100644 --- a/src/current/v23.1/fips.md +++ b/src/current/v23.1/fips.md @@ -58,7 +58,7 @@ A system must have FIPS mode enabled in the kernel before it can run the FIPS-re If you do not want to use the FIPS-ready CockroachDB Docker image directly, you can create a custom Docker image based on [Red Hat's Universal Base Image 8 Docker image](https://catalog.redhat.com/software/containers/ubi8/ubi/): -- You can model your Dockerfile on the one that Cockroach Labs uses to produce the [FIPS-ready Docker image](https://github.com/cockroachdb/cockroach/blob/master/build/deploy/Dockerfile) for CockroachDB. +- You can model your Dockerfile on the one that Cockroach Labs uses to produce the FIPS-ready Docker image for CockroachDB. - Your Dockerfile must install OpenSSL before it starts the `cockroach` binary. - You must enable FIPS mode on the Docker host kernel before it can run containers with FIPS mode enabled. The FIPS-ready CockroachDB Docker image must run with FIPS mode enabled. 
To enable FIPS mode in the Docker host kernel, refer to [Enable FIPS mode](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/security_hardening/assembly_installing-a-rhel-8-system-with-fips-mode-enabled_security-hardening) in Red Hat's documentation. To verify that FIPS mode is enabled, refer to [Verify that the kernel enforces FIPS mode](#verify-that-the-kernel-enforces-fips-mode). diff --git a/src/current/v23.1/foreign-key.md b/src/current/v23.1/foreign-key.md index d18599494af..ffe1f8e06c8 100644 --- a/src/current/v23.1/foreign-key.md +++ b/src/current/v23.1/foreign-key.md @@ -93,7 +93,7 @@ For matching purposes, composite foreign keys can be in one of three states: For examples showing how these key matching algorithms work, see [Match composite foreign keys with `MATCH SIMPLE` and `MATCH FULL`](#match-composite-foreign-keys-with-match-simple-and-match-full). {{site.data.alerts.callout_info}} -CockroachDB does not support `MATCH PARTIAL`. For more information, see issue [#20305](https://github.com/cockroachdb/cockroach/issues/20305). +CockroachDB does not support `MATCH PARTIAL`. For more information, see issue #20305. {{site.data.alerts.end}} ### Foreign key actions diff --git a/src/current/v23.1/full-text-search.md b/src/current/v23.1/full-text-search.md index 2959def0e33..558d0d120b1 100644 --- a/src/current/v23.1/full-text-search.md +++ b/src/current/v23.1/full-text-search.md @@ -460,7 +460,7 @@ Some PostgreSQL syntax and features are unsupported. These include, but are not - `!! tsquery` comparisons. - `tsquery @> tsquery` and `tsquery <@ tsquery` comparisons. -For full details, see the [tracking issue](https://github.com/cockroachdb/cockroach/issues/41288). +For full details, see the tracking issue. 
## See also diff --git a/src/current/v23.1/functions-and-operators.md b/src/current/v23.1/functions-and-operators.md index 550ba99eb0a..cd841e81d36 100644 --- a/src/current/v23.1/functions-and-operators.md +++ b/src/current/v23.1/functions-and-operators.md @@ -51,7 +51,7 @@ In addition to the built-in functions described in the following sections, Cockr ## Built-in functions -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/functions.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/functions.md{% endcapture %}{% include {{ cockroach_include }} %} ## Aggregate functions @@ -61,11 +61,11 @@ For examples showing how to use aggregate functions, see [the `SELECT` clause do Non-commutative aggregate functions are sensitive to the order in which the rows are processed in the surrounding [`SELECT` clause]({% link {{ page.version.version }}/select-clause.md %}#aggregate-functions). To specify the order in which input rows are processed, you can add an [`ORDER BY`]({% link {{ page.version.version }}/order-by.md %}) clause within the function argument list. For examples, see the [`SELECT` clause]({% link {{ page.version.version }}/select-clause.md %}#order-aggregate-function-input-rows-by-column) documentation. 
{{site.data.alerts.end}} -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/aggregates.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/aggregates.md{% endcapture %}{% include {{ cockroach_include }} %} ## Window functions -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/window_functions.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/window_functions.md{% endcapture %}{% include {{ cockroach_include }} %} ## Operators @@ -137,7 +137,7 @@ The following table lists all CockroachDB operators from highest to lowest prece ### Supported operations -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/operators.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/operators.md{% endcapture %}{% include {{ cockroach_include }} %} {% comment %} ## `CAST()` diff --git a/src/current/v23.1/install-client-drivers.md b/src/current/v23.1/install-client-drivers.md index 984671a0dfc..4d919756ce3 100644 --- a/src/current/v23.1/install-client-drivers.md +++ b/src/current/v23.1/install-client-drivers.md @@ -8,7 +8,7 @@ docs_area: develop CockroachDB supports both native drivers and the PostgreSQL wire protocol, so most available PostgreSQL client drivers and ORM frameworks should work with CockroachDB. Choose a language for supported clients, and follow the installation steps. After you install a client library, you can [connect to the database]({% link {{ page.version.version }}/connect-to-the-database.md %}). {{site.data.alerts.callout_info}} -Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM framework with **partial** support. 
If you encounter problems, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward full support. +Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM framework with **partial** support. If you encounter problems, please open an issue with details to help us make progress toward full support. {{site.data.alerts.end}}
diff --git a/src/current/v23.1/install-cockroachdb-mac.md b/src/current/v23.1/install-cockroachdb-mac.md index 177c2863226..79d5666a5bd 100644 --- a/src/current/v23.1/install-cockroachdb-mac.md +++ b/src/current/v23.1/install-cockroachdb-mac.md @@ -19,7 +19,7 @@ See [Release Notes]({% link releases/{{page.version.version}}.md %}) for what's {% comment %}v22.2.0+{% endcomment %} {{site.data.alerts.callout_danger}} -

On macOS ARM systems, spatial features are disabled due to an issue with macOS code signing for the GEOS libraries. Users needing spatial features on an ARM Mac may instead use Rosetta to run the Intel binary or use the Docker image distribution. Refer to GitHub tracking issue for more information.

+

On macOS ARM systems, spatial features are disabled due to an issue with macOS code signing for the GEOS libraries. Users needing spatial features on an ARM Mac may instead use Rosetta to run the Intel binary or use the Docker image distribution. Refer to GitHub tracking issue for more information.

{{site.data.alerts.end}} {% capture arch_note_homebrew %}

For CockroachDB v22.2.x and above, Homebrew installs binaries for your system architecture, either Intel or ARM (Apple Silicon).

For previous releases, Homebrew installs Intel binaries. Intel binaries can run on ARM systems, but with a significant reduction in performance. CockroachDB on ARM for macOS is experimental and is not yet qualified for production use and not eligible for support or uptime SLA commitments.

{% endcapture %} diff --git a/src/current/v23.1/intellij-idea.md b/src/current/v23.1/intellij-idea.md index de46249320f..ef1ce93b9d4 100644 --- a/src/current/v23.1/intellij-idea.md +++ b/src/current/v23.1/intellij-idea.md @@ -81,7 +81,7 @@ You can now use IntelliJ's [database tool window](https://www.jetbrains.com/help ## Report issues with IntelliJ IDEA & CockroachDB -If you encounter issues other than those outlined above, please [file an issue on the `cockroachdb/cockroach` GitHub repo](https://github.com/cockroachdb/cockroach/issues/new?template=bug_report.md), including the following details about the environment where you encountered the issue: +If you encounter issues other than those outlined above, please file an issue on the `cockroachdb/cockroach` GitHub repo, including the following details about the environment where you encountered the issue: - CockroachDB version ([`cockroach version`]({% link {{ page.version.version }}/cockroach-version.md %})) - IntelliJ IDEA version diff --git a/src/current/v23.1/jsonb.md b/src/current/v23.1/jsonb.md index 221e075ee1d..93f7fe6ec7d 100644 --- a/src/current/v23.1/jsonb.md +++ b/src/current/v23.1/jsonb.md @@ -89,11 +89,11 @@ To [index]({% link {{ page.version.version }}/indexes.md %}) a `JSONB` column yo - You cannot [order]({% link {{ page.version.version }}/order-by.md %}) queries using `JSONB` and `JSON`-typed columns. - [Tracking issue](https://github.com/cockroachdb/cockroach/issues/35706) + Tracking issue - If the execution of a [join]({% link {{ page.version.version }}/joins.md %}) query exceeds the limit set for [memory-buffering operations]({% link {{ page.version.version }}/vectorized-execution.md %}#disk-spilling-operations) (i.e., the value set for the `sql.distsql.temp_storage.workmem` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %})), CockroachDB will spill the intermediate results of computation to disk. 
If the join operation spills to disk, and at least one of the columns is of type `JSON`, CockroachDB returns the error `unable to encode table key: *tree.DJSON`. If the memory limit is not reached, then the query will be processed without error. - [Tracking issue](https://github.com/cockroachdb/cockroach/issues/35706) + Tracking issue ## Examples diff --git a/src/current/v23.1/known-limitations.md b/src/current/v23.1/known-limitations.md index 61251abbb9a..1754b8fc34a 100644 --- a/src/current/v23.1/known-limitations.md +++ b/src/current/v23.1/known-limitations.md @@ -14,31 +14,31 @@ This page describes newly identified limitations in the CockroachDB {{page.relea - [`EXPLAIN ANALYZE`]({% link {{ page.version.version }}/explain-analyze.md %}) does not collect inverted statistics on columns that are indexed with both forward and inverted indexes; only forward statistics are collected for those columns. - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/92036) + Tracking GitHub issue - [`EXPLAIN ANALYZE`]({% link {{ page.version.version }}/explain-analyze.md %}) does not support the `AS OF SYSTEM TIME` syntax. Use [`CREATE STATISTICS ... AS OF SYSTEM TIME`]({% link {{ page.version.version }}/create-statistics.md %}#create-statistics-as-of-a-given-time) instead. - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/96430) + Tracking GitHub issue ### Limitations for index recommendations - [Index]({% link {{ page.version.version }}/indexes.md %}) recommendations are not aware of [hash sharding]({% link {{ page.version.version }}/hash-sharded-indexes.md %}). - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/84681) + Tracking GitHub issue - CockroachDB does not support [index]({% link {{ page.version.version }}/indexes.md %}) recommendations on [`REGIONAL BY ROW` tables]({% link {{ page.version.version }}/table-localities.md %}#regional-by-row-tables). 
- [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/84680) + Tracking GitHub issue ### Limitations for `SELECT FOR UPDATE` - [`SELECT FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) places locks on each key scanned by the base index scan. This means that even if some of those keys are later filtered out by a predicate which could not be pushed into the scan, they will still be locked. - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/75457) + Tracking GitHub issue - [`SELECT FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) only places an unreplicated lock on the index being scanned by the query. This diverges from PostgreSQL, which aquires a lock on all indexes. - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/57031) + Tracking GitHub issue ### Limitations for composite types @@ -50,31 +50,31 @@ This page describes newly identified limitations in the CockroachDB {{page.relea [Common table expressions]({% link {{ page.version.version }}/common-table-expressions.md %}) (CTE), recursive or non-recursive, are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}) (UDF). That is, you cannot use a `WITH` clause in the body of a UDF. -[Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/92961) +Tracking GitHub issue ### Low estimated Request Units are rounded to zero The [Request Units]({% link cockroachcloud/plan-your-cluster-basic.md %}#request-units) (RUs) estimate surfaced in [`EXPLAIN ANALYZE`]({% link {{ page.version.version }}/explain-analyze.md %}) is displayed as an integer value. Because of this, fractional RU estimates, which represent very inexpensive queries, are rounded down to zero. 
-[Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/100617) +Tracking GitHub issue ### `AS OF SYSTEM TIME` does not support placeholders CockroachDB does not support placeholders in [`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}). The time value must be embedded in the SQL string. -[Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/30955) +Tracking GitHub issue ### `null_ordered_last` does not produce correct results with tuples By default, CockroachDB orders `NULL`s before all other values. For compatibility with PostgreSQL, the `null_ordered_last` [session variable]({% link {{ page.version.version }}/set-vars.md %}) was added, which changes the default to order `NULL`s after all other values. This works in most cases, due to some transformations CockroachDB makes in the optimizer to add extra ordering columns. However, it is broken when the ordering column is a tuple. -[Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/93558) +Tracking GitHub issue ### Inverted join for `tsvector` and `tsquery` types is not supported CockroachDB cannot index-accelerate queries with `@@` predicates when both sides of the operator are variables. -[Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/102731) +Tracking GitHub issue ### No guaranteed state switch from `DECOMMISSIONING` to `DECOMMISSIONED` if `node decommission` is interrupted @@ -85,7 +85,7 @@ There is no guaranteed state switch from `DECOMMISSIONING` to `DECOMMISSIONED` i This is because the state flip is effected by the CLI program at the end. Only the CLI (or its underlying API call) is able to finalize the "decommissioned" state. If the command is interrupted, or `--wait=none` is used, the state will only flip to "decommissioned" when the CLI program is run again after decommissioning has done all its work. 
-[Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/94430) +Tracking GitHub issue ### Execution locality in changefeeds @@ -101,15 +101,15 @@ This is because the state flip is effected by the CLI program at the end. Only t - Expressions (column, index, constraint) in tables. - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/87699) + Tracking GitHub issue - Views. - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/87699) + Tracking GitHub issue - Other user-defined functions. - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/93049) + Tracking GitHub issue #### Limitations on expressions allowed within UDFs @@ -117,15 +117,15 @@ The following are not currently allowed within the body of a [UDF]({% link {{ pa - Mutation statements such as `INSERT`, `UPDATE`, `DELETE`, and `UPSERT`. - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/87289) + Tracking GitHub issue - Common table expressions (CTEs). - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/92961) + Tracking GitHub issue - References to other user-defined functions. - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/93049) + Tracking GitHub issue ### Table-level restore will not restore user-defined functions @@ -135,7 +135,7 @@ The following are not currently allowed within the body of a [UDF]({% link {{ pa In cases where the partition definition includes a comparison with `NULL` and a query constraint, incorrect query plans are returned. However, this case uses non-standard partitioning which defines partitions which could never hold values, so it is not likely to occur in production environments. 
-[Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/82774) +Tracking GitHub issue ### Limitations for `DROP OWNED BY` @@ -149,7 +149,7 @@ In cases where the partition definition includes a comparison with `NULL` and a [Spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the [GEOS](https://libgeos.org/) libraries. Users needing spatial features on an ARM Mac may instead [use Rosetta](https://developer.apple.com/documentation/virtualization/running_intel_binaries_in_linux_vms_with_rosetta) to [run the Intel binary]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#install-binary) or use the [Docker image]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#use-docker) distribution. This is expected to be resolved in an upcoming 22.2 patch release. -[GitHub tracking issue](https://github.com/cockroachdb/cockroach/issues/93161) +GitHub tracking issue ### Limited SQL cursor support @@ -165,13 +165,13 @@ The following PostgreSQL syntax and features are currently unsupported for [trig {% include {{ page.version.version }}/sql/trigram-unsupported-syntax.md %} -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/41285) +Tracking GitHub Issue ### A multi-region table cannot be restored into a non-multi-region table You cannot [restore]({% link {{ page.version.version }}/restore.md %}) a multi-region table into a non-multi-region table. 
-[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/71502) +Tracking GitHub Issue ### Statements containing multiple modification subqueries of the same table are disallowed @@ -182,25 +182,25 @@ Statements containing multiple modification subqueries mutating the same row cou Note that if multiple mutations inside the same statement affect different tables with [`FOREIGN KEY`]({% link {{ page.version.version }}/foreign-key.md %}) relations and `ON CASCADE` clauses between them, the results will be different from what is expected in PostgreSQL. -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/70731) +Tracking GitHub Issue ### `transaction_rows_read_err` and `transaction_rows_written_err` do not halt query execution The `transaction_rows_read_err` and `transaction_rows_written_err` [session settings]({% link {{ page.version.version }}/set-vars.md %}) limit the number of rows read or written by a single [transaction]({% link {{ page.version.version }}/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction). These session settings will fail the transaction with an error, but not until the current query finishes executing and the results have been returned to the client. -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/70473) +Tracking GitHub Issue ### `sql.guardrails.max_row_size_err` misses indexed virtual computed columns The `sql.guardrails.max_row_size_err` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) misses large rows caused by indexed virtual computed columns. This is because the guardrail only checks the size of primary key rows, not secondary index rows. 
-[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/69540) +Tracking GitHub Issue ### CockroachDB does not allow inverted indexes with `STORING` CockroachDB does not allow inverted indexes with a [`STORING` column]({% link {{ page.version.version }}/create-index.md %}#store-columns). -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/88278) +Tracking GitHub Issue ### CockroachDB does not properly optimize some left and anti joins with GIN indexes @@ -351,7 +351,7 @@ UNION ALL SELECT * FROM t1 LEFT JOIN t2 ON st_contains(t1.geom, t2.geom) AND t2. (54 rows) ``` -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/59649) +Tracking GitHub Issue ### Using `RESTORE` with multi-region table localities @@ -359,7 +359,7 @@ UNION ALL SELECT * FROM t1 LEFT JOIN t2 ON st_contains(t1.geom, t2.geom) AND t2. - {% include {{ page.version.version }}/known-limitations/restore-multiregion-match.md %} -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/71071) +Tracking GitHub Issue ### `SET` does not `ROLLBACK` in a transaction @@ -395,8 +395,8 @@ UNION ALL SELECT * FROM t1 LEFT JOIN t2 ON st_contains(t1.geom, t2.geom) AND t2. {% include {{page.version.version}}/known-limitations/stats-refresh-upgrade.md %} {% include {{ page.version.version }}/known-limitations/forecasted-stats-limitations.md %} -- When a table is dropped, the related rows in `system.table_statistics` are not deleted. CockroachDB does not delete historical statistics. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/94195) -- CockroachDB does not collect statistics for [virtual computed columns]({% link {{ page.version.version }}/computed-columns.md %}). 
This can prevent the [optimizer]({% link {{ page.version.version }}/cost-based-optimizer.md %}) from accurately calculating the cost of scanning an index on a virtual column, and, transitively, the cost of scanning an [expression index]({% link {{ page.version.version }}/expression-indexes.md %}). [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/68254) +- When a table is dropped, the related rows in `system.table_statistics` are not deleted. CockroachDB does not delete historical statistics. Tracking GitHub issue +- CockroachDB does not collect statistics for [virtual computed columns]({% link {{ page.version.version }}/computed-columns.md %}). This can prevent the [optimizer]({% link {{ page.version.version }}/cost-based-optimizer.md %}) from accurately calculating the cost of scanning an index on a virtual column, and, transitively, the cost of scanning an [expression index]({% link {{ page.version.version }}/expression-indexes.md %}). Tracking GitHub issue ### Differences in syntax and behavior between CockroachDB and PostgreSQL @@ -408,7 +408,7 @@ For a list of known differences in syntax and behavior between CockroachDB and P CockroachDB does not currently support multiple arbiter indexes for [`INSERT ON CONFLICT DO UPDATE`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause), and will return an error if there are multiple unique or exclusion constraints matching the `ON CONFLICT DO UPDATE` specification. -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/53170) +Tracking GitHub Issue ### Spatial support limitations @@ -416,41 +416,41 @@ CockroachDB supports efficiently storing and querying [spatial data]({% link {{ - Not all [PostGIS spatial functions](https://postgis.net/docs/reference.html) are supported. 
- [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/49203) + Tracking GitHub Issue - The `AddGeometryColumn` [spatial function]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) only allows constant arguments. - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/49402) + Tracking GitHub Issue - The `AddGeometryColumn` spatial function only allows the `true` value for its `use_typmod` parameter. - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/49448) + Tracking GitHub Issue - CockroachDB does not support the `@` operator. Instead of using `@` in spatial expressions, we recommend using the inverse, with `~`. For example, instead of `a @ b`, use `b ~ a`. - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/56124) + Tracking GitHub Issue - CockroachDB does not yet support [`INSERT`]({% link {{ page.version.version }}/insert.md %})s into the [`spatial_ref_sys` table]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-system-tables). This limitation also blocks the [`ogr2ogr -f PostgreSQL` file conversion command](https://gdal.org/programs/ogr2ogr.html#cmdoption-ogr2ogr-f). - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/55903) + Tracking GitHub Issue - CockroachDB does not yet support Triangle or [`TIN`](https://wikipedia.org/wiki/Triangulated_irregular_network) spatial shapes. - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/56196) + Tracking GitHub Issue - CockroachDB does not yet support Curve, MultiCurve, or CircularString spatial shapes. - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/56199) + Tracking GitHub Issue - CockroachDB does not yet support [k-nearest neighbors](https://wikipedia.org/wiki/K-nearest_neighbors_algorithm). 
- [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/55227) + Tracking GitHub Issue - CockroachDB does not support using [schema name prefixes]({% link {{ page.version.version }}/sql-name-resolution.md %}#how-name-resolution-works) to refer to [data types]({% link {{ page.version.version }}/data-types.md %}) with type modifiers (e.g., `public.geometry(linestring, 4326)`). Instead, use fully-unqualified names to refer to data types with type modifiers (e.g., `geometry(linestring,4326)`). Note that, in [`IMPORT PGDUMP`]({% link molt/migrate-to-cockroachdb.md %}) output, [`GEOMETRY` and `GEOGRAPHY`]({% link {{ page.version.version }}/export-spatial-data.md %}) data type names are prefixed by `public.`. If the type has a type modifier, you must remove the `public.` from the type name in order for the statements to work in CockroachDB. - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/56492) + Tracking GitHub Issue ### Enterprise `BACKUP` does not capture database/table/column comments @@ -458,7 +458,7 @@ The [`COMMENT ON`]({% link {{ page.version.version }}/comment-on.md %}) statemen As a workaround, take a cluster backup instead, as the `system.comments` table is included in cluster backups. -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/44396) +Tracking GitHub Issue ### `SHOW BACKUP` does not support symlinks for nodelocal @@ -474,21 +474,21 @@ The use of tables with very large primary or secondary index keys (>32KB) can re To work around this issue, we recommend limiting the size of primary and secondary keys to 4KB, which you must account for manually. Note that most columns are 8B (exceptions being `STRING` and `JSON`), which still allows for very complex key structures. 
-[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/30515) +Tracking GitHub Issue ### Using `LIKE...ESCAPE` in `WHERE` and `HAVING` constraints CockroachDB tries to optimize most comparisons operators in `WHERE` and `HAVING` clauses into constraints on SQL indexes by only accessing selected rows. This is done for `LIKE` clauses when a common prefix for all selected rows can be determined in the search pattern (e.g., `... LIKE 'Joe%'`). However, this optimization is not yet available if the `ESCAPE` keyword is also used. -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/30192) +Tracking GitHub Issue -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/35706) +Tracking GitHub Issue ### Current sequence value not checked when updating min/max value Altering the minimum or maximum value of a series does not check the current value of a series. This means that it is possible to silently set the maximum to a value less than, or a minimum value greater than, the current value. -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/23719) +Tracking GitHub Issue ### Using `default_int_size` session variable in batch of statements @@ -496,7 +496,7 @@ When setting the `default_int_size` [session variable]({% link {{ page.version.v As a workaround, set `default_int_size` via your database driver, or ensure that `SET default_int_size` is in its own statement. 
-[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/32846) +Tracking GitHub Issue ### `COPY` syntax not supported by CockroachDB @@ -510,7 +510,7 @@ As a workaround, set `default_int_size` via your database driver, or ensure that {% include {{ page.version.version }}/known-limitations/partitioning-with-placeholders.md %} -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/19464) +Tracking GitHub Issue ### Dropping a single partition @@ -545,7 +545,7 @@ ERROR: nextval(): unimplemented: cannot evaluate scalar expressions containing s SQLSTATE: 0A000 ~~~ -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/42508) +Tracking GitHub Issue ### Available capacity metric in the DB Console @@ -621,7 +621,7 @@ Many string operations are not properly overloaded for [collated strings]({% lin pq: unsupported binary operator: || ~~~ -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/10679) +Tracking GitHub Issue ### Max size of a single column family @@ -653,10 +653,10 @@ Transactions with [priority `HIGH`]({% link {{ page.version.version }}/transacti ERROR: unimplemented: cannot use ROLLBACK TO SAVEPOINT in a HIGH PRIORITY transaction containing DDL SQLSTATE: 0A000 HINT: You have attempted to use a feature that is not yet implemented. -See: https://github.com/cockroachdb/cockroach/issues/46414 +See: cockroach#46414 ~~~ -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/46414) +Tracking GitHub Issue ### CockroachDB does not test for all connection failure scenarios @@ -668,7 +668,7 @@ However, if there is no host at the target IP address, or if a firewall rule blo - Configure any active network firewalls to allow node-to-node traffic. - Verify that orchestration tools (e.g., Kubernetes) are configured to use the correct network connection information. 
-[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/53410) +Tracking GitHub Issue ### Some column-dropping schema changes do not roll back properly @@ -676,11 +676,11 @@ Some [schema changes]({% link {{ page.version.version }}/online-schema-changes.m In some cases, the rollback will succeed, but the column data might be partially or totally missing, or stale due to the asynchronous nature of the schema change. -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/46541) +Tracking GitHub Issue In other cases, the rollback will fail in such a way that will never be cleaned up properly, leaving the table descriptor in a state where no other schema changes can be run successfully. -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/47712) +Tracking GitHub Issue To reduce the chance that a column drop will roll back incorrectly: diff --git a/src/current/v23.1/kubernetes-performance.md b/src/current/v23.1/kubernetes-performance.md index 47aef82797d..356ecb6e2fb 100644 --- a/src/current/v23.1/kubernetes-performance.md +++ b/src/current/v23.1/kubernetes-performance.md @@ -20,9 +20,9 @@ Before you focus on optimizing a Kubernetes-orchestrated CockroachDB cluster: A number of independent factors affect performance when running CockroachDB on Kubernetes. Most are easiest to change before you create your CockroachDB cluster. If you need to modify a CockroachDB cluster that is already running on Kubernetes, extra care and testing is strongly recommended. -The following sections show how to modify excerpts from our provided Kubernetes configuration YAML files. 
You can find the most up-to-date versions of these files on GitHub: [one for running CockroachDB in secure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset-secure.yaml) and one for [running CockroachDB in insecure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml). +The following sections show how to modify excerpts from our provided Kubernetes configuration YAML files. You can find the most up-to-date versions of these files on GitHub: [one for running CockroachDB in secure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset-secure.yaml) and one for [running CockroachDB in insecure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml). -You can also use a [performance-optimized configuration file for secure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml) or [insecure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml). Be sure to modify the file wherever there is a `TODO` comment. +You can also use a [performance-optimized configuration file for secure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml) or [insecure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml). Be sure to modify the file wherever there is a `TODO` comment. 
### Version of CockroachDB @@ -332,7 +332,7 @@ If for some reason setting appropriate resource requests still isn't getting you #### Client applications on the same machines as CockroachDB -Running client applications such as benchmarking applications on the same machines as CockroachDB can be even worse than just having Kubernetes system pods on the same machines. They are very likely to end up competing for resources, because when the applications get more loaded than usual, so will the CockroachDB processes. The best way to avoid this is to [set resource requests and limits](#resource-requests-and-limits), but if you are unwilling or unable to do that for some reason, you can also set [anti-affinity scheduling policies](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) on your client applications. Anti-affinity policies are placed in the pod spec, so if you wanted to change our provided example load generator app, you'd change [these lines](https://github.com/cockroachdb/cockroach/blob/98c506c48f3517d1ac1aadb6a09e1b23ad672c37/cloud/kubernetes/example-app.yaml#L11-L12): +Running client applications such as benchmarking applications on the same machines as CockroachDB can be even worse than just having Kubernetes system pods on the same machines. They are very likely to end up competing for resources, because when the applications get more loaded than usual, so will the CockroachDB processes. The best way to avoid this is to [set resource requests and limits](#resource-requests-and-limits), but if you are unwilling or unable to do that for some reason, you can also set [anti-affinity scheduling policies](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) on your client applications. 
Anti-affinity policies are placed in the pod spec, so if you wanted to change our provided example load generator app, you'd change [these lines](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml): ~~~ yaml spec: diff --git a/src/current/v23.1/log-formats.md b/src/current/v23.1/log-formats.md index 7c715f38c89..cfcf8512b19 100644 --- a/src/current/v23.1/log-formats.md +++ b/src/current/v23.1/log-formats.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/logformats.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/logformats.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v23.1/logging.md b/src/current/v23.1/logging.md index a9b22c4c05f..357817bc212 100644 --- a/src/current/v23.1/logging.md +++ b/src/current/v23.1/logging.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/logging.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/logging.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v23.1/migrate-from-avro.md b/src/current/v23.1/migrate-from-avro.md index 0ff5cc230bc..82e6e8cd09f 100644 --- a/src/current/v23.1/migrate-from-avro.md +++ b/src/current/v23.1/migrate-from-avro.md @@ -163,7 +163,7 @@ There are additional import [options][option] you can use when importing binary - `records_terminated_by`, which specifies the unicode character used to indicate new lines in the input binary or JSON file (default: `\n`). 
{{site.data.alerts.callout_info}} -The following example uses sample data generated by [Avro tools](https://github.com/cockroachdb/cockroach/tree/master/pkg/ccl/importccl/testdata/avro). +The following example uses sample data generated by Avro tools. {{site.data.alerts.end}} For example, to import the data from `simple-schema.json` into a `simple` table, first [create the table]({% link {{ page.version.version }}/create-table.md %}) to import into. Then run `IMPORT INTO` with the following options: diff --git a/src/current/v23.1/monitor-cockroachdb-kubernetes.md b/src/current/v23.1/monitor-cockroachdb-kubernetes.md index c36cdcabae0..6249e1f82b1 100644 --- a/src/current/v23.1/monitor-cockroachdb-kubernetes.md +++ b/src/current/v23.1/monitor-cockroachdb-kubernetes.md @@ -128,7 +128,7 @@ If you're on Hosted GKE, before starting, make sure the email address associated {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/prometheus.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/prometheus.yaml ~~~ {{site.data.alerts.callout_info}} @@ -177,14 +177,14 @@ If you're on Hosted GKE, before starting, make sure the email address associated ## Configure Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. +Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. 
This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. -1. Download our alertmanager-config.yaml configuration file: +1. Download our alertmanager-config.yaml configuration file: {% include_cached copy-clipboard.html %} ~~~ shell $ curl -O \ - https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager-config.yaml + https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager-config.yaml ~~~ 1. Edit the `alertmanager-config.yaml` file to [specify the desired receivers for notifications](https://prometheus.io/docs/alerting/configuration/#receiver). Initially, the file contains a placeholder web hook. @@ -214,12 +214,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen The name of the secret, `alertmanager-cockroachdb`, must match the name used in the `alertmanager.yaml` file. If they differ, the Alertmanager instance will start without configuration, and nothing will happen. {{site.data.alerts.end}} -1. Use our [`alertmanager.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: +1. 
Use our [`alertmanager.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml ~~~ ~~~ @@ -244,12 +244,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen Alertmanager -1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml): +1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml): {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alert-rules.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml ~~~ ~~~ diff --git a/src/current/v23.1/monitor-cockroachdb-with-prometheus.md b/src/current/v23.1/monitor-cockroachdb-with-prometheus.md index 493b6cce1c0..0f59d33c8d5 100644 --- a/src/current/v23.1/monitor-cockroachdb-with-prometheus.md +++ b/src/current/v23.1/monitor-cockroachdb-with-prometheus.md @@ -15,7 +15,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration - Make sure you have already started a CockroachDB cluster, either [locally]({% link {{ page.version.version }}/start-a-local-cluster.md %}) or in a [production environment]({% link {{ page.version.version 
}}/manual-deployment.md %}). -- Note that all files used in this tutorial can be found in the [`monitoring`](https://github.com/cockroachdb/cockroach/tree/master/monitoring) directory of the CockroachDB repository. +- Note that all files used in this tutorial can be found in the `monitoring` directory of the CockroachDB repository. ## Step 1. Install Prometheus @@ -39,11 +39,11 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration ## Step 2. Configure Prometheus -1. Download the starter [Prometheus configuration file](https://github.com/cockroachdb/cockroach/blob/master/monitoring/prometheus.yml) for CockroachDB: +1. Download the starter [Prometheus configuration file](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/prometheus.yml) for CockroachDB: {% include_cached copy-clipboard.html %} ~~~ shell - $ wget https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/prometheus.yml \ + $ wget https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/prometheus.yml \ -O prometheus.yml ~~~ @@ -61,7 +61,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration Production cluster | Change the `targets` field to include `'<hostname>:8080'` for each node in the cluster. Also, be sure your network configuration allows TCP communication on the specified ports. Secure cluster | Uncomment `scheme: 'https'` and comment out `scheme: 'http'`. -1. Create a `rules` directory and download the [aggregation rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/aggregation.rules.yml) and [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/alerts.rules.yml) for CockroachDB into it: +1.
Create a `rules` directory and download the [aggregation rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/aggregation.rules.yml) and [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml) for CockroachDB into it: {% include_cached copy-clipboard.html %} ~~~ shell @@ -75,12 +75,12 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration {% include_cached copy-clipboard.html %} ~~~ shell - $ wget -P rules https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/rules/aggregation.rules.yml + $ wget -P rules https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/rules/aggregation.rules.yml ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - $ wget -P rules https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/rules/alerts.rules.yml + $ wget -P rules https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml ~~~ ## Step 3. Start Prometheus @@ -109,7 +109,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration ## Step 4. Send notifications with Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. In step 2, you already downloaded CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/alerts.rules.yml). Now, download, configure, and start [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). +Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. 
In step 2, you already downloaded CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml). Now, download, configure, and start [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). 1. Download the [latest Alertmanager tarball](https://prometheus.io/download/#alertmanager) for your OS. @@ -174,29 +174,29 @@ Although Prometheus lets you graph metrics, [Grafana](https://grafana.com/) is a Url | `http://<hostname>:9090` Access | Direct -1. Download the starter [Grafana dashboards](https://github.com/cockroachdb/cockroach/tree/master/monitoring/grafana-dashboards/by-cluster) for CockroachDB: +1. Download the starter Grafana dashboards for CockroachDB: {% include_cached copy-clipboard.html %} ~~~ shell - $ wget https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/runtime.json + $ wget https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/runtime.json # runtime dashboard: node status, including uptime, memory, and cpu. ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - $ wget https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/storage.json + $ wget https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/storage.json # storage dashboard: storage availability. ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - $ wget https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/sql.json + $ wget https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/sql.json # sql dashboard: sql queries/transactions.
~~~ {% include_cached copy-clipboard.html %} ~~~ shell - $ wget https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/replication.json + $ wget https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/replication.json # replicas dashboard: replica information and operations. ~~~ diff --git a/src/current/v23.1/monitoring-and-alerting.md b/src/current/v23.1/monitoring-and-alerting.md index 43f2ae9b4a7..908e7048b65 100644 --- a/src/current/v23.1/monitoring-and-alerting.md +++ b/src/current/v23.1/monitoring-and-alerting.md @@ -969,7 +969,7 @@ Start Prometheus and Alertmanager to begin watching for events to alert on. You ### Events to alert on {{site.data.alerts.callout_info}} -Currently, not all events listed have corresponding alert rule definitions available from the `api/v2/rules/` endpoint. Many events not yet available in this manner are defined in the pre-defined alerting rules. For more details, see [Monitor CockroachDB with Prometheus]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}). +Currently, not all events listed have corresponding alert rule definitions available from the `api/v2/rules/` endpoint. Many events not yet available in this manner are defined in the pre-defined alerting rules. For more details, see [Monitor CockroachDB with Prometheus]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}). {{site.data.alerts.end}} #### Node is down @@ -978,7 +978,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** If a node is down, its `_status/vars` endpoint will return a `Connection refused` error. Otherwise, the `liveness_livenodes` metric will be the total number of live nodes in the cluster. -- **Rule definition:** Use the `InstanceDead` alert from our pre-defined alerting rules. 
+- **Rule definition:** Use the `InstanceDead` alert from our pre-defined alerting rules. #### Node is restarting too frequently @@ -986,7 +986,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the number of times the `sys_uptime` metric in the node's `_status/vars` output was reset back to zero. The `sys_uptime` metric gives you the length of time, in seconds, that the `cockroach` process has been running. -- **Rule definition:** Use the `InstanceFlapping` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `InstanceFlapping` alert from our pre-defined alerting rules. #### Node is running low on disk space @@ -994,7 +994,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Divide the `capacity` metric by the `capacity_available` metric in the node's `_status/vars` output. -- **Rule definition:** Use the `StoreDiskLow` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `StoreDiskLow` alert from our pre-defined alerting rules. {% include {{page.version.version}}/storage/free-up-disk-space.md %} @@ -1010,7 +1010,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the `security_certificate_expiration_ca` metric in the node's `_status/vars` output. -- **Rule definition:** Use the `CACertificateExpiresSoon` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `CACertificateExpiresSoon` alert from our pre-defined alerting rules. #### Node certificate expires soon @@ -1018,7 +1018,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the `security_certificate_expiration_node` metric in the node's `_status/vars` output. -- **Rule definition:** Use the `NodeCertificateExpiresSoon` alert from our pre-defined alerting rules. 
+- **Rule definition:** Use the `NodeCertificateExpiresSoon` alert from our pre-defined alerting rules. #### Changefeed is experiencing high latency diff --git a/src/current/v23.1/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md b/src/current/v23.1/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md index 272d50f856a..2a37d12a609 100644 --- a/src/current/v23.1/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md +++ b/src/current/v23.1/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md @@ -71,7 +71,7 @@ Instead of using this approach, you can now [enable global access](https://cloud Our multi-region deployment approach relies on pod IP addresses being routable across three distinct Kubernetes clusters and regions. Both the hosted Google Kubernetes Engine (GKE) and Amazon Elastic Kubernetes Service (EKS) satisfy this requirement. -If you want to run on another cloud or on-premises, use this [basic network test](https://github.com/cockroachdb/cockroach/tree/master/cloud/kubernetes/multiregion#pod-to-pod-connectivity) to see if it will work. +If you want to run on another cloud or on-premises, use this basic network test to see if it will work.
1. Complete the **Before You Begin** steps described in the [Google Kubernetes Engine Quickstart](https://cloud.google.com/kubernetes-engine/docs/quickstart) documentation. @@ -322,21 +322,21 @@ This important rule enables node communication between Kubernetes clusters in di The Kubernetes cluster in each region needs to have a [Network Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/introduction.html) pointed at its CoreDNS service, which you will configure in the next step. -1. Upload our load balancer manifest [`dns-lb-eks.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml) to the Kubernetes clusters in all 3 regions: +1. Upload our load balancer manifest [`dns-lb-eks.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml) to the Kubernetes clusters in all 3 regions: {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context <cluster-context-1> + kubectl apply -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context <cluster-context-1> ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context <cluster-context-2> + kubectl apply -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context <cluster-context-2> ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context <cluster-context-3> + kubectl apply -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context <cluster-context-3> ~~~ You should see the load balancer appear in the Load Balancers section of the EC2 console in each region. This load balancer will route traffic to CoreDNS in the region. @@ -369,11 +369,11 @@ Each Kubernetes cluster has a [CoreDNS](https://coredns.io/) service that respon To enable traffic forwarding to CockroachDB pods in all 3 regions, you need to [modify the ConfigMap](https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configmap-options) for the CoreDNS Corefile in each region. -1. Download and open our ConfigMap template [`configmap.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/configmap.yaml): +1. Download and open our ConfigMap template [`configmap.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/configmap.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/configmap.yaml + curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/configmap.yaml ~~~ 1. After [obtaining the IP addresses of the Network Load Balancers](#set-up-load-balancing) in all 3 regions, you can use this information to define a **separate ConfigMap for each region**. Each unique ConfigMap lists the forwarding addresses for the pods in the 2 other regions.
@@ -467,7 +467,7 @@ If you plan to run your instances exclusively on private subnets, set the follow {% include_cached copy-clipboard.html %} ~~~ shell $ curl -OOOOOOOOO \ - https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/{README.md,client-secure.yaml,cluster-init-secure.yaml,cockroachdb-statefulset-secure.yaml,dns-lb.yaml,example-app-secure.yaml,external-name-svc.yaml,setup.py,teardown.py} + https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/{README.md,client-secure.yaml,cluster-init-secure.yaml,cockroachdb-statefulset-secure.yaml,dns-lb.yaml,example-app-secure.yaml,external-name-svc.yaml,setup.py,teardown.py} ~~~ 1. Retrieve the `kubectl` "contexts" for your clusters: @@ -685,11 +685,11 @@ The below steps use [`cockroach cert` commands]({% link {{ page.version.version ### Create StatefulSets -1. Download and open our [multi-region StatefulSet configuration](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml). You'll save three versions of this file locally, one for each set of 3 CockroachDB nodes per region. +1. Download and open our [multi-region StatefulSet configuration](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml). You'll save three versions of this file locally, one for each set of 3 CockroachDB nodes per region. {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml ~~~ Look for **TODO** comments in the file. 
These highlight fields you need to define before deploying your StatefulSet. @@ -814,7 +814,7 @@ The pod uses the `root` client certificate created earlier by the `setup.py` scr {% include_cached copy-clipboard.html %} ~~~ shell - kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/client-secure.yaml --context --namespace + kubectl create -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/client-secure.yaml --context --namespace ~~~ ~~~ diff --git a/src/current/v23.1/partial-indexes.md b/src/current/v23.1/partial-indexes.md index ba2b096d089..137429b4674 100644 --- a/src/current/v23.1/partial-indexes.md +++ b/src/current/v23.1/partial-indexes.md @@ -88,9 +88,9 @@ You can force queries [to use a specific partial index]({% link {{ page.version. ## Known limitations -- CockroachDB does not currently support [`IMPORT`]({% link {{ page.version.version }}/import.md %}) statements on tables with partial indexes. See [tracking issue](https://github.com/cockroachdb/cockroach/issues/50225). -- CockroachDB does not currently support multiple arbiter indexes for `INSERT ON CONFLICT DO UPDATE`, and will return an error if there are multiple unique or exclusion constraints matching the `ON CONFLICT DO UPDATE` specification. See [tracking issue](https://github.com/cockroachdb/cockroach/issues/53170). -- CockroachDB prevents a column from being dropped using [`ALTER TABLE ... DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) if it is referenced by a partial index predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). See [tracking issue](https://github.com/cockroachdb/cockroach/issues/97813). 
+- CockroachDB does not currently support [`IMPORT`]({% link {{ page.version.version }}/import.md %}) statements on tables with partial indexes. See tracking issue. +- CockroachDB does not currently support multiple arbiter indexes for `INSERT ON CONFLICT DO UPDATE`, and will return an error if there are multiple unique or exclusion constraints matching the `ON CONFLICT DO UPDATE` specification. See tracking issue. +- CockroachDB prevents a column from being dropped using [`ALTER TABLE ... DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) if it is referenced by a partial index predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). See tracking issue. ## Examples diff --git a/src/current/v23.1/postgresql-compatibility.md b/src/current/v23.1/postgresql-compatibility.md index 319e81338f0..56cf103b710 100644 --- a/src/current/v23.1/postgresql-compatibility.md +++ b/src/current/v23.1/postgresql-compatibility.md @@ -34,11 +34,11 @@ When set to `true`, multiple portals can be open at the same time, with their ex This feature has the following limitations: - Only read-only [`SELECT` queries]({% link {{ page.version.version }}/selection-queries.md %}) without [subqueries]({% link {{ page.version.version }}/subqueries.md %}) are supported. 
-- Postqueries (which are how CockroachDB executes [foreign key checks]({% link {{ page.version.version }}/foreign-key.md %}), for example) are not supported - [cockroachdb/cockroach#96398](https://github.com/cockroachdb/cockroach/issues/96398) -- [Distributed SQL execution]({% link {{ page.version.version }}/architecture/sql-layer.md %}#distsql) is not supported for multiple active portals; instead queries execute on the [gateway node]({% link {{ page.version.version }}/architecture/life-of-a-distributed-transaction.md %}#gateway) only - [cockroachdb/cockroach#100822](https://github.com/cockroachdb/cockroach/issues/100822) -- Only the latest execution of a statement from a pausable portal is recorded by the [trace infrastructure]({% link {{ page.version.version }}/show-trace.md %}) - [cockroachdb/cockroach#99404](https://github.com/cockroachdb/cockroach/issues/99404) +- Postqueries (which are how CockroachDB executes [foreign key checks]({% link {{ page.version.version }}/foreign-key.md %}), for example) are not supported - cockroachdb/cockroach#96398 +- [Distributed SQL execution]({% link {{ page.version.version }}/architecture/sql-layer.md %}#distsql) is not supported for multiple active portals; instead queries execute on the [gateway node]({% link {{ page.version.version }}/architecture/life-of-a-distributed-transaction.md %}#gateway) only - cockroachdb/cockroach#100822 +- Only the latest execution of a statement from a pausable portal is recorded by the [trace infrastructure]({% link {{ page.version.version }}/show-trace.md %}) - cockroachdb/cockroach#99404 -In addition to the known issues, additional performance testing is needed. The current list of known issues can be viewed [on GitHub using the `A-pausable-portals` label](https://github.com/cockroachdb/cockroach/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc++label%3AA-pausable-portals+), and we welcome bug reports. +In addition to the known issues, further performance testing is needed. 
The current list of known issues can be viewed on GitHub using the `A-pausable-portals` label, and we welcome bug reports. ## Features that differ from PostgreSQL @@ -181,7 +181,7 @@ An `x` value less than `1` would result in the following error: pq: check constraint violated ~~~ -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/35370) +Tracking GitHub Issue ### Column name from an outer column inside a subquery @@ -207,7 +207,7 @@ PostgreSQL: 1 ~~~ -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/46563) +Tracking GitHub Issue ### SQL Compatibility diff --git a/src/current/v23.1/query-spatial-data.md b/src/current/v23.1/query-spatial-data.md index b4e926c9384..d4c40d031ba 100644 --- a/src/current/v23.1/query-spatial-data.md +++ b/src/current/v23.1/query-spatial-data.md @@ -24,7 +24,7 @@ Just as CockroachDB strives for [PostgreSQL compatibility]({% link {{ page.versi CockroachDB does not implement the full list of PostGIS built-in functions and operators. Also, [spatial indexing works differently]({% link {{ page.version.version }}/spatial-indexes.md %}) (see the [Performance](#performance) section below). For a list of the spatial functions CockroachDB supports, see [Geospatial functions]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions). -If your application needs support for functions that are not yet implemented, check the [meta-issue for built-in function support on GitHub](https://github.com/cockroachdb/cockroach/issues/49203), which describes how to find an issue for the built-in function(s) you need. +If your application needs support for functions that are not yet implemented, check the meta-issue for built-in function support on GitHub, which describes how to find an issue for the built-in function(s) you need. For a list of other known limitations, see [Known Limitations]({% link {{ page.version.version }}/known-limitations.md %}#spatial-support-limitations). 
diff --git a/src/current/v23.1/restore.md b/src/current/v23.1/restore.md index f78885b94eb..8904c466ed5 100644 --- a/src/current/v23.1/restore.md +++ b/src/current/v23.1/restore.md @@ -219,11 +219,11 @@ When restoring an individual table that references a user-defined type (e.g., [` - If there is an existing type in the cluster with the same name that is compatible with the type in the backup, CockroachDB will map the type in the backup to the type in the cluster. - If there is an existing type in the cluster with the same name but it is _not_ compatible with the type in the backup, the restore will not succeed and you will be asked to resolve the naming conflict. You can do this by either [dropping]({% link {{ page.version.version }}/drop-type.md %}) or [renaming]({% link {{ page.version.version }}/alter-type.md %}) the existing user-defined type. -In general, two types are compatible if they are the same kind (e.g., an enum is only compatible with other enums). Additionally, enums are only compatible if they have the same ordered set of elements that have also been [created in the same way](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20200331_enums.md#physical-layout). For example: +In general, two types are compatible if they are the same kind (e.g., an enum is only compatible with other enums). Additionally, enums are only compatible if they have the same ordered set of elements that have also been [created in the same way](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20200331_enums.md#physical-layout). For example: - `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes', 'no')` are compatible. - `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('no', 'yes')` are not compatible. -- `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes'); ALTER TYPE t2 ADD VALUE ('no')` are not compatible because they were not created in the same way. 
+- `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes'); ALTER TYPE t2 ADD VALUE ('no')` are not compatible because they were not [created in the same way](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20200331_enums.md#physical-layout). ### Object dependencies @@ -588,7 +588,7 @@ For more detail on using this option with `BACKUP`, see [Incremental backups wit ## Known limitations -- To successfully [restore a table into a multi-region database](#restoring-to-multi-region-databases), it is necessary for the order and regions to match between the source and destination database. See the [Known Limitations]({% link {{ page.version.version }}/known-limitations.md %}#using-restore-with-multi-region-table-localities) page for detail on ordering and matching regions. [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/71071) +- To successfully [restore a table into a multi-region database](#restoring-to-multi-region-databases), it is necessary for the order and regions to match between the source and destination database. See the [Known Limitations]({% link {{ page.version.version }}/known-limitations.md %}#using-restore-with-multi-region-table-localities) page for detail on ordering and matching regions. 
Tracking GitHub Issue - {% include {{ page.version.version }}/known-limitations/restore-tables-non-multi-reg.md %} - {% include {{ page.version.version }}/known-limitations/restore-udf.md %} diff --git a/src/current/v23.1/schedule-cockroachdb-kubernetes.md b/src/current/v23.1/schedule-cockroachdb-kubernetes.md index 973d234ffa9..de53f7c8449 100644 --- a/src/current/v23.1/schedule-cockroachdb-kubernetes.md +++ b/src/current/v23.1/schedule-cockroachdb-kubernetes.md @@ -108,7 +108,7 @@ For more context on how these rules work, see the [Kubernetes documentation](htt Specify pod affinities and anti-affinities in `affinity.podAffinity` and `affinity.podAntiAffinity` in the Operator's custom resource, which is used to [deploy the cluster]({% link {{ page.version.version }}/deploy-cockroachdb-with-kubernetes.md %}#initialize-the-cluster). If you specify multiple `matchExpressions` labels, the node must match all of them. If you specify multiple `values` for a label, the node can match any of the values. -The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. +The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. 
{% include_cached copy-clipboard.html %} ~~~ yaml diff --git a/src/current/v23.1/spatial-tutorial.md b/src/current/v23.1/spatial-tutorial.md index a04cb3b0c44..bc1a515ec14 100644 --- a/src/current/v23.1/spatial-tutorial.md +++ b/src/current/v23.1/spatial-tutorial.md @@ -964,7 +964,7 @@ Time: 1.447s total (execution 1.446s / network 0.000s) Unfortunately, this query is a bit slower than you would like: about 1.5 seconds on a single-node [`cockroach demo`]({% link {{ page.version.version }}/cockroach-demo.md %}) cluster on a laptop. There are several reasons for this: 1. You haven't created any indexes at all yet. The query is likely to be doing full table scans, which you will need to hunt down with [`EXPLAIN`]({% link {{ page.version.version }}/explain.md %}). -1. CockroachDB does not yet have built-in support for index-based nearest neighbor queries. If this feature is important to you, please comment with some information about your use case on [cockroachdb/cockroach#55227](https://github.com/cockroachdb/cockroach/issues/55227). +1. CockroachDB does not yet have built-in support for index-based nearest neighbor queries. If this feature is important to you, please comment with some information about your use case on cockroachdb/cockroach#55227. 
Let's look at the `EXPLAIN` output to see if there is something that can be done to improve this query's performance: diff --git a/src/current/v23.1/sql-feature-support.md b/src/current/v23.1/sql-feature-support.md index fd12689d09d..a1dd1f68a6e 100644 --- a/src/current/v23.1/sql-feature-support.md +++ b/src/current/v23.1/sql-feature-support.md @@ -191,9 +191,9 @@ XML | ✗ | Standard | XML data can be stored as `BYTES`, but we do not offer XM Materialized views | ✓ | Common Extension | [Materialized views documentation]({% link {{ page.version.version }}/views.md %}#materialized-views) Window functions | ✓ | Standard | [Window Functions documentation]({% link {{ page.version.version }}/window-functions.md %}) Common table expressions | Partial | Common Extension | [Common Table Expressions documentation]({% link {{ page.version.version }}/common-table-expressions.md %}) - Stored procedures | ✗ | Common Extension | Execute a procedure explicitly. [GitHub issue tracking stored procedures support](https://github.com/cockroachdb/cockroach/issues/17511). + Stored procedures | ✗ | Common Extension | Execute a procedure explicitly. GitHub issue tracking stored procedures support. Cursors | Partial | Standard | [Cursors documentation]({% link {{ page.version.version }}/cursors.md %}) - Triggers | ✗ | Standard | Execute a set of commands whenever a specified event occurs. [GitHub issue tracking trigger support](https://github.com/cockroachdb/cockroach/issues/28296). + Triggers | ✗ | Standard | Execute a set of commands whenever a specified event occurs. GitHub issue tracking trigger support. Row-level TTL | ✓ | Common Extension | Automatically delete expired rows. For more information, see [Batch-delete expired data with Row-Level TTL]({% link {{ page.version.version }}/row-level-ttl.md %}). 
User-defined functions | Partial | Standard | [User-Defined Functions documentation]({% link {{ page.version.version }}/user-defined-functions.md %}) - `CREATE EXTENSION "uuid-ossp"` | ✓ | Common Extension | Provides access to several additional [UUID generation functions]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions). Note that these UUID functions are available without typing `CREATE EXTENSION "uuid-ossp"`. CockroachDB does not have full support for `CREATE EXTENSION`. [GitHub issue tracking `CREATE EXTENSION` support](https://github.com/cockroachdb/cockroach/issues/74777). + `CREATE EXTENSION "uuid-ossp"` | ✓ | Common Extension | Provides access to several additional [UUID generation functions]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions). Note that these UUID functions are available without typing `CREATE EXTENSION "uuid-ossp"`. CockroachDB does not have full support for `CREATE EXTENSION`. GitHub issue tracking `CREATE EXTENSION` support. diff --git a/src/current/v23.1/sql-name-resolution.md b/src/current/v23.1/sql-name-resolution.md index 554a9a65583..9600bfe1ef7 100644 --- a/src/current/v23.1/sql-name-resolution.md +++ b/src/current/v23.1/sql-name-resolution.md @@ -37,7 +37,7 @@ If you are upgrading to {{ page.version.version }}, take any combination of the - [Create new schemas]({% link {{ page.version.version }}/create-schema.md %}) in databases on your cluster. After the schemas are created, use [`ALTER TABLE ... RENAME`]({% link {{ page.version.version }}/alter-table.md %}#rename-to), [`ALTER SEQUENCE ... RENAME`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE ... RENAME`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW ... RENAME`]({% link {{ page.version.version }}/alter-view.md %}) statements to move objects between databases as needed. To move objects between schemas, use [`ALTER TABLE ... 
SET SCHEMA`]({% link {{ page.version.version }}/alter-table.md %}#set-schema), [`ALTER SEQUENCE ... SET SCHEMA`]({% link {{ page.version.version }}/alter-sequence.md %}), or [`ALTER VIEW ... SET SCHEMA`]({% link {{ page.version.version }}/alter-view.md %}). -- If your cluster contains cross-database references (e.g., a cross-database foreign key reference, or a cross-database view reference), use the relevant [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}), [`ALTER SEQUENCE`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW `]({% link {{ page.version.version }}/alter-view.md %}) statements to move any cross-referencing objects to the same database, but different schemas. Cross-database object references were allowed in earlier versions of CockroachDB to make database-object naming hierarchies more flexible for users. In v20.2, creating cross-database references are disabled for [foreign keys](foreign-key.html), [views]({% link {{ page.version.version }}/views.md %}), and [sequence ownership]({% link {{ page.version.version }}/create-sequence.md %}). For details, see [tracking issue](https://github.com/cockroachdb/cockroach/issues/55791). +- If your cluster contains cross-database references (e.g., a cross-database foreign key reference, or a cross-database view reference), use the relevant [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}), [`ALTER SEQUENCE`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW `]({% link {{ page.version.version }}/alter-view.md %}) statements to move any cross-referencing objects to the same database, but different schemas. Cross-database object references were allowed in earlier versions of CockroachDB to make database-object naming hierarchies more flexible for users. 
In v20.2, creating cross-database references is disabled for [foreign keys](foreign-key.html), [views]({% link {{ page.version.version }}/views.md %}), and [sequence ownership]({% link {{ page.version.version }}/create-sequence.md %}). For details, see tracking issue. ## How name resolution works diff --git a/src/current/v23.1/srid-4326.md b/src/current/v23.1/srid-4326.md index cfd15368e47..c344ee62835 100644 --- a/src/current/v23.1/srid-4326.md +++ b/src/current/v23.1/srid-4326.md @@ -114,7 +114,7 @@ ERROR: st_contains(): operation on mixed SRIDs forbidden: (Point, 0) != (Point, ## Known limitations {{site.data.alerts.callout_info}} -Defining a custom SRID by inserting rows into [`spatial_ref_sys`]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial_ref_sys) is not currently supported. For more information, see the tracking issue [cockroachdb/cockroach#55903](https://github.com/cockroachdb/cockroach/issues/55903). +Defining a custom SRID by inserting rows into [`spatial_ref_sys`]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial_ref_sys) is not currently supported. For more information, see the tracking issue cockroachdb/cockroach#55903. {{site.data.alerts.end}} ## See also diff --git a/src/current/v23.1/st_union.md b/src/current/v23.1/st_union.md index 581f9db7f9b..74f60413e93 100644 --- a/src/current/v23.1/st_union.md +++ b/src/current/v23.1/st_union.md @@ -12,7 +12,7 @@ Given a set of shapes (e.g., from a [selection query]({% link {{ page.version.ve - [`GEOMETRY`]({% link {{ page.version.version }}/architecture/glossary.md %}#geometry) {{site.data.alerts.callout_info}} -The non-aggregate version of `ST_Union` is not yet implemented. For more information, see [cockroach#49064](https://github.com/cockroachdb/cockroach/issues/49064). +The non-aggregate version of `ST_Union` is not yet implemented. For more information, see cockroach#49064. 
{{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v23.1/temporary-tables.md b/src/current/v23.1/temporary-tables.md index bd3a9adefdd..0eda33fc45f 100644 --- a/src/current/v23.1/temporary-tables.md +++ b/src/current/v23.1/temporary-tables.md @@ -10,7 +10,7 @@ docs_area: develop To create a temp table, add `TEMP`/`TEMPORARY` to a [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}) or [`CREATE TABLE AS`]({% link {{ page.version.version }}/create-table-as.md %}) statement. For full syntax details, see the [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}#synopsis) and [`CREATE TABLE AS`]({% link {{ page.version.version }}/create-table-as.md %}#synopsis) pages. For example usage, see [Examples](#examples). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v23.1/trigram-indexes.md b/src/current/v23.1/trigram-indexes.md index 474b258163a..aeb27eb78e0 100644 --- a/src/current/v23.1/trigram-indexes.md +++ b/src/current/v23.1/trigram-indexes.md @@ -350,7 +350,7 @@ CREATE INDEX ON t USING GIN ((json_col->>'json_text_field')) ## Unsupported features -The following PostgreSQL syntax and features are currently unsupported. For details, see the [tracking issue](https://github.com/cockroachdb/cockroach/issues/41285). +The following PostgreSQL syntax and features are currently unsupported. For details, see the tracking issue. 
{% include {{ page.version.version }}/sql/trigram-unsupported-syntax.md %} diff --git a/src/current/v23.1/user-defined-functions.md b/src/current/v23.1/user-defined-functions.md index a8396fed7c5..e8bbf051412 100644 --- a/src/current/v23.1/user-defined-functions.md +++ b/src/current/v23.1/user-defined-functions.md @@ -285,15 +285,15 @@ User-defined functions are not currently supported in: - Expressions (column, index, constraint) in tables. - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/87699) + Tracking GitHub issue - Views. - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/87699) + Tracking GitHub issue - Other user-defined functions. - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/93049) + Tracking GitHub issue ### Limitations on expressions allowed within UDFs @@ -301,15 +301,15 @@ The following are not currently allowed within the body of a UDF: - Mutation statements such as `INSERT`, `UPDATE`, `DELETE`, and `UPSERT`. - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/87289) + Tracking GitHub issue - CTEs (common table expressions). - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/92961) + Tracking GitHub issue - References to other user-defined functions. - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/93049) + Tracking GitHub issue ## See also diff --git a/src/current/v23.1/vectorized-execution.md b/src/current/v23.1/vectorized-execution.md index f8ab08de50b..2e873cbc6e9 100644 --- a/src/current/v23.1/vectorized-execution.md +++ b/src/current/v23.1/vectorized-execution.md @@ -69,7 +69,7 @@ You can also configure a node's total budget for in-memory query processing with The vectorized engine does not support queries containing: -- A join filtered with an [`ON` expression]({% link {{ page.version.version }}/joins.md %}#supported-join-conditions). 
See [tracking issue](https://github.com/cockroachdb/cockroach/issues/38018). +- A join filtered with an [`ON` expression]({% link {{ page.version.version }}/joins.md %}#supported-join-conditions). See tracking issue. ### Spatial features diff --git a/src/current/v23.1/views.md b/src/current/v23.1/views.md index 89357fb6d52..2ae0d9f36f0 100644 --- a/src/current/v23.1/views.md +++ b/src/current/v23.1/views.md @@ -634,7 +634,7 @@ To speed up queries on materialized views, you can add an [index]({% link {{ pag CockroachDB supports session-scoped temporary views. Unlike persistent views, temporary views can only be accessed from the session in which they were created, and they are dropped at the end of the session. You can create temporary views on both persistent tables and [temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v23.2/admission-control.md b/src/current/v23.2/admission-control.md index c311b4ad7e4..2cac2ba4484 100644 --- a/src/current/v23.2/admission-control.md +++ b/src/current/v23.2/admission-control.md @@ -22,7 +22,7 @@ Admission control works on a per-[node]({% link {{ page.version.version }}/archi For more details about how the admission control system works, see: -- The [Admission Control tech note](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/admission_control.md). +- [The Admission Control tech note](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/tech-notes/admission_control.md). 
- The blog post [Here's how CockroachDB keeps your database from collapsing under load](https://www.cockroachlabs.com/blog/admission-control-in-cockroachdb/). ## Use cases for admission control @@ -149,6 +149,6 @@ The [DB Console Overload dashboard]({% link {{ page.version.version }}/ui-overlo ## See also - The [Overload Dashboard]({% link {{ page.version.version }}/ui-overload-dashboard.md %}) in the [DB Console]({% link {{ page.version.version }}/ui-overview.md %}). -- The [technical note for admission control](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/admission_control.md) for details on the design of the admission control system. +- The technical note for admission control for details on the design of the admission control system. - The blog post [Here's how CockroachDB keeps your database from collapsing under load](https://www.cockroachlabs.com/blog/admission-control-in-cockroachdb/). - The blog post [Rubbing Control Theory on the Go scheduler](https://www.cockroachlabs.com/blog/rubbing-control-theory/). diff --git a/src/current/v23.2/alter-changefeed.md b/src/current/v23.2/alter-changefeed.md index 15a860c7010..e18af7d0403 100644 --- a/src/current/v23.2/alter-changefeed.md +++ b/src/current/v23.2/alter-changefeed.md @@ -238,8 +238,8 @@ For further discussion on using the `FAMILY` keyword and `split_column_families` ## Known limitations -- It is necessary to [`PAUSE`]({% link {{ page.version.version }}/pause-job.md %}) the changefeed before performing any `ALTER CHANGEFEED` statement. [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/77171) -- `ALTER CHANGEFEED` will accept duplicate targets without sending an error. [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/78285) +- It is necessary to [`PAUSE`]({% link {{ page.version.version }}/pause-job.md %}) the changefeed before performing any `ALTER CHANGEFEED` statement. 
Tracking GitHub Issue +- `ALTER CHANGEFEED` will accept duplicate targets without sending an error. Tracking GitHub Issue - {% include {{ page.version.version }}/known-limitations/alter-changefeed-cdc-queries.md %} - CockroachDB does not keep track of the [`initial_scan`]({% link {{ page.version.version }}/create-changefeed.md %}#initial-scan) option applied to tables when it is set to `yes` or `only`. For example: diff --git a/src/current/v23.2/alter-table.md b/src/current/v23.2/alter-table.md index f7b7deb5e1c..5e2691b2e58 100644 --- a/src/current/v23.2/alter-table.md +++ b/src/current/v23.2/alter-table.md @@ -265,7 +265,7 @@ For examples, see [Drop columns](#drop-columns). #### Known limitations -- CockroachDB prevents a column from being dropped if it is referenced by a [partial index]({% link {{ page.version.version }}/partial-indexes.md %}) predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). See [tracking issue](https://github.com/cockroachdb/cockroach/issues/97813). +- CockroachDB prevents a column from being dropped if it is referenced by a [partial index]({% link {{ page.version.version }}/partial-indexes.md %}) predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). See tracking issue. #### Required privileges diff --git a/src/current/v23.2/alter-view.md b/src/current/v23.2/alter-view.md index 47de30984d8..5b3768ab8a5 100644 --- a/src/current/v23.2/alter-view.md +++ b/src/current/v23.2/alter-view.md @@ -36,7 +36,7 @@ Parameter | Description CockroachDB does not currently support: - Changing the [`SELECT`]({% link {{ page.version.version }}/select-clause.md %}) statement executed by a view. Instead, you must drop the existing view and create a new view. -- Renaming a view that other views depend on. 
This feature may be added in the future (see [tracking issue](https://github.com/cockroachdb/cockroach/issues/10083)). +- Renaming a view that other views depend on. This feature may be added in the future (see tracking issue). ## Examples diff --git a/src/current/v23.2/architecture/sql-layer.md b/src/current/v23.2/architecture/sql-layer.md index bfb67a48c05..2605879b93d 100644 --- a/src/current/v23.2/architecture/sql-layer.md +++ b/src/current/v23.2/architecture/sql-layer.md @@ -117,7 +117,7 @@ It's also important––for indexed columns––that this byte encoding preser However, for non-indexed columns (e.g., non-`PRIMARY KEY` columns), CockroachDB instead uses an encoding (known as "value encoding") which consumes less space but does not preserve ordering. -You can find more exhaustive detail in the [Encoding Tech Note](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/encoding.md). +You can find more exhaustive detail in [the Encoding Tech Note](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/tech-notes/encoding.md). ### DistSQL @@ -134,7 +134,7 @@ To run SQL statements in a distributed fashion, we introduce a couple of concept - **Logical plan**: Similar to the AST/`planNode` tree described above, it represents the abstract (non-distributed) data flow through computation stages. - **Physical plan**: A physical plan is conceptually a mapping of the logical plan nodes to physical machines running `cockroach`. Logical plan nodes are replicated and specialized depending on the cluster topology. Like `planNodes` above, these components of the physical plan are scheduled and run on the cluster. -You can find much greater detail in the [DistSQL RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20160421_distributed_sql.md). +You can find much greater detail in [the DistSQL RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20160421_distributed_sql.md). 
## Schema changes diff --git a/src/current/v23.2/changefeeds-in-multi-region-deployments.md b/src/current/v23.2/changefeeds-in-multi-region-deployments.md index 191ba84bf9c..1f0e23523e9 100644 --- a/src/current/v23.2/changefeeds-in-multi-region-deployments.md +++ b/src/current/v23.2/changefeeds-in-multi-region-deployments.md @@ -22,7 +22,7 @@ Defining an execution locality for a changefeed job, could be useful in the foll - Your cluster is running through VPC peering connections and you need all the data sent through a particular locality. {{site.data.alerts.callout_info}} -In v23.2.0, changefeeds that use the [`execution_locality` option]({% link {{ page.version.version }}/changefeeds-in-multi-region-deployments.md %}#run-a-changefeed-job-by-locality) set to a [secondary region]({% link {{ page.version.version }}/multiregion-overview.md %}#secondary-regions) could create a plan that assigns most of the ranges to an [aggregator]({% link {{ page.version.version }}/how-does-an-enterprise-changefeed-work.md %}) on the coordinator node. This leads to an unbalanced plan and slow changefeed progress, particularly when the table is large and has many ranges. This issue is [partially mitigated](https://github.com/cockroachdb/cockroach/commit/5d7714a03a891c9fd5746fb876c39dced4f47205) in v23.2.1 and later. +In v23.2.0, changefeeds that use the [`execution_locality` option]({% link {{ page.version.version }}/changefeeds-in-multi-region-deployments.md %}#run-a-changefeed-job-by-locality) set to a [secondary region]({% link {{ page.version.version }}/multiregion-overview.md %}#secondary-regions) could create a plan that assigns most of the ranges to an [aggregator]({% link {{ page.version.version }}/how-does-an-enterprise-changefeed-work.md %}) on the coordinator node. This leads to an unbalanced plan and slow changefeed progress, particularly when the table is large and has many ranges. This issue is partially mitigated in v23.2.1 and later. 
{{site.data.alerts.end}} ### Syntax diff --git a/src/current/v23.2/cluster-settings.md b/src/current/v23.2/cluster-settings.md index 51d72e9054f..e95fe0d7207 100644 --- a/src/current/v23.2/cluster-settings.md +++ b/src/current/v23.2/cluster-settings.md @@ -26,7 +26,7 @@ These cluster settings have a broad impact on CockroachDB internals and affect a {% include {{page.version.version}}/sql/sql-defaults-cluster-settings-deprecation-notice.md %} -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/settings/settings.html %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/settings/settings.html{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v23.2/cluster-setup-troubleshooting.md b/src/current/v23.2/cluster-setup-troubleshooting.md index ad7c820493e..916eafb860f 100644 --- a/src/current/v23.2/cluster-setup-troubleshooting.md +++ b/src/current/v23.2/cluster-setup-troubleshooting.md @@ -104,7 +104,7 @@ If you are trying to run all nodes on the same machine, you might get the follow #### Store directory already exists ~~~ -ERROR: could not cleanup temporary directories from record file: could not lock temporary directory /Users/amruta/go/src/github.com/cockroachdb/cockroach/cockroach-data/cockroach-temp301343769, may still be in use: IO error: While lock file: /Users/amruta/go/src/github.com/cockroachdb/cockroach/cockroach-data/cockroach-temp301343769/TEMP_DIR.LOCK: Resource temporarily unavailable +ERROR: could not cleanup temporary directories from record file: could not lock temporary directory /cockroach-data/cockroach-temp301343769, may still be in use: IO error: While lock file: /cockroach-data/cockroach-temp301343769/TEMP_DIR.LOCK: Resource temporarily unavailable ~~~ **Explanation:** When starting a new node on the same machine, the directory you choose to store the data in also contains metadata identifying the 
cluster the data came from. This causes conflicts when you've already started a node on the server and then tried to start another cluster using the same directory. diff --git a/src/current/v23.2/cockroachdb-feature-availability.md b/src/current/v23.2/cockroachdb-feature-availability.md index b768c00d501..13806744b62 100644 --- a/src/current/v23.2/cockroachdb-feature-availability.md +++ b/src/current/v23.2/cockroachdb-feature-availability.md @@ -148,7 +148,7 @@ Example: ### Check for constraint violations with `SCRUB` -Checks the consistency of [`UNIQUE`]({% link {{ page.version.version }}/unique.md %}) indexes, [`CHECK`]({% link {{ page.version.version }}/check.md %}) constraints, and more. Partially implemented; see [cockroachdb/cockroach#10425](https://github.com/cockroachdb/cockroach/issues/10425) for details. +Checks the consistency of [`UNIQUE`]({% link {{ page.version.version }}/unique.md %}) indexes, [`CHECK`]({% link {{ page.version.version }}/check.md %}) constraints, and more. Partially implemented; see cockroachdb/cockroach#10425 for details. {{site.data.alerts.callout_info}} This example uses the `users` table from our open-source, fictional peer-to-peer vehicle-sharing application, [MovR]({% link {{ page.version.version }}/movr.md %}). @@ -191,7 +191,7 @@ CockroachDB supports [altering the column types]({% link {{ page.version.version ### Temporary objects -[Temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}), [temporary views]({% link {{ page.version.version }}/views.md %}#temporary-views), and [temporary sequences]({% link {{ page.version.version }}/create-sequence.md %}#temporary-sequences) are in preview in CockroachDB. If you create too many temporary objects in a session, the performance of DDL operations will degrade. Performance limitations could persist long after creating the temporary objects. For more details, see [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). 
+[Temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}), [temporary views]({% link {{ page.version.version }}/views.md %}#temporary-views), and [temporary sequences]({% link {{ page.version.version }}/create-sequence.md %}#temporary-sequences) are in preview in CockroachDB. If you create too many temporary objects in a session, the performance of DDL operations will degrade. Performance limitations could persist long after creating the temporary objects. For more details, see cockroachdb/cockroach#46260. To enable temporary objects, set the `experimental_enable_temp_tables` [session variable]({% link {{ page.version.version }}/show-vars.md %}) to `on`. diff --git a/src/current/v23.2/common-table-expressions.md b/src/current/v23.2/common-table-expressions.md index 09ac09d1419..c411c2668a1 100644 --- a/src/current/v23.2/common-table-expressions.md +++ b/src/current/v23.2/common-table-expressions.md @@ -444,7 +444,7 @@ SELECT COUNT(*) FROM temp; Because this pattern incurs the overhead of a new scan for each iteration, it is slower per row than a full scan. It is therefore faster than a full scan in cases (such as this one) where many rows are skipped, but is slower if they are not. {{site.data.alerts.callout_info}} -Some recursive CTEs are not not yet optimized. For details, see the [tracking issue](https://github.com/cockroachdb/cockroach/issues/89954). +Some recursive CTEs are not yet optimized. For details, see the tracking issue.
{{site.data.alerts.end}} ## Correlated common table expressions diff --git a/src/current/v23.2/configure-replication-zones.md b/src/current/v23.2/configure-replication-zones.md index eb46be28789..1269a114c02 100644 --- a/src/current/v23.2/configure-replication-zones.md +++ b/src/current/v23.2/configure-replication-zones.md @@ -98,7 +98,7 @@ For more information, see the following subsections: The hierarchy of inheritance for zone configs can be visualized using the following outline-style diagram, in which each level of indentation denotes an inheritance relationship. -The only exception to this simple inheritance relationship is that due to a known limitation, sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information, see [cockroachdb/cockroach#75862](https://github.com/cockroachdb/cockroach/issues/75862). +The only exception to this simple inheritance relationship is that due to a known limitation, sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information, see cockroachdb/cockroach#75862. ``` - default @@ -127,7 +127,7 @@ From the whole-system perspective, the hierarchy of schema object zone configs c The following diagram presents the same set of schema objects as the previous outline-style diagram, but using boxes and lines joined with arrows that represent the "top-down" view. -Each box represents a schema object in the zone configuration inheritance hierarchy. Each solid line ends in an arrow that points from a parent object to its child object, which will inherit the parent's values unless those values are changed at the child level. The dotted lines between partitions and sub-partitions represent the known limitation mentioned previously that sub-partitions do not inherit their values from their parent partitions. 
Instead, sub-partitions inherit their values from the parent table. For more information about this limitation, see [cockroachdb/cockroach#75862](https://github.com/cockroachdb/cockroach/issues/75862). +Each box represents a schema object in the zone configuration inheritance hierarchy. Each solid line ends in an arrow that points from a parent object to its child object, which will inherit the parent's values unless those values are changed at the child level. The dotted lines between partitions and sub-partitions represent the known limitation mentioned previously that sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information about this limitation, see cockroachdb/cockroach#75862. zone config inheritance diagram diff --git a/src/current/v23.2/create-sequence.md b/src/current/v23.2/create-sequence.md index b8be147522b..ccd82612310 100644 --- a/src/current/v23.2/create-sequence.md +++ b/src/current/v23.2/create-sequence.md @@ -58,7 +58,7 @@ CockroachDB supports the following [SQL sequence functions]({% link {{ page.vers CockroachDB supports session-scoped temporary sequences. Unlike persistent sequences, temporary sequences can only be accessed from the session in which they were created, and they are dropped at the end of the session. You can create temporary sequences on both persistent tables and [temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. 
{{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v23.2/create-table.md b/src/current/v23.2/create-table.md index 0976255a0d8..37348651adc 100644 --- a/src/current/v23.2/create-table.md +++ b/src/current/v23.2/create-table.md @@ -155,7 +155,7 @@ If you use `GENERATED BY DEFAULT AS IDENTITY` to define the identity column, any Note the following limitations of identity columns: -- `GENERATED ALWAYS AS IDENTITY`/`GENERATED BY DEFAULT AS IDENTITY` is supported in [`ALTER TABLE ... ADD COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#add-column) statements only when the table being altered is empty, as [CockroachDB does not support back-filling sequential column data]({% link {{ page.version.version }}/known-limitations.md %}#adding-a-column-with-sequence-based-default-values). For more information, see the [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/42508). +- `GENERATED ALWAYS AS IDENTITY`/`GENERATED BY DEFAULT AS IDENTITY` is supported in [`ALTER TABLE ... ADD COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#add-column) statements only when the table being altered is empty, as [CockroachDB does not support back-filling sequential column data]({% link {{ page.version.version }}/known-limitations.md %}#adding-a-column-with-sequence-based-default-values). For more information, see the Tracking GitHub Issue. - Unlike PostgreSQL, CockroachDB does not support using the `OVERRIDING SYSTEM VALUE` clause in `INSERT`/`UPDATE`/`UPSERT` statements to overwrite `GENERATED ALWAYS AS IDENTITY` identity column values. For an example of an identity column, see [Create a table with an identity column](#create-a-table-with-an-identity-column). 
diff --git a/src/current/v23.2/disaster-recovery-planning.md b/src/current/v23.2/disaster-recovery-planning.md index 69eaf1fac32..74ee865969d 100644 --- a/src/current/v23.2/disaster-recovery-planning.md +++ b/src/current/v23.2/disaster-recovery-planning.md @@ -320,7 +320,7 @@ If your cluster is running, you do not have a backup that encapsulates the time If you have corrupted data in a database or table, [restore]({% link {{ page.version.version }}/restore.md %}) the object from a prior [backup]({% link {{ page.version.version }}/backup.md %}). If revision history is in the backup, you can restore from a [point in time]({% link {{ page.version.version }}/take-backups-with-revision-history-and-restore-from-a-point-in-time.md %}). -Instead of dropping the corrupted table or database, we recommend [renaming the table]({% link {{ page.version.version }}/alter-table.md %}#rename-to) or [renaming the database]({% link {{ page.version.version }}/alter-database.md %}#rename-to) so you have historical data to compare to later. If you drop a database, the database cannot be referenced with `AS OF SYSTEM TIME` queries (see [#51380](https://github.com/cockroachdb/cockroach/issues/51380) for more information), and you will need to take a backup that is backdated to the system time when the database still existed. +Instead of dropping the corrupted table or database, we recommend [renaming the table]({% link {{ page.version.version }}/alter-table.md %}#rename-to) or [renaming the database]({% link {{ page.version.version }}/alter-database.md %}#rename-to) so you have historical data to compare to later. If you drop a database, the database cannot be referenced with `AS OF SYSTEM TIME` queries (see #51380 for more information), and you will need to take a backup that is backdated to the system time when the database still existed. 
{{site.data.alerts.callout_info}} If the table you are restoring has foreign keys, [careful consideration]({% link {{ page.version.version }}/restore.md %}#remove-the-foreign-key-before-restore) should be applied to make sure data integrity is maintained during the restore process. diff --git a/src/current/v23.2/eventlog.md b/src/current/v23.2/eventlog.md index 2dd3c072d82..e7b813ded9d 100644 --- a/src/current/v23.2/eventlog.md +++ b/src/current/v23.2/eventlog.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/eventlog.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/eventlog.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v23.2/example-apps.md b/src/current/v23.2/example-apps.md index bea3cfd1876..0d5ba010933 100644 --- a/src/current/v23.2/example-apps.md +++ b/src/current/v23.2/example-apps.md @@ -14,7 +14,7 @@ Click the links in the tables below to see simple but complete example applicati If you are looking to do a specific task such as connect to the database, insert data, or run multi-statement transactions, see [this list of tasks](#tasks). {{site.data.alerts.callout_info}} -Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM with **partial** support. If you encounter problems, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward full support. +Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM with **partial** support. If you encounter problems, please open an issue with details to help us make progress toward full support. 
Note that tools with [**community-level** support]({% link {{ page.version.version }}/community-tooling.md %}) have been tested or developed by the CockroachDB community, but are not officially supported by Cockroach Labs. If you encounter problems with using these tools, please contact the maintainer of the tool with details. {{site.data.alerts.end}} diff --git a/src/current/v23.2/file-an-issue.md b/src/current/v23.2/file-an-issue.md index 9255e2c08b2..760c5b181db 100644 --- a/src/current/v23.2/file-an-issue.md +++ b/src/current/v23.2/file-an-issue.md @@ -28,7 +28,7 @@ To file an issue in GitHub, we need the following information: ### Template -You can use this as a template for [filing an issue in GitHub](https://github.com/cockroachdb/cockroach/issues/new): +You can use this as a template for filing an issue in GitHub: ~~~ diff --git a/src/current/v23.2/fips.md b/src/current/v23.2/fips.md index e490ddcd296..1ca62a6fa8e 100644 --- a/src/current/v23.2/fips.md +++ b/src/current/v23.2/fips.md @@ -58,7 +58,7 @@ A system must have FIPS mode enabled in the kernel before it can run the FIPS-re If you do not want to use the FIPS-ready CockroachDB Docker image directly, you can create a custom Docker image based on [Red Hat's Universal Base Image 8 Docker image](https://catalog.redhat.com/software/containers/ubi8/ubi/): -- You can model your Dockerfile on the one that Cockroach Labs uses to produce the [FIPS-ready Docker image](https://github.com/cockroachdb/cockroach/blob/master/build/deploy/Dockerfile) for CockroachDB. +- You can model your Dockerfile on the one that Cockroach Labs uses to produce the FIPS-ready Docker image for CockroachDB. - Your Dockerfile must install OpenSSL before it starts the `cockroach` binary. - You must enable FIPS mode on the Docker host kernel before it can run containers with FIPS mode enabled. The FIPS-ready CockroachDB Docker image must run with FIPS mode enabled. 
To enable FIPS mode in the Docker host kernel, refer to [Enable FIPS mode](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/security_hardening/assembly_installing-a-rhel-8-system-with-fips-mode-enabled_security-hardening) in Red Hat's documentation. To verify that FIPS mode is enabled, refer to [Verify that the kernel enforces FIPS mode](#verify-that-the-kernel-enforces-fips-mode). diff --git a/src/current/v23.2/foreign-key.md b/src/current/v23.2/foreign-key.md index f82bfc4aad5..81eaba0d7ac 100644 --- a/src/current/v23.2/foreign-key.md +++ b/src/current/v23.2/foreign-key.md @@ -93,7 +93,7 @@ For matching purposes, composite foreign keys can be in one of three states: For examples showing how these key matching algorithms work, see [Match composite foreign keys with `MATCH SIMPLE` and `MATCH FULL`](#match-composite-foreign-keys-with-match-simple-and-match-full). {{site.data.alerts.callout_info}} -CockroachDB does not support `MATCH PARTIAL`. For more information, see issue [#20305](https://github.com/cockroachdb/cockroach/issues/20305). +CockroachDB does not support `MATCH PARTIAL`. For more information, see issue #20305. {{site.data.alerts.end}} ### Foreign key actions diff --git a/src/current/v23.2/full-text-search.md b/src/current/v23.2/full-text-search.md index 5b93835d9c3..6185cd44f6a 100644 --- a/src/current/v23.2/full-text-search.md +++ b/src/current/v23.2/full-text-search.md @@ -460,7 +460,7 @@ Some PostgreSQL syntax and features are unsupported. These include, but are not - `!! tsquery` comparisons. - `tsquery @> tsquery` and `tsquery <@ tsquery` comparisons. -For full details, see the [tracking issue](https://github.com/cockroachdb/cockroach/issues/41288). +For full details, see the tracking issue. 
## See also diff --git a/src/current/v23.2/functions-and-operators.md b/src/current/v23.2/functions-and-operators.md index 9515f188f77..6fb648e24ff 100644 --- a/src/current/v23.2/functions-and-operators.md +++ b/src/current/v23.2/functions-and-operators.md @@ -51,7 +51,7 @@ In addition to the built-in functions described in the following sections, Cockr ## Built-in functions -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/functions.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/functions.md{% endcapture %}{% include {{ cockroach_include }} %} ## Aggregate functions @@ -61,11 +61,11 @@ For examples showing how to use aggregate functions, see [the `SELECT` clause do Non-commutative aggregate functions are sensitive to the order in which the rows are processed in the surrounding [`SELECT` clause]({% link {{ page.version.version }}/select-clause.md %}#aggregate-functions). To specify the order in which input rows are processed, you can add an [`ORDER BY`]({% link {{ page.version.version }}/order-by.md %}) clause within the function argument list. For examples, see the [`SELECT` clause]({% link {{ page.version.version }}/select-clause.md %}#order-aggregate-function-input-rows-by-column) documentation. 
{{site.data.alerts.end}} -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/aggregates.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/aggregates.md{% endcapture %}{% include {{ cockroach_include }} %} ## Window functions -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/window_functions.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/window_functions.md{% endcapture %}{% include {{ cockroach_include }} %} ## Operators @@ -137,7 +137,7 @@ The following table lists all CockroachDB operators from highest to lowest prece ### Supported operations -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/operators.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/operators.md{% endcapture %}{% include {{ cockroach_include }} %} {% comment %} ## `CAST()` diff --git a/src/current/v23.2/install-client-drivers.md b/src/current/v23.2/install-client-drivers.md index 984671a0dfc..4d919756ce3 100644 --- a/src/current/v23.2/install-client-drivers.md +++ b/src/current/v23.2/install-client-drivers.md @@ -8,7 +8,7 @@ docs_area: develop CockroachDB supports both native drivers and the PostgreSQL wire protocol, so most available PostgreSQL client drivers and ORM frameworks should work with CockroachDB. Choose a language for supported clients, and follow the installation steps. After you install a client library, you can [connect to the database]({% link {{ page.version.version }}/connect-to-the-database.md %}). {{site.data.alerts.callout_info}} -Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM framework with **partial** support. 
If you encounter problems, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward full support. +Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM framework with **partial** support. If you encounter problems, please open an issue with details to help us make progress toward full support. {{site.data.alerts.end}}
diff --git a/src/current/v23.2/install-cockroachdb-mac.md b/src/current/v23.2/install-cockroachdb-mac.md index 177c2863226..79d5666a5bd 100644 --- a/src/current/v23.2/install-cockroachdb-mac.md +++ b/src/current/v23.2/install-cockroachdb-mac.md @@ -19,7 +19,7 @@ See [Release Notes]({% link releases/{{page.version.version}}.md %}) for what's {% comment %}v22.2.0+{% endcomment %} {{site.data.alerts.callout_danger}} -

On macOS ARM systems, spatial features are disabled due to an issue with macOS code signing for the GEOS libraries. Users needing spatial features on an ARM Mac may instead use Rosetta to run the Intel binary or use the Docker image distribution. Refer to GitHub tracking issue for more information.

+

On macOS ARM systems, spatial features are disabled due to an issue with macOS code signing for the GEOS libraries. Users needing spatial features on an ARM Mac may instead use Rosetta to run the Intel binary or use the Docker image distribution. Refer to GitHub tracking issue for more information.

{{site.data.alerts.end}} {% capture arch_note_homebrew %}

For CockroachDB v22.2.x and above, Homebrew installs binaries for your system architecture, either Intel or ARM (Apple Silicon).

For previous releases, Homebrew installs Intel binaries. Intel binaries can run on ARM systems, but with a significant reduction in performance. CockroachDB on ARM for macOS is experimental and is not yet qualified for production use and not eligible for support or uptime SLA commitments.

{% endcapture %} diff --git a/src/current/v23.2/intellij-idea.md b/src/current/v23.2/intellij-idea.md index de46249320f..ef1ce93b9d4 100644 --- a/src/current/v23.2/intellij-idea.md +++ b/src/current/v23.2/intellij-idea.md @@ -81,7 +81,7 @@ You can now use IntelliJ's [database tool window](https://www.jetbrains.com/help ## Report issues with IntelliJ IDEA & CockroachDB -If you encounter issues other than those outlined above, please [file an issue on the `cockroachdb/cockroach` GitHub repo](https://github.com/cockroachdb/cockroach/issues/new?template=bug_report.md), including the following details about the environment where you encountered the issue: +If you encounter issues other than those outlined above, please file an issue on the `cockroachdb/cockroach` GitHub repo, including the following details about the environment where you encountered the issue: - CockroachDB version ([`cockroach version`]({% link {{ page.version.version }}/cockroach-version.md %})) - IntelliJ IDEA version diff --git a/src/current/v23.2/jsonb.md b/src/current/v23.2/jsonb.md index c6153ce4e92..d9465f95fb6 100644 --- a/src/current/v23.2/jsonb.md +++ b/src/current/v23.2/jsonb.md @@ -89,11 +89,11 @@ To [index]({% link {{ page.version.version }}/indexes.md %}) a `JSONB` column yo - You cannot [order]({% link {{ page.version.version }}/order-by.md %}) queries using `JSONB` and `JSON`-typed columns. - [Tracking issue](https://github.com/cockroachdb/cockroach/issues/35706) + Tracking issue - If the execution of a [join]({% link {{ page.version.version }}/joins.md %}) query exceeds the limit set for [memory-buffering operations]({% link {{ page.version.version }}/vectorized-execution.md %}#disk-spilling-operations) (i.e., the value set for the `sql.distsql.temp_storage.workmem` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %})), CockroachDB will spill the intermediate results of computation to disk. 
If the join operation spills to disk, and at least one of the columns is of type `JSON`, CockroachDB returns the error `unable to encode table key: *tree.DJSON`. If the memory limit is not reached, then the query will be processed without error. - [Tracking issue](https://github.com/cockroachdb/cockroach/issues/35706) + Tracking issue ## Examples diff --git a/src/current/v23.2/known-limitations.md b/src/current/v23.2/known-limitations.md index 24c971f0da0..2289670ca35 100644 --- a/src/current/v23.2/known-limitations.md +++ b/src/current/v23.2/known-limitations.md @@ -14,62 +14,62 @@ This page describes newly identified limitations in the CockroachDB {{page.relea #### Support for PL/pgSQL features -- PL/pgSQL blocks cannot be nested. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/114775) -- PL/pgSQL arguments cannot be referenced with ordinals (e.g., `$1`, `$2`). [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/114701) -- `FOR` loops, including `FOR` cursor loops, `FOR` query loops, and `FOREACH` loops, are not supported. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/105246) -- `RETURN NEXT` and `RETURN QUERY` statements are not supported. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/117744) -- `EXIT` and `CONTINUE` labels and conditions are not supported. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/115271) -- `CASE` statements are not supported. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/117744) -- `PERFORM`, `EXECUTE`, `GET DIAGNOSTICS`, and `NULL` statements are not supported for PL/pgSQL. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/117744) +- PL/pgSQL blocks cannot be nested. Tracking GitHub issue +- PL/pgSQL arguments cannot be referenced with ordinals (e.g., `$1`, `$2`). 
Tracking GitHub issue +- `FOR` loops, including `FOR` cursor loops, `FOR` query loops, and `FOREACH` loops, are not supported. Tracking GitHub issue +- `RETURN NEXT` and `RETURN QUERY` statements are not supported. Tracking GitHub issue +- `EXIT` and `CONTINUE` labels and conditions are not supported. Tracking GitHub issue +- `CASE` statements are not supported. Tracking GitHub issue +- `PERFORM`, `EXECUTE`, `GET DIAGNOSTICS`, and `NULL` statements are not supported for PL/pgSQL. Tracking GitHub issue #### Type Handling and Variable Declarations -- `RECORD` parameters and variables are not supported. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/105713) -- Variable shadowing (e.g., declaring a variable with the same name in an inner block) is not supported in PL/pgSQL. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/117508) -- Syntax for accessing members of composite types without parentheses is not supported. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/114687) -- The `STRICT` option for the PL/pgSQL `INTO` statement is not supported. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/107854) -- `NOT NULL` variable declarations are not supported. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/105243) +- `RECORD` parameters and variables are not supported. Tracking GitHub issue +- Variable shadowing (e.g., declaring a variable with the same name in an inner block) is not supported in PL/pgSQL. Tracking GitHub issue +- Syntax for accessing members of composite types without parentheses is not supported. Tracking GitHub issue +- The `STRICT` option for the PL/pgSQL `INTO` statement is not supported. Tracking GitHub issue +- `NOT NULL` variable declarations are not supported. Tracking GitHub issue #### Cursor Functionality -- Cursors opened in PL/pgSQL execute their queries on opening, affecting performance and resource usage. 
[Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/111479) -- `OPEN FOR EXECUTE` is not supported for opening cursors. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/117744) +- Cursors opened in PL/pgSQL execute their queries on opening, affecting performance and resource usage. Tracking GitHub issue +- `OPEN FOR EXECUTE` is not supported for opening cursors. Tracking GitHub issue #### Exception Handling -- PL/pgSQL exception blocks cannot catch [transaction retry errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}). [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/111446) -- `RAISE` statements cannot be annotated with names of schema objects related to the error (i.e., using `COLUMN`, `CONSTRAINT`, `DATATYPE`, `TABLE`, or `SCHEMA`). [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/106237) -- `RAISE` statements message the client directly, and do not produce log output. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/117750) -- `ASSERT` debugging checks are not supported. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/117744) +- PL/pgSQL exception blocks cannot catch [transaction retry errors]({% link {{ page.version.version }}/transaction-retry-error-reference.md %}). Tracking GitHub issue +- `RAISE` statements cannot be annotated with names of schema objects related to the error (i.e., using `COLUMN`, `CONSTRAINT`, `DATATYPE`, `TABLE`, or `SCHEMA`). Tracking GitHub issue +- `RAISE` statements message the client directly, and do not produce log output. Tracking GitHub issue +- `ASSERT` debugging checks are not supported. Tracking GitHub issue ### Limitations in User-Defined Functions (UDFs) and Stored Procedures -- Transactions cannot be run within stored procedures. 
[Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/115294) -- UDFs and stored procedures cannot call other UDFs or stored procedures from within their bodies. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/88198) -- DDL statements (e.g., `CREATE TABLE`, `CREATE INDEX`) are not allowed within UDFs or procedures. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/110080) -- UDF and stored procedure definitions do not support `OUT` and `INOUT` argument modes. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/100405) -- The `setval` function cannot be resolved when used inside UDF bodies. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/110860) +- Transactions cannot be run within stored procedures. Tracking GitHub issue +- UDFs and stored procedures cannot call other UDFs or stored procedures from within their bodies. Tracking GitHub issue +- DDL statements (e.g., `CREATE TABLE`, `CREATE INDEX`) are not allowed within UDFs or procedures. Tracking GitHub issue +- UDF and stored procedure definitions do not support `OUT` and `INOUT` argument modes. Tracking GitHub issue +- The `setval` function cannot be resolved when used inside UDF bodies. Tracking GitHub issue ### SQL Optimizer and Read Committed Isolation #### Optimizer and Locking Behavior - The SQL optimizer has limitations under certain isolation levels: - - The new implementation of `SELECT FOR UPDATE` is not yet the default setting under `SERIALIZABLE` isolation. It can be used under `SERIALIZABLE` isolation by setting the `optimizer_use_lock_op_for_serializable` [session setting]({% link {{ page.version.version }}/session-variables.md %}) to `true`. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/114737) - - `SELECT FOR UPDATE` does not lock completely-`NULL` column families in multi-column-family tables. 
[Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/116836) + - The new implementation of `SELECT FOR UPDATE` is not yet the default setting under `SERIALIZABLE` isolation. It can be used under `SERIALIZABLE` isolation by setting the `optimizer_use_lock_op_for_serializable` [session setting]({% link {{ page.version.version }}/session-variables.md %}) to `true`. Tracking GitHub issue + - `SELECT FOR UPDATE` does not lock completely-`NULL` column families in multi-column-family tables. Tracking GitHub issue #### Read Committed Isolation Limitations - Several capabilities are not yet supported with [Read Committed isolation]({% link {{ page.version.version }}/read-committed.md %}): - - Schema changes (e.g., [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}), [`CREATE SCHEMA`]({% link {{ page.version.version }}/create-schema.md %}), [`CREATE INDEX`]({% link {{ page.version.version }}/create-index.md %})) cannot be performed within explicit `READ COMMITTED` transactions, and will cause transactions to abort. As a workaround, [set the transaction's isolation level]({% link {{ page.version.version }}/read-committed.md %}#set-the-current-transaction-to-read-committed) to `SERIALIZABLE`. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/114778) + - Schema changes (e.g., [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}), [`CREATE SCHEMA`]({% link {{ page.version.version }}/create-schema.md %}), [`CREATE INDEX`]({% link {{ page.version.version }}/create-index.md %})) cannot be performed within explicit `READ COMMITTED` transactions, and will cause transactions to abort. As a workaround, [set the transaction's isolation level]({% link {{ page.version.version }}/read-committed.md %}#set-the-current-transaction-to-read-committed) to `SERIALIZABLE`. 
Tracking GitHub issue - `READ COMMITTED` transactions performing `INSERT`, `UPDATE`, or `UPSERT` cannot access [`REGIONAL BY ROW`]({% link {{ page.version.version }}/table-localities.md %}#regional-by-row-tables) tables in which [`UNIQUE`]({% link {{ page.version.version }}/unique.md %}) and [`PRIMARY KEY`]({% link {{ page.version.version }}/primary-key.md %}) constraints exist, the region is not included in the constraint, and the region cannot be computed from the constraint columns. - - [Shared locks]({% link {{ page.version.version }}/read-committed.md %}#locking-reads) cannot yet be promoted to exclusive locks. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/110435) - - [`SKIP LOCKED`]({% link {{ page.version.version }}/select-for-update.md %}#wait-policies) requests do not check for [replicated locks]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#unreplicated-locks), which can be acquired by `READ COMMITTED` transactions. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/110743) - - Multi-column-family checks during updates are not supported under `READ COMMITTED` isolation. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/112488) + - [Shared locks]({% link {{ page.version.version }}/read-committed.md %}#locking-reads) cannot yet be promoted to exclusive locks. Tracking GitHub issue + - [`SKIP LOCKED`]({% link {{ page.version.version }}/select-for-update.md %}#wait-policies) requests do not check for [replicated locks]({% link {{ page.version.version }}/architecture/transaction-layer.md %}#unreplicated-locks), which can be acquired by `READ COMMITTED` transactions. Tracking GitHub issue + - Multi-column-family checks during updates are not supported under `READ COMMITTED` isolation. Tracking GitHub issue ### `CAST` expressions containing a subquery with an `ENUM` target are not supported -- Casting subqueries to ENUMs in views and UDFs is not supported. 
[Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/108184) +- Casting subqueries to ENUMs in views and UDFs is not supported. Tracking GitHub issue ### Physical cluster replication @@ -90,15 +90,15 @@ This page describes newly identified limitations in the CockroachDB {{page.relea - Expressions (column, index, constraint) in tables. - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/87699) + Tracking GitHub issue - Views. - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/87699) + Tracking GitHub issue - Other user-defined functions. - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/93049) + Tracking GitHub issue #### Limitations on expressions allowed within UDFs @@ -106,11 +106,11 @@ The following are not currently allowed within the body of a [UDF]({% link {{ pa - [Common table expressions]({% link {{ page.version.version }}/common-table-expressions.md %}) (CTE), recursive or non-recursive, are not supported in [user-defined functions]({% link {{ page.version.version }}/user-defined-functions.md %}) (UDF). That is, you cannot use a `WITH` clause in the body of a UDF. - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/92961) + Tracking GitHub issue - References to other user-defined functions. - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/93049) + Tracking GitHub issue ### Table-level restore will not restore user-defined functions @@ -120,13 +120,13 @@ The following are not currently allowed within the body of a [UDF]({% link {{ pa In cases where the partition definition includes a comparison with `NULL` and a query constraint, incorrect query plans are returned. However, this case uses non-standard partitioning which defines partitions which could never hold values, so it is not likely to occur in production environments. 
-[Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/82774) +Tracking GitHub issue ### `null_ordered_last` does not produce correct results with tuples By default, CockroachDB orders `NULL`s before all other values. For compatibility with PostgreSQL, the `null_ordered_last` [session variable]({% link {{ page.version.version }}/set-vars.md %}) was added, which changes the default to order `NULL`s after all other values. This works in most cases, due to some transformations CockroachDB makes in the optimizer to add extra ordering columns. However, it is broken when the ordering column is a tuple. -[Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/93558) +Tracking GitHub issue ### Limitations for `DROP OWNED BY` @@ -140,7 +140,7 @@ By default, CockroachDB orders `NULL`s before all other values. For compatibilit [Spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the [GEOS](https://libgeos.org/) libraries. Users needing spatial features on an ARM Mac may instead [use Rosetta](https://developer.apple.com/documentation/virtualization/running_intel_binaries_in_linux_vms_with_rosetta) to [run the Intel binary]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#install-binary) or use the [Docker image]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#use-docker) distribution. This is expected to be resolved in an upcoming 22.2 patch release. 
-[GitHub tracking issue](https://github.com/cockroachdb/cockroach/issues/93161) +GitHub tracking issue ### Limited SQL cursor support @@ -156,7 +156,7 @@ The following PostgreSQL syntax and features are currently unsupported for [trig {% include {{ page.version.version }}/sql/trigram-unsupported-syntax.md %} -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/41285) +Tracking GitHub Issue ### Statements containing multiple modification subqueries of the same table are disallowed @@ -167,31 +167,31 @@ Statements containing multiple modification subqueries mutating the same row cou Note that if multiple mutations inside the same statement affect different tables with [`FOREIGN KEY`]({% link {{ page.version.version }}/foreign-key.md %}) relations and `ON CASCADE` clauses between them, the results will be different from what is expected in PostgreSQL. -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/70731) +Tracking GitHub Issue ### `transaction_rows_read_err` and `transaction_rows_written_err` do not halt query execution The `transaction_rows_read_err` and `transaction_rows_written_err` [session settings]({% link {{ page.version.version }}/set-vars.md %}) limit the number of rows read or written by a single [transaction]({% link {{ page.version.version }}/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction). These session settings will fail the transaction with an error, but not until the current query finishes executing and the results have been returned to the client. -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/70473) +Tracking GitHub Issue ### `sql.guardrails.max_row_size_err` misses indexed virtual computed columns The `sql.guardrails.max_row_size_err` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) misses large rows caused by indexed virtual computed columns. 
This is because the guardrail only checks the size of primary key rows, not secondary index rows. -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/69540) +Tracking GitHub Issue ### CockroachDB does not allow inverted indexes with `STORING` CockroachDB does not allow inverted indexes with a [`STORING` column]({% link {{ page.version.version }}/create-index.md %}#store-columns). -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/88278) +Tracking GitHub Issue ### `AS OF SYSTEM TIME` does not support placeholders CockroachDB does not support placeholders in [`AS OF SYSTEM TIME`]({% link {{ page.version.version }}/as-of-system-time.md %}). The time value must be embedded in the SQL string. -[Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/30955) +Tracking GitHub issue ### CockroachDB does not properly optimize some left and anti joins with GIN indexes @@ -342,13 +342,13 @@ UNION ALL SELECT * FROM t1 LEFT JOIN t2 ON st_contains(t1.geom, t2.geom) AND t2. (54 rows) ``` -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/59649) +Tracking GitHub Issue ### Inverted join for `tsvector` and `tsquery` types is not supported CockroachDB cannot index-accelerate queries with `@@` predicates when both sides of the operator are variables. 
-[Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/102731) +Tracking GitHub issue ### Using `RESTORE` with multi-region table localities @@ -356,7 +356,7 @@ CockroachDB cannot index-accelerate queries with `@@` predicates when both sides - {% include {{ page.version.version }}/known-limitations/restore-multiregion-match.md %} -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/71071) +Tracking GitHub Issue ### `SET` does not `ROLLBACK` in a transaction @@ -392,7 +392,7 @@ CockroachDB cannot index-accelerate queries with `@@` predicates when both sides {% include {{page.version.version}}/known-limitations/stats-refresh-upgrade.md %} {% include {{ page.version.version }}/known-limitations/forecasted-stats-limitations.md %} -- CockroachDB does not collect statistics for [virtual computed columns]({% link {{ page.version.version }}/computed-columns.md %}). This can prevent the [optimizer]({% link {{ page.version.version }}/cost-based-optimizer.md %}) from accurately calculating the cost of scanning an index on a virtual column, and, transitively, the cost of scanning an [expression index]({% link {{ page.version.version }}/expression-indexes.md %}). [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/68254) +- CockroachDB does not collect statistics for [virtual computed columns]({% link {{ page.version.version }}/computed-columns.md %}). This can prevent the [optimizer]({% link {{ page.version.version }}/cost-based-optimizer.md %}) from accurately calculating the cost of scanning an index on a virtual column, and, transitively, the cost of scanning an [expression index]({% link {{ page.version.version }}/expression-indexes.md %}). 
Tracking GitHub issue ### Differences in syntax and behavior between CockroachDB and PostgreSQL @@ -404,7 +404,7 @@ For a list of known differences in syntax and behavior between CockroachDB and P CockroachDB does not currently support multiple arbiter indexes for [`INSERT ON CONFLICT DO UPDATE`]({% link {{ page.version.version }}/insert.md %}#on-conflict-clause), and will return an error if there are multiple unique or exclusion constraints matching the `ON CONFLICT DO UPDATE` specification. -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/53170) +Tracking GitHub Issue ### Spatial support limitations @@ -412,33 +412,33 @@ CockroachDB supports efficiently storing and querying [spatial data]({% link {{ - Not all [PostGIS spatial functions](https://postgis.net/docs/reference.html) are supported. - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/49203) + Tracking GitHub Issue - The `AddGeometryColumn` [spatial function]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) only allows constant arguments. - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/49402) + Tracking GitHub Issue - The `AddGeometryColumn` spatial function only allows the `true` value for its `use_typmod` parameter. - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/49448) + Tracking GitHub Issue - CockroachDB does not support the `@` operator. Instead of using `@` in spatial expressions, we recommend using the inverse, with `~`. For example, instead of `a @ b`, use `b ~ a`. - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/56124) + Tracking GitHub Issue - CockroachDB does not yet support [`INSERT`]({% link {{ page.version.version }}/insert.md %})s into the [`spatial_ref_sys` table]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-system-tables). 
This limitation also blocks the [`ogr2ogr -f PostgreSQL` file conversion command](https://gdal.org/programs/ogr2ogr.html#cmdoption-ogr2ogr-f). - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/55903) + Tracking GitHub Issue - CockroachDB does not yet support [k-nearest neighbors](https://wikipedia.org/wiki/K-nearest_neighbors_algorithm). - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/55227) + Tracking GitHub Issue - CockroachDB does not support using [schema name prefixes]({% link {{ page.version.version }}/sql-name-resolution.md %}#how-name-resolution-works) to refer to [data types]({% link {{ page.version.version }}/data-types.md %}) with type modifiers (e.g., `public.geometry(linestring, 4326)`). Instead, use fully-unqualified names to refer to data types with type modifiers (e.g., `geometry(linestring,4326)`). Note that, in [`IMPORT PGDUMP`]({% link molt/migrate-to-cockroachdb.md %}) output, [`GEOMETRY` and `GEOGRAPHY`]({% link {{ page.version.version }}/export-spatial-data.md %}) data type names are prefixed by `public.`. If the type has a type modifier, you must remove the `public.` from the type name in order for the statements to work in CockroachDB. - [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/56492) + Tracking GitHub Issue ### Limitations for composite types @@ -446,7 +446,7 @@ CockroachDB supports efficiently storing and querying [spatial data]({% link {{ - {% include {{page.version.version}}/cdc/avro-udt-composite.md %} - {% include {{page.version.version}}/cdc/csv-udt-composite.md %} -- Updating subfields of composite types using dot syntax results in a syntax error. [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/102984) +- Updating subfields of composite types using dot syntax results in a syntax error. 
Tracking GitHub issue ### Enterprise `BACKUP` does not capture database/table/column comments @@ -454,7 +454,7 @@ The [`COMMENT ON`]({% link {{ page.version.version }}/comment-on.md %}) statemen As a workaround, take a cluster backup instead, as the `system.comments` table is included in cluster backups. -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/44396) +Tracking GitHub Issue ### `SHOW BACKUP` does not support symlinks for nodelocal @@ -468,15 +468,15 @@ Accessing the DB Console for a secure cluster now requires login information (i. CockroachDB tries to optimize most comparisons operators in `WHERE` and `HAVING` clauses into constraints on SQL indexes by only accessing selected rows. This is done for `LIKE` clauses when a common prefix for all selected rows can be determined in the search pattern (e.g., `... LIKE 'Joe%'`). However, this optimization is not yet available if the `ESCAPE` keyword is also used. -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/30192) +Tracking GitHub Issue -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/35706) +Tracking GitHub Issue ### Current sequence value not checked when updating min/max value Altering the minimum or maximum value of a series does not check the current value of a series. This means that it is possible to silently set the maximum to a value less than, or a minimum value greater than, the current value. -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/23719) +Tracking GitHub Issue ### Using `default_int_size` session variable in batch of statements @@ -484,7 +484,7 @@ When setting the `default_int_size` [session variable]({% link {{ page.version.v As a workaround, set `default_int_size` via your database driver, or ensure that `SET default_int_size` is in its own statement. 
-[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/32846) +Tracking GitHub Issue ### `COPY` syntax not supported by CockroachDB @@ -498,7 +498,7 @@ As a workaround, set `default_int_size` via your database driver, or ensure that {% include {{ page.version.version }}/known-limitations/partitioning-with-placeholders.md %} -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/19464) +Tracking GitHub Issue ### Dropping a single partition @@ -533,7 +533,7 @@ ERROR: nextval(): unimplemented: cannot evaluate scalar expressions containing s SQLSTATE: 0A000 ~~~ -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/42508) +Tracking GitHub Issue ### Available capacity metric in the DB Console @@ -609,7 +609,7 @@ Many string operations are not properly overloaded for [collated strings]({% lin pq: unsupported binary operator: || ~~~ -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/10679) +Tracking GitHub Issue ### Max size of a single column family @@ -641,10 +641,10 @@ Transactions with [priority `HIGH`]({% link {{ page.version.version }}/transacti ERROR: unimplemented: cannot use ROLLBACK TO SAVEPOINT in a HIGH PRIORITY transaction containing DDL SQLSTATE: 0A000 HINT: You have attempted to use a feature that is not yet implemented. -See: https://github.com/cockroachdb/cockroach/issues/46414 +See: cockroach#46414 ~~~ -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/46414) +Tracking GitHub Issue ### CockroachDB does not test for all connection failure scenarios @@ -656,7 +656,7 @@ However, if there is no host at the target IP address, or if a firewall rule blo - Configure any active network firewalls to allow node-to-node traffic. - Verify that orchestration tools (e.g., Kubernetes) are configured to use the correct network connection information. 
-[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/53410) +Tracking GitHub Issue ### Some column-dropping schema changes do not roll back properly @@ -664,11 +664,11 @@ Some [schema changes]({% link {{ page.version.version }}/online-schema-changes.m In some cases, the rollback will succeed, but the column data might be partially or totally missing, or stale due to the asynchronous nature of the schema change. -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/46541) +Tracking GitHub Issue In other cases, the rollback will fail in such a way that will never be cleaned up properly, leaving the table descriptor in a state where no other schema changes can be run successfully. -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/47712) +Tracking GitHub Issue To reduce the chance that a column drop will roll back incorrectly: @@ -689,7 +689,7 @@ There is no guaranteed state switch from `DECOMMISSIONING` to `DECOMMISSIONED` i This is because the state flip is effected by the CLI program at the end. Only the CLI (or its underlying API call) is able to finalize the "decommissioned" state. If the command is interrupted, or `--wait=none` is used, the state will only flip to "decommissioned" when the CLI program is run again after decommissioning has done all its work. -[Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/94430) +Tracking GitHub issue ### Remove a `UNIQUE` index created as part of `CREATE TABLE` diff --git a/src/current/v23.2/kubernetes-performance.md b/src/current/v23.2/kubernetes-performance.md index 47aef82797d..356ecb6e2fb 100644 --- a/src/current/v23.2/kubernetes-performance.md +++ b/src/current/v23.2/kubernetes-performance.md @@ -20,9 +20,9 @@ Before you focus on optimizing a Kubernetes-orchestrated CockroachDB cluster: A number of independent factors affect performance when running CockroachDB on Kubernetes. 
Most are easiest to change before you create your CockroachDB cluster. If you need to modify a CockroachDB cluster that is already running on Kubernetes, extra care and testing is strongly recommended. -The following sections show how to modify excerpts from our provided Kubernetes configuration YAML files. You can find the most up-to-date versions of these files on GitHub: [one for running CockroachDB in secure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset-secure.yaml) and one for [running CockroachDB in insecure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml). +The following sections show how to modify excerpts from our provided Kubernetes configuration YAML files. You can find the most up-to-date versions of these files on GitHub: [one for running CockroachDB in secure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset-secure.yaml) and one for [running CockroachDB in insecure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml). -You can also use a [performance-optimized configuration file for secure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml) or [insecure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml). Be sure to modify the file wherever there is a `TODO` comment. +You can also use a [performance-optimized configuration file for secure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml) or [insecure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml). 
Be sure to modify the file wherever there is a `TODO` comment. ### Version of CockroachDB @@ -332,7 +332,7 @@ If for some reason setting appropriate resource requests still isn't getting you #### Client applications on the same machines as CockroachDB -Running client applications such as benchmarking applications on the same machines as CockroachDB can be even worse than just having Kubernetes system pods on the same machines. They are very likely to end up competing for resources, because when the applications get more loaded than usual, so will the CockroachDB processes. The best way to avoid this is to [set resource requests and limits](#resource-requests-and-limits), but if you are unwilling or unable to do that for some reason, you can also set [anti-affinity scheduling policies](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) on your client applications. Anti-affinity policies are placed in the pod spec, so if you wanted to change our provided example load generator app, you'd change [these lines](https://github.com/cockroachdb/cockroach/blob/98c506c48f3517d1ac1aadb6a09e1b23ad672c37/cloud/kubernetes/example-app.yaml#L11-L12): +Running client applications such as benchmarking applications on the same machines as CockroachDB can be even worse than just having Kubernetes system pods on the same machines. They are very likely to end up competing for resources, because when the applications get more loaded than usual, so will the CockroachDB processes. The best way to avoid this is to [set resource requests and limits](#resource-requests-and-limits), but if you are unwilling or unable to do that for some reason, you can also set [anti-affinity scheduling policies](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) on your client applications. 
Anti-affinity policies are placed in the pod spec, so if you wanted to change our provided example load generator app, you'd change [these lines](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml): ~~~ yaml spec: diff --git a/src/current/v23.2/log-formats.md b/src/current/v23.2/log-formats.md index 7c715f38c89..cfcf8512b19 100644 --- a/src/current/v23.2/log-formats.md +++ b/src/current/v23.2/log-formats.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/logformats.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/logformats.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v23.2/logging.md b/src/current/v23.2/logging.md index a9b22c4c05f..357817bc212 100644 --- a/src/current/v23.2/logging.md +++ b/src/current/v23.2/logging.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/logging.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/logging.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v23.2/migrate-from-avro.md b/src/current/v23.2/migrate-from-avro.md index 676f5274bff..82e6e8cd09f 100644 --- a/src/current/v23.2/migrate-from-avro.md +++ b/src/current/v23.2/migrate-from-avro.md @@ -163,7 +163,7 @@ There are additional import [options][option] you can use when importing binary - `records_terminated_by`, which specifies the unicode character used to indicate new lines in the input binary or JSON file (default: `\n`). 
{{site.data.alerts.callout_info}} -The following example uses sample data generated by [Avro tools](https://github.com/cockroachdb/cockroach/tree/master/pkg/sql/importer/testdata/avro). +The following example uses sample data generated by Avro tools. {{site.data.alerts.end}} For example, to import the data from `simple-schema.json` into a `simple` table, first [create the table]({% link {{ page.version.version }}/create-table.md %}) to import into. Then run `IMPORT INTO` with the following options: diff --git a/src/current/v23.2/monitor-cockroachdb-kubernetes.md b/src/current/v23.2/monitor-cockroachdb-kubernetes.md index d2ca66e397f..072c93c275f 100644 --- a/src/current/v23.2/monitor-cockroachdb-kubernetes.md +++ b/src/current/v23.2/monitor-cockroachdb-kubernetes.md @@ -128,7 +128,7 @@ If you're on Hosted GKE, before starting, make sure the email address associated {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/prometheus.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/prometheus.yaml ~~~ {{site.data.alerts.callout_info}} @@ -177,14 +177,14 @@ If you're on Hosted GKE, before starting, make sure the email address associated ## Configure Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. +Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. 
This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. -1. Download our alertmanager-config.yaml configuration file: +1. Download our alertmanager-config.yaml configuration file: {% include_cached copy-clipboard.html %} ~~~ shell $ curl -O \ - https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager-config.yaml + https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager-config.yaml ~~~ 1. Edit the `alertmanager-config.yaml` file to [specify the desired receivers for notifications](https://prometheus.io/docs/alerting/configuration/#receiver). Initially, the file contains a placeholder web hook. @@ -214,12 +214,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen The name of the secret, `alertmanager-cockroachdb`, must match the name used in the `alertmanager.yaml` file. If they differ, the Alertmanager instance will start without configuration, and nothing will happen. {{site.data.alerts.end}} -1. Use our [`alertmanager.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: +1. 
Use our [`alertmanager.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml ~~~ ~~~ @@ -244,12 +244,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen Alertmanager -1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml): +1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml): {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alert-rules.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml ~~~ ~~~ diff --git a/src/current/v23.2/monitor-cockroachdb-with-prometheus.md b/src/current/v23.2/monitor-cockroachdb-with-prometheus.md index 7f4d671aa52..8b7f0876475 100644 --- a/src/current/v23.2/monitor-cockroachdb-with-prometheus.md +++ b/src/current/v23.2/monitor-cockroachdb-with-prometheus.md @@ -15,7 +15,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration - Make sure you have already started a CockroachDB cluster, either [locally]({% link {{ page.version.version }}/start-a-local-cluster.md %}) or in a [production environment]({% link {{ page.version.version 
}}/manual-deployment.md %}). -- Note that all files used in this tutorial can be found in the [`monitoring`](https://github.com/cockroachdb/cockroach/tree/master/monitoring) directory of the CockroachDB repository. +- Note that all files used in this tutorial can be found in the `monitoring` directory of the CockroachDB repository. ## Step 1. Install Prometheus @@ -39,11 +39,11 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration ## Step 2. Configure Prometheus -1. Download the starter [Prometheus configuration file](https://github.com/cockroachdb/cockroach/blob/master/monitoring/prometheus.yml) for CockroachDB: +1. Download the starter [Prometheus configuration file](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/prometheus.yml) for CockroachDB: {% include_cached copy-clipboard.html %} ~~~ shell - curl -o prometheus.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/prometheus.yml + curl -o prometheus.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/prometheus.yml ~~~ When you examine the configuration file, you'll see that it is set up to scrape the time series metrics of a single, insecure local node every 10 seconds: @@ -60,7 +60,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration Production cluster | Change the `targets` field to include `'<node hostname>:<http-port>'` for each node in the cluster. Also, be sure your network configuration allows TCP communication on the specified ports. Secure cluster | Uncomment `scheme: 'https'` and comment out `scheme: 'http'`. -1. Create a `rules` directory and download the [aggregation rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/aggregation.rules.yml) and [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/alerts.rules.yml) for CockroachDB into it: +1.
Create a `rules` directory and download the [aggregation rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/aggregation.rules.yml) and [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml) for CockroachDB into it: {% include_cached copy-clipboard.html %} ~~~ shell @@ -74,12 +74,12 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration {% include_cached copy-clipboard.html %} ~~~ shell - curl -o rules/aggregation.rules.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/rules/aggregation.rules.yml + curl -o rules/aggregation.rules.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/rules/aggregation.rules.yml ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o rules/alerts-rules.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/rules/alerts.rules.yml + curl -o rules/alerts-rules.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml ~~~ ## Step 3. Start Prometheus @@ -108,7 +108,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration ## Step 4. Send notifications with Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. In step 2, you already downloaded CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/alerts.rules.yml). Now, download, configure, and start [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). +Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. 
In step 2, you already downloaded CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml). Now, download, configure, and start [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). 1. Download the [latest Alertmanager tarball](https://prometheus.io/download/#alertmanager) for your OS. @@ -173,29 +173,29 @@ Although Prometheus lets you graph metrics, [Grafana](https://grafana.com/) is a Url | `http://<hostname>:9090` Access | Direct -1. Download the starter [Grafana dashboards](https://github.com/cockroachdb/cockroach/tree/master/monitoring/grafana-dashboards/by-cluster) for CockroachDB: +1. Download the starter Grafana dashboards for CockroachDB: {% include_cached copy-clipboard.html %} ~~~ shell - curl -o runtime.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/runtime.json + curl -o runtime.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/runtime.json # runtime dashboard: node status, including uptime, memory, and cpu. ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o storage.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/storage.json + curl -o storage.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/storage.json # storage dashboard: storage availability. ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o sql.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/sql.json + curl -o sql.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/sql.json # sql dashboard: sql queries/transactions.
~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o replication.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/replication.json + curl -o replication.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/replication.json # replicas dashboard: replica information and operations. ~~~ diff --git a/src/current/v23.2/monitoring-and-alerting.md b/src/current/v23.2/monitoring-and-alerting.md index f80cc618eb3..126b6e1aa6c 100644 --- a/src/current/v23.2/monitoring-and-alerting.md +++ b/src/current/v23.2/monitoring-and-alerting.md @@ -1075,7 +1075,7 @@ Start Prometheus and Alertmanager to begin watching for events to alert on. You ### Events to alert on {{site.data.alerts.callout_info}} -Currently, not all events listed have corresponding alert rule definitions available from the `api/v2/rules/` endpoint. Many events not yet available in this manner are defined in the pre-defined alerting rules. For more details, see [Monitor CockroachDB with Prometheus]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}). +Currently, not all events listed have corresponding alert rule definitions available from the `api/v2/rules/` endpoint. Many events not yet available in this manner are defined in the pre-defined alerting rules. For more details, see [Monitor CockroachDB with Prometheus]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}). {{site.data.alerts.end}} #### Node is down @@ -1084,7 +1084,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** If a node is down, its `_status/vars` endpoint will return a `Connection refused` error. Otherwise, the `liveness_livenodes` metric will be the total number of live nodes in the cluster. -- **Rule definition:** Use the `InstanceDead` alert from our pre-defined alerting rules. 
+- **Rule definition:** Use the `InstanceDead` alert from our pre-defined alerting rules. #### Node is restarting too frequently @@ -1092,7 +1092,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the number of times the `sys_uptime` metric in the node's `_status/vars` output was reset back to zero. The `sys_uptime` metric gives you the length of time, in seconds, that the `cockroach` process has been running. -- **Rule definition:** Use the `InstanceFlapping` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `InstanceFlapping` alert from our pre-defined alerting rules. #### Node is running low on disk space @@ -1100,7 +1100,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Divide the `capacity` metric by the `capacity_available` metric in the node's `_status/vars` output. -- **Rule definition:** Use the `StoreDiskLow` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `StoreDiskLow` alert from our pre-defined alerting rules. {% include {{page.version.version}}/storage/free-up-disk-space.md %} @@ -1116,7 +1116,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the `security_certificate_expiration_ca` metric in the node's `_status/vars` output. -- **Rule definition:** Use the `CACertificateExpiresSoon` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `CACertificateExpiresSoon` alert from our pre-defined alerting rules. #### Node certificate expires soon @@ -1124,7 +1124,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the `security_certificate_expiration_node` metric in the node's `_status/vars` output. -- **Rule definition:** Use the `NodeCertificateExpiresSoon` alert from our pre-defined alerting rules. 
+- **Rule definition:** Use the `NodeCertificateExpiresSoon` alert from our pre-defined alerting rules. #### Changefeed is experiencing high latency diff --git a/src/current/v23.2/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md b/src/current/v23.2/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md index 2ebfe678a0a..9d3334e3962 100644 --- a/src/current/v23.2/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md +++ b/src/current/v23.2/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md @@ -71,7 +71,7 @@ Instead of using this approach, you can now [enable global access](https://cloud Our multi-region deployment approach relies on pod IP addresses being routable across three distinct Kubernetes clusters and regions. Both the hosted Google Kubernetes Engine (GKE) and Amazon Elastic Kubernetes Service (EKS) satisfy this requirement. -If you want to run on another cloud or on-premises, use this [basic network test](https://github.com/cockroachdb/cockroach/tree/master/cloud/kubernetes/multiregion#pod-to-pod-connectivity) to see if it will work. +If you want to run on another cloud or on-premises, use this basic network test to see if it will work.
1. Complete the **Before You Begin** steps described in the [Google Kubernetes Engine Quickstart](https://cloud.google.com/kubernetes-engine/docs/quickstart) documentation. @@ -322,21 +322,21 @@ This important rule enables node communication between Kubernetes clusters in di The Kubernetes cluster in each region needs to have a [Network Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/introduction.html) pointed at its CoreDNS service, which you will configure in the next step. -1. Upload our load balancer manifest [`dns-lb-eks.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml) to the Kubernetes clusters in all 3 regions: +1. Upload our load balancer manifest [`dns-lb-eks.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml) to the Kubernetes clusters in all 3 regions: {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context <cluster-context> + kubectl apply -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context <cluster-context> ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context <cluster-context> + kubectl apply -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context <cluster-context> ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context <cluster-context> + kubectl apply -f
https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context ~~~ You should see the load balancer appear in the Load Balancers section of the EC2 console in each region. This load balancer will route traffic to CoreDNS in the region. @@ -369,11 +369,11 @@ Each Kubernetes cluster has a [CoreDNS](https://coredns.io/) service that respon To enable traffic forwarding to CockroachDB pods in all 3 regions, you need to [modify the ConfigMap](https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configmap-options) for the CoreDNS Corefile in each region. -1. Download and open our ConfigMap template [`configmap.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/configmap.yaml): +1. Download and open our ConfigMap template [`configmap.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/configmap.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/configmap.yaml + curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/configmap.yaml ~~~ 1. After [obtaining the IP addresses of the Network Load Balancers](#set-up-load-balancing) in all 3 regions, you can use this information to define a **separate ConfigMap for each region**. Each unique ConfigMap lists the forwarding addresses for the pods in the 2 other regions. 
@@ -467,7 +467,7 @@ If you plan to run your instances exclusively on private subnets, set the follow {% include_cached copy-clipboard.html %} ~~~ shell $ curl -OOOOOOOOO \ - https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/{README.md,client-secure.yaml,cluster-init-secure.yaml,cockroachdb-statefulset-secure.yaml,dns-lb.yaml,example-app-secure.yaml,external-name-svc.yaml,setup.py,teardown.py} + https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/{README.md,client-secure.yaml,cluster-init-secure.yaml,cockroachdb-statefulset-secure.yaml,dns-lb.yaml,example-app-secure.yaml,external-name-svc.yaml,setup.py,teardown.py} ~~~ 1. Retrieve the `kubectl` "contexts" for your clusters: @@ -685,11 +685,11 @@ The below steps use [`cockroach cert` commands]({% link {{ page.version.version ### Create StatefulSets -1. Download and open our [multi-region StatefulSet configuration](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml). You'll save three versions of this file locally, one for each set of 3 CockroachDB nodes per region. +1. Download and open our [multi-region StatefulSet configuration](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml). You'll save three versions of this file locally, one for each set of 3 CockroachDB nodes per region. {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml ~~~ Look for **TODO** comments in the file. 
These highlight fields you need to define before deploying your StatefulSet. @@ -814,7 +814,7 @@ The pod uses the `root` client certificate created earlier by the `setup.py` scr {% include_cached copy-clipboard.html %} ~~~ shell - kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/client-secure.yaml --context --namespace + kubectl create -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/client-secure.yaml --context --namespace ~~~ ~~~ diff --git a/src/current/v23.2/partial-indexes.md b/src/current/v23.2/partial-indexes.md index ba2b096d089..137429b4674 100644 --- a/src/current/v23.2/partial-indexes.md +++ b/src/current/v23.2/partial-indexes.md @@ -88,9 +88,9 @@ You can force queries [to use a specific partial index]({% link {{ page.version. ## Known limitations -- CockroachDB does not currently support [`IMPORT`]({% link {{ page.version.version }}/import.md %}) statements on tables with partial indexes. See [tracking issue](https://github.com/cockroachdb/cockroach/issues/50225). -- CockroachDB does not currently support multiple arbiter indexes for `INSERT ON CONFLICT DO UPDATE`, and will return an error if there are multiple unique or exclusion constraints matching the `ON CONFLICT DO UPDATE` specification. See [tracking issue](https://github.com/cockroachdb/cockroach/issues/53170). -- CockroachDB prevents a column from being dropped using [`ALTER TABLE ... DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) if it is referenced by a partial index predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). See [tracking issue](https://github.com/cockroachdb/cockroach/issues/97813). 
+- CockroachDB does not currently support [`IMPORT`]({% link {{ page.version.version }}/import.md %}) statements on tables with partial indexes. See the tracking issue. +- CockroachDB does not currently support multiple arbiter indexes for `INSERT ON CONFLICT DO UPDATE`, and will return an error if there are multiple unique or exclusion constraints matching the `ON CONFLICT DO UPDATE` specification. See the tracking issue. +- CockroachDB prevents a column from being dropped using [`ALTER TABLE ... DROP COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#drop-column) if it is referenced by a partial index predicate. To drop such a column, the partial indexes need to be dropped first using [`DROP INDEX`]({% link {{ page.version.version }}/drop-index.md %}). See the tracking issue. ## Examples diff --git a/src/current/v23.2/postgresql-compatibility.md b/src/current/v23.2/postgresql-compatibility.md index 319e81338f0..56cf103b710 100644 --- a/src/current/v23.2/postgresql-compatibility.md +++ b/src/current/v23.2/postgresql-compatibility.md @@ -34,11 +34,11 @@ When set to `true`, multiple portals can be open at the same time, with their ex This feature has the following limitations: - Only read-only [`SELECT` queries]({% link {{ page.version.version }}/selection-queries.md %}) without [subqueries]({% link {{ page.version.version }}/subqueries.md %}) are supported.
-- Postqueries (which are how CockroachDB executes [foreign key checks]({% link {{ page.version.version }}/foreign-key.md %}), for example) are not supported - [cockroachdb/cockroach#96398](https://github.com/cockroachdb/cockroach/issues/96398) -- [Distributed SQL execution]({% link {{ page.version.version }}/architecture/sql-layer.md %}#distsql) is not supported for multiple active portals; instead queries execute on the [gateway node]({% link {{ page.version.version }}/architecture/life-of-a-distributed-transaction.md %}#gateway) only - [cockroachdb/cockroach#100822](https://github.com/cockroachdb/cockroach/issues/100822) -- Only the latest execution of a statement from a pausable portal is recorded by the [trace infrastructure]({% link {{ page.version.version }}/show-trace.md %}) - [cockroachdb/cockroach#99404](https://github.com/cockroachdb/cockroach/issues/99404) +- Postqueries (which are how CockroachDB executes [foreign key checks]({% link {{ page.version.version }}/foreign-key.md %}), for example) are not supported - cockroachdb/cockroach#96398 +- [Distributed SQL execution]({% link {{ page.version.version }}/architecture/sql-layer.md %}#distsql) is not supported for multiple active portals; instead queries execute on the [gateway node]({% link {{ page.version.version }}/architecture/life-of-a-distributed-transaction.md %}#gateway) only - cockroachdb/cockroach#100822 +- Only the latest execution of a statement from a pausable portal is recorded by the [trace infrastructure]({% link {{ page.version.version }}/show-trace.md %}) - cockroachdb/cockroach#99404 -In addition to the known issues, additional performance testing is needed. The current list of known issues can be viewed [on GitHub using the `A-pausable-portals` label](https://github.com/cockroachdb/cockroach/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc++label%3AA-pausable-portals+), and we welcome bug reports. +In addition to the known issues, additional performance testing is needed. 
The current list of known issues can be viewed on GitHub using the `A-pausable-portals` label, and we welcome bug reports. ## Features that differ from PostgreSQL @@ -181,7 +181,7 @@ An `x` value less than `1` would result in the following error: pq: check constraint violated ~~~ -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/35370) +Tracking GitHub Issue ### Column name from an outer column inside a subquery @@ -207,7 +207,7 @@ PostgreSQL: 1 ~~~ -[Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/46563) +Tracking GitHub Issue ### SQL Compatibility diff --git a/src/current/v23.2/query-spatial-data.md b/src/current/v23.2/query-spatial-data.md index 7b5f5aadef4..8bfb8878268 100644 --- a/src/current/v23.2/query-spatial-data.md +++ b/src/current/v23.2/query-spatial-data.md @@ -24,7 +24,7 @@ Just as CockroachDB strives for [PostgreSQL compatibility]({% link {{ page.versi CockroachDB does not implement the full list of PostGIS built-in functions and operators. Also, [spatial indexing works differently]({% link {{ page.version.version }}/spatial-indexes.md %}) (see the [Performance](#performance) section below). For a list of the spatial functions CockroachDB supports, see [Geospatial functions]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions). -If your application needs support for functions that are not yet implemented, check the [meta-issue for built-in function support on GitHub](https://github.com/cockroachdb/cockroach/issues/49203), which describes how to find an issue for the built-in function(s) you need. +If your application needs support for functions that are not yet implemented, check the meta-issue for built-in function support on GitHub, which describes how to find an issue for the built-in function(s) you need. For a list of other known limitations, see [Known Limitations]({% link {{ page.version.version }}/known-limitations.md %}#spatial-support-limitations). 
diff --git a/src/current/v23.2/read-committed.md b/src/current/v23.2/read-committed.md index 94c697270f2..fea46dbbcf2 100644 --- a/src/current/v23.2/read-committed.md +++ b/src/current/v23.2/read-committed.md @@ -22,7 +22,7 @@ Whereas `SERIALIZABLE` isolation guarantees data correctness by placing transact If your workload is already running well under `SERIALIZABLE` isolation, Cockroach Labs does not recommend changing to `READ COMMITTED` isolation unless there is a specific need. {{site.data.alerts.callout_info}} -`READ COMMITTED` on CockroachDB provides stronger isolation than `READ COMMITTED` on PostgreSQL. On CockroachDB, `READ COMMITTED` prevents anomalies within single statements. For complete details on how `READ COMMITTED` is implemented on CockroachDB, see the [Read Committed RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20230122_read_committed_isolation.md). +`READ COMMITTED` on CockroachDB provides stronger isolation than `READ COMMITTED` on PostgreSQL. On CockroachDB, `READ COMMITTED` prevents anomalies within single statements. For complete details on how `READ COMMITTED` is implemented on CockroachDB, see the [Read Committed RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20230122_read_committed_isolation.md). {{site.data.alerts.end}} ## Enable `READ COMMITTED` isolation @@ -941,5 +941,5 @@ The following affect the performance of `READ COMMITTED` transactions: - [`SELECT ... 
FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) - [Serializable Transactions]({% link {{ page.version.version }}/demo-serializable.md %}) - [What Write Skew Looks Like](https://www.cockroachlabs.com/blog/what-write-skew-looks-like/) -- [Read Committed RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20230122_read_committed_isolation.md) +- [Read Committed RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20230122_read_committed_isolation.md) - [Migration Overview]({% link molt/migration-overview.md %}) diff --git a/src/current/v23.2/restore.md b/src/current/v23.2/restore.md index 035a2fad37b..ca5fabde732 100644 --- a/src/current/v23.2/restore.md +++ b/src/current/v23.2/restore.md @@ -219,11 +219,11 @@ When restoring an individual table that references a user-defined type (e.g., [` - If there is an existing type in the cluster with the same name that is compatible with the type in the backup, CockroachDB will map the type in the backup to the type in the cluster. - If there is an existing type in the cluster with the same name but it is _not_ compatible with the type in the backup, the restore will not succeed and you will be asked to resolve the naming conflict. You can do this by either [dropping]({% link {{ page.version.version }}/drop-type.md %}) or [renaming]({% link {{ page.version.version }}/alter-type.md %}) the existing user-defined type. -In general, two types are compatible if they are the same kind (e.g., an enum is only compatible with other enums). Additionally, enums are only compatible if they have the same ordered set of elements that have also been [created in the same way](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20200331_enums.md#physical-layout). For example: +In general, two types are compatible if they are the same kind (e.g., an enum is only compatible with other enums). 
Additionally, enums are only compatible if they have the same ordered set of elements that have also been [created in the same way](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20200331_enums.md#physical-layout). For example: - `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes', 'no')` are compatible. - `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('no', 'yes')` are not compatible. -- `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes'); ALTER TYPE t2 ADD VALUE ('no')` are not compatible because they were not created in the same way. +- `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes'); ALTER TYPE t2 ADD VALUE ('no')` are not compatible because they were not [created in the same way](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20200331_enums.md#physical-layout). ### Object dependencies @@ -586,7 +586,7 @@ For more detail on using this option with `BACKUP`, see [Incremental backups wit ## Known limitations -- To successfully [restore a table into a multi-region database](#restoring-to-multi-region-databases), it is necessary for the order and regions to match between the source and destination database. See the [Known Limitations]({% link {{ page.version.version }}/known-limitations.md %}#using-restore-with-multi-region-table-localities) page for detail on ordering and matching regions. [Tracking GitHub Issue](https://github.com/cockroachdb/cockroach/issues/71071) +- To successfully [restore a table into a multi-region database](#restoring-to-multi-region-databases), it is necessary for the order and regions to match between the source and destination database. See the [Known Limitations]({% link {{ page.version.version }}/known-limitations.md %}#using-restore-with-multi-region-table-localities) page for detail on ordering and matching regions. 
Tracking GitHub Issue - {% include {{ page.version.version }}/known-limitations/restore-tables-non-multi-reg.md %} - {% include {{ page.version.version }}/known-limitations/restore-udf.md %} diff --git a/src/current/v23.2/schedule-cockroachdb-kubernetes.md b/src/current/v23.2/schedule-cockroachdb-kubernetes.md index 973d234ffa9..de53f7c8449 100644 --- a/src/current/v23.2/schedule-cockroachdb-kubernetes.md +++ b/src/current/v23.2/schedule-cockroachdb-kubernetes.md @@ -108,7 +108,7 @@ For more context on how these rules work, see the [Kubernetes documentation](htt Specify pod affinities and anti-affinities in `affinity.podAffinity` and `affinity.podAntiAffinity` in the Operator's custom resource, which is used to [deploy the cluster]({% link {{ page.version.version }}/deploy-cockroachdb-with-kubernetes.md %}#initialize-the-cluster). If you specify multiple `matchExpressions` labels, the node must match all of them. If you specify multiple `values` for a label, the node can match any of the values. -The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. +The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. 
{% include_cached copy-clipboard.html %} ~~~ yaml diff --git a/src/current/v23.2/spatial-tutorial.md b/src/current/v23.2/spatial-tutorial.md index 1cfbecc588f..9bcf8e883ee 100644 --- a/src/current/v23.2/spatial-tutorial.md +++ b/src/current/v23.2/spatial-tutorial.md @@ -964,7 +964,7 @@ Time: 1.447s total (execution 1.446s / network 0.000s) Unfortunately, this query is a bit slower than you would like: about 1.5 seconds on a single-node [`cockroach demo`]({% link {{ page.version.version }}/cockroach-demo.md %}) cluster on a laptop. There are several reasons for this: 1. You haven't created any indexes at all yet. The query is likely to be doing full table scans, which you will need to hunt down with [`EXPLAIN`]({% link {{ page.version.version }}/explain.md %}). -1. CockroachDB does not yet have built-in support for index-based nearest neighbor queries. If this feature is important to you, please comment with some information about your use case on [cockroachdb/cockroach#55227](https://github.com/cockroachdb/cockroach/issues/55227). +1. CockroachDB does not yet have built-in support for index-based nearest neighbor queries. If this feature is important to you, please comment with some information about your use case on cockroachdb/cockroach#55227. 
Let's look at the `EXPLAIN` output to see if there is something that can be done to improve this query's performance: diff --git a/src/current/v23.2/sql-feature-support.md b/src/current/v23.2/sql-feature-support.md index afcba09096c..fe520c51c34 100644 --- a/src/current/v23.2/sql-feature-support.md +++ b/src/current/v23.2/sql-feature-support.md @@ -193,7 +193,7 @@ XML | ✗ | Standard | XML data can be stored as `BYTES`, but we do not offer XM Common table expressions | Partial | Common Extension | [Common Table Expressions documentation]({% link {{ page.version.version }}/common-table-expressions.md %}) Stored procedures | Partial | Common Extension | [Stored procedures documentation]({% link {{ page.version.version }}/stored-procedures.md %}) Cursors | Partial | Standard | [Cursors documentation]({% link {{ page.version.version }}/cursors.md %}) - Triggers | ✗ | Standard | Execute a set of commands whenever a specified event occurs. [GitHub issue tracking trigger support](https://github.com/cockroachdb/cockroach/issues/28296). + Triggers | ✗ | Standard | Execute a set of commands whenever a specified event occurs. GitHub issue tracking trigger support. Row-level TTL | ✓ | Common Extension | Automatically delete expired rows. For more information, see [Batch-delete expired data with Row-Level TTL]({% link {{ page.version.version }}/row-level-ttl.md %}). User-defined functions | Partial | Standard | [User-Defined Functions documentation]({% link {{ page.version.version }}/user-defined-functions.md %}) - `CREATE EXTENSION "uuid-ossp"` | ✓ | Common Extension | Provides access to several additional [UUID generation functions]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions). Note that these UUID functions are available without typing `CREATE EXTENSION "uuid-ossp"`. CockroachDB does not have full support for `CREATE EXTENSION`. 
[GitHub issue tracking `CREATE EXTENSION` support](https://github.com/cockroachdb/cockroach/issues/74777). + `CREATE EXTENSION "uuid-ossp"` | ✓ | Common Extension | Provides access to several additional [UUID generation functions]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions). Note that these UUID functions are available without typing `CREATE EXTENSION "uuid-ossp"`. CockroachDB does not have full support for `CREATE EXTENSION`. GitHub issue tracking `CREATE EXTENSION` support. diff --git a/src/current/v23.2/sql-name-resolution.md b/src/current/v23.2/sql-name-resolution.md index 554a9a65583..9600bfe1ef7 100644 --- a/src/current/v23.2/sql-name-resolution.md +++ b/src/current/v23.2/sql-name-resolution.md @@ -37,7 +37,7 @@ If you are upgrading to {{ page.version.version }}, take any combination of the - [Create new schemas]({% link {{ page.version.version }}/create-schema.md %}) in databases on your cluster. After the schemas are created, use [`ALTER TABLE ... RENAME`]({% link {{ page.version.version }}/alter-table.md %}#rename-to), [`ALTER SEQUENCE ... RENAME`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE ... RENAME`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW ... RENAME`]({% link {{ page.version.version }}/alter-view.md %}) statements to move objects between databases as needed. To move objects between schemas, use [`ALTER TABLE ... SET SCHEMA`]({% link {{ page.version.version }}/alter-table.md %}#set-schema), [`ALTER SEQUENCE ... SET SCHEMA`]({% link {{ page.version.version }}/alter-sequence.md %}), or [`ALTER VIEW ... SET SCHEMA`]({% link {{ page.version.version }}/alter-view.md %}). 
-- If your cluster contains cross-database references (e.g., a cross-database foreign key reference, or a cross-database view reference), use the relevant [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}), [`ALTER SEQUENCE`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW `]({% link {{ page.version.version }}/alter-view.md %}) statements to move any cross-referencing objects to the same database, but different schemas. Cross-database object references were allowed in earlier versions of CockroachDB to make database-object naming hierarchies more flexible for users. In v20.2, creating cross-database references are disabled for [foreign keys](foreign-key.html), [views]({% link {{ page.version.version }}/views.md %}), and [sequence ownership]({% link {{ page.version.version }}/create-sequence.md %}). For details, see [tracking issue](https://github.com/cockroachdb/cockroach/issues/55791). +- If your cluster contains cross-database references (e.g., a cross-database foreign key reference, or a cross-database view reference), use the relevant [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}), [`ALTER SEQUENCE`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW `]({% link {{ page.version.version }}/alter-view.md %}) statements to move any cross-referencing objects to the same database, but different schemas. Cross-database object references were allowed in earlier versions of CockroachDB to make database-object naming hierarchies more flexible for users. In v20.2, creating cross-database references are disabled for [foreign keys](foreign-key.html), [views]({% link {{ page.version.version }}/views.md %}), and [sequence ownership]({% link {{ page.version.version }}/create-sequence.md %}). For details, see tracking issue. 
## How name resolution works diff --git a/src/current/v23.2/srid-4326.md b/src/current/v23.2/srid-4326.md index cfd15368e47..c344ee62835 100644 --- a/src/current/v23.2/srid-4326.md +++ b/src/current/v23.2/srid-4326.md @@ -114,7 +114,7 @@ ERROR: st_contains(): operation on mixed SRIDs forbidden: (Point, 0) != (Point, ## Known limitations {{site.data.alerts.callout_info}} -Defining a custom SRID by inserting rows into [`spatial_ref_sys`]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial_ref_sys) is not currently supported. For more information, see the tracking issue [cockroachdb/cockroach#55903](https://github.com/cockroachdb/cockroach/issues/55903). +Defining a custom SRID by inserting rows into [`spatial_ref_sys`]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial_ref_sys) is not currently supported. For more information, see the tracking issue cockroachdb/cockroach#55903. {{site.data.alerts.end}} ## See also diff --git a/src/current/v23.2/st_union.md b/src/current/v23.2/st_union.md index 290e6481fe6..133ed288db2 100644 --- a/src/current/v23.2/st_union.md +++ b/src/current/v23.2/st_union.md @@ -12,7 +12,7 @@ Given a set of shapes (e.g., from a [selection query]({% link {{ page.version.ve - [`GEOMETRY`]({% link {{ page.version.version }}/architecture/glossary.md %}#geometry) {{site.data.alerts.callout_info}} -The non-aggregate version of `ST_Union` is not yet implemented. For more information, see [cockroach#49064](https://github.com/cockroachdb/cockroach/issues/49064). +The non-aggregate version of `ST_Union` is not yet implemented. For more information, see cockroach#49064. 
{{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v23.2/temporary-tables.md b/src/current/v23.2/temporary-tables.md index bd3a9adefdd..0eda33fc45f 100644 --- a/src/current/v23.2/temporary-tables.md +++ b/src/current/v23.2/temporary-tables.md @@ -10,7 +10,7 @@ docs_area: develop To create a temp table, add `TEMP`/`TEMPORARY` to a [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}) or [`CREATE TABLE AS`]({% link {{ page.version.version }}/create-table-as.md %}) statement. For full syntax details, see the [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}#synopsis) and [`CREATE TABLE AS`]({% link {{ page.version.version }}/create-table-as.md %}#synopsis) pages. For example usage, see [Examples](#examples). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v23.2/trigram-indexes.md b/src/current/v23.2/trigram-indexes.md index 474b258163a..aeb27eb78e0 100644 --- a/src/current/v23.2/trigram-indexes.md +++ b/src/current/v23.2/trigram-indexes.md @@ -350,7 +350,7 @@ CREATE INDEX ON t USING GIN ((json_col->>'json_text_field')) ## Unsupported features -The following PostgreSQL syntax and features are currently unsupported. For details, see the [tracking issue](https://github.com/cockroachdb/cockroach/issues/41285). +The following PostgreSQL syntax and features are currently unsupported. For details, see the tracking issue. 
{% include {{ page.version.version }}/sql/trigram-unsupported-syntax.md %} diff --git a/src/current/v23.2/upgrade-cockroach-version.md b/src/current/v23.2/upgrade-cockroach-version.md index e7bff0217a9..e41fed2eb9e 100644 --- a/src/current/v23.2/upgrade-cockroach-version.md +++ b/src/current/v23.2/upgrade-cockroach-version.md @@ -142,16 +142,16 @@ By default, after all nodes are running the new version, the upgrade process wil When upgrading from {{ previous_version }} to {{ page.version.version }}, certain features and performance improvements will be enabled only after finalizing the upgrade, including but not limited to: -- The coalescing of storage ranges for each table, index, or partition (collectively referred to as "schema objects") into a single range when individual schema objects are smaller than the default configured maximum range size (controlled using zone configs, specifically the `range_max_bytes parameter`). This change improves scalability with respect to the number of schema objects, since the underlying range count is no longer a potential performance bottleneck. After finalizing the upgrade to v23.2, you may observe a round of range merges and snapshot transfers. To disable this optimization, **before finalizing the upgrade**, set the `spanconfig.storage_coalesce_adjacent.enabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) to `false`. See the [v23.1 release notes]({% link releases/v23.1.md %}) for `SHOW RANGES` for more details. [#102961][#102961] -- The new output log format, which allows configuration of a time zone in log output. Before configuring a time zone, the cluster must be finalized on v23.2. [#104265][#104265] -- Performance improvements when a node reclaims disk space. 
[#106177][#106177] -- The following [admission control]({% link {{ page.version.version }}/admission-control.md %}#operations-subject-to-admission-control) mechanisms, which help to maintain cluster performance and availability when some nodes experience high load:
  • Delete operations
  • Replication
  • [#98308][#98308] -- Collecting a statement diagnostic bundle for a particular plan. The existing fingerprint-based matching has been extended to also include plan-gist-based matching and "anti-matching" (collecting a bundle for any plan other than the provided plan gist). [#105477][#105477] -- A new system table, `system.region_liveness`, that tracks the availability and the timestamp of the latest unavailability for each cluster region. [#107903][#107903] -- The ability of a `WaitPolicy_Error` request to push the timestamp of a transaction with a lower priority. [#108190][#108190] -- Configuring a changefeed with the `lagging_ranges_threshold` or `lagging_ranges_polling_interval` [changefeed options]({% link {{ page.version.version }}/create-changefeed.md %}#options). [#110649][#110649] -- Removal of the upgrade step `grantExecuteToPublicOnAllFunctions`, which is no longer required because post-serialization changes now grant `EXECUTE` on functions to the public role. [#114203][#114203] -- A fix to a bug that could allow a user to execute a user-defined function without the `EXECUTE` privilege on the function. If a user does not have the privilege, the user-defined function does not run and an error is logged. [#114203][#114203] +- The coalescing of storage ranges for each table, index, or partition (collectively referred to as "schema objects") into a single range when individual schema objects are smaller than the default configured maximum range size (controlled using zone configs, specifically the `range_max_bytes parameter`). This change improves scalability with respect to the number of schema objects, since the underlying range count is no longer a potential performance bottleneck. After finalizing the upgrade to v23.2, you may observe a round of range merges and snapshot transfers. 
To disable this optimization, **before finalizing the upgrade**, set the `spanconfig.storage_coalesce_adjacent.enabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) to `false`. See the [v23.1 release notes]({% link releases/v23.1.md %}) for `SHOW RANGES` for more details. #102961 +- The new output log format, which allows configuration of a time zone in log output. Before configuring a time zone, the cluster must be finalized on v23.2. #104265 +- Performance improvements when a node reclaims disk space. #106177 +- The following [admission control]({% link {{ page.version.version }}/admission-control.md %}#operations-subject-to-admission-control) mechanisms, which help to maintain cluster performance and availability when some nodes experience high load:
    • Delete operations
    • Replication
    • #98308 +- Collecting a statement diagnostic bundle for a particular plan. The existing fingerprint-based matching has been extended to also include plan-gist-based matching and "anti-matching" (collecting a bundle for any plan other than the provided plan gist). #105477 +- A new system table, `system.region_liveness`, that tracks the availability and the timestamp of the latest unavailability for each cluster region. #107903 +- The ability of a `WaitPolicy_Error` request to push the timestamp of a transaction with a lower priority. #108190 +- Configuring a changefeed with the `lagging_ranges_threshold` or `lagging_ranges_polling_interval` [changefeed options]({% link {{ page.version.version }}/create-changefeed.md %}#options). #110649 +- Removal of the upgrade step `grantExecuteToPublicOnAllFunctions`, which is no longer required because post-serialization changes now grant `EXECUTE` on functions to the public role. #114203 +- A fix to a bug that could allow a user to execute a user-defined function without the `EXECUTE` privilege on the function. If a user does not have the privilege, the user-defined function does not run and an error is logged. #114203 For more details about a given feature, refer to the [CockroachDB v23.2.0 release notes]({% link releases/v23.2.md %}#v23-2-0). 
@@ -339,13 +339,3 @@ In the event of catastrophic failure or corruption, the only option will be to s - [View Version Details]({% link {{ page.version.version }}/cockroach-version.md %}) - [Release notes for our latest version]({% link releases/{{page.version.version}}.md %}) -[#102961]: https://github.com/cockroachdb/cockroach/pull/102961 -[#104265]: https://github.com/cockroachdb/cockroach/pull/104265 -[#107474]: https://github.com/cockroachdb/cockroach/pull/107474 -[#106177]: https://github.com/cockroachdb/cockroach/pull/106177 -[#98308]: https://github.com/cockroachdb/cockroach/pull/98308 -[#105477]: https://github.com/cockroachdb/cockroach/pull/105477 -[#107903]: https://github.com/cockroachdb/cockroach/pull/107903 -[#108190]: https://github.com/cockroachdb/cockroach/pull/108190 -[#110649]: https://github.com/cockroachdb/cockroach/pull/110649 -[#114203]: https://github.com/cockroachdb/cockroach/pull/114203 diff --git a/src/current/v23.2/user-defined-functions.md b/src/current/v23.2/user-defined-functions.md index fb0684166e3..63aeda3347d 100644 --- a/src/current/v23.2/user-defined-functions.md +++ b/src/current/v23.2/user-defined-functions.md @@ -297,15 +297,15 @@ User-defined functions are not currently supported in: - Expressions (column, index, constraint) in tables. - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/87699) + Tracking GitHub issue - Views. - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/87699) + Tracking GitHub issue - Other user-defined functions. - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/93049) + Tracking GitHub issue ### Limitations on UDF creation @@ -313,11 +313,11 @@ The following cannot be used in UDF definitions: - `OUT` and `INOUT` argument modes. - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/100405) + Tracking GitHub issue - `RECORD` input arguments. 
- [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/105713) + Tracking GitHub issue ### Limitations on expressions allowed within UDFs @@ -325,15 +325,15 @@ The following are not currently allowed within the body of a UDF: - CTEs (common table expressions). - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/92961) + Tracking GitHub issue - References to other user-defined functions. - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/93049) + Tracking GitHub issue - [DDL statements]({% link {{ page.version.version }}/sql-statements.md %}#data-definition-statements) (e.g., `CREATE TABLE`, `CREATE INDEX`). - [Tracking GitHub issue](https://github.com/cockroachdb/cockroach/issues/110080) + Tracking GitHub issue ## See also diff --git a/src/current/v23.2/vectorized-execution.md b/src/current/v23.2/vectorized-execution.md index f8ab08de50b..2e873cbc6e9 100644 --- a/src/current/v23.2/vectorized-execution.md +++ b/src/current/v23.2/vectorized-execution.md @@ -69,7 +69,7 @@ You can also configure a node's total budget for in-memory query processing with The vectorized engine does not support queries containing: -- A join filtered with an [`ON` expression]({% link {{ page.version.version }}/joins.md %}#supported-join-conditions). See [tracking issue](https://github.com/cockroachdb/cockroach/issues/38018). +- A join filtered with an [`ON` expression]({% link {{ page.version.version }}/joins.md %}#supported-join-conditions). See tracking issue. ### Spatial features diff --git a/src/current/v23.2/views.md b/src/current/v23.2/views.md index 89357fb6d52..2ae0d9f36f0 100644 --- a/src/current/v23.2/views.md +++ b/src/current/v23.2/views.md @@ -634,7 +634,7 @@ To speed up queries on materialized views, you can add an [index]({% link {{ pag CockroachDB supports session-scoped temporary views. 
Unlike persistent views, temporary views can only be accessed from the session in which they were created, and they are dropped at the end of the session. You can create temporary views on both persistent tables and [temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v24.1/admission-control.md b/src/current/v24.1/admission-control.md index e57a49b142a..d21572c0e3b 100644 --- a/src/current/v24.1/admission-control.md +++ b/src/current/v24.1/admission-control.md @@ -22,7 +22,7 @@ Admission control works on a per-[node]({% link {{ page.version.version }}/archi For more details about how the admission control system works, see: -- The [Admission Control tech note](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/admission_control.md). +- [The Admission Control tech note](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/tech-notes/admission_control.md). - The blog post [Here's how CockroachDB keeps your database from collapsing under load](https://www.cockroachlabs.com/blog/admission-control-in-cockroachdb/). ## Use cases for admission control @@ -148,6 +148,6 @@ The [DB Console Overload dashboard]({% link {{ page.version.version }}/ui-overlo ## See also - The [Overload Dashboard]({% link {{ page.version.version }}/ui-overload-dashboard.md %}) in the [DB Console]({% link {{ page.version.version }}/ui-overview.md %}). -- The [technical note for admission control](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/admission_control.md) for details on the design of the admission control system. 
+- The technical note for admission control for details on the design of the admission control system. - The blog post [Here's how CockroachDB keeps your database from collapsing under load](https://www.cockroachlabs.com/blog/admission-control-in-cockroachdb/). - The blog post [Rubbing Control Theory on the Go scheduler](https://www.cockroachlabs.com/blog/rubbing-control-theory/). diff --git a/src/current/v24.1/architecture/sql-layer.md b/src/current/v24.1/architecture/sql-layer.md index bfb67a48c05..2605879b93d 100644 --- a/src/current/v24.1/architecture/sql-layer.md +++ b/src/current/v24.1/architecture/sql-layer.md @@ -117,7 +117,7 @@ It's also important––for indexed columns––that this byte encoding preser However, for non-indexed columns (e.g., non-`PRIMARY KEY` columns), CockroachDB instead uses an encoding (known as "value encoding") which consumes less space but does not preserve ordering. -You can find more exhaustive detail in the [Encoding Tech Note](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/encoding.md). +You can find more exhaustive detail in [the Encoding Tech Note](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/tech-notes/encoding.md). ### DistSQL @@ -134,7 +134,7 @@ To run SQL statements in a distributed fashion, we introduce a couple of concept - **Logical plan**: Similar to the AST/`planNode` tree described above, it represents the abstract (non-distributed) data flow through computation stages. - **Physical plan**: A physical plan is conceptually a mapping of the logical plan nodes to physical machines running `cockroach`. Logical plan nodes are replicated and specialized depending on the cluster topology. Like `planNodes` above, these components of the physical plan are scheduled and run on the cluster. -You can find much greater detail in the [DistSQL RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20160421_distributed_sql.md). 
+You can find much greater detail in [the DistSQL RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20160421_distributed_sql.md). ## Schema changes diff --git a/src/current/v24.1/cluster-settings.md b/src/current/v24.1/cluster-settings.md index 1d4f9ce0a72..d010bbad362 100644 --- a/src/current/v24.1/cluster-settings.md +++ b/src/current/v24.1/cluster-settings.md @@ -26,7 +26,7 @@ These cluster settings have a broad impact on CockroachDB internals and affect a {% include {{page.version.version}}/sql/sql-defaults-cluster-settings-deprecation-notice.md %} -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/settings/settings.html %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/settings/settings.html{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v24.1/cluster-setup-troubleshooting.md b/src/current/v24.1/cluster-setup-troubleshooting.md index 8e4c67bf8fc..309ec038836 100644 --- a/src/current/v24.1/cluster-setup-troubleshooting.md +++ b/src/current/v24.1/cluster-setup-troubleshooting.md @@ -104,7 +104,7 @@ If you are trying to run all nodes on the same machine, you might get the follow #### Store directory already exists ~~~ -ERROR: could not cleanup temporary directories from record file: could not lock temporary directory /Users/amruta/go/src/github.com/cockroachdb/cockroach/cockroach-data/cockroach-temp301343769, may still be in use: IO error: While lock file: /Users/amruta/go/src/github.com/cockroachdb/cockroach/cockroach-data/cockroach-temp301343769/TEMP_DIR.LOCK: Resource temporarily unavailable +ERROR: could not cleanup temporary directories from record file: could not lock temporary directory /cockroach-data/cockroach-temp301343769, may still be in use: IO error: While lock file: /cockroach-data/cockroach-temp301343769/TEMP_DIR.LOCK: Resource temporarily unavailable ~~~ 
**Explanation:** When starting a new node on the same machine, the directory you choose to store the data in also contains metadata identifying the cluster the data came from. This causes conflicts when you've already started a node on the server and then tried to start another cluster using the same directory. diff --git a/src/current/v24.1/cockroachdb-feature-availability.md b/src/current/v24.1/cockroachdb-feature-availability.md index b5dcb68812a..170f82245c3 100644 --- a/src/current/v24.1/cockroachdb-feature-availability.md +++ b/src/current/v24.1/cockroachdb-feature-availability.md @@ -140,7 +140,7 @@ Example: ### Check for constraint violations with `SCRUB` -Checks the consistency of [`UNIQUE`]({% link {{ page.version.version }}/unique.md %}) indexes, [`CHECK`]({% link {{ page.version.version }}/check.md %}) constraints, and more. Partially implemented; see [cockroachdb/cockroach#10425](https://github.com/cockroachdb/cockroach/issues/10425) for details. +Checks the consistency of [`UNIQUE`]({% link {{ page.version.version }}/unique.md %}) indexes, [`CHECK`]({% link {{ page.version.version }}/check.md %}) constraints, and more. Partially implemented; see cockroachdb/cockroach#10425 for details. {{site.data.alerts.callout_info}} This example uses the `users` table from our open-source, fictional peer-to-peer vehicle-sharing application, [MovR]({% link {{ page.version.version }}/movr.md %}). @@ -175,7 +175,7 @@ CockroachDB supports [altering the column types]({% link {{ page.version.version ### Temporary objects -[Temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}), [temporary views]({% link {{ page.version.version }}/views.md %}#temporary-views), and [temporary sequences]({% link {{ page.version.version }}/create-sequence.md %}#temporary-sequences) are in preview in CockroachDB. If you create too many temporary objects in a session, the performance of DDL operations will degrade. 
Dropping large numbers of temporary objects in rapid succession can also enqueue many [schema change GC jobs]({% link {{ page.version.version }}/show-jobs.md %}), which may further degrade cluster performance. This performance degradation could persist long after creating the temporary objects. For more details, see [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +[Temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}), [temporary views]({% link {{ page.version.version }}/views.md %}#temporary-views), and [temporary sequences]({% link {{ page.version.version }}/create-sequence.md %}#temporary-sequences) are in preview in CockroachDB. If you create too many temporary objects in a session, the performance of DDL operations will degrade. Dropping large numbers of temporary objects in rapid succession can also enqueue many [schema change GC jobs]({% link {{ page.version.version }}/show-jobs.md %}), which may further degrade cluster performance. This performance degradation could persist long after creating the temporary objects. For more details, see cockroachdb/cockroach#46260. To enable temporary objects, set the `experimental_enable_temp_tables` [session variable]({% link {{ page.version.version }}/show-vars.md %}) to `on`. diff --git a/src/current/v24.1/common-table-expressions.md b/src/current/v24.1/common-table-expressions.md index a76f13ff39d..bb43aa48b0c 100644 --- a/src/current/v24.1/common-table-expressions.md +++ b/src/current/v24.1/common-table-expressions.md @@ -446,7 +446,7 @@ SELECT COUNT(*) FROM temp; Because this pattern incurs the overhead of a new scan for each iteration, it is slower per row than a full scan. It is therefore faster than a full scan in cases (such as this one) where many rows are skipped, but is slower if they are not. {{site.data.alerts.callout_info}} -Some recursive CTEs are not not yet optimized. 
For details, see the [tracking issue](https://github.com/cockroachdb/cockroach/issues/89954). +Some recursive CTEs are not yet optimized. For details, see the tracking issue. {{site.data.alerts.end}} ## Correlated common table expressions diff --git a/src/current/v24.1/configure-replication-zones.md index 647bf140411..30913c684bb 100644 --- a/src/current/v24.1/configure-replication-zones.md +++ b/src/current/v24.1/configure-replication-zones.md @@ -98,7 +98,7 @@ For more information, see the following subsections: The hierarchy of inheritance for zone configs can be visualized using the following outline-style diagram, in which each level of indentation denotes an inheritance relationship. -The only exception to this simple inheritance relationship is that due to a known limitation, sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information, see [cockroachdb/cockroach#75862](https://github.com/cockroachdb/cockroach/issues/75862). +The only exception to this simple inheritance relationship is that due to a known limitation, sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information, see cockroachdb/cockroach#75862. ``` - default @@ -127,7 +127,7 @@ From the whole-system perspective, the hierarchy of schema object zone configs c The following diagram presents the same set of schema objects as the previous outline-style diagram, but using boxes and lines joined with arrows that represent the "top-down" view. -Each box represents a schema object in the zone configuration inheritance hierarchy. Each solid line ends in an arrow that points from a parent object to its child object, which will inherit the parent's values unless those values are changed at the child level. 
The dotted lines between partitions and sub-partitions represent the known limitation mentioned previously that sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information about this limitation, see [cockroachdb/cockroach#75862](https://github.com/cockroachdb/cockroach/issues/75862). +Each box represents a schema object in the zone configuration inheritance hierarchy. Each solid line ends in an arrow that points from a parent object to its child object, which will inherit the parent's values unless those values are changed at the child level. The dotted lines between partitions and sub-partitions represent the known limitation mentioned previously that sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information about this limitation, see cockroachdb/cockroach#75862. zone config inheritance diagram diff --git a/src/current/v24.1/create-sequence.md b/src/current/v24.1/create-sequence.md index bae71037988..0e6019364ac 100644 --- a/src/current/v24.1/create-sequence.md +++ b/src/current/v24.1/create-sequence.md @@ -59,7 +59,7 @@ CockroachDB supports the following [SQL sequence functions]({% link {{ page.vers CockroachDB supports session-scoped temporary sequences. Unlike persistent sequences, temporary sequences can only be accessed from the session in which they were created, and they are dropped at the end of the session. You can create temporary sequences on both persistent tables and [temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. 
{{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v24.1/create-table.md b/src/current/v24.1/create-table.md index d1a4c389267..d6738c3e0a0 100644 --- a/src/current/v24.1/create-table.md +++ b/src/current/v24.1/create-table.md @@ -155,7 +155,7 @@ If you use `GENERATED BY DEFAULT AS IDENTITY` to define the identity column, any Note the following limitations of identity columns: -- `GENERATED ALWAYS AS IDENTITY`/`GENERATED BY DEFAULT AS IDENTITY` is supported in [`ALTER TABLE ... ADD COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#add-column) statements only when the table being altered is empty, as [CockroachDB does not support back-filling sequential column data]({% link {{ page.version.version }}/known-limitations.md %}#adding-a-column-with-sequence-based-default-values). For more information, see [#42508](https://github.com/cockroachdb/cockroach/issues/42508). +- `GENERATED ALWAYS AS IDENTITY`/`GENERATED BY DEFAULT AS IDENTITY` is supported in [`ALTER TABLE ... ADD COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#add-column) statements only when the table being altered is empty, as [CockroachDB does not support back-filling sequential column data]({% link {{ page.version.version }}/known-limitations.md %}#adding-a-column-with-sequence-based-default-values). For more information, see #42508. - Unlike PostgreSQL, CockroachDB does not support using the `OVERRIDING SYSTEM VALUE` clause in `INSERT`/`UPDATE`/`UPSERT` statements to overwrite `GENERATED ALWAYS AS IDENTITY` identity column values. For an example of an identity column, see [Create a table with an identity column](#create-a-table-with-an-identity-column). 
diff --git a/src/current/v24.1/disaster-recovery-planning.md b/src/current/v24.1/disaster-recovery-planning.md index 69eaf1fac32..74ee865969d 100644 --- a/src/current/v24.1/disaster-recovery-planning.md +++ b/src/current/v24.1/disaster-recovery-planning.md @@ -320,7 +320,7 @@ If your cluster is running, you do not have a backup that encapsulates the time If you have corrupted data in a database or table, [restore]({% link {{ page.version.version }}/restore.md %}) the object from a prior [backup]({% link {{ page.version.version }}/backup.md %}). If revision history is in the backup, you can restore from a [point in time]({% link {{ page.version.version }}/take-backups-with-revision-history-and-restore-from-a-point-in-time.md %}). -Instead of dropping the corrupted table or database, we recommend [renaming the table]({% link {{ page.version.version }}/alter-table.md %}#rename-to) or [renaming the database]({% link {{ page.version.version }}/alter-database.md %}#rename-to) so you have historical data to compare to later. If you drop a database, the database cannot be referenced with `AS OF SYSTEM TIME` queries (see [#51380](https://github.com/cockroachdb/cockroach/issues/51380) for more information), and you will need to take a backup that is backdated to the system time when the database still existed. +Instead of dropping the corrupted table or database, we recommend [renaming the table]({% link {{ page.version.version }}/alter-table.md %}#rename-to) or [renaming the database]({% link {{ page.version.version }}/alter-database.md %}#rename-to) so you have historical data to compare to later. If you drop a database, the database cannot be referenced with `AS OF SYSTEM TIME` queries (see #51380 for more information), and you will need to take a backup that is backdated to the system time when the database still existed. 
{{site.data.alerts.callout_info}} If the table you are restoring has foreign keys, [careful consideration]({% link {{ page.version.version }}/restore.md %}#remove-the-foreign-key-before-restore) should be applied to make sure data integrity is maintained during the restore process. diff --git a/src/current/v24.1/eventlog.md b/src/current/v24.1/eventlog.md index 2dd3c072d82..e7b813ded9d 100644 --- a/src/current/v24.1/eventlog.md +++ b/src/current/v24.1/eventlog.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/eventlog.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/eventlog.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v24.1/example-apps.md b/src/current/v24.1/example-apps.md index bea3cfd1876..0d5ba010933 100644 --- a/src/current/v24.1/example-apps.md +++ b/src/current/v24.1/example-apps.md @@ -14,7 +14,7 @@ Click the links in the tables below to see simple but complete example applicati If you are looking to do a specific task such as connect to the database, insert data, or run multi-statement transactions, see [this list of tasks](#tasks). {{site.data.alerts.callout_info}} -Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM with **partial** support. If you encounter problems, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward full support. +Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM with **partial** support. If you encounter problems, please open an issue with details to help us make progress toward full support. 
Note that tools with [**community-level** support]({% link {{ page.version.version }}/community-tooling.md %}) have been tested or developed by the CockroachDB community, but are not officially supported by Cockroach Labs. If you encounter problems with using these tools, please contact the maintainer of the tool with details. {{site.data.alerts.end}} diff --git a/src/current/v24.1/file-an-issue.md b/src/current/v24.1/file-an-issue.md index 9255e2c08b2..760c5b181db 100644 --- a/src/current/v24.1/file-an-issue.md +++ b/src/current/v24.1/file-an-issue.md @@ -28,7 +28,7 @@ To file an issue in GitHub, we need the following information: ### Template -You can use this as a template for [filing an issue in GitHub](https://github.com/cockroachdb/cockroach/issues/new): +You can use this as a template for filing an issue in GitHub: ~~~ diff --git a/src/current/v24.1/fips.md b/src/current/v24.1/fips.md index e490ddcd296..1ca62a6fa8e 100644 --- a/src/current/v24.1/fips.md +++ b/src/current/v24.1/fips.md @@ -58,7 +58,7 @@ A system must have FIPS mode enabled in the kernel before it can run the FIPS-re If you do not want to use the FIPS-ready CockroachDB Docker image directly, you can create a custom Docker image based on [Red Hat's Universal Base Image 8 Docker image](https://catalog.redhat.com/software/containers/ubi8/ubi/): -- You can model your Dockerfile on the one that Cockroach Labs uses to produce the [FIPS-ready Docker image](https://github.com/cockroachdb/cockroach/blob/master/build/deploy/Dockerfile) for CockroachDB. +- You can model your Dockerfile on the one that Cockroach Labs uses to produce the FIPS-ready Docker image for CockroachDB. - Your Dockerfile must install OpenSSL before it starts the `cockroach` binary. - You must enable FIPS mode on the Docker host kernel before it can run containers with FIPS mode enabled. The FIPS-ready CockroachDB Docker image must run with FIPS mode enabled. 
To enable FIPS mode in the Docker host kernel, refer to [Enable FIPS mode](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/security_hardening/assembly_installing-a-rhel-8-system-with-fips-mode-enabled_security-hardening) in Red Hat's documentation. To verify that FIPS mode is enabled, refer to [Verify that the kernel enforces FIPS mode](#verify-that-the-kernel-enforces-fips-mode). diff --git a/src/current/v24.1/foreign-key.md b/src/current/v24.1/foreign-key.md index 55677511e3d..7b06a11d484 100644 --- a/src/current/v24.1/foreign-key.md +++ b/src/current/v24.1/foreign-key.md @@ -93,7 +93,7 @@ For matching purposes, composite foreign keys can be in one of three states: For examples showing how these key matching algorithms work, see [Match composite foreign keys with `MATCH SIMPLE` and `MATCH FULL`](#match-composite-foreign-keys-with-match-simple-and-match-full). {{site.data.alerts.callout_info}} -CockroachDB does not support `MATCH PARTIAL`. For more information, see issue [#20305](https://github.com/cockroachdb/cockroach/issues/20305). +CockroachDB does not support `MATCH PARTIAL`. For more information, see issue #20305. 
{{site.data.alerts.end}} ### Foreign key actions diff --git a/src/current/v24.1/functions-and-operators.md b/src/current/v24.1/functions-and-operators.md index 9515f188f77..6fb648e24ff 100644 --- a/src/current/v24.1/functions-and-operators.md +++ b/src/current/v24.1/functions-and-operators.md @@ -51,7 +51,7 @@ In addition to the built-in functions described in the following sections, Cockr ## Built-in functions -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/functions.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/functions.md{% endcapture %}{% include {{ cockroach_include }} %} ## Aggregate functions @@ -61,11 +61,11 @@ For examples showing how to use aggregate functions, see [the `SELECT` clause do Non-commutative aggregate functions are sensitive to the order in which the rows are processed in the surrounding [`SELECT` clause]({% link {{ page.version.version }}/select-clause.md %}#aggregate-functions). To specify the order in which input rows are processed, you can add an [`ORDER BY`]({% link {{ page.version.version }}/order-by.md %}) clause within the function argument list. For examples, see the [`SELECT` clause]({% link {{ page.version.version }}/select-clause.md %}#order-aggregate-function-input-rows-by-column) documentation. 
{{site.data.alerts.end}} -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/aggregates.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/aggregates.md{% endcapture %}{% include {{ cockroach_include }} %} ## Window functions -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/window_functions.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/window_functions.md{% endcapture %}{% include {{ cockroach_include }} %} ## Operators @@ -137,7 +137,7 @@ The following table lists all CockroachDB operators from highest to lowest prece ### Supported operations -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/operators.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/operators.md{% endcapture %}{% include {{ cockroach_include }} %} {% comment %} ## `CAST()` diff --git a/src/current/v24.1/install-client-drivers.md b/src/current/v24.1/install-client-drivers.md index 984671a0dfc..4d919756ce3 100644 --- a/src/current/v24.1/install-client-drivers.md +++ b/src/current/v24.1/install-client-drivers.md @@ -8,7 +8,7 @@ docs_area: develop CockroachDB supports both native drivers and the PostgreSQL wire protocol, so most available PostgreSQL client drivers and ORM frameworks should work with CockroachDB. Choose a language for supported clients, and follow the installation steps. After you install a client library, you can [connect to the database]({% link {{ page.version.version }}/connect-to-the-database.md %}). {{site.data.alerts.callout_info}} -Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM framework with **partial** support. 
If you encounter problems, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward full support. +Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM framework with **partial** support. If you encounter problems, please open an issue with details to help us make progress toward full support. {{site.data.alerts.end}}
      diff --git a/src/current/v24.1/install-cockroachdb-mac.md index 5155c5bf0cd..eaaa8742c52 100644 --- a/src/current/v24.1/install-cockroachdb-mac.md +++ b/src/current/v24.1/install-cockroachdb-mac.md @@ -201,4 +201,4 @@ CockroachDB runtimes built for the ARM architecture have the following limitatio {% comment %}v22.2.0+{% endcomment %} -On macOS ARM systems, [spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the GEOS libraries. Users needing spatial features on an ARM Mac may instead [run the Intel binary](#install-the-binary) or use the[Docker container image](#use-docker). Refer to [GitHub issue #93161](https://github.com/cockroachdb/cockroach/issues/93161) for more information. +On macOS ARM systems, [spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the GEOS libraries. Users needing spatial features on an ARM Mac may instead [run the Intel binary](#install-the-binary) or use the [Docker container image](#use-docker). Refer to GitHub issue #93161 for more information. 
diff --git a/src/current/v24.1/intellij-idea.md b/src/current/v24.1/intellij-idea.md index de46249320f..ef1ce93b9d4 100644 --- a/src/current/v24.1/intellij-idea.md +++ b/src/current/v24.1/intellij-idea.md @@ -81,7 +81,7 @@ You can now use IntelliJ's [database tool window](https://www.jetbrains.com/help ## Report issues with IntelliJ IDEA & CockroachDB -If you encounter issues other than those outlined above, please [file an issue on the `cockroachdb/cockroach` GitHub repo](https://github.com/cockroachdb/cockroach/issues/new?template=bug_report.md), including the following details about the environment where you encountered the issue: +If you encounter issues other than those outlined above, please file an issue on the `cockroachdb/cockroach` GitHub repo, including the following details about the environment where you encountered the issue: - CockroachDB version ([`cockroach version`]({% link {{ page.version.version }}/cockroach-version.md %})) - IntelliJ IDEA version diff --git a/src/current/v24.1/known-limitations.md b/src/current/v24.1/known-limitations.md index c42ebef7e9a..af0e4a843da 100644 --- a/src/current/v24.1/known-limitations.md +++ b/src/current/v24.1/known-limitations.md @@ -20,16 +20,16 @@ Limitations will be added as they are discovered. ### PL/pgSQL -- It is not possible to use a variable as a target more than once in the same `INTO` clause. For example, `SELECT 1, 2 INTO x, x;`. [#121605](https://github.com/cockroachdb/cockroach/issues/121605) -- PLpgSQL variable declarations cannot inherit the type of a table row or column using `%TYPE` or `%ROWTYPE` syntax. [#114676](https://github.com/cockroachdb/cockroach/issues/114676) +- It is not possible to use a variable as a target more than once in the same `INTO` clause. For example, `SELECT 1, 2 INTO x, x;`. #121605 +- PLpgSQL variable declarations cannot inherit the type of a table row or column using `%TYPE` or `%ROWTYPE` syntax. 
#114676 ### UDFs and stored procedures -- Routines cannot be invoked with named arguments, e.g., `SELECT foo(a => 1, b => 2);` or `SELECT foo(b := 1, a := 2);`. [#122264](https://github.com/cockroachdb/cockroach/issues/122264) -- Routines cannot be created if they reference temporary tables. [#121375](https://github.com/cockroachdb/cockroach/issues/121375) -- Routines cannot be created with unnamed `INOUT` parameters. For example, `CREATE PROCEDURE p(INOUT INT) AS $$ BEGIN NULL; END; $$ LANGUAGE PLpgSQL;`. [#121251](https://github.com/cockroachdb/cockroach/issues/121251) -- Routines cannot be created if they return fewer columns than declared. For example, `CREATE FUNCTION f(OUT sum INT, INOUT a INT, INOUT b INT) LANGUAGE SQL AS $$ SELECT (a + b, b); $$;`. [#121247](https://github.com/cockroachdb/cockroach/issues/121247) -- A `RECORD`-returning UDF cannot be created without a `RETURN` statement in the root block, which would restrict the wildcard type to a concrete one. [#122945](https://github.com/cockroachdb/cockroach/issues/122945) +- Routines cannot be invoked with named arguments, e.g., `SELECT foo(a => 1, b => 2);` or `SELECT foo(b := 1, a := 2);`. #122264 +- Routines cannot be created if they reference temporary tables. #121375 +- Routines cannot be created with unnamed `INOUT` parameters. For example, `CREATE PROCEDURE p(INOUT INT) AS $$ BEGIN NULL; END; $$ LANGUAGE PLpgSQL;`. #121251 +- Routines cannot be created if they return fewer columns than declared. For example, `CREATE FUNCTION f(OUT sum INT, INOUT a INT, INOUT b INT) LANGUAGE SQL AS $$ SELECT (a + b, b); $$;`. #121247 +- A `RECORD`-returning UDF cannot be created without a `RETURN` statement in the root block, which would restrict the wildcard type to a concrete one. 
#122945 ### Physical cluster replication fail back to primary cluster @@ -72,7 +72,7 @@ CockroachDB supports the [PostgreSQL wire protocol](https://www.postgresql.org/d #### `CAST` expressions containing a subquery with an `ENUM` target are not supported -Casting subqueries to ENUMs in views and UDFs is not supported. [#108184](https://github.com/cockroachdb/cockroach/issues/108184) +Casting subqueries to ENUMs in views and UDFs is not supported. #108184 #### Statements containing multiple modification subqueries of the same table are disallowed @@ -81,13 +81,13 @@ Statements containing multiple modification subqueries mutating the same row cou - Set the `sql.multiple_modifications_of_table.enabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) to `true`. - Use the `enable_multiple_modifications_of_table` [session variable]({% link {{ page.version.version }}/set-vars.md %}). -If multiple mutations inside the same statement affect different tables with [`FOREIGN KEY`]({% link {{ page.version.version }}/foreign-key.md %}) relations and `ON CASCADE` clauses between them, the results will be different from what is expected in PostgreSQL. [#70731](https://github.com/cockroachdb/cockroach/issues/70731) +If multiple mutations inside the same statement affect different tables with [`FOREIGN KEY`]({% link {{ page.version.version }}/foreign-key.md %}) relations and `ON CASCADE` clauses between them, the results will be different from what is expected in PostgreSQL. #70731 #### Using `default_int_size` session variable in batch of statements When setting the `default_int_size` [session variable]({% link {{ page.version.version }}/set-vars.md %}) in a batch of statements such as `SET default_int_size='int4'; SELECT 1::IN`, the `default_int_size` variable will not take effect until the next statement. Statement parsing is asynchronous with statement execution. 
-As a workaround, set `default_int_size` via your database driver, or ensure that `SET default_int_size` is in its own statement. [#32846](https://github.com/cockroachdb/cockroach/issues/32846) +As a workaround, set `default_int_size` via your database driver, or ensure that `SET default_int_size` is in its own statement. #32846 #### Overload resolution for collated strings @@ -114,15 +114,15 @@ Many string operations are not properly overloaded for [collated strings]({% lin pq: unsupported binary operator: || ~~~ -[#10679](https://github.com/cockroachdb/cockroach/issues/10679) +#10679 #### Current sequence value not checked when updating min/max value -Altering the minimum or maximum value of a series does not check the current value of a series. This means that it is possible to silently set the maximum to a value less than, or a minimum value greater than, the current value. [#23719](https://github.com/cockroachdb/cockroach/issues/23719) +Altering the minimum or maximum value of a series does not check the current value of a series. This means that it is possible to silently set the maximum to a value less than, or a minimum value greater than, the current value. #23719 #### `null_ordered_last` does not produce correct results with tuples -By default, CockroachDB orders `NULL`s before all other values. For compatibility with PostgreSQL, the `null_ordered_last` [session variable]({% link {{ page.version.version }}/set-vars.md %}) was added, which changes the default to order `NULL` values after all other values. This works in most cases, due to some transformations CockroachDB makes in the optimizer to add extra ordering columns. However, it does not work when the ordering column is a tuple. [#93558](https://github.com/cockroachdb/cockroach/issues/93558) +By default, CockroachDB orders `NULL`s before all other values. 
For compatibility with PostgreSQL, the `null_ordered_last` [session variable]({% link {{ page.version.version }}/set-vars.md %}) was added, which changes the default to order `NULL` values after all other values. This works in most cases, due to some transformations CockroachDB makes in the optimizer to add extra ordering columns. However, it does not work when the ordering column is a tuple. #93558 ### Functions and procedures @@ -168,10 +168,10 @@ Transactions with [priority `HIGH`]({% link {{ page.version.version }}/transacti ERROR: unimplemented: cannot use ROLLBACK TO SAVEPOINT in a HIGH PRIORITY transaction containing DDL SQLSTATE: 0A000 HINT: You have attempted to use a feature that is not yet implemented. -See: https://github.com/cockroachdb/cockroach/issues/46414 +See: cockroach#46414 ~~~ -[#46414](https://github.com/cockroachdb/cockroach/issues/46414) +#46414 #### `CANCEL JOB` limitations @@ -222,7 +222,7 @@ HINT: You have attempted to use a feature that is not yet implemented. See: https://go.crdb.dev/issue-v/42508/v24.1 ~~~ -[#42508](https://github.com/cockroachdb/cockroach/issues/42508) +#42508 #### Dropping a column referenced by a partial index @@ -249,9 +249,9 @@ When executing an [`ALTER TABLE ADD COLUMN`]({% link {{ page.version.version }}/ Some [schema changes]({% link {{ page.version.version }}/online-schema-changes.md %}) that [drop columns]({% link {{ page.version.version }}/alter-table.md %}#drop-column) cannot be [rolled back]({% link {{ page.version.version }}/rollback-transaction.md %}) properly. -In some cases, the rollback will succeed, but the column data might be partially or totally missing, or stale due to the asynchronous nature of the schema change. [#46541](https://github.com/cockroachdb/cockroach/issues/46541) +In some cases, the rollback will succeed, but the column data might be partially or totally missing, or stale due to the asynchronous nature of the schema change. 
#46541 -In other cases, the rollback will fail in such a way that will never be cleaned up properly, leaving the table descriptor in a state where no other schema changes can be run successfully. [#47712](https://github.com/cockroachdb/cockroach/issues/47712) +In other cases, the rollback will fail in such a way that will never be cleaned up properly, leaving the table descriptor in a state where no other schema changes can be run successfully. #47712 To reduce the chance that a column drop will roll back incorrectly: @@ -287,7 +287,7 @@ As a workaround, you can either [manually split a table's columns into multiple #### Placeholders in `PARTITION BY` -{% include {{ page.version.version }}/known-limitations/partitioning-with-placeholders.md %} [#19464](https://github.com/cockroachdb/cockroach/issues/19464) +{% include {{ page.version.version }}/known-limitations/partitioning-with-placeholders.md %} #19464 #### Unsupported trigram syntax @@ -303,7 +303,7 @@ The following PostgreSQL syntax and features are currently unsupported for [full #### CockroachDB does not allow inverted indexes with `STORING` -CockroachDB does not allow inverted indexes with a [`STORING` column]({% link {{ page.version.version }}/create-index.md %}#store-columns). [#88278](https://github.com/cockroachdb/cockroach/issues/88278) +CockroachDB does not allow inverted indexes with a [`STORING` column]({% link {{ page.version.version }}/create-index.md %}#store-columns). #88278 #### Multiple arbiter indexes for `INSERT ON CONFLICT DO UPDATE` @@ -323,19 +323,19 @@ CockroachDB does not allow inverted indexes with a [`STORING` column]({% link {{ CockroachDB supports efficiently storing and querying [spatial data]({% link {{ page.version.version }}/export-spatial-data.md %}), with the following limitations: -- Not all [PostGIS spatial functions](https://postgis.net/docs/reference.html) are supported. 
[#49203](https://github.com/cockroachdb/cockroach/issues/49203) +- Not all [PostGIS spatial functions](https://postgis.net/docs/reference.html) are supported. #49203 -- The `AddGeometryColumn` [spatial function]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) only allows constant arguments. [#49402](https://github.com/cockroachdb/cockroach/issues/49402) +- The `AddGeometryColumn` [spatial function]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) only allows constant arguments. #49402 -- The `AddGeometryColumn` spatial function only allows the `true` value for its `use_typmod` parameter. [#49448](https://github.com/cockroachdb/cockroach/issues/49448) +- The `AddGeometryColumn` spatial function only allows the `true` value for its `use_typmod` parameter. #49448 -- CockroachDB does not support the `@` operator. Instead of using `@` in spatial expressions, we recommend using the inverse, with `~`. For example, instead of `a @ b`, use `b ~ a`. [#56124](https://github.com/cockroachdb/cockroach/issues/56124) +- CockroachDB does not support the `@` operator. Instead of using `@` in spatial expressions, we recommend using the inverse, with `~`. For example, instead of `a @ b`, use `b ~ a`. #56124 -- CockroachDB does not yet support [`INSERT`]({% link {{ page.version.version }}/insert.md %})s into the [`spatial_ref_sys` table]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-system-tables). This limitation also blocks the [`ogr2ogr -f PostgreSQL` file conversion command](https://gdal.org/programs/ogr2ogr.html#cmdoption-ogr2ogr-f). [#55903](https://github.com/cockroachdb/cockroach/issues/55903) +- CockroachDB does not yet support [`INSERT`]({% link {{ page.version.version }}/insert.md %})s into the [`spatial_ref_sys` table]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-system-tables). 
This limitation also blocks the [`ogr2ogr -f PostgreSQL` file conversion command](https://gdal.org/programs/ogr2ogr.html#cmdoption-ogr2ogr-f). #55903 -- CockroachDB does not yet support [k-nearest neighbors](https://wikipedia.org/wiki/K-nearest_neighbors_algorithm). [#55227](https://github.com/cockroachdb/cockroach/issues/55227) +- CockroachDB does not yet support [k-nearest neighbors](https://wikipedia.org/wiki/K-nearest_neighbors_algorithm). #55227 -- CockroachDB does not support using [schema name prefixes]({% link {{ page.version.version }}/sql-name-resolution.md %}#how-name-resolution-works) to refer to [data types]({% link {{ page.version.version }}/data-types.md %}) with type modifiers (e.g., `public.geometry(linestring, 4326)`). Instead, use fully-unqualified names to refer to data types with type modifiers (e.g., `geometry(linestring,4326)`). [#56492](https://github.com/cockroachdb/cockroach/issues/56492) +- CockroachDB does not support using [schema name prefixes]({% link {{ page.version.version }}/sql-name-resolution.md %}#how-name-resolution-works) to refer to [data types]({% link {{ page.version.version }}/data-types.md %}) with type modifiers (e.g., `public.geometry(linestring, 4326)`). Instead, use fully-unqualified names to refer to data types with type modifiers (e.g., `geometry(linestring,4326)`). #56492 - {% include {{ page.version.version }}/known-limitations/srid-4326-limitations.md %} @@ -349,9 +349,9 @@ Refer to [`OID` best practices]({% link {{ page.version.version }}/oid.md %}#bes - {% include {{page.version.version}}/cdc/avro-udt-composite.md %} - {% include {{page.version.version}}/cdc/csv-udt-composite.md %} -- Updating subfields of composite types using dot syntax results in a syntax error. [#102984](https://github.com/cockroachdb/cockroach/issues/102984) +- Updating subfields of composite types using dot syntax results in a syntax error. 
#102984 -- Tuple elements cannot be accessed without enclosing the [composite variable]({% link {{ page.version.version }}/create-type.md %}#create-a-composite-data-type) name in parentheses. For example, `(v).x`. [#114687](https://github.com/cockroachdb/cockroach/issues/114687) +- Tuple elements cannot be accessed without enclosing the [composite variable]({% link {{ page.version.version }}/create-type.md %}#create-a-composite-data-type) name in parentheses. For example, `(v).x`. #114687 #### `ALTER TYPE` limitations @@ -395,7 +395,7 @@ However, if there is no host at the target IP address, or if a firewall rule blo - Configure any active network firewalls to allow node-to-node traffic. - Verify that orchestration tools (e.g., Kubernetes) are configured to use the correct network connection information. -[#53410](https://github.com/cockroachdb/cockroach/issues/53410) +#53410 #### No guaranteed state switch from `DECOMMISSIONING` to `DECOMMISSIONED` if `node decommission` is interrupted @@ -404,7 +404,7 @@ There is no guaranteed state switch from `DECOMMISSIONING` to `DECOMMISSIONED` i - The `cockroach node decommission --wait-all` command was run and then interrupted - The `cockroach node decommission --wait=none` command was run -This is because the state flip is effected by the CLI program at the end. Only the CLI (or its underlying API call) is able to finalize the "decommissioned" state. If the command is interrupted, or `--wait=none` is used, the state will only flip to "decommissioned" when the CLI program is run again after decommissioning has done all its work. [#94430](https://github.com/cockroachdb/cockroach/issues/94430) +This is because the state flip is effected by the CLI program at the end. Only the CLI (or its underlying API call) is able to finalize the "decommissioned" state. 
If the command is interrupted, or `--wait=none` is used, the state will only flip to "decommissioned" when the CLI program is run again after decommissioning has done all its work. #94430 #### Simultaneous client connections and running queries on a single node @@ -440,7 +440,7 @@ As a workaround, [execute the file from the command line]({% link {{ page.versio #### Spatial features disabled for ARM Macs -[Spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the [GEOS](https://libgeos.org/) libraries. Users needing spatial features on an ARM Mac may instead [use Rosetta](https://developer.apple.com/documentation/virtualization/running_intel_binaries_in_linux_vms_with_rosetta) to [run the Intel binary]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#install-binary) or use the [Docker image]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#use-docker) distribution. [GitHub tracking issue](https://github.com/cockroachdb/cockroach/issues/93161) +[Spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the [GEOS](https://libgeos.org/) libraries. Users needing spatial features on an ARM Mac may instead [use Rosetta](https://developer.apple.com/documentation/virtualization/running_intel_binaries_in_linux_vms_with_rosetta) to [run the Intel binary]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#install-binary) or use the [Docker image]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#use-docker) distribution. GitHub tracking issue #### Logging system limitations @@ -486,7 +486,7 @@ Accessing the DB Console for a secure cluster now requires login information (i. The [`COMMENT ON`]({% link {{ page.version.version }}/comment-on.md %}) statement associates comments to databases, tables, or columns. 
However, the internal table (`system.comments`) in which these comments are stored is not captured by a [`BACKUP`]({% link {{ page.version.version }}/backup.md %}) of a table or database. -As a workaround, take a cluster backup instead, as the `system.comments` table is included in cluster backups. [#44396](https://github.com/cockroachdb/cockroach/issues/44396) +As a workaround, take a cluster backup instead, as the `system.comments` table is included in cluster backups. #44396 #### `SHOW BACKUP` does not support symlinks for nodelocal @@ -513,8 +513,8 @@ Change data capture (CDC) provides efficient, distributed, row-level changefeeds The SQL optimizer has limitations under certain isolation levels: -- The new implementation of `SELECT FOR UPDATE` is not yet the default setting under `SERIALIZABLE` isolation. It can be used under `SERIALIZABLE` isolation by setting the `optimizer_use_lock_op_for_serializable` [session setting]({% link {{ page.version.version }}/session-variables.md %}) to `true`. [#114737](https://github.com/cockroachdb/cockroach/issues/114737) -- `SELECT FOR UPDATE` does not lock completely-`NULL` column families in multi-column-family tables. [#116836](https://github.com/cockroachdb/cockroach/issues/116836) +- The new implementation of `SELECT FOR UPDATE` is not yet the default setting under `SERIALIZABLE` isolation. It can be used under `SERIALIZABLE` isolation by setting the `optimizer_use_lock_op_for_serializable` [session setting]({% link {{ page.version.version }}/session-variables.md %}) to `true`. #114737 +- `SELECT FOR UPDATE` does not lock completely-`NULL` column families in multi-column-family tables. #116836 #### Statistics limitations @@ -523,7 +523,7 @@ The SQL optimizer has limitations under certain isolation levels: #### Incorrect query plans for partitions with `NULL` values -In cases where the partition definition includes a comparison with `NULL` and a query constraint, incorrect query plans are returned. 
However, this case uses non-standard partitioning which defines partitions which could never hold values, so it is not likely to occur in production environments. [#82774](https://github.com/cockroachdb/cockroach/issues/82774) +In cases where the partition definition includes a comparison with `NULL` and a query constraint, incorrect query plans are returned. However, this case uses non-standard partitioning which defines partitions which could never hold values, so it is not likely to occur in production environments. #82774 #### Vectorized engine limitations @@ -531,15 +531,15 @@ In cases where the partition definition includes a comparison with `NULL` and a #### `transaction_rows_read_err` and `transaction_rows_written_err` do not halt query execution -The `transaction_rows_read_err` and `transaction_rows_written_err` [session settings]({% link {{ page.version.version }}/set-vars.md %}) limit the number of rows read or written by a single [transaction]({% link {{ page.version.version }}/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction). These session settings will fail the transaction with an error, but not until the current query finishes executing and the results have been returned to the client. [#70473](https://github.com/cockroachdb/cockroach/issues/70473) +The `transaction_rows_read_err` and `transaction_rows_written_err` [session settings]({% link {{ page.version.version }}/set-vars.md %}) limit the number of rows read or written by a single [transaction]({% link {{ page.version.version }}/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction). These session settings will fail the transaction with an error, but not until the current query finishes executing and the results have been returned to the client. 
#70473 #### `sql.guardrails.max_row_size_err` misses indexed virtual computed columns -The `sql.guardrails.max_row_size_err` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) misses large rows caused by indexed virtual computed columns. This is because the guardrail only checks the size of primary key rows, not secondary index rows. [#69540](https://github.com/cockroachdb/cockroach/issues/69540) +The `sql.guardrails.max_row_size_err` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) misses large rows caused by indexed virtual computed columns. This is because the guardrail only checks the size of primary key rows, not secondary index rows. #69540 #### Using `LIKE...ESCAPE` in `WHERE` and `HAVING` constraints -CockroachDB tries to optimize most comparisons operators in `WHERE` and `HAVING` clauses into constraints on SQL indexes by only accessing selected rows. This is done for `LIKE` clauses when a common prefix for all selected rows can be determined in the search pattern (e.g., `... LIKE 'Joe%'`). However, this optimization is not yet available if the `ESCAPE` keyword is also used. [#30192](https://github.com/cockroachdb/cockroach/issues/30192) +CockroachDB tries to optimize most comparison operators in `WHERE` and `HAVING` clauses into constraints on SQL indexes by only accessing selected rows. This is done for `LIKE` clauses when a common prefix for all selected rows can be determined in the search pattern (e.g., `... LIKE 'Joe%'`). However, this optimization is not yet available if the `ESCAPE` keyword is also used. #30192 #### Import with a high amount of disk contention @@ -694,7 +694,7 @@ UNION ALL SELECT * FROM t1 LEFT JOIN t2 ON st_contains(t1.geom, t2.geom) AND t2. (54 rows) ``` -[#59649](https://github.com/cockroachdb/cockroach/issues/59649) +#59649 #### Locality optimized search limitations @@ -707,4 +707,4 @@ UNION ALL SELECT * FROM t1 LEFT JOIN t2 ON st_contains(t1.geom, t2.geom) AND t2.
#### Inverted join for `tsvector` and `tsquery` types is not supported -CockroachDB cannot index-accelerate queries with `@@` predicates when both sides of the operator are variables. [#102731](https://github.com/cockroachdb/cockroach/issues/102731) \ No newline at end of file +CockroachDB cannot index-accelerate queries with `@@` predicates when both sides of the operator are variables. #102731 \ No newline at end of file diff --git a/src/current/v24.1/kubernetes-performance.md b/src/current/v24.1/kubernetes-performance.md index 47aef82797d..356ecb6e2fb 100644 --- a/src/current/v24.1/kubernetes-performance.md +++ b/src/current/v24.1/kubernetes-performance.md @@ -20,9 +20,9 @@ Before you focus on optimizing a Kubernetes-orchestrated CockroachDB cluster: A number of independent factors affect performance when running CockroachDB on Kubernetes. Most are easiest to change before you create your CockroachDB cluster. If you need to modify a CockroachDB cluster that is already running on Kubernetes, extra care and testing is strongly recommended. -The following sections show how to modify excerpts from our provided Kubernetes configuration YAML files. You can find the most up-to-date versions of these files on GitHub: [one for running CockroachDB in secure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset-secure.yaml) and one for [running CockroachDB in insecure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml). +The following sections show how to modify excerpts from our provided Kubernetes configuration YAML files. 
You can find the most up-to-date versions of these files on GitHub: [one for running CockroachDB in secure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset-secure.yaml) and one for [running CockroachDB in insecure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml). -You can also use a [performance-optimized configuration file for secure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml) or [insecure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml). Be sure to modify the file wherever there is a `TODO` comment. +You can also use a [performance-optimized configuration file for secure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml) or [insecure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml). Be sure to modify the file wherever there is a `TODO` comment. ### Version of CockroachDB @@ -332,7 +332,7 @@ If for some reason setting appropriate resource requests still isn't getting you #### Client applications on the same machines as CockroachDB -Running client applications such as benchmarking applications on the same machines as CockroachDB can be even worse than just having Kubernetes system pods on the same machines. They are very likely to end up competing for resources, because when the applications get more loaded than usual, so will the CockroachDB processes. 
The best way to avoid this is to [set resource requests and limits](#resource-requests-and-limits), but if you are unwilling or unable to do that for some reason, you can also set [anti-affinity scheduling policies](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) on your client applications. Anti-affinity policies are placed in the pod spec, so if you wanted to change our provided example load generator app, you'd change [these lines](https://github.com/cockroachdb/cockroach/blob/98c506c48f3517d1ac1aadb6a09e1b23ad672c37/cloud/kubernetes/example-app.yaml#L11-L12): +Running client applications such as benchmarking applications on the same machines as CockroachDB can be even worse than just having Kubernetes system pods on the same machines. They are very likely to end up competing for resources, because when the applications get more loaded than usual, so will the CockroachDB processes. The best way to avoid this is to [set resource requests and limits](#resource-requests-and-limits), but if you are unwilling or unable to do that for some reason, you can also set [anti-affinity scheduling policies](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) on your client applications. 
Anti-affinity policies are placed in the pod spec, so if you wanted to change our provided example load generator app, you'd change [these lines](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml): ~~~ yaml spec: diff --git a/src/current/v24.1/log-formats.md b/src/current/v24.1/log-formats.md index 7c715f38c89..cfcf8512b19 100644 --- a/src/current/v24.1/log-formats.md +++ b/src/current/v24.1/log-formats.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/logformats.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/logformats.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v24.1/logging.md b/src/current/v24.1/logging.md index a9b22c4c05f..357817bc212 100644 --- a/src/current/v24.1/logging.md +++ b/src/current/v24.1/logging.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/logging.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/logging.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v24.1/migrate-from-avro.md b/src/current/v24.1/migrate-from-avro.md index de42d232917..eb46e391fce 100644 --- a/src/current/v24.1/migrate-from-avro.md +++ b/src/current/v24.1/migrate-from-avro.md @@ -161,7 +161,7 @@ There are additional import [options][option] you can use when importing binary - `records_terminated_by`, which specifies the unicode character used to indicate new lines in the input binary or JSON file (default: `\n`). 
{{site.data.alerts.callout_info}} -The following example uses sample data generated by [Avro tools](https://github.com/cockroachdb/cockroach/tree/master/pkg/sql/importer/testdata/avro). +The following example uses sample data generated by Avro tools. {{site.data.alerts.end}} For example, to import the data from `simple-schema.json` into a `simple` table, first [create the table]({% link {{ page.version.version }}/create-table.md %}) to import into. Then run `IMPORT INTO` with the following options: diff --git a/src/current/v24.1/monitor-cockroachdb-kubernetes.md b/src/current/v24.1/monitor-cockroachdb-kubernetes.md index b6af4eab828..a175f0eefaa 100644 --- a/src/current/v24.1/monitor-cockroachdb-kubernetes.md +++ b/src/current/v24.1/monitor-cockroachdb-kubernetes.md @@ -128,7 +128,7 @@ If you're on Hosted GKE, before starting, make sure the email address associated {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/prometheus.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/prometheus.yaml ~~~ {{site.data.alerts.callout_info}} @@ -177,14 +177,14 @@ If you're on Hosted GKE, before starting, make sure the email address associated ## Configure Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. +Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. 
This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. -1. Download our alertmanager-config.yaml configuration file: +1. Download our alertmanager-config.yaml configuration file: {% include_cached copy-clipboard.html %} ~~~ shell $ curl -O \ - https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager-config.yaml + https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager-config.yaml ~~~ 1. Edit the `alertmanager-config.yaml` file to [specify the desired receivers for notifications](https://prometheus.io/docs/alerting/configuration/#receiver). Initially, the file contains a placeholder web hook. @@ -214,12 +214,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen The name of the secret, `alertmanager-cockroachdb`, must match the name used in the `alertmanager.yaml` file. If they differ, the Alertmanager instance will start without configuration, and nothing will happen. {{site.data.alerts.end}} -1. Use our [`alertmanager.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: +1. 
Use our [`alertmanager.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml ~~~ ~~~ @@ -244,12 +244,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen Alertmanager -1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml): +1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml): {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alert-rules.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml ~~~ ~~~ diff --git a/src/current/v24.1/monitor-cockroachdb-with-prometheus.md b/src/current/v24.1/monitor-cockroachdb-with-prometheus.md index ad08d818b4e..2c661d20b66 100644 --- a/src/current/v24.1/monitor-cockroachdb-with-prometheus.md +++ b/src/current/v24.1/monitor-cockroachdb-with-prometheus.md @@ -15,7 +15,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration - Make sure you have already started a CockroachDB cluster, either [locally]({% link {{ page.version.version }}/start-a-local-cluster.md %}) or in a [production environment]({% link {{ page.version.version 
}}/manual-deployment.md %}). -- Note that all files used in this tutorial can be found in the [`monitoring`](https://github.com/cockroachdb/cockroach/tree/master/monitoring) directory of the CockroachDB repository. +- Note that all files used in this tutorial can be found in the `monitoring` directory of the CockroachDB repository. ## Step 1. Install Prometheus @@ -39,11 +39,11 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration ## Step 2. Configure Prometheus -1. Download the starter [Prometheus configuration file](https://github.com/cockroachdb/cockroach/blob/master/monitoring/prometheus.yml) for CockroachDB: +1. Download the starter [Prometheus configuration file](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/prometheus.yml) for CockroachDB: {% include_cached copy-clipboard.html %} ~~~ shell - curl -o prometheus.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/prometheus.yml + curl -o prometheus.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/prometheus.yml ~~~ When you examine the configuration file, you'll see that it is set up to scrape the time series metrics of a single, insecure local node every 10 seconds: @@ -60,7 +60,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration Production cluster | Change the `targets` field to include `':'` for each node in the cluster. Also, be sure your network configuration allows TCP communication on the specified ports. Secure cluster | Uncomment `scheme: 'https'` and comment out `scheme: 'http'`. -1. Create a `rules` directory and download the [aggregation rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/aggregation.rules.yml) and [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/alerts.rules.yml) for CockroachDB into it: +1. 
Create a `rules` directory and download the [aggregation rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/aggregation.rules.yml) and [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml) for CockroachDB into it: {% include_cached copy-clipboard.html %} ~~~ shell @@ -74,12 +74,12 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration {% include_cached copy-clipboard.html %} ~~~ shell - curl -o rules/aggregation.rules.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/rules/aggregation.rules.yml + curl -o rules/aggregation.rules.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/rules/aggregation.rules.yml ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o rules/alerts.rules.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/rules/alerts.rules.yml + curl -o rules/alerts.rules.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml ~~~ ## Step 3. Start Prometheus @@ -108,7 +108,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration ## Step 4. Send notifications with Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. In step 2, you already downloaded CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/alerts.rules.yml). Now, download, configure, and start [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). +Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. 
In step 2, you already downloaded CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml). Now, download, configure, and start [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). 1. Download the [latest Alertmanager tarball](https://prometheus.io/download/#alertmanager) for your OS. @@ -173,29 +173,29 @@ Although Prometheus lets you graph metrics, [Grafana](https://grafana.com/) is a Url | `http://:9090` Access | Direct -1. Download the starter [Grafana dashboards](https://github.com/cockroachdb/cockroach/tree/master/monitoring/grafana-dashboards/by-cluster) for CockroachDB: +1. Download the starter Grafana dashboards for CockroachDB: {% include_cached copy-clipboard.html %} ~~~ shell - curl -o runtime.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/runtime.json + curl -o runtime.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/runtime.json # runtime dashboard: node status, including uptime, memory, and cpu. ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o storage.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/storage.json + curl -o storage.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/storage.json # storage dashboard: storage availability. ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o sql.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/sql.json + curl -o sql.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/sql.json # sql dashboard: sql queries/transactions. 
~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o replication.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/replication.json + curl -o replication.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/replication.json # replicas dashboard: replica information and operations. ~~~ diff --git a/src/current/v24.1/monitoring-and-alerting.md b/src/current/v24.1/monitoring-and-alerting.md index 53224596aec..5132ea9a5d0 100644 --- a/src/current/v24.1/monitoring-and-alerting.md +++ b/src/current/v24.1/monitoring-and-alerting.md @@ -1116,7 +1116,7 @@ Start Prometheus and Alertmanager to begin watching for events to alert on. You ### Events to alert on {{site.data.alerts.callout_info}} -Currently, not all events listed have corresponding alert rule definitions available from the `api/v2/rules/` endpoint. Many events not yet available in this manner are defined in the pre-defined alerting rules. For more details, see [Monitor CockroachDB with Prometheus]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}). +Currently, not all events listed have corresponding alert rule definitions available from the `api/v2/rules/` endpoint. Many events not yet available in this manner are defined in the pre-defined alerting rules. For more details, see [Monitor CockroachDB with Prometheus]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}). {{site.data.alerts.end}} #### Node is down @@ -1125,7 +1125,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** If a node is down, its `_status/vars` endpoint will return a `Connection refused` error. Otherwise, the `liveness_livenodes` metric will be the total number of live nodes in the cluster. -- **Rule definition:** Use the `InstanceDead` alert from our pre-defined alerting rules. 
+- **Rule definition:** Use the `InstanceDead` alert from our pre-defined alerting rules. #### Node is restarting too frequently @@ -1133,7 +1133,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the number of times the `sys_uptime` metric in the node's `_status/vars` output was reset back to zero. The `sys_uptime` metric gives you the length of time, in seconds, that the `cockroach` process has been running. -- **Rule definition:** Use the `InstanceFlapping` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `InstanceFlapping` alert from our pre-defined alerting rules. #### Node is running low on disk space @@ -1141,7 +1141,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Divide the `capacity` metric by the `capacity_available` metric in the node's `_status/vars` output. -- **Rule definition:** Use the `StoreDiskLow` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `StoreDiskLow` alert from our pre-defined alerting rules. {% include {{page.version.version}}/storage/free-up-disk-space.md %} @@ -1157,7 +1157,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the `security_certificate_expiration_ca` metric in the node's `_status/vars` output. -- **Rule definition:** Use the `CACertificateExpiresSoon` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `CACertificateExpiresSoon` alert from our pre-defined alerting rules. #### Node certificate expires soon @@ -1165,7 +1165,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the `security_certificate_expiration_node` metric in the node's `_status/vars` output. -- **Rule definition:** Use the `NodeCertificateExpiresSoon` alert from our pre-defined alerting rules. 
+- **Rule definition:** Use the `NodeCertificateExpiresSoon` alert from our pre-defined alerting rules. #### Changefeed is experiencing high latency diff --git a/src/current/v24.1/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md b/src/current/v24.1/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md index b47593bda96..09b361cd5ac 100644 --- a/src/current/v24.1/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md +++ b/src/current/v24.1/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md @@ -71,7 +71,7 @@ Instead of using this approach, you can now [enable global access](https://cloud Our multi-region deployment approach relies on pod IP addresses being routable across three distinct Kubernetes clusters and regions. Both the hosted Google Kubernetes Engine (GKE) and Amazon Elastic Kubernetes Service (EKS) satisfy this requirement. -If you want to run on another cloud or on-premises, use this [basic network test](https://github.com/cockroachdb/cockroach/tree/master/cloud/kubernetes/multiregion#pod-to-pod-connectivity) to see if it will work. +If you want to run on another cloud or on-premises, use this basic network test to see if it will work.
      1. Complete the **Before You Begin** steps described in the [Google Kubernetes Engine Quickstart](https://cloud.google.com/kubernetes-engine/docs/quickstart) documentation. @@ -322,21 +322,21 @@ This important rule enables node communication between Kubernetes clusters in di The Kubernetes cluster in each region needs to have a [Network Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/introduction.html) pointed at its CoreDNS service, which you will configure in the next step. -1. Upload our load balancer manifest [`dns-lb-eks.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml) to the Kubernetes clusters in all 3 regions: +1. Upload our load balancer manifest [`dns-lb-eks.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml) to the Kubernetes clusters in all 3 regions: {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context + kubectl apply -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context + kubectl apply -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context + kubectl apply -f 
https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context ~~~ You should see the load balancer appear in the Load Balancers section of the EC2 console in each region. This load balancer will route traffic to CoreDNS in the region. @@ -369,11 +369,11 @@ Each Kubernetes cluster has a [CoreDNS](https://coredns.io/) service that respon To enable traffic forwarding to CockroachDB pods in all 3 regions, you need to [modify the ConfigMap](https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configmap-options) for the CoreDNS Corefile in each region. -1. Download and open our ConfigMap template [`configmap.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/configmap.yaml): +1. Download and open our ConfigMap template [`configmap.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/configmap.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/configmap.yaml + curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/configmap.yaml ~~~ 1. After [obtaining the IP addresses of the Network Load Balancers](#set-up-load-balancing) in all 3 regions, you can use this information to define a **separate ConfigMap for each region**. Each unique ConfigMap lists the forwarding addresses for the pods in the 2 other regions. 
@@ -467,7 +467,7 @@ If you plan to run your instances exclusively on private subnets, set the follow {% include_cached copy-clipboard.html %} ~~~ shell $ curl -OOOOOOOOO \ - https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/{README.md,client-secure.yaml,cluster-init-secure.yaml,cockroachdb-statefulset-secure.yaml,dns-lb.yaml,example-app-secure.yaml,external-name-svc.yaml,setup.py,teardown.py} + https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/{README.md,client-secure.yaml,cluster-init-secure.yaml,cockroachdb-statefulset-secure.yaml,dns-lb.yaml,example-app-secure.yaml,external-name-svc.yaml,setup.py,teardown.py} ~~~ 1. Retrieve the `kubectl` "contexts" for your clusters: @@ -685,11 +685,11 @@ The below steps use [`cockroach cert` commands]({% link {{ page.version.version ### Create StatefulSets -1. Download and open our [multi-region StatefulSet configuration](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml). You'll save three versions of this file locally, one for each set of 3 CockroachDB nodes per region. +1. Download and open our [multi-region StatefulSet configuration](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml). You'll save three versions of this file locally, one for each set of 3 CockroachDB nodes per region. {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml ~~~ Look for **TODO** comments in the file. 
These highlight fields you need to define before deploying your StatefulSet. @@ -814,7 +814,7 @@ The pod uses the `root` client certificate created earlier by the `setup.py` scr {% include_cached copy-clipboard.html %} ~~~ shell - kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/client-secure.yaml --context --namespace + kubectl create -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/client-secure.yaml --context --namespace ~~~ ~~~ diff --git a/src/current/v24.1/postgresql-compatibility.md b/src/current/v24.1/postgresql-compatibility.md index 15eff6fdc35..24b3639b1de 100644 --- a/src/current/v24.1/postgresql-compatibility.md +++ b/src/current/v24.1/postgresql-compatibility.md @@ -34,11 +34,11 @@ When set to `true`, multiple portals can be open at the same time, with their ex This feature has the following limitations: - Only read-only [`SELECT` queries]({% link {{ page.version.version }}/selection-queries.md %}) without [subqueries]({% link {{ page.version.version }}/subqueries.md %}) are supported. 
-- Postqueries (which are how CockroachDB executes [foreign key checks]({% link {{ page.version.version }}/foreign-key.md %}), for example) are not supported - [cockroachdb/cockroach#96398](https://github.com/cockroachdb/cockroach/issues/96398) -- [Distributed SQL execution]({% link {{ page.version.version }}/architecture/sql-layer.md %}#distsql) is not supported for multiple active portals; instead queries execute on the [gateway node]({% link {{ page.version.version }}/architecture/life-of-a-distributed-transaction.md %}#gateway) only - [cockroachdb/cockroach#100822](https://github.com/cockroachdb/cockroach/issues/100822) -- Only the latest execution of a statement from a pausable portal is recorded by the [trace infrastructure]({% link {{ page.version.version }}/show-trace.md %}) - [cockroachdb/cockroach#99404](https://github.com/cockroachdb/cockroach/issues/99404) +- Postqueries (which are how CockroachDB executes [foreign key checks]({% link {{ page.version.version }}/foreign-key.md %}), for example) are not supported - cockroachdb/cockroach#96398 +- [Distributed SQL execution]({% link {{ page.version.version }}/architecture/sql-layer.md %}#distsql) is not supported for multiple active portals; instead queries execute on the [gateway node]({% link {{ page.version.version }}/architecture/life-of-a-distributed-transaction.md %}#gateway) only - cockroachdb/cockroach#100822 +- Only the latest execution of a statement from a pausable portal is recorded by the [trace infrastructure]({% link {{ page.version.version }}/show-trace.md %}) - cockroachdb/cockroach#99404 -In addition to the known issues, additional performance testing is needed. The current list of known issues can be viewed [on GitHub using the `A-pausable-portals` label](https://github.com/cockroachdb/cockroach/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc++label%3AA-pausable-portals+), and we welcome bug reports. +In addition to the known issues, additional performance testing is needed. 
The current list of known issues can be viewed on GitHub using the `A-pausable-portals` label, and we welcome bug reports. ## Features that differ from PostgreSQL @@ -181,7 +181,7 @@ An `x` value less than `1` would result in the following error: pq: check constraint violated ~~~ -[#35370](https://github.com/cockroachdb/cockroach/issues/35370) +#35370 ### Column name from an outer column inside a subquery @@ -207,7 +207,7 @@ PostgreSQL: 1 ~~~ -[#46563](https://github.com/cockroachdb/cockroach/issues/46563) +#46563 ### SQL Compatibility diff --git a/src/current/v24.1/query-spatial-data.md b/src/current/v24.1/query-spatial-data.md index 32a98647a7d..13ba0a3b5ab 100644 --- a/src/current/v24.1/query-spatial-data.md +++ b/src/current/v24.1/query-spatial-data.md @@ -24,7 +24,7 @@ Just as CockroachDB strives for [PostgreSQL compatibility]({% link {{ page.versi CockroachDB does not implement the full list of PostGIS built-in functions and operators. Also, [spatial indexing works differently]({% link {{ page.version.version }}/spatial-indexes.md %}) (see the [Performance](#performance) section below). For a list of the spatial functions CockroachDB supports, see [Geospatial functions]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions). -If your application needs support for functions that are not yet implemented, check the [meta-issue for built-in function support on GitHub](https://github.com/cockroachdb/cockroach/issues/49203), which describes how to find an issue for the built-in function(s) you need. +If your application needs support for functions that are not yet implemented, check the meta-issue for built-in function support on GitHub, which describes how to find an issue for the built-in function(s) you need. For a list of other known limitations, see [Known Limitations]({% link {{ page.version.version }}/known-limitations.md %}#spatial-support-limitations). 
diff --git a/src/current/v24.1/read-committed.md b/src/current/v24.1/read-committed.md index 6b83f50d88d..0858d1e0244 100644 --- a/src/current/v24.1/read-committed.md +++ b/src/current/v24.1/read-committed.md @@ -20,7 +20,7 @@ Whereas `SERIALIZABLE` isolation guarantees data correctness by placing transact If your workload is already running well under `SERIALIZABLE` isolation, Cockroach Labs does not recommend changing to `READ COMMITTED` isolation unless there is a specific need. {{site.data.alerts.callout_info}} -`READ COMMITTED` on CockroachDB provides stronger isolation than `READ COMMITTED` on PostgreSQL. On CockroachDB, `READ COMMITTED` prevents anomalies within single statements. For complete details on how `READ COMMITTED` is implemented on CockroachDB, see the [Read Committed RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20230122_read_committed_isolation.md). +`READ COMMITTED` on CockroachDB provides stronger isolation than `READ COMMITTED` on PostgreSQL. On CockroachDB, `READ COMMITTED` prevents anomalies within single statements. For complete details on how `READ COMMITTED` is implemented on CockroachDB, see the [Read Committed RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20230122_read_committed_isolation.md). {{site.data.alerts.end}} ## Enable `READ COMMITTED` isolation @@ -918,5 +918,5 @@ SELECT * FROM schedules - [`SELECT ... 
FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) - [Serializable Transactions]({% link {{ page.version.version }}/demo-serializable.md %}) - [What Write Skew Looks Like](https://www.cockroachlabs.com/blog/what-write-skew-looks-like/) -- [Read Committed RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20230122_read_committed_isolation.md) +- [Read Committed RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20230122_read_committed_isolation.md) - [Migration Overview]({% link molt/migration-overview.md %}) diff --git a/src/current/v24.1/restore.md b/src/current/v24.1/restore.md index 38833aed511..a5ae1c8ceea 100644 --- a/src/current/v24.1/restore.md +++ b/src/current/v24.1/restore.md @@ -219,11 +219,11 @@ When restoring an individual table that references a user-defined type (e.g., [` - If there is an existing type in the cluster with the same name that is compatible with the type in the backup, CockroachDB will map the type in the backup to the type in the cluster. - If there is an existing type in the cluster with the same name but it is _not_ compatible with the type in the backup, the restore will not succeed and you will be asked to resolve the naming conflict. You can do this by either [dropping]({% link {{ page.version.version }}/drop-type.md %}) or [renaming]({% link {{ page.version.version }}/alter-type.md %}) the existing user-defined type. -In general, two types are compatible if they are the same kind (e.g., an enum is only compatible with other enums). Additionally, enums are only compatible if they have the same ordered set of elements that have also been [created in the same way](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20200331_enums.md#physical-layout). For example: +In general, two types are compatible if they are the same kind (e.g., an enum is only compatible with other enums). 
Additionally, enums are only compatible if they have the same ordered set of elements that have also been [created in the same way](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20200331_enums.md#physical-layout). For example: - `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes', 'no')` are compatible. - `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('no', 'yes')` are not compatible. -- `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes'); ALTER TYPE t2 ADD VALUE ('no')` are not compatible because they were not created in the same way. +- `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes'); ALTER TYPE t2 ADD VALUE ('no')` are not compatible because they were not [created in the same way](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20200331_enums.md#physical-layout). ### Object dependencies diff --git a/src/current/v24.1/schedule-cockroachdb-kubernetes.md b/src/current/v24.1/schedule-cockroachdb-kubernetes.md index 973d234ffa9..de53f7c8449 100644 --- a/src/current/v24.1/schedule-cockroachdb-kubernetes.md +++ b/src/current/v24.1/schedule-cockroachdb-kubernetes.md @@ -108,7 +108,7 @@ For more context on how these rules work, see the [Kubernetes documentation](htt Specify pod affinities and anti-affinities in `affinity.podAffinity` and `affinity.podAntiAffinity` in the Operator's custom resource, which is used to [deploy the cluster]({% link {{ page.version.version }}/deploy-cockroachdb-with-kubernetes.md %}#initialize-the-cluster). If you specify multiple `matchExpressions` labels, the node must match all of them. If you specify multiple `values` for a label, the node can match any of the values. 
-The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. +The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. {% include_cached copy-clipboard.html %} ~~~ yaml diff --git a/src/current/v24.1/spatial-tutorial.md b/src/current/v24.1/spatial-tutorial.md index d0b616d2433..c5a13a29f11 100644 --- a/src/current/v24.1/spatial-tutorial.md +++ b/src/current/v24.1/spatial-tutorial.md @@ -966,7 +966,7 @@ Time: 1.447s total (execution 1.446s / network 0.000s) Unfortunately, this query is a bit slower than you would like: about 1.5 seconds on a single-node [`cockroach demo`]({% link {{ page.version.version }}/cockroach-demo.md %}) cluster on a laptop. There are several reasons for this: 1. You haven't created any indexes at all yet. The query is likely to be doing full table scans, which you will need to hunt down with [`EXPLAIN`]({% link {{ page.version.version }}/explain.md %}). -1. CockroachDB does not yet have built-in support for index-based nearest neighbor queries. If this feature is important to you, please comment with some information about your use case on [cockroachdb/cockroach#55227](https://github.com/cockroachdb/cockroach/issues/55227). +1. CockroachDB does not yet have built-in support for index-based nearest neighbor queries. If this feature is important to you, please comment with some information about your use case on cockroachdb/cockroach#55227. 
Let's look at the `EXPLAIN` output to see if there is something that can be done to improve this query's performance: diff --git a/src/current/v24.1/sql-feature-support.md b/src/current/v24.1/sql-feature-support.md index 9bb433cba3e..8ff09bdd2f7 100644 --- a/src/current/v24.1/sql-feature-support.md +++ b/src/current/v24.1/sql-feature-support.md @@ -193,7 +193,7 @@ XML | ✗ | Standard | XML data can be stored as `BYTES`, but we do not offer XM Common table expressions | Partial | Common Extension | [Common Table Expressions documentation]({% link {{ page.version.version }}/common-table-expressions.md %}) Stored procedures | Partial | Common Extension | [Stored procedures documentation]({% link {{ page.version.version }}/stored-procedures.md %}) Cursors | Partial | Standard | [Cursors documentation]({% link {{ page.version.version }}/cursors.md %}) - Triggers | ✗ | Standard | Execute a set of commands whenever a specified event occurs. [GitHub issue tracking trigger support](https://github.com/cockroachdb/cockroach/issues/28296). + Triggers | ✗ | Standard | Execute a set of commands whenever a specified event occurs. GitHub issue tracking trigger support. Row-level TTL | ✓ | Common Extension | Automatically delete expired rows. For more information, see [Batch-delete expired data with Row-Level TTL]({% link {{ page.version.version }}/row-level-ttl.md %}). User-defined functions | Partial | Standard | [User-Defined Functions documentation]({% link {{ page.version.version }}/user-defined-functions.md %}) - `CREATE EXTENSION "uuid-ossp"` | ✓ | Common Extension | Provides access to several additional [UUID generation functions]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions). Note that these UUID functions are available without typing `CREATE EXTENSION "uuid-ossp"`. CockroachDB does not have full support for `CREATE EXTENSION`. 
[GitHub issue tracking `CREATE EXTENSION` support](https://github.com/cockroachdb/cockroach/issues/74777). + `CREATE EXTENSION "uuid-ossp"` | ✓ | Common Extension | Provides access to several additional [UUID generation functions]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions). Note that these UUID functions are available without typing `CREATE EXTENSION "uuid-ossp"`. CockroachDB does not have full support for `CREATE EXTENSION`. GitHub issue tracking `CREATE EXTENSION` support. diff --git a/src/current/v24.1/sql-name-resolution.md b/src/current/v24.1/sql-name-resolution.md index 554a9a65583..9600bfe1ef7 100644 --- a/src/current/v24.1/sql-name-resolution.md +++ b/src/current/v24.1/sql-name-resolution.md @@ -37,7 +37,7 @@ If you are upgrading to {{ page.version.version }}, take any combination of the - [Create new schemas]({% link {{ page.version.version }}/create-schema.md %}) in databases on your cluster. After the schemas are created, use [`ALTER TABLE ... RENAME`]({% link {{ page.version.version }}/alter-table.md %}#rename-to), [`ALTER SEQUENCE ... RENAME`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE ... RENAME`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW ... RENAME`]({% link {{ page.version.version }}/alter-view.md %}) statements to move objects between databases as needed. To move objects between schemas, use [`ALTER TABLE ... SET SCHEMA`]({% link {{ page.version.version }}/alter-table.md %}#set-schema), [`ALTER SEQUENCE ... SET SCHEMA`]({% link {{ page.version.version }}/alter-sequence.md %}), or [`ALTER VIEW ... SET SCHEMA`]({% link {{ page.version.version }}/alter-view.md %}). 
-- If your cluster contains cross-database references (e.g., a cross-database foreign key reference, or a cross-database view reference), use the relevant [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}), [`ALTER SEQUENCE`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW `]({% link {{ page.version.version }}/alter-view.md %}) statements to move any cross-referencing objects to the same database, but different schemas. Cross-database object references were allowed in earlier versions of CockroachDB to make database-object naming hierarchies more flexible for users. In v20.2, creating cross-database references are disabled for [foreign keys](foreign-key.html), [views]({% link {{ page.version.version }}/views.md %}), and [sequence ownership]({% link {{ page.version.version }}/create-sequence.md %}). For details, see [tracking issue](https://github.com/cockroachdb/cockroach/issues/55791). +- If your cluster contains cross-database references (e.g., a cross-database foreign key reference, or a cross-database view reference), use the relevant [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}), [`ALTER SEQUENCE`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW `]({% link {{ page.version.version }}/alter-view.md %}) statements to move any cross-referencing objects to the same database, but different schemas. Cross-database object references were allowed in earlier versions of CockroachDB to make database-object naming hierarchies more flexible for users. In v20.2, creating cross-database references are disabled for [foreign keys](foreign-key.html), [views]({% link {{ page.version.version }}/views.md %}), and [sequence ownership]({% link {{ page.version.version }}/create-sequence.md %}). For details, see the tracking issue.
## How name resolution works diff --git a/src/current/v24.1/st_union.md b/src/current/v24.1/st_union.md index 7beb2f3ae3d..56268b933a2 100644 --- a/src/current/v24.1/st_union.md +++ b/src/current/v24.1/st_union.md @@ -12,7 +12,7 @@ Given a set of shapes (e.g., from a [selection query]({% link {{ page.version.ve - [`GEOMETRY`]({% link {{ page.version.version }}/architecture/glossary.md %}#geometry) {{site.data.alerts.callout_info}} -The non-aggregate version of `ST_Union` is not yet implemented. For more information, see [cockroach#49064](https://github.com/cockroachdb/cockroach/issues/49064). +The non-aggregate version of `ST_Union` is not yet implemented. For more information, see cockroach#49064. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v24.1/temporary-tables.md b/src/current/v24.1/temporary-tables.md index 3a92fc64c97..21af0938cd2 100644 --- a/src/current/v24.1/temporary-tables.md +++ b/src/current/v24.1/temporary-tables.md @@ -10,7 +10,7 @@ docs_area: develop To create a temp table, add `TEMP`/`TEMPORARY` to a [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}) or [`CREATE TABLE AS`]({% link {{ page.version.version }}/create-table-as.md %}) statement. For full syntax details, see the [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}#synopsis) and [`CREATE TABLE AS`]({% link {{ page.version.version }}/create-table-as.md %}#synopsis) pages. For example usage, see [Examples](#examples). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. 
{{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v24.1/upgrade-cockroach-version.md b/src/current/v24.1/upgrade-cockroach-version.md index c03e28790b7..c5f0a72b5e9 100644 --- a/src/current/v24.1/upgrade-cockroach-version.md +++ b/src/current/v24.1/upgrade-cockroach-version.md @@ -326,13 +326,3 @@ In the event of catastrophic failure or corruption, it may be necessary to [rest - [View Version Details]({% link {{ page.version.version }}/cockroach-version.md %}) - [Release notes for our latest version]({% link releases/{{page.version.version}}.md %}) -[#102961]: https://github.com/cockroachdb/cockroach/pull/102961 -[#104265]: https://github.com/cockroachdb/cockroach/pull/104265 -[#107474]: https://github.com/cockroachdb/cockroach/pull/107474 -[#106177]: https://github.com/cockroachdb/cockroach/pull/106177 -[#98308]: https://github.com/cockroachdb/cockroach/pull/98308 -[#105477]: https://github.com/cockroachdb/cockroach/pull/105477 -[#107903]: https://github.com/cockroachdb/cockroach/pull/107903 -[#108190]: https://github.com/cockroachdb/cockroach/pull/108190 -[#110649]: https://github.com/cockroachdb/cockroach/pull/110649 -[#114203]: https://github.com/cockroachdb/cockroach/pull/114203 diff --git a/src/current/v24.1/views.md b/src/current/v24.1/views.md index 1b703728bcf..ce63d6cd0f5 100644 --- a/src/current/v24.1/views.md +++ b/src/current/v24.1/views.md @@ -634,7 +634,7 @@ To speed up queries on materialized views, you can add an [index]({% link {{ pag CockroachDB supports session-scoped temporary views. Unlike persistent views, temporary views can only be accessed from the session in which they were created, and they are dropped at the end of the session. You can create temporary views on both persistent tables and [temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}). 
{{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v24.2/admission-control.md b/src/current/v24.2/admission-control.md index b68517a11f2..fe6e27ce737 100644 --- a/src/current/v24.2/admission-control.md +++ b/src/current/v24.2/admission-control.md @@ -22,7 +22,7 @@ Admission control works on a per-[node]({% link {{ page.version.version }}/archi For more details about how the admission control system works, see: -- The [Admission Control tech note](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/admission_control.md). +- [The Admission Control tech note](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/tech-notes/admission_control.md). - The blog post [Here's how CockroachDB keeps your database from collapsing under load](https://www.cockroachlabs.com/blog/admission-control-in-cockroachdb/). ## Use cases for admission control @@ -145,6 +145,6 @@ The [DB Console Overload dashboard]({% link {{ page.version.version }}/ui-overlo ## See also - The [Overload Dashboard]({% link {{ page.version.version }}/ui-overload-dashboard.md %}) in the [DB Console]({% link {{ page.version.version }}/ui-overview.md %}). -- The [technical note for admission control](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/admission_control.md) for details on the design of the admission control system. +- The technical note for admission control for details on the design of the admission control system. - The blog post [Here's how CockroachDB keeps your database from collapsing under load](https://www.cockroachlabs.com/blog/admission-control-in-cockroachdb/). 
- The blog post [Rubbing Control Theory on the Go scheduler](https://www.cockroachlabs.com/blog/rubbing-control-theory/). diff --git a/src/current/v24.2/architecture/sql-layer.md b/src/current/v24.2/architecture/sql-layer.md index bfb67a48c05..2605879b93d 100644 --- a/src/current/v24.2/architecture/sql-layer.md +++ b/src/current/v24.2/architecture/sql-layer.md @@ -117,7 +117,7 @@ It's also important––for indexed columns––that this byte encoding preser However, for non-indexed columns (e.g., non-`PRIMARY KEY` columns), CockroachDB instead uses an encoding (known as "value encoding") which consumes less space but does not preserve ordering. -You can find more exhaustive detail in the [Encoding Tech Note](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/encoding.md). +You can find more exhaustive detail in [the Encoding Tech Note](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/tech-notes/encoding.md). ### DistSQL @@ -134,7 +134,7 @@ To run SQL statements in a distributed fashion, we introduce a couple of concept - **Logical plan**: Similar to the AST/`planNode` tree described above, it represents the abstract (non-distributed) data flow through computation stages. - **Physical plan**: A physical plan is conceptually a mapping of the logical plan nodes to physical machines running `cockroach`. Logical plan nodes are replicated and specialized depending on the cluster topology. Like `planNodes` above, these components of the physical plan are scheduled and run on the cluster. -You can find much greater detail in the [DistSQL RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20160421_distributed_sql.md). +You can find much greater detail in [the DistSQL RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20160421_distributed_sql.md). 
## Schema changes diff --git a/src/current/v24.2/cluster-settings.md b/src/current/v24.2/cluster-settings.md index 8bb83a25a09..149573f51ab 100644 --- a/src/current/v24.2/cluster-settings.md +++ b/src/current/v24.2/cluster-settings.md @@ -26,7 +26,7 @@ These cluster settings have a broad impact on CockroachDB internals and affect a {% include {{page.version.version}}/sql/sql-defaults-cluster-settings-deprecation-notice.md %} -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/settings/settings.html %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/settings/settings.html{% endcapture %}{% include {{ cockroach_include }} %} ## View current cluster settings diff --git a/src/current/v24.2/cluster-setup-troubleshooting.md b/src/current/v24.2/cluster-setup-troubleshooting.md index 997923e9f03..300ad9c05f1 100644 --- a/src/current/v24.2/cluster-setup-troubleshooting.md +++ b/src/current/v24.2/cluster-setup-troubleshooting.md @@ -104,7 +104,7 @@ If you are trying to run all nodes on the same machine, you might get the follow #### Store directory already exists ~~~ -ERROR: could not cleanup temporary directories from record file: could not lock temporary directory /Users/amruta/go/src/github.com/cockroachdb/cockroach/cockroach-data/cockroach-temp301343769, may still be in use: IO error: While lock file: /Users/amruta/go/src/github.com/cockroachdb/cockroach/cockroach-data/cockroach-temp301343769/TEMP_DIR.LOCK: Resource temporarily unavailable +ERROR: could not cleanup temporary directories from record file: could not lock temporary directory /cockroach-data/cockroach-temp301343769, may still be in use: IO error: While lock file: /cockroach-data/cockroach-temp301343769/TEMP_DIR.LOCK: Resource temporarily unavailable ~~~ **Explanation:** When starting a new node on the same machine, the directory you choose to store the data in also contains metadata 
identifying the cluster the data came from. This causes conflicts when you've already started a node on the server and then tried to start another cluster using the same directory. diff --git a/src/current/v24.2/cockroachdb-feature-availability.md b/src/current/v24.2/cockroachdb-feature-availability.md index 51cb77ca5e1..ee437b28bf2 100644 --- a/src/current/v24.2/cockroachdb-feature-availability.md +++ b/src/current/v24.2/cockroachdb-feature-availability.md @@ -152,7 +152,7 @@ Example: ### Check for constraint violations with `SCRUB` -Checks the consistency of [`UNIQUE`]({% link {{ page.version.version }}/unique.md %}) indexes, [`CHECK`]({% link {{ page.version.version }}/check.md %}) constraints, and more. Partially implemented; see [cockroachdb/cockroach#10425](https://github.com/cockroachdb/cockroach/issues/10425) for details. +Checks the consistency of [`UNIQUE`]({% link {{ page.version.version }}/unique.md %}) indexes, [`CHECK`]({% link {{ page.version.version }}/check.md %}) constraints, and more. Partially implemented; see cockroachdb/cockroach#10425 for details. {{site.data.alerts.callout_info}} This example uses the `users` table from our open-source, fictional peer-to-peer vehicle-sharing application, [MovR]({% link {{ page.version.version }}/movr.md %}). @@ -187,7 +187,7 @@ CockroachDB supports [altering the column types]({% link {{ page.version.version ### Temporary objects -[Temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}), [temporary views]({% link {{ page.version.version }}/views.md %}#temporary-views), and [temporary sequences]({% link {{ page.version.version }}/create-sequence.md %}#temporary-sequences) are in preview in CockroachDB. If you create too many temporary objects in a session, the performance of DDL operations will degrade. Performance limitations could persist long after creating the temporary objects. For more details, see [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). 
+[Temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}), [temporary views]({% link {{ page.version.version }}/views.md %}#temporary-views), and [temporary sequences]({% link {{ page.version.version }}/create-sequence.md %}#temporary-sequences) are in preview in CockroachDB. If you create too many temporary objects in a session, the performance of DDL operations will degrade. Performance limitations could persist long after creating the temporary objects. For more details, see cockroachdb/cockroach#46260. To enable temporary objects, set the `experimental_enable_temp_tables` [session variable]({% link {{ page.version.version }}/show-vars.md %}) to `on`. diff --git a/src/current/v24.2/common-table-expressions.md b/src/current/v24.2/common-table-expressions.md index 09ac09d1419..c411c2668a1 100644 --- a/src/current/v24.2/common-table-expressions.md +++ b/src/current/v24.2/common-table-expressions.md @@ -444,7 +444,7 @@ SELECT COUNT(*) FROM temp; Because this pattern incurs the overhead of a new scan for each iteration, it is slower per row than a full scan. It is therefore faster than a full scan in cases (such as this one) where many rows are skipped, but is slower if they are not. {{site.data.alerts.callout_info}} -Some recursive CTEs are not not yet optimized. For details, see the [tracking issue](https://github.com/cockroachdb/cockroach/issues/89954). +Some recursive CTEs are not yet optimized. For details, see the tracking issue.
{{site.data.alerts.end}} ## Correlated common table expressions diff --git a/src/current/v24.2/configure-replication-zones.md b/src/current/v24.2/configure-replication-zones.md index 309da81984a..bfddfce2f9a 100644 --- a/src/current/v24.2/configure-replication-zones.md +++ b/src/current/v24.2/configure-replication-zones.md @@ -98,7 +98,7 @@ For more information, see the following subsections: The hierarchy of inheritance for zone configs can be visualized using the following outline-style diagram, in which each level of indentation denotes an inheritance relationship. -The only exception to this simple inheritance relationship is that due to a known limitation, sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information, see [cockroachdb/cockroach#75862](https://github.com/cockroachdb/cockroach/issues/75862). +The only exception to this simple inheritance relationship is that due to a known limitation, sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information, see cockroachdb/cockroach#75862. ``` - default @@ -127,7 +127,7 @@ From the whole-system perspective, the hierarchy of schema object zone configs c The following diagram presents the same set of schema objects as the previous outline-style diagram, but using boxes and lines joined with arrows that represent the "top-down" view. -Each box represents a schema object in the zone configuration inheritance hierarchy. Each solid line ends in an arrow that points from a parent object to its child object, which will inherit the parent's values unless those values are changed at the child level. The dotted lines between partitions and sub-partitions represent the known limitation mentioned previously that sub-partitions do not inherit their values from their parent partitions. 
Instead, sub-partitions inherit their values from the parent table. For more information about this limitation, see [cockroachdb/cockroach#75862](https://github.com/cockroachdb/cockroach/issues/75862). +Each box represents a schema object in the zone configuration inheritance hierarchy. Each solid line ends in an arrow that points from a parent object to its child object, which will inherit the parent's values unless those values are changed at the child level. The dotted lines between partitions and sub-partitions represent the known limitation mentioned previously that sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information about this limitation, see cockroachdb/cockroach#75862. zone config inheritance diagram diff --git a/src/current/v24.2/create-sequence.md b/src/current/v24.2/create-sequence.md index eba6f3a07f7..389d71675ed 100644 --- a/src/current/v24.2/create-sequence.md +++ b/src/current/v24.2/create-sequence.md @@ -58,7 +58,7 @@ CockroachDB supports the following [SQL sequence functions]({% link {{ page.vers CockroachDB supports session-scoped temporary sequences. Unlike persistent sequences, temporary sequences can only be accessed from the session in which they were created, and they are dropped at the end of the session. You can create temporary sequences on both persistent tables and [temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. 
{{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v24.2/create-table.md b/src/current/v24.2/create-table.md index d1a4c389267..d6738c3e0a0 100644 --- a/src/current/v24.2/create-table.md +++ b/src/current/v24.2/create-table.md @@ -155,7 +155,7 @@ If you use `GENERATED BY DEFAULT AS IDENTITY` to define the identity column, any Note the following limitations of identity columns: -- `GENERATED ALWAYS AS IDENTITY`/`GENERATED BY DEFAULT AS IDENTITY` is supported in [`ALTER TABLE ... ADD COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#add-column) statements only when the table being altered is empty, as [CockroachDB does not support back-filling sequential column data]({% link {{ page.version.version }}/known-limitations.md %}#adding-a-column-with-sequence-based-default-values). For more information, see [#42508](https://github.com/cockroachdb/cockroach/issues/42508). +- `GENERATED ALWAYS AS IDENTITY`/`GENERATED BY DEFAULT AS IDENTITY` is supported in [`ALTER TABLE ... ADD COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#add-column) statements only when the table being altered is empty, as [CockroachDB does not support back-filling sequential column data]({% link {{ page.version.version }}/known-limitations.md %}#adding-a-column-with-sequence-based-default-values). For more information, see #42508. - Unlike PostgreSQL, CockroachDB does not support using the `OVERRIDING SYSTEM VALUE` clause in `INSERT`/`UPDATE`/`UPSERT` statements to overwrite `GENERATED ALWAYS AS IDENTITY` identity column values. For an example of an identity column, see [Create a table with an identity column](#create-a-table-with-an-identity-column). 
diff --git a/src/current/v24.2/disaster-recovery-planning.md b/src/current/v24.2/disaster-recovery-planning.md index ae6bdbc5f1b..bcc6a4a29e3 100644 --- a/src/current/v24.2/disaster-recovery-planning.md +++ b/src/current/v24.2/disaster-recovery-planning.md @@ -320,7 +320,7 @@ If your cluster is running, you do not have a backup that encapsulates the time If you have corrupted data in a database or table, [restore]({% link {{ page.version.version }}/restore.md %}) the object from a prior [backup]({% link {{ page.version.version }}/backup.md %}). If revision history is in the backup, you can restore from a [point in time]({% link {{ page.version.version }}/take-backups-with-revision-history-and-restore-from-a-point-in-time.md %}). -Instead of dropping the corrupted table or database, we recommend [renaming the table]({% link {{ page.version.version }}/alter-table.md %}#rename-to) or [renaming the database]({% link {{ page.version.version }}/alter-database.md %}#rename-to) so you have historical data to compare to later. If you drop a database, the database cannot be referenced with `AS OF SYSTEM TIME` queries (see [#51380](https://github.com/cockroachdb/cockroach/issues/51380) for more information), and you will need to take a backup that is backdated to the system time when the database still existed. +Instead of dropping the corrupted table or database, we recommend [renaming the table]({% link {{ page.version.version }}/alter-table.md %}#rename-to) or [renaming the database]({% link {{ page.version.version }}/alter-database.md %}#rename-to) so you have historical data to compare to later. If you drop a database, the database cannot be referenced with `AS OF SYSTEM TIME` queries (see #51380 for more information), and you will need to take a backup that is backdated to the system time when the database still existed. 
{{site.data.alerts.callout_info}} If the table you are restoring has foreign keys, [careful consideration]({% link {{ page.version.version }}/restore.md %}#remove-the-foreign-key-before-restore) should be applied to make sure data integrity is maintained during the restore process. diff --git a/src/current/v24.2/eventlog.md b/src/current/v24.2/eventlog.md index 2dd3c072d82..e7b813ded9d 100644 --- a/src/current/v24.2/eventlog.md +++ b/src/current/v24.2/eventlog.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/eventlog.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/eventlog.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v24.2/example-apps.md b/src/current/v24.2/example-apps.md index bea3cfd1876..0d5ba010933 100644 --- a/src/current/v24.2/example-apps.md +++ b/src/current/v24.2/example-apps.md @@ -14,7 +14,7 @@ Click the links in the tables below to see simple but complete example applicati If you are looking to do a specific task such as connect to the database, insert data, or run multi-statement transactions, see [this list of tasks](#tasks). {{site.data.alerts.callout_info}} -Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM with **partial** support. If you encounter problems, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward full support. +Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM with **partial** support. If you encounter problems, please open an issue with details to help us make progress toward full support. 
Note that tools with [**community-level** support]({% link {{ page.version.version }}/community-tooling.md %}) have been tested or developed by the CockroachDB community, but are not officially supported by Cockroach Labs. If you encounter problems with using these tools, please contact the maintainer of the tool with details. {{site.data.alerts.end}} diff --git a/src/current/v24.2/file-an-issue.md b/src/current/v24.2/file-an-issue.md index 9255e2c08b2..760c5b181db 100644 --- a/src/current/v24.2/file-an-issue.md +++ b/src/current/v24.2/file-an-issue.md @@ -28,7 +28,7 @@ To file an issue in GitHub, we need the following information: ### Template -You can use this as a template for [filing an issue in GitHub](https://github.com/cockroachdb/cockroach/issues/new): +You can use this as a template for filing an issue in GitHub: ~~~ diff --git a/src/current/v24.2/fips.md b/src/current/v24.2/fips.md index e490ddcd296..1ca62a6fa8e 100644 --- a/src/current/v24.2/fips.md +++ b/src/current/v24.2/fips.md @@ -58,7 +58,7 @@ A system must have FIPS mode enabled in the kernel before it can run the FIPS-re If you do not want to use the FIPS-ready CockroachDB Docker image directly, you can create a custom Docker image based on [Red Hat's Universal Base Image 8 Docker image](https://catalog.redhat.com/software/containers/ubi8/ubi/): -- You can model your Dockerfile on the one that Cockroach Labs uses to produce the [FIPS-ready Docker image](https://github.com/cockroachdb/cockroach/blob/master/build/deploy/Dockerfile) for CockroachDB. +- You can model your Dockerfile on the one that Cockroach Labs uses to produce the FIPS-ready Docker image for CockroachDB. - Your Dockerfile must install OpenSSL before it starts the `cockroach` binary. - You must enable FIPS mode on the Docker host kernel before it can run containers with FIPS mode enabled. The FIPS-ready CockroachDB Docker image must run with FIPS mode enabled. 
To enable FIPS mode in the Docker host kernel, refer to [Enable FIPS mode](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/security_hardening/assembly_installing-a-rhel-8-system-with-fips-mode-enabled_security-hardening) in Red Hat's documentation. To verify that FIPS mode is enabled, refer to [Verify that the kernel enforces FIPS mode](#verify-that-the-kernel-enforces-fips-mode). diff --git a/src/current/v24.2/foreign-key.md b/src/current/v24.2/foreign-key.md index 55677511e3d..7b06a11d484 100644 --- a/src/current/v24.2/foreign-key.md +++ b/src/current/v24.2/foreign-key.md @@ -93,7 +93,7 @@ For matching purposes, composite foreign keys can be in one of three states: For examples showing how these key matching algorithms work, see [Match composite foreign keys with `MATCH SIMPLE` and `MATCH FULL`](#match-composite-foreign-keys-with-match-simple-and-match-full). {{site.data.alerts.callout_info}} -CockroachDB does not support `MATCH PARTIAL`. For more information, see issue [#20305](https://github.com/cockroachdb/cockroach/issues/20305). +CockroachDB does not support `MATCH PARTIAL`. For more information, see issue #20305. 
{{site.data.alerts.end}} ### Foreign key actions diff --git a/src/current/v24.2/functions-and-operators.md b/src/current/v24.2/functions-and-operators.md index 550ba99eb0a..cd841e81d36 100644 --- a/src/current/v24.2/functions-and-operators.md +++ b/src/current/v24.2/functions-and-operators.md @@ -51,7 +51,7 @@ In addition to the built-in functions described in the following sections, Cockr ## Built-in functions -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/functions.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/functions.md{% endcapture %}{% include {{ cockroach_include }} %} ## Aggregate functions @@ -61,11 +61,11 @@ For examples showing how to use aggregate functions, see [the `SELECT` clause do Non-commutative aggregate functions are sensitive to the order in which the rows are processed in the surrounding [`SELECT` clause]({% link {{ page.version.version }}/select-clause.md %}#aggregate-functions). To specify the order in which input rows are processed, you can add an [`ORDER BY`]({% link {{ page.version.version }}/order-by.md %}) clause within the function argument list. For examples, see the [`SELECT` clause]({% link {{ page.version.version }}/select-clause.md %}#order-aggregate-function-input-rows-by-column) documentation. 
{{site.data.alerts.end}} -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/aggregates.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/aggregates.md{% endcapture %}{% include {{ cockroach_include }} %} ## Window functions -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/window_functions.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/window_functions.md{% endcapture %}{% include {{ cockroach_include }} %} ## Operators @@ -137,7 +137,7 @@ The following table lists all CockroachDB operators from highest to lowest prece ### Supported operations -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/operators.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/operators.md{% endcapture %}{% include {{ cockroach_include }} %} {% comment %} ## `CAST()` diff --git a/src/current/v24.2/install-client-drivers.md b/src/current/v24.2/install-client-drivers.md index 984671a0dfc..4d919756ce3 100644 --- a/src/current/v24.2/install-client-drivers.md +++ b/src/current/v24.2/install-client-drivers.md @@ -8,7 +8,7 @@ docs_area: develop CockroachDB supports both native drivers and the PostgreSQL wire protocol, so most available PostgreSQL client drivers and ORM frameworks should work with CockroachDB. Choose a language for supported clients, and follow the installation steps. After you install a client library, you can [connect to the database]({% link {{ page.version.version }}/connect-to-the-database.md %}). {{site.data.alerts.callout_info}} -Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM framework with **partial** support. 
If you encounter problems, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward full support. +Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM framework with **partial** support. If you encounter problems, please open an issue with details to help us make progress toward full support. {{site.data.alerts.end}}
      diff --git a/src/current/v24.2/install-cockroachdb-mac.md b/src/current/v24.2/install-cockroachdb-mac.md index 5155c5bf0cd..eaaa8742c52 100644 --- a/src/current/v24.2/install-cockroachdb-mac.md +++ b/src/current/v24.2/install-cockroachdb-mac.md @@ -201,4 +201,4 @@ CockroachDB runtimes built for the ARM architecture have the following limitatio {% comment %}v22.2.0+{% endcomment %} -On macOS ARM systems, [spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the GEOS libraries. Users needing spatial features on an ARM Mac may instead [run the Intel binary](#install-the-binary) or use the[Docker container image](#use-docker). Refer to [GitHub issue #93161](https://github.com/cockroachdb/cockroach/issues/93161) for more information. +On macOS ARM systems, [spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the GEOS libraries. Users needing spatial features on an ARM Mac may instead [run the Intel binary](#install-the-binary) or use the [Docker container image](#use-docker). Refer to GitHub issue #93161 for more information.
diff --git a/src/current/v24.2/intellij-idea.md b/src/current/v24.2/intellij-idea.md index de46249320f..ef1ce93b9d4 100644 --- a/src/current/v24.2/intellij-idea.md +++ b/src/current/v24.2/intellij-idea.md @@ -81,7 +81,7 @@ You can now use IntelliJ's [database tool window](https://www.jetbrains.com/help ## Report issues with IntelliJ IDEA & CockroachDB -If you encounter issues other than those outlined above, please [file an issue on the `cockroachdb/cockroach` GitHub repo](https://github.com/cockroachdb/cockroach/issues/new?template=bug_report.md), including the following details about the environment where you encountered the issue: +If you encounter issues other than those outlined above, please file an issue on the `cockroachdb/cockroach` GitHub repo, including the following details about the environment where you encountered the issue: - CockroachDB version ([`cockroach version`]({% link {{ page.version.version }}/cockroach-version.md %})) - IntelliJ IDEA version diff --git a/src/current/v24.2/known-limitations.md b/src/current/v24.2/known-limitations.md index 71b83b7b5c4..8cadb45260c 100644 --- a/src/current/v24.2/known-limitations.md +++ b/src/current/v24.2/known-limitations.md @@ -53,7 +53,7 @@ CockroachDB supports the [PostgreSQL wire protocol](https://www.postgresql.org/d #### `CAST` expressions containing a subquery with an `ENUM` target are not supported -Casting subqueries to ENUMs in views and UDFs is not supported. [#108184](https://github.com/cockroachdb/cockroach/issues/108184) +Casting subqueries to ENUMs in views and UDFs is not supported. #108184 #### Statements containing multiple modification subqueries of the same table are disallowed @@ -62,13 +62,13 @@ Statements containing multiple modification subqueries mutating the same row cou - Set the `sql.multiple_modifications_of_table.enabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) to `true`. 
- Use the `enable_multiple_modifications_of_table` [session variable]({% link {{ page.version.version }}/set-vars.md %}). -If multiple mutations inside the same statement affect different tables with [`FOREIGN KEY`]({% link {{ page.version.version }}/foreign-key.md %}) relations and `ON CASCADE` clauses between them, the results will be different from what is expected in PostgreSQL. [#70731](https://github.com/cockroachdb/cockroach/issues/70731) +If multiple mutations inside the same statement affect different tables with [`FOREIGN KEY`]({% link {{ page.version.version }}/foreign-key.md %}) relations and `ON CASCADE` clauses between them, the results will be different from what is expected in PostgreSQL. #70731 #### Using `default_int_size` session variable in batch of statements When setting the `default_int_size` [session variable]({% link {{ page.version.version }}/set-vars.md %}) in a batch of statements such as `SET default_int_size='int4'; SELECT 1::IN`, the `default_int_size` variable will not take effect until the next statement. Statement parsing is asynchronous with statement execution. -As a workaround, set `default_int_size` via your database driver, or ensure that `SET default_int_size` is in its own statement. [#32846](https://github.com/cockroachdb/cockroach/issues/32846) +As a workaround, set `default_int_size` via your database driver, or ensure that `SET default_int_size` is in its own statement. #32846 #### Overload resolution for collated strings @@ -95,15 +95,15 @@ Many string operations are not properly overloaded for [collated strings]({% lin pq: unsupported binary operator: || ~~~ -[#10679](https://github.com/cockroachdb/cockroach/issues/10679) +#10679 #### Current sequence value not checked when updating min/max value -Altering the minimum or maximum value of a series does not check the current value of a series. 
This means that it is possible to silently set the maximum to a value less than, or a minimum value greater than, the current value. [#23719](https://github.com/cockroachdb/cockroach/issues/23719) +Altering the minimum or maximum value of a series does not check the current value of a series. This means that it is possible to silently set the maximum to a value less than, or a minimum value greater than, the current value. #23719 #### `null_ordered_last` does not produce correct results with tuples -By default, CockroachDB orders `NULL`s before all other values. For compatibility with PostgreSQL, the `null_ordered_last` [session variable]({% link {{ page.version.version }}/set-vars.md %}) was added, which changes the default to order `NULL` values after all other values. This works in most cases, due to some transformations CockroachDB makes in the optimizer to add extra ordering columns. However, it does not work when the ordering column is a tuple. [#93558](https://github.com/cockroachdb/cockroach/issues/93558) +By default, CockroachDB orders `NULL`s before all other values. For compatibility with PostgreSQL, the `null_ordered_last` [session variable]({% link {{ page.version.version }}/set-vars.md %}) was added, which changes the default to order `NULL` values after all other values. This works in most cases, due to some transformations CockroachDB makes in the optimizer to add extra ordering columns. However, it does not work when the ordering column is a tuple. #93558 ### Functions and procedures @@ -149,10 +149,10 @@ Transactions with [priority `HIGH`]({% link {{ page.version.version }}/transacti ERROR: unimplemented: cannot use ROLLBACK TO SAVEPOINT in a HIGH PRIORITY transaction containing DDL SQLSTATE: 0A000 HINT: You have attempted to use a feature that is not yet implemented. 
-See: https://github.com/cockroachdb/cockroach/issues/46414 +See: cockroachdb/cockroach#46414 ~~~ -[#46414](https://github.com/cockroachdb/cockroach/issues/46414) +#46414 #### `CANCEL JOB` limitations @@ -203,7 +203,7 @@ HINT: You have attempted to use a feature that is not yet implemented. See: https://go.crdb.dev/issue-v/42508/v24.2 ~~~ -[#42508](https://github.com/cockroachdb/cockroach/issues/42508) +#42508 #### Dropping a column referenced by a partial index @@ -230,9 +230,9 @@ When executing an [`ALTER TABLE ADD COLUMN`]({% link {{ page.version.version }}/ Some [schema changes]({% link {{ page.version.version }}/online-schema-changes.md %}) that [drop columns]({% link {{ page.version.version }}/alter-table.md %}#drop-column) cannot be [rolled back]({% link {{ page.version.version }}/rollback-transaction.md %}) properly. -In some cases, the rollback will succeed, but the column data might be partially or totally missing, or stale due to the asynchronous nature of the schema change. [#46541](https://github.com/cockroachdb/cockroach/issues/46541) +In some cases, the rollback will succeed, but the column data might be partially or totally missing, or stale due to the asynchronous nature of the schema change. #46541 -In other cases, the rollback will fail in such a way that will never be cleaned up properly, leaving the table descriptor in a state where no other schema changes can be run successfully. 
#47712 To reduce the chance that a column drop will roll back incorrectly: @@ -268,7 +268,7 @@ As a workaround, you can either [manually split a table's columns into multiple #### Placeholders in `PARTITION BY` -{% include {{ page.version.version }}/known-limitations/partitioning-with-placeholders.md %} [#19464](https://github.com/cockroachdb/cockroach/issues/19464) +{% include {{ page.version.version }}/known-limitations/partitioning-with-placeholders.md %} #19464 #### Unsupported trigram syntax @@ -284,7 +284,7 @@ The following PostgreSQL syntax and features are currently unsupported for [full #### CockroachDB does not allow inverted indexes with `STORING` -CockroachDB does not allow inverted indexes with a [`STORING` column]({% link {{ page.version.version }}/create-index.md %}#store-columns). [#88278](https://github.com/cockroachdb/cockroach/issues/88278) +CockroachDB does not allow inverted indexes with a [`STORING` column]({% link {{ page.version.version }}/create-index.md %}#store-columns). #88278 #### Multiple arbiter indexes for `INSERT ON CONFLICT DO UPDATE` @@ -304,19 +304,19 @@ CockroachDB does not allow inverted indexes with a [`STORING` column]({% link {{ CockroachDB supports efficiently storing and querying [spatial data]({% link {{ page.version.version }}/export-spatial-data.md %}), with the following limitations: -- Not all [PostGIS spatial functions](https://postgis.net/docs/reference.html) are supported. [#49203](https://github.com/cockroachdb/cockroach/issues/49203) +- Not all [PostGIS spatial functions](https://postgis.net/docs/reference.html) are supported. #49203 -- The `AddGeometryColumn` [spatial function]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) only allows constant arguments. 
[#49402](https://github.com/cockroachdb/cockroach/issues/49402) +- The `AddGeometryColumn` [spatial function]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) only allows constant arguments. #49402 -- The `AddGeometryColumn` spatial function only allows the `true` value for its `use_typmod` parameter. [#49448](https://github.com/cockroachdb/cockroach/issues/49448) +- The `AddGeometryColumn` spatial function only allows the `true` value for its `use_typmod` parameter. #49448 -- CockroachDB does not support the `@` operator. Instead of using `@` in spatial expressions, we recommend using the inverse, with `~`. For example, instead of `a @ b`, use `b ~ a`. [#56124](https://github.com/cockroachdb/cockroach/issues/56124) +- CockroachDB does not support the `@` operator. Instead of using `@` in spatial expressions, we recommend using the inverse, with `~`. For example, instead of `a @ b`, use `b ~ a`. #56124 -- CockroachDB does not yet support [`INSERT`]({% link {{ page.version.version }}/insert.md %})s into the [`spatial_ref_sys` table]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-system-tables). This limitation also blocks the [`ogr2ogr -f PostgreSQL` file conversion command](https://gdal.org/programs/ogr2ogr.html#cmdoption-ogr2ogr-f). [#55903](https://github.com/cockroachdb/cockroach/issues/55903) +- CockroachDB does not yet support [`INSERT`]({% link {{ page.version.version }}/insert.md %})s into the [`spatial_ref_sys` table]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-system-tables). This limitation also blocks the [`ogr2ogr -f PostgreSQL` file conversion command](https://gdal.org/programs/ogr2ogr.html#cmdoption-ogr2ogr-f). #55903 -- CockroachDB does not yet support [k-nearest neighbors](https://wikipedia.org/wiki/K-nearest_neighbors_algorithm). 
[#55227](https://github.com/cockroachdb/cockroach/issues/55227) +- CockroachDB does not yet support [k-nearest neighbors](https://wikipedia.org/wiki/K-nearest_neighbors_algorithm). #55227 -- CockroachDB does not support using [schema name prefixes]({% link {{ page.version.version }}/sql-name-resolution.md %}#how-name-resolution-works) to refer to [data types]({% link {{ page.version.version }}/data-types.md %}) with type modifiers (e.g., `public.geometry(linestring, 4326)`). Instead, use fully-unqualified names to refer to data types with type modifiers (e.g., `geometry(linestring,4326)`). [#56492](https://github.com/cockroachdb/cockroach/issues/56492) +- CockroachDB does not support using [schema name prefixes]({% link {{ page.version.version }}/sql-name-resolution.md %}#how-name-resolution-works) to refer to [data types]({% link {{ page.version.version }}/data-types.md %}) with type modifiers (e.g., `public.geometry(linestring, 4326)`). Instead, use fully-unqualified names to refer to data types with type modifiers (e.g., `geometry(linestring,4326)`). #56492 - {% include {{ page.version.version }}/known-limitations/srid-4326-limitations.md %} @@ -330,9 +330,9 @@ Refer to [`OID` best practices]({% link {{ page.version.version }}/oid.md %}#bes - {% include {{page.version.version}}/cdc/avro-udt-composite.md %} - {% include {{page.version.version}}/cdc/csv-udt-composite.md %} -- Updating subfields of composite types using dot syntax results in a syntax error. [#102984](https://github.com/cockroachdb/cockroach/issues/102984) +- Updating subfields of composite types using dot syntax results in a syntax error. #102984 -- Tuple elements cannot be accessed without enclosing the [composite variable]({% link {{ page.version.version }}/create-type.md %}#create-a-composite-data-type) name in parentheses. For example, `(v).x`. 
[#114687](https://github.com/cockroachdb/cockroach/issues/114687) +- Tuple elements cannot be accessed without enclosing the [composite variable]({% link {{ page.version.version }}/create-type.md %}#create-a-composite-data-type) name in parentheses. For example, `(v).x`. #114687 #### `ALTER TYPE` limitations @@ -376,7 +376,7 @@ However, if there is no host at the target IP address, or if a firewall rule blo - Configure any active network firewalls to allow node-to-node traffic. - Verify that orchestration tools (e.g., Kubernetes) are configured to use the correct network connection information. -[#53410](https://github.com/cockroachdb/cockroach/issues/53410) +#53410 #### No guaranteed state switch from `DECOMMISSIONING` to `DECOMMISSIONED` if `node decommission` is interrupted @@ -385,7 +385,7 @@ There is no guaranteed state switch from `DECOMMISSIONING` to `DECOMMISSIONED` i - The `cockroach node decommission --wait-all` command was run and then interrupted - The `cockroach node decommission --wait=none` command was run -This is because the state flip is effected by the CLI program at the end. Only the CLI (or its underlying API call) is able to finalize the "decommissioned" state. If the command is interrupted, or `--wait=none` is used, the state will only flip to "decommissioned" when the CLI program is run again after decommissioning has done all its work. [#94430](https://github.com/cockroachdb/cockroach/issues/94430) +This is because the state flip is effected by the CLI program at the end. Only the CLI (or its underlying API call) is able to finalize the "decommissioned" state. If the command is interrupted, or `--wait=none` is used, the state will only flip to "decommissioned" when the CLI program is run again after decommissioning has done all its work. 
#94430 #### Simultaneous client connections and running queries on a single node @@ -421,7 +421,7 @@ As a workaround, [execute the file from the command line]({% link {{ page.versio #### Spatial features disabled for ARM Macs -[Spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the [GEOS](https://libgeos.org/) libraries. Users needing spatial features on an ARM Mac may instead [use Rosetta](https://developer.apple.com/documentation/virtualization/running_intel_binaries_in_linux_vms_with_rosetta) to [run the Intel binary]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#install-binary) or use the [Docker image]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#use-docker) distribution. [GitHub tracking issue](https://github.com/cockroachdb/cockroach/issues/93161) +[Spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the [GEOS](https://libgeos.org/) libraries. Users needing spatial features on an ARM Mac may instead [use Rosetta](https://developer.apple.com/documentation/virtualization/running_intel_binaries_in_linux_vms_with_rosetta) to [run the Intel binary]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#install-binary) or use the [Docker image]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#use-docker) distribution. GitHub tracking issue #### Logging system limitations @@ -466,7 +466,7 @@ Accessing the DB Console for a secure cluster now requires login information (i. The [`COMMENT ON`]({% link {{ page.version.version }}/comment-on.md %}) statement associates comments to databases, tables, or columns. However, the internal table (`system.comments`) in which these comments are stored is not captured by a [`BACKUP`]({% link {{ page.version.version }}/backup.md %}) of a table or database. 
-As a workaround, take a cluster backup instead, as the `system.comments` table is included in cluster backups. [#44396](https://github.com/cockroachdb/cockroach/issues/44396) +As a workaround, take a cluster backup instead, as the `system.comments` table is included in cluster backups. #44396 #### `SHOW BACKUP` does not support symlinks for nodelocal @@ -492,8 +492,8 @@ Change data capture (CDC) provides efficient, distributed, row-level changefeeds The SQL optimizer has limitations under certain isolation levels: -- The new implementation of `SELECT FOR UPDATE` is not yet the default setting under `SERIALIZABLE` isolation. It can be used under `SERIALIZABLE` isolation by setting the `optimizer_use_lock_op_for_serializable` [session setting]({% link {{ page.version.version }}/session-variables.md %}) to `true`. [#114737](https://github.com/cockroachdb/cockroach/issues/114737) -- `SELECT FOR UPDATE` does not lock completely-`NULL` column families in multi-column-family tables. [#116836](https://github.com/cockroachdb/cockroach/issues/116836) +- The new implementation of `SELECT FOR UPDATE` is not yet the default setting under `SERIALIZABLE` isolation. It can be used under `SERIALIZABLE` isolation by setting the `optimizer_use_lock_op_for_serializable` [session setting]({% link {{ page.version.version }}/session-variables.md %}) to `true`. #114737 +- `SELECT FOR UPDATE` does not lock completely-`NULL` column families in multi-column-family tables. #116836 #### Statistics limitations @@ -502,7 +502,7 @@ The SQL optimizer has limitations under certain isolation levels: #### Incorrect query plans for partitions with `NULL` values -In cases where the partition definition includes a comparison with `NULL` and a query constraint, incorrect query plans are returned. However, this case uses non-standard partitioning which defines partitions which could never hold values, so it is not likely to occur in production environments. 
[#82774](https://github.com/cockroachdb/cockroach/issues/82774) +In cases where the partition definition includes a comparison with `NULL` and a query constraint, incorrect query plans are returned. However, this case uses non-standard partitioning that defines partitions that could never hold values, so it is not likely to occur in production environments. #82774 #### Vectorized engine limitations @@ -510,15 +510,15 @@ In cases where the partition definition includes a comparison with `NULL` and a #### `transaction_rows_read_err` and `transaction_rows_written_err` do not halt query execution -The `transaction_rows_read_err` and `transaction_rows_written_err` [session settings]({% link {{ page.version.version }}/set-vars.md %}) limit the number of rows read or written by a single [transaction]({% link {{ page.version.version }}/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction). These session settings will fail the transaction with an error, but not until the current query finishes executing and the results have been returned to the client. [#70473](https://github.com/cockroachdb/cockroach/issues/70473) +The `transaction_rows_read_err` and `transaction_rows_written_err` [session settings]({% link {{ page.version.version }}/set-vars.md %}) limit the number of rows read or written by a single [transaction]({% link {{ page.version.version }}/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction). These session settings will fail the transaction with an error, but not until the current query finishes executing and the results have been returned to the client. #70473 #### `sql.guardrails.max_row_size_err` misses indexed virtual computed columns -The `sql.guardrails.max_row_size_err` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) misses large rows caused by indexed virtual computed columns. This is because the guardrail only checks the size of primary key rows, not secondary index rows. 
[#69540](https://github.com/cockroachdb/cockroach/issues/69540) +The `sql.guardrails.max_row_size_err` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) misses large rows caused by indexed virtual computed columns. This is because the guardrail only checks the size of primary key rows, not secondary index rows. #69540 #### Using `LIKE...ESCAPE` in `WHERE` and `HAVING` constraints -CockroachDB tries to optimize most comparisons operators in `WHERE` and `HAVING` clauses into constraints on SQL indexes by only accessing selected rows. This is done for `LIKE` clauses when a common prefix for all selected rows can be determined in the search pattern (e.g., `... LIKE 'Joe%'`). However, this optimization is not yet available if the `ESCAPE` keyword is also used. [#30192](https://github.com/cockroachdb/cockroach/issues/30192) +CockroachDB tries to optimize most comparison operators in `WHERE` and `HAVING` clauses into constraints on SQL indexes by only accessing selected rows. This is done for `LIKE` clauses when a common prefix for all selected rows can be determined in the search pattern (e.g., `... LIKE 'Joe%'`). However, this optimization is not yet available if the `ESCAPE` keyword is also used. #30192 #### Import with a high amount of disk contention @@ -673,7 +673,7 @@ UNION ALL SELECT * FROM t1 LEFT JOIN t2 ON st_contains(t1.geom, t2.geom) AND t2. (54 rows) ``` -[#59649](https://github.com/cockroachdb/cockroach/issues/59649) +#59649 #### Locality optimized search limitations @@ -686,4 +686,4 @@ UNION ALL SELECT * FROM t1 LEFT JOIN t2 ON st_contains(t1.geom, t2.geom) AND t2. #### Inverted join for `tsvector` and `tsquery` types is not supported -CockroachDB cannot index-accelerate queries with `@@` predicates when both sides of the operator are variables. [#102731](https://github.com/cockroachdb/cockroach/issues/102731) +CockroachDB cannot index-accelerate queries with `@@` predicates when both sides of the operator are variables. 
#102731 diff --git a/src/current/v24.2/kubernetes-performance.md b/src/current/v24.2/kubernetes-performance.md index 47aef82797d..356ecb6e2fb 100644 --- a/src/current/v24.2/kubernetes-performance.md +++ b/src/current/v24.2/kubernetes-performance.md @@ -20,9 +20,9 @@ Before you focus on optimizing a Kubernetes-orchestrated CockroachDB cluster: A number of independent factors affect performance when running CockroachDB on Kubernetes. Most are easiest to change before you create your CockroachDB cluster. If you need to modify a CockroachDB cluster that is already running on Kubernetes, extra care and testing is strongly recommended. -The following sections show how to modify excerpts from our provided Kubernetes configuration YAML files. You can find the most up-to-date versions of these files on GitHub: [one for running CockroachDB in secure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset-secure.yaml) and one for [running CockroachDB in insecure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml). +The following sections show how to modify excerpts from our provided Kubernetes configuration YAML files. You can find the most up-to-date versions of these files on GitHub: [one for running CockroachDB in secure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset-secure.yaml) and one for [running CockroachDB in insecure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml). -You can also use a [performance-optimized configuration file for secure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml) or [insecure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml). 
Be sure to modify the file wherever there is a `TODO` comment. +You can also use a [performance-optimized configuration file for secure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml) or [insecure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml). Be sure to modify the file wherever there is a `TODO` comment. ### Version of CockroachDB @@ -332,7 +332,7 @@ If for some reason setting appropriate resource requests still isn't getting you #### Client applications on the same machines as CockroachDB -Running client applications such as benchmarking applications on the same machines as CockroachDB can be even worse than just having Kubernetes system pods on the same machines. They are very likely to end up competing for resources, because when the applications get more loaded than usual, so will the CockroachDB processes. The best way to avoid this is to [set resource requests and limits](#resource-requests-and-limits), but if you are unwilling or unable to do that for some reason, you can also set [anti-affinity scheduling policies](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) on your client applications. Anti-affinity policies are placed in the pod spec, so if you wanted to change our provided example load generator app, you'd change [these lines](https://github.com/cockroachdb/cockroach/blob/98c506c48f3517d1ac1aadb6a09e1b23ad672c37/cloud/kubernetes/example-app.yaml#L11-L12): +Running client applications such as benchmarking applications on the same machines as CockroachDB can be even worse than just having Kubernetes system pods on the same machines. They are very likely to end up competing for resources, because when the applications get more loaded than usual, so will the CockroachDB processes. 
The best way to avoid this is to [set resource requests and limits](#resource-requests-and-limits), but if you are unwilling or unable to do that for some reason, you can also set [anti-affinity scheduling policies](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) on your client applications. Anti-affinity policies are placed in the pod spec, so if you wanted to change our provided example load generator app, you'd change [these lines](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml): ~~~ yaml spec: diff --git a/src/current/v24.2/log-formats.md b/src/current/v24.2/log-formats.md index 7c715f38c89..cfcf8512b19 100644 --- a/src/current/v24.2/log-formats.md +++ b/src/current/v24.2/log-formats.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/logformats.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/logformats.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v24.2/logging.md b/src/current/v24.2/logging.md index a9b22c4c05f..357817bc212 100644 --- a/src/current/v24.2/logging.md +++ b/src/current/v24.2/logging.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/logging.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/logging.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v24.2/migrate-from-avro.md b/src/current/v24.2/migrate-from-avro.md index de42d232917..eb46e391fce 100644 --- a/src/current/v24.2/migrate-from-avro.md +++ b/src/current/v24.2/migrate-from-avro.md @@ -161,7 +161,7 @@ There are additional import 
[options][option] you can use when importing binary - `records_terminated_by`, which specifies the unicode character used to indicate new lines in the input binary or JSON file (default: `\n`). {{site.data.alerts.callout_info}} -The following example uses sample data generated by [Avro tools](https://github.com/cockroachdb/cockroach/tree/master/pkg/sql/importer/testdata/avro). +The following example uses sample data generated by Avro tools. {{site.data.alerts.end}} For example, to import the data from `simple-schema.json` into a `simple` table, first [create the table]({% link {{ page.version.version }}/create-table.md %}) to import into. Then run `IMPORT INTO` with the following options: diff --git a/src/current/v24.2/monitor-cockroachdb-kubernetes.md b/src/current/v24.2/monitor-cockroachdb-kubernetes.md index b800eb696dd..7aafea11e7e 100644 --- a/src/current/v24.2/monitor-cockroachdb-kubernetes.md +++ b/src/current/v24.2/monitor-cockroachdb-kubernetes.md @@ -128,7 +128,7 @@ If you're on Hosted GKE, before starting, make sure the email address associated {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/prometheus.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/prometheus.yaml ~~~ {{site.data.alerts.callout_info}} @@ -177,14 +177,14 @@ If you're on Hosted GKE, before starting, make sure the email address associated ## Configure Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. 
+Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. -1. Download our alertmanager-config.yaml configuration file: +1. Download our alertmanager-config.yaml configuration file: {% include_cached copy-clipboard.html %} ~~~ shell $ curl -O \ - https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager-config.yaml + https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager-config.yaml ~~~ 1. Edit the `alertmanager-config.yaml` file to [specify the desired receivers for notifications](https://prometheus.io/docs/alerting/configuration/#receiver). Initially, the file contains a placeholder web hook. @@ -214,12 +214,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen The name of the secret, `alertmanager-cockroachdb`, must match the name used in the `alertmanager.yaml` file. If they differ, the Alertmanager instance will start without configuration, and nothing will happen. {{site.data.alerts.end}} -1. Use our [`alertmanager.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: +1. 
Use our [`alertmanager.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml ~~~ ~~~ @@ -244,12 +244,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen Alertmanager -1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml): +1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml): {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alert-rules.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml ~~~ ~~~ diff --git a/src/current/v24.2/monitor-cockroachdb-with-prometheus.md b/src/current/v24.2/monitor-cockroachdb-with-prometheus.md index 4d391044080..b8d89a519c2 100644 --- a/src/current/v24.2/monitor-cockroachdb-with-prometheus.md +++ b/src/current/v24.2/monitor-cockroachdb-with-prometheus.md @@ -15,7 +15,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration - Make sure you have already started a CockroachDB cluster, either [locally]({% link {{ page.version.version }}/start-a-local-cluster.md %}) or in a [production environment]({% link {{ page.version.version 
}}/manual-deployment.md %}). -- Note that all files used in this tutorial can be found in the [`monitoring`](https://github.com/cockroachdb/cockroach/tree/master/monitoring) directory of the CockroachDB repository. +- Note that all files used in this tutorial can be found in the `monitoring` directory of the CockroachDB repository. ## Step 1. Install Prometheus @@ -39,11 +39,11 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration ## Step 2. Configure Prometheus -1. Download the starter [Prometheus configuration file](https://github.com/cockroachdb/cockroach/blob/master/monitoring/prometheus.yml) for CockroachDB: +1. Download the starter [Prometheus configuration file](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/prometheus.yml) for CockroachDB: {% include_cached copy-clipboard.html %} ~~~ shell - curl -o prometheus.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/prometheus.yml + curl -o prometheus.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/prometheus.yml ~~~ When you examine the configuration file, you'll see that it is set up to scrape the time series metrics of a single, insecure local node every 10 seconds: @@ -60,7 +60,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration Production cluster | Change the `targets` field to include `':'` for each node in the cluster. Also, be sure your network configuration allows TCP communication on the specified ports. Secure cluster | Uncomment `scheme: 'https'` and comment out `scheme: 'http'`. -1. Create a `rules` directory and download the [aggregation rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/aggregation.rules.yml) and [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/alerts.rules.yml) for CockroachDB into it: +1. 
Create a `rules` directory and download the [aggregation rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/aggregation.rules.yml) and [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml) for CockroachDB into it: {% include_cached copy-clipboard.html %} ~~~ shell @@ -74,12 +74,12 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration {% include_cached copy-clipboard.html %} ~~~ shell - curl -o rules/aggregation.rules.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/rules/aggregation.rules.yml + curl -o rules/aggregation.rules.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/rules/aggregation.rules.yml ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o rules/alerts.rules.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/rules/alerts.rules.yml + curl -o rules/alerts.rules.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml ~~~ ## Step 3. Start Prometheus @@ -108,7 +108,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration ## Step 4. Send notifications with Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. In step 2, you already downloaded CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/alerts.rules.yml). Now, download, configure, and start [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). +Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. 
In step 2, you already downloaded CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml). Now, download, configure, and start [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). 1. Download the [latest Alertmanager tarball](https://prometheus.io/download/#alertmanager) for your OS. @@ -173,29 +173,29 @@ Although Prometheus lets you graph metrics, [Grafana](https://grafana.com/) is a Url | `http://:9090` Access | Direct -1. Download the starter [Grafana dashboards](https://github.com/cockroachdb/cockroach/tree/master/monitoring/grafana-dashboards/by-cluster) for CockroachDB: +1. Download the starter Grafana dashboards for CockroachDB: {% include_cached copy-clipboard.html %} ~~~ shell - curl -o runtime.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/runtime.json + curl -o runtime.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/runtime.json # runtime dashboard: node status, including uptime, memory, and cpu. ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o storage.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/storage.json + curl -o storage.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/storage.json # storage dashboard: storage availability. ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o sql.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/sql.json + curl -o sql.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/sql.json # sql dashboard: sql queries/transactions. 
~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o replication.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/replication.json + curl -o replication.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/replication.json # replicas dashboard: replica information and operations. ~~~ diff --git a/src/current/v24.2/monitoring-and-alerting.md b/src/current/v24.2/monitoring-and-alerting.md index 7e6db953257..ef75d9bdb97 100644 --- a/src/current/v24.2/monitoring-and-alerting.md +++ b/src/current/v24.2/monitoring-and-alerting.md @@ -1122,7 +1122,7 @@ Start Prometheus and Alertmanager to begin watching for events to alert on. You ### Events to alert on {{site.data.alerts.callout_info}} -Currently, not all events listed have corresponding alert rule definitions available from the `api/v2/rules/` endpoint. Many events not yet available in this manner are defined in the pre-defined alerting rules. For more details, see [Monitor CockroachDB with Prometheus]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}). +Currently, not all events listed have corresponding alert rule definitions available from the `api/v2/rules/` endpoint. Many events not yet available in this manner are defined in the pre-defined alerting rules. For more details, see [Monitor CockroachDB with Prometheus]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}). {{site.data.alerts.end}} #### Node is down @@ -1131,7 +1131,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** If a node is down, its `_status/vars` endpoint will return a `Connection refused` error. Otherwise, the `liveness_livenodes` metric will be the total number of live nodes in the cluster. -- **Rule definition:** Use the `InstanceDead` alert from our pre-defined alerting rules. 
+- **Rule definition:** Use the `InstanceDead` alert from our pre-defined alerting rules. #### Node is restarting too frequently @@ -1139,7 +1139,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the number of times the `sys_uptime` metric in the node's `_status/vars` output was reset back to zero. The `sys_uptime` metric gives you the length of time, in seconds, that the `cockroach` process has been running. -- **Rule definition:** Use the `InstanceFlapping` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `InstanceFlapping` alert from our pre-defined alerting rules. #### Node is running low on disk space @@ -1147,7 +1147,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Divide the `capacity` metric by the `capacity_available` metric in the node's `_status/vars` output. -- **Rule definition:** Use the `StoreDiskLow` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `StoreDiskLow` alert from our pre-defined alerting rules. {% include {{page.version.version}}/storage/free-up-disk-space.md %} @@ -1163,7 +1163,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the `security_certificate_expiration_ca` metric in the node's `_status/vars` output. -- **Rule definition:** Use the `CACertificateExpiresSoon` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `CACertificateExpiresSoon` alert from our pre-defined alerting rules. #### Node certificate expires soon @@ -1171,7 +1171,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the `security_certificate_expiration_node` metric in the node's `_status/vars` output. -- **Rule definition:** Use the `NodeCertificateExpiresSoon` alert from our pre-defined alerting rules. 
+- **Rule definition:** Use the `NodeCertificateExpiresSoon` alert from our pre-defined alerting rules. #### Changefeed is experiencing high latency diff --git a/src/current/v24.2/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md b/src/current/v24.2/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md index b47593bda96..09b361cd5ac 100644 --- a/src/current/v24.2/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md +++ b/src/current/v24.2/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md @@ -71,7 +71,7 @@ Instead of using this approach, you can now [enable global access](https://cloud Our multi-region deployment approach relies on pod IP addresses being routable across three distinct Kubernetes clusters and regions. Both the hosted Google Kubernetes Engine (GKE) and Amazon Elastic Kubernetes Service (EKS) satisfy this requirement. -If you want to run on another cloud or on-premises, use this [basic network test](https://github.com/cockroachdb/cockroach/tree/master/cloud/kubernetes/multiregion#pod-to-pod-connectivity) to see if it will work. +If you want to run on another cloud or on-premises, use this basic network test to see if it will work.
      1. Complete the **Before You Begin** steps described in the [Google Kubernetes Engine Quickstart](https://cloud.google.com/kubernetes-engine/docs/quickstart) documentation. @@ -322,21 +322,21 @@ This important rule enables node communication between Kubernetes clusters in di The Kubernetes cluster in each region needs to have a [Network Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/introduction.html) pointed at its CoreDNS service, which you will configure in the next step. -1. Upload our load balancer manifest [`dns-lb-eks.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml) to the Kubernetes clusters in all 3 regions: +1. Upload our load balancer manifest [`dns-lb-eks.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml) to the Kubernetes clusters in all 3 regions: {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context + kubectl apply -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context + kubectl apply -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context + kubectl apply -f 
https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context ~~~ You should see the load balancer appear in the Load Balancers section of the EC2 console in each region. This load balancer will route traffic to CoreDNS in the region. @@ -369,11 +369,11 @@ Each Kubernetes cluster has a [CoreDNS](https://coredns.io/) service that respon To enable traffic forwarding to CockroachDB pods in all 3 regions, you need to [modify the ConfigMap](https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configmap-options) for the CoreDNS Corefile in each region. -1. Download and open our ConfigMap template [`configmap.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/configmap.yaml): +1. Download and open our ConfigMap template [`configmap.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/configmap.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/configmap.yaml + curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/configmap.yaml ~~~ 1. After [obtaining the IP addresses of the Network Load Balancers](#set-up-load-balancing) in all 3 regions, you can use this information to define a **separate ConfigMap for each region**. Each unique ConfigMap lists the forwarding addresses for the pods in the 2 other regions. 
@@ -467,7 +467,7 @@ If you plan to run your instances exclusively on private subnets, set the follow {% include_cached copy-clipboard.html %} ~~~ shell $ curl -OOOOOOOOO \ - https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/{README.md,client-secure.yaml,cluster-init-secure.yaml,cockroachdb-statefulset-secure.yaml,dns-lb.yaml,example-app-secure.yaml,external-name-svc.yaml,setup.py,teardown.py} + https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/{README.md,client-secure.yaml,cluster-init-secure.yaml,cockroachdb-statefulset-secure.yaml,dns-lb.yaml,example-app-secure.yaml,external-name-svc.yaml,setup.py,teardown.py} ~~~ 1. Retrieve the `kubectl` "contexts" for your clusters: @@ -685,11 +685,11 @@ The below steps use [`cockroach cert` commands]({% link {{ page.version.version ### Create StatefulSets -1. Download and open our [multi-region StatefulSet configuration](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml). You'll save three versions of this file locally, one for each set of 3 CockroachDB nodes per region. +1. Download and open our [multi-region StatefulSet configuration](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml). You'll save three versions of this file locally, one for each set of 3 CockroachDB nodes per region. {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml ~~~ Look for **TODO** comments in the file. 
These highlight fields you need to define before deploying your StatefulSet. @@ -814,7 +814,7 @@ The pod uses the `root` client certificate created earlier by the `setup.py` scr {% include_cached copy-clipboard.html %} ~~~ shell - kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/client-secure.yaml --context --namespace + kubectl create -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/client-secure.yaml --context --namespace ~~~ ~~~ diff --git a/src/current/v24.2/postgresql-compatibility.md b/src/current/v24.2/postgresql-compatibility.md index 15eff6fdc35..24b3639b1de 100644 --- a/src/current/v24.2/postgresql-compatibility.md +++ b/src/current/v24.2/postgresql-compatibility.md @@ -34,11 +34,11 @@ When set to `true`, multiple portals can be open at the same time, with their ex This feature has the following limitations: - Only read-only [`SELECT` queries]({% link {{ page.version.version }}/selection-queries.md %}) without [subqueries]({% link {{ page.version.version }}/subqueries.md %}) are supported. 
-- Postqueries (which are how CockroachDB executes [foreign key checks]({% link {{ page.version.version }}/foreign-key.md %}), for example) are not supported - [cockroachdb/cockroach#96398](https://github.com/cockroachdb/cockroach/issues/96398) -- [Distributed SQL execution]({% link {{ page.version.version }}/architecture/sql-layer.md %}#distsql) is not supported for multiple active portals; instead queries execute on the [gateway node]({% link {{ page.version.version }}/architecture/life-of-a-distributed-transaction.md %}#gateway) only - [cockroachdb/cockroach#100822](https://github.com/cockroachdb/cockroach/issues/100822) -- Only the latest execution of a statement from a pausable portal is recorded by the [trace infrastructure]({% link {{ page.version.version }}/show-trace.md %}) - [cockroachdb/cockroach#99404](https://github.com/cockroachdb/cockroach/issues/99404) +- Postqueries (which are how CockroachDB executes [foreign key checks]({% link {{ page.version.version }}/foreign-key.md %}), for example) are not supported - cockroachdb/cockroach#96398 +- [Distributed SQL execution]({% link {{ page.version.version }}/architecture/sql-layer.md %}#distsql) is not supported for multiple active portals; instead queries execute on the [gateway node]({% link {{ page.version.version }}/architecture/life-of-a-distributed-transaction.md %}#gateway) only - cockroachdb/cockroach#100822 +- Only the latest execution of a statement from a pausable portal is recorded by the [trace infrastructure]({% link {{ page.version.version }}/show-trace.md %}) - cockroachdb/cockroach#99404 -In addition to the known issues, additional performance testing is needed. The current list of known issues can be viewed [on GitHub using the `A-pausable-portals` label](https://github.com/cockroachdb/cockroach/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc++label%3AA-pausable-portals+), and we welcome bug reports. +In addition to the known issues, additional performance testing is needed. 
The current list of known issues can be viewed on GitHub using the `A-pausable-portals` label, and we welcome bug reports. ## Features that differ from PostgreSQL @@ -181,7 +181,7 @@ An `x` value less than `1` would result in the following error: pq: check constraint violated ~~~ -[#35370](https://github.com/cockroachdb/cockroach/issues/35370) +#35370 ### Column name from an outer column inside a subquery @@ -207,7 +207,7 @@ PostgreSQL: 1 ~~~ -[#46563](https://github.com/cockroachdb/cockroach/issues/46563) +#46563 ### SQL Compatibility diff --git a/src/current/v24.2/query-spatial-data.md b/src/current/v24.2/query-spatial-data.md index ecda70caa00..149df2b13cb 100644 --- a/src/current/v24.2/query-spatial-data.md +++ b/src/current/v24.2/query-spatial-data.md @@ -24,7 +24,7 @@ Just as CockroachDB strives for [PostgreSQL compatibility]({% link {{ page.versi CockroachDB does not implement the full list of PostGIS built-in functions and operators. Also, [spatial indexing works differently]({% link {{ page.version.version }}/spatial-indexes.md %}) (see the [Performance](#performance) section below). For a list of the spatial functions CockroachDB supports, see [Geospatial functions]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions). -If your application needs support for functions that are not yet implemented, check the [meta-issue for built-in function support on GitHub](https://github.com/cockroachdb/cockroach/issues/49203), which describes how to find an issue for the built-in function(s) you need. +If your application needs support for functions that are not yet implemented, check the meta-issue for built-in function support on GitHub, which describes how to find an issue for the built-in function(s) you need. For a list of other known limitations, see [Known Limitations]({% link {{ page.version.version }}/known-limitations.md %}#spatial-support-limitations). 
diff --git a/src/current/v24.2/read-committed.md b/src/current/v24.2/read-committed.md index bcf0a5d9f92..e797812f832 100644 --- a/src/current/v24.2/read-committed.md +++ b/src/current/v24.2/read-committed.md @@ -20,7 +20,7 @@ Whereas `SERIALIZABLE` isolation guarantees data correctness by placing transact If your workload is already running well under `SERIALIZABLE` isolation, Cockroach Labs does not recommend changing to `READ COMMITTED` isolation unless there is a specific need. {{site.data.alerts.callout_info}} -`READ COMMITTED` on CockroachDB provides stronger isolation than `READ COMMITTED` on PostgreSQL. On CockroachDB, `READ COMMITTED` prevents anomalies within single statements. For complete details on how `READ COMMITTED` is implemented on CockroachDB, see the [Read Committed RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20230122_read_committed_isolation.md). +`READ COMMITTED` on CockroachDB provides stronger isolation than `READ COMMITTED` on PostgreSQL. On CockroachDB, `READ COMMITTED` prevents anomalies within single statements. For complete details on how `READ COMMITTED` is implemented on CockroachDB, see the [Read Committed RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20230122_read_committed_isolation.md). {{site.data.alerts.end}} ## Enable `READ COMMITTED` isolation @@ -918,5 +918,5 @@ SELECT * FROM schedules - [`SELECT ... 
FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) - [Serializable Transactions]({% link {{ page.version.version }}/demo-serializable.md %}) - [What Write Skew Looks Like](https://www.cockroachlabs.com/blog/what-write-skew-looks-like/) -- [Read Committed RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20230122_read_committed_isolation.md) +- [Read Committed RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20230122_read_committed_isolation.md) - [Migration Overview]({% link molt/migration-overview.md %}) diff --git a/src/current/v24.2/restore.md b/src/current/v24.2/restore.md index 2be5b2b3140..4ecb693e90b 100644 --- a/src/current/v24.2/restore.md +++ b/src/current/v24.2/restore.md @@ -219,11 +219,11 @@ When restoring an individual table that references a user-defined type (e.g., [` - If there is an existing type in the cluster with the same name that is compatible with the type in the backup, CockroachDB will map the type in the backup to the type in the cluster. - If there is an existing type in the cluster with the same name but it is _not_ compatible with the type in the backup, the restore will not succeed and you will be asked to resolve the naming conflict. You can do this by either [dropping]({% link {{ page.version.version }}/drop-type.md %}) or [renaming]({% link {{ page.version.version }}/alter-type.md %}) the existing user-defined type. -In general, two types are compatible if they are the same kind (e.g., an enum is only compatible with other enums). Additionally, enums are only compatible if they have the same ordered set of elements that have also been [created in the same way](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20200331_enums.md#physical-layout). For example: +In general, two types are compatible if they are the same kind (e.g., an enum is only compatible with other enums). 
Additionally, enums are only compatible if they have the same ordered set of elements that have also been [created in the same way](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20200331_enums.md#physical-layout). For example: - `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes', 'no')` are compatible. - `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('no', 'yes')` are not compatible. -- `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes'); ALTER TYPE t2 ADD VALUE ('no')` are not compatible because they were not created in the same way. +- `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes'); ALTER TYPE t2 ADD VALUE ('no')` are not compatible because they were not [created in the same way](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20200331_enums.md#physical-layout). ### Object dependencies diff --git a/src/current/v24.2/schedule-cockroachdb-kubernetes.md b/src/current/v24.2/schedule-cockroachdb-kubernetes.md index 973d234ffa9..de53f7c8449 100644 --- a/src/current/v24.2/schedule-cockroachdb-kubernetes.md +++ b/src/current/v24.2/schedule-cockroachdb-kubernetes.md @@ -108,7 +108,7 @@ For more context on how these rules work, see the [Kubernetes documentation](htt Specify pod affinities and anti-affinities in `affinity.podAffinity` and `affinity.podAntiAffinity` in the Operator's custom resource, which is used to [deploy the cluster]({% link {{ page.version.version }}/deploy-cockroachdb-with-kubernetes.md %}#initialize-the-cluster). If you specify multiple `matchExpressions` labels, the node must match all of them. If you specify multiple `values` for a label, the node can match any of the values. 
-The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. +The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. {% include_cached copy-clipboard.html %} ~~~ yaml diff --git a/src/current/v24.2/spatial-tutorial.md b/src/current/v24.2/spatial-tutorial.md index b6fd7ab3583..51764c8cea2 100644 --- a/src/current/v24.2/spatial-tutorial.md +++ b/src/current/v24.2/spatial-tutorial.md @@ -966,7 +966,7 @@ Time: 1.447s total (execution 1.446s / network 0.000s) Unfortunately, this query is a bit slower than you would like: about 1.5 seconds on a single-node [`cockroach demo`]({% link {{ page.version.version }}/cockroach-demo.md %}) cluster on a laptop. There are several reasons for this: 1. You haven't created any indexes at all yet. The query is likely to be doing full table scans, which you will need to hunt down with [`EXPLAIN`]({% link {{ page.version.version }}/explain.md %}). -1. CockroachDB does not yet have built-in support for index-based nearest neighbor queries. If this feature is important to you, please comment with some information about your use case on [cockroachdb/cockroach#55227](https://github.com/cockroachdb/cockroach/issues/55227). +1. CockroachDB does not yet have built-in support for index-based nearest neighbor queries. If this feature is important to you, please comment with some information about your use case on cockroachdb/cockroach#55227. 
Let's look at the `EXPLAIN` output to see if there is something that can be done to improve this query's performance: diff --git a/src/current/v24.2/sql-feature-support.md b/src/current/v24.2/sql-feature-support.md index 3ca62dbbc71..ed02b9735af 100644 --- a/src/current/v24.2/sql-feature-support.md +++ b/src/current/v24.2/sql-feature-support.md @@ -193,7 +193,7 @@ XML | ✗ | Standard | XML data can be stored as `BYTES`, but we do not offer XM Common table expressions | Partial | Common Extension | [Common Table Expressions documentation]({% link {{ page.version.version }}/common-table-expressions.md %}) Stored procedures | Partial | Common Extension | [Stored procedures documentation]({% link {{ page.version.version }}/stored-procedures.md %}) Cursors | Partial | Standard | [Cursors documentation]({% link {{ page.version.version }}/cursors.md %}) - Triggers | ✗ | Standard | Execute a set of commands whenever a specified event occurs. [GitHub issue tracking trigger support](https://github.com/cockroachdb/cockroach/issues/28296). + Triggers | ✗ | Standard | Execute a set of commands whenever a specified event occurs. GitHub issue tracking trigger support. Row-level TTL | ✓ | Common Extension | Automatically delete expired rows. For more information, see [Batch-delete expired data with Row-Level TTL]({% link {{ page.version.version }}/row-level-ttl.md %}). User-defined functions | Partial | Standard | [User-Defined Functions documentation]({% link {{ page.version.version }}/user-defined-functions.md %}) - `CREATE EXTENSION "uuid-ossp"` | ✓ | Common Extension | Provides access to several additional [UUID generation functions]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions). Note that these UUID functions are available without typing `CREATE EXTENSION "uuid-ossp"`. CockroachDB does not have full support for `CREATE EXTENSION`. 
[GitHub issue tracking `CREATE EXTENSION` support](https://github.com/cockroachdb/cockroach/issues/74777). + `CREATE EXTENSION "uuid-ossp"` | ✓ | Common Extension | Provides access to several additional [UUID generation functions]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions). Note that these UUID functions are available without typing `CREATE EXTENSION "uuid-ossp"`. CockroachDB does not have full support for `CREATE EXTENSION`. GitHub issue tracking `CREATE EXTENSION` support. diff --git a/src/current/v24.2/sql-name-resolution.md b/src/current/v24.2/sql-name-resolution.md index 554a9a65583..9600bfe1ef7 100644 --- a/src/current/v24.2/sql-name-resolution.md +++ b/src/current/v24.2/sql-name-resolution.md @@ -37,7 +37,7 @@ If you are upgrading to {{ page.version.version }}, take any combination of the - [Create new schemas]({% link {{ page.version.version }}/create-schema.md %}) in databases on your cluster. After the schemas are created, use [`ALTER TABLE ... RENAME`]({% link {{ page.version.version }}/alter-table.md %}#rename-to), [`ALTER SEQUENCE ... RENAME`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE ... RENAME`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW ... RENAME`]({% link {{ page.version.version }}/alter-view.md %}) statements to move objects between databases as needed. To move objects between schemas, use [`ALTER TABLE ... SET SCHEMA`]({% link {{ page.version.version }}/alter-table.md %}#set-schema), [`ALTER SEQUENCE ... SET SCHEMA`]({% link {{ page.version.version }}/alter-sequence.md %}), or [`ALTER VIEW ... SET SCHEMA`]({% link {{ page.version.version }}/alter-view.md %}). 
-- If your cluster contains cross-database references (e.g., a cross-database foreign key reference, or a cross-database view reference), use the relevant [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}), [`ALTER SEQUENCE`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW `]({% link {{ page.version.version }}/alter-view.md %}) statements to move any cross-referencing objects to the same database, but different schemas. Cross-database object references were allowed in earlier versions of CockroachDB to make database-object naming hierarchies more flexible for users. In v20.2, creating cross-database references are disabled for [foreign keys](foreign-key.html), [views]({% link {{ page.version.version }}/views.md %}), and [sequence ownership]({% link {{ page.version.version }}/create-sequence.md %}). For details, see [tracking issue](https://github.com/cockroachdb/cockroach/issues/55791). +- If your cluster contains cross-database references (e.g., a cross-database foreign key reference, or a cross-database view reference), use the relevant [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}), [`ALTER SEQUENCE`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW `]({% link {{ page.version.version }}/alter-view.md %}) statements to move any cross-referencing objects to the same database, but different schemas. Cross-database object references were allowed in earlier versions of CockroachDB to make database-object naming hierarchies more flexible for users. In v20.2, creating cross-database references is disabled for [foreign keys](foreign-key.html), [views]({% link {{ page.version.version }}/views.md %}), and [sequence ownership]({% link {{ page.version.version }}/create-sequence.md %}). For details, see the tracking issue.
## How name resolution works diff --git a/src/current/v24.2/st_union.md b/src/current/v24.2/st_union.md index df4a9429bc1..1cb4223c0e7 100644 --- a/src/current/v24.2/st_union.md +++ b/src/current/v24.2/st_union.md @@ -12,7 +12,7 @@ Given a set of shapes (e.g., from a [selection query]({% link {{ page.version.ve - [`GEOMETRY`]({% link {{ page.version.version }}/architecture/glossary.md %}#geometry) {{site.data.alerts.callout_info}} -The non-aggregate version of `ST_Union` is not yet implemented. For more information, see [cockroach#49064](https://github.com/cockroachdb/cockroach/issues/49064). +The non-aggregate version of `ST_Union` is not yet implemented. For more information, see cockroach#49064. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v24.2/temporary-tables.md b/src/current/v24.2/temporary-tables.md index bd3a9adefdd..0eda33fc45f 100644 --- a/src/current/v24.2/temporary-tables.md +++ b/src/current/v24.2/temporary-tables.md @@ -10,7 +10,7 @@ docs_area: develop To create a temp table, add `TEMP`/`TEMPORARY` to a [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}) or [`CREATE TABLE AS`]({% link {{ page.version.version }}/create-table-as.md %}) statement. For full syntax details, see the [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}#synopsis) and [`CREATE TABLE AS`]({% link {{ page.version.version }}/create-table-as.md %}#synopsis) pages. For example usage, see [Examples](#examples). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. 
{{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v24.2/upgrade-cockroach-version.md b/src/current/v24.2/upgrade-cockroach-version.md index fa6774bdefc..01c16033081 100644 --- a/src/current/v24.2/upgrade-cockroach-version.md +++ b/src/current/v24.2/upgrade-cockroach-version.md @@ -336,13 +336,3 @@ In the event of catastrophic failure or corruption, it may be necessary to [rest - [View Version Details]({% link {{ page.version.version }}/cockroach-version.md %}) - [Release notes for our latest version]({% link releases/{{page.version.version}}.md %}) -[#102961]: https://github.com/cockroachdb/cockroach/pull/102961 -[#104265]: https://github.com/cockroachdb/cockroach/pull/104265 -[#107474]: https://github.com/cockroachdb/cockroach/pull/107474 -[#106177]: https://github.com/cockroachdb/cockroach/pull/106177 -[#98308]: https://github.com/cockroachdb/cockroach/pull/98308 -[#105477]: https://github.com/cockroachdb/cockroach/pull/105477 -[#107903]: https://github.com/cockroachdb/cockroach/pull/107903 -[#108190]: https://github.com/cockroachdb/cockroach/pull/108190 -[#110649]: https://github.com/cockroachdb/cockroach/pull/110649 -[#114203]: https://github.com/cockroachdb/cockroach/pull/114203 diff --git a/src/current/v24.2/views.md b/src/current/v24.2/views.md index 1b703728bcf..ce63d6cd0f5 100644 --- a/src/current/v24.2/views.md +++ b/src/current/v24.2/views.md @@ -634,7 +634,7 @@ To speed up queries on materialized views, you can add an [index]({% link {{ pag CockroachDB supports session-scoped temporary views. Unlike persistent views, temporary views can only be accessed from the session in which they were created, and they are dropped at the end of the session. You can create temporary views on both persistent tables and [temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}). 
{{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v24.3/admission-control.md b/src/current/v24.3/admission-control.md index 2b1c1f25abc..5fc915c9a84 100644 --- a/src/current/v24.3/admission-control.md +++ b/src/current/v24.3/admission-control.md @@ -22,7 +22,7 @@ Admission control works on a per-[node]({% link {{ page.version.version }}/archi For more details about how the admission control system works, see: -- The [Admission Control tech note](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/admission_control.md). +- [The Admission Control tech note](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/tech-notes/admission_control.md). - The blog post [Here's how CockroachDB keeps your database from collapsing under load](https://www.cockroachlabs.com/blog/admission-control-in-cockroachdb/). ## Use cases for admission control @@ -150,6 +150,6 @@ The [DB Console Overload dashboard]({% link {{ page.version.version }}/ui-overlo ## See also - The [Overload Dashboard]({% link {{ page.version.version }}/ui-overload-dashboard.md %}) in the [DB Console]({% link {{ page.version.version }}/ui-overview.md %}). -- The [technical note for admission control](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/admission_control.md) for details on the design of the admission control system. +- The technical note for admission control for details on the design of the admission control system. - The blog post [Here's how CockroachDB keeps your database from collapsing under load](https://www.cockroachlabs.com/blog/admission-control-in-cockroachdb/). 
- The blog post [Rubbing Control Theory on the Go scheduler](https://www.cockroachlabs.com/blog/rubbing-control-theory/). diff --git a/src/current/v24.3/architecture/sql-layer.md b/src/current/v24.3/architecture/sql-layer.md index bfb67a48c05..2605879b93d 100644 --- a/src/current/v24.3/architecture/sql-layer.md +++ b/src/current/v24.3/architecture/sql-layer.md @@ -117,7 +117,7 @@ It's also important––for indexed columns––that this byte encoding preser However, for non-indexed columns (e.g., non-`PRIMARY KEY` columns), CockroachDB instead uses an encoding (known as "value encoding") which consumes less space but does not preserve ordering. -You can find more exhaustive detail in the [Encoding Tech Note](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/encoding.md). +You can find more exhaustive detail in [the Encoding Tech Note](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/tech-notes/encoding.md). ### DistSQL @@ -134,7 +134,7 @@ To run SQL statements in a distributed fashion, we introduce a couple of concept - **Logical plan**: Similar to the AST/`planNode` tree described above, it represents the abstract (non-distributed) data flow through computation stages. - **Physical plan**: A physical plan is conceptually a mapping of the logical plan nodes to physical machines running `cockroach`. Logical plan nodes are replicated and specialized depending on the cluster topology. Like `planNodes` above, these components of the physical plan are scheduled and run on the cluster. -You can find much greater detail in the [DistSQL RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20160421_distributed_sql.md). +You can find much greater detail in [the DistSQL RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20160421_distributed_sql.md). 
## Schema changes diff --git a/src/current/v24.3/cluster-settings.md b/src/current/v24.3/cluster-settings.md index 8af2fe82670..e992eca50e4 100644 --- a/src/current/v24.3/cluster-settings.md +++ b/src/current/v24.3/cluster-settings.md @@ -26,7 +26,7 @@ These cluster settings have a broad impact on CockroachDB internals and affect a {% include {{page.version.version}}/sql/sql-defaults-cluster-settings-deprecation-notice.md %} -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/settings/settings.html %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/settings/settings.html{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v24.3/cluster-setup-troubleshooting.md b/src/current/v24.3/cluster-setup-troubleshooting.md index f013cdd8bc8..91cb877101b 100644 --- a/src/current/v24.3/cluster-setup-troubleshooting.md +++ b/src/current/v24.3/cluster-setup-troubleshooting.md @@ -104,7 +104,7 @@ If you are trying to run all nodes on the same machine, you might get the follow #### Store directory already exists ~~~ -ERROR: could not cleanup temporary directories from record file: could not lock temporary directory /Users/amruta/go/src/github.com/cockroachdb/cockroach/cockroach-data/cockroach-temp301343769, may still be in use: IO error: While lock file: /Users/amruta/go/src/github.com/cockroachdb/cockroach/cockroach-data/cockroach-temp301343769/TEMP_DIR.LOCK: Resource temporarily unavailable +ERROR: could not cleanup temporary directories from record file: could not lock temporary directory /cockroach-data/cockroach-temp301343769, may still be in use: IO error: While lock file: /cockroach-data/cockroach-temp301343769/TEMP_DIR.LOCK: Resource temporarily unavailable ~~~ **Explanation:** When starting a new node on the same machine, the directory you choose to store the data in also contains metadata identifying the cluster the data 
came from. This causes conflicts when you've already started a node on the server and then tried to start another cluster using the same directory. diff --git a/src/current/v24.3/cockroachdb-feature-availability.md b/src/current/v24.3/cockroachdb-feature-availability.md index 27cbee80074..d166953fd28 100644 --- a/src/current/v24.3/cockroachdb-feature-availability.md +++ b/src/current/v24.3/cockroachdb-feature-availability.md @@ -169,7 +169,7 @@ Example: ### Check for constraint violations with `SCRUB` -Checks the consistency of [`UNIQUE`]({% link {{ page.version.version }}/unique.md %}) indexes, [`CHECK`]({% link {{ page.version.version }}/check.md %}) constraints, and more. Partially implemented; see [cockroachdb/cockroach#10425](https://github.com/cockroachdb/cockroach/issues/10425) for details. +Checks the consistency of [`UNIQUE`]({% link {{ page.version.version }}/unique.md %}) indexes, [`CHECK`]({% link {{ page.version.version }}/check.md %}) constraints, and more. Partially implemented; see cockroachdb/cockroach#10425 for details. {{site.data.alerts.callout_info}} This example uses the `users` table from our open-source, fictional peer-to-peer vehicle-sharing application, [MovR]({% link {{ page.version.version }}/movr.md %}). @@ -204,7 +204,7 @@ CockroachDB supports [altering the column types]({% link {{ page.version.version ### Temporary objects -[Temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}), [temporary views]({% link {{ page.version.version }}/views.md %}#temporary-views), and [temporary sequences]({% link {{ page.version.version }}/create-sequence.md %}#temporary-sequences) are in preview in CockroachDB. If you create too many temporary objects in a session, the performance of DDL operations will degrade. Dropping large numbers of temporary objects in rapid succession can also enqueue many [schema change GC jobs]({% link {{ page.version.version }}/show-jobs.md %}), which may further degrade cluster performance. 
This performance degradation could persist long after creating the temporary objects. For more details, see [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +[Temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}), [temporary views]({% link {{ page.version.version }}/views.md %}#temporary-views), and [temporary sequences]({% link {{ page.version.version }}/create-sequence.md %}#temporary-sequences) are in preview in CockroachDB. If you create too many temporary objects in a session, the performance of DDL operations will degrade. Dropping large numbers of temporary objects in rapid succession can also enqueue many [schema change GC jobs]({% link {{ page.version.version }}/show-jobs.md %}), which may further degrade cluster performance. This performance degradation could persist long after creating the temporary objects. For more details, see cockroachdb/cockroach#46260. To enable temporary objects, set the `experimental_enable_temp_tables` [session variable]({% link {{ page.version.version }}/show-vars.md %}) to `on`. diff --git a/src/current/v24.3/common-table-expressions.md b/src/current/v24.3/common-table-expressions.md index a76f13ff39d..bb43aa48b0c 100644 --- a/src/current/v24.3/common-table-expressions.md +++ b/src/current/v24.3/common-table-expressions.md @@ -446,7 +446,7 @@ SELECT COUNT(*) FROM temp; Because this pattern incurs the overhead of a new scan for each iteration, it is slower per row than a full scan. It is therefore faster than a full scan in cases (such as this one) where many rows are skipped, but is slower if they are not. {{site.data.alerts.callout_info}} -Some recursive CTEs are not not yet optimized. For details, see the [tracking issue](https://github.com/cockroachdb/cockroach/issues/89954). +Some recursive CTEs are not yet optimized. For details, see the tracking issue.
{{site.data.alerts.end}} ## Correlated common table expressions diff --git a/src/current/v24.3/configure-replication-zones.md b/src/current/v24.3/configure-replication-zones.md index 090ea83714b..c9161424a5f 100644 --- a/src/current/v24.3/configure-replication-zones.md +++ b/src/current/v24.3/configure-replication-zones.md @@ -98,7 +98,7 @@ For more information, see the following subsections: The hierarchy of inheritance for zone configs can be visualized using the following outline-style diagram, in which each level of indentation denotes an inheritance relationship. -The only exception to this simple inheritance relationship is that due to a known limitation, sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information, see [cockroachdb/cockroach#75862](https://github.com/cockroachdb/cockroach/issues/75862). +The only exception to this simple inheritance relationship is that due to a known limitation, sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information, see cockroachdb/cockroach#75862. ``` - default @@ -127,7 +127,7 @@ From the whole-system perspective, the hierarchy of schema object zone configs c The following diagram presents the same set of schema objects as the previous outline-style diagram, but using boxes and lines joined with arrows that represent the "top-down" view. -Each box represents a schema object in the zone configuration inheritance hierarchy. Each solid line ends in an arrow that points from a parent object to its child object, which will inherit the parent's values unless those values are changed at the child level. The dotted lines between partitions and sub-partitions represent the known limitation mentioned previously that sub-partitions do not inherit their values from their parent partitions. 
Instead, sub-partitions inherit their values from the parent table. For more information about this limitation, see [cockroachdb/cockroach#75862](https://github.com/cockroachdb/cockroach/issues/75862). +Each box represents a schema object in the zone configuration inheritance hierarchy. Each solid line ends in an arrow that points from a parent object to its child object, which will inherit the parent's values unless those values are changed at the child level. The dotted lines between partitions and sub-partitions represent the known limitation mentioned previously that sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information about this limitation, see cockroachdb/cockroach#75862. zone config inheritance diagram diff --git a/src/current/v24.3/create-sequence.md b/src/current/v24.3/create-sequence.md index bae71037988..0e6019364ac 100644 --- a/src/current/v24.3/create-sequence.md +++ b/src/current/v24.3/create-sequence.md @@ -59,7 +59,7 @@ CockroachDB supports the following [SQL sequence functions]({% link {{ page.vers CockroachDB supports session-scoped temporary sequences. Unlike persistent sequences, temporary sequences can only be accessed from the session in which they were created, and they are dropped at the end of the session. You can create temporary sequences on both persistent tables and [temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. 
{{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v24.3/create-table.md b/src/current/v24.3/create-table.md index d1a4c389267..d6738c3e0a0 100644 --- a/src/current/v24.3/create-table.md +++ b/src/current/v24.3/create-table.md @@ -155,7 +155,7 @@ If you use `GENERATED BY DEFAULT AS IDENTITY` to define the identity column, any Note the following limitations of identity columns: -- `GENERATED ALWAYS AS IDENTITY`/`GENERATED BY DEFAULT AS IDENTITY` is supported in [`ALTER TABLE ... ADD COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#add-column) statements only when the table being altered is empty, as [CockroachDB does not support back-filling sequential column data]({% link {{ page.version.version }}/known-limitations.md %}#adding-a-column-with-sequence-based-default-values). For more information, see [#42508](https://github.com/cockroachdb/cockroach/issues/42508). +- `GENERATED ALWAYS AS IDENTITY`/`GENERATED BY DEFAULT AS IDENTITY` is supported in [`ALTER TABLE ... ADD COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#add-column) statements only when the table being altered is empty, as [CockroachDB does not support back-filling sequential column data]({% link {{ page.version.version }}/known-limitations.md %}#adding-a-column-with-sequence-based-default-values). For more information, see #42508. - Unlike PostgreSQL, CockroachDB does not support using the `OVERRIDING SYSTEM VALUE` clause in `INSERT`/`UPDATE`/`UPSERT` statements to overwrite `GENERATED ALWAYS AS IDENTITY` identity column values. For an example of an identity column, see [Create a table with an identity column](#create-a-table-with-an-identity-column). 
diff --git a/src/current/v24.3/disaster-recovery-planning.md b/src/current/v24.3/disaster-recovery-planning.md index ae6bdbc5f1b..bcc6a4a29e3 100644 --- a/src/current/v24.3/disaster-recovery-planning.md +++ b/src/current/v24.3/disaster-recovery-planning.md @@ -320,7 +320,7 @@ If your cluster is running, you do not have a backup that encapsulates the time If you have corrupted data in a database or table, [restore]({% link {{ page.version.version }}/restore.md %}) the object from a prior [backup]({% link {{ page.version.version }}/backup.md %}). If revision history is in the backup, you can restore from a [point in time]({% link {{ page.version.version }}/take-backups-with-revision-history-and-restore-from-a-point-in-time.md %}). -Instead of dropping the corrupted table or database, we recommend [renaming the table]({% link {{ page.version.version }}/alter-table.md %}#rename-to) or [renaming the database]({% link {{ page.version.version }}/alter-database.md %}#rename-to) so you have historical data to compare to later. If you drop a database, the database cannot be referenced with `AS OF SYSTEM TIME` queries (see [#51380](https://github.com/cockroachdb/cockroach/issues/51380) for more information), and you will need to take a backup that is backdated to the system time when the database still existed. +Instead of dropping the corrupted table or database, we recommend [renaming the table]({% link {{ page.version.version }}/alter-table.md %}#rename-to) or [renaming the database]({% link {{ page.version.version }}/alter-database.md %}#rename-to) so you have historical data to compare to later. If you drop a database, the database cannot be referenced with `AS OF SYSTEM TIME` queries (see #51380 for more information), and you will need to take a backup that is backdated to the system time when the database still existed. 
{{site.data.alerts.callout_info}} If the table you are restoring has foreign keys, [careful consideration]({% link {{ page.version.version }}/restore.md %}#remove-the-foreign-key-before-restore) should be applied to make sure data integrity is maintained during the restore process. diff --git a/src/current/v24.3/eventlog.md b/src/current/v24.3/eventlog.md index 2dd3c072d82..e7b813ded9d 100644 --- a/src/current/v24.3/eventlog.md +++ b/src/current/v24.3/eventlog.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/eventlog.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/eventlog.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v24.3/example-apps.md b/src/current/v24.3/example-apps.md index bea3cfd1876..0d5ba010933 100644 --- a/src/current/v24.3/example-apps.md +++ b/src/current/v24.3/example-apps.md @@ -14,7 +14,7 @@ Click the links in the tables below to see simple but complete example applicati If you are looking to do a specific task such as connect to the database, insert data, or run multi-statement transactions, see [this list of tasks](#tasks). {{site.data.alerts.callout_info}} -Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM with **partial** support. If you encounter problems, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward full support. +Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM with **partial** support. If you encounter problems, please open an issue with details to help us make progress toward full support. 
Note that tools with [**community-level** support]({% link {{ page.version.version }}/community-tooling.md %}) have been tested or developed by the CockroachDB community, but are not officially supported by Cockroach Labs. If you encounter problems with using these tools, please contact the maintainer of the tool with details. {{site.data.alerts.end}} diff --git a/src/current/v24.3/file-an-issue.md b/src/current/v24.3/file-an-issue.md index 9255e2c08b2..760c5b181db 100644 --- a/src/current/v24.3/file-an-issue.md +++ b/src/current/v24.3/file-an-issue.md @@ -28,7 +28,7 @@ To file an issue in GitHub, we need the following information: ### Template -You can use this as a template for [filing an issue in GitHub](https://github.com/cockroachdb/cockroach/issues/new): +You can use this as a template for filing an issue in GitHub: ~~~ diff --git a/src/current/v24.3/fips.md b/src/current/v24.3/fips.md index e490ddcd296..1ca62a6fa8e 100644 --- a/src/current/v24.3/fips.md +++ b/src/current/v24.3/fips.md @@ -58,7 +58,7 @@ A system must have FIPS mode enabled in the kernel before it can run the FIPS-re If you do not want to use the FIPS-ready CockroachDB Docker image directly, you can create a custom Docker image based on [Red Hat's Universal Base Image 8 Docker image](https://catalog.redhat.com/software/containers/ubi8/ubi/): -- You can model your Dockerfile on the one that Cockroach Labs uses to produce the [FIPS-ready Docker image](https://github.com/cockroachdb/cockroach/blob/master/build/deploy/Dockerfile) for CockroachDB. +- You can model your Dockerfile on the one that Cockroach Labs uses to produce the FIPS-ready Docker image for CockroachDB. - Your Dockerfile must install OpenSSL before it starts the `cockroach` binary. - You must enable FIPS mode on the Docker host kernel before it can run containers with FIPS mode enabled. The FIPS-ready CockroachDB Docker image must run with FIPS mode enabled. 
To enable FIPS mode in the Docker host kernel, refer to [Enable FIPS mode](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/security_hardening/assembly_installing-a-rhel-8-system-with-fips-mode-enabled_security-hardening) in Red Hat's documentation. To verify that FIPS mode is enabled, refer to [Verify that the kernel enforces FIPS mode](#verify-that-the-kernel-enforces-fips-mode). diff --git a/src/current/v24.3/foreign-key.md b/src/current/v24.3/foreign-key.md index 55677511e3d..7b06a11d484 100644 --- a/src/current/v24.3/foreign-key.md +++ b/src/current/v24.3/foreign-key.md @@ -93,7 +93,7 @@ For matching purposes, composite foreign keys can be in one of three states: For examples showing how these key matching algorithms work, see [Match composite foreign keys with `MATCH SIMPLE` and `MATCH FULL`](#match-composite-foreign-keys-with-match-simple-and-match-full). {{site.data.alerts.callout_info}} -CockroachDB does not support `MATCH PARTIAL`. For more information, see issue [#20305](https://github.com/cockroachdb/cockroach/issues/20305). +CockroachDB does not support `MATCH PARTIAL`. For more information, see issue #20305. 
{{site.data.alerts.end}} ### Foreign key actions diff --git a/src/current/v24.3/functions-and-operators.md b/src/current/v24.3/functions-and-operators.md index 9515f188f77..6fb648e24ff 100644 --- a/src/current/v24.3/functions-and-operators.md +++ b/src/current/v24.3/functions-and-operators.md @@ -51,7 +51,7 @@ In addition to the built-in functions described in the following sections, Cockr ## Built-in functions -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/functions.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/functions.md{% endcapture %}{% include {{ cockroach_include }} %} ## Aggregate functions @@ -61,11 +61,11 @@ For examples showing how to use aggregate functions, see [the `SELECT` clause do Non-commutative aggregate functions are sensitive to the order in which the rows are processed in the surrounding [`SELECT` clause]({% link {{ page.version.version }}/select-clause.md %}#aggregate-functions). To specify the order in which input rows are processed, you can add an [`ORDER BY`]({% link {{ page.version.version }}/order-by.md %}) clause within the function argument list. For examples, see the [`SELECT` clause]({% link {{ page.version.version }}/select-clause.md %}#order-aggregate-function-input-rows-by-column) documentation. 
{{site.data.alerts.end}} -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/aggregates.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/aggregates.md{% endcapture %}{% include {{ cockroach_include }} %} ## Window functions -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/window_functions.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/window_functions.md{% endcapture %}{% include {{ cockroach_include }} %} ## Operators @@ -137,7 +137,7 @@ The following table lists all CockroachDB operators from highest to lowest prece ### Supported operations -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/operators.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/operators.md{% endcapture %}{% include {{ cockroach_include }} %} {% comment %} ## `CAST()` diff --git a/src/current/v24.3/install-client-drivers.md b/src/current/v24.3/install-client-drivers.md index 984671a0dfc..4d919756ce3 100644 --- a/src/current/v24.3/install-client-drivers.md +++ b/src/current/v24.3/install-client-drivers.md @@ -8,7 +8,7 @@ docs_area: develop CockroachDB supports both native drivers and the PostgreSQL wire protocol, so most available PostgreSQL client drivers and ORM frameworks should work with CockroachDB. Choose a language for supported clients, and follow the installation steps. After you install a client library, you can [connect to the database]({% link {{ page.version.version }}/connect-to-the-database.md %}). {{site.data.alerts.callout_info}} -Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM framework with **partial** support. 
If you encounter problems, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward full support. +Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM framework with **partial** support. If you encounter problems, please open an issue with details to help us make progress toward full support. {{site.data.alerts.end}}
      diff --git a/src/current/v24.3/install-cockroachdb-mac.md b/src/current/v24.3/install-cockroachdb-mac.md index b8e5583b7f0..2cab92da935 100644 --- a/src/current/v24.3/install-cockroachdb-mac.md +++ b/src/current/v24.3/install-cockroachdb-mac.md @@ -201,4 +201,4 @@ CockroachDB runtimes built for the ARM architecture have the following limitatio {% comment %}v22.2.0+{% endcomment %} -On macOS ARM systems, [spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the GEOS libraries. Users needing spatial features on an ARM Mac may instead [run the Intel binary](#install-the-binary) or use the[Docker container image](#use-docker). Refer to [GitHub issue #93161](https://github.com/cockroachdb/cockroach/issues/93161) for more information. +On macOS ARM systems, [spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the GEOS libraries. Users needing spatial features on an ARM Mac may instead [run the Intel binary](#install-the-binary) or use the [Docker container image](#use-docker). Refer to GitHub issue #93161 for more information.
diff --git a/src/current/v24.3/intellij-idea.md b/src/current/v24.3/intellij-idea.md index de46249320f..ef1ce93b9d4 100644 --- a/src/current/v24.3/intellij-idea.md +++ b/src/current/v24.3/intellij-idea.md @@ -81,7 +81,7 @@ You can now use IntelliJ's [database tool window](https://www.jetbrains.com/help ## Report issues with IntelliJ IDEA & CockroachDB -If you encounter issues other than those outlined above, please [file an issue on the `cockroachdb/cockroach` GitHub repo](https://github.com/cockroachdb/cockroach/issues/new?template=bug_report.md), including the following details about the environment where you encountered the issue: +If you encounter issues other than those outlined above, please file an issue on the `cockroachdb/cockroach` GitHub repo, including the following details about the environment where you encountered the issue: - CockroachDB version ([`cockroach version`]({% link {{ page.version.version }}/cockroach-version.md %})) - IntelliJ IDEA version diff --git a/src/current/v24.3/known-limitations.md b/src/current/v24.3/known-limitations.md index 608cf909f5a..893d9bfe965 100644 --- a/src/current/v24.3/known-limitations.md +++ b/src/current/v24.3/known-limitations.md @@ -64,7 +64,7 @@ CockroachDB supports the [PostgreSQL wire protocol](https://www.postgresql.org/d #### `CAST` expressions containing a subquery with an `ENUM` target are not supported -Casting subqueries to ENUMs in views and UDFs is not supported. [#108184](https://github.com/cockroachdb/cockroach/issues/108184) +Casting subqueries to ENUMs in views and UDFs is not supported. #108184 #### Statements containing multiple modification subqueries of the same table are disallowed @@ -73,13 +73,13 @@ Statements containing multiple modification subqueries mutating the same row cou - Set the `sql.multiple_modifications_of_table.enabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) to `true`. 
- Use the `enable_multiple_modifications_of_table` [session variable]({% link {{ page.version.version }}/set-vars.md %}). -If multiple mutations inside the same statement affect different tables with [`FOREIGN KEY`]({% link {{ page.version.version }}/foreign-key.md %}) relations and `ON CASCADE` clauses between them, the results will be different from what is expected in PostgreSQL. [#70731](https://github.com/cockroachdb/cockroach/issues/70731) +If multiple mutations inside the same statement affect different tables with [`FOREIGN KEY`]({% link {{ page.version.version }}/foreign-key.md %}) relations and `ON CASCADE` clauses between them, the results will be different from what is expected in PostgreSQL. #70731 #### Using `default_int_size` session variable in batch of statements When setting the `default_int_size` [session variable]({% link {{ page.version.version }}/set-vars.md %}) in a batch of statements such as `SET default_int_size='int4'; SELECT 1::IN`, the `default_int_size` variable will not take effect until the next statement. Statement parsing is asynchronous with statement execution. -As a workaround, set `default_int_size` via your database driver, or ensure that `SET default_int_size` is in its own statement. [#32846](https://github.com/cockroachdb/cockroach/issues/32846) +As a workaround, set `default_int_size` via your database driver, or ensure that `SET default_int_size` is in its own statement. #32846 #### Overload resolution for collated strings @@ -106,15 +106,15 @@ Many string operations are not properly overloaded for [collated strings]({% lin pq: unsupported binary operator: || ~~~ -[#10679](https://github.com/cockroachdb/cockroach/issues/10679) +#10679 #### Current sequence value not checked when updating min/max value -Altering the minimum or maximum value of a series does not check the current value of a series. 
This means that it is possible to silently set the maximum to a value less than, or a minimum value greater than, the current value. [#23719](https://github.com/cockroachdb/cockroach/issues/23719) +Altering the minimum or maximum value of a series does not check the current value of a series. This means that it is possible to silently set the maximum to a value less than, or a minimum value greater than, the current value. #23719 #### `null_ordered_last` does not produce correct results with tuples -By default, CockroachDB orders `NULL`s before all other values. For compatibility with PostgreSQL, the `null_ordered_last` [session variable]({% link {{ page.version.version }}/set-vars.md %}) was added, which changes the default to order `NULL` values after all other values. This works in most cases, due to some transformations CockroachDB makes in the optimizer to add extra ordering columns. However, it does not work when the ordering column is a tuple. [#93558](https://github.com/cockroachdb/cockroach/issues/93558) +By default, CockroachDB orders `NULL`s before all other values. For compatibility with PostgreSQL, the `null_ordered_last` [session variable]({% link {{ page.version.version }}/set-vars.md %}) was added, which changes the default to order `NULL` values after all other values. This works in most cases, due to some transformations CockroachDB makes in the optimizer to add extra ordering columns. However, it does not work when the ordering column is a tuple. #93558 ### Functions and procedures @@ -160,10 +160,10 @@ Transactions with [priority `HIGH`]({% link {{ page.version.version }}/transacti ERROR: unimplemented: cannot use ROLLBACK TO SAVEPOINT in a HIGH PRIORITY transaction containing DDL SQLSTATE: 0A000 HINT: You have attempted to use a feature that is not yet implemented. 
-See: https://github.com/cockroachdb/cockroach/issues/46414 +See: cockroachdb/cockroach#46414 ~~~ -[#46414](https://github.com/cockroachdb/cockroach/issues/46414) +#46414 #### `CANCEL JOB` limitations @@ -214,7 +214,7 @@ HINT: You have attempted to use a feature that is not yet implemented. See: https://go.crdb.dev/issue-v/42508/v24.2 ~~~ -[#42508](https://github.com/cockroachdb/cockroach/issues/42508) +#42508 #### Dropping a column referenced by a partial index @@ -241,9 +241,9 @@ When executing an [`ALTER TABLE ADD COLUMN`]({% link {{ page.version.version }}/ Some [schema changes]({% link {{ page.version.version }}/online-schema-changes.md %}) that [drop columns]({% link {{ page.version.version }}/alter-table.md %}#drop-column) cannot be [rolled back]({% link {{ page.version.version }}/rollback-transaction.md %}) properly. -In some cases, the rollback will succeed, but the column data might be partially or totally missing, or stale due to the asynchronous nature of the schema change. [#46541](https://github.com/cockroachdb/cockroach/issues/46541) +In some cases, the rollback will succeed, but the column data might be partially or totally missing, or stale due to the asynchronous nature of the schema change. #46541 -In other cases, the rollback will fail in such a way that will never be cleaned up properly, leaving the table descriptor in a state where no other schema changes can be run successfully.
#47712 To reduce the chance that a column drop will roll back incorrectly: @@ -279,7 +279,7 @@ As a workaround, you can either [manually split a table's columns into multiple #### Placeholders in `PARTITION BY` -{% include {{ page.version.version }}/known-limitations/partitioning-with-placeholders.md %} [#19464](https://github.com/cockroachdb/cockroach/issues/19464) +{% include {{ page.version.version }}/known-limitations/partitioning-with-placeholders.md %} #19464 #### Unsupported trigram syntax @@ -295,7 +295,7 @@ The following PostgreSQL syntax and features are currently unsupported for [full #### CockroachDB does not allow inverted indexes with `STORING` -CockroachDB does not allow inverted indexes with a [`STORING` column]({% link {{ page.version.version }}/create-index.md %}#store-columns). [#88278](https://github.com/cockroachdb/cockroach/issues/88278) +CockroachDB does not allow inverted indexes with a [`STORING` column]({% link {{ page.version.version }}/create-index.md %}#store-columns). #88278 #### Multiple arbiter indexes for `INSERT ON CONFLICT DO UPDATE` @@ -315,19 +315,19 @@ CockroachDB does not allow inverted indexes with a [`STORING` column]({% link {{ CockroachDB supports efficiently storing and querying [spatial data]({% link {{ page.version.version }}/export-spatial-data.md %}), with the following limitations: -- Not all [PostGIS spatial functions](https://postgis.net/docs/reference.html) are supported. [#49203](https://github.com/cockroachdb/cockroach/issues/49203) +- Not all [PostGIS spatial functions](https://postgis.net/docs/reference.html) are supported. #49203 -- The `AddGeometryColumn` [spatial function]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) only allows constant arguments. 
[#49402](https://github.com/cockroachdb/cockroach/issues/49402) +- The `AddGeometryColumn` [spatial function]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) only allows constant arguments. #49402 -- The `AddGeometryColumn` spatial function only allows the `true` value for its `use_typmod` parameter. [#49448](https://github.com/cockroachdb/cockroach/issues/49448) +- The `AddGeometryColumn` spatial function only allows the `true` value for its `use_typmod` parameter. #49448 -- CockroachDB does not support the `@` operator. Instead of using `@` in spatial expressions, we recommend using the inverse, with `~`. For example, instead of `a @ b`, use `b ~ a`. [#56124](https://github.com/cockroachdb/cockroach/issues/56124) +- CockroachDB does not support the `@` operator. Instead of using `@` in spatial expressions, we recommend using the inverse, with `~`. For example, instead of `a @ b`, use `b ~ a`. #56124 -- CockroachDB does not yet support [`INSERT`]({% link {{ page.version.version }}/insert.md %})s into the [`spatial_ref_sys` table]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-system-tables). This limitation also blocks the [`ogr2ogr -f PostgreSQL` file conversion command](https://gdal.org/programs/ogr2ogr.html#cmdoption-ogr2ogr-f). [#55903](https://github.com/cockroachdb/cockroach/issues/55903) +- CockroachDB does not yet support [`INSERT`]({% link {{ page.version.version }}/insert.md %})s into the [`spatial_ref_sys` table]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-system-tables). This limitation also blocks the [`ogr2ogr -f PostgreSQL` file conversion command](https://gdal.org/programs/ogr2ogr.html#cmdoption-ogr2ogr-f). #55903 -- CockroachDB does not yet support [k-nearest neighbors](https://wikipedia.org/wiki/K-nearest_neighbors_algorithm). 
[#55227](https://github.com/cockroachdb/cockroach/issues/55227) +- CockroachDB does not yet support [k-nearest neighbors](https://wikipedia.org/wiki/K-nearest_neighbors_algorithm). #55227 -- CockroachDB does not support using [schema name prefixes]({% link {{ page.version.version }}/sql-name-resolution.md %}#how-name-resolution-works) to refer to [data types]({% link {{ page.version.version }}/data-types.md %}) with type modifiers (e.g., `public.geometry(linestring, 4326)`). Instead, use fully-unqualified names to refer to data types with type modifiers (e.g., `geometry(linestring,4326)`). [#56492](https://github.com/cockroachdb/cockroach/issues/56492) +- CockroachDB does not support using [schema name prefixes]({% link {{ page.version.version }}/sql-name-resolution.md %}#how-name-resolution-works) to refer to [data types]({% link {{ page.version.version }}/data-types.md %}) with type modifiers (e.g., `public.geometry(linestring, 4326)`). Instead, use fully-unqualified names to refer to data types with type modifiers (e.g., `geometry(linestring,4326)`). #56492 - {% include {{ page.version.version }}/known-limitations/srid-4326-limitations.md %} @@ -341,9 +341,9 @@ Refer to [`OID` best practices]({% link {{ page.version.version }}/oid.md %}#bes - {% include {{page.version.version}}/cdc/avro-udt-composite.md %} - {% include {{page.version.version}}/cdc/csv-udt-composite.md %} -- Updating subfields of composite types using dot syntax results in a syntax error. [#102984](https://github.com/cockroachdb/cockroach/issues/102984) +- Updating subfields of composite types using dot syntax results in a syntax error. #102984 -- Tuple elements cannot be accessed without enclosing the [composite variable]({% link {{ page.version.version }}/create-type.md %}#create-a-composite-data-type) name in parentheses. For example, `(OLD).column` and `(NEW).column` when used in [triggers]({% link {{ page.version.version }}/triggers.md %}). 
[#114687](https://github.com/cockroachdb/cockroach/issues/114687) +- Tuple elements cannot be accessed without enclosing the [composite variable]({% link {{ page.version.version }}/create-type.md %}#create-a-composite-data-type) name in parentheses. For example, `(OLD).column` and `(NEW).column` when used in [triggers]({% link {{ page.version.version }}/triggers.md %}). #114687 #### `ALTER TYPE` limitations @@ -387,7 +387,7 @@ However, if there is no host at the target IP address, or if a firewall rule blo - Configure any active network firewalls to allow node-to-node traffic. - Verify that orchestration tools (e.g., Kubernetes) are configured to use the correct network connection information. -[#53410](https://github.com/cockroachdb/cockroach/issues/53410) +#53410 #### No guaranteed state switch from `DECOMMISSIONING` to `DECOMMISSIONED` if `node decommission` is interrupted @@ -396,7 +396,7 @@ There is no guaranteed state switch from `DECOMMISSIONING` to `DECOMMISSIONED` i - The `cockroach node decommission --wait-all` command was run and then interrupted - The `cockroach node decommission --wait=none` command was run -This is because the state flip is effected by the CLI program at the end. Only the CLI (or its underlying API call) is able to finalize the "decommissioned" state. If the command is interrupted, or `--wait=none` is used, the state will only flip to "decommissioned" when the CLI program is run again after decommissioning has done all its work. [#94430](https://github.com/cockroachdb/cockroach/issues/94430) +This is because the state flip is effected by the CLI program at the end. Only the CLI (or its underlying API call) is able to finalize the "decommissioned" state. If the command is interrupted, or `--wait=none` is used, the state will only flip to "decommissioned" when the CLI program is run again after decommissioning has done all its work. 
#94430 #### Simultaneous client connections and running queries on a single node @@ -432,7 +432,7 @@ As a workaround, [execute the file from the command line]({% link {{ page.versio #### Spatial features disabled for ARM Macs -[Spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the [GEOS](https://libgeos.org/) libraries. Users needing spatial features on an ARM Mac may instead [use Rosetta](https://developer.apple.com/documentation/virtualization/running_intel_binaries_in_linux_vms_with_rosetta) to [run the Intel binary]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#install-binary) or use the [Docker image]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#use-docker) distribution. [GitHub tracking issue](https://github.com/cockroachdb/cockroach/issues/93161) +[Spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the [GEOS](https://libgeos.org/) libraries. Users needing spatial features on an ARM Mac may instead [use Rosetta](https://developer.apple.com/documentation/virtualization/running_intel_binaries_in_linux_vms_with_rosetta) to [run the Intel binary]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#install-binary) or use the [Docker image]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#use-docker) distribution. GitHub tracking issue #### Logging system limitations @@ -477,7 +477,7 @@ Accessing the DB Console for a secure cluster now requires login information (i. The [`COMMENT ON`]({% link {{ page.version.version }}/comment-on.md %}) statement associates comments to databases, tables, or columns. However, the internal table (`system.comments`) in which these comments are stored is not captured by a [`BACKUP`]({% link {{ page.version.version }}/backup.md %}) of a table or database. 
-As a workaround, take a cluster backup instead, as the `system.comments` table is included in cluster backups. [#44396](https://github.com/cockroachdb/cockroach/issues/44396) +As a workaround, take a cluster backup instead, as the `system.comments` table is included in cluster backups. #44396 #### `SHOW BACKUP` does not support symlinks for nodelocal @@ -508,8 +508,8 @@ Change data capture (CDC) provides efficient, distributed, row-level changefeeds The SQL optimizer has limitations under certain isolation levels: -- The new implementation of `SELECT FOR UPDATE` is not yet the default setting under `SERIALIZABLE` isolation. It can be used under `SERIALIZABLE` isolation by setting the `optimizer_use_lock_op_for_serializable` [session setting]({% link {{ page.version.version }}/session-variables.md %}) to `true`. [#114737](https://github.com/cockroachdb/cockroach/issues/114737) -- `SELECT FOR UPDATE` does not lock completely-`NULL` column families in multi-column-family tables. [#116836](https://github.com/cockroachdb/cockroach/issues/116836) +- The new implementation of `SELECT FOR UPDATE` is not yet the default setting under `SERIALIZABLE` isolation. It can be used under `SERIALIZABLE` isolation by setting the `optimizer_use_lock_op_for_serializable` [session setting]({% link {{ page.version.version }}/session-variables.md %}) to `true`. #114737 +- `SELECT FOR UPDATE` does not lock completely-`NULL` column families in multi-column-family tables. #116836 #### Statistics limitations @@ -518,7 +518,7 @@ The SQL optimizer has limitations under certain isolation levels: #### Incorrect query plans for partitions with `NULL` values -In cases where the partition definition includes a comparison with `NULL` and a query constraint, incorrect query plans are returned. However, this case uses non-standard partitioning which defines partitions which could never hold values, so it is not likely to occur in production environments. 
[#82774](https://github.com/cockroachdb/cockroach/issues/82774) +In cases where the partition definition includes a comparison with `NULL` and a query constraint, incorrect query plans are returned. However, this case uses non-standard partitioning which defines partitions which could never hold values, so it is not likely to occur in production environments. #82774 #### Vectorized engine limitations @@ -526,15 +526,15 @@ In cases where the partition definition includes a comparison with `NULL` and a #### `transaction_rows_read_err` and `transaction_rows_written_err` do not halt query execution -The `transaction_rows_read_err` and `transaction_rows_written_err` [session settings]({% link {{ page.version.version }}/set-vars.md %}) limit the number of rows read or written by a single [transaction]({% link {{ page.version.version }}/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction). These session settings will fail the transaction with an error, but not until the current query finishes executing and the results have been returned to the client. [#70473](https://github.com/cockroachdb/cockroach/issues/70473) +The `transaction_rows_read_err` and `transaction_rows_written_err` [session settings]({% link {{ page.version.version }}/set-vars.md %}) limit the number of rows read or written by a single [transaction]({% link {{ page.version.version }}/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction). These session settings will fail the transaction with an error, but not until the current query finishes executing and the results have been returned to the client. #70473 #### `sql.guardrails.max_row_size_err` misses indexed virtual computed columns -The `sql.guardrails.max_row_size_err` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) misses large rows caused by indexed virtual computed columns. This is because the guardrail only checks the size of primary key rows, not secondary index rows. 
[#69540](https://github.com/cockroachdb/cockroach/issues/69540) +The `sql.guardrails.max_row_size_err` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) misses large rows caused by indexed virtual computed columns. This is because the guardrail only checks the size of primary key rows, not secondary index rows. #69540 #### Using `LIKE...ESCAPE` in `WHERE` and `HAVING` constraints -CockroachDB tries to optimize most comparisons operators in `WHERE` and `HAVING` clauses into constraints on SQL indexes by only accessing selected rows. This is done for `LIKE` clauses when a common prefix for all selected rows can be determined in the search pattern (e.g., `... LIKE 'Joe%'`). However, this optimization is not yet available if the `ESCAPE` keyword is also used. [#30192](https://github.com/cockroachdb/cockroach/issues/30192) +CockroachDB tries to optimize most comparisons operators in `WHERE` and `HAVING` clauses into constraints on SQL indexes by only accessing selected rows. This is done for `LIKE` clauses when a common prefix for all selected rows can be determined in the search pattern (e.g., `... LIKE 'Joe%'`). However, this optimization is not yet available if the `ESCAPE` keyword is also used. #30192 #### Import with a high amount of disk contention @@ -689,7 +689,7 @@ UNION ALL SELECT * FROM t1 LEFT JOIN t2 ON st_contains(t1.geom, t2.geom) AND t2. (54 rows) ``` -[#59649](https://github.com/cockroachdb/cockroach/issues/59649) +#59649 #### Locality optimized search limitations @@ -702,4 +702,4 @@ UNION ALL SELECT * FROM t1 LEFT JOIN t2 ON st_contains(t1.geom, t2.geom) AND t2. #### Inverted join for `tsvector` and `tsquery` types is not supported -CockroachDB cannot index-accelerate queries with `@@` predicates when both sides of the operator are variables. [#102731](https://github.com/cockroachdb/cockroach/issues/102731) +CockroachDB cannot index-accelerate queries with `@@` predicates when both sides of the operator are variables. 
#102731 diff --git a/src/current/v24.3/kubernetes-performance.md b/src/current/v24.3/kubernetes-performance.md index 47aef82797d..356ecb6e2fb 100644 --- a/src/current/v24.3/kubernetes-performance.md +++ b/src/current/v24.3/kubernetes-performance.md @@ -20,9 +20,9 @@ Before you focus on optimizing a Kubernetes-orchestrated CockroachDB cluster: A number of independent factors affect performance when running CockroachDB on Kubernetes. Most are easiest to change before you create your CockroachDB cluster. If you need to modify a CockroachDB cluster that is already running on Kubernetes, extra care and testing is strongly recommended. -The following sections show how to modify excerpts from our provided Kubernetes configuration YAML files. You can find the most up-to-date versions of these files on GitHub: [one for running CockroachDB in secure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset-secure.yaml) and one for [running CockroachDB in insecure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml). +The following sections show how to modify excerpts from our provided Kubernetes configuration YAML files. You can find the most up-to-date versions of these files on GitHub: [one for running CockroachDB in secure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset-secure.yaml) and one for [running CockroachDB in insecure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml). -You can also use a [performance-optimized configuration file for secure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml) or [insecure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml). 
Be sure to modify the file wherever there is a `TODO` comment. +You can also use a [performance-optimized configuration file for secure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml) or [insecure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml). Be sure to modify the file wherever there is a `TODO` comment. ### Version of CockroachDB @@ -332,7 +332,7 @@ If for some reason setting appropriate resource requests still isn't getting you #### Client applications on the same machines as CockroachDB -Running client applications such as benchmarking applications on the same machines as CockroachDB can be even worse than just having Kubernetes system pods on the same machines. They are very likely to end up competing for resources, because when the applications get more loaded than usual, so will the CockroachDB processes. The best way to avoid this is to [set resource requests and limits](#resource-requests-and-limits), but if you are unwilling or unable to do that for some reason, you can also set [anti-affinity scheduling policies](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) on your client applications. Anti-affinity policies are placed in the pod spec, so if you wanted to change our provided example load generator app, you'd change [these lines](https://github.com/cockroachdb/cockroach/blob/98c506c48f3517d1ac1aadb6a09e1b23ad672c37/cloud/kubernetes/example-app.yaml#L11-L12): +Running client applications such as benchmarking applications on the same machines as CockroachDB can be even worse than just having Kubernetes system pods on the same machines. They are very likely to end up competing for resources, because when the applications get more loaded than usual, so will the CockroachDB processes. 
The best way to avoid this is to [set resource requests and limits](#resource-requests-and-limits), but if you are unwilling or unable to do that for some reason, you can also set [anti-affinity scheduling policies](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) on your client applications. Anti-affinity policies are placed in the pod spec, so if you wanted to change our provided example load generator app, you'd change [these lines](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml): ~~~ yaml spec: diff --git a/src/current/v24.3/log-formats.md b/src/current/v24.3/log-formats.md index 7c715f38c89..cfcf8512b19 100644 --- a/src/current/v24.3/log-formats.md +++ b/src/current/v24.3/log-formats.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/logformats.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/logformats.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v24.3/logging.md b/src/current/v24.3/logging.md index a9b22c4c05f..357817bc212 100644 --- a/src/current/v24.3/logging.md +++ b/src/current/v24.3/logging.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/logging.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/logging.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v24.3/migrate-from-avro.md b/src/current/v24.3/migrate-from-avro.md index de42d232917..eb46e391fce 100644 --- a/src/current/v24.3/migrate-from-avro.md +++ b/src/current/v24.3/migrate-from-avro.md @@ -161,7 +161,7 @@ There are additional import 
[options][option] you can use when importing binary - `records_terminated_by`, which specifies the unicode character used to indicate new lines in the input binary or JSON file (default: `\n`). {{site.data.alerts.callout_info}} -The following example uses sample data generated by [Avro tools](https://github.com/cockroachdb/cockroach/tree/master/pkg/sql/importer/testdata/avro). +The following example uses sample data generated by Avro tools. {{site.data.alerts.end}} For example, to import the data from `simple-schema.json` into a `simple` table, first [create the table]({% link {{ page.version.version }}/create-table.md %}) to import into. Then run `IMPORT INTO` with the following options: diff --git a/src/current/v24.3/monitor-cockroachdb-kubernetes.md b/src/current/v24.3/monitor-cockroachdb-kubernetes.md index 44a686fce22..322c288d073 100644 --- a/src/current/v24.3/monitor-cockroachdb-kubernetes.md +++ b/src/current/v24.3/monitor-cockroachdb-kubernetes.md @@ -128,7 +128,7 @@ If you're on Hosted GKE, before starting, make sure the email address associated {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/prometheus.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/prometheus.yaml ~~~ {{site.data.alerts.callout_info}} @@ -177,14 +177,14 @@ If you're on Hosted GKE, before starting, make sure the email address associated ## Configure Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. 
+Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. -1. Download our alertmanager-config.yaml configuration file: +1. Download our alertmanager-config.yaml configuration file: {% include_cached copy-clipboard.html %} ~~~ shell $ curl -O \ - https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager-config.yaml + https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager-config.yaml ~~~ 1. Edit the `alertmanager-config.yaml` file to [specify the desired receivers for notifications](https://prometheus.io/docs/alerting/configuration/#receiver). Initially, the file contains a placeholder web hook. @@ -214,12 +214,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen The name of the secret, `alertmanager-cockroachdb`, must match the name used in the `alertmanager.yaml` file. If they differ, the Alertmanager instance will start without configuration, and nothing will happen. {{site.data.alerts.end}} -1. Use our [`alertmanager.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: +1. 
Use our [`alertmanager.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml ~~~ ~~~ @@ -244,12 +244,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen Alertmanager -1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml): +1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml): {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alert-rules.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml ~~~ ~~~ diff --git a/src/current/v24.3/monitor-cockroachdb-with-prometheus.md b/src/current/v24.3/monitor-cockroachdb-with-prometheus.md index ad08d818b4e..2c661d20b66 100644 --- a/src/current/v24.3/monitor-cockroachdb-with-prometheus.md +++ b/src/current/v24.3/monitor-cockroachdb-with-prometheus.md @@ -15,7 +15,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration - Make sure you have already started a CockroachDB cluster, either [locally]({% link {{ page.version.version }}/start-a-local-cluster.md %}) or in a [production environment]({% link {{ page.version.version 
}}/manual-deployment.md %}). -- Note that all files used in this tutorial can be found in the [`monitoring`](https://github.com/cockroachdb/cockroach/tree/master/monitoring) directory of the CockroachDB repository. +- Note that all files used in this tutorial can be found in the `monitoring` directory of the CockroachDB repository. ## Step 1. Install Prometheus @@ -39,11 +39,11 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration ## Step 2. Configure Prometheus -1. Download the starter [Prometheus configuration file](https://github.com/cockroachdb/cockroach/blob/master/monitoring/prometheus.yml) for CockroachDB: +1. Download the starter [Prometheus configuration file](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/prometheus.yml) for CockroachDB: {% include_cached copy-clipboard.html %} ~~~ shell - curl -o prometheus.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/prometheus.yml + curl -o prometheus.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/prometheus.yml ~~~ When you examine the configuration file, you'll see that it is set up to scrape the time series metrics of a single, insecure local node every 10 seconds: @@ -60,7 +60,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration Production cluster | Change the `targets` field to include `':'` for each node in the cluster. Also, be sure your network configuration allows TCP communication on the specified ports. Secure cluster | Uncomment `scheme: 'https'` and comment out `scheme: 'http'`. -1. Create a `rules` directory and download the [aggregation rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/aggregation.rules.yml) and [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/alerts.rules.yml) for CockroachDB into it: +1. 
Create a `rules` directory and download the [aggregation rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/aggregation.rules.yml) and [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml) for CockroachDB into it: {% include_cached copy-clipboard.html %} ~~~ shell @@ -74,12 +74,12 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration {% include_cached copy-clipboard.html %} ~~~ shell - curl -o rules/aggregation.rules.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/rules/aggregation.rules.yml + curl -o rules/aggregation.rules.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/rules/aggregation.rules.yml ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o rules/alerts.rules.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/rules/alerts.rules.yml + curl -o rules/alerts.rules.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml ~~~ ## Step 3. Start Prometheus @@ -108,7 +108,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration ## Step 4. Send notifications with Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. In step 2, you already downloaded CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/alerts.rules.yml). Now, download, configure, and start [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). +Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. 
In step 2, you already downloaded CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml). Now, download, configure, and start [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). 1. Download the [latest Alertmanager tarball](https://prometheus.io/download/#alertmanager) for your OS. @@ -173,29 +173,29 @@ Although Prometheus lets you graph metrics, [Grafana](https://grafana.com/) is a Url | `http://:9090` Access | Direct -1. Download the starter [Grafana dashboards](https://github.com/cockroachdb/cockroach/tree/master/monitoring/grafana-dashboards/by-cluster) for CockroachDB: +1. Download the starter Grafana dashboards for CockroachDB: {% include_cached copy-clipboard.html %} ~~~ shell - curl -o runtime.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/runtime.json + curl -o runtime.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/runtime.json # runtime dashboard: node status, including uptime, memory, and cpu. ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o storage.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/storage.json + curl -o storage.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/storage.json # storage dashboard: storage availability. ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o sql.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/sql.json + curl -o sql.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/sql.json # sql dashboard: sql queries/transactions. 
~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o replication.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/replication.json + curl -o replication.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/replication.json # replicas dashboard: replica information and operations. ~~~ diff --git a/src/current/v24.3/monitoring-and-alerting.md b/src/current/v24.3/monitoring-and-alerting.md index 126ea495e7e..f9285b8e419 100644 --- a/src/current/v24.3/monitoring-and-alerting.md +++ b/src/current/v24.3/monitoring-and-alerting.md @@ -1116,7 +1116,7 @@ Start Prometheus and Alertmanager to begin watching for events to alert on. You ### Events to alert on {{site.data.alerts.callout_info}} -Currently, not all events listed have corresponding alert rule definitions available from the `api/v2/rules/` endpoint. Many events not yet available in this manner are defined in the pre-defined alerting rules. For more details, see [Monitor CockroachDB with Prometheus]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}). +Currently, not all events listed have corresponding alert rule definitions available from the `api/v2/rules/` endpoint. Many events not yet available in this manner are defined in the pre-defined alerting rules. For more details, see [Monitor CockroachDB with Prometheus]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}). {{site.data.alerts.end}} #### Node is down @@ -1125,7 +1125,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** If a node is down, its `_status/vars` endpoint will return a `Connection refused` error. Otherwise, the `liveness_livenodes` metric will be the total number of live nodes in the cluster. -- **Rule definition:** Use the `InstanceDead` alert from our pre-defined alerting rules. 
+- **Rule definition:** Use the `InstanceDead` alert from our pre-defined alerting rules. #### Node is restarting too frequently @@ -1133,7 +1133,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the number of times the `sys_uptime` metric in the node's `_status/vars` output was reset back to zero. The `sys_uptime` metric gives you the length of time, in seconds, that the `cockroach` process has been running. -- **Rule definition:** Use the `InstanceFlapping` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `InstanceFlapping` alert from our pre-defined alerting rules. #### Node is running low on disk space @@ -1141,7 +1141,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Divide the `capacity` metric by the `capacity_available` metric in the node's `_status/vars` output. -- **Rule definition:** Use the `StoreDiskLow` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `StoreDiskLow` alert from our pre-defined alerting rules. {% include {{page.version.version}}/storage/free-up-disk-space.md %} @@ -1157,7 +1157,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the `security_certificate_expiration_ca` metric in the node's `_status/vars` output. -- **Rule definition:** Use the `CACertificateExpiresSoon` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `CACertificateExpiresSoon` alert from our pre-defined alerting rules. #### Node certificate expires soon @@ -1165,7 +1165,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the `security_certificate_expiration_node` metric in the node's `_status/vars` output. -- **Rule definition:** Use the `NodeCertificateExpiresSoon` alert from our pre-defined alerting rules. 
+- **Rule definition:** Use the `NodeCertificateExpiresSoon` alert from our pre-defined alerting rules. #### Changefeed is experiencing high latency diff --git a/src/current/v24.3/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md b/src/current/v24.3/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md index b47593bda96..09b361cd5ac 100644 --- a/src/current/v24.3/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md +++ b/src/current/v24.3/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md @@ -71,7 +71,7 @@ Instead of using this approach, you can now [enable global access](https://cloud Our multi-region deployment approach relies on pod IP addresses being routable across three distinct Kubernetes clusters and regions. Both the hosted Google Kubernetes Engine (GKE) and Amazon Elastic Kubernetes Service (EKS) satisfy this requirement. -If you want to run on another cloud or on-premises, use this [basic network test](https://github.com/cockroachdb/cockroach/tree/master/cloud/kubernetes/multiregion#pod-to-pod-connectivity) to see if it will work. +If you want to run on another cloud or on-premises, use this basic network test to see if it will work.
      1. Complete the **Before You Begin** steps described in the [Google Kubernetes Engine Quickstart](https://cloud.google.com/kubernetes-engine/docs/quickstart) documentation. @@ -322,21 +322,21 @@ This important rule enables node communication between Kubernetes clusters in di The Kubernetes cluster in each region needs to have a [Network Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/introduction.html) pointed at its CoreDNS service, which you will configure in the next step. -1. Upload our load balancer manifest [`dns-lb-eks.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml) to the Kubernetes clusters in all 3 regions: +1. Upload our load balancer manifest [`dns-lb-eks.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml) to the Kubernetes clusters in all 3 regions: {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context + kubectl apply -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context + kubectl apply -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context + kubectl apply -f 
https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context ~~~ You should see the load balancer appear in the Load Balancers section of the EC2 console in each region. This load balancer will route traffic to CoreDNS in the region. @@ -369,11 +369,11 @@ Each Kubernetes cluster has a [CoreDNS](https://coredns.io/) service that respon To enable traffic forwarding to CockroachDB pods in all 3 regions, you need to [modify the ConfigMap](https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configmap-options) for the CoreDNS Corefile in each region. -1. Download and open our ConfigMap template [`configmap.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/configmap.yaml): +1. Download and open our ConfigMap template [`configmap.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/configmap.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/configmap.yaml + curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/configmap.yaml ~~~ 1. After [obtaining the IP addresses of the Network Load Balancers](#set-up-load-balancing) in all 3 regions, you can use this information to define a **separate ConfigMap for each region**. Each unique ConfigMap lists the forwarding addresses for the pods in the 2 other regions. 
@@ -467,7 +467,7 @@ If you plan to run your instances exclusively on private subnets, set the follow {% include_cached copy-clipboard.html %} ~~~ shell $ curl -OOOOOOOOO \ - https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/{README.md,client-secure.yaml,cluster-init-secure.yaml,cockroachdb-statefulset-secure.yaml,dns-lb.yaml,example-app-secure.yaml,external-name-svc.yaml,setup.py,teardown.py} + https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/{README.md,client-secure.yaml,cluster-init-secure.yaml,cockroachdb-statefulset-secure.yaml,dns-lb.yaml,example-app-secure.yaml,external-name-svc.yaml,setup.py,teardown.py} ~~~ 1. Retrieve the `kubectl` "contexts" for your clusters: @@ -685,11 +685,11 @@ The below steps use [`cockroach cert` commands]({% link {{ page.version.version ### Create StatefulSets -1. Download and open our [multi-region StatefulSet configuration](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml). You'll save three versions of this file locally, one for each set of 3 CockroachDB nodes per region. +1. Download and open our [multi-region StatefulSet configuration](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml). You'll save three versions of this file locally, one for each set of 3 CockroachDB nodes per region. {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml ~~~ Look for **TODO** comments in the file. 
These highlight fields you need to define before deploying your StatefulSet. @@ -814,7 +814,7 @@ The pod uses the `root` client certificate created earlier by the `setup.py` scr {% include_cached copy-clipboard.html %} ~~~ shell - kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/client-secure.yaml --context --namespace + kubectl create -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/client-secure.yaml --context --namespace ~~~ ~~~ diff --git a/src/current/v24.3/postgresql-compatibility.md b/src/current/v24.3/postgresql-compatibility.md index 15eff6fdc35..24b3639b1de 100644 --- a/src/current/v24.3/postgresql-compatibility.md +++ b/src/current/v24.3/postgresql-compatibility.md @@ -34,11 +34,11 @@ When set to `true`, multiple portals can be open at the same time, with their ex This feature has the following limitations: - Only read-only [`SELECT` queries]({% link {{ page.version.version }}/selection-queries.md %}) without [subqueries]({% link {{ page.version.version }}/subqueries.md %}) are supported. 
-- Postqueries (which are how CockroachDB executes [foreign key checks]({% link {{ page.version.version }}/foreign-key.md %}), for example) are not supported - [cockroachdb/cockroach#96398](https://github.com/cockroachdb/cockroach/issues/96398) -- [Distributed SQL execution]({% link {{ page.version.version }}/architecture/sql-layer.md %}#distsql) is not supported for multiple active portals; instead queries execute on the [gateway node]({% link {{ page.version.version }}/architecture/life-of-a-distributed-transaction.md %}#gateway) only - [cockroachdb/cockroach#100822](https://github.com/cockroachdb/cockroach/issues/100822) -- Only the latest execution of a statement from a pausable portal is recorded by the [trace infrastructure]({% link {{ page.version.version }}/show-trace.md %}) - [cockroachdb/cockroach#99404](https://github.com/cockroachdb/cockroach/issues/99404) +- Postqueries (which are how CockroachDB executes [foreign key checks]({% link {{ page.version.version }}/foreign-key.md %}), for example) are not supported - cockroachdb/cockroach#96398 +- [Distributed SQL execution]({% link {{ page.version.version }}/architecture/sql-layer.md %}#distsql) is not supported for multiple active portals; instead queries execute on the [gateway node]({% link {{ page.version.version }}/architecture/life-of-a-distributed-transaction.md %}#gateway) only - cockroachdb/cockroach#100822 +- Only the latest execution of a statement from a pausable portal is recorded by the [trace infrastructure]({% link {{ page.version.version }}/show-trace.md %}) - cockroachdb/cockroach#99404 -In addition to the known issues, additional performance testing is needed. The current list of known issues can be viewed [on GitHub using the `A-pausable-portals` label](https://github.com/cockroachdb/cockroach/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc++label%3AA-pausable-portals+), and we welcome bug reports. +In addition to the known issues, additional performance testing is needed. 
The current list of known issues can be viewed on GitHub using the `A-pausable-portals` label, and we welcome bug reports. ## Features that differ from PostgreSQL @@ -181,7 +181,7 @@ An `x` value less than `1` would result in the following error: pq: check constraint violated ~~~ -[#35370](https://github.com/cockroachdb/cockroach/issues/35370) +Issue #35370 ### Column name from an outer column inside a subquery @@ -207,7 +207,7 @@ PostgreSQL: 1 ~~~ -[#46563](https://github.com/cockroachdb/cockroach/issues/46563) +Issue #46563 ### SQL Compatibility diff --git a/src/current/v24.3/query-spatial-data.md b/src/current/v24.3/query-spatial-data.md index 678918809cc..9e8211cbc3a 100644 --- a/src/current/v24.3/query-spatial-data.md +++ b/src/current/v24.3/query-spatial-data.md @@ -24,7 +24,7 @@ Just as CockroachDB strives for [PostgreSQL compatibility]({% link {{ page.versi CockroachDB does not implement the full list of PostGIS built-in functions and operators. Also, [spatial indexing works differently]({% link {{ page.version.version }}/spatial-indexes.md %}) (see the [Performance](#performance) section below). For a list of the spatial functions CockroachDB supports, see [Geospatial functions]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions). -If your application needs support for functions that are not yet implemented, check the [meta-issue for built-in function support on GitHub](https://github.com/cockroachdb/cockroach/issues/49203), which describes how to find an issue for the built-in function(s) you need. +If your application needs support for functions that are not yet implemented, check the meta-issue for built-in function support on GitHub, which describes how to find an issue for the built-in function(s) you need. For a list of other known limitations, see [Known Limitations]({% link {{ page.version.version }}/known-limitations.md %}#spatial-support-limitations). 
diff --git a/src/current/v24.3/read-committed.md b/src/current/v24.3/read-committed.md index 6b83f50d88d..0858d1e0244 100644 --- a/src/current/v24.3/read-committed.md +++ b/src/current/v24.3/read-committed.md @@ -20,7 +20,7 @@ Whereas `SERIALIZABLE` isolation guarantees data correctness by placing transact If your workload is already running well under `SERIALIZABLE` isolation, Cockroach Labs does not recommend changing to `READ COMMITTED` isolation unless there is a specific need. {{site.data.alerts.callout_info}} -`READ COMMITTED` on CockroachDB provides stronger isolation than `READ COMMITTED` on PostgreSQL. On CockroachDB, `READ COMMITTED` prevents anomalies within single statements. For complete details on how `READ COMMITTED` is implemented on CockroachDB, see the [Read Committed RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20230122_read_committed_isolation.md). +`READ COMMITTED` on CockroachDB provides stronger isolation than `READ COMMITTED` on PostgreSQL. On CockroachDB, `READ COMMITTED` prevents anomalies within single statements. For complete details on how `READ COMMITTED` is implemented on CockroachDB, see the [Read Committed RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20230122_read_committed_isolation.md). {{site.data.alerts.end}} ## Enable `READ COMMITTED` isolation @@ -918,5 +918,5 @@ SELECT * FROM schedules - [`SELECT ... 
FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) - [Serializable Transactions]({% link {{ page.version.version }}/demo-serializable.md %}) - [What Write Skew Looks Like](https://www.cockroachlabs.com/blog/what-write-skew-looks-like/) -- [Read Committed RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20230122_read_committed_isolation.md) +- [Read Committed RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20230122_read_committed_isolation.md) - [Migration Overview]({% link molt/migration-overview.md %}) diff --git a/src/current/v24.3/restore.md b/src/current/v24.3/restore.md index dbd4512c271..5a2c1f5bc0f 100644 --- a/src/current/v24.3/restore.md +++ b/src/current/v24.3/restore.md @@ -219,11 +219,11 @@ When restoring an individual table that references a user-defined type (e.g., [` - If there is an existing type in the cluster with the same name that is compatible with the type in the backup, CockroachDB will map the type in the backup to the type in the cluster. - If there is an existing type in the cluster with the same name but it is _not_ compatible with the type in the backup, the restore will not succeed and you will be asked to resolve the naming conflict. You can do this by either [dropping]({% link {{ page.version.version }}/drop-type.md %}) or [renaming]({% link {{ page.version.version }}/alter-type.md %}) the existing user-defined type. -In general, two types are compatible if they are the same kind (e.g., an enum is only compatible with other enums). Additionally, enums are only compatible if they have the same ordered set of elements that have also been [created in the same way](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20200331_enums.md#physical-layout). For example: +In general, two types are compatible if they are the same kind (e.g., an enum is only compatible with other enums). 
Additionally, enums are only compatible if they have the same ordered set of elements that have also been [created in the same way](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20200331_enums.md#physical-layout). For example: - `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes', 'no')` are compatible. - `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('no', 'yes')` are not compatible. -- `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes'); ALTER TYPE t2 ADD VALUE ('no')` are not compatible because they were not created in the same way. +- `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes'); ALTER TYPE t2 ADD VALUE ('no')` are not compatible because they were not [created in the same way](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20200331_enums.md#physical-layout). ### Object dependencies diff --git a/src/current/v24.3/schedule-cockroachdb-kubernetes.md b/src/current/v24.3/schedule-cockroachdb-kubernetes.md index 973d234ffa9..de53f7c8449 100644 --- a/src/current/v24.3/schedule-cockroachdb-kubernetes.md +++ b/src/current/v24.3/schedule-cockroachdb-kubernetes.md @@ -108,7 +108,7 @@ For more context on how these rules work, see the [Kubernetes documentation](htt Specify pod affinities and anti-affinities in `affinity.podAffinity` and `affinity.podAntiAffinity` in the Operator's custom resource, which is used to [deploy the cluster]({% link {{ page.version.version }}/deploy-cockroachdb-with-kubernetes.md %}#initialize-the-cluster). If you specify multiple `matchExpressions` labels, the node must match all of them. If you specify multiple `values` for a label, the node can match any of the values. 
-The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. +The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. {% include_cached copy-clipboard.html %} ~~~ yaml diff --git a/src/current/v24.3/spatial-tutorial.md b/src/current/v24.3/spatial-tutorial.md index bf735930eab..7a313dee0d0 100644 --- a/src/current/v24.3/spatial-tutorial.md +++ b/src/current/v24.3/spatial-tutorial.md @@ -966,7 +966,7 @@ Time: 1.447s total (execution 1.446s / network 0.000s) Unfortunately, this query is a bit slower than you would like: about 1.5 seconds on a single-node [`cockroach demo`]({% link {{ page.version.version }}/cockroach-demo.md %}) cluster on a laptop. There are several reasons for this: 1. You haven't created any indexes at all yet. The query is likely to be doing full table scans, which you will need to hunt down with [`EXPLAIN`]({% link {{ page.version.version }}/explain.md %}). -1. CockroachDB does not yet have built-in support for index-based nearest neighbor queries. If this feature is important to you, please comment with some information about your use case on [cockroachdb/cockroach#55227](https://github.com/cockroachdb/cockroach/issues/55227). +1. CockroachDB does not yet have built-in support for index-based nearest neighbor queries. If this feature is important to you, please comment with some information about your use case on cockroachdb/cockroach#55227. 
Let's look at the `EXPLAIN` output to see if there is something that can be done to improve this query's performance: diff --git a/src/current/v24.3/sql-feature-support.md b/src/current/v24.3/sql-feature-support.md index df9fa0711d8..d51a2393894 100644 --- a/src/current/v24.3/sql-feature-support.md +++ b/src/current/v24.3/sql-feature-support.md @@ -194,4 +194,4 @@ XML | ✗ | Standard | XML data can be stored as `BYTES`, but we do not offer XM Triggers | Partial | Standard | [Triggers documentation]({% link {{ page.version.version }}/triggers.md %}) Row-level TTL | ✓ | Common Extension | Automatically delete expired rows. For more information, see [Batch-delete expired data with Row-Level TTL]({% link {{ page.version.version }}/row-level-ttl.md %}). User-defined functions | Partial | Standard | [User-Defined Functions documentation]({% link {{ page.version.version }}/user-defined-functions.md %}) - `CREATE EXTENSION "uuid-ossp"` | ✓ | Common Extension | Provides access to several additional [UUID generation functions]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions). Note that these UUID functions are available without typing `CREATE EXTENSION "uuid-ossp"`. CockroachDB does not have full support for `CREATE EXTENSION`. [GitHub issue tracking `CREATE EXTENSION` support](https://github.com/cockroachdb/cockroach/issues/74777). + `CREATE EXTENSION "uuid-ossp"` | ✓ | Common Extension | Provides access to several additional [UUID generation functions]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions). Note that these UUID functions are available without typing `CREATE EXTENSION "uuid-ossp"`. CockroachDB does not have full support for `CREATE EXTENSION`. There is a GitHub issue tracking `CREATE EXTENSION` support. 
diff --git a/src/current/v24.3/sql-name-resolution.md b/src/current/v24.3/sql-name-resolution.md index 554a9a65583..9600bfe1ef7 100644 --- a/src/current/v24.3/sql-name-resolution.md +++ b/src/current/v24.3/sql-name-resolution.md @@ -37,7 +37,7 @@ If you are upgrading to {{ page.version.version }}, take any combination of the - [Create new schemas]({% link {{ page.version.version }}/create-schema.md %}) in databases on your cluster. After the schemas are created, use [`ALTER TABLE ... RENAME`]({% link {{ page.version.version }}/alter-table.md %}#rename-to), [`ALTER SEQUENCE ... RENAME`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE ... RENAME`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW ... RENAME`]({% link {{ page.version.version }}/alter-view.md %}) statements to move objects between databases as needed. To move objects between schemas, use [`ALTER TABLE ... SET SCHEMA`]({% link {{ page.version.version }}/alter-table.md %}#set-schema), [`ALTER SEQUENCE ... SET SCHEMA`]({% link {{ page.version.version }}/alter-sequence.md %}), or [`ALTER VIEW ... SET SCHEMA`]({% link {{ page.version.version }}/alter-view.md %}). -- If your cluster contains cross-database references (e.g., a cross-database foreign key reference, or a cross-database view reference), use the relevant [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}), [`ALTER SEQUENCE`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW `]({% link {{ page.version.version }}/alter-view.md %}) statements to move any cross-referencing objects to the same database, but different schemas. Cross-database object references were allowed in earlier versions of CockroachDB to make database-object naming hierarchies more flexible for users. 
In v20.2, creating cross-database references are disabled for [foreign keys](foreign-key.html), [views]({% link {{ page.version.version }}/views.md %}), and [sequence ownership]({% link {{ page.version.version }}/create-sequence.md %}). For details, see [tracking issue](https://github.com/cockroachdb/cockroach/issues/55791). +- If your cluster contains cross-database references (e.g., a cross-database foreign key reference, or a cross-database view reference), use the relevant [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}), [`ALTER SEQUENCE`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW `]({% link {{ page.version.version }}/alter-view.md %}) statements to move any cross-referencing objects to the same database, but different schemas. Cross-database object references were allowed in earlier versions of CockroachDB to make database-object naming hierarchies more flexible for users. In v20.2, creating cross-database references are disabled for [foreign keys](foreign-key.html), [views]({% link {{ page.version.version }}/views.md %}), and [sequence ownership]({% link {{ page.version.version }}/create-sequence.md %}). For details, see the tracking issue. ## How name resolution works diff --git a/src/current/v24.3/st_union.md b/src/current/v24.3/st_union.md index 9ee0110f404..ca36102845d 100644 --- a/src/current/v24.3/st_union.md +++ b/src/current/v24.3/st_union.md @@ -12,7 +12,7 @@ Given a set of shapes (e.g., from a [selection query]({% link {{ page.version.ve - [`GEOMETRY`]({% link {{ page.version.version }}/architecture/glossary.md %}#geometry) {{site.data.alerts.callout_info}} -The non-aggregate version of `ST_Union` is not yet implemented. For more information, see [cockroach#49064](https://github.com/cockroachdb/cockroach/issues/49064). +The non-aggregate version of `ST_Union` is not yet implemented. For more information, see cockroach#49064. 
{{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v24.3/temporary-tables.md b/src/current/v24.3/temporary-tables.md index 3a92fc64c97..21af0938cd2 100644 --- a/src/current/v24.3/temporary-tables.md +++ b/src/current/v24.3/temporary-tables.md @@ -10,7 +10,7 @@ docs_area: develop To create a temp table, add `TEMP`/`TEMPORARY` to a [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}) or [`CREATE TABLE AS`]({% link {{ page.version.version }}/create-table-as.md %}) statement. For full syntax details, see the [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}#synopsis) and [`CREATE TABLE AS`]({% link {{ page.version.version }}/create-table-as.md %}#synopsis) pages. For example usage, see [Examples](#examples). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v24.3/views.md b/src/current/v24.3/views.md index 1b703728bcf..ce63d6cd0f5 100644 --- a/src/current/v24.3/views.md +++ b/src/current/v24.3/views.md @@ -634,7 +634,7 @@ To speed up queries on materialized views, you can add an [index]({% link {{ pag CockroachDB supports session-scoped temporary views. Unlike persistent views, temporary views can only be accessed from the session in which they were created, and they are dropped at the end of the session. You can create temporary views on both persistent tables and [temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). 
+{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v25.1/admission-control.md b/src/current/v25.1/admission-control.md index 83a7f42d689..2bb16075f5c 100644 --- a/src/current/v25.1/admission-control.md +++ b/src/current/v25.1/admission-control.md @@ -22,7 +22,7 @@ Admission control works on a per-[node]({% link {{ page.version.version }}/archi For more details about how the admission control system works, see: -- The [Admission Control tech note](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/admission_control.md). +- The [Admission Control tech note](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/tech-notes/admission_control.md). - The blog post [Here's how CockroachDB keeps your database from collapsing under load](https://www.cockroachlabs.com/blog/admission-control-in-cockroachdb/). ## Use cases for admission control @@ -167,6 +167,6 @@ The [DB Console Overload dashboard]({% link {{ page.version.version }}/ui-overlo ## See also - The [Overload Dashboard]({% link {{ page.version.version }}/ui-overload-dashboard.md %}) in the [DB Console]({% link {{ page.version.version }}/ui-overview.md %}). -- The [technical note for admission control](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/admission_control.md) for details on the design of the admission control system. +- The technical note for admission control, which describes the design of the admission control system. - The blog post [Here's how CockroachDB keeps your database from collapsing under load](https://www.cockroachlabs.com/blog/admission-control-in-cockroachdb/). - The blog post [Rubbing Control Theory on the Go scheduler](https://www.cockroachlabs.com/blog/rubbing-control-theory/). 
diff --git a/src/current/v25.1/architecture/sql-layer.md b/src/current/v25.1/architecture/sql-layer.md index bfb67a48c05..2605879b93d 100644 --- a/src/current/v25.1/architecture/sql-layer.md +++ b/src/current/v25.1/architecture/sql-layer.md @@ -117,7 +117,7 @@ It's also important––for indexed columns––that this byte encoding preser However, for non-indexed columns (e.g., non-`PRIMARY KEY` columns), CockroachDB instead uses an encoding (known as "value encoding") which consumes less space but does not preserve ordering. -You can find more exhaustive detail in the [Encoding Tech Note](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/encoding.md). +You can find more exhaustive detail in [the Encoding Tech Note](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/tech-notes/encoding.md). ### DistSQL @@ -134,7 +134,7 @@ To run SQL statements in a distributed fashion, we introduce a couple of concept - **Logical plan**: Similar to the AST/`planNode` tree described above, it represents the abstract (non-distributed) data flow through computation stages. - **Physical plan**: A physical plan is conceptually a mapping of the logical plan nodes to physical machines running `cockroach`. Logical plan nodes are replicated and specialized depending on the cluster topology. Like `planNodes` above, these components of the physical plan are scheduled and run on the cluster. -You can find much greater detail in the [DistSQL RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20160421_distributed_sql.md). +You can find much greater detail in [the DistSQL RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20160421_distributed_sql.md). 
## Schema changes diff --git a/src/current/v25.1/cluster-settings.md b/src/current/v25.1/cluster-settings.md index 63169bea6cf..775dca5445c 100644 --- a/src/current/v25.1/cluster-settings.md +++ b/src/current/v25.1/cluster-settings.md @@ -26,7 +26,7 @@ These cluster settings have a broad impact on CockroachDB internals and affect a {% include {{page.version.version}}/sql/sql-defaults-cluster-settings-deprecation-notice.md %} -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/settings/settings.html %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/settings/settings.html{% endcapture %}{% include {{ cockroach_include }} %} ## View current cluster settings diff --git a/src/current/v25.1/cluster-setup-troubleshooting.md b/src/current/v25.1/cluster-setup-troubleshooting.md index 35dfd522e9f..cd71e263182 100644 --- a/src/current/v25.1/cluster-setup-troubleshooting.md +++ b/src/current/v25.1/cluster-setup-troubleshooting.md @@ -104,7 +104,7 @@ If you are trying to run all nodes on the same machine, you might get the follow #### Store directory already exists ~~~ -ERROR: could not cleanup temporary directories from record file: could not lock temporary directory /Users/amruta/go/src/github.com/cockroachdb/cockroach/cockroach-data/cockroach-temp301343769, may still be in use: IO error: While lock file: /Users/amruta/go/src/github.com/cockroachdb/cockroach/cockroach-data/cockroach-temp301343769/TEMP_DIR.LOCK: Resource temporarily unavailable +ERROR: could not cleanup temporary directories from record file: could not lock temporary directory /cockroach-data/cockroach-temp301343769, may still be in use: IO error: While lock file: /cockroach-data/cockroach-temp301343769/TEMP_DIR.LOCK: Resource temporarily unavailable ~~~ **Explanation:** When starting a new node on the same machine, the directory you choose to store the data in also contains metadata 
identifying the cluster the data came from. This causes conflicts when you've already started a node on the server and then tried to start another cluster using the same directory. diff --git a/src/current/v25.1/cockroachdb-feature-availability.md b/src/current/v25.1/cockroachdb-feature-availability.md index 8248a4f9a84..e7105bba42c 100644 --- a/src/current/v25.1/cockroachdb-feature-availability.md +++ b/src/current/v25.1/cockroachdb-feature-availability.md @@ -169,7 +169,7 @@ Example: ### Check for constraint violations with `SCRUB` -Checks the consistency of [`UNIQUE`]({% link {{ page.version.version }}/unique.md %}) indexes, [`CHECK`]({% link {{ page.version.version }}/check.md %}) constraints, and more. Partially implemented; see [cockroachdb/cockroach#10425](https://github.com/cockroachdb/cockroach/issues/10425) for details. +Checks the consistency of [`UNIQUE`]({% link {{ page.version.version }}/unique.md %}) indexes, [`CHECK`]({% link {{ page.version.version }}/check.md %}) constraints, and more. Partially implemented; see cockroachdb/cockroach#10425 for details. {{site.data.alerts.callout_info}} This example uses the `users` table from our open-source, fictional peer-to-peer vehicle-sharing application, [MovR]({% link {{ page.version.version }}/movr.md %}). @@ -200,7 +200,7 @@ The [`SHOW RANGE ... FOR ROW`]({% link {{ page.version.version }}/show-range-for ### Temporary objects -[Temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}), [temporary views]({% link {{ page.version.version }}/views.md %}#temporary-views), and [temporary sequences]({% link {{ page.version.version }}/create-sequence.md %}#temporary-sequences) are in preview in CockroachDB. If you create too many temporary objects in a session, the performance of DDL operations will degrade. 
Dropping large numbers of temporary objects in rapid succession can also enqueue many [schema change GC jobs]({% link {{ page.version.version }}/show-jobs.md %}), which may further degrade cluster performance. This performance degradation could persist long after creating the temporary objects. For more details, see [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +[Temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}), [temporary views]({% link {{ page.version.version }}/views.md %}#temporary-views), and [temporary sequences]({% link {{ page.version.version }}/create-sequence.md %}#temporary-sequences) are in preview in CockroachDB. If you create too many temporary objects in a session, the performance of DDL operations will degrade. Dropping large numbers of temporary objects in rapid succession can also enqueue many [schema change GC jobs]({% link {{ page.version.version }}/show-jobs.md %}), which may further degrade cluster performance. This performance degradation could persist long after creating the temporary objects. For more details, see cockroachdb/cockroach#46260. To enable temporary objects, set the `experimental_enable_temp_tables` [session variable]({% link {{ page.version.version }}/show-vars.md %}) to `on`. diff --git a/src/current/v25.1/common-table-expressions.md b/src/current/v25.1/common-table-expressions.md index a76f13ff39d..bb43aa48b0c 100644 --- a/src/current/v25.1/common-table-expressions.md +++ b/src/current/v25.1/common-table-expressions.md @@ -446,7 +446,7 @@ SELECT COUNT(*) FROM temp; Because this pattern incurs the overhead of a new scan for each iteration, it is slower per row than a full scan. It is therefore faster than a full scan in cases (such as this one) where many rows are skipped, but is slower if they are not. {{site.data.alerts.callout_info}} -Some recursive CTEs are not not yet optimized. 
For details, see the [tracking issue](https://github.com/cockroachdb/cockroach/issues/89954). +Some recursive CTEs are not yet optimized. For details, see the tracking issue. {{site.data.alerts.end}} ## Correlated common table expressions diff --git a/src/current/v25.1/configure-replication-zones.md b/src/current/v25.1/configure-replication-zones.md index ff025802628..5a8c8cfa16f 100644 --- a/src/current/v25.1/configure-replication-zones.md +++ b/src/current/v25.1/configure-replication-zones.md @@ -98,7 +98,7 @@ For more information, see the following subsections: The hierarchy of inheritance for zone configs can be visualized using the following outline-style diagram, in which each level of indentation denotes an inheritance relationship. -The only exception to this simple inheritance relationship is that due to a known limitation, sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information, see [cockroachdb/cockroach#75862](https://github.com/cockroachdb/cockroach/issues/75862). +The only exception to this simple inheritance relationship is that due to a known limitation, sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information, see cockroachdb/cockroach#75862. ``` - default @@ -127,7 +127,7 @@ From the whole-system perspective, the hierarchy of schema object zone configs c The following diagram presents the same set of schema objects as the previous outline-style diagram, but using boxes and lines joined with arrows that represent the "top-down" view. -Each box represents a schema object in the zone configuration inheritance hierarchy. Each solid line ends in an arrow that points from a parent object to its child object, which will inherit the parent's values unless those values are changed at the child level. 
The dotted lines between partitions and sub-partitions represent the known limitation mentioned previously that sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information about this limitation, see [cockroachdb/cockroach#75862](https://github.com/cockroachdb/cockroach/issues/75862). +Each box represents a schema object in the zone configuration inheritance hierarchy. Each solid line ends in an arrow that points from a parent object to its child object, which will inherit the parent's values unless those values are changed at the child level. The dotted lines between partitions and sub-partitions represent the known limitation mentioned previously that sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information about this limitation, see cockroachdb/cockroach#75862. zone config inheritance diagram diff --git a/src/current/v25.1/create-sequence.md b/src/current/v25.1/create-sequence.md index f623b19c6db..8ac712d322e 100644 --- a/src/current/v25.1/create-sequence.md +++ b/src/current/v25.1/create-sequence.md @@ -58,7 +58,7 @@ CockroachDB supports the following [SQL sequence functions]({% link {{ page.vers CockroachDB supports session-scoped temporary sequences. Unlike persistent sequences, temporary sequences can only be accessed from the session in which they were created, and they are dropped at the end of the session. You can create temporary sequences on both persistent tables and [temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. 
{{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v25.1/create-table.md b/src/current/v25.1/create-table.md index d1a4c389267..d6738c3e0a0 100644 --- a/src/current/v25.1/create-table.md +++ b/src/current/v25.1/create-table.md @@ -155,7 +155,7 @@ If you use `GENERATED BY DEFAULT AS IDENTITY` to define the identity column, any Note the following limitations of identity columns: -- `GENERATED ALWAYS AS IDENTITY`/`GENERATED BY DEFAULT AS IDENTITY` is supported in [`ALTER TABLE ... ADD COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#add-column) statements only when the table being altered is empty, as [CockroachDB does not support back-filling sequential column data]({% link {{ page.version.version }}/known-limitations.md %}#adding-a-column-with-sequence-based-default-values). For more information, see [#42508](https://github.com/cockroachdb/cockroach/issues/42508). +- `GENERATED ALWAYS AS IDENTITY`/`GENERATED BY DEFAULT AS IDENTITY` is supported in [`ALTER TABLE ... ADD COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#add-column) statements only when the table being altered is empty, as [CockroachDB does not support back-filling sequential column data]({% link {{ page.version.version }}/known-limitations.md %}#adding-a-column-with-sequence-based-default-values). For more information, see #42508. - Unlike PostgreSQL, CockroachDB does not support using the `OVERRIDING SYSTEM VALUE` clause in `INSERT`/`UPDATE`/`UPSERT` statements to overwrite `GENERATED ALWAYS AS IDENTITY` identity column values. For an example of an identity column, see [Create a table with an identity column](#create-a-table-with-an-identity-column). 
diff --git a/src/current/v25.1/disaster-recovery-planning.md b/src/current/v25.1/disaster-recovery-planning.md index ae6bdbc5f1b..bcc6a4a29e3 100644 --- a/src/current/v25.1/disaster-recovery-planning.md +++ b/src/current/v25.1/disaster-recovery-planning.md @@ -320,7 +320,7 @@ If your cluster is running, you do not have a backup that encapsulates the time If you have corrupted data in a database or table, [restore]({% link {{ page.version.version }}/restore.md %}) the object from a prior [backup]({% link {{ page.version.version }}/backup.md %}). If revision history is in the backup, you can restore from a [point in time]({% link {{ page.version.version }}/take-backups-with-revision-history-and-restore-from-a-point-in-time.md %}). -Instead of dropping the corrupted table or database, we recommend [renaming the table]({% link {{ page.version.version }}/alter-table.md %}#rename-to) or [renaming the database]({% link {{ page.version.version }}/alter-database.md %}#rename-to) so you have historical data to compare to later. If you drop a database, the database cannot be referenced with `AS OF SYSTEM TIME` queries (see [#51380](https://github.com/cockroachdb/cockroach/issues/51380) for more information), and you will need to take a backup that is backdated to the system time when the database still existed. +Instead of dropping the corrupted table or database, we recommend [renaming the table]({% link {{ page.version.version }}/alter-table.md %}#rename-to) or [renaming the database]({% link {{ page.version.version }}/alter-database.md %}#rename-to) so you have historical data to compare to later. If you drop a database, the database cannot be referenced with `AS OF SYSTEM TIME` queries (see #51380 for more information), and you will need to take a backup that is backdated to the system time when the database still existed. 
{{site.data.alerts.callout_info}} If the table you are restoring has foreign keys, [careful consideration]({% link {{ page.version.version }}/restore.md %}#remove-the-foreign-key-before-restore) should be applied to make sure data integrity is maintained during the restore process. diff --git a/src/current/v25.1/eventlog.md b/src/current/v25.1/eventlog.md index 2dd3c072d82..e7b813ded9d 100644 --- a/src/current/v25.1/eventlog.md +++ b/src/current/v25.1/eventlog.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/eventlog.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/eventlog.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v25.1/example-apps.md b/src/current/v25.1/example-apps.md index bea3cfd1876..0d5ba010933 100644 --- a/src/current/v25.1/example-apps.md +++ b/src/current/v25.1/example-apps.md @@ -14,7 +14,7 @@ Click the links in the tables below to see simple but complete example applicati If you are looking to do a specific task such as connect to the database, insert data, or run multi-statement transactions, see [this list of tasks](#tasks). {{site.data.alerts.callout_info}} -Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM with **partial** support. If you encounter problems, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward full support. +Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM with **partial** support. If you encounter problems, please open an issue with details to help us make progress toward full support. 
Note that tools with [**community-level** support]({% link {{ page.version.version }}/community-tooling.md %}) have been tested or developed by the CockroachDB community, but are not officially supported by Cockroach Labs. If you encounter problems with using these tools, please contact the maintainer of the tool with details. {{site.data.alerts.end}} diff --git a/src/current/v25.1/file-an-issue.md b/src/current/v25.1/file-an-issue.md index 9255e2c08b2..760c5b181db 100644 --- a/src/current/v25.1/file-an-issue.md +++ b/src/current/v25.1/file-an-issue.md @@ -28,7 +28,7 @@ To file an issue in GitHub, we need the following information: ### Template -You can use this as a template for [filing an issue in GitHub](https://github.com/cockroachdb/cockroach/issues/new): +You can use this as a template for filing an issue in GitHub: ~~~ diff --git a/src/current/v25.1/fips.md b/src/current/v25.1/fips.md index e490ddcd296..1ca62a6fa8e 100644 --- a/src/current/v25.1/fips.md +++ b/src/current/v25.1/fips.md @@ -58,7 +58,7 @@ A system must have FIPS mode enabled in the kernel before it can run the FIPS-re If you do not want to use the FIPS-ready CockroachDB Docker image directly, you can create a custom Docker image based on [Red Hat's Universal Base Image 8 Docker image](https://catalog.redhat.com/software/containers/ubi8/ubi/): -- You can model your Dockerfile on the one that Cockroach Labs uses to produce the [FIPS-ready Docker image](https://github.com/cockroachdb/cockroach/blob/master/build/deploy/Dockerfile) for CockroachDB. +- You can model your Dockerfile on the one that Cockroach Labs uses to produce the FIPS-ready Docker image for CockroachDB. - Your Dockerfile must install OpenSSL before it starts the `cockroach` binary. - You must enable FIPS mode on the Docker host kernel before it can run containers with FIPS mode enabled. The FIPS-ready CockroachDB Docker image must run with FIPS mode enabled. 
To enable FIPS mode in the Docker host kernel, refer to [Enable FIPS mode](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/security_hardening/assembly_installing-a-rhel-8-system-with-fips-mode-enabled_security-hardening) in Red Hat's documentation. To verify that FIPS mode is enabled, refer to [Verify that the kernel enforces FIPS mode](#verify-that-the-kernel-enforces-fips-mode). diff --git a/src/current/v25.1/foreign-key.md b/src/current/v25.1/foreign-key.md index 55677511e3d..7b06a11d484 100644 --- a/src/current/v25.1/foreign-key.md +++ b/src/current/v25.1/foreign-key.md @@ -93,7 +93,7 @@ For matching purposes, composite foreign keys can be in one of three states: For examples showing how these key matching algorithms work, see [Match composite foreign keys with `MATCH SIMPLE` and `MATCH FULL`](#match-composite-foreign-keys-with-match-simple-and-match-full). {{site.data.alerts.callout_info}} -CockroachDB does not support `MATCH PARTIAL`. For more information, see issue [#20305](https://github.com/cockroachdb/cockroach/issues/20305). +CockroachDB does not support `MATCH PARTIAL`. For more information, see issue #20305. 
{{site.data.alerts.end}} ### Foreign key actions diff --git a/src/current/v25.1/functions-and-operators.md b/src/current/v25.1/functions-and-operators.md index 550ba99eb0a..cd841e81d36 100644 --- a/src/current/v25.1/functions-and-operators.md +++ b/src/current/v25.1/functions-and-operators.md @@ -51,7 +51,7 @@ In addition to the built-in functions described in the following sections, Cockr ## Built-in functions -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/functions.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/functions.md{% endcapture %}{% include {{ cockroach_include }} %} ## Aggregate functions @@ -61,11 +61,11 @@ For examples showing how to use aggregate functions, see [the `SELECT` clause do Non-commutative aggregate functions are sensitive to the order in which the rows are processed in the surrounding [`SELECT` clause]({% link {{ page.version.version }}/select-clause.md %}#aggregate-functions). To specify the order in which input rows are processed, you can add an [`ORDER BY`]({% link {{ page.version.version }}/order-by.md %}) clause within the function argument list. For examples, see the [`SELECT` clause]({% link {{ page.version.version }}/select-clause.md %}#order-aggregate-function-input-rows-by-column) documentation. 
{{site.data.alerts.end}} -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/aggregates.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/aggregates.md{% endcapture %}{% include {{ cockroach_include }} %} ## Window functions -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/window_functions.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/window_functions.md{% endcapture %}{% include {{ cockroach_include }} %} ## Operators @@ -137,7 +137,7 @@ The following table lists all CockroachDB operators from highest to lowest prece ### Supported operations -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/operators.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/operators.md{% endcapture %}{% include {{ cockroach_include }} %} {% comment %} ## `CAST()` diff --git a/src/current/v25.1/install-client-drivers.md b/src/current/v25.1/install-client-drivers.md index 984671a0dfc..4d919756ce3 100644 --- a/src/current/v25.1/install-client-drivers.md +++ b/src/current/v25.1/install-client-drivers.md @@ -8,7 +8,7 @@ docs_area: develop CockroachDB supports both native drivers and the PostgreSQL wire protocol, so most available PostgreSQL client drivers and ORM frameworks should work with CockroachDB. Choose a language for supported clients, and follow the installation steps. After you install a client library, you can [connect to the database]({% link {{ page.version.version }}/connect-to-the-database.md %}). {{site.data.alerts.callout_info}} -Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM framework with **partial** support. 
If you encounter problems, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward full support. +Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM framework with **partial** support. If you encounter problems, please open an issue with details to help us make progress toward full support. {{site.data.alerts.end}}
      diff --git a/src/current/v25.1/install-cockroachdb-mac.md b/src/current/v25.1/install-cockroachdb-mac.md index b8e5583b7f0..2cab92da935 100644 --- a/src/current/v25.1/install-cockroachdb-mac.md +++ b/src/current/v25.1/install-cockroachdb-mac.md @@ -201,4 +201,4 @@ CockroachDB runtimes built for the ARM architecture have the following limitatio {% comment %}v22.2.0+{% endcomment %} -On macOS ARM systems, [spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the GEOS libraries. Users needing spatial features on an ARM Mac may instead [run the Intel binary](#install-the-binary) or use the[Docker container image](#use-docker). Refer to [GitHub issue #93161](https://github.com/cockroachdb/cockroach/issues/93161) for more information. +On macOS ARM systems, [spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the GEOS libraries. Users needing spatial features on an ARM Mac may instead [run the Intel binary](#install-the-binary) or use the [Docker container image](#use-docker). Refer to GitHub issue #93161 for more information.
diff --git a/src/current/v25.1/intellij-idea.md b/src/current/v25.1/intellij-idea.md index de46249320f..ef1ce93b9d4 100644 --- a/src/current/v25.1/intellij-idea.md +++ b/src/current/v25.1/intellij-idea.md @@ -81,7 +81,7 @@ You can now use IntelliJ's [database tool window](https://www.jetbrains.com/help ## Report issues with IntelliJ IDEA & CockroachDB -If you encounter issues other than those outlined above, please [file an issue on the `cockroachdb/cockroach` GitHub repo](https://github.com/cockroachdb/cockroach/issues/new?template=bug_report.md), including the following details about the environment where you encountered the issue: +If you encounter issues other than those outlined above, please file an issue on the `cockroachdb/cockroach` GitHub repo, including the following details about the environment where you encountered the issue: - CockroachDB version ([`cockroach version`]({% link {{ page.version.version }}/cockroach-version.md %})) - IntelliJ IDEA version diff --git a/src/current/v25.1/known-limitations.md b/src/current/v25.1/known-limitations.md index 4f76260d1aa..338f87b2fee 100644 --- a/src/current/v25.1/known-limitations.md +++ b/src/current/v25.1/known-limitations.md @@ -49,7 +49,7 @@ CockroachDB supports the [PostgreSQL wire protocol](https://www.postgresql.org/d #### `CAST` expressions containing a subquery with an `ENUM` target are not supported -Casting subqueries to ENUMs in views and UDFs is not supported. [#108184](https://github.com/cockroachdb/cockroach/issues/108184) +Casting subqueries to ENUMs in views and UDFs is not supported. #108184 #### Statements containing multiple modification subqueries of the same table are disallowed @@ -58,13 +58,13 @@ Statements containing multiple modification subqueries mutating the same row cou - Set the `sql.multiple_modifications_of_table.enabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) to `true`. 
- Use the `enable_multiple_modifications_of_table` [session variable]({% link {{ page.version.version }}/set-vars.md %}). -If multiple mutations inside the same statement affect different tables with [`FOREIGN KEY`]({% link {{ page.version.version }}/foreign-key.md %}) relations and `ON CASCADE` clauses between them, the results will be different from what is expected in PostgreSQL. [#70731](https://github.com/cockroachdb/cockroach/issues/70731) +If multiple mutations inside the same statement affect different tables with [`FOREIGN KEY`]({% link {{ page.version.version }}/foreign-key.md %}) relations and `ON CASCADE` clauses between them, the results will be different from what is expected in PostgreSQL. #70731 #### Using `default_int_size` session variable in batch of statements When setting the `default_int_size` [session variable]({% link {{ page.version.version }}/set-vars.md %}) in a batch of statements such as `SET default_int_size='int4'; SELECT 1::IN`, the `default_int_size` variable will not take effect until the next statement. Statement parsing is asynchronous with statement execution. -As a workaround, set `default_int_size` via your database driver, or ensure that `SET default_int_size` is in its own statement. [#32846](https://github.com/cockroachdb/cockroach/issues/32846) +As a workaround, set `default_int_size` via your database driver, or ensure that `SET default_int_size` is in its own statement. #32846 #### Overload resolution for collated strings @@ -91,15 +91,15 @@ Many string operations are not properly overloaded for [collated strings]({% lin pq: unsupported binary operator: || ~~~ -[#10679](https://github.com/cockroachdb/cockroach/issues/10679) +#10679 #### Current sequence value not checked when updating min/max value -Altering the minimum or maximum value of a series does not check the current value of a series. 
This means that it is possible to silently set the maximum to a value less than, or a minimum value greater than, the current value. [#23719](https://github.com/cockroachdb/cockroach/issues/23719) +Altering the minimum or maximum value of a series does not check the current value of a series. This means that it is possible to silently set the maximum to a value less than, or a minimum value greater than, the current value. #23719 #### `null_ordered_last` does not produce correct results with tuples -By default, CockroachDB orders `NULL`s before all other values. For compatibility with PostgreSQL, the `null_ordered_last` [session variable]({% link {{ page.version.version }}/set-vars.md %}) was added, which changes the default to order `NULL` values after all other values. This works in most cases, due to some transformations CockroachDB makes in the optimizer to add extra ordering columns. However, it does not work when the ordering column is a tuple. [#93558](https://github.com/cockroachdb/cockroach/issues/93558) +By default, CockroachDB orders `NULL`s before all other values. For compatibility with PostgreSQL, the `null_ordered_last` [session variable]({% link {{ page.version.version }}/set-vars.md %}) was added, which changes the default to order `NULL` values after all other values. This works in most cases, due to some transformations CockroachDB makes in the optimizer to add extra ordering columns. However, it does not work when the ordering column is a tuple. #93558 ### Functions and procedures @@ -149,10 +149,10 @@ Transactions with [priority `HIGH`]({% link {{ page.version.version }}/transacti ERROR: unimplemented: cannot use ROLLBACK TO SAVEPOINT in a HIGH PRIORITY transaction containing DDL SQLSTATE: 0A000 HINT: You have attempted to use a feature that is not yet implemented. 
-See: https://github.com/cockroachdb/cockroach/issues/46414 +See: cockroachdb/cockroach#46414 ~~~ -[#46414](https://github.com/cockroachdb/cockroach/issues/46414) +#46414 #### `CANCEL JOB` limitations @@ -203,7 +203,7 @@ HINT: You have attempted to use a feature that is not yet implemented. See: https://go.crdb.dev/issue-v/42508/v24.2 ~~~ -[#42508](https://github.com/cockroachdb/cockroach/issues/42508) +#42508 #### Dropping a column referenced by a partial index @@ -230,9 +230,9 @@ When executing an [`ALTER TABLE ADD COLUMN`]({% link {{ page.version.version }}/ Some [schema changes]({% link {{ page.version.version }}/online-schema-changes.md %}) that [drop columns]({% link {{ page.version.version }}/alter-table.md %}#drop-column) cannot be [rolled back]({% link {{ page.version.version }}/rollback-transaction.md %}) properly. -In some cases, the rollback will succeed, but the column data might be partially or totally missing, or stale due to the asynchronous nature of the schema change. [#46541](https://github.com/cockroachdb/cockroach/issues/46541) +In some cases, the rollback will succeed, but the column data might be partially or totally missing, or stale due to the asynchronous nature of the schema change. #46541 -In other cases, the rollback will fail in such a way that will never be cleaned up properly, leaving the table descriptor in a state where no other schema changes can be run successfully. [#47712](https://github.com/cockroachdb/cockroach/issues/47712) +In other cases, the rollback will fail in such a way that will never be cleaned up properly, leaving the table descriptor in a state where no other schema changes can be run successfully.
#47712 To reduce the chance that a column drop will roll back incorrectly: @@ -268,7 +268,7 @@ As a workaround, you can either [manually split a table's columns into multiple #### Placeholders in `PARTITION BY` -{% include {{ page.version.version }}/known-limitations/partitioning-with-placeholders.md %} [#19464](https://github.com/cockroachdb/cockroach/issues/19464) +{% include {{ page.version.version }}/known-limitations/partitioning-with-placeholders.md %} #19464 #### Unsupported trigram syntax @@ -284,7 +284,7 @@ The following PostgreSQL syntax and features are currently unsupported for [full #### CockroachDB does not allow inverted indexes with `STORING` -CockroachDB does not allow inverted indexes with a [`STORING` column]({% link {{ page.version.version }}/create-index.md %}#store-columns). [#88278](https://github.com/cockroachdb/cockroach/issues/88278) +CockroachDB does not allow inverted indexes with a [`STORING` column]({% link {{ page.version.version }}/create-index.md %}#store-columns). #88278 #### Multiple arbiter indexes for `INSERT ON CONFLICT DO UPDATE` @@ -304,19 +304,19 @@ CockroachDB does not allow inverted indexes with a [`STORING` column]({% link {{ CockroachDB supports efficiently storing and querying [spatial data]({% link {{ page.version.version }}/export-spatial-data.md %}), with the following limitations: -- Not all [PostGIS spatial functions](https://postgis.net/docs/reference.html) are supported. [#49203](https://github.com/cockroachdb/cockroach/issues/49203) +- Not all [PostGIS spatial functions](https://postgis.net/docs/reference.html) are supported. #49203 -- The `AddGeometryColumn` [spatial function]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) only allows constant arguments. 
[#49402](https://github.com/cockroachdb/cockroach/issues/49402) +- The `AddGeometryColumn` [spatial function]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) only allows constant arguments. #49402 -- The `AddGeometryColumn` spatial function only allows the `true` value for its `use_typmod` parameter. [#49448](https://github.com/cockroachdb/cockroach/issues/49448) +- The `AddGeometryColumn` spatial function only allows the `true` value for its `use_typmod` parameter. #49448 -- CockroachDB does not support the `@` operator. Instead of using `@` in spatial expressions, we recommend using the inverse, with `~`. For example, instead of `a @ b`, use `b ~ a`. [#56124](https://github.com/cockroachdb/cockroach/issues/56124) +- CockroachDB does not support the `@` operator. Instead of using `@` in spatial expressions, we recommend using the inverse, with `~`. For example, instead of `a @ b`, use `b ~ a`. #56124 -- CockroachDB does not yet support [`INSERT`]({% link {{ page.version.version }}/insert.md %})s into the [`spatial_ref_sys` table]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-system-tables). This limitation also blocks the [`ogr2ogr -f PostgreSQL` file conversion command](https://gdal.org/programs/ogr2ogr.html#cmdoption-ogr2ogr-f). [#55903](https://github.com/cockroachdb/cockroach/issues/55903) +- CockroachDB does not yet support [`INSERT`]({% link {{ page.version.version }}/insert.md %})s into the [`spatial_ref_sys` table]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-system-tables). This limitation also blocks the [`ogr2ogr -f PostgreSQL` file conversion command](https://gdal.org/programs/ogr2ogr.html#cmdoption-ogr2ogr-f). #55903 -- CockroachDB does not yet support [k-nearest neighbors](https://wikipedia.org/wiki/K-nearest_neighbors_algorithm). 
[#55227](https://github.com/cockroachdb/cockroach/issues/55227) +- CockroachDB does not yet support [k-nearest neighbors](https://wikipedia.org/wiki/K-nearest_neighbors_algorithm). #55227 -- CockroachDB does not support using [schema name prefixes]({% link {{ page.version.version }}/sql-name-resolution.md %}#how-name-resolution-works) to refer to [data types]({% link {{ page.version.version }}/data-types.md %}) with type modifiers (e.g., `public.geometry(linestring, 4326)`). Instead, use fully-unqualified names to refer to data types with type modifiers (e.g., `geometry(linestring,4326)`). [#56492](https://github.com/cockroachdb/cockroach/issues/56492) +- CockroachDB does not support using [schema name prefixes]({% link {{ page.version.version }}/sql-name-resolution.md %}#how-name-resolution-works) to refer to [data types]({% link {{ page.version.version }}/data-types.md %}) with type modifiers (e.g., `public.geometry(linestring, 4326)`). Instead, use fully-unqualified names to refer to data types with type modifiers (e.g., `geometry(linestring,4326)`). #56492 - {% include {{ page.version.version }}/known-limitations/srid-4326-limitations.md %} @@ -330,9 +330,9 @@ Refer to [`OID` best practices]({% link {{ page.version.version }}/oid.md %}#bes - {% include {{page.version.version}}/cdc/avro-udt-composite.md %} - {% include {{page.version.version}}/cdc/csv-udt-composite.md %} -- Updating subfields of composite types using dot syntax results in a syntax error. [#102984](https://github.com/cockroachdb/cockroach/issues/102984) +- Updating subfields of composite types using dot syntax results in a syntax error. #102984 -- Tuple elements cannot be accessed without enclosing the [composite variable]({% link {{ page.version.version }}/create-type.md %}#create-a-composite-data-type) name in parentheses. For example, `(OLD).column` and `(NEW).column` when used in [triggers]({% link {{ page.version.version }}/triggers.md %}). 
[#114687](https://github.com/cockroachdb/cockroach/issues/114687) +- Tuple elements cannot be accessed without enclosing the [composite variable]({% link {{ page.version.version }}/create-type.md %}#create-a-composite-data-type) name in parentheses. For example, `(OLD).column` and `(NEW).column` when used in [triggers]({% link {{ page.version.version }}/triggers.md %}). #114687 #### `ALTER TYPE` limitations @@ -376,7 +376,7 @@ However, if there is no host at the target IP address, or if a firewall rule blo - Configure any active network firewalls to allow node-to-node traffic. - Verify that orchestration tools (e.g., Kubernetes) are configured to use the correct network connection information. -[#53410](https://github.com/cockroachdb/cockroach/issues/53410) +#53410 #### No guaranteed state switch from `DECOMMISSIONING` to `DECOMMISSIONED` if `node decommission` is interrupted @@ -385,7 +385,7 @@ There is no guaranteed state switch from `DECOMMISSIONING` to `DECOMMISSIONED` i - The `cockroach node decommission --wait-all` command was run and then interrupted - The `cockroach node decommission --wait=none` command was run -This is because the state flip is effected by the CLI program at the end. Only the CLI (or its underlying API call) is able to finalize the "decommissioned" state. If the command is interrupted, or `--wait=none` is used, the state will only flip to "decommissioned" when the CLI program is run again after decommissioning has done all its work. [#94430](https://github.com/cockroachdb/cockroach/issues/94430) +This is because the state flip is effected by the CLI program at the end. Only the CLI (or its underlying API call) is able to finalize the "decommissioned" state. If the command is interrupted, or `--wait=none` is used, the state will only flip to "decommissioned" when the CLI program is run again after decommissioning has done all its work. 
#94430 #### Simultaneous client connections and running queries on a single node @@ -421,7 +421,7 @@ As a workaround, [execute the file from the command line]({% link {{ page.versio #### Spatial features disabled for ARM Macs -[Spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the [GEOS](https://libgeos.org/) libraries. Users needing spatial features on an ARM Mac may instead [use Rosetta](https://developer.apple.com/documentation/virtualization/running_intel_binaries_in_linux_vms_with_rosetta) to [run the Intel binary]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#install-binary) or use the [Docker image]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#use-docker) distribution. [GitHub tracking issue](https://github.com/cockroachdb/cockroach/issues/93161) +[Spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the [GEOS](https://libgeos.org/) libraries. Users needing spatial features on an ARM Mac may instead [use Rosetta](https://developer.apple.com/documentation/virtualization/running_intel_binaries_in_linux_vms_with_rosetta) to [run the Intel binary]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#install-binary) or use the [Docker image]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#use-docker) distribution. GitHub tracking issue #93161 #### Logging system limitations @@ -466,7 +466,7 @@ Accessing the DB Console for a secure cluster now requires login information (i. The [`COMMENT ON`]({% link {{ page.version.version }}/comment-on.md %}) statement associates comments to databases, tables, or columns. However, the internal table (`system.comments`) in which these comments are stored is not captured by a [`BACKUP`]({% link {{ page.version.version }}/backup.md %}) of a table or database. 
-As a workaround, take a cluster backup instead, as the `system.comments` table is included in cluster backups. [#44396](https://github.com/cockroachdb/cockroach/issues/44396) +As a workaround, take a cluster backup instead, as the `system.comments` table is included in cluster backups. #44396 #### `SHOW BACKUP` does not support symlinks for nodelocal @@ -507,8 +507,8 @@ Change data capture (CDC) provides efficient, distributed, row-level changefeeds The SQL optimizer has limitations under certain isolation levels: -- The new implementation of `SELECT FOR UPDATE` is not yet the default setting under `SERIALIZABLE` isolation. It can be used under `SERIALIZABLE` isolation by setting the `optimizer_use_lock_op_for_serializable` [session setting]({% link {{ page.version.version }}/session-variables.md %}) to `true`. [#114737](https://github.com/cockroachdb/cockroach/issues/114737) -- `SELECT FOR UPDATE` does not lock completely-`NULL` column families in multi-column-family tables. [#116836](https://github.com/cockroachdb/cockroach/issues/116836) +- The new implementation of `SELECT FOR UPDATE` is not yet the default setting under `SERIALIZABLE` isolation. It can be used under `SERIALIZABLE` isolation by setting the `optimizer_use_lock_op_for_serializable` [session setting]({% link {{ page.version.version }}/session-variables.md %}) to `true`. #114737 +- `SELECT FOR UPDATE` does not lock completely-`NULL` column families in multi-column-family tables. #116836 #### Statistics limitations @@ -517,7 +517,7 @@ The SQL optimizer has limitations under certain isolation levels: #### Incorrect query plans for partitions with `NULL` values -In cases where the partition definition includes a comparison with `NULL` and a query constraint, incorrect query plans are returned. However, this case uses non-standard partitioning which defines partitions which could never hold values, so it is not likely to occur in production environments. 
[#82774](https://github.com/cockroachdb/cockroach/issues/82774) +In cases where the partition definition includes a comparison with `NULL` and a query constraint, incorrect query plans are returned. However, this case uses non-standard partitioning which defines partitions which could never hold values, so it is not likely to occur in production environments. #82774 #### Vectorized engine limitations @@ -525,15 +525,15 @@ In cases where the partition definition includes a comparison with `NULL` and a #### `transaction_rows_read_err` and `transaction_rows_written_err` do not halt query execution -The `transaction_rows_read_err` and `transaction_rows_written_err` [session settings]({% link {{ page.version.version }}/set-vars.md %}) limit the number of rows read or written by a single [transaction]({% link {{ page.version.version }}/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction). These session settings will fail the transaction with an error, but not until the current query finishes executing and the results have been returned to the client. [#70473](https://github.com/cockroachdb/cockroach/issues/70473) +The `transaction_rows_read_err` and `transaction_rows_written_err` [session settings]({% link {{ page.version.version }}/set-vars.md %}) limit the number of rows read or written by a single [transaction]({% link {{ page.version.version }}/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction). These session settings will fail the transaction with an error, but not until the current query finishes executing and the results have been returned to the client. #70473 #### `sql.guardrails.max_row_size_err` misses indexed virtual computed columns -The `sql.guardrails.max_row_size_err` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) misses large rows caused by indexed virtual computed columns. This is because the guardrail only checks the size of primary key rows, not secondary index rows. 
[#69540](https://github.com/cockroachdb/cockroach/issues/69540) +The `sql.guardrails.max_row_size_err` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) misses large rows caused by indexed virtual computed columns. This is because the guardrail only checks the size of primary key rows, not secondary index rows. #69540 #### Using `LIKE...ESCAPE` in `WHERE` and `HAVING` constraints -CockroachDB tries to optimize most comparisons operators in `WHERE` and `HAVING` clauses into constraints on SQL indexes by only accessing selected rows. This is done for `LIKE` clauses when a common prefix for all selected rows can be determined in the search pattern (e.g., `... LIKE 'Joe%'`). However, this optimization is not yet available if the `ESCAPE` keyword is also used. [#30192](https://github.com/cockroachdb/cockroach/issues/30192) +CockroachDB tries to optimize most comparison operators in `WHERE` and `HAVING` clauses into constraints on SQL indexes by only accessing selected rows. This is done for `LIKE` clauses when a common prefix for all selected rows can be determined in the search pattern (e.g., `... LIKE 'Joe%'`). However, this optimization is not yet available if the `ESCAPE` keyword is also used. #30192 #### Import with a high amount of disk contention @@ -688,7 +688,7 @@ UNION ALL SELECT * FROM t1 LEFT JOIN t2 ON st_contains(t1.geom, t2.geom) AND t2. (54 rows) ``` -[#59649](https://github.com/cockroachdb/cockroach/issues/59649) +#59649 #### Locality optimized search limitations @@ -701,4 +701,4 @@ UNION ALL SELECT * FROM t1 LEFT JOIN t2 ON st_contains(t1.geom, t2.geom) AND t2. #### Inverted join for `tsvector` and `tsquery` types is not supported -CockroachDB cannot index-accelerate queries with `@@` predicates when both sides of the operator are variables. [#102731](https://github.com/cockroachdb/cockroach/issues/102731) +CockroachDB cannot index-accelerate queries with `@@` predicates when both sides of the operator are variables. 
#102731 diff --git a/src/current/v25.1/kubernetes-performance.md b/src/current/v25.1/kubernetes-performance.md index 47aef82797d..356ecb6e2fb 100644 --- a/src/current/v25.1/kubernetes-performance.md +++ b/src/current/v25.1/kubernetes-performance.md @@ -20,9 +20,9 @@ Before you focus on optimizing a Kubernetes-orchestrated CockroachDB cluster: A number of independent factors affect performance when running CockroachDB on Kubernetes. Most are easiest to change before you create your CockroachDB cluster. If you need to modify a CockroachDB cluster that is already running on Kubernetes, extra care and testing is strongly recommended. -The following sections show how to modify excerpts from our provided Kubernetes configuration YAML files. You can find the most up-to-date versions of these files on GitHub: [one for running CockroachDB in secure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset-secure.yaml) and one for [running CockroachDB in insecure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml). +The following sections show how to modify excerpts from our provided Kubernetes configuration YAML files. You can find the most up-to-date versions of these files on GitHub: [one for running CockroachDB in secure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset-secure.yaml) and one for [running CockroachDB in insecure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml). -You can also use a [performance-optimized configuration file for secure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml) or [insecure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml). 
Be sure to modify the file wherever there is a `TODO` comment. +You can also use a [performance-optimized configuration file for secure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml) or [insecure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml). Be sure to modify the file wherever there is a `TODO` comment. ### Version of CockroachDB @@ -332,7 +332,7 @@ If for some reason setting appropriate resource requests still isn't getting you #### Client applications on the same machines as CockroachDB -Running client applications such as benchmarking applications on the same machines as CockroachDB can be even worse than just having Kubernetes system pods on the same machines. They are very likely to end up competing for resources, because when the applications get more loaded than usual, so will the CockroachDB processes. The best way to avoid this is to [set resource requests and limits](#resource-requests-and-limits), but if you are unwilling or unable to do that for some reason, you can also set [anti-affinity scheduling policies](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) on your client applications. Anti-affinity policies are placed in the pod spec, so if you wanted to change our provided example load generator app, you'd change [these lines](https://github.com/cockroachdb/cockroach/blob/98c506c48f3517d1ac1aadb6a09e1b23ad672c37/cloud/kubernetes/example-app.yaml#L11-L12): +Running client applications such as benchmarking applications on the same machines as CockroachDB can be even worse than just having Kubernetes system pods on the same machines. They are very likely to end up competing for resources, because when the applications get more loaded than usual, so will the CockroachDB processes. 
The best way to avoid this is to [set resource requests and limits](#resource-requests-and-limits), but if you are unwilling or unable to do that for some reason, you can also set [anti-affinity scheduling policies](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) on your client applications. Anti-affinity policies are placed in the pod spec, so if you wanted to change our provided example load generator app, you'd change [these lines](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml): ~~~ yaml spec: diff --git a/src/current/v25.1/log-formats.md b/src/current/v25.1/log-formats.md index 7c715f38c89..cfcf8512b19 100644 --- a/src/current/v25.1/log-formats.md +++ b/src/current/v25.1/log-formats.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/logformats.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/logformats.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v25.1/logging.md b/src/current/v25.1/logging.md index a9b22c4c05f..357817bc212 100644 --- a/src/current/v25.1/logging.md +++ b/src/current/v25.1/logging.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/logging.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/logging.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v25.1/migrate-from-avro.md b/src/current/v25.1/migrate-from-avro.md index de42d232917..eb46e391fce 100644 --- a/src/current/v25.1/migrate-from-avro.md +++ b/src/current/v25.1/migrate-from-avro.md @@ -161,7 +161,7 @@ There are additional import 
[options][option] you can use when importing binary - `records_terminated_by`, which specifies the unicode character used to indicate new lines in the input binary or JSON file (default: `\n`). {{site.data.alerts.callout_info}} -The following example uses sample data generated by [Avro tools](https://github.com/cockroachdb/cockroach/tree/master/pkg/sql/importer/testdata/avro). +The following example uses sample data generated by Avro tools. {{site.data.alerts.end}} For example, to import the data from `simple-schema.json` into a `simple` table, first [create the table]({% link {{ page.version.version }}/create-table.md %}) to import into. Then run `IMPORT INTO` with the following options: diff --git a/src/current/v25.1/monitor-cockroachdb-kubernetes.md b/src/current/v25.1/monitor-cockroachdb-kubernetes.md index cf1ba80217b..59bfa69d9b3 100644 --- a/src/current/v25.1/monitor-cockroachdb-kubernetes.md +++ b/src/current/v25.1/monitor-cockroachdb-kubernetes.md @@ -128,7 +128,7 @@ If you're on Hosted GKE, before starting, make sure the email address associated {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/prometheus.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/prometheus.yaml ~~~ {{site.data.alerts.callout_info}} @@ -177,14 +177,14 @@ If you're on Hosted GKE, before starting, make sure the email address associated ## Configure Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. 
+Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. -1. Download our alertmanager-config.yaml configuration file: +1. Download our alertmanager-config.yaml configuration file: {% include_cached copy-clipboard.html %} ~~~ shell $ curl -O \ - https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager-config.yaml + https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager-config.yaml ~~~ 1. Edit the `alertmanager-config.yaml` file to [specify the desired receivers for notifications](https://prometheus.io/docs/alerting/configuration/#receiver). Initially, the file contains a placeholder web hook. @@ -214,12 +214,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen The name of the secret, `alertmanager-cockroachdb`, must match the name used in the `alertmanager.yaml` file. If they differ, the Alertmanager instance will start without configuration, and nothing will happen. {{site.data.alerts.end}} -1. Use our [`alertmanager.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: +1. 
Use our [`alertmanager.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml ~~~ ~~~ @@ -244,12 +244,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen Alertmanager -1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml): +1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml): {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alert-rules.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml ~~~ ~~~ diff --git a/src/current/v25.1/monitor-cockroachdb-with-prometheus.md b/src/current/v25.1/monitor-cockroachdb-with-prometheus.md index eceee578492..bde659b39e4 100644 --- a/src/current/v25.1/monitor-cockroachdb-with-prometheus.md +++ b/src/current/v25.1/monitor-cockroachdb-with-prometheus.md @@ -15,7 +15,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration - Make sure you have already started a CockroachDB cluster, either [locally]({% link {{ page.version.version }}/start-a-local-cluster.md %}) or in a [production environment]({% link {{ page.version.version 
}}/manual-deployment.md %}). -- Note that all files used in this tutorial can be found in the [`monitoring`](https://github.com/cockroachdb/cockroach/tree/master/monitoring) directory of the CockroachDB repository. +- Note that all files used in this tutorial can be found in the `monitoring` directory of the CockroachDB repository. ## Step 1. Install Prometheus @@ -39,11 +39,11 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration ## Step 2. Configure Prometheus -1. Download the starter [Prometheus configuration file](https://github.com/cockroachdb/cockroach/blob/master/monitoring/prometheus.yml) for CockroachDB: +1. Download the starter [Prometheus configuration file](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/prometheus.yml) for CockroachDB: {% include_cached copy-clipboard.html %} ~~~ shell - curl -o prometheus.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/prometheus.yml + curl -o prometheus.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/prometheus.yml ~~~ When you examine the configuration file, you'll see that it is set up to scrape the time series metrics of a single, insecure local node every 10 seconds: @@ -60,7 +60,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration Production cluster | Change the `targets` field to include `':'` for each node in the cluster. Also, be sure your network configuration allows TCP communication on the specified ports. Secure cluster | Uncomment `scheme: 'https'` and comment out `scheme: 'http'`. -1. Create a `rules` directory and download the [aggregation rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/aggregation.rules.yml) and [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/alerts.rules.yml) for CockroachDB into it: +1. 
Create a `rules` directory and download the [aggregation rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/aggregation.rules.yml) and [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml) for CockroachDB into it: {% include_cached copy-clipboard.html %} ~~~ shell @@ -74,12 +74,12 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration {% include_cached copy-clipboard.html %} ~~~ shell - curl -o rules/aggregation.rules.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/rules/aggregation.rules.yml + curl -o rules/aggregation.rules.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/rules/aggregation.rules.yml ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o rules/alerts.rules.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/rules/alerts.rules.yml + curl -o rules/alerts.rules.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml ~~~ ## Step 3. Start Prometheus @@ -108,7 +108,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration ## Step 4. Send notifications with Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. In step 2, you already downloaded CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/alerts.rules.yml). Now, download, configure, and start [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). +Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. 
In step 2, you already downloaded CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml). Now, download, configure, and start [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). 1. Download the [latest Alertmanager tarball](https://prometheus.io/download/#alertmanager) for your OS. @@ -173,29 +173,29 @@ Although Prometheus lets you graph metrics, [Grafana](https://grafana.com/) is a Url | `http://:9090` Access | Direct -1. Download the starter [Grafana dashboards](https://github.com/cockroachdb/cockroach/tree/master/monitoring/grafana-dashboards/by-cluster) for CockroachDB: +1. Download the starter Grafana dashboards for CockroachDB: {% include_cached copy-clipboard.html %} ~~~ shell - curl -o runtime.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/runtime.json + curl -o runtime.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/runtime.json # runtime dashboard: node status, including uptime, memory, and cpu. ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o storage.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/storage.json + curl -o storage.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/storage.json # storage dashboard: storage availability. ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o sql.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/sql.json + curl -o sql.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/sql.json # sql dashboard: sql queries/transactions. 
~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o replication.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/replication.json + curl -o replication.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/replication.json # replicas dashboard: replica information and operations. ~~~ diff --git a/src/current/v25.1/monitoring-and-alerting.md b/src/current/v25.1/monitoring-and-alerting.md index 8fbd199dbbd..7902d78e8ea 100644 --- a/src/current/v25.1/monitoring-and-alerting.md +++ b/src/current/v25.1/monitoring-and-alerting.md @@ -1122,7 +1122,7 @@ Start Prometheus and Alertmanager to begin watching for events to alert on. You ### Events to alert on {{site.data.alerts.callout_info}} -Currently, not all events listed have corresponding alert rule definitions available from the `api/v2/rules/` endpoint. Many events not yet available in this manner are defined in the pre-defined alerting rules. For more details, see [Monitor CockroachDB with Prometheus]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}). +Currently, not all events listed have corresponding alert rule definitions available from the `api/v2/rules/` endpoint. Many events not yet available in this manner are defined in the pre-defined alerting rules. For more details, see [Monitor CockroachDB with Prometheus]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}). {{site.data.alerts.end}} #### Node is down @@ -1131,7 +1131,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** If a node is down, its `_status/vars` endpoint will return a `Connection refused` error. Otherwise, the `liveness_livenodes` metric will be the total number of live nodes in the cluster. -- **Rule definition:** Use the `InstanceDead` alert from our pre-defined alerting rules. 
+- **Rule definition:** Use the `InstanceDead` alert from our pre-defined alerting rules. #### Node is restarting too frequently @@ -1139,7 +1139,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the number of times the `sys_uptime` metric in the node's `_status/vars` output was reset back to zero. The `sys_uptime` metric gives you the length of time, in seconds, that the `cockroach` process has been running. -- **Rule definition:** Use the `InstanceFlapping` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `InstanceFlapping` alert from our pre-defined alerting rules. #### Node is running low on disk space @@ -1147,7 +1147,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Divide the `capacity` metric by the `capacity_available` metric in the node's `_status/vars` output. -- **Rule definition:** Use the `StoreDiskLow` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `StoreDiskLow` alert from our pre-defined alerting rules. {% include {{page.version.version}}/storage/free-up-disk-space.md %} @@ -1163,7 +1163,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the `security_certificate_expiration_ca` metric in the node's `_status/vars` output. -- **Rule definition:** Use the `CACertificateExpiresSoon` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `CACertificateExpiresSoon` alert from our pre-defined alerting rules. #### Node certificate expires soon @@ -1171,7 +1171,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the `security_certificate_expiration_node` metric in the node's `_status/vars` output. -- **Rule definition:** Use the `NodeCertificateExpiresSoon` alert from our pre-defined alerting rules. 
+- **Rule definition:** Use the `NodeCertificateExpiresSoon` alert from our pre-defined alerting rules. #### Changefeed is experiencing high latency diff --git a/src/current/v25.1/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md b/src/current/v25.1/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md index b47593bda96..09b361cd5ac 100644 --- a/src/current/v25.1/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md +++ b/src/current/v25.1/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md @@ -71,7 +71,7 @@ Instead of using this approach, you can now [enable global access](https://cloud Our multi-region deployment approach relies on pod IP addresses being routable across three distinct Kubernetes clusters and regions. Both the hosted Google Kubernetes Engine (GKE) and Amazon Elastic Kubernetes Service (EKS) satisfy this requirement. -If you want to run on another cloud or on-premises, use this [basic network test](https://github.com/cockroachdb/cockroach/tree/master/cloud/kubernetes/multiregion#pod-to-pod-connectivity) to see if it will work. +If you want to run on another cloud or on-premises, use this basic network test to see if it will work.
      1. Complete the **Before You Begin** steps described in the [Google Kubernetes Engine Quickstart](https://cloud.google.com/kubernetes-engine/docs/quickstart) documentation. @@ -322,21 +322,21 @@ This important rule enables node communication between Kubernetes clusters in di The Kubernetes cluster in each region needs to have a [Network Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/introduction.html) pointed at its CoreDNS service, which you will configure in the next step. -1. Upload our load balancer manifest [`dns-lb-eks.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml) to the Kubernetes clusters in all 3 regions: +1. Upload our load balancer manifest [`dns-lb-eks.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml) to the Kubernetes clusters in all 3 regions: {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context + kubectl apply -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context + kubectl apply -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context + kubectl apply -f 
https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context ~~~ You should see the load balancer appear in the Load Balancers section of the EC2 console in each region. This load balancer will route traffic to CoreDNS in the region. @@ -369,11 +369,11 @@ Each Kubernetes cluster has a [CoreDNS](https://coredns.io/) service that respon To enable traffic forwarding to CockroachDB pods in all 3 regions, you need to [modify the ConfigMap](https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configmap-options) for the CoreDNS Corefile in each region. -1. Download and open our ConfigMap template [`configmap.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/configmap.yaml): +1. Download and open our ConfigMap template [`configmap.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/configmap.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/configmap.yaml + curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/configmap.yaml ~~~ 1. After [obtaining the IP addresses of the Network Load Balancers](#set-up-load-balancing) in all 3 regions, you can use this information to define a **separate ConfigMap for each region**. Each unique ConfigMap lists the forwarding addresses for the pods in the 2 other regions. 
@@ -467,7 +467,7 @@ If you plan to run your instances exclusively on private subnets, set the follow {% include_cached copy-clipboard.html %} ~~~ shell $ curl -OOOOOOOOO \ - https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/{README.md,client-secure.yaml,cluster-init-secure.yaml,cockroachdb-statefulset-secure.yaml,dns-lb.yaml,example-app-secure.yaml,external-name-svc.yaml,setup.py,teardown.py} + https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/{README.md,client-secure.yaml,cluster-init-secure.yaml,cockroachdb-statefulset-secure.yaml,dns-lb.yaml,example-app-secure.yaml,external-name-svc.yaml,setup.py,teardown.py} ~~~ 1. Retrieve the `kubectl` "contexts" for your clusters: @@ -685,11 +685,11 @@ The below steps use [`cockroach cert` commands]({% link {{ page.version.version ### Create StatefulSets -1. Download and open our [multi-region StatefulSet configuration](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml). You'll save three versions of this file locally, one for each set of 3 CockroachDB nodes per region. +1. Download and open our [multi-region StatefulSet configuration](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml). You'll save three versions of this file locally, one for each set of 3 CockroachDB nodes per region. {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml ~~~ Look for **TODO** comments in the file. 
These highlight fields you need to define before deploying your StatefulSet. @@ -814,7 +814,7 @@ The pod uses the `root` client certificate created earlier by the `setup.py` scr {% include_cached copy-clipboard.html %} ~~~ shell - kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/client-secure.yaml --context --namespace + kubectl create -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/client-secure.yaml --context --namespace ~~~ ~~~ diff --git a/src/current/v25.1/postgresql-compatibility.md b/src/current/v25.1/postgresql-compatibility.md index 15eff6fdc35..24b3639b1de 100644 --- a/src/current/v25.1/postgresql-compatibility.md +++ b/src/current/v25.1/postgresql-compatibility.md @@ -34,11 +34,11 @@ When set to `true`, multiple portals can be open at the same time, with their ex This feature has the following limitations: - Only read-only [`SELECT` queries]({% link {{ page.version.version }}/selection-queries.md %}) without [subqueries]({% link {{ page.version.version }}/subqueries.md %}) are supported. 
-- Postqueries (which are how CockroachDB executes [foreign key checks]({% link {{ page.version.version }}/foreign-key.md %}), for example) are not supported - [cockroachdb/cockroach#96398](https://github.com/cockroachdb/cockroach/issues/96398) -- [Distributed SQL execution]({% link {{ page.version.version }}/architecture/sql-layer.md %}#distsql) is not supported for multiple active portals; instead queries execute on the [gateway node]({% link {{ page.version.version }}/architecture/life-of-a-distributed-transaction.md %}#gateway) only - [cockroachdb/cockroach#100822](https://github.com/cockroachdb/cockroach/issues/100822) -- Only the latest execution of a statement from a pausable portal is recorded by the [trace infrastructure]({% link {{ page.version.version }}/show-trace.md %}) - [cockroachdb/cockroach#99404](https://github.com/cockroachdb/cockroach/issues/99404) +- Postqueries (which are how CockroachDB executes [foreign key checks]({% link {{ page.version.version }}/foreign-key.md %}), for example) are not supported - cockroachdb/cockroach#96398 +- [Distributed SQL execution]({% link {{ page.version.version }}/architecture/sql-layer.md %}#distsql) is not supported for multiple active portals; instead queries execute on the [gateway node]({% link {{ page.version.version }}/architecture/life-of-a-distributed-transaction.md %}#gateway) only - cockroachdb/cockroach#100822 +- Only the latest execution of a statement from a pausable portal is recorded by the [trace infrastructure]({% link {{ page.version.version }}/show-trace.md %}) - cockroachdb/cockroach#99404 -In addition to the known issues, additional performance testing is needed. The current list of known issues can be viewed [on GitHub using the `A-pausable-portals` label](https://github.com/cockroachdb/cockroach/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc++label%3AA-pausable-portals+), and we welcome bug reports. +In addition to the known issues, additional performance testing is needed. 
The current list of known issues can be viewed on GitHub using the `A-pausable-portals` label, and we welcome bug reports. ## Features that differ from PostgreSQL @@ -181,7 +181,7 @@ An `x` value less than `1` would result in the following error: pq: check constraint violated ~~~ -[#35370](https://github.com/cockroachdb/cockroach/issues/35370) +#35370 ### Column name from an outer column inside a subquery @@ -207,7 +207,7 @@ PostgreSQL: 1 ~~~ -[#46563](https://github.com/cockroachdb/cockroach/issues/46563) +#46563 ### SQL Compatibility diff --git a/src/current/v25.1/query-spatial-data.md b/src/current/v25.1/query-spatial-data.md index 7238ce52cf5..e46cdbc0cc2 100644 --- a/src/current/v25.1/query-spatial-data.md +++ b/src/current/v25.1/query-spatial-data.md @@ -24,7 +24,7 @@ Just as CockroachDB strives for [PostgreSQL compatibility]({% link {{ page.versi CockroachDB does not implement the full list of PostGIS built-in functions and operators. Also, [spatial indexing works differently]({% link {{ page.version.version }}/spatial-indexes.md %}) (see the [Performance](#performance) section below). For a list of the spatial functions CockroachDB supports, see [Geospatial functions]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions). -If your application needs support for functions that are not yet implemented, check the [meta-issue for built-in function support on GitHub](https://github.com/cockroachdb/cockroach/issues/49203), which describes how to find an issue for the built-in function(s) you need. +If your application needs support for functions that are not yet implemented, check the meta-issue for built-in function support on GitHub, which describes how to find an issue for the built-in function(s) you need. For a list of other known limitations, see [Known Limitations]({% link {{ page.version.version }}/known-limitations.md %}#spatial-support-limitations). 
diff --git a/src/current/v25.1/read-committed.md b/src/current/v25.1/read-committed.md index bcf0a5d9f92..e797812f832 100644 --- a/src/current/v25.1/read-committed.md +++ b/src/current/v25.1/read-committed.md @@ -20,7 +20,7 @@ Whereas `SERIALIZABLE` isolation guarantees data correctness by placing transact If your workload is already running well under `SERIALIZABLE` isolation, Cockroach Labs does not recommend changing to `READ COMMITTED` isolation unless there is a specific need. {{site.data.alerts.callout_info}} -`READ COMMITTED` on CockroachDB provides stronger isolation than `READ COMMITTED` on PostgreSQL. On CockroachDB, `READ COMMITTED` prevents anomalies within single statements. For complete details on how `READ COMMITTED` is implemented on CockroachDB, see the [Read Committed RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20230122_read_committed_isolation.md). +`READ COMMITTED` on CockroachDB provides stronger isolation than `READ COMMITTED` on PostgreSQL. On CockroachDB, `READ COMMITTED` prevents anomalies within single statements. For complete details on how `READ COMMITTED` is implemented on CockroachDB, see the [Read Committed RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20230122_read_committed_isolation.md). {{site.data.alerts.end}} ## Enable `READ COMMITTED` isolation @@ -918,5 +918,5 @@ SELECT * FROM schedules - [`SELECT ... 
FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) - [Serializable Transactions]({% link {{ page.version.version }}/demo-serializable.md %}) - [What Write Skew Looks Like](https://www.cockroachlabs.com/blog/what-write-skew-looks-like/) -- [Read Committed RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20230122_read_committed_isolation.md) +- [Read Committed RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20230122_read_committed_isolation.md) - [Migration Overview]({% link molt/migration-overview.md %}) diff --git a/src/current/v25.1/restore.md b/src/current/v25.1/restore.md index 162b6787a56..e65de2898e7 100644 --- a/src/current/v25.1/restore.md +++ b/src/current/v25.1/restore.md @@ -217,11 +217,11 @@ When restoring an individual table that references a user-defined type (e.g., [` - If there is an existing type in the cluster with the same name that is compatible with the type in the backup, CockroachDB will map the type in the backup to the type in the cluster. - If there is an existing type in the cluster with the same name but it is _not_ compatible with the type in the backup, the restore will not succeed and you will be asked to resolve the naming conflict. You can do this by either [dropping]({% link {{ page.version.version }}/drop-type.md %}) or [renaming]({% link {{ page.version.version }}/alter-type.md %}) the existing user-defined type. -In general, two types are compatible if they are the same kind (e.g., an enum is only compatible with other enums). Additionally, enums are only compatible if they have the same ordered set of elements that have also been [created in the same way](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20200331_enums.md#physical-layout). For example: +In general, two types are compatible if they are the same kind (e.g., an enum is only compatible with other enums). 
Additionally, enums are only compatible if they have the same ordered set of elements that have also been [created in the same way](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20200331_enums.md#physical-layout). For example: - `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes', 'no')` are compatible. - `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('no', 'yes')` are not compatible. -- `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes'); ALTER TYPE t2 ADD VALUE ('no')` are not compatible because they were not created in the same way. +- `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes'); ALTER TYPE t2 ADD VALUE ('no')` are not compatible because they were not [created in the same way](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20200331_enums.md#physical-layout). ### Object dependencies diff --git a/src/current/v25.1/schedule-cockroachdb-kubernetes.md b/src/current/v25.1/schedule-cockroachdb-kubernetes.md index 973d234ffa9..de53f7c8449 100644 --- a/src/current/v25.1/schedule-cockroachdb-kubernetes.md +++ b/src/current/v25.1/schedule-cockroachdb-kubernetes.md @@ -108,7 +108,7 @@ For more context on how these rules work, see the [Kubernetes documentation](htt Specify pod affinities and anti-affinities in `affinity.podAffinity` and `affinity.podAntiAffinity` in the Operator's custom resource, which is used to [deploy the cluster]({% link {{ page.version.version }}/deploy-cockroachdb-with-kubernetes.md %}#initialize-the-cluster). If you specify multiple `matchExpressions` labels, the node must match all of them. If you specify multiple `values` for a label, the node can match any of the values. 
-The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. +The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. {% include_cached copy-clipboard.html %} ~~~ yaml diff --git a/src/current/v25.1/spatial-tutorial.md b/src/current/v25.1/spatial-tutorial.md index 2a201cf7acf..ac953cc7e5f 100644 --- a/src/current/v25.1/spatial-tutorial.md +++ b/src/current/v25.1/spatial-tutorial.md @@ -966,7 +966,7 @@ Time: 1.447s total (execution 1.446s / network 0.000s) Unfortunately, this query is a bit slower than you would like: about 1.5 seconds on a single-node [`cockroach demo`]({% link {{ page.version.version }}/cockroach-demo.md %}) cluster on a laptop. There are several reasons for this: 1. You haven't created any indexes at all yet. The query is likely to be doing full table scans, which you will need to hunt down with [`EXPLAIN`]({% link {{ page.version.version }}/explain.md %}). -1. CockroachDB does not yet have built-in support for index-based nearest neighbor queries. If this feature is important to you, please comment with some information about your use case on [cockroachdb/cockroach#55227](https://github.com/cockroachdb/cockroach/issues/55227). +1. CockroachDB does not yet have built-in support for index-based nearest neighbor queries. If this feature is important to you, please comment with some information about your use case on cockroachdb/cockroach#55227. 
Let's look at the `EXPLAIN` output to see if there is something that can be done to improve this query's performance: diff --git a/src/current/v25.1/sql-feature-support.md b/src/current/v25.1/sql-feature-support.md index df9fa0711d8..d51a2393894 100644 --- a/src/current/v25.1/sql-feature-support.md +++ b/src/current/v25.1/sql-feature-support.md @@ -194,4 +194,4 @@ XML | ✗ | Standard | XML data can be stored as `BYTES`, but we do not offer XM Triggers | Partial | Standard | [Triggers documentation]({% link {{ page.version.version }}/triggers.md %}) Row-level TTL | ✓ | Common Extension | Automatically delete expired rows. For more information, see [Batch-delete expired data with Row-Level TTL]({% link {{ page.version.version }}/row-level-ttl.md %}). User-defined functions | Partial | Standard | [User-Defined Functions documentation]({% link {{ page.version.version }}/user-defined-functions.md %}) - `CREATE EXTENSION "uuid-ossp"` | ✓ | Common Extension | Provides access to several additional [UUID generation functions]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions). Note that these UUID functions are available without typing `CREATE EXTENSION "uuid-ossp"`. CockroachDB does not have full support for `CREATE EXTENSION`. [GitHub issue tracking `CREATE EXTENSION` support](https://github.com/cockroachdb/cockroach/issues/74777). + `CREATE EXTENSION "uuid-ossp"` | ✓ | Common Extension | Provides access to several additional [UUID generation functions]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions). Note that these UUID functions are available without typing `CREATE EXTENSION "uuid-ossp"`. CockroachDB does not have full support for `CREATE EXTENSION`. There is a GitHub issue tracking `CREATE EXTENSION` support.
diff --git a/src/current/v25.1/sql-name-resolution.md b/src/current/v25.1/sql-name-resolution.md index 554a9a65583..9600bfe1ef7 100644 --- a/src/current/v25.1/sql-name-resolution.md +++ b/src/current/v25.1/sql-name-resolution.md @@ -37,7 +37,7 @@ If you are upgrading to {{ page.version.version }}, take any combination of the - [Create new schemas]({% link {{ page.version.version }}/create-schema.md %}) in databases on your cluster. After the schemas are created, use [`ALTER TABLE ... RENAME`]({% link {{ page.version.version }}/alter-table.md %}#rename-to), [`ALTER SEQUENCE ... RENAME`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE ... RENAME`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW ... RENAME`]({% link {{ page.version.version }}/alter-view.md %}) statements to move objects between databases as needed. To move objects between schemas, use [`ALTER TABLE ... SET SCHEMA`]({% link {{ page.version.version }}/alter-table.md %}#set-schema), [`ALTER SEQUENCE ... SET SCHEMA`]({% link {{ page.version.version }}/alter-sequence.md %}), or [`ALTER VIEW ... SET SCHEMA`]({% link {{ page.version.version }}/alter-view.md %}). -- If your cluster contains cross-database references (e.g., a cross-database foreign key reference, or a cross-database view reference), use the relevant [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}), [`ALTER SEQUENCE`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW `]({% link {{ page.version.version }}/alter-view.md %}) statements to move any cross-referencing objects to the same database, but different schemas. Cross-database object references were allowed in earlier versions of CockroachDB to make database-object naming hierarchies more flexible for users. 
In v20.2, creating cross-database references are disabled for [foreign keys](foreign-key.html), [views]({% link {{ page.version.version }}/views.md %}), and [sequence ownership]({% link {{ page.version.version }}/create-sequence.md %}). For details, see [tracking issue](https://github.com/cockroachdb/cockroach/issues/55791). +- If your cluster contains cross-database references (e.g., a cross-database foreign key reference, or a cross-database view reference), use the relevant [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}), [`ALTER SEQUENCE`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW `]({% link {{ page.version.version }}/alter-view.md %}) statements to move any cross-referencing objects to the same database, but different schemas. Cross-database object references were allowed in earlier versions of CockroachDB to make database-object naming hierarchies more flexible for users. In v20.2, creating cross-database references are disabled for [foreign keys](foreign-key.html), [views]({% link {{ page.version.version }}/views.md %}), and [sequence ownership]({% link {{ page.version.version }}/create-sequence.md %}). For details, see the tracking issue. ## How name resolution works diff --git a/src/current/v25.1/st_union.md b/src/current/v25.1/st_union.md index df4a9429bc1..1cb4223c0e7 100644 --- a/src/current/v25.1/st_union.md +++ b/src/current/v25.1/st_union.md @@ -12,7 +12,7 @@ Given a set of shapes (e.g., from a [selection query]({% link {{ page.version.ve - [`GEOMETRY`]({% link {{ page.version.version }}/architecture/glossary.md %}#geometry) {{site.data.alerts.callout_info}} -The non-aggregate version of `ST_Union` is not yet implemented. For more information, see [cockroach#49064](https://github.com/cockroachdb/cockroach/issues/49064). +The non-aggregate version of `ST_Union` is not yet implemented. For more information, see cockroach#49064.
{{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v25.1/temporary-tables.md b/src/current/v25.1/temporary-tables.md index 3a92fc64c97..21af0938cd2 100644 --- a/src/current/v25.1/temporary-tables.md +++ b/src/current/v25.1/temporary-tables.md @@ -10,7 +10,7 @@ docs_area: develop To create a temp table, add `TEMP`/`TEMPORARY` to a [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}) or [`CREATE TABLE AS`]({% link {{ page.version.version }}/create-table-as.md %}) statement. For full syntax details, see the [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}#synopsis) and [`CREATE TABLE AS`]({% link {{ page.version.version }}/create-table-as.md %}#synopsis) pages. For example usage, see [Examples](#examples). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v25.1/views.md b/src/current/v25.1/views.md index 1b703728bcf..ce63d6cd0f5 100644 --- a/src/current/v25.1/views.md +++ b/src/current/v25.1/views.md @@ -634,7 +634,7 @@ To speed up queries on materialized views, you can add an [index]({% link {{ pag CockroachDB supports session-scoped temporary views. Unlike persistent views, temporary views can only be accessed from the session in which they were created, and they are dropped at the end of the session. You can create temporary views on both persistent tables and [temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). 
+{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v25.2/admission-control.md b/src/current/v25.2/admission-control.md index 4870fb94728..2d8806767fb 100644 --- a/src/current/v25.2/admission-control.md +++ b/src/current/v25.2/admission-control.md @@ -22,7 +22,7 @@ Admission control works on a per-[node]({% link {{ page.version.version }}/archi For more details about how the admission control system works, see: -- The [Admission Control tech note](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/admission_control.md). +- [The Admission Control tech note](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/tech-notes/admission_control.md). - The blog post [Here's how CockroachDB keeps your database from collapsing under load](https://www.cockroachlabs.com/blog/admission-control-in-cockroachdb/). ## Use cases for admission control @@ -168,6 +168,6 @@ The [DB Console Overload dashboard]({% link {{ page.version.version }}/ui-overlo ## See also - The [Overload Dashboard]({% link {{ page.version.version }}/ui-overload-dashboard.md %}) in the [DB Console]({% link {{ page.version.version }}/ui-overview.md %}). -- The [technical note for admission control](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/admission_control.md) for details on the design of the admission control system. +- The technical note for admission control for details on the design of the admission control system. - The blog post [Here's how CockroachDB keeps your database from collapsing under load](https://www.cockroachlabs.com/blog/admission-control-in-cockroachdb/). - The blog post [Rubbing Control Theory on the Go scheduler](https://www.cockroachlabs.com/blog/rubbing-control-theory/). 
diff --git a/src/current/v25.2/architecture/sql-layer.md b/src/current/v25.2/architecture/sql-layer.md index bfb67a48c05..2605879b93d 100644 --- a/src/current/v25.2/architecture/sql-layer.md +++ b/src/current/v25.2/architecture/sql-layer.md @@ -117,7 +117,7 @@ It's also important––for indexed columns––that this byte encoding preser However, for non-indexed columns (e.g., non-`PRIMARY KEY` columns), CockroachDB instead uses an encoding (known as "value encoding") which consumes less space but does not preserve ordering. -You can find more exhaustive detail in the [Encoding Tech Note](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/encoding.md). +You can find more exhaustive detail in [the Encoding Tech Note](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/tech-notes/encoding.md). ### DistSQL @@ -134,7 +134,7 @@ To run SQL statements in a distributed fashion, we introduce a couple of concept - **Logical plan**: Similar to the AST/`planNode` tree described above, it represents the abstract (non-distributed) data flow through computation stages. - **Physical plan**: A physical plan is conceptually a mapping of the logical plan nodes to physical machines running `cockroach`. Logical plan nodes are replicated and specialized depending on the cluster topology. Like `planNodes` above, these components of the physical plan are scheduled and run on the cluster. -You can find much greater detail in the [DistSQL RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20160421_distributed_sql.md). +You can find much greater detail in [the DistSQL RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20160421_distributed_sql.md). 
## Schema changes diff --git a/src/current/v25.2/cluster-settings.md b/src/current/v25.2/cluster-settings.md index a4e85a419bf..8f22a3214c2 100644 --- a/src/current/v25.2/cluster-settings.md +++ b/src/current/v25.2/cluster-settings.md @@ -26,7 +26,7 @@ These cluster settings have a broad impact on CockroachDB internals and affect a {% include {{page.version.version}}/sql/sql-defaults-cluster-settings-deprecation-notice.md %} -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/settings/settings.html %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/settings/settings.html{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v25.2/cluster-setup-troubleshooting.md b/src/current/v25.2/cluster-setup-troubleshooting.md index 00decb5a647..ab2aed9a349 100644 --- a/src/current/v25.2/cluster-setup-troubleshooting.md +++ b/src/current/v25.2/cluster-setup-troubleshooting.md @@ -104,7 +104,7 @@ If you are trying to run all nodes on the same machine, you might get the follow #### Store directory already exists ~~~ -ERROR: could not cleanup temporary directories from record file: could not lock temporary directory /Users/amruta/go/src/github.com/cockroachdb/cockroach/cockroach-data/cockroach-temp301343769, may still be in use: IO error: While lock file: /Users/amruta/go/src/github.com/cockroachdb/cockroach/cockroach-data/cockroach-temp301343769/TEMP_DIR.LOCK: Resource temporarily unavailable +ERROR: could not cleanup temporary directories from record file: could not lock temporary directory /cockroach-data/cockroach-temp301343769, may still be in use: IO error: While lock file: /cockroach-data/cockroach-temp301343769/TEMP_DIR.LOCK: Resource temporarily unavailable ~~~ **Explanation:** When starting a new node on the same machine, the directory you choose to store the data in also contains metadata identifying the cluster the data 
came from. This causes conflicts when you've already started a node on the server and then tried to start another cluster using the same directory. diff --git a/src/current/v25.2/cockroachdb-feature-availability.md b/src/current/v25.2/cockroachdb-feature-availability.md index aabc6b915a0..968b503ceb9 100644 --- a/src/current/v25.2/cockroachdb-feature-availability.md +++ b/src/current/v25.2/cockroachdb-feature-availability.md @@ -183,7 +183,7 @@ Example: ### Check for constraint violations with `SCRUB` -Checks the consistency of [`UNIQUE`]({% link {{ page.version.version }}/unique.md %}) indexes, [`CHECK`]({% link {{ page.version.version }}/check.md %}) constraints, and more. Partially implemented; see [cockroachdb/cockroach#10425](https://github.com/cockroachdb/cockroach/issues/10425) for details. +Checks the consistency of [`UNIQUE`]({% link {{ page.version.version }}/unique.md %}) indexes, [`CHECK`]({% link {{ page.version.version }}/check.md %}) constraints, and more. Partially implemented; see cockroachdb/cockroach#10425 for details. {{site.data.alerts.callout_info}} This example uses the `users` table from our open-source, fictional peer-to-peer vehicle-sharing application, [MovR]({% link {{ page.version.version }}/movr.md %}). @@ -214,7 +214,7 @@ The [`SHOW RANGE ... FOR ROW`]({% link {{ page.version.version }}/show-range-for ### Temporary objects -[Temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}), [temporary views]({% link {{ page.version.version }}/views.md %}#temporary-views), and [temporary sequences]({% link {{ page.version.version }}/create-sequence.md %}#temporary-sequences) are in preview in CockroachDB. If you create too many temporary objects in a session, the performance of DDL operations will degrade. Dropping large numbers of temporary objects in rapid succession can also enqueue many [schema change GC jobs]({% link {{ page.version.version }}/show-jobs.md %}), which may further degrade cluster performance. 
This performance degradation could persist long after creating the temporary objects. For more details, see [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +[Temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}), [temporary views]({% link {{ page.version.version }}/views.md %}#temporary-views), and [temporary sequences]({% link {{ page.version.version }}/create-sequence.md %}#temporary-sequences) are in preview in CockroachDB. If you create too many temporary objects in a session, the performance of DDL operations will degrade. Dropping large numbers of temporary objects in rapid succession can also enqueue many [schema change GC jobs]({% link {{ page.version.version }}/show-jobs.md %}), which may further degrade cluster performance. This performance degradation could persist long after creating the temporary objects. For more details, see cockroachdb/cockroach#46260. To enable temporary objects, set the `experimental_enable_temp_tables` [session variable]({% link {{ page.version.version }}/show-vars.md %}) to `on`. diff --git a/src/current/v25.2/common-table-expressions.md b/src/current/v25.2/common-table-expressions.md index a76f13ff39d..bb43aa48b0c 100644 --- a/src/current/v25.2/common-table-expressions.md +++ b/src/current/v25.2/common-table-expressions.md @@ -446,7 +446,7 @@ SELECT COUNT(*) FROM temp; Because this pattern incurs the overhead of a new scan for each iteration, it is slower per row than a full scan. It is therefore faster than a full scan in cases (such as this one) where many rows are skipped, but is slower if they are not. {{site.data.alerts.callout_info}} -Some recursive CTEs are not not yet optimized. For details, see the [tracking issue](https://github.com/cockroachdb/cockroach/issues/89954). +Some recursive CTEs are not yet optimized. For details, see the tracking issue.
{{site.data.alerts.end}} ## Correlated common table expressions diff --git a/src/current/v25.2/configure-replication-zones.md b/src/current/v25.2/configure-replication-zones.md index 927b5e363f3..32e8fe837ce 100644 --- a/src/current/v25.2/configure-replication-zones.md +++ b/src/current/v25.2/configure-replication-zones.md @@ -98,7 +98,7 @@ For more information, see the following subsections: The hierarchy of inheritance for zone configs can be visualized using the following outline-style diagram, in which each level of indentation denotes an inheritance relationship. -The only exception to this simple inheritance relationship is that due to a known limitation, sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information, see [cockroachdb/cockroach#75862](https://github.com/cockroachdb/cockroach/issues/75862). +The only exception to this simple inheritance relationship is that due to a known limitation, sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information, see cockroachdb/cockroach#75862. ``` - default @@ -127,7 +127,7 @@ From the whole-system perspective, the hierarchy of schema object zone configs c The following diagram presents the same set of schema objects as the previous outline-style diagram, but using boxes and lines joined with arrows that represent the "top-down" view. -Each box represents a schema object in the zone configuration inheritance hierarchy. Each solid line ends in an arrow that points from a parent object to its child object, which will inherit the parent's values unless those values are changed at the child level. The dotted lines between partitions and sub-partitions represent the known limitation mentioned previously that sub-partitions do not inherit their values from their parent partitions. 
Instead, sub-partitions inherit their values from the parent table. For more information about this limitation, see [cockroachdb/cockroach#75862](https://github.com/cockroachdb/cockroach/issues/75862). +Each box represents a schema object in the zone configuration inheritance hierarchy. Each solid line ends in an arrow that points from a parent object to its child object, which will inherit the parent's values unless those values are changed at the child level. The dotted lines between partitions and sub-partitions represent the known limitation mentioned previously that sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information about this limitation, see cockroachdb/cockroach#75862. zone config inheritance diagram diff --git a/src/current/v25.2/create-sequence.md b/src/current/v25.2/create-sequence.md index 36d8fd1da0b..8819d2178d9 100644 --- a/src/current/v25.2/create-sequence.md +++ b/src/current/v25.2/create-sequence.md @@ -59,7 +59,7 @@ CockroachDB supports the following [SQL sequence functions]({% link {{ page.vers CockroachDB supports session-scoped temporary sequences. Unlike persistent sequences, temporary sequences can only be accessed from the session in which they were created, and they are dropped at the end of the session. You can create temporary sequences on both persistent tables and [temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. 
{{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v25.2/create-table.md b/src/current/v25.2/create-table.md index e14849f0f2c..8259b7ad462 100644 --- a/src/current/v25.2/create-table.md +++ b/src/current/v25.2/create-table.md @@ -155,7 +155,7 @@ If you use `GENERATED BY DEFAULT AS IDENTITY` to define the identity column, any Note the following limitations of identity columns: -- `GENERATED ALWAYS AS IDENTITY`/`GENERATED BY DEFAULT AS IDENTITY` is supported in [`ALTER TABLE ... ADD COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#add-column) statements only when the table being altered is empty, as [CockroachDB does not support back-filling sequential column data]({% link {{ page.version.version }}/known-limitations.md %}#adding-a-column-with-sequence-based-default-values). For more information, see [#42508](https://github.com/cockroachdb/cockroach/issues/42508). +- `GENERATED ALWAYS AS IDENTITY`/`GENERATED BY DEFAULT AS IDENTITY` is supported in [`ALTER TABLE ... ADD COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#add-column) statements only when the table being altered is empty, as [CockroachDB does not support back-filling sequential column data]({% link {{ page.version.version }}/known-limitations.md %}#adding-a-column-with-sequence-based-default-values). For more information, see #42508. - Unlike PostgreSQL, CockroachDB does not support using the `OVERRIDING SYSTEM VALUE` clause in `INSERT`/`UPDATE`/`UPSERT` statements to overwrite `GENERATED ALWAYS AS IDENTITY` identity column values. For an example of an identity column, see [Create a table with an identity column](#create-a-table-with-an-identity-column). 
diff --git a/src/current/v25.2/disaster-recovery-planning.md b/src/current/v25.2/disaster-recovery-planning.md index 59b05402692..830ed77a110 100644 --- a/src/current/v25.2/disaster-recovery-planning.md +++ b/src/current/v25.2/disaster-recovery-planning.md @@ -322,7 +322,7 @@ If your cluster is running, you do not have a backup that encapsulates the time If you have corrupted data in a database or table, [restore]({% link {{ page.version.version }}/restore.md %}) the object from a prior [backup]({% link {{ page.version.version }}/backup.md %}). If revision history is in the backup, you can restore from a [point in time]({% link {{ page.version.version }}/take-backups-with-revision-history-and-restore-from-a-point-in-time.md %}). -Instead of dropping the corrupted table or database, we recommend [renaming the table]({% link {{ page.version.version }}/alter-table.md %}#rename-to) or [renaming the database]({% link {{ page.version.version }}/alter-database.md %}#rename-to) so you have historical data to compare to later. If you drop a database, the database cannot be referenced with `AS OF SYSTEM TIME` queries (see [#51380](https://github.com/cockroachdb/cockroach/issues/51380) for more information), and you will need to take a backup that is backdated to the system time when the database still existed. +Instead of dropping the corrupted table or database, we recommend [renaming the table]({% link {{ page.version.version }}/alter-table.md %}#rename-to) or [renaming the database]({% link {{ page.version.version }}/alter-database.md %}#rename-to) so you have historical data to compare to later. If you drop a database, the database cannot be referenced with `AS OF SYSTEM TIME` queries (see #51380 for more information), and you will need to take a backup that is backdated to the system time when the database still existed. 
{{site.data.alerts.callout_info}} If the table you are restoring has foreign keys, [careful consideration]({% link {{ page.version.version }}/restore.md %}#remove-the-foreign-key-before-restore) should be applied to make sure data integrity is maintained during the restore process. diff --git a/src/current/v25.2/eventlog.md b/src/current/v25.2/eventlog.md index 2dd3c072d82..e7b813ded9d 100644 --- a/src/current/v25.2/eventlog.md +++ b/src/current/v25.2/eventlog.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/eventlog.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/eventlog.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v25.2/example-apps.md b/src/current/v25.2/example-apps.md index bea3cfd1876..0d5ba010933 100644 --- a/src/current/v25.2/example-apps.md +++ b/src/current/v25.2/example-apps.md @@ -14,7 +14,7 @@ Click the links in the tables below to see simple but complete example applicati If you are looking to do a specific task such as connect to the database, insert data, or run multi-statement transactions, see [this list of tasks](#tasks). {{site.data.alerts.callout_info}} -Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM with **partial** support. If you encounter problems, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward full support. +Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM with **partial** support. If you encounter problems, please open an issue with details to help us make progress toward full support. 
Note that tools with [**community-level** support]({% link {{ page.version.version }}/community-tooling.md %}) have been tested or developed by the CockroachDB community, but are not officially supported by Cockroach Labs. If you encounter problems with using these tools, please contact the maintainer of the tool with details. {{site.data.alerts.end}} diff --git a/src/current/v25.2/file-an-issue.md b/src/current/v25.2/file-an-issue.md index 9255e2c08b2..760c5b181db 100644 --- a/src/current/v25.2/file-an-issue.md +++ b/src/current/v25.2/file-an-issue.md @@ -28,7 +28,7 @@ To file an issue in GitHub, we need the following information: ### Template -You can use this as a template for [filing an issue in GitHub](https://github.com/cockroachdb/cockroach/issues/new): +You can use this as a template for filing an issue in GitHub: ~~~ diff --git a/src/current/v25.2/fips.md b/src/current/v25.2/fips.md index e490ddcd296..1ca62a6fa8e 100644 --- a/src/current/v25.2/fips.md +++ b/src/current/v25.2/fips.md @@ -58,7 +58,7 @@ A system must have FIPS mode enabled in the kernel before it can run the FIPS-re If you do not want to use the FIPS-ready CockroachDB Docker image directly, you can create a custom Docker image based on [Red Hat's Universal Base Image 8 Docker image](https://catalog.redhat.com/software/containers/ubi8/ubi/): -- You can model your Dockerfile on the one that Cockroach Labs uses to produce the [FIPS-ready Docker image](https://github.com/cockroachdb/cockroach/blob/master/build/deploy/Dockerfile) for CockroachDB. +- You can model your Dockerfile on the one that Cockroach Labs uses to produce the FIPS-ready Docker image for CockroachDB. - Your Dockerfile must install OpenSSL before it starts the `cockroach` binary. - You must enable FIPS mode on the Docker host kernel before it can run containers with FIPS mode enabled. The FIPS-ready CockroachDB Docker image must run with FIPS mode enabled. 
To enable FIPS mode in the Docker host kernel, refer to [Enable FIPS mode](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/security_hardening/assembly_installing-a-rhel-8-system-with-fips-mode-enabled_security-hardening) in Red Hat's documentation. To verify that FIPS mode is enabled, refer to [Verify that the kernel enforces FIPS mode](#verify-that-the-kernel-enforces-fips-mode). diff --git a/src/current/v25.2/foreign-key.md b/src/current/v25.2/foreign-key.md index 55677511e3d..7b06a11d484 100644 --- a/src/current/v25.2/foreign-key.md +++ b/src/current/v25.2/foreign-key.md @@ -93,7 +93,7 @@ For matching purposes, composite foreign keys can be in one of three states: For examples showing how these key matching algorithms work, see [Match composite foreign keys with `MATCH SIMPLE` and `MATCH FULL`](#match-composite-foreign-keys-with-match-simple-and-match-full). {{site.data.alerts.callout_info}} -CockroachDB does not support `MATCH PARTIAL`. For more information, see issue [#20305](https://github.com/cockroachdb/cockroach/issues/20305). +CockroachDB does not support `MATCH PARTIAL`. For more information, see issue #20305. 
{{site.data.alerts.end}} ### Foreign key actions diff --git a/src/current/v25.2/functions-and-operators.md b/src/current/v25.2/functions-and-operators.md index 9515f188f77..6fb648e24ff 100644 --- a/src/current/v25.2/functions-and-operators.md +++ b/src/current/v25.2/functions-and-operators.md @@ -51,7 +51,7 @@ In addition to the built-in functions described in the following sections, Cockr ## Built-in functions -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/functions.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/functions.md{% endcapture %}{% include {{ cockroach_include }} %} ## Aggregate functions @@ -61,11 +61,11 @@ For examples showing how to use aggregate functions, see [the `SELECT` clause do Non-commutative aggregate functions are sensitive to the order in which the rows are processed in the surrounding [`SELECT` clause]({% link {{ page.version.version }}/select-clause.md %}#aggregate-functions). To specify the order in which input rows are processed, you can add an [`ORDER BY`]({% link {{ page.version.version }}/order-by.md %}) clause within the function argument list. For examples, see the [`SELECT` clause]({% link {{ page.version.version }}/select-clause.md %}#order-aggregate-function-input-rows-by-column) documentation. 
{{site.data.alerts.end}} -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/aggregates.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/aggregates.md{% endcapture %}{% include {{ cockroach_include }} %} ## Window functions -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/window_functions.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/window_functions.md{% endcapture %}{% include {{ cockroach_include }} %} ## Operators @@ -137,7 +137,7 @@ The following table lists all CockroachDB operators from highest to lowest prece ### Supported operations -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/operators.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/operators.md{% endcapture %}{% include {{ cockroach_include }} %} {% comment %} ## `CAST()` diff --git a/src/current/v25.2/install-client-drivers.md b/src/current/v25.2/install-client-drivers.md index 984671a0dfc..4d919756ce3 100644 --- a/src/current/v25.2/install-client-drivers.md +++ b/src/current/v25.2/install-client-drivers.md @@ -8,7 +8,7 @@ docs_area: develop CockroachDB supports both native drivers and the PostgreSQL wire protocol, so most available PostgreSQL client drivers and ORM frameworks should work with CockroachDB. Choose a language for supported clients, and follow the installation steps. After you install a client library, you can [connect to the database]({% link {{ page.version.version }}/connect-to-the-database.md %}). {{site.data.alerts.callout_info}} -Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM framework with **partial** support. 
If you encounter problems, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward full support. +Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM framework with **partial** support. If you encounter problems, please open an issue with details to help us make progress toward full support. {{site.data.alerts.end}}
      diff --git a/src/current/v25.2/install-cockroachdb-mac.md b/src/current/v25.2/install-cockroachdb-mac.md index b8e5583b7f0..2cab92da935 100644 --- a/src/current/v25.2/install-cockroachdb-mac.md +++ b/src/current/v25.2/install-cockroachdb-mac.md @@ -201,4 +201,4 @@ CockroachDB runtimes built for the ARM architecture have the following limitatio {% comment %}v22.2.0+{% endcomment %} -On macOS ARM systems, [spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the GEOS libraries. Users needing spatial features on an ARM Mac may instead [run the Intel binary](#install-the-binary) or use the[Docker container image](#use-docker). Refer to [GitHub issue #93161](https://github.com/cockroachdb/cockroach/issues/93161) for more information. +On macOS ARM systems, [spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the GEOS libraries. Users needing spatial features on an ARM Mac may instead [run the Intel binary](#install-the-binary) or use the [Docker container image](#use-docker). Refer to GitHub issue #93161 for more information.
diff --git a/src/current/v25.2/intellij-idea.md b/src/current/v25.2/intellij-idea.md index de46249320f..ef1ce93b9d4 100644 --- a/src/current/v25.2/intellij-idea.md +++ b/src/current/v25.2/intellij-idea.md @@ -81,7 +81,7 @@ You can now use IntelliJ's [database tool window](https://www.jetbrains.com/help ## Report issues with IntelliJ IDEA & CockroachDB -If you encounter issues other than those outlined above, please [file an issue on the `cockroachdb/cockroach` GitHub repo](https://github.com/cockroachdb/cockroach/issues/new?template=bug_report.md), including the following details about the environment where you encountered the issue: +If you encounter issues other than those outlined above, please file an issue on the `cockroachdb/cockroach` GitHub repo, including the following details about the environment where you encountered the issue: - CockroachDB version ([`cockroach version`]({% link {{ page.version.version }}/cockroach-version.md %})) - IntelliJ IDEA version diff --git a/src/current/v25.2/known-limitations.md b/src/current/v25.2/known-limitations.md index e18fdc0a835..eb23b3fcff1 100644 --- a/src/current/v25.2/known-limitations.md +++ b/src/current/v25.2/known-limitations.md @@ -59,7 +59,7 @@ CockroachDB supports the [PostgreSQL wire protocol](https://www.postgresql.org/d #### `CAST` expressions containing a subquery with an `ENUM` target are not supported -Casting subqueries to ENUMs in views and UDFs is not supported. [#108184](https://github.com/cockroachdb/cockroach/issues/108184) +Casting subqueries to ENUMs in views and UDFs is not supported. #108184 #### Statements containing multiple modification subqueries of the same table are disallowed @@ -68,13 +68,13 @@ Statements containing multiple modification subqueries mutating the same row cou - Set the `sql.multiple_modifications_of_table.enabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) to `true`. 
- Use the `enable_multiple_modifications_of_table` [session variable]({% link {{ page.version.version }}/set-vars.md %}). -If multiple mutations inside the same statement affect different tables with [`FOREIGN KEY`]({% link {{ page.version.version }}/foreign-key.md %}) relations and `ON CASCADE` clauses between them, the results will be different from what is expected in PostgreSQL. [#70731](https://github.com/cockroachdb/cockroach/issues/70731) +If multiple mutations inside the same statement affect different tables with [`FOREIGN KEY`]({% link {{ page.version.version }}/foreign-key.md %}) relations and `ON CASCADE` clauses between them, the results will be different from what is expected in PostgreSQL. #70731 #### Using `default_int_size` session variable in batch of statements When setting the `default_int_size` [session variable]({% link {{ page.version.version }}/set-vars.md %}) in a batch of statements such as `SET default_int_size='int4'; SELECT 1::IN`, the `default_int_size` variable will not take effect until the next statement. Statement parsing is asynchronous with statement execution. -As a workaround, set `default_int_size` via your database driver, or ensure that `SET default_int_size` is in its own statement. [#32846](https://github.com/cockroachdb/cockroach/issues/32846) +As a workaround, set `default_int_size` via your database driver, or ensure that `SET default_int_size` is in its own statement. #32846 #### Overload resolution for collated strings @@ -101,15 +101,15 @@ Many string operations are not properly overloaded for [collated strings]({% lin pq: unsupported binary operator: || ~~~ -[#10679](https://github.com/cockroachdb/cockroach/issues/10679) +#10679 #### Current sequence value not checked when updating min/max value -Altering the minimum or maximum value of a series does not check the current value of a series. 
This means that it is possible to silently set the maximum to a value less than, or a minimum value greater than, the current value. [#23719](https://github.com/cockroachdb/cockroach/issues/23719) +Altering the minimum or maximum value of a series does not check the current value of a series. This means that it is possible to silently set the maximum to a value less than, or a minimum value greater than, the current value. #23719 #### `null_ordered_last` does not produce correct results with tuples -By default, CockroachDB orders `NULL`s before all other values. For compatibility with PostgreSQL, the `null_ordered_last` [session variable]({% link {{ page.version.version }}/set-vars.md %}) was added, which changes the default to order `NULL` values after all other values. This works in most cases, due to some transformations CockroachDB makes in the optimizer to add extra ordering columns. However, it does not work when the ordering column is a tuple. [#93558](https://github.com/cockroachdb/cockroach/issues/93558) +By default, CockroachDB orders `NULL`s before all other values. For compatibility with PostgreSQL, the `null_ordered_last` [session variable]({% link {{ page.version.version }}/set-vars.md %}) was added, which changes the default to order `NULL` values after all other values. This works in most cases, due to some transformations CockroachDB makes in the optimizer to add extra ordering columns. However, it does not work when the ordering column is a tuple. #93558 ### Functions and procedures @@ -159,10 +159,10 @@ Transactions with [priority `HIGH`]({% link {{ page.version.version }}/transacti ERROR: unimplemented: cannot use ROLLBACK TO SAVEPOINT in a HIGH PRIORITY transaction containing DDL SQLSTATE: 0A000 HINT: You have attempted to use a feature that is not yet implemented. 
-See: https://github.com/cockroachdb/cockroach/issues/46414 +See: cockroach#46414 ~~~ -[#46414](https://github.com/cockroachdb/cockroach/issues/46414) +#46414 #### `CANCEL JOB` limitations @@ -213,7 +213,7 @@ HINT: You have attempted to use a feature that is not yet implemented. See: https://go.crdb.dev/issue-v/42508/v24.2 ~~~ -[#42508](https://github.com/cockroachdb/cockroach/issues/42508) +#42508 #### Dropping a column referenced by a partial index @@ -240,9 +240,9 @@ When executing an [`ALTER TABLE ADD COLUMN`]({% link {{ page.version.version }}/ Some [schema changes]({% link {{ page.version.version }}/online-schema-changes.md %}) that [drop columns]({% link {{ page.version.version }}/alter-table.md %}#drop-column) cannot be [rolled back]({% link {{ page.version.version }}/rollback-transaction.md %}) properly. -In some cases, the rollback will succeed, but the column data might be partially or totally missing, or stale due to the asynchronous nature of the schema change. [#46541](https://github.com/cockroachdb/cockroach/issues/46541) +In some cases, the rollback will succeed, but the column data might be partially or totally missing, or stale due to the asynchronous nature of the schema change. #46541 -In other cases, the rollback will fail in such a way that will never be cleaned up properly, leaving the table descriptor in a state where no other schema changes can be run successfully. [#47712](https://github.com/cockroachdb/cockroach/issues/47712) +In other cases, the rollback will fail in such a way that will never be cleaned up properly, leaving the table descriptor in a state where no other schema changes can be run successfully. 
#47712 To reduce the chance that a column drop will roll back incorrectly: @@ -278,7 +278,7 @@ As a workaround, you can either [manually split a table's columns into multiple #### Placeholders in `PARTITION BY` -{% include {{ page.version.version }}/known-limitations/partitioning-with-placeholders.md %} [#19464](https://github.com/cockroachdb/cockroach/issues/19464) +{% include {{ page.version.version }}/known-limitations/partitioning-with-placeholders.md %} #19464 #### Unsupported trigram syntax @@ -294,7 +294,7 @@ The following PostgreSQL syntax and features are currently unsupported for [full #### CockroachDB does not allow inverted indexes with `STORING` -CockroachDB does not allow inverted indexes with a [`STORING` column]({% link {{ page.version.version }}/create-index.md %}#store-columns). [#88278](https://github.com/cockroachdb/cockroach/issues/88278) +CockroachDB does not allow inverted indexes with a [`STORING` column]({% link {{ page.version.version }}/create-index.md %}#store-columns). #88278 #### Multiple arbiter indexes for `INSERT ON CONFLICT DO UPDATE` @@ -314,19 +314,19 @@ CockroachDB does not allow inverted indexes with a [`STORING` column]({% link {{ CockroachDB supports efficiently storing and querying [spatial data]({% link {{ page.version.version }}/export-spatial-data.md %}), with the following limitations: -- Not all [PostGIS spatial functions](https://postgis.net/docs/reference.html) are supported. [#49203](https://github.com/cockroachdb/cockroach/issues/49203) +- Not all [PostGIS spatial functions](https://postgis.net/docs/reference.html) are supported. #49203 -- The `AddGeometryColumn` [spatial function]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) only allows constant arguments. 
[#49402](https://github.com/cockroachdb/cockroach/issues/49402) +- The `AddGeometryColumn` [spatial function]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) only allows constant arguments. #49402 -- The `AddGeometryColumn` spatial function only allows the `true` value for its `use_typmod` parameter. [#49448](https://github.com/cockroachdb/cockroach/issues/49448) +- The `AddGeometryColumn` spatial function only allows the `true` value for its `use_typmod` parameter. #49448 -- CockroachDB does not support the `@` operator. Instead of using `@` in spatial expressions, we recommend using the inverse, with `~`. For example, instead of `a @ b`, use `b ~ a`. [#56124](https://github.com/cockroachdb/cockroach/issues/56124) +- CockroachDB does not support the `@` operator. Instead of using `@` in spatial expressions, we recommend using the inverse, with `~`. For example, instead of `a @ b`, use `b ~ a`. #56124 -- CockroachDB does not yet support [`INSERT`]({% link {{ page.version.version }}/insert.md %})s into the [`spatial_ref_sys` table]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-system-tables). This limitation also blocks the [`ogr2ogr -f PostgreSQL` file conversion command](https://gdal.org/programs/ogr2ogr.html#cmdoption-ogr2ogr-f). [#55903](https://github.com/cockroachdb/cockroach/issues/55903) +- CockroachDB does not yet support [`INSERT`]({% link {{ page.version.version }}/insert.md %})s into the [`spatial_ref_sys` table]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-system-tables). This limitation also blocks the [`ogr2ogr -f PostgreSQL` file conversion command](https://gdal.org/programs/ogr2ogr.html#cmdoption-ogr2ogr-f). #55903 -- CockroachDB does not yet support [k-nearest neighbors](https://wikipedia.org/wiki/K-nearest_neighbors_algorithm). 
[#55227](https://github.com/cockroachdb/cockroach/issues/55227) +- CockroachDB does not yet support [k-nearest neighbors](https://wikipedia.org/wiki/K-nearest_neighbors_algorithm). #55227 -- CockroachDB does not support using [schema name prefixes]({% link {{ page.version.version }}/sql-name-resolution.md %}#how-name-resolution-works) to refer to [data types]({% link {{ page.version.version }}/data-types.md %}) with type modifiers (e.g., `public.geometry(linestring, 4326)`). Instead, use fully-unqualified names to refer to data types with type modifiers (e.g., `geometry(linestring,4326)`). [#56492](https://github.com/cockroachdb/cockroach/issues/56492) +- CockroachDB does not support using [schema name prefixes]({% link {{ page.version.version }}/sql-name-resolution.md %}#how-name-resolution-works) to refer to [data types]({% link {{ page.version.version }}/data-types.md %}) with type modifiers (e.g., `public.geometry(linestring, 4326)`). Instead, use fully-unqualified names to refer to data types with type modifiers (e.g., `geometry(linestring,4326)`). #56492 - {% include {{ page.version.version }}/known-limitations/srid-4326-limitations.md %} @@ -340,9 +340,9 @@ Refer to [`OID` best practices]({% link {{ page.version.version }}/oid.md %}#bes - {% include {{page.version.version}}/cdc/avro-udt-composite.md %} - {% include {{page.version.version}}/cdc/csv-udt-composite.md %} -- Updating subfields of composite types using dot syntax results in a syntax error. [#102984](https://github.com/cockroachdb/cockroach/issues/102984) +- Updating subfields of composite types using dot syntax results in a syntax error. #102984 -- Tuple elements cannot be accessed without enclosing the [composite variable]({% link {{ page.version.version }}/create-type.md %}#create-a-composite-data-type) name in parentheses. For example, `(OLD).column` and `(NEW).column` when used in [triggers]({% link {{ page.version.version }}/triggers.md %}). 
[#114687](https://github.com/cockroachdb/cockroach/issues/114687) +- Tuple elements cannot be accessed without enclosing the [composite variable]({% link {{ page.version.version }}/create-type.md %}#create-a-composite-data-type) name in parentheses. For example, `(OLD).column` and `(NEW).column` when used in [triggers]({% link {{ page.version.version }}/triggers.md %}). #114687 #### `ALTER TYPE` limitations @@ -386,7 +386,7 @@ However, if there is no host at the target IP address, or if a firewall rule blo - Configure any active network firewalls to allow node-to-node traffic. - Verify that orchestration tools (e.g., Kubernetes) are configured to use the correct network connection information. -[#53410](https://github.com/cockroachdb/cockroach/issues/53410) +#53410 #### No guaranteed state switch from `DECOMMISSIONING` to `DECOMMISSIONED` if `node decommission` is interrupted @@ -395,7 +395,7 @@ There is no guaranteed state switch from `DECOMMISSIONING` to `DECOMMISSIONED` i - The `cockroach node decommission --wait-all` command was run and then interrupted - The `cockroach node decommission --wait=none` command was run -This is because the state flip is effected by the CLI program at the end. Only the CLI (or its underlying API call) is able to finalize the "decommissioned" state. If the command is interrupted, or `--wait=none` is used, the state will only flip to "decommissioned" when the CLI program is run again after decommissioning has done all its work. [#94430](https://github.com/cockroachdb/cockroach/issues/94430) +This is because the state flip is effected by the CLI program at the end. Only the CLI (or its underlying API call) is able to finalize the "decommissioned" state. If the command is interrupted, or `--wait=none` is used, the state will only flip to "decommissioned" when the CLI program is run again after decommissioning has done all its work. 
#94430 #### Simultaneous client connections and running queries on a single node @@ -431,7 +431,7 @@ As a workaround, [execute the file from the command line]({% link {{ page.versio #### Spatial features disabled for ARM Macs -[Spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the [GEOS](https://libgeos.org/) libraries. Users needing spatial features on an ARM Mac may instead [use Rosetta](https://developer.apple.com/documentation/virtualization/running_intel_binaries_in_linux_vms_with_rosetta) to [run the Intel binary]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#install-binary) or use the [Docker image]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#use-docker) distribution. [GitHub tracking issue](https://github.com/cockroachdb/cockroach/issues/93161) +[Spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the [GEOS](https://libgeos.org/) libraries. Users needing spatial features on an ARM Mac may instead [use Rosetta](https://developer.apple.com/documentation/virtualization/running_intel_binaries_in_linux_vms_with_rosetta) to [run the Intel binary]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#install-binary) or use the [Docker image]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#use-docker) distribution. GitHub tracking issue #### Logging system limitations @@ -476,7 +476,7 @@ Accessing the DB Console for a secure cluster now requires login information (i. The [`COMMENT ON`]({% link {{ page.version.version }}/comment-on.md %}) statement associates comments to databases, tables, or columns. However, the internal table (`system.comments`) in which these comments are stored is not captured by a [`BACKUP`]({% link {{ page.version.version }}/backup.md %}) of a table or database. 
-As a workaround, take a cluster backup instead, as the `system.comments` table is included in cluster backups. [#44396](https://github.com/cockroachdb/cockroach/issues/44396) +As a workaround, take a cluster backup instead, as the `system.comments` table is included in cluster backups. #44396 #### `SHOW BACKUP` does not support symlinks for nodelocal @@ -517,8 +517,8 @@ Change data capture (CDC) provides efficient, distributed, row-level changefeeds The SQL optimizer has limitations under certain isolation levels: -- The new implementation of `SELECT FOR UPDATE` is not yet the default setting under `SERIALIZABLE` isolation. It can be used under `SERIALIZABLE` isolation by setting the `optimizer_use_lock_op_for_serializable` [session setting]({% link {{ page.version.version }}/session-variables.md %}) to `true`. [#114737](https://github.com/cockroachdb/cockroach/issues/114737) -- `SELECT FOR UPDATE` does not lock completely-`NULL` column families in multi-column-family tables. [#116836](https://github.com/cockroachdb/cockroach/issues/116836) +- The new implementation of `SELECT FOR UPDATE` is not yet the default setting under `SERIALIZABLE` isolation. It can be used under `SERIALIZABLE` isolation by setting the `optimizer_use_lock_op_for_serializable` [session setting]({% link {{ page.version.version }}/session-variables.md %}) to `true`. #114737 +- `SELECT FOR UPDATE` does not lock completely-`NULL` column families in multi-column-family tables. #116836 #### Statistics limitations @@ -527,7 +527,7 @@ The SQL optimizer has limitations under certain isolation levels: #### Incorrect query plans for partitions with `NULL` values -In cases where the partition definition includes a comparison with `NULL` and a query constraint, incorrect query plans are returned. However, this case uses non-standard partitioning which defines partitions which could never hold values, so it is not likely to occur in production environments. 
[#82774](https://github.com/cockroachdb/cockroach/issues/82774) +In cases where the partition definition includes a comparison with `NULL` and a query constraint, incorrect query plans are returned. However, this case uses non-standard partitioning which defines partitions which could never hold values, so it is not likely to occur in production environments. #82774 #### Vectorized engine limitations @@ -535,15 +535,15 @@ In cases where the partition definition includes a comparison with `NULL` and a #### `transaction_rows_read_err` and `transaction_rows_written_err` do not halt query execution -The `transaction_rows_read_err` and `transaction_rows_written_err` [session settings]({% link {{ page.version.version }}/set-vars.md %}) limit the number of rows read or written by a single [transaction]({% link {{ page.version.version }}/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction). These session settings will fail the transaction with an error, but not until the current query finishes executing and the results have been returned to the client. [#70473](https://github.com/cockroachdb/cockroach/issues/70473) +The `transaction_rows_read_err` and `transaction_rows_written_err` [session settings]({% link {{ page.version.version }}/set-vars.md %}) limit the number of rows read or written by a single [transaction]({% link {{ page.version.version }}/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction). These session settings will fail the transaction with an error, but not until the current query finishes executing and the results have been returned to the client. #70473 #### `sql.guardrails.max_row_size_err` misses indexed virtual computed columns -The `sql.guardrails.max_row_size_err` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) misses large rows caused by indexed virtual computed columns. This is because the guardrail only checks the size of primary key rows, not secondary index rows. 
[#69540](https://github.com/cockroachdb/cockroach/issues/69540) +The `sql.guardrails.max_row_size_err` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) misses large rows caused by indexed virtual computed columns. This is because the guardrail only checks the size of primary key rows, not secondary index rows. #69540 #### Using `LIKE...ESCAPE` in `WHERE` and `HAVING` constraints -CockroachDB tries to optimize most comparisons operators in `WHERE` and `HAVING` clauses into constraints on SQL indexes by only accessing selected rows. This is done for `LIKE` clauses when a common prefix for all selected rows can be determined in the search pattern (e.g., `... LIKE 'Joe%'`). However, this optimization is not yet available if the `ESCAPE` keyword is also used. [#30192](https://github.com/cockroachdb/cockroach/issues/30192) +CockroachDB tries to optimize most comparison operators in `WHERE` and `HAVING` clauses into constraints on SQL indexes by only accessing selected rows. This is done for `LIKE` clauses when a common prefix for all selected rows can be determined in the search pattern (e.g., `... LIKE 'Joe%'`). However, this optimization is not yet available if the `ESCAPE` keyword is also used. #30192 #### Import with a high amount of disk contention @@ -698,7 +698,7 @@ UNION ALL SELECT * FROM t1 LEFT JOIN t2 ON st_contains(t1.geom, t2.geom) AND t2. (54 rows) ``` -[#59649](https://github.com/cockroachdb/cockroach/issues/59649) +#59649 #### Locality optimized search limitations @@ -711,4 +711,4 @@ UNION ALL SELECT * FROM t1 LEFT JOIN t2 ON st_contains(t1.geom, t2.geom) AND t2. #### Inverted join for `tsvector` and `tsquery` types is not supported -CockroachDB cannot index-accelerate queries with `@@` predicates when both sides of the operator are variables. [#102731](https://github.com/cockroachdb/cockroach/issues/102731) +CockroachDB cannot index-accelerate queries with `@@` predicates when both sides of the operator are variables. 
#102731 diff --git a/src/current/v25.2/kubernetes-performance.md b/src/current/v25.2/kubernetes-performance.md index 6fa4d534442..cfe4aec6725 100644 --- a/src/current/v25.2/kubernetes-performance.md +++ b/src/current/v25.2/kubernetes-performance.md @@ -24,9 +24,9 @@ Before you focus on optimizing a Kubernetes-orchestrated CockroachDB cluster: A number of independent factors affect performance when running CockroachDB on Kubernetes. Most are easiest to change before you create your CockroachDB cluster. If you need to modify a CockroachDB cluster that is already running on Kubernetes, extra care and testing is strongly recommended. -The following sections show how to modify excerpts from our provided Kubernetes configuration YAML files. You can find the most up-to-date versions of these files on GitHub: [one for running CockroachDB in secure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset-secure.yaml) and one for [running CockroachDB in insecure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml). +The following sections show how to modify excerpts from our provided Kubernetes configuration YAML files. You can find the most up-to-date versions of these files on GitHub: [one for running CockroachDB in secure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset-secure.yaml) and one for [running CockroachDB in insecure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml). -You can also use a [performance-optimized configuration file for secure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml) or [insecure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml). 
Be sure to modify the file wherever there is a `TODO` comment. +You can also use a [performance-optimized configuration file for secure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml) or [insecure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml). Be sure to modify the file wherever there is a `TODO` comment. ### Version of CockroachDB @@ -336,7 +336,7 @@ If for some reason setting appropriate resource requests still isn't getting you #### Client applications on the same machines as CockroachDB -Running client applications such as benchmarking applications on the same machines as CockroachDB can be even worse than just having Kubernetes system pods on the same machines. They are very likely to end up competing for resources, because when the applications get more loaded than usual, so will the CockroachDB processes. The best way to avoid this is to [set resource requests and limits](#resource-requests-and-limits), but if you are unwilling or unable to do that for some reason, you can also set [anti-affinity scheduling policies](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) on your client applications. Anti-affinity policies are placed in the pod spec, so if you wanted to change our provided example load generator app, you'd change [these lines](https://github.com/cockroachdb/cockroach/blob/98c506c48f3517d1ac1aadb6a09e1b23ad672c37/cloud/kubernetes/example-app.yaml#L11-L12): +Running client applications such as benchmarking applications on the same machines as CockroachDB can be even worse than just having Kubernetes system pods on the same machines. They are very likely to end up competing for resources, because when the applications get more loaded than usual, so will the CockroachDB processes. 
The best way to avoid this is to [set resource requests and limits](#resource-requests-and-limits), but if you are unwilling or unable to do that for some reason, you can also set [anti-affinity scheduling policies](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) on your client applications. Anti-affinity policies are placed in the pod spec, so if you wanted to change our provided example load generator app, you'd change [these lines](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml): ~~~ yaml spec: diff --git a/src/current/v25.2/log-formats.md b/src/current/v25.2/log-formats.md index 7c715f38c89..cfcf8512b19 100644 --- a/src/current/v25.2/log-formats.md +++ b/src/current/v25.2/log-formats.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/logformats.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/logformats.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v25.2/logging.md b/src/current/v25.2/logging.md index a9b22c4c05f..357817bc212 100644 --- a/src/current/v25.2/logging.md +++ b/src/current/v25.2/logging.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/logging.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/logging.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v25.2/migrate-from-avro.md b/src/current/v25.2/migrate-from-avro.md index de42d232917..eb46e391fce 100644 --- a/src/current/v25.2/migrate-from-avro.md +++ b/src/current/v25.2/migrate-from-avro.md @@ -161,7 +161,7 @@ There are additional import 
[options][option] you can use when importing binary - `records_terminated_by`, which specifies the unicode character used to indicate new lines in the input binary or JSON file (default: `\n`). {{site.data.alerts.callout_info}} -The following example uses sample data generated by [Avro tools](https://github.com/cockroachdb/cockroach/tree/master/pkg/sql/importer/testdata/avro). +The following example uses sample data generated by Avro tools. {{site.data.alerts.end}} For example, to import the data from `simple-schema.json` into a `simple` table, first [create the table]({% link {{ page.version.version }}/create-table.md %}) to import into. Then run `IMPORT INTO` with the following options: diff --git a/src/current/v25.2/monitor-cockroachdb-kubernetes.md b/src/current/v25.2/monitor-cockroachdb-kubernetes.md index da454617282..7aa8678c973 100644 --- a/src/current/v25.2/monitor-cockroachdb-kubernetes.md +++ b/src/current/v25.2/monitor-cockroachdb-kubernetes.md @@ -132,7 +132,7 @@ If you're on Hosted GKE, before starting, make sure the email address associated {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/prometheus.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/prometheus.yaml ~~~ {{site.data.alerts.callout_info}} @@ -181,14 +181,14 @@ If you're on Hosted GKE, before starting, make sure the email address associated ## Configure Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. 
+Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. -1. Download our alertmanager-config.yaml configuration file: +1. Download our alertmanager-config.yaml configuration file: {% include_cached copy-clipboard.html %} ~~~ shell $ curl -O \ - https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager-config.yaml + https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager-config.yaml ~~~ 1. Edit the `alertmanager-config.yaml` file to [specify the desired receivers for notifications](https://prometheus.io/docs/alerting/configuration/#receiver). Initially, the file contains a placeholder web hook. @@ -218,12 +218,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen The name of the secret, `alertmanager-cockroachdb`, must match the name used in the `alertmanager.yaml` file. If they differ, the Alertmanager instance will start without configuration, and nothing will happen. {{site.data.alerts.end}} -1. Use our [`alertmanager.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: +1. 
Use our [`alertmanager.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml ~~~ ~~~ @@ -248,12 +248,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen Alertmanager -1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml): +1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml): {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alert-rules.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml ~~~ ~~~ diff --git a/src/current/v25.2/monitor-cockroachdb-operator.md b/src/current/v25.2/monitor-cockroachdb-operator.md index 86cd625d29f..a929a833631 100644 --- a/src/current/v25.2/monitor-cockroachdb-operator.md +++ b/src/current/v25.2/monitor-cockroachdb-operator.md @@ -76,7 +76,7 @@ If you're on Hosted GKE, before starting, make sure the email address associated {% include_cached copy-clipboard.html %} ~~~ shell - curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/prometheus.yaml + curl -O 
https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/prometheus.yaml ~~~ 1. Apply the Prometheus manifest. This creates the various objects necessary to run a Prometheus instance: @@ -119,13 +119,13 @@ For more details on using the Prometheus UI, see their [official documentation]( ## Configure Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. +Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. -1. Download our [alertmanager-config.yaml](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager-config.yaml) configuration file: +1. Download our [alertmanager-config.yaml](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager-config.yaml) configuration file: {% include_cached copy-clipboard.html %} ~~~ shell - curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager-config.yaml + curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager-config.yaml ~~~ 1. 
Edit the `alertmanager-config.yaml` file to [specify the desired receivers for notifications](https://prometheus.io/docs/alerting/configuration/#receiver). Initially, the file contains a placeholder web hook. @@ -152,12 +152,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen The name of the secret, `alertmanager-cockroachdb`, must match the name used in the `alertmanager.yaml` file. If they differ, the Alertmanager instance will start without configuration, and nothing will happen. {{site.data.alerts.end}} -1. Use our [alertmanager.yaml](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: +1. Use our [alertmanager.yaml](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: {% include_cached copy-clipboard.html %} ~~~ shell kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml ~~~ ~~~ shell alertmanager.monitoring.coreos.com/cockroachdb created @@ -180,12 +180,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen Alertmanager -1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml): +1. 
Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml): {% include_cached copy-clipboard.html %} ~~~ shell kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alert-rules.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml ~~~ ~~~ shell prometheusrule.monitoring.coreos.com/prometheus-cockroachdb-rules created diff --git a/src/current/v25.2/monitor-cockroachdb-with-prometheus.md b/src/current/v25.2/monitor-cockroachdb-with-prometheus.md index 573576f0097..f5e07f80181 100644 --- a/src/current/v25.2/monitor-cockroachdb-with-prometheus.md +++ b/src/current/v25.2/monitor-cockroachdb-with-prometheus.md @@ -15,7 +15,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration - Make sure you have already started a CockroachDB cluster, either [locally]({% link {{ page.version.version }}/start-a-local-cluster.md %}) or in a [production environment]({% link {{ page.version.version }}/manual-deployment.md %}). -- Note that all files used in this tutorial can be found in the [`monitoring`](https://github.com/cockroachdb/cockroach/tree/master/monitoring) directory of the CockroachDB repository. +- Note that all files used in this tutorial can be found in the `monitoring` directory of the CockroachDB repository. ## Step 1. Install Prometheus @@ -39,11 +39,11 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration ## Step 2. Configure Prometheus -1. Download the starter [Prometheus configuration file](https://github.com/cockroachdb/cockroach/blob/master/monitoring/prometheus.yml) for CockroachDB: +1. 
Download the starter [Prometheus configuration file](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/prometheus.yml) for CockroachDB: {% include_cached copy-clipboard.html %} ~~~ shell - curl -o prometheus.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/prometheus.yml + curl -o prometheus.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/prometheus.yml ~~~ When you examine the configuration file, you'll see that it is set up to scrape the time series metrics of a single, insecure local node every 10 seconds: @@ -60,7 +60,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration Production cluster | Change the `targets` field to include `':'` for each node in the cluster. Also, be sure your network configuration allows TCP communication on the specified ports. Secure cluster | Uncomment `scheme: 'https'` and comment out `scheme: 'http'`. -1. Create a `rules` directory and download the [aggregation rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/aggregation.rules.yml) and [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/alerts.rules.yml) for CockroachDB into it: +1. 
Create a `rules` directory and download the [aggregation rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/aggregation.rules.yml) and [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml) for CockroachDB into it: {% include_cached copy-clipboard.html %} ~~~ shell @@ -74,12 +74,12 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration {% include_cached copy-clipboard.html %} ~~~ shell - curl -o rules/aggregation.rules.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/rules/aggregation.rules.yml + curl -o rules/aggregation.rules.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/rules/aggregation.rules.yml ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o rules/alerts.rules.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/rules/alerts.rules.yml + curl -o rules/alerts.rules.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml ~~~ ## Step 3. Start Prometheus @@ -108,7 +108,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration ## Step 4. Send notifications with Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. In step 2, you already downloaded CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/alerts.rules.yml). Now, download, configure, and start [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). +Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. 
In step 2, you already downloaded CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml). Now, download, configure, and start [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). 1. Download the [latest Alertmanager tarball](https://prometheus.io/download/#alertmanager) for your OS. @@ -173,29 +173,29 @@ Although Prometheus lets you graph metrics, [Grafana](https://grafana.com/) is a Url | `http://:9090` Access | Direct -1. Download the starter [Grafana dashboards](https://github.com/cockroachdb/cockroach/tree/master/monitoring/grafana-dashboards/by-cluster) for CockroachDB: +1. Download the starter Grafana dashboards for CockroachDB: {% include_cached copy-clipboard.html %} ~~~ shell - curl -o runtime.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/runtime.json + curl -o runtime.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/runtime.json # runtime dashboard: node status, including uptime, memory, and cpu. ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o storage.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/storage.json + curl -o storage.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/storage.json # storage dashboard: storage availability. ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o sql.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/sql.json + curl -o sql.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/sql.json # sql dashboard: sql queries/transactions. 
~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o replication.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/replication.json + curl -o replication.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/replication.json # replicas dashboard: replica information and operations. ~~~ diff --git a/src/current/v25.2/monitoring-and-alerting.md b/src/current/v25.2/monitoring-and-alerting.md index 5a5816fb1b3..df82bb0ee81 100644 --- a/src/current/v25.2/monitoring-and-alerting.md +++ b/src/current/v25.2/monitoring-and-alerting.md @@ -1113,7 +1113,7 @@ Start Prometheus and Alertmanager to begin watching for events to alert on. You ### Events to alert on {{site.data.alerts.callout_info}} -Currently, not all events listed have corresponding alert rule definitions available from the `api/v2/rules/` endpoint. Many events not yet available in this manner are defined in the pre-defined alerting rules. For more details, see [Monitor CockroachDB with Prometheus]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}). +Currently, not all events listed have corresponding alert rule definitions available from the `api/v2/rules/` endpoint. Many events not yet available in this manner are defined in the pre-defined alerting rules. For more details, see [Monitor CockroachDB with Prometheus]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}). {{site.data.alerts.end}} #### Node is down @@ -1122,7 +1122,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** If a node is down, its `_status/vars` endpoint will return a `Connection refused` error. Otherwise, the `liveness_livenodes` metric will be the total number of live nodes in the cluster. -- **Rule definition:** Use the `InstanceDead` alert from our pre-defined alerting rules. 
+- **Rule definition:** Use the `InstanceDead` alert from our pre-defined alerting rules. #### Node is restarting too frequently @@ -1130,7 +1130,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the number of times the `sys_uptime` metric in the node's `_status/vars` output was reset back to zero. The `sys_uptime` metric gives you the length of time, in seconds, that the `cockroach` process has been running. -- **Rule definition:** Use the `InstanceFlapping` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `InstanceFlapping` alert from our pre-defined alerting rules. #### Node is running low on disk space @@ -1138,7 +1138,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Divide the `capacity` metric by the `capacity_available` metric in the node's `_status/vars` output. -- **Rule definition:** Use the `StoreDiskLow` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `StoreDiskLow` alert from our pre-defined alerting rules. {% include {{page.version.version}}/storage/free-up-disk-space.md %} @@ -1154,7 +1154,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the `security_certificate_expiration_ca` metric in the node's `_status/vars` output. -- **Rule definition:** Use the `CACertificateExpiresSoon` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `CACertificateExpiresSoon` alert from our pre-defined alerting rules. #### Node certificate expires soon @@ -1162,7 +1162,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the `security_certificate_expiration_node` metric in the node's `_status/vars` output. -- **Rule definition:** Use the `NodeCertificateExpiresSoon` alert from our pre-defined alerting rules. 
+- **Rule definition:** Use the `NodeCertificateExpiresSoon` alert from our pre-defined alerting rules. #### Changefeed is experiencing high latency diff --git a/src/current/v25.2/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md b/src/current/v25.2/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md index 55f144f4d74..34fcca4fb4a 100644 --- a/src/current/v25.2/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md +++ b/src/current/v25.2/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md @@ -71,7 +71,7 @@ Instead of using this approach, you can now [enable global access](https://cloud Our multi-region deployment approach relies on pod IP addresses being routable across three distinct Kubernetes clusters and regions. Both the hosted Google Kubernetes Engine (GKE) and Amazon Elastic Kubernetes Service (EKS) satisfy this requirement. -If you want to run on another cloud or on-premises, use this [basic network test](https://github.com/cockroachdb/cockroach/tree/master/cloud/kubernetes/multiregion#pod-to-pod-connectivity) to see if it will work. +If you want to run on another cloud or on-premises, use this basic network test to see if it will work.
      1. Complete the **Before You Begin** steps described in the [Google Kubernetes Engine Quickstart](https://cloud.google.com/kubernetes-engine/docs/quickstart) documentation. @@ -322,21 +322,21 @@ This important rule enables node communication between Kubernetes clusters in di The Kubernetes cluster in each region needs to have a [Network Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/introduction.html) pointed at its CoreDNS service, which you will configure in the next step. -1. Upload our load balancer manifest [`dns-lb-eks.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml) to the Kubernetes clusters in all 3 regions: +1. Upload our load balancer manifest [`dns-lb-eks.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml) to the Kubernetes clusters in all 3 regions: {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context + kubectl apply -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context + kubectl apply -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context + kubectl apply -f 
https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context ~~~ You should see the load balancer appear in the Load Balancers section of the EC2 console in each region. This load balancer will route traffic to CoreDNS in the region. @@ -369,11 +369,11 @@ Each Kubernetes cluster has a [CoreDNS](https://coredns.io/) service that respon To enable traffic forwarding to CockroachDB pods in all 3 regions, you need to [modify the ConfigMap](https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configmap-options) for the CoreDNS Corefile in each region. -1. Download and open our ConfigMap template [`configmap.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/configmap.yaml): +1. Download and open our ConfigMap template [`configmap.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/configmap.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/configmap.yaml + curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/configmap.yaml ~~~ 1. After [obtaining the IP addresses of the Network Load Balancers](#set-up-load-balancing) in all 3 regions, you can use this information to define a **separate ConfigMap for each region**. Each unique ConfigMap lists the forwarding addresses for the pods in the 2 other regions. 
@@ -467,7 +467,7 @@ If you plan to run your instances exclusively on private subnets, set the follow {% include_cached copy-clipboard.html %} ~~~ shell $ curl -OOOOOOOOO \ - https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/{README.md,client-secure.yaml,cluster-init-secure.yaml,cockroachdb-statefulset-secure.yaml,dns-lb.yaml,example-app-secure.yaml,external-name-svc.yaml,setup.py,teardown.py} + https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/{README.md,client-secure.yaml,cluster-init-secure.yaml,cockroachdb-statefulset-secure.yaml,dns-lb.yaml,example-app-secure.yaml,external-name-svc.yaml,setup.py,teardown.py} ~~~ 1. Retrieve the `kubectl` "contexts" for your clusters: @@ -685,11 +685,11 @@ The below steps use [`cockroach cert` commands]({% link {{ page.version.version ### Create StatefulSets -1. Download and open our [multi-region StatefulSet configuration](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml). You'll save three versions of this file locally, one for each set of 3 CockroachDB nodes per region. +1. Download and open our [multi-region StatefulSet configuration](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml). You'll save three versions of this file locally, one for each set of 3 CockroachDB nodes per region. {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml ~~~ Look for **TODO** comments in the file. 
These highlight fields you need to define before deploying your StatefulSet. @@ -814,7 +814,7 @@ The pod uses the `root` client certificate created earlier by the `setup.py` scr {% include_cached copy-clipboard.html %} ~~~ shell - kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/client-secure.yaml --context --namespace + kubectl create -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/client-secure.yaml --context --namespace ~~~ ~~~ diff --git a/src/current/v25.2/postgresql-compatibility.md b/src/current/v25.2/postgresql-compatibility.md index 15eff6fdc35..24b3639b1de 100644 --- a/src/current/v25.2/postgresql-compatibility.md +++ b/src/current/v25.2/postgresql-compatibility.md @@ -34,11 +34,11 @@ When set to `true`, multiple portals can be open at the same time, with their ex This feature has the following limitations: - Only read-only [`SELECT` queries]({% link {{ page.version.version }}/selection-queries.md %}) without [subqueries]({% link {{ page.version.version }}/subqueries.md %}) are supported. 
-- Postqueries (which are how CockroachDB executes [foreign key checks]({% link {{ page.version.version }}/foreign-key.md %}), for example) are not supported - [cockroachdb/cockroach#96398](https://github.com/cockroachdb/cockroach/issues/96398) -- [Distributed SQL execution]({% link {{ page.version.version }}/architecture/sql-layer.md %}#distsql) is not supported for multiple active portals; instead queries execute on the [gateway node]({% link {{ page.version.version }}/architecture/life-of-a-distributed-transaction.md %}#gateway) only - [cockroachdb/cockroach#100822](https://github.com/cockroachdb/cockroach/issues/100822) -- Only the latest execution of a statement from a pausable portal is recorded by the [trace infrastructure]({% link {{ page.version.version }}/show-trace.md %}) - [cockroachdb/cockroach#99404](https://github.com/cockroachdb/cockroach/issues/99404) +- Postqueries (which are how CockroachDB executes [foreign key checks]({% link {{ page.version.version }}/foreign-key.md %}), for example) are not supported - cockroachdb/cockroach#96398 +- [Distributed SQL execution]({% link {{ page.version.version }}/architecture/sql-layer.md %}#distsql) is not supported for multiple active portals; instead queries execute on the [gateway node]({% link {{ page.version.version }}/architecture/life-of-a-distributed-transaction.md %}#gateway) only - cockroachdb/cockroach#100822 +- Only the latest execution of a statement from a pausable portal is recorded by the [trace infrastructure]({% link {{ page.version.version }}/show-trace.md %}) - cockroachdb/cockroach#99404 -In addition to the known issues, additional performance testing is needed. The current list of known issues can be viewed [on GitHub using the `A-pausable-portals` label](https://github.com/cockroachdb/cockroach/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc++label%3AA-pausable-portals+), and we welcome bug reports. +In addition to the known issues, additional performance testing is needed. 
The current list of known issues can be viewed on GitHub using the `A-pausable-portals` label, and we welcome bug reports. ## Features that differ from PostgreSQL @@ -181,7 +181,7 @@ An `x` value less than `1` would result in the following error: pq: check constraint violated ~~~ -[#35370](https://github.com/cockroachdb/cockroach/issues/35370) +#35370 ### Column name from an outer column inside a subquery @@ -207,7 +207,7 @@ PostgreSQL: 1 ~~~ -[#46563](https://github.com/cockroachdb/cockroach/issues/46563) +#46563 ### SQL Compatibility diff --git a/src/current/v25.2/query-spatial-data.md b/src/current/v25.2/query-spatial-data.md index 572c3d1a1a1..7121ca9aa40 100644 --- a/src/current/v25.2/query-spatial-data.md +++ b/src/current/v25.2/query-spatial-data.md @@ -24,7 +24,7 @@ Just as CockroachDB strives for [PostgreSQL compatibility]({% link {{ page.versi CockroachDB does not implement the full list of PostGIS built-in functions and operators. Also, [spatial indexing works differently]({% link {{ page.version.version }}/spatial-indexes.md %}) (see the [Performance](#performance) section below). For a list of the spatial functions CockroachDB supports, see [Geospatial functions]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions). -If your application needs support for functions that are not yet implemented, check the [meta-issue for built-in function support on GitHub](https://github.com/cockroachdb/cockroach/issues/49203), which describes how to find an issue for the built-in function(s) you need. +If your application needs support for functions that are not yet implemented, check the meta-issue for built-in function support on GitHub, which describes how to find an issue for the built-in function(s) you need. For a list of other known limitations, see [Known Limitations]({% link {{ page.version.version }}/known-limitations.md %}#spatial-support-limitations). 
diff --git a/src/current/v25.2/read-committed.md b/src/current/v25.2/read-committed.md index 6b83f50d88d..0858d1e0244 100644 --- a/src/current/v25.2/read-committed.md +++ b/src/current/v25.2/read-committed.md @@ -20,7 +20,7 @@ Whereas `SERIALIZABLE` isolation guarantees data correctness by placing transact If your workload is already running well under `SERIALIZABLE` isolation, Cockroach Labs does not recommend changing to `READ COMMITTED` isolation unless there is a specific need. {{site.data.alerts.callout_info}} -`READ COMMITTED` on CockroachDB provides stronger isolation than `READ COMMITTED` on PostgreSQL. On CockroachDB, `READ COMMITTED` prevents anomalies within single statements. For complete details on how `READ COMMITTED` is implemented on CockroachDB, see the [Read Committed RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20230122_read_committed_isolation.md). +`READ COMMITTED` on CockroachDB provides stronger isolation than `READ COMMITTED` on PostgreSQL. On CockroachDB, `READ COMMITTED` prevents anomalies within single statements. For complete details on how `READ COMMITTED` is implemented on CockroachDB, see the [Read Committed RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20230122_read_committed_isolation.md). {{site.data.alerts.end}} ## Enable `READ COMMITTED` isolation @@ -918,5 +918,5 @@ SELECT * FROM schedules - [`SELECT ... 
FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) - [Serializable Transactions]({% link {{ page.version.version }}/demo-serializable.md %}) - [What Write Skew Looks Like](https://www.cockroachlabs.com/blog/what-write-skew-looks-like/) -- [Read Committed RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20230122_read_committed_isolation.md) +- [Read Committed RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20230122_read_committed_isolation.md) - [Migration Overview]({% link molt/migration-overview.md %}) diff --git a/src/current/v25.2/restore.md b/src/current/v25.2/restore.md index 794b3a338d2..8348ae9adb7 100644 --- a/src/current/v25.2/restore.md +++ b/src/current/v25.2/restore.md @@ -217,11 +217,11 @@ When restoring an individual table that references a user-defined type (e.g., [` - If there is an existing type in the cluster with the same name that is compatible with the type in the backup, CockroachDB will map the type in the backup to the type in the cluster. - If there is an existing type in the cluster with the same name but it is _not_ compatible with the type in the backup, the restore will not succeed and you will be asked to resolve the naming conflict. You can do this by either [dropping]({% link {{ page.version.version }}/drop-type.md %}) or [renaming]({% link {{ page.version.version }}/alter-type.md %}) the existing user-defined type. -In general, two types are compatible if they are the same kind (e.g., an enum is only compatible with other enums). Additionally, enums are only compatible if they have the same ordered set of elements that have also been [created in the same way](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20200331_enums.md#physical-layout). For example: +In general, two types are compatible if they are the same kind (e.g., an enum is only compatible with other enums). 
Additionally, enums are only compatible if they have the same ordered set of elements that have also been [created in the same way](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20200331_enums.md#physical-layout). For example: - `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes', 'no')` are compatible. - `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('no', 'yes')` are not compatible. -- `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes'); ALTER TYPE t2 ADD VALUE ('no')` are not compatible because they were not created in the same way. +- `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes'); ALTER TYPE t2 ADD VALUE ('no')` are not compatible because they were not [created in the same way](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20200331_enums.md#physical-layout). ### Object dependencies diff --git a/src/current/v25.2/schedule-cockroachdb-kubernetes.md b/src/current/v25.2/schedule-cockroachdb-kubernetes.md index dd67d369c5e..5972aa3012e 100644 --- a/src/current/v25.2/schedule-cockroachdb-kubernetes.md +++ b/src/current/v25.2/schedule-cockroachdb-kubernetes.md @@ -118,7 +118,7 @@ For more context on how these rules work, see the [Kubernetes documentation](htt Specify pod affinities and anti-affinities in `affinity.podAffinity` and `affinity.podAntiAffinity` in the {{ site.data.products.public-operator }}'s custom resource, which is used to [deploy the cluster]({% link {{ page.version.version }}/deploy-cockroachdb-with-kubernetes.md %}#initialize-the-cluster). If you specify multiple `matchExpressions` labels, the node must match all of them. If you specify multiple `values` for a label, the node can match any of the values. 
-The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. +The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. {% include_cached copy-clipboard.html %} ~~~ yaml diff --git a/src/current/v25.2/schedule-cockroachdb-operator.md b/src/current/v25.2/schedule-cockroachdb-operator.md index 4630ea6d9d1..25f040d1a91 100644 --- a/src/current/v25.2/schedule-cockroachdb-operator.md +++ b/src/current/v25.2/schedule-cockroachdb-operator.md @@ -88,7 +88,7 @@ Specify pod affinities and node anti-affinities in `cockroachdb.crdbCluster.podT The {{ site.data.products.cockroachdb-operator }} hard-codes the pod template to only allow one pod per Kubernetes node. If you need to override this value, you can [override the pod template]({% link {{ page.version.version }}/override-templates-cockroachdb-operator.md %}#override-the-default-pod). -The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. +The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. 
~~~ yaml cockroachdb: diff --git a/src/current/v25.2/spatial-tutorial.md b/src/current/v25.2/spatial-tutorial.md index a2f0e59af87..05ea4a5009e 100644 --- a/src/current/v25.2/spatial-tutorial.md +++ b/src/current/v25.2/spatial-tutorial.md @@ -966,7 +966,7 @@ Time: 1.447s total (execution 1.446s / network 0.000s) Unfortunately, this query is a bit slower than you would like: about 1.5 seconds on a single-node [`cockroach demo`]({% link {{ page.version.version }}/cockroach-demo.md %}) cluster on a laptop. There are several reasons for this: 1. You haven't created any indexes at all yet. The query is likely to be doing full table scans, which you will need to hunt down with [`EXPLAIN`]({% link {{ page.version.version }}/explain.md %}). -1. CockroachDB does not yet have built-in support for index-based nearest neighbor queries. If this feature is important to you, please comment with some information about your use case on [cockroachdb/cockroach#55227](https://github.com/cockroachdb/cockroach/issues/55227). +1. CockroachDB does not yet have built-in support for index-based nearest neighbor queries. If this feature is important to you, please comment with some information about your use case on cockroachdb/cockroach#55227. Let's look at the `EXPLAIN` output to see if there is something that can be done to improve this query's performance: diff --git a/src/current/v25.2/sql-feature-support.md b/src/current/v25.2/sql-feature-support.md index df9fa0711d8..d51a2393894 100644 --- a/src/current/v25.2/sql-feature-support.md +++ b/src/current/v25.2/sql-feature-support.md @@ -194,4 +194,4 @@ XML | ✗ | Standard | XML data can be stored as `BYTES`, but we do not offer XM Triggers | Partial | Standard | [Triggers documentation]({% link {{ page.version.version }}/triggers.md %}) Row-level TTL | ✓ | Common Extension | Automatically delete expired rows. For more information, see [Batch-delete expired data with Row-Level TTL]({% link {{ page.version.version }}/row-level-ttl.md %}). 
User-defined functions | Partial | Standard | [User-Defined Functions documentation]({% link {{ page.version.version }}/user-defined-functions.md %}) - `CREATE EXTENSION "uuid-ossp"` | ✓ | Common Extension | Provides access to several additional [UUID generation functions]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions). Note that these UUID functions are available without typing `CREATE EXTENSION "uuid-ossp"`. CockroachDB does not have full support for `CREATE EXTENSION`. [GitHub issue tracking `CREATE EXTENSION` support](https://github.com/cockroachdb/cockroach/issues/74777). + `CREATE EXTENSION "uuid-ossp"` | ✓ | Common Extension | Provides access to several additional [UUID generation functions]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions). Note that these UUID functions are available without typing `CREATE EXTENSION "uuid-ossp"`. CockroachDB does not have full support for `CREATE EXTENSION`. GitHub issue tracking `CREATE EXTENSION` support. diff --git a/src/current/v25.2/sql-name-resolution.md b/src/current/v25.2/sql-name-resolution.md index 554a9a65583..9600bfe1ef7 100644 --- a/src/current/v25.2/sql-name-resolution.md +++ b/src/current/v25.2/sql-name-resolution.md @@ -37,7 +37,7 @@ If you are upgrading to {{ page.version.version }}, take any combination of the - [Create new schemas]({% link {{ page.version.version }}/create-schema.md %}) in databases on your cluster. After the schemas are created, use [`ALTER TABLE ... RENAME`]({% link {{ page.version.version }}/alter-table.md %}#rename-to), [`ALTER SEQUENCE ... RENAME`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE ... RENAME`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW ... RENAME`]({% link {{ page.version.version }}/alter-view.md %}) statements to move objects between databases as needed. To move objects between schemas, use [`ALTER TABLE ... 
SET SCHEMA`]({% link {{ page.version.version }}/alter-table.md %}#set-schema), [`ALTER SEQUENCE ... SET SCHEMA`]({% link {{ page.version.version }}/alter-sequence.md %}), or [`ALTER VIEW ... SET SCHEMA`]({% link {{ page.version.version }}/alter-view.md %}). -- If your cluster contains cross-database references (e.g., a cross-database foreign key reference, or a cross-database view reference), use the relevant [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}), [`ALTER SEQUENCE`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW `]({% link {{ page.version.version }}/alter-view.md %}) statements to move any cross-referencing objects to the same database, but different schemas. Cross-database object references were allowed in earlier versions of CockroachDB to make database-object naming hierarchies more flexible for users. In v20.2, creating cross-database references are disabled for [foreign keys](foreign-key.html), [views]({% link {{ page.version.version }}/views.md %}), and [sequence ownership]({% link {{ page.version.version }}/create-sequence.md %}). For details, see [tracking issue](https://github.com/cockroachdb/cockroach/issues/55791). +- If your cluster contains cross-database references (e.g., a cross-database foreign key reference, or a cross-database view reference), use the relevant [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}), [`ALTER SEQUENCE`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW `]({% link {{ page.version.version }}/alter-view.md %}) statements to move any cross-referencing objects to the same database, but different schemas. Cross-database object references were allowed in earlier versions of CockroachDB to make database-object naming hierarchies more flexible for users. 
In v20.2, creating cross-database references are disabled for [foreign keys](foreign-key.html), [views]({% link {{ page.version.version }}/views.md %}), and [sequence ownership]({% link {{ page.version.version }}/create-sequence.md %}). For details, see the tracking issue. ## How name resolution works diff --git a/src/current/v25.2/st_union.md b/src/current/v25.2/st_union.md index 01796b337cf..1b58fad694a 100644 --- a/src/current/v25.2/st_union.md +++ b/src/current/v25.2/st_union.md @@ -12,7 +12,7 @@ Given a set of shapes (e.g., from a [selection query]({% link {{ page.version.ve - [`GEOMETRY`]({% link {{ page.version.version }}/architecture/glossary.md %}#geometry) {{site.data.alerts.callout_info}} -The non-aggregate version of `ST_Union` is not yet implemented. For more information, see [cockroach#49064](https://github.com/cockroachdb/cockroach/issues/49064). +The non-aggregate version of `ST_Union` is not yet implemented. For more information, see cockroach#49064. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v25.2/temporary-tables.md b/src/current/v25.2/temporary-tables.md index 3a92fc64c97..21af0938cd2 100644 --- a/src/current/v25.2/temporary-tables.md +++ b/src/current/v25.2/temporary-tables.md @@ -10,7 +10,7 @@ docs_area: develop To create a temp table, add `TEMP`/`TEMPORARY` to a [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}) or [`CREATE TABLE AS`]({% link {{ page.version.version }}/create-table-as.md %}) statement. For full syntax details, see the [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}#synopsis) and [`CREATE TABLE AS`]({% link {{ page.version.version }}/create-table-as.md %}#synopsis) pages. For example usage, see [Examples](#examples). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). 
+{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v25.2/views.md b/src/current/v25.2/views.md index f7fe7c58d2a..45b637beed3 100644 --- a/src/current/v25.2/views.md +++ b/src/current/v25.2/views.md @@ -653,7 +653,7 @@ To speed up queries on materialized views, you can add an [index]({% link {{ pag CockroachDB supports session-scoped temporary views. Unlike persistent views, temporary views can only be accessed from the session in which they were created, and they are dropped at the end of the session. You can create temporary views on both persistent tables and [temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v25.3/admission-control.md b/src/current/v25.3/admission-control.md index dd72fb6dcd4..eee14c739fe 100644 --- a/src/current/v25.3/admission-control.md +++ b/src/current/v25.3/admission-control.md @@ -22,7 +22,7 @@ Admission control works on a per-[node]({% link {{ page.version.version }}/archi For more details about how the admission control system works, see: -- The [Admission Control tech note](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/admission_control.md). +- [The Admission Control tech note](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/tech-notes/admission_control.md). - The blog post [Here's how CockroachDB keeps your database from collapsing under load](https://www.cockroachlabs.com/blog/admission-control-in-cockroachdb/). 
## Use cases for admission control @@ -166,6 +166,6 @@ The [DB Console Overload dashboard]({% link {{ page.version.version }}/ui-overlo ## See also - The [Overload Dashboard]({% link {{ page.version.version }}/ui-overload-dashboard.md %}) in the [DB Console]({% link {{ page.version.version }}/ui-overview.md %}). -- The [technical note for admission control](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/admission_control.md) for details on the design of the admission control system. +- The technical note for admission control for details on the design of the admission control system. - The blog post [Here's how CockroachDB keeps your database from collapsing under load](https://www.cockroachlabs.com/blog/admission-control-in-cockroachdb/). - The blog post [Rubbing Control Theory on the Go scheduler](https://www.cockroachlabs.com/blog/rubbing-control-theory/). diff --git a/src/current/v25.3/architecture/sql-layer.md b/src/current/v25.3/architecture/sql-layer.md index bfb67a48c05..2605879b93d 100644 --- a/src/current/v25.3/architecture/sql-layer.md +++ b/src/current/v25.3/architecture/sql-layer.md @@ -117,7 +117,7 @@ It's also important––for indexed columns––that this byte encoding preser However, for non-indexed columns (e.g., non-`PRIMARY KEY` columns), CockroachDB instead uses an encoding (known as "value encoding") which consumes less space but does not preserve ordering. -You can find more exhaustive detail in the [Encoding Tech Note](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/encoding.md). +You can find more exhaustive detail in [the Encoding Tech Note](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/tech-notes/encoding.md). 
### DistSQL @@ -134,7 +134,7 @@ To run SQL statements in a distributed fashion, we introduce a couple of concept - **Logical plan**: Similar to the AST/`planNode` tree described above, it represents the abstract (non-distributed) data flow through computation stages. - **Physical plan**: A physical plan is conceptually a mapping of the logical plan nodes to physical machines running `cockroach`. Logical plan nodes are replicated and specialized depending on the cluster topology. Like `planNodes` above, these components of the physical plan are scheduled and run on the cluster. -You can find much greater detail in the [DistSQL RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20160421_distributed_sql.md). +You can find much greater detail in [the DistSQL RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20160421_distributed_sql.md). ## Schema changes diff --git a/src/current/v25.3/cluster-settings.md b/src/current/v25.3/cluster-settings.md index 63169bea6cf..775dca5445c 100644 --- a/src/current/v25.3/cluster-settings.md +++ b/src/current/v25.3/cluster-settings.md @@ -26,7 +26,7 @@ These cluster settings have a broad impact on CockroachDB internals and affect a {% include {{page.version.version}}/sql/sql-defaults-cluster-settings-deprecation-notice.md %} -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/settings/settings.html %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/settings/settings.html{% endcapture %}{% include {{ cockroach_include }} %} ## View current cluster settings diff --git a/src/current/v25.3/cluster-setup-troubleshooting.md b/src/current/v25.3/cluster-setup-troubleshooting.md index 9972c334d98..605c8176ebe 100644 --- a/src/current/v25.3/cluster-setup-troubleshooting.md +++ b/src/current/v25.3/cluster-setup-troubleshooting.md @@ -104,7 +104,7 @@ If you are trying to 
run all nodes on the same machine, you might get the follow #### Store directory already exists ~~~ -ERROR: could not cleanup temporary directories from record file: could not lock temporary directory /Users/amruta/go/src/github.com/cockroachdb/cockroach/cockroach-data/cockroach-temp301343769, may still be in use: IO error: While lock file: /Users/amruta/go/src/github.com/cockroachdb/cockroach/cockroach-data/cockroach-temp301343769/TEMP_DIR.LOCK: Resource temporarily unavailable +ERROR: could not cleanup temporary directories from record file: could not lock temporary directory /cockroach-data/cockroach-temp301343769, may still be in use: IO error: While lock file: /cockroach-data/cockroach-temp301343769/TEMP_DIR.LOCK: Resource temporarily unavailable ~~~ **Explanation:** When starting a new node on the same machine, the directory you choose to store the data in also contains metadata identifying the cluster the data came from. This causes conflicts when you've already started a node on the server and then tried to start another cluster using the same directory. diff --git a/src/current/v25.3/cockroachdb-feature-availability.md b/src/current/v25.3/cockroachdb-feature-availability.md index ff39f751213..b1069bac1cc 100644 --- a/src/current/v25.3/cockroachdb-feature-availability.md +++ b/src/current/v25.3/cockroachdb-feature-availability.md @@ -198,7 +198,7 @@ Example: ### Check for constraint violations with `SCRUB` -Checks the consistency of [`UNIQUE`]({% link {{ page.version.version }}/unique.md %}) indexes, [`CHECK`]({% link {{ page.version.version }}/check.md %}) constraints, and more. Partially implemented; see [cockroachdb/cockroach#10425](https://github.com/cockroachdb/cockroach/issues/10425) for details. +Checks the consistency of [`UNIQUE`]({% link {{ page.version.version }}/unique.md %}) indexes, [`CHECK`]({% link {{ page.version.version }}/check.md %}) constraints, and more. Partially implemented; see cockroachdb/cockroach#10425 for details. 
{{site.data.alerts.callout_info}} This example uses the `users` table from our open-source, fictional peer-to-peer vehicle-sharing application, [MovR]({% link {{ page.version.version }}/movr.md %}). @@ -229,7 +229,7 @@ The [`SHOW RANGE ... FOR ROW`]({% link {{ page.version.version }}/show-range-for ### Temporary objects -[Temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}), [temporary views]({% link {{ page.version.version }}/views.md %}#temporary-views), and [temporary sequences]({% link {{ page.version.version }}/create-sequence.md %}#temporary-sequences) are in preview in CockroachDB. If you create too many temporary objects in a session, the performance of DDL operations will degrade. Dropping large numbers of temporary objects in rapid succession can also enqueue many [schema change GC jobs]({% link {{ page.version.version }}/show-jobs.md %}), which may further degrade cluster performance. This performance degradation could persist long after creating the temporary objects. For more details, see [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +[Temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}), [temporary views]({% link {{ page.version.version }}/views.md %}#temporary-views), and [temporary sequences]({% link {{ page.version.version }}/create-sequence.md %}#temporary-sequences) are in preview in CockroachDB. If you create too many temporary objects in a session, the performance of DDL operations will degrade. Dropping large numbers of temporary objects in rapid succession can also enqueue many [schema change GC jobs]({% link {{ page.version.version }}/show-jobs.md %}), which may further degrade cluster performance. This performance degradation could persist long after creating the temporary objects. For more details, see cockroachdb/cockroach#46260. 
To enable temporary objects, set the `experimental_enable_temp_tables` [session variable]({% link {{ page.version.version }}/show-vars.md %}) to `on`. diff --git a/src/current/v25.3/common-table-expressions.md b/src/current/v25.3/common-table-expressions.md index a76f13ff39d..bb43aa48b0c 100644 --- a/src/current/v25.3/common-table-expressions.md +++ b/src/current/v25.3/common-table-expressions.md @@ -446,7 +446,7 @@ SELECT COUNT(*) FROM temp; Because this pattern incurs the overhead of a new scan for each iteration, it is slower per row than a full scan. It is therefore faster than a full scan in cases (such as this one) where many rows are skipped, but is slower if they are not. {{site.data.alerts.callout_info}} -Some recursive CTEs are not not yet optimized. For details, see the [tracking issue](https://github.com/cockroachdb/cockroach/issues/89954). +Some recursive CTEs are not yet optimized. For details, see the tracking issue. {{site.data.alerts.end}} ## Correlated common table expressions diff --git a/src/current/v25.3/configure-replication-zones.md b/src/current/v25.3/configure-replication-zones.md index 02752f39314..caa85551d7b 100644 --- a/src/current/v25.3/configure-replication-zones.md +++ b/src/current/v25.3/configure-replication-zones.md @@ -98,7 +98,7 @@ For more information, see the following subsections: The hierarchy of inheritance for zone configs can be visualized using the following outline-style diagram, in which each level of indentation denotes an inheritance relationship. -The only exception to this simple inheritance relationship is that due to a known limitation, sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information, see [cockroachdb/cockroach#75862](https://github.com/cockroachdb/cockroach/issues/75862). 
+The only exception to this simple inheritance relationship is that due to a known limitation, sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information, see cockroachdb/cockroach#75862. ``` - default @@ -127,7 +127,7 @@ From the whole-system perspective, the hierarchy of schema object zone configs c The following diagram presents the same set of schema objects as the previous outline-style diagram, but using boxes and lines joined with arrows that represent the "top-down" view. -Each box represents a schema object in the zone configuration inheritance hierarchy. Each solid line ends in an arrow that points from a parent object to its child object, which will inherit the parent's values unless those values are changed at the child level. The dotted lines between partitions and sub-partitions represent the known limitation mentioned previously that sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information about this limitation, see [cockroachdb/cockroach#75862](https://github.com/cockroachdb/cockroach/issues/75862). +Each box represents a schema object in the zone configuration inheritance hierarchy. Each solid line ends in an arrow that points from a parent object to its child object, which will inherit the parent's values unless those values are changed at the child level. The dotted lines between partitions and sub-partitions represent the known limitation mentioned previously that sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information about this limitation, see cockroachdb/cockroach#75862. 
zone config inheritance diagram diff --git a/src/current/v25.3/create-sequence.md b/src/current/v25.3/create-sequence.md index 72bc62b717b..a0794958576 100644 --- a/src/current/v25.3/create-sequence.md +++ b/src/current/v25.3/create-sequence.md @@ -60,7 +60,7 @@ CockroachDB supports the following [SQL sequence functions]({% link {{ page.vers CockroachDB supports session-scoped temporary sequences. Unlike persistent sequences, temporary sequences can only be accessed from the session in which they were created, and they are dropped at the end of the session. You can create temporary sequences on both persistent tables and [temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v25.3/create-table.md b/src/current/v25.3/create-table.md index e14849f0f2c..8259b7ad462 100644 --- a/src/current/v25.3/create-table.md +++ b/src/current/v25.3/create-table.md @@ -155,7 +155,7 @@ If you use `GENERATED BY DEFAULT AS IDENTITY` to define the identity column, any Note the following limitations of identity columns: -- `GENERATED ALWAYS AS IDENTITY`/`GENERATED BY DEFAULT AS IDENTITY` is supported in [`ALTER TABLE ... ADD COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#add-column) statements only when the table being altered is empty, as [CockroachDB does not support back-filling sequential column data]({% link {{ page.version.version }}/known-limitations.md %}#adding-a-column-with-sequence-based-default-values). For more information, see [#42508](https://github.com/cockroachdb/cockroach/issues/42508). 
+- `GENERATED ALWAYS AS IDENTITY`/`GENERATED BY DEFAULT AS IDENTITY` is supported in [`ALTER TABLE ... ADD COLUMN`]({% link {{ page.version.version }}/alter-table.md %}#add-column) statements only when the table being altered is empty, as [CockroachDB does not support back-filling sequential column data]({% link {{ page.version.version }}/known-limitations.md %}#adding-a-column-with-sequence-based-default-values). For more information, see #42508. - Unlike PostgreSQL, CockroachDB does not support using the `OVERRIDING SYSTEM VALUE` clause in `INSERT`/`UPDATE`/`UPSERT` statements to overwrite `GENERATED ALWAYS AS IDENTITY` identity column values. For an example of an identity column, see [Create a table with an identity column](#create-a-table-with-an-identity-column). diff --git a/src/current/v25.3/disaster-recovery-planning.md b/src/current/v25.3/disaster-recovery-planning.md index de593915cf0..762eb549920 100644 --- a/src/current/v25.3/disaster-recovery-planning.md +++ b/src/current/v25.3/disaster-recovery-planning.md @@ -322,7 +322,7 @@ If your cluster is running, you do not have a backup that encapsulates the time If you have corrupted data in a database or table, [restore]({% link {{ page.version.version }}/restore.md %}) the object from a prior [backup]({% link {{ page.version.version }}/backup.md %}). If revision history is in the backup, you can restore from a [point in time]({% link {{ page.version.version }}/take-backups-with-revision-history-and-restore-from-a-point-in-time.md %}). -Instead of dropping the corrupted table or database, we recommend [renaming the table]({% link {{ page.version.version }}/alter-table.md %}#rename-to) or [renaming the database]({% link {{ page.version.version }}/alter-database.md %}#rename-to) so you have historical data to compare to later. 
If you drop a database, the database cannot be referenced with `AS OF SYSTEM TIME` queries (see [#51380](https://github.com/cockroachdb/cockroach/issues/51380) for more information), and you will need to take a backup that is backdated to the system time when the database still existed. +Instead of dropping the corrupted table or database, we recommend [renaming the table]({% link {{ page.version.version }}/alter-table.md %}#rename-to) or [renaming the database]({% link {{ page.version.version }}/alter-database.md %}#rename-to) so you have historical data to compare to later. If you drop a database, the database cannot be referenced with `AS OF SYSTEM TIME` queries (see #51380 for more information), and you will need to take a backup that is backdated to the system time when the database still existed. {{site.data.alerts.callout_info}} If the table you are restoring has foreign keys, [careful consideration]({% link {{ page.version.version }}/restore.md %}#remove-the-foreign-key-before-restore) should be applied to make sure data integrity is maintained during the restore process. 
diff --git a/src/current/v25.3/eventlog.md b/src/current/v25.3/eventlog.md index 2dd3c072d82..e7b813ded9d 100644 --- a/src/current/v25.3/eventlog.md +++ b/src/current/v25.3/eventlog.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/eventlog.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/eventlog.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v25.3/example-apps.md b/src/current/v25.3/example-apps.md index bea3cfd1876..0d5ba010933 100644 --- a/src/current/v25.3/example-apps.md +++ b/src/current/v25.3/example-apps.md @@ -14,7 +14,7 @@ Click the links in the tables below to see simple but complete example applicati If you are looking to do a specific task such as connect to the database, insert data, or run multi-statement transactions, see [this list of tasks](#tasks). {{site.data.alerts.callout_info}} -Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM with **partial** support. If you encounter problems, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward full support. +Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM with **partial** support. If you encounter problems, please open an issue with details to help us make progress toward full support. Note that tools with [**community-level** support]({% link {{ page.version.version }}/community-tooling.md %}) have been tested or developed by the CockroachDB community, but are not officially supported by Cockroach Labs. If you encounter problems with using these tools, please contact the maintainer of the tool with details. 
{{site.data.alerts.end}} diff --git a/src/current/v25.3/file-an-issue.md b/src/current/v25.3/file-an-issue.md index 9255e2c08b2..760c5b181db 100644 --- a/src/current/v25.3/file-an-issue.md +++ b/src/current/v25.3/file-an-issue.md @@ -28,7 +28,7 @@ To file an issue in GitHub, we need the following information: ### Template -You can use this as a template for [filing an issue in GitHub](https://github.com/cockroachdb/cockroach/issues/new): +You can use this as a template for filing an issue in GitHub: ~~~ diff --git a/src/current/v25.3/fips.md b/src/current/v25.3/fips.md index e490ddcd296..1ca62a6fa8e 100644 --- a/src/current/v25.3/fips.md +++ b/src/current/v25.3/fips.md @@ -58,7 +58,7 @@ A system must have FIPS mode enabled in the kernel before it can run the FIPS-re If you do not want to use the FIPS-ready CockroachDB Docker image directly, you can create a custom Docker image based on [Red Hat's Universal Base Image 8 Docker image](https://catalog.redhat.com/software/containers/ubi8/ubi/): -- You can model your Dockerfile on the one that Cockroach Labs uses to produce the [FIPS-ready Docker image](https://github.com/cockroachdb/cockroach/blob/master/build/deploy/Dockerfile) for CockroachDB. +- You can model your Dockerfile on the one that Cockroach Labs uses to produce the FIPS-ready Docker image for CockroachDB. - Your Dockerfile must install OpenSSL before it starts the `cockroach` binary. - You must enable FIPS mode on the Docker host kernel before it can run containers with FIPS mode enabled. The FIPS-ready CockroachDB Docker image must run with FIPS mode enabled. To enable FIPS mode in the Docker host kernel, refer to [Enable FIPS mode](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/security_hardening/assembly_installing-a-rhel-8-system-with-fips-mode-enabled_security-hardening) in Red Hat's documentation. 
To verify that FIPS mode is enabled, refer to [Verify that the kernel enforces FIPS mode](#verify-that-the-kernel-enforces-fips-mode). diff --git a/src/current/v25.3/foreign-key.md b/src/current/v25.3/foreign-key.md index 88a094b2a74..81b35083b42 100644 --- a/src/current/v25.3/foreign-key.md +++ b/src/current/v25.3/foreign-key.md @@ -92,7 +92,7 @@ For matching purposes, composite foreign keys can be in one of three states: For examples showing how these key matching algorithms work, see [Match composite foreign keys with `MATCH SIMPLE` and `MATCH FULL`](#match-composite-foreign-keys-with-match-simple-and-match-full). {{site.data.alerts.callout_info}} -CockroachDB does not support `MATCH PARTIAL`. For more information, see issue [#20305](https://github.com/cockroachdb/cockroach/issues/20305). +CockroachDB does not support `MATCH PARTIAL`. For more information, see issue #20305. {{site.data.alerts.end}} ### Foreign key actions diff --git a/src/current/v25.3/functions-and-operators.md b/src/current/v25.3/functions-and-operators.md index 9515f188f77..6fb648e24ff 100644 --- a/src/current/v25.3/functions-and-operators.md +++ b/src/current/v25.3/functions-and-operators.md @@ -51,7 +51,7 @@ In addition to the built-in functions described in the following sections, Cockr ## Built-in functions -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/functions.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/functions.md{% endcapture %}{% include {{ cockroach_include }} %} ## Aggregate functions @@ -61,11 +61,11 @@ For examples showing how to use aggregate functions, see [the `SELECT` clause do Non-commutative aggregate functions are sensitive to the order in which the rows are processed in the surrounding [`SELECT` clause]({% link {{ page.version.version }}/select-clause.md %}#aggregate-functions). 
To specify the order in which input rows are processed, you can add an [`ORDER BY`]({% link {{ page.version.version }}/order-by.md %}) clause within the function argument list. For examples, see the [`SELECT` clause]({% link {{ page.version.version }}/select-clause.md %}#order-aggregate-function-input-rows-by-column) documentation. {{site.data.alerts.end}} -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/aggregates.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/aggregates.md{% endcapture %}{% include {{ cockroach_include }} %} ## Window functions -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/window_functions.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/window_functions.md{% endcapture %}{% include {{ cockroach_include }} %} ## Operators @@ -137,7 +137,7 @@ The following table lists all CockroachDB operators from highest to lowest prece ### Supported operations -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/operators.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/operators.md{% endcapture %}{% include {{ cockroach_include }} %} {% comment %} ## `CAST()` diff --git a/src/current/v25.3/install-client-drivers.md b/src/current/v25.3/install-client-drivers.md index 984671a0dfc..4d919756ce3 100644 --- a/src/current/v25.3/install-client-drivers.md +++ b/src/current/v25.3/install-client-drivers.md @@ -8,7 +8,7 @@ docs_area: develop CockroachDB supports both native drivers and the PostgreSQL wire protocol, so most available PostgreSQL client drivers and ORM frameworks should work with CockroachDB. 
Choose a language for supported clients, and follow the installation steps. After you install a client library, you can [connect to the database]({% link {{ page.version.version }}/connect-to-the-database.md %}). {{site.data.alerts.callout_info}} -Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM framework with **partial** support. If you encounter problems, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward full support. +Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM framework with **partial** support. If you encounter problems, please open an issue with details to help us make progress toward full support. {{site.data.alerts.end}}
      diff --git a/src/current/v25.3/install-cockroachdb-mac.md b/src/current/v25.3/install-cockroachdb-mac.md index b8e5583b7f0..2cab92da935 100644 --- a/src/current/v25.3/install-cockroachdb-mac.md +++ b/src/current/v25.3/install-cockroachdb-mac.md @@ -201,4 +201,4 @@ CockroachDB runtimes built for the ARM architecture have the following limitatio {% comment %}v22.2.0+{% endcomment %} -On macOS ARM systems, [spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the GEOS libraries. Users needing spatial features on an ARM Mac may instead [run the Intel binary](#install-the-binary) or use the[Docker container image](#use-docker). Refer to [GitHub issue #93161](https://github.com/cockroachdb/cockroach/issues/93161) for more information. +On macOS ARM systems, [spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the GEOS libraries. Users needing spatial features on an ARM Mac may instead [run the Intel binary](#install-the-binary) or use the [Docker container image](#use-docker). Refer to GitHub issue #93161 for more information. 
diff --git a/src/current/v25.3/intellij-idea.md b/src/current/v25.3/intellij-idea.md index de46249320f..ef1ce93b9d4 100644 --- a/src/current/v25.3/intellij-idea.md +++ b/src/current/v25.3/intellij-idea.md @@ -81,7 +81,7 @@ You can now use IntelliJ's [database tool window](https://www.jetbrains.com/help ## Report issues with IntelliJ IDEA & CockroachDB -If you encounter issues other than those outlined above, please [file an issue on the `cockroachdb/cockroach` GitHub repo](https://github.com/cockroachdb/cockroach/issues/new?template=bug_report.md), including the following details about the environment where you encountered the issue: +If you encounter issues other than those outlined above, please file an issue on the `cockroachdb/cockroach` GitHub repo, including the following details about the environment where you encountered the issue: - CockroachDB version ([`cockroach version`]({% link {{ page.version.version }}/cockroach-version.md %})) - IntelliJ IDEA version diff --git a/src/current/v25.3/known-limitations.md b/src/current/v25.3/known-limitations.md index ddc193ed9e9..b588307b904 100644 --- a/src/current/v25.3/known-limitations.md +++ b/src/current/v25.3/known-limitations.md @@ -69,7 +69,7 @@ CockroachDB supports the [PostgreSQL wire protocol](https://www.postgresql.org/d #### `CAST` expressions containing a subquery with an `ENUM` target are not supported -Casting subqueries to ENUMs in views and UDFs is not supported. [#108184](https://github.com/cockroachdb/cockroach/issues/108184) +Casting subqueries to ENUMs in views and UDFs is not supported. #108184 #### Statements containing multiple modification subqueries of the same table are disallowed @@ -78,13 +78,13 @@ Statements containing multiple modification subqueries mutating the same row cou - Set the `sql.multiple_modifications_of_table.enabled` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) to `true`. 
- Use the `enable_multiple_modifications_of_table` [session variable]({% link {{ page.version.version }}/set-vars.md %}). -If multiple mutations inside the same statement affect different tables with [`FOREIGN KEY`]({% link {{ page.version.version }}/foreign-key.md %}) relations and `ON CASCADE` clauses between them, the results will be different from what is expected in PostgreSQL. [#70731](https://github.com/cockroachdb/cockroach/issues/70731) +If multiple mutations inside the same statement affect different tables with [`FOREIGN KEY`]({% link {{ page.version.version }}/foreign-key.md %}) relations and `ON CASCADE` clauses between them, the results will be different from what is expected in PostgreSQL. #70731 #### Using `default_int_size` session variable in batch of statements When setting the `default_int_size` [session variable]({% link {{ page.version.version }}/set-vars.md %}) in a batch of statements such as `SET default_int_size='int4'; SELECT 1::IN`, the `default_int_size` variable will not take effect until the next statement. Statement parsing is asynchronous with statement execution. -As a workaround, set `default_int_size` via your database driver, or ensure that `SET default_int_size` is in its own statement. [#32846](https://github.com/cockroachdb/cockroach/issues/32846) +As a workaround, set `default_int_size` via your database driver, or ensure that `SET default_int_size` is in its own statement. #32846 #### Overload resolution for collated strings @@ -111,15 +111,15 @@ Many string operations are not properly overloaded for [collated strings]({% lin pq: unsupported binary operator: || ~~~ -[#10679](https://github.com/cockroachdb/cockroach/issues/10679) +#10679 #### Current sequence value not checked when updating min/max value -Altering the minimum or maximum value of a series does not check the current value of a series. 
This means that it is possible to silently set the maximum to a value less than, or a minimum value greater than, the current value. [#23719](https://github.com/cockroachdb/cockroach/issues/23719) +Altering the minimum or maximum value of a series does not check the current value of a series. This means that it is possible to silently set the maximum to a value less than, or a minimum value greater than, the current value. #23719 #### `null_ordered_last` does not produce correct results with tuples -By default, CockroachDB orders `NULL`s before all other values. For compatibility with PostgreSQL, the `null_ordered_last` [session variable]({% link {{ page.version.version }}/set-vars.md %}) was added, which changes the default to order `NULL` values after all other values. This works in most cases, due to some transformations CockroachDB makes in the optimizer to add extra ordering columns. However, it does not work when the ordering column is a tuple. [#93558](https://github.com/cockroachdb/cockroach/issues/93558) +By default, CockroachDB orders `NULL`s before all other values. For compatibility with PostgreSQL, the `null_ordered_last` [session variable]({% link {{ page.version.version }}/set-vars.md %}) was added, which changes the default to order `NULL` values after all other values. This works in most cases, due to some transformations CockroachDB makes in the optimizer to add extra ordering columns. However, it does not work when the ordering column is a tuple. #93558 ### Functions and procedures @@ -169,10 +169,10 @@ Transactions with [priority `HIGH`]({% link {{ page.version.version }}/transacti ERROR: unimplemented: cannot use ROLLBACK TO SAVEPOINT in a HIGH PRIORITY transaction containing DDL SQLSTATE: 0A000 HINT: You have attempted to use a feature that is not yet implemented. 
-See: https://github.com/cockroachdb/cockroach/issues/46414 +See: cockroachdb/cockroach#46414 ~~~ -[#46414](https://github.com/cockroachdb/cockroach/issues/46414) +#46414 #### `CANCEL JOB` limitations @@ -227,7 +227,7 @@ HINT: You have attempted to use a feature that is not yet implemented. See: https://go.crdb.dev/issue-v/42508/v24.2 ~~~ -[#42508](https://github.com/cockroachdb/cockroach/issues/42508) +#42508 #### Dropping a column referenced by a partial index @@ -254,9 +254,9 @@ When executing an [`ALTER TABLE ADD COLUMN`]({% link {{ page.version.version }}/ Some [schema changes]({% link {{ page.version.version }}/online-schema-changes.md %}) that [drop columns]({% link {{ page.version.version }}/alter-table.md %}#drop-column) cannot be [rolled back]({% link {{ page.version.version }}/rollback-transaction.md %}) properly. -In some cases, the rollback will succeed, but the column data might be partially or totally missing, or stale due to the asynchronous nature of the schema change. [#46541](https://github.com/cockroachdb/cockroach/issues/46541) +In some cases, the rollback will succeed, but the column data might be partially or totally missing, or stale due to the asynchronous nature of the schema change. #46541 -In other cases, the rollback will fail in such a way that will never be cleaned up properly, leaving the table descriptor in a state where no other schema changes can be run successfully. 
#47712 To reduce the chance that a column drop will roll back incorrectly: @@ -292,7 +292,7 @@ As a workaround, you can either [manually split a table's columns into multiple #### Placeholders in `PARTITION BY` -{% include {{ page.version.version }}/known-limitations/partitioning-with-placeholders.md %} [#19464](https://github.com/cockroachdb/cockroach/issues/19464) +{% include {{ page.version.version }}/known-limitations/partitioning-with-placeholders.md %} #19464 #### Unsupported trigram syntax @@ -308,7 +308,7 @@ The following PostgreSQL syntax and features are currently unsupported for [full #### CockroachDB does not allow inverted indexes with `STORING` -CockroachDB does not allow inverted indexes with a [`STORING` column]({% link {{ page.version.version }}/create-index.md %}#store-columns). [#88278](https://github.com/cockroachdb/cockroach/issues/88278) +CockroachDB does not allow inverted indexes with a [`STORING` column]({% link {{ page.version.version }}/create-index.md %}#store-columns). #88278 #### Multiple arbiter indexes for `INSERT ON CONFLICT DO UPDATE` @@ -328,19 +328,19 @@ CockroachDB does not allow inverted indexes with a [`STORING` column]({% link {{ CockroachDB supports efficiently storing and querying [spatial data]({% link {{ page.version.version }}/export-spatial-data.md %}), with the following limitations: -- Not all [PostGIS spatial functions](https://postgis.net/docs/reference.html) are supported. [#49203](https://github.com/cockroachdb/cockroach/issues/49203) +- Not all [PostGIS spatial functions](https://postgis.net/docs/reference.html) are supported. #49203 -- The `AddGeometryColumn` [spatial function]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) only allows constant arguments. 
[#49402](https://github.com/cockroachdb/cockroach/issues/49402) +- The `AddGeometryColumn` [spatial function]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions) only allows constant arguments. #49402 -- The `AddGeometryColumn` spatial function only allows the `true` value for its `use_typmod` parameter. [#49448](https://github.com/cockroachdb/cockroach/issues/49448) +- The `AddGeometryColumn` spatial function only allows the `true` value for its `use_typmod` parameter. #49448 -- CockroachDB does not support the `@` operator. Instead of using `@` in spatial expressions, we recommend using the inverse, with `~`. For example, instead of `a @ b`, use `b ~ a`. [#56124](https://github.com/cockroachdb/cockroach/issues/56124) +- CockroachDB does not support the `@` operator. Instead of using `@` in spatial expressions, we recommend using the inverse, with `~`. For example, instead of `a @ b`, use `b ~ a`. #56124 -- CockroachDB does not yet support [`INSERT`]({% link {{ page.version.version }}/insert.md %})s into the [`spatial_ref_sys` table]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-system-tables). This limitation also blocks the [`ogr2ogr -f PostgreSQL` file conversion command](https://gdal.org/programs/ogr2ogr.html#cmdoption-ogr2ogr-f). [#55903](https://github.com/cockroachdb/cockroach/issues/55903) +- CockroachDB does not yet support [`INSERT`]({% link {{ page.version.version }}/insert.md %})s into the [`spatial_ref_sys` table]({% link {{ page.version.version }}/architecture/glossary.md %}#spatial-system-tables). This limitation also blocks the [`ogr2ogr -f PostgreSQL` file conversion command](https://gdal.org/programs/ogr2ogr.html#cmdoption-ogr2ogr-f). #55903 -- CockroachDB does not yet support [k-nearest neighbors](https://wikipedia.org/wiki/K-nearest_neighbors_algorithm). 
[#55227](https://github.com/cockroachdb/cockroach/issues/55227) +- CockroachDB does not yet support [k-nearest neighbors](https://wikipedia.org/wiki/K-nearest_neighbors_algorithm). #55227 -- CockroachDB does not support using [schema name prefixes]({% link {{ page.version.version }}/sql-name-resolution.md %}#how-name-resolution-works) to refer to [data types]({% link {{ page.version.version }}/data-types.md %}) with type modifiers (e.g., `public.geometry(linestring, 4326)`). Instead, use fully-unqualified names to refer to data types with type modifiers (e.g., `geometry(linestring,4326)`). [#56492](https://github.com/cockroachdb/cockroach/issues/56492) +- CockroachDB does not support using [schema name prefixes]({% link {{ page.version.version }}/sql-name-resolution.md %}#how-name-resolution-works) to refer to [data types]({% link {{ page.version.version }}/data-types.md %}) with type modifiers (e.g., `public.geometry(linestring, 4326)`). Instead, use fully-unqualified names to refer to data types with type modifiers (e.g., `geometry(linestring,4326)`). #56492 - {% include {{ page.version.version }}/known-limitations/srid-4326-limitations.md %} @@ -354,9 +354,9 @@ Refer to [`OID` best practices]({% link {{ page.version.version }}/oid.md %}#bes - {% include {{page.version.version}}/cdc/avro-udt-composite.md %} - {% include {{page.version.version}}/cdc/csv-udt-composite.md %} -- Updating subfields of composite types using dot syntax results in a syntax error. [#102984](https://github.com/cockroachdb/cockroach/issues/102984) +- Updating subfields of composite types using dot syntax results in a syntax error. #102984 -- Tuple elements cannot be accessed without enclosing the [composite variable]({% link {{ page.version.version }}/create-type.md %}#create-a-composite-data-type) name in parentheses. For example, `(OLD).column` and `(NEW).column` when used in [triggers]({% link {{ page.version.version }}/triggers.md %}). 
[#114687](https://github.com/cockroachdb/cockroach/issues/114687) +- Tuple elements cannot be accessed without enclosing the [composite variable]({% link {{ page.version.version }}/create-type.md %}#create-a-composite-data-type) name in parentheses. For example, `(OLD).column` and `(NEW).column` when used in [triggers]({% link {{ page.version.version }}/triggers.md %}). #114687 #### `ALTER TYPE` limitations @@ -406,7 +406,7 @@ However, if there is no host at the target IP address, or if a firewall rule blo - Configure any active network firewalls to allow node-to-node traffic. - Verify that orchestration tools (e.g., Kubernetes) are configured to use the correct network connection information. -[#53410](https://github.com/cockroachdb/cockroach/issues/53410) +#53410 #### No guaranteed state switch from `DECOMMISSIONING` to `DECOMMISSIONED` if `node decommission` is interrupted @@ -415,7 +415,7 @@ There is no guaranteed state switch from `DECOMMISSIONING` to `DECOMMISSIONED` i - The `cockroach node decommission --wait-all` command was run and then interrupted - The `cockroach node decommission --wait=none` command was run -This is because the state flip is effected by the CLI program at the end. Only the CLI (or its underlying API call) is able to finalize the "decommissioned" state. If the command is interrupted, or `--wait=none` is used, the state will only flip to "decommissioned" when the CLI program is run again after decommissioning has done all its work. [#94430](https://github.com/cockroachdb/cockroach/issues/94430) +This is because the state flip is effected by the CLI program at the end. Only the CLI (or its underlying API call) is able to finalize the "decommissioned" state. If the command is interrupted, or `--wait=none` is used, the state will only flip to "decommissioned" when the CLI program is run again after decommissioning has done all its work. 
#94430 #### Simultaneous client connections and running queries on a single node @@ -451,7 +451,7 @@ As a workaround, [execute the file from the command line]({% link {{ page.versio #### Spatial features disabled for ARM Macs -[Spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the [GEOS](https://libgeos.org/) libraries. Users needing spatial features on an ARM Mac may instead [use Rosetta](https://developer.apple.com/documentation/virtualization/running_intel_binaries_in_linux_vms_with_rosetta) to [run the Intel binary]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#install-binary) or use the [Docker image]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#use-docker) distribution. [GitHub tracking issue](https://github.com/cockroachdb/cockroach/issues/93161) +[Spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the [GEOS](https://libgeos.org/) libraries. Users needing spatial features on an ARM Mac may instead [use Rosetta](https://developer.apple.com/documentation/virtualization/running_intel_binaries_in_linux_vms_with_rosetta) to [run the Intel binary]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#install-binary) or use the [Docker image]({% link {{ page.version.version }}/install-cockroachdb-mac.md %}#use-docker) distribution. GitHub tracking issue #### Logging system limitations @@ -496,7 +496,7 @@ Accessing the DB Console for a secure cluster now requires login information (i. The [`COMMENT ON`]({% link {{ page.version.version }}/comment-on.md %}) statement associates comments to databases, tables, or columns. However, the internal table (`system.comments`) in which these comments are stored is not captured by a [`BACKUP`]({% link {{ page.version.version }}/backup.md %}) of a table or database. 
-As a workaround, take a cluster backup instead, as the `system.comments` table is included in cluster backups. [#44396](https://github.com/cockroachdb/cockroach/issues/44396) +As a workaround, take a cluster backup instead, as the `system.comments` table is included in cluster backups. #44396 #### `SHOW BACKUP` does not support symlinks for nodelocal @@ -537,8 +537,8 @@ Change data capture (CDC) provides efficient, distributed, row-level changefeeds The SQL optimizer has limitations under certain isolation levels: -- The new implementation of `SELECT FOR UPDATE` is not yet the default setting under `SERIALIZABLE` isolation. It can be used under `SERIALIZABLE` isolation by setting the `optimizer_use_lock_op_for_serializable` [session setting]({% link {{ page.version.version }}/session-variables.md %}) to `true`. [#114737](https://github.com/cockroachdb/cockroach/issues/114737) -- `SELECT FOR UPDATE` does not lock completely-`NULL` column families in multi-column-family tables. [#116836](https://github.com/cockroachdb/cockroach/issues/116836) +- The new implementation of `SELECT FOR UPDATE` is not yet the default setting under `SERIALIZABLE` isolation. It can be used under `SERIALIZABLE` isolation by setting the `optimizer_use_lock_op_for_serializable` [session setting]({% link {{ page.version.version }}/session-variables.md %}) to `true`. #114737 +- `SELECT FOR UPDATE` does not lock completely-`NULL` column families in multi-column-family tables. #116836 #### Statistics limitations @@ -547,7 +547,7 @@ The SQL optimizer has limitations under certain isolation levels: #### Incorrect query plans for partitions with `NULL` values -In cases where the partition definition includes a comparison with `NULL` and a query constraint, incorrect query plans are returned. However, this case uses non-standard partitioning which defines partitions which could never hold values, so it is not likely to occur in production environments. 
[#82774](https://github.com/cockroachdb/cockroach/issues/82774) +In cases where the partition definition includes a comparison with `NULL` and a query constraint, incorrect query plans are returned. However, this case uses non-standard partitioning which defines partitions which could never hold values, so it is not likely to occur in production environments. #82774 #### Vectorized engine limitations @@ -555,15 +555,15 @@ In cases where the partition definition includes a comparison with `NULL` and a #### `transaction_rows_read_err` and `transaction_rows_written_err` do not halt query execution -The `transaction_rows_read_err` and `transaction_rows_written_err` [session settings]({% link {{ page.version.version }}/set-vars.md %}) limit the number of rows read or written by a single [transaction]({% link {{ page.version.version }}/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction). These session settings will fail the transaction with an error, but not until the current query finishes executing and the results have been returned to the client. [#70473](https://github.com/cockroachdb/cockroach/issues/70473) +The `transaction_rows_read_err` and `transaction_rows_written_err` [session settings]({% link {{ page.version.version }}/set-vars.md %}) limit the number of rows read or written by a single [transaction]({% link {{ page.version.version }}/transactions.md %}#limit-the-number-of-rows-written-or-read-in-a-transaction). These session settings will fail the transaction with an error, but not until the current query finishes executing and the results have been returned to the client. #70473 #### `sql.guardrails.max_row_size_err` misses indexed virtual computed columns -The `sql.guardrails.max_row_size_err` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) misses large rows caused by indexed virtual computed columns. This is because the guardrail only checks the size of primary key rows, not secondary index rows. 
[#69540](https://github.com/cockroachdb/cockroach/issues/69540) +The `sql.guardrails.max_row_size_err` [cluster setting]({% link {{ page.version.version }}/cluster-settings.md %}) misses large rows caused by indexed virtual computed columns. This is because the guardrail only checks the size of primary key rows, not secondary index rows. #69540 #### Using `LIKE...ESCAPE` in `WHERE` and `HAVING` constraints -CockroachDB tries to optimize most comparisons operators in `WHERE` and `HAVING` clauses into constraints on SQL indexes by only accessing selected rows. This is done for `LIKE` clauses when a common prefix for all selected rows can be determined in the search pattern (e.g., `... LIKE 'Joe%'`). However, this optimization is not yet available if the `ESCAPE` keyword is also used. [#30192](https://github.com/cockroachdb/cockroach/issues/30192) +CockroachDB tries to optimize most comparison operators in `WHERE` and `HAVING` clauses into constraints on SQL indexes by only accessing selected rows. This is done for `LIKE` clauses when a common prefix for all selected rows can be determined in the search pattern (e.g., `... LIKE 'Joe%'`). However, this optimization is not yet available if the `ESCAPE` keyword is also used. #30192 #### Import with a high amount of disk contention @@ -718,7 +718,7 @@ UNION ALL SELECT * FROM t1 LEFT JOIN t2 ON st_contains(t1.geom, t2.geom) AND t2. (54 rows) ``` -[#59649](https://github.com/cockroachdb/cockroach/issues/59649) +#59649 #### Locality optimized search limitations @@ -731,4 +731,4 @@ UNION ALL SELECT * FROM t1 LEFT JOIN t2 ON st_contains(t1.geom, t2.geom) AND t2. #### Inverted join for `tsvector` and `tsquery` types is not supported -CockroachDB cannot index-accelerate queries with `@@` predicates when both sides of the operator are variables. 
#102731 diff --git a/src/current/v25.3/kubernetes-performance.md b/src/current/v25.3/kubernetes-performance.md index 6fa4d534442..cfe4aec6725 100644 --- a/src/current/v25.3/kubernetes-performance.md +++ b/src/current/v25.3/kubernetes-performance.md @@ -24,9 +24,9 @@ Before you focus on optimizing a Kubernetes-orchestrated CockroachDB cluster: A number of independent factors affect performance when running CockroachDB on Kubernetes. Most are easiest to change before you create your CockroachDB cluster. If you need to modify a CockroachDB cluster that is already running on Kubernetes, extra care and testing is strongly recommended. -The following sections show how to modify excerpts from our provided Kubernetes configuration YAML files. You can find the most up-to-date versions of these files on GitHub: [one for running CockroachDB in secure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset-secure.yaml) and one for [running CockroachDB in insecure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml). +The following sections show how to modify excerpts from our provided Kubernetes configuration YAML files. You can find the most up-to-date versions of these files on GitHub: [one for running CockroachDB in secure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset-secure.yaml) and one for [running CockroachDB in insecure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml). -You can also use a [performance-optimized configuration file for secure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml) or [insecure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml). 
Be sure to modify the file wherever there is a `TODO` comment. +You can also use a [performance-optimized configuration file for secure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml) or [insecure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml). Be sure to modify the file wherever there is a `TODO` comment. ### Version of CockroachDB @@ -336,7 +336,7 @@ If for some reason setting appropriate resource requests still isn't getting you #### Client applications on the same machines as CockroachDB -Running client applications such as benchmarking applications on the same machines as CockroachDB can be even worse than just having Kubernetes system pods on the same machines. They are very likely to end up competing for resources, because when the applications get more loaded than usual, so will the CockroachDB processes. The best way to avoid this is to [set resource requests and limits](#resource-requests-and-limits), but if you are unwilling or unable to do that for some reason, you can also set [anti-affinity scheduling policies](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) on your client applications. Anti-affinity policies are placed in the pod spec, so if you wanted to change our provided example load generator app, you'd change [these lines](https://github.com/cockroachdb/cockroach/blob/98c506c48f3517d1ac1aadb6a09e1b23ad672c37/cloud/kubernetes/example-app.yaml#L11-L12): +Running client applications such as benchmarking applications on the same machines as CockroachDB can be even worse than just having Kubernetes system pods on the same machines. They are very likely to end up competing for resources, because when the applications get more loaded than usual, so will the CockroachDB processes. 
The best way to avoid this is to [set resource requests and limits](#resource-requests-and-limits), but if you are unwilling or unable to do that for some reason, you can also set [anti-affinity scheduling policies](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) on your client applications. Anti-affinity policies are placed in the pod spec, so if you wanted to change our provided example load generator app, you'd change [these lines](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml): ~~~ yaml spec: diff --git a/src/current/v25.3/log-formats.md b/src/current/v25.3/log-formats.md index 7c715f38c89..cfcf8512b19 100644 --- a/src/current/v25.3/log-formats.md +++ b/src/current/v25.3/log-formats.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/logformats.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/logformats.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v25.3/logging.md b/src/current/v25.3/logging.md index a9b22c4c05f..357817bc212 100644 --- a/src/current/v25.3/logging.md +++ b/src/current/v25.3/logging.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/logging.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/logging.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v25.3/migrate-from-avro.md b/src/current/v25.3/migrate-from-avro.md index de42d232917..eb46e391fce 100644 --- a/src/current/v25.3/migrate-from-avro.md +++ b/src/current/v25.3/migrate-from-avro.md @@ -161,7 +161,7 @@ There are additional import 
[options][option] you can use when importing binary - `records_terminated_by`, which specifies the unicode character used to indicate new lines in the input binary or JSON file (default: `\n`). {{site.data.alerts.callout_info}} -The following example uses sample data generated by [Avro tools](https://github.com/cockroachdb/cockroach/tree/master/pkg/sql/importer/testdata/avro). +The following example uses sample data generated by Avro tools. {{site.data.alerts.end}} For example, to import the data from `simple-schema.json` into a `simple` table, first [create the table]({% link {{ page.version.version }}/create-table.md %}) to import into. Then run `IMPORT INTO` with the following options: diff --git a/src/current/v25.3/monitor-cockroachdb-kubernetes.md b/src/current/v25.3/monitor-cockroachdb-kubernetes.md index e53bde93041..133923f777a 100644 --- a/src/current/v25.3/monitor-cockroachdb-kubernetes.md +++ b/src/current/v25.3/monitor-cockroachdb-kubernetes.md @@ -132,7 +132,7 @@ If you're on Hosted GKE, before starting, make sure the email address associated {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/prometheus.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/prometheus.yaml ~~~ {{site.data.alerts.callout_info}} @@ -181,14 +181,14 @@ If you're on Hosted GKE, before starting, make sure the email address associated ## Configure Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. 
+Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. -1. Download our alertmanager-config.yaml configuration file: +1. Download our alertmanager-config.yaml configuration file: {% include_cached copy-clipboard.html %} ~~~ shell $ curl -O \ - https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager-config.yaml + https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager-config.yaml ~~~ 1. Edit the `alertmanager-config.yaml` file to [specify the desired receivers for notifications](https://prometheus.io/docs/alerting/configuration/#receiver). Initially, the file contains a placeholder web hook. @@ -218,12 +218,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen The name of the secret, `alertmanager-cockroachdb`, must match the name used in the `alertmanager.yaml` file. If they differ, the Alertmanager instance will start without configuration, and nothing will happen. {{site.data.alerts.end}} -1. Use our [`alertmanager.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: +1. 
Use our [`alertmanager.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml ~~~ ~~~ @@ -248,12 +248,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen Alertmanager -1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml): +1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml): {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alert-rules.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml ~~~ ~~~ diff --git a/src/current/v25.3/monitor-cockroachdb-operator.md b/src/current/v25.3/monitor-cockroachdb-operator.md index e44c73c252d..7555d79b787 100644 --- a/src/current/v25.3/monitor-cockroachdb-operator.md +++ b/src/current/v25.3/monitor-cockroachdb-operator.md @@ -76,7 +76,7 @@ If you're on Hosted GKE, before starting, make sure the email address associated {% include_cached copy-clipboard.html %} ~~~ shell - curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/prometheus.yaml + curl -O 
https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/prometheus.yaml ~~~ 1. Apply the Prometheus manifest. This creates the various objects necessary to run a Prometheus instance: @@ -119,13 +119,13 @@ For more details on using the Prometheus UI, see their [official documentation]( ## Configure Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. +Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. -1. Download our [alertmanager-config.yaml](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager-config.yaml) configuration file: +1. Download our [alertmanager-config.yaml](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager-config.yaml) configuration file: {% include_cached copy-clipboard.html %} ~~~ shell - curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager-config.yaml + curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager-config.yaml ~~~ 1. 
Edit the `alertmanager-config.yaml` file to [specify the desired receivers for notifications](https://prometheus.io/docs/alerting/configuration/#receiver). Initially, the file contains a placeholder web hook. @@ -152,12 +152,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen The name of the secret, `alertmanager-cockroachdb`, must match the name used in the `alertmanager.yaml` file. If they differ, the Alertmanager instance will start without configuration, and nothing will happen. {{site.data.alerts.end}} -1. Use our [alertmanager.yaml](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: +1. Use our [alertmanager.yaml](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: {% include_cached copy-clipboard.html %} ~~~ shell kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml ~~~ ~~~ shell alertmanager.monitoring.coreos.com/cockroachdb created @@ -180,12 +180,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen Alertmanager -1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml): +1. 
Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml): {% include_cached copy-clipboard.html %} ~~~ shell kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alert-rules.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml ~~~ ~~~ shell prometheusrule.monitoring.coreos.com/prometheus-cockroachdb-rules created diff --git a/src/current/v25.3/monitor-cockroachdb-with-prometheus.md b/src/current/v25.3/monitor-cockroachdb-with-prometheus.md index 2a9ff6f04ae..f2ca3db97c8 100644 --- a/src/current/v25.3/monitor-cockroachdb-with-prometheus.md +++ b/src/current/v25.3/monitor-cockroachdb-with-prometheus.md @@ -15,7 +15,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration - Make sure you have already started a CockroachDB cluster, either [locally]({% link {{ page.version.version }}/start-a-local-cluster.md %}) or in a [production environment]({% link {{ page.version.version }}/manual-deployment.md %}). -- Note that all files used in this tutorial can be found in the [`monitoring`](https://github.com/cockroachdb/cockroach/tree/master/monitoring) directory of the CockroachDB repository. +- Note that all files used in this tutorial can be found in the `monitoring` directory of the CockroachDB repository. ## Step 1. Install Prometheus @@ -39,11 +39,11 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration ## Step 2. Configure Prometheus -1. Download the starter [Prometheus configuration file](https://github.com/cockroachdb/cockroach/blob/master/monitoring/prometheus.yml) for CockroachDB: +1. 
Download the starter [Prometheus configuration file](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/prometheus.yml) for CockroachDB: {% include_cached copy-clipboard.html %} ~~~ shell - curl -o prometheus.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/prometheus.yml + curl -o prometheus.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/prometheus.yml ~~~ When you examine the configuration file, you'll see that it is set up to scrape the time series metrics of a single, insecure local node every 10 seconds: @@ -60,7 +60,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration Production cluster | Change the `targets` field to include `':'` for each node in the cluster. Also, be sure your network configuration allows TCP communication on the specified ports. Secure cluster | Uncomment `scheme: 'https'` and comment out `scheme: 'http'`. -1. Create a `rules` directory and download the [aggregation rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/aggregation.rules.yml) and [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/alerts.rules.yml) for CockroachDB into it: +1. 
Create a `rules` directory and download the [aggregation rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/aggregation.rules.yml) and [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml) for CockroachDB into it: {% include_cached copy-clipboard.html %} ~~~ shell @@ -74,12 +74,12 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration {% include_cached copy-clipboard.html %} ~~~ shell - curl -o rules/aggregation.rules.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/rules/aggregation.rules.yml + curl -o rules/aggregation.rules.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/rules/aggregation.rules.yml ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o rules/alerts.rules.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/rules/alerts.rules.yml + curl -o rules/alerts.rules.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml ~~~ ## Step 3. Start Prometheus @@ -108,7 +108,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration ## Step 4. Send notifications with Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. In step 2, you already downloaded CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/alerts.rules.yml). Now, download, configure, and start [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). +Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. 
In step 2, you already downloaded CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml). Now, download, configure, and start [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). 1. Download the [latest Alertmanager tarball](https://prometheus.io/download/#alertmanager) for your OS. @@ -173,29 +173,29 @@ Although Prometheus lets you graph metrics, [Grafana](https://grafana.com/) is a Url | `http://:9090` Access | Direct -1. Download the starter [Grafana dashboards](https://github.com/cockroachdb/cockroach/tree/master/monitoring/grafana-dashboards/by-cluster) for CockroachDB: +1. Download the starter Grafana dashboards for CockroachDB: {% include_cached copy-clipboard.html %} ~~~ shell - curl -o runtime.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/runtime.json + curl -o runtime.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/runtime.json # runtime dashboard: node status, including uptime, memory, and cpu. ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o storage.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/storage.json + curl -o storage.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/storage.json # storage dashboard: storage availability. ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o sql.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/sql.json + curl -o sql.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/sql.json # sql dashboard: sql queries/transactions. 
~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o replication.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/replication.json + curl -o replication.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/replication.json # replicas dashboard: replica information and operations. ~~~ diff --git a/src/current/v25.3/monitoring-and-alerting.md b/src/current/v25.3/monitoring-and-alerting.md index fb1a926937d..1650170a201 100644 --- a/src/current/v25.3/monitoring-and-alerting.md +++ b/src/current/v25.3/monitoring-and-alerting.md @@ -1090,7 +1090,7 @@ Start Prometheus and Alertmanager to begin watching for events to alert on. You ### Events to alert on {{site.data.alerts.callout_info}} -Currently, not all events listed have corresponding alert rule definitions available from the `api/v2/rules/` endpoint. Many events not yet available in this manner are defined in the pre-defined alerting rules. For more details, see [Monitor CockroachDB with Prometheus]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}). +Currently, not all events listed have corresponding alert rule definitions available from the `api/v2/rules/` endpoint. Many events not yet available in this manner are defined in the pre-defined alerting rules. For more details, see [Monitor CockroachDB with Prometheus]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}). {{site.data.alerts.end}} #### Node is down @@ -1099,7 +1099,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** If a node is down, its Prometheus endpoint will return a `Connection refused` error. Otherwise, the `liveness_livenodes` metric will be the total number of live nodes in the cluster. -- **Rule definition:** Use the `InstanceDead` alert from our pre-defined alerting rules. 
+- **Rule definition:** Use the `InstanceDead` alert from our pre-defined alerting rules. #### Node is restarting too frequently @@ -1107,7 +1107,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the number of times the `sys_uptime` metric in the node's Prometheus endpoint output was reset back to zero. The `sys_uptime` metric gives you the length of time, in seconds, that the `cockroach` process has been running. -- **Rule definition:** Use the `InstanceFlapping` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `InstanceFlapping` alert from our pre-defined alerting rules. #### Node is running low on disk space @@ -1115,7 +1115,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Divide the `capacity` metric by the `capacity_available` metric in the node's Prometheus endpoint output. -- **Rule definition:** Use the `StoreDiskLow` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `StoreDiskLow` alert from our pre-defined alerting rules. {% include {{page.version.version}}/storage/free-up-disk-space.md %} @@ -1131,7 +1131,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the `security_certificate_expiration_ca` metric in the node's Prometheus endpoint output. -- **Rule definition:** Use the `CACertificateExpiresSoon` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `CACertificateExpiresSoon` alert from our pre-defined alerting rules. #### Node certificate expires soon @@ -1139,7 +1139,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the `security_certificate_expiration_node` metric in the node's Prometheus endpoint output. 
-- **Rule definition:** Use the `NodeCertificateExpiresSoon` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `NodeCertificateExpiresSoon` alert from our pre-defined alerting rules. #### Changefeed is experiencing high latency diff --git a/src/current/v25.3/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md b/src/current/v25.3/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md index 55f144f4d74..34fcca4fb4a 100644 --- a/src/current/v25.3/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md +++ b/src/current/v25.3/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md @@ -71,7 +71,7 @@ Instead of using this approach, you can now [enable global access](https://cloud Our multi-region deployment approach relies on pod IP addresses being routable across three distinct Kubernetes clusters and regions. Both the hosted Google Kubernetes Engine (GKE) and Amazon Elastic Kubernetes Service (EKS) satisfy this requirement. -If you want to run on another cloud or on-premises, use this [basic network test](https://github.com/cockroachdb/cockroach/tree/master/cloud/kubernetes/multiregion#pod-to-pod-connectivity) to see if it will work. +If you want to run on another cloud or on-premises, use this basic network test to see if it will work.
      1. Complete the **Before You Begin** steps described in the [Google Kubernetes Engine Quickstart](https://cloud.google.com/kubernetes-engine/docs/quickstart) documentation. @@ -322,21 +322,21 @@ This important rule enables node communication between Kubernetes clusters in di The Kubernetes cluster in each region needs to have a [Network Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/introduction.html) pointed at its CoreDNS service, which you will configure in the next step. -1. Upload our load balancer manifest [`dns-lb-eks.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml) to the Kubernetes clusters in all 3 regions: +1. Upload our load balancer manifest [`dns-lb-eks.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml) to the Kubernetes clusters in all 3 regions: {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context <cluster-context-1> + kubectl apply -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context <cluster-context-1> ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context <cluster-context-2> + kubectl apply -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context <cluster-context-2> ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context <cluster-context-3> + kubectl apply -f 
https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context <cluster-context-3> ~~~ You should see the load balancer appear in the Load Balancers section of the EC2 console in each region. This load balancer will route traffic to CoreDNS in the region. @@ -369,11 +369,11 @@ Each Kubernetes cluster has a [CoreDNS](https://coredns.io/) service that respon To enable traffic forwarding to CockroachDB pods in all 3 regions, you need to [modify the ConfigMap](https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configmap-options) for the CoreDNS Corefile in each region. -1. Download and open our ConfigMap template [`configmap.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/configmap.yaml): +1. Download and open our ConfigMap template [`configmap.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/configmap.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/configmap.yaml + curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/configmap.yaml ~~~ 1. After [obtaining the IP addresses of the Network Load Balancers](#set-up-load-balancing) in all 3 regions, you can use this information to define a **separate ConfigMap for each region**. Each unique ConfigMap lists the forwarding addresses for the pods in the 2 other regions. 
@@ -467,7 +467,7 @@ If you plan to run your instances exclusively on private subnets, set the follow {% include_cached copy-clipboard.html %} ~~~ shell $ curl -OOOOOOOOO \ - https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/{README.md,client-secure.yaml,cluster-init-secure.yaml,cockroachdb-statefulset-secure.yaml,dns-lb.yaml,example-app-secure.yaml,external-name-svc.yaml,setup.py,teardown.py} + https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/{README.md,client-secure.yaml,cluster-init-secure.yaml,cockroachdb-statefulset-secure.yaml,dns-lb.yaml,example-app-secure.yaml,external-name-svc.yaml,setup.py,teardown.py} ~~~ 1. Retrieve the `kubectl` "contexts" for your clusters: @@ -685,11 +685,11 @@ The below steps use [`cockroach cert` commands]({% link {{ page.version.version ### Create StatefulSets -1. Download and open our [multi-region StatefulSet configuration](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml). You'll save three versions of this file locally, one for each set of 3 CockroachDB nodes per region. +1. Download and open our [multi-region StatefulSet configuration](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml). You'll save three versions of this file locally, one for each set of 3 CockroachDB nodes per region. {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml ~~~ Look for **TODO** comments in the file. 
These highlight fields you need to define before deploying your StatefulSet. @@ -814,7 +814,7 @@ The pod uses the `root` client certificate created earlier by the `setup.py` scr {% include_cached copy-clipboard.html %} ~~~ shell - kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/client-secure.yaml --context --namespace + kubectl create -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/client-secure.yaml --context --namespace ~~~ ~~~ diff --git a/src/current/v25.3/postgresql-compatibility.md b/src/current/v25.3/postgresql-compatibility.md index 15eff6fdc35..24b3639b1de 100644 --- a/src/current/v25.3/postgresql-compatibility.md +++ b/src/current/v25.3/postgresql-compatibility.md @@ -34,11 +34,11 @@ When set to `true`, multiple portals can be open at the same time, with their ex This feature has the following limitations: - Only read-only [`SELECT` queries]({% link {{ page.version.version }}/selection-queries.md %}) without [subqueries]({% link {{ page.version.version }}/subqueries.md %}) are supported. 
-- Postqueries (which are how CockroachDB executes [foreign key checks]({% link {{ page.version.version }}/foreign-key.md %}), for example) are not supported - [cockroachdb/cockroach#96398](https://github.com/cockroachdb/cockroach/issues/96398) -- [Distributed SQL execution]({% link {{ page.version.version }}/architecture/sql-layer.md %}#distsql) is not supported for multiple active portals; instead queries execute on the [gateway node]({% link {{ page.version.version }}/architecture/life-of-a-distributed-transaction.md %}#gateway) only - [cockroachdb/cockroach#100822](https://github.com/cockroachdb/cockroach/issues/100822) -- Only the latest execution of a statement from a pausable portal is recorded by the [trace infrastructure]({% link {{ page.version.version }}/show-trace.md %}) - [cockroachdb/cockroach#99404](https://github.com/cockroachdb/cockroach/issues/99404) +- Postqueries (which are how CockroachDB executes [foreign key checks]({% link {{ page.version.version }}/foreign-key.md %}), for example) are not supported - cockroachdb/cockroach#96398 +- [Distributed SQL execution]({% link {{ page.version.version }}/architecture/sql-layer.md %}#distsql) is not supported for multiple active portals; instead queries execute on the [gateway node]({% link {{ page.version.version }}/architecture/life-of-a-distributed-transaction.md %}#gateway) only - cockroachdb/cockroach#100822 +- Only the latest execution of a statement from a pausable portal is recorded by the [trace infrastructure]({% link {{ page.version.version }}/show-trace.md %}) - cockroachdb/cockroach#99404 -In addition to the known issues, additional performance testing is needed. The current list of known issues can be viewed [on GitHub using the `A-pausable-portals` label](https://github.com/cockroachdb/cockroach/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc++label%3AA-pausable-portals+), and we welcome bug reports. +In addition to the known issues, additional performance testing is needed. 
The current list of known issues can be viewed on GitHub using the `A-pausable-portals` label, and we welcome bug reports. ## Features that differ from PostgreSQL @@ -181,7 +181,7 @@ An `x` value less than `1` would result in the following error: pq: check constraint violated ~~~ -[#35370](https://github.com/cockroachdb/cockroach/issues/35370) +#35370 ### Column name from an outer column inside a subquery @@ -207,7 +207,7 @@ PostgreSQL: 1 ~~~ -[#46563](https://github.com/cockroachdb/cockroach/issues/46563) +#46563 ### SQL Compatibility diff --git a/src/current/v25.3/query-spatial-data.md b/src/current/v25.3/query-spatial-data.md index 6a32054d268..644aa487f81 100644 --- a/src/current/v25.3/query-spatial-data.md +++ b/src/current/v25.3/query-spatial-data.md @@ -24,7 +24,7 @@ Just as CockroachDB strives for [PostgreSQL compatibility]({% link {{ page.versi CockroachDB does not implement the full list of PostGIS built-in functions and operators. Also, [spatial indexing works differently]({% link {{ page.version.version }}/spatial-indexes.md %}) (see the [Performance](#performance) section below). For a list of the spatial functions CockroachDB supports, see [Geospatial functions]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions). -If your application needs support for functions that are not yet implemented, check the [meta-issue for built-in function support on GitHub](https://github.com/cockroachdb/cockroach/issues/49203), which describes how to find an issue for the built-in function(s) you need. +If your application needs support for functions that are not yet implemented, check the meta-issue for built-in function support on GitHub, which describes how to find an issue for the built-in function(s) you need. For a list of other known limitations, see [Known Limitations]({% link {{ page.version.version }}/known-limitations.md %}#spatial-support-limitations). 
diff --git a/src/current/v25.3/read-committed.md b/src/current/v25.3/read-committed.md index 6b83f50d88d..0858d1e0244 100644 --- a/src/current/v25.3/read-committed.md +++ b/src/current/v25.3/read-committed.md @@ -20,7 +20,7 @@ Whereas `SERIALIZABLE` isolation guarantees data correctness by placing transact If your workload is already running well under `SERIALIZABLE` isolation, Cockroach Labs does not recommend changing to `READ COMMITTED` isolation unless there is a specific need. {{site.data.alerts.callout_info}} -`READ COMMITTED` on CockroachDB provides stronger isolation than `READ COMMITTED` on PostgreSQL. On CockroachDB, `READ COMMITTED` prevents anomalies within single statements. For complete details on how `READ COMMITTED` is implemented on CockroachDB, see the [Read Committed RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20230122_read_committed_isolation.md). +`READ COMMITTED` on CockroachDB provides stronger isolation than `READ COMMITTED` on PostgreSQL. On CockroachDB, `READ COMMITTED` prevents anomalies within single statements. For complete details on how `READ COMMITTED` is implemented on CockroachDB, see the [Read Committed RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20230122_read_committed_isolation.md). {{site.data.alerts.end}} ## Enable `READ COMMITTED` isolation @@ -918,5 +918,5 @@ SELECT * FROM schedules - [`SELECT ... 
FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) - [Serializable Transactions]({% link {{ page.version.version }}/demo-serializable.md %}) - [What Write Skew Looks Like](https://www.cockroachlabs.com/blog/what-write-skew-looks-like/) -- [Read Committed RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20230122_read_committed_isolation.md) +- [Read Committed RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20230122_read_committed_isolation.md) - [Migration Overview]({% link molt/migration-overview.md %}) diff --git a/src/current/v25.3/restore.md b/src/current/v25.3/restore.md index 794b3a338d2..8348ae9adb7 100644 --- a/src/current/v25.3/restore.md +++ b/src/current/v25.3/restore.md @@ -217,11 +217,11 @@ When restoring an individual table that references a user-defined type (e.g., [` - If there is an existing type in the cluster with the same name that is compatible with the type in the backup, CockroachDB will map the type in the backup to the type in the cluster. - If there is an existing type in the cluster with the same name but it is _not_ compatible with the type in the backup, the restore will not succeed and you will be asked to resolve the naming conflict. You can do this by either [dropping]({% link {{ page.version.version }}/drop-type.md %}) or [renaming]({% link {{ page.version.version }}/alter-type.md %}) the existing user-defined type. -In general, two types are compatible if they are the same kind (e.g., an enum is only compatible with other enums). Additionally, enums are only compatible if they have the same ordered set of elements that have also been [created in the same way](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20200331_enums.md#physical-layout). For example: +In general, two types are compatible if they are the same kind (e.g., an enum is only compatible with other enums). 
Additionally, enums are only compatible if they have the same ordered set of elements that have also been [created in the same way](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20200331_enums.md#physical-layout). For example: - `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes', 'no')` are compatible. - `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('no', 'yes')` are not compatible. -- `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes'); ALTER TYPE t2 ADD VALUE ('no')` are not compatible because they were not created in the same way. +- `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes'); ALTER TYPE t2 ADD VALUE ('no')` are not compatible because they were not [created in the same way](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20200331_enums.md#physical-layout). ### Object dependencies diff --git a/src/current/v25.3/schedule-cockroachdb-kubernetes.md b/src/current/v25.3/schedule-cockroachdb-kubernetes.md index dd67d369c5e..5972aa3012e 100644 --- a/src/current/v25.3/schedule-cockroachdb-kubernetes.md +++ b/src/current/v25.3/schedule-cockroachdb-kubernetes.md @@ -118,7 +118,7 @@ For more context on how these rules work, see the [Kubernetes documentation](htt Specify pod affinities and anti-affinities in `affinity.podAffinity` and `affinity.podAntiAffinity` in the {{ site.data.products.public-operator }}'s custom resource, which is used to [deploy the cluster]({% link {{ page.version.version }}/deploy-cockroachdb-with-kubernetes.md %}#initialize-the-cluster). If you specify multiple `matchExpressions` labels, the node must match all of them. If you specify multiple `values` for a label, the node can match any of the values. 
-The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. +The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. {% include_cached copy-clipboard.html %} ~~~ yaml diff --git a/src/current/v25.3/schedule-cockroachdb-operator.md b/src/current/v25.3/schedule-cockroachdb-operator.md index 4630ea6d9d1..25f040d1a91 100644 --- a/src/current/v25.3/schedule-cockroachdb-operator.md +++ b/src/current/v25.3/schedule-cockroachdb-operator.md @@ -88,7 +88,7 @@ Specify pod affinities and node anti-affinities in `cockroachdb.crdbCluster.podT The {{ site.data.products.cockroachdb-operator }} hard-codes the pod template to only allow one pod per Kubernetes node. If you need to override this value, you can [override the pod template]({% link {{ page.version.version }}/override-templates-cockroachdb-operator.md %}#override-the-default-pod). -The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. +The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. 
~~~ yaml cockroachdb: diff --git a/src/current/v25.3/spatial-tutorial.md b/src/current/v25.3/spatial-tutorial.md index 72d45bf1fdd..fd26777f785 100644 --- a/src/current/v25.3/spatial-tutorial.md +++ b/src/current/v25.3/spatial-tutorial.md @@ -966,7 +966,7 @@ Time: 1.447s total (execution 1.446s / network 0.000s) Unfortunately, this query is a bit slower than you would like: about 1.5 seconds on a single-node [`cockroach demo`]({% link {{ page.version.version }}/cockroach-demo.md %}) cluster on a laptop. There are several reasons for this: 1. You haven't created any indexes at all yet. The query is likely to be doing full table scans, which you will need to hunt down with [`EXPLAIN`]({% link {{ page.version.version }}/explain.md %}). -1. CockroachDB does not yet have built-in support for index-based nearest neighbor queries. If this feature is important to you, please comment with some information about your use case on [cockroachdb/cockroach#55227](https://github.com/cockroachdb/cockroach/issues/55227). +1. CockroachDB does not yet have built-in support for index-based nearest neighbor queries. If this feature is important to you, please comment with some information about your use case on cockroachdb/cockroach#55227. Let's look at the `EXPLAIN` output to see if there is something that can be done to improve this query's performance: diff --git a/src/current/v25.3/sql-feature-support.md b/src/current/v25.3/sql-feature-support.md index df9fa0711d8..d51a2393894 100644 --- a/src/current/v25.3/sql-feature-support.md +++ b/src/current/v25.3/sql-feature-support.md @@ -194,4 +194,4 @@ XML | ✗ | Standard | XML data can be stored as `BYTES`, but we do not offer XM Triggers | Partial | Standard | [Triggers documentation]({% link {{ page.version.version }}/triggers.md %}) Row-level TTL | ✓ | Common Extension | Automatically delete expired rows. For more information, see [Batch-delete expired data with Row-Level TTL]({% link {{ page.version.version }}/row-level-ttl.md %}). 
User-defined functions | Partial | Standard | [User-Defined Functions documentation]({% link {{ page.version.version }}/user-defined-functions.md %}) - `CREATE EXTENSION "uuid-ossp"` | ✓ | Common Extension | Provides access to several additional [UUID generation functions]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions). Note that these UUID functions are available without typing `CREATE EXTENSION "uuid-ossp"`. CockroachDB does not have full support for `CREATE EXTENSION`. [GitHub issue tracking `CREATE EXTENSION` support](https://github.com/cockroachdb/cockroach/issues/74777). + `CREATE EXTENSION "uuid-ossp"` | ✓ | Common Extension | Provides access to several additional [UUID generation functions]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions). Note that these UUID functions are available without typing `CREATE EXTENSION "uuid-ossp"`. CockroachDB does not have full support for `CREATE EXTENSION`. GitHub issue tracking `CREATE EXTENSION` support. diff --git a/src/current/v25.3/sql-name-resolution.md b/src/current/v25.3/sql-name-resolution.md index 554a9a65583..9600bfe1ef7 100644 --- a/src/current/v25.3/sql-name-resolution.md +++ b/src/current/v25.3/sql-name-resolution.md @@ -37,7 +37,7 @@ If you are upgrading to {{ page.version.version }}, take any combination of the - [Create new schemas]({% link {{ page.version.version }}/create-schema.md %}) in databases on your cluster. After the schemas are created, use [`ALTER TABLE ... RENAME`]({% link {{ page.version.version }}/alter-table.md %}#rename-to), [`ALTER SEQUENCE ... RENAME`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE ... RENAME`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW ... RENAME`]({% link {{ page.version.version }}/alter-view.md %}) statements to move objects between databases as needed. To move objects between schemas, use [`ALTER TABLE ... 
SET SCHEMA`]({% link {{ page.version.version }}/alter-table.md %}#set-schema), [`ALTER SEQUENCE ... SET SCHEMA`]({% link {{ page.version.version }}/alter-sequence.md %}), or [`ALTER VIEW ... SET SCHEMA`]({% link {{ page.version.version }}/alter-view.md %}). -- If your cluster contains cross-database references (e.g., a cross-database foreign key reference, or a cross-database view reference), use the relevant [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}), [`ALTER SEQUENCE`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW `]({% link {{ page.version.version }}/alter-view.md %}) statements to move any cross-referencing objects to the same database, but different schemas. Cross-database object references were allowed in earlier versions of CockroachDB to make database-object naming hierarchies more flexible for users. In v20.2, creating cross-database references are disabled for [foreign keys](foreign-key.html), [views]({% link {{ page.version.version }}/views.md %}), and [sequence ownership]({% link {{ page.version.version }}/create-sequence.md %}). For details, see [tracking issue](https://github.com/cockroachdb/cockroach/issues/55791). +- If your cluster contains cross-database references (e.g., a cross-database foreign key reference, or a cross-database view reference), use the relevant [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}), [`ALTER SEQUENCE`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW`]({% link {{ page.version.version }}/alter-view.md %}) statements to move any cross-referencing objects to the same database, but different schemas. Cross-database object references were allowed in earlier versions of CockroachDB to make database-object naming hierarchies more flexible for users.
In v20.2, creating cross-database references is disabled for [foreign keys](foreign-key.html), [views]({% link {{ page.version.version }}/views.md %}), and [sequence ownership]({% link {{ page.version.version }}/create-sequence.md %}). For details, see the tracking issue. ## How name resolution works diff --git a/src/current/v25.3/st_union.md b/src/current/v25.3/st_union.md index df4a9429bc1..1cb4223c0e7 100644 --- a/src/current/v25.3/st_union.md +++ b/src/current/v25.3/st_union.md @@ -12,7 +12,7 @@ Given a set of shapes (e.g., from a [selection query]({% link {{ page.version.ve - [`GEOMETRY`]({% link {{ page.version.version }}/architecture/glossary.md %}#geometry) {{site.data.alerts.callout_info}} -The non-aggregate version of `ST_Union` is not yet implemented. For more information, see [cockroach#49064](https://github.com/cockroachdb/cockroach/issues/49064). +The non-aggregate version of `ST_Union` is not yet implemented. For more information, see cockroach#49064. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v25.3/temporary-tables.md b/src/current/v25.3/temporary-tables.md index 3a92fc64c97..21af0938cd2 100644 --- a/src/current/v25.3/temporary-tables.md +++ b/src/current/v25.3/temporary-tables.md @@ -10,7 +10,7 @@ docs_area: develop To create a temp table, add `TEMP`/`TEMPORARY` to a [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}) or [`CREATE TABLE AS`]({% link {{ page.version.version }}/create-table-as.md %}) statement. For full syntax details, see the [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}#synopsis) and [`CREATE TABLE AS`]({% link {{ page.version.version }}/create-table-as.md %}#synopsis) pages. For example usage, see [Examples](#examples). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260).
+{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v25.3/views.md b/src/current/v25.3/views.md index 6b1a667a470..7261ba8e7bd 100644 --- a/src/current/v25.3/views.md +++ b/src/current/v25.3/views.md @@ -653,7 +653,7 @@ To speed up queries on materialized views, you can add an [index]({% link {{ pag CockroachDB supports session-scoped temporary views. Unlike persistent views, temporary views can only be accessed from the session in which they were created, and they are dropped at the end of the session. You can create temporary views on both persistent tables and [temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v25.4/admission-control.md b/src/current/v25.4/admission-control.md index dd72fb6dcd4..eee14c739fe 100644 --- a/src/current/v25.4/admission-control.md +++ b/src/current/v25.4/admission-control.md @@ -22,7 +22,7 @@ Admission control works on a per-[node]({% link {{ page.version.version }}/archi For more details about how the admission control system works, see: -- The [Admission Control tech note](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/admission_control.md). +- [The Admission Control tech note](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/tech-notes/admission_control.md). - The blog post [Here's how CockroachDB keeps your database from collapsing under load](https://www.cockroachlabs.com/blog/admission-control-in-cockroachdb/). 
## Use cases for admission control @@ -166,6 +166,6 @@ The [DB Console Overload dashboard]({% link {{ page.version.version }}/ui-overlo ## See also - The [Overload Dashboard]({% link {{ page.version.version }}/ui-overload-dashboard.md %}) in the [DB Console]({% link {{ page.version.version }}/ui-overview.md %}). -- The [technical note for admission control](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/admission_control.md) for details on the design of the admission control system. +- The technical note for admission control for details on the design of the admission control system. - The blog post [Here's how CockroachDB keeps your database from collapsing under load](https://www.cockroachlabs.com/blog/admission-control-in-cockroachdb/). - The blog post [Rubbing Control Theory on the Go scheduler](https://www.cockroachlabs.com/blog/rubbing-control-theory/). diff --git a/src/current/v25.4/architecture/sql-layer.md b/src/current/v25.4/architecture/sql-layer.md index bfb67a48c05..2605879b93d 100644 --- a/src/current/v25.4/architecture/sql-layer.md +++ b/src/current/v25.4/architecture/sql-layer.md @@ -117,7 +117,7 @@ It's also important––for indexed columns––that this byte encoding preser However, for non-indexed columns (e.g., non-`PRIMARY KEY` columns), CockroachDB instead uses an encoding (known as "value encoding") which consumes less space but does not preserve ordering. -You can find more exhaustive detail in the [Encoding Tech Note](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/encoding.md). +You can find more exhaustive detail in [the Encoding Tech Note](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/tech-notes/encoding.md). 
### DistSQL @@ -134,7 +134,7 @@ To run SQL statements in a distributed fashion, we introduce a couple of concept - **Logical plan**: Similar to the AST/`planNode` tree described above, it represents the abstract (non-distributed) data flow through computation stages. - **Physical plan**: A physical plan is conceptually a mapping of the logical plan nodes to physical machines running `cockroach`. Logical plan nodes are replicated and specialized depending on the cluster topology. Like `planNodes` above, these components of the physical plan are scheduled and run on the cluster. -You can find much greater detail in the [DistSQL RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20160421_distributed_sql.md). +You can find much greater detail in [the DistSQL RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20160421_distributed_sql.md). ## Schema changes diff --git a/src/current/v25.4/cluster-settings.md b/src/current/v25.4/cluster-settings.md index 63169bea6cf..775dca5445c 100644 --- a/src/current/v25.4/cluster-settings.md +++ b/src/current/v25.4/cluster-settings.md @@ -26,7 +26,7 @@ These cluster settings have a broad impact on CockroachDB internals and affect a {% include {{page.version.version}}/sql/sql-defaults-cluster-settings-deprecation-notice.md %} -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/settings/settings.html %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/settings/settings.html{% endcapture %}{% include {{ cockroach_include }} %} ## View current cluster settings diff --git a/src/current/v25.4/cluster-setup-troubleshooting.md b/src/current/v25.4/cluster-setup-troubleshooting.md index e7c16577c14..23a318fa212 100644 --- a/src/current/v25.4/cluster-setup-troubleshooting.md +++ b/src/current/v25.4/cluster-setup-troubleshooting.md @@ -104,7 +104,7 @@ If you are trying to 
run all nodes on the same machine, you might get the follow #### Store directory already exists ~~~ -ERROR: could not cleanup temporary directories from record file: could not lock temporary directory /Users/amruta/go/src/github.com/cockroachdb/cockroach/cockroach-data/cockroach-temp301343769, may still be in use: IO error: While lock file: /Users/amruta/go/src/github.com/cockroachdb/cockroach/cockroach-data/cockroach-temp301343769/TEMP_DIR.LOCK: Resource temporarily unavailable +ERROR: could not cleanup temporary directories from record file: could not lock temporary directory /cockroach-data/cockroach-temp301343769, may still be in use: IO error: While lock file: /cockroach-data/cockroach-temp301343769/TEMP_DIR.LOCK: Resource temporarily unavailable ~~~ **Explanation:** When starting a new node on the same machine, the directory you choose to store the data in also contains metadata identifying the cluster the data came from. This causes conflicts when you've already started a node on the server and then tried to start another cluster using the same directory. @@ -225,7 +225,7 @@ Again, firewalls or hostname issues can cause any of these steps to fail. #### TCP connection lingering -If there is no host at the target IP address, or if a firewall rule blocks traffic to the target address and port, a [TCP handshake can linger](https://github.com/cockroachdb/cockroach/issues/53410) while the client network stack waits for a TCP packet in response to network requests. +If there is no host at the target IP address, or if a firewall rule blocks traffic to the target address and port, a TCP handshake can linger while the client network stack waits for a TCP packet in response to network requests. **Explanation:** CockroachDB servers rely on the network to report when a TCP connection fails. In most scenarios when a connection fails, the network immediately reports a connection failure, resulting in a `Connection refused` error. 
However, the scenario described above can cause connections to hang instead of failing immediately. diff --git a/src/current/v25.4/cockroachdb-feature-availability.md b/src/current/v25.4/cockroachdb-feature-availability.md index d054b6799b3..3913a983dc4 100644 --- a/src/current/v25.4/cockroachdb-feature-availability.md +++ b/src/current/v25.4/cockroachdb-feature-availability.md @@ -198,7 +198,7 @@ Example: ### Check for constraint violations with `SCRUB` -Checks the consistency of [`UNIQUE`]({% link {{ page.version.version }}/unique.md %}) indexes, [`CHECK`]({% link {{ page.version.version }}/check.md %}) constraints, and more. Partially implemented; see [cockroachdb/cockroach#10425](https://github.com/cockroachdb/cockroach/issues/10425) for details. +Checks the consistency of [`UNIQUE`]({% link {{ page.version.version }}/unique.md %}) indexes, [`CHECK`]({% link {{ page.version.version }}/check.md %}) constraints, and more. Partially implemented; see cockroachdb/cockroach#10425 for details. {{site.data.alerts.callout_info}} This example uses the `users` table from our open-source, fictional peer-to-peer vehicle-sharing application, [MovR]({% link {{ page.version.version }}/movr.md %}). @@ -229,7 +229,7 @@ The [`SHOW RANGE ... FOR ROW`]({% link {{ page.version.version }}/show-range-for ### Temporary objects -[Temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}), [temporary views]({% link {{ page.version.version }}/views.md %}#temporary-views), and [temporary sequences]({% link {{ page.version.version }}/create-sequence.md %}#temporary-sequences) are in preview in CockroachDB. If you create too many temporary objects in a session, the performance of DDL operations will degrade. Dropping large numbers of temporary objects in rapid succession can also enqueue many [schema change GC jobs]({% link {{ page.version.version }}/show-jobs.md %}), which may further degrade cluster performance. 
This performance degradation could persist long after creating the temporary objects. For more details, see [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +[Temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}), [temporary views]({% link {{ page.version.version }}/views.md %}#temporary-views), and [temporary sequences]({% link {{ page.version.version }}/create-sequence.md %}#temporary-sequences) are in preview in CockroachDB. If you create too many temporary objects in a session, the performance of DDL operations will degrade. Dropping large numbers of temporary objects in rapid succession can also enqueue many [schema change GC jobs]({% link {{ page.version.version }}/show-jobs.md %}), which may further degrade cluster performance. This performance degradation could persist long after creating the temporary objects. For more details, see cockroachdb/cockroach#46260. To enable temporary objects, set the `experimental_enable_temp_tables` [session variable]({% link {{ page.version.version }}/show-vars.md %}) to `on`. diff --git a/src/current/v25.4/common-table-expressions.md b/src/current/v25.4/common-table-expressions.md index a76f13ff39d..bb43aa48b0c 100644 --- a/src/current/v25.4/common-table-expressions.md +++ b/src/current/v25.4/common-table-expressions.md @@ -446,7 +446,7 @@ SELECT COUNT(*) FROM temp; Because this pattern incurs the overhead of a new scan for each iteration, it is slower per row than a full scan. It is therefore faster than a full scan in cases (such as this one) where many rows are skipped, but is slower if they are not. {{site.data.alerts.callout_info}} -Some recursive CTEs are not not yet optimized. For details, see the [tracking issue](https://github.com/cockroachdb/cockroach/issues/89954). +Some recursive CTEs are not yet optimized. For details, see the tracking issue.
{{site.data.alerts.end}} ## Correlated common table expressions diff --git a/src/current/v25.4/configure-replication-zones.md b/src/current/v25.4/configure-replication-zones.md index ced8cef3ded..9a804538983 100644 --- a/src/current/v25.4/configure-replication-zones.md +++ b/src/current/v25.4/configure-replication-zones.md @@ -98,7 +98,7 @@ For more information, see the following subsections: The hierarchy of inheritance for zone configs can be visualized using the following outline-style diagram, in which each level of indentation denotes an inheritance relationship. -The only exception to this simple inheritance relationship is that due to a known limitation, sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information, see [cockroachdb/cockroach#75862](https://github.com/cockroachdb/cockroach/issues/75862). +The only exception to this simple inheritance relationship is that due to a known limitation, sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information, see cockroachdb/cockroach#75862. ``` - default @@ -127,7 +127,7 @@ From the whole-system perspective, the hierarchy of schema object zone configs c The following diagram presents the same set of schema objects as the previous outline-style diagram, but using boxes and lines joined with arrows that represent the "top-down" view. -Each box represents a schema object in the zone configuration inheritance hierarchy. Each solid line ends in an arrow that points from a parent object to its child object, which will inherit the parent's values unless those values are changed at the child level. The dotted lines between partitions and sub-partitions represent the known limitation mentioned previously that sub-partitions do not inherit their values from their parent partitions. 
Instead, sub-partitions inherit their values from the parent table. For more information about this limitation, see [cockroachdb/cockroach#75862](https://github.com/cockroachdb/cockroach/issues/75862). +Each box represents a schema object in the zone configuration inheritance hierarchy. Each solid line ends in an arrow that points from a parent object to its child object, which will inherit the parent's values unless those values are changed at the child level. The dotted lines between partitions and sub-partitions represent the known limitation mentioned previously that sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information about this limitation, see cockroachdb/cockroach#75862. zone config inheritance diagram diff --git a/src/current/v25.4/create-sequence.md b/src/current/v25.4/create-sequence.md index 72bc62b717b..a0794958576 100644 --- a/src/current/v25.4/create-sequence.md +++ b/src/current/v25.4/create-sequence.md @@ -60,7 +60,7 @@ CockroachDB supports the following [SQL sequence functions]({% link {{ page.vers CockroachDB supports session-scoped temporary sequences. Unlike persistent sequences, temporary sequences can only be accessed from the session in which they were created, and they are dropped at the end of the session. You can create temporary sequences on both persistent tables and [temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. 
{{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v25.4/disaster-recovery-planning.md b/src/current/v25.4/disaster-recovery-planning.md index de593915cf0..762eb549920 100644 --- a/src/current/v25.4/disaster-recovery-planning.md +++ b/src/current/v25.4/disaster-recovery-planning.md @@ -322,7 +322,7 @@ If your cluster is running, you do not have a backup that encapsulates the time If you have corrupted data in a database or table, [restore]({% link {{ page.version.version }}/restore.md %}) the object from a prior [backup]({% link {{ page.version.version }}/backup.md %}). If revision history is in the backup, you can restore from a [point in time]({% link {{ page.version.version }}/take-backups-with-revision-history-and-restore-from-a-point-in-time.md %}). -Instead of dropping the corrupted table or database, we recommend [renaming the table]({% link {{ page.version.version }}/alter-table.md %}#rename-to) or [renaming the database]({% link {{ page.version.version }}/alter-database.md %}#rename-to) so you have historical data to compare to later. If you drop a database, the database cannot be referenced with `AS OF SYSTEM TIME` queries (see [#51380](https://github.com/cockroachdb/cockroach/issues/51380) for more information), and you will need to take a backup that is backdated to the system time when the database still existed. +Instead of dropping the corrupted table or database, we recommend [renaming the table]({% link {{ page.version.version }}/alter-table.md %}#rename-to) or [renaming the database]({% link {{ page.version.version }}/alter-database.md %}#rename-to) so you have historical data to compare to later. If you drop a database, the database cannot be referenced with `AS OF SYSTEM TIME` queries (see #51380 for more information), and you will need to take a backup that is backdated to the system time when the database still existed. 
{{site.data.alerts.callout_info}} If the table you are restoring has foreign keys, [careful consideration]({% link {{ page.version.version }}/restore.md %}#remove-the-foreign-key-before-restore) should be applied to make sure data integrity is maintained during the restore process. diff --git a/src/current/v25.4/eventlog.md b/src/current/v25.4/eventlog.md index 2dd3c072d82..e7b813ded9d 100644 --- a/src/current/v25.4/eventlog.md +++ b/src/current/v25.4/eventlog.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/eventlog.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/eventlog.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v25.4/example-apps.md b/src/current/v25.4/example-apps.md index bea3cfd1876..0d5ba010933 100644 --- a/src/current/v25.4/example-apps.md +++ b/src/current/v25.4/example-apps.md @@ -14,7 +14,7 @@ Click the links in the tables below to see simple but complete example applicati If you are looking to do a specific task such as connect to the database, insert data, or run multi-statement transactions, see [this list of tasks](#tasks). {{site.data.alerts.callout_info}} -Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM with **partial** support. If you encounter problems, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward full support. +Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM with **partial** support. If you encounter problems, please open an issue with details to help us make progress toward full support. 
Note that tools with [**community-level** support]({% link {{ page.version.version }}/community-tooling.md %}) have been tested or developed by the CockroachDB community, but are not officially supported by Cockroach Labs. If you encounter problems with using these tools, please contact the maintainer of the tool with details. {{site.data.alerts.end}} diff --git a/src/current/v25.4/file-an-issue.md b/src/current/v25.4/file-an-issue.md index 9255e2c08b2..760c5b181db 100644 --- a/src/current/v25.4/file-an-issue.md +++ b/src/current/v25.4/file-an-issue.md @@ -28,7 +28,7 @@ To file an issue in GitHub, we need the following information: ### Template -You can use this as a template for [filing an issue in GitHub](https://github.com/cockroachdb/cockroach/issues/new): +You can use this as a template for filing an issue in GitHub: ~~~ diff --git a/src/current/v25.4/fips.md b/src/current/v25.4/fips.md index f82d1f995a3..1b5b42dddb5 100644 --- a/src/current/v25.4/fips.md +++ b/src/current/v25.4/fips.md @@ -76,7 +76,7 @@ A system must have FIPS mode enabled in the kernel before it can run the FIPS-re If you do not want to use the FIPS-ready CockroachDB Docker image directly, you can create a custom Docker image based on [Red Hat's Universal Base Image 8 Docker image](https://catalog.redhat.com/software/containers/ubi8/ubi/): -- You can model your Dockerfile on the one that Cockroach Labs uses to produce the [FIPS-ready Docker image](https://github.com/cockroachdb/cockroach/blob/master/build/deploy/Dockerfile) for CockroachDB. +- You can model your Dockerfile on the one that Cockroach Labs uses to produce the FIPS-ready Docker image for CockroachDB. - Your Dockerfile must install OpenSSL before it starts the `cockroach` binary. - You must enable FIPS mode on the Docker host kernel before it can run containers with FIPS mode enabled. The FIPS-ready CockroachDB Docker image must run with FIPS mode enabled. 
To enable FIPS mode in the Docker host kernel, refer to [Enable FIPS mode](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/security_hardening/assembly_installing-a-rhel-8-system-with-fips-mode-enabled_security-hardening) in Red Hat's documentation. To verify that FIPS mode is enabled, refer to [Verify that the kernel enforces FIPS mode](#verify-that-the-kernel-enforces-fips-mode). diff --git a/src/current/v25.4/foreign-key.md b/src/current/v25.4/foreign-key.md index b9e6cc50f5c..da7def88ff1 100644 --- a/src/current/v25.4/foreign-key.md +++ b/src/current/v25.4/foreign-key.md @@ -92,7 +92,7 @@ For matching purposes, composite foreign keys can be in one of three states: For examples showing how these key matching algorithms work, see [Match composite foreign keys with `MATCH SIMPLE` and `MATCH FULL`](#match-composite-foreign-keys-with-match-simple-and-match-full). {{site.data.alerts.callout_info}} -CockroachDB does not support `MATCH PARTIAL`. For more information, see issue [#20305](https://github.com/cockroachdb/cockroach/issues/20305). +CockroachDB does not support `MATCH PARTIAL`. For more information, see issue #20305. 
{{site.data.alerts.end}} ### Foreign key actions diff --git a/src/current/v25.4/functions-and-operators.md b/src/current/v25.4/functions-and-operators.md index 3da97a7f5fd..80c1485ede8 100644 --- a/src/current/v25.4/functions-and-operators.md +++ b/src/current/v25.4/functions-and-operators.md @@ -51,7 +51,7 @@ In addition to the built-in functions described in the following sections, Cockr ## Built-in functions -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/functions.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/functions.md{% endcapture %}{% include {{ cockroach_include }} %} ## Aggregate functions @@ -61,11 +61,11 @@ For examples showing how to use aggregate functions, see [the `SELECT` clause do Non-commutative aggregate functions are sensitive to the order in which the rows are processed in the surrounding [`SELECT` clause]({% link {{ page.version.version }}/select-clause.md %}#aggregate-functions). To specify the order in which input rows are processed, you can add an [`ORDER BY`]({% link {{ page.version.version }}/order-by.md %}) clause within the function argument list. For examples, see the [`SELECT` clause]({% link {{ page.version.version }}/select-clause.md %}#order-aggregate-function-input-rows-by-column) documentation. 
{{site.data.alerts.end}} -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/aggregates.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/aggregates.md{% endcapture %}{% include {{ cockroach_include }} %} ## Window functions -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/window_functions.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/window_functions.md{% endcapture %}{% include {{ cockroach_include }} %} ## Operators @@ -137,7 +137,7 @@ The following table lists all CockroachDB operators from highest to lowest prece ### Supported operations -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/operators.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/operators.md{% endcapture %}{% include {{ cockroach_include }} %} {% comment %} ## `CAST()` diff --git a/src/current/v25.4/install-client-drivers.md b/src/current/v25.4/install-client-drivers.md index 984671a0dfc..4d919756ce3 100644 --- a/src/current/v25.4/install-client-drivers.md +++ b/src/current/v25.4/install-client-drivers.md @@ -8,7 +8,7 @@ docs_area: develop CockroachDB supports both native drivers and the PostgreSQL wire protocol, so most available PostgreSQL client drivers and ORM frameworks should work with CockroachDB. Choose a language for supported clients, and follow the installation steps. After you install a client library, you can [connect to the database]({% link {{ page.version.version }}/connect-to-the-database.md %}). {{site.data.alerts.callout_info}} -Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM framework with **partial** support. 
If you encounter problems, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward full support. +Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM framework with **partial** support. If you encounter problems, please open an issue with details to help us make progress toward full support. {{site.data.alerts.end}}
      diff --git a/src/current/v25.4/install-cockroachdb-mac.md b/src/current/v25.4/install-cockroachdb-mac.md index 19ea059cbf1..7eb7cdb6851 100644 --- a/src/current/v25.4/install-cockroachdb-mac.md +++ b/src/current/v25.4/install-cockroachdb-mac.md @@ -201,4 +201,4 @@ CockroachDB runtimes built for the ARM architecture have the following limitatio {% comment %}v22.2.0+{% endcomment %} -On macOS ARM systems, [spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the GEOS libraries. Users needing spatial features on an ARM Mac may instead [run the Intel binary](#install-the-binary) or use the[Docker container image](#use-docker). Refer to [GitHub issue #93161](https://github.com/cockroachdb/cockroach/issues/93161) for more information. +On macOS ARM systems, [spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the GEOS libraries. Users needing spatial features on an ARM Mac may instead [run the Intel binary](#install-the-binary) or use the [Docker container image](#use-docker). Refer to GitHub issue #93161 for more information.
diff --git a/src/current/v25.4/intellij-idea.md b/src/current/v25.4/intellij-idea.md index de46249320f..ef1ce93b9d4 100644 --- a/src/current/v25.4/intellij-idea.md +++ b/src/current/v25.4/intellij-idea.md @@ -81,7 +81,7 @@ You can now use IntelliJ's [database tool window](https://www.jetbrains.com/help ## Report issues with IntelliJ IDEA & CockroachDB -If you encounter issues other than those outlined above, please [file an issue on the `cockroachdb/cockroach` GitHub repo](https://github.com/cockroachdb/cockroach/issues/new?template=bug_report.md), including the following details about the environment where you encountered the issue: +If you encounter issues other than those outlined above, please file an issue on the `cockroachdb/cockroach` GitHub repo, including the following details about the environment where you encountered the issue: - CockroachDB version ([`cockroach version`]({% link {{ page.version.version }}/cockroach-version.md %})) - IntelliJ IDEA version diff --git a/src/current/v25.4/kubernetes-performance.md b/src/current/v25.4/kubernetes-performance.md index 6fa4d534442..cfe4aec6725 100644 --- a/src/current/v25.4/kubernetes-performance.md +++ b/src/current/v25.4/kubernetes-performance.md @@ -24,9 +24,9 @@ Before you focus on optimizing a Kubernetes-orchestrated CockroachDB cluster: A number of independent factors affect performance when running CockroachDB on Kubernetes. Most are easiest to change before you create your CockroachDB cluster. If you need to modify a CockroachDB cluster that is already running on Kubernetes, extra care and testing is strongly recommended. -The following sections show how to modify excerpts from our provided Kubernetes configuration YAML files. 
You can find the most up-to-date versions of these files on GitHub: [one for running CockroachDB in secure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset-secure.yaml) and one for [running CockroachDB in insecure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml). +The following sections show how to modify excerpts from our provided Kubernetes configuration YAML files. You can find the most up-to-date versions of these files on GitHub: [one for running CockroachDB in secure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset-secure.yaml) and one for [running CockroachDB in insecure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml). -You can also use a [performance-optimized configuration file for secure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml) or [insecure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml). Be sure to modify the file wherever there is a `TODO` comment. +You can also use a [performance-optimized configuration file for secure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml) or [insecure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml). Be sure to modify the file wherever there is a `TODO` comment. 
### Version of CockroachDB @@ -336,7 +336,7 @@ If for some reason setting appropriate resource requests still isn't getting you #### Client applications on the same machines as CockroachDB -Running client applications such as benchmarking applications on the same machines as CockroachDB can be even worse than just having Kubernetes system pods on the same machines. They are very likely to end up competing for resources, because when the applications get more loaded than usual, so will the CockroachDB processes. The best way to avoid this is to [set resource requests and limits](#resource-requests-and-limits), but if you are unwilling or unable to do that for some reason, you can also set [anti-affinity scheduling policies](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) on your client applications. Anti-affinity policies are placed in the pod spec, so if you wanted to change our provided example load generator app, you'd change [these lines](https://github.com/cockroachdb/cockroach/blob/98c506c48f3517d1ac1aadb6a09e1b23ad672c37/cloud/kubernetes/example-app.yaml#L11-L12): +Running client applications such as benchmarking applications on the same machines as CockroachDB can be even worse than just having Kubernetes system pods on the same machines. They are very likely to end up competing for resources, because when the applications get more loaded than usual, so will the CockroachDB processes. The best way to avoid this is to [set resource requests and limits](#resource-requests-and-limits), but if you are unwilling or unable to do that for some reason, you can also set [anti-affinity scheduling policies](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) on your client applications. 
Anti-affinity policies are placed in the pod spec, so if you wanted to change our provided example load generator app, you'd change [these lines](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml): ~~~ yaml spec: diff --git a/src/current/v25.4/log-formats.md b/src/current/v25.4/log-formats.md index 7c715f38c89..cfcf8512b19 100644 --- a/src/current/v25.4/log-formats.md +++ b/src/current/v25.4/log-formats.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/logformats.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/logformats.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v25.4/logging.md b/src/current/v25.4/logging.md index a9b22c4c05f..357817bc212 100644 --- a/src/current/v25.4/logging.md +++ b/src/current/v25.4/logging.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/logging.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/logging.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v25.4/migrate-from-avro.md b/src/current/v25.4/migrate-from-avro.md index de42d232917..eb46e391fce 100644 --- a/src/current/v25.4/migrate-from-avro.md +++ b/src/current/v25.4/migrate-from-avro.md @@ -161,7 +161,7 @@ There are additional import [options][option] you can use when importing binary - `records_terminated_by`, which specifies the unicode character used to indicate new lines in the input binary or JSON file (default: `\n`). 
{{site.data.alerts.callout_info}} -The following example uses sample data generated by [Avro tools](https://github.com/cockroachdb/cockroach/tree/master/pkg/sql/importer/testdata/avro). +The following example uses sample data generated by Avro tools. {{site.data.alerts.end}} For example, to import the data from `simple-schema.json` into a `simple` table, first [create the table]({% link {{ page.version.version }}/create-table.md %}) to import into. Then run `IMPORT INTO` with the following options: diff --git a/src/current/v25.4/monitor-cockroachdb-kubernetes.md b/src/current/v25.4/monitor-cockroachdb-kubernetes.md index b8ab1dafdac..39e0956e1d6 100644 --- a/src/current/v25.4/monitor-cockroachdb-kubernetes.md +++ b/src/current/v25.4/monitor-cockroachdb-kubernetes.md @@ -132,7 +132,7 @@ If you're on Hosted GKE, before starting, make sure the email address associated {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/prometheus.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/prometheus.yaml ~~~ {{site.data.alerts.callout_info}} @@ -181,14 +181,14 @@ If you're on Hosted GKE, before starting, make sure the email address associated ## Configure Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. +Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. 
This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. -1. Download our alertmanager-config.yaml configuration file: +1. Download our alertmanager-config.yaml configuration file: {% include_cached copy-clipboard.html %} ~~~ shell $ curl -O \ - https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager-config.yaml + https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager-config.yaml ~~~ 1. Edit the `alertmanager-config.yaml` file to [specify the desired receivers for notifications](https://prometheus.io/docs/alerting/configuration/#receiver). Initially, the file contains a placeholder web hook. @@ -218,12 +218,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen The name of the secret, `alertmanager-cockroachdb`, must match the name used in the `alertmanager.yaml` file. If they differ, the Alertmanager instance will start without configuration, and nothing will happen. {{site.data.alerts.end}} -1. Use our [`alertmanager.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: +1. 
Use our [`alertmanager.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml ~~~ ~~~ @@ -248,12 +248,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen Alertmanager -1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml): +1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml): {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alert-rules.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml ~~~ ~~~ diff --git a/src/current/v25.4/monitor-cockroachdb-operator.md b/src/current/v25.4/monitor-cockroachdb-operator.md index 5bf93ce305d..3ccfb20a588 100644 --- a/src/current/v25.4/monitor-cockroachdb-operator.md +++ b/src/current/v25.4/monitor-cockroachdb-operator.md @@ -76,7 +76,7 @@ If you're on Hosted GKE, before starting, make sure the email address associated {% include_cached copy-clipboard.html %} ~~~ shell - curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/prometheus.yaml + curl -O 
https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/prometheus.yaml ~~~ 1. Apply the Prometheus manifest. This creates the various objects necessary to run a Prometheus instance: @@ -119,13 +119,13 @@ For more details on using the Prometheus UI, see their [official documentation]( ## Configure Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. +Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. -1. Download our [alertmanager-config.yaml](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager-config.yaml) configuration file: +1. Download our [alertmanager-config.yaml](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager-config.yaml) configuration file: {% include_cached copy-clipboard.html %} ~~~ shell - curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager-config.yaml + curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager-config.yaml ~~~ 1. 
Edit the `alertmanager-config.yaml` file to [specify the desired receivers for notifications](https://prometheus.io/docs/alerting/configuration/#receiver). Initially, the file contains a placeholder web hook. @@ -152,12 +152,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen The name of the secret, `alertmanager-cockroachdb`, must match the name used in the `alertmanager.yaml` file. If they differ, the Alertmanager instance will start without configuration, and nothing will happen. {{site.data.alerts.end}} -1. Use our [alertmanager.yaml](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: +1. Use our [alertmanager.yaml](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: {% include_cached copy-clipboard.html %} ~~~ shell kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml ~~~ ~~~ shell alertmanager.monitoring.coreos.com/cockroachdb created @@ -180,12 +180,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen Alertmanager -1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml): +1. 
Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml): {% include_cached copy-clipboard.html %} ~~~ shell kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alert-rules.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml ~~~ ~~~ shell prometheusrule.monitoring.coreos.com/prometheus-cockroachdb-rules created diff --git a/src/current/v25.4/monitor-cockroachdb-with-prometheus.md b/src/current/v25.4/monitor-cockroachdb-with-prometheus.md index 2a9ff6f04ae..f2ca3db97c8 100644 --- a/src/current/v25.4/monitor-cockroachdb-with-prometheus.md +++ b/src/current/v25.4/monitor-cockroachdb-with-prometheus.md @@ -15,7 +15,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration - Make sure you have already started a CockroachDB cluster, either [locally]({% link {{ page.version.version }}/start-a-local-cluster.md %}) or in a [production environment]({% link {{ page.version.version }}/manual-deployment.md %}). -- Note that all files used in this tutorial can be found in the [`monitoring`](https://github.com/cockroachdb/cockroach/tree/master/monitoring) directory of the CockroachDB repository. +- Note that all files used in this tutorial can be found in the `monitoring` directory of the CockroachDB repository. ## Step 1. Install Prometheus @@ -39,11 +39,11 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration ## Step 2. Configure Prometheus -1. Download the starter [Prometheus configuration file](https://github.com/cockroachdb/cockroach/blob/master/monitoring/prometheus.yml) for CockroachDB: +1. 
Download the starter [Prometheus configuration file](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/prometheus.yml) for CockroachDB: {% include_cached copy-clipboard.html %} ~~~ shell - curl -o prometheus.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/prometheus.yml + curl -o prometheus.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/prometheus.yml ~~~ When you examine the configuration file, you'll see that it is set up to scrape the time series metrics of a single, insecure local node every 10 seconds: @@ -60,7 +60,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration Production cluster | Change the `targets` field to include `':'` for each node in the cluster. Also, be sure your network configuration allows TCP communication on the specified ports. Secure cluster | Uncomment `scheme: 'https'` and comment out `scheme: 'http'`. -1. Create a `rules` directory and download the [aggregation rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/aggregation.rules.yml) and [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/alerts.rules.yml) for CockroachDB into it: +1. 
Create a `rules` directory and download the [aggregation rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/aggregation.rules.yml) and [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml) for CockroachDB into it: {% include_cached copy-clipboard.html %} ~~~ shell @@ -74,12 +74,12 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration {% include_cached copy-clipboard.html %} ~~~ shell - curl -o rules/aggregation.rules.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/rules/aggregation.rules.yml + curl -o rules/aggregation.rules.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/rules/aggregation.rules.yml ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o rules/alerts.rules.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/rules/alerts.rules.yml + curl -o rules/alerts.rules.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml ~~~ ## Step 3. Start Prometheus @@ -108,7 +108,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration ## Step 4. Send notifications with Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. In step 2, you already downloaded CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/alerts.rules.yml). Now, download, configure, and start [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). +Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. 
In step 2, you already downloaded CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml). Now, download, configure, and start [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). 1. Download the [latest Alertmanager tarball](https://prometheus.io/download/#alertmanager) for your OS. @@ -173,29 +173,29 @@ Although Prometheus lets you graph metrics, [Grafana](https://grafana.com/) is a Url | `http://:9090` Access | Direct -1. Download the starter [Grafana dashboards](https://github.com/cockroachdb/cockroach/tree/master/monitoring/grafana-dashboards/by-cluster) for CockroachDB: +1. Download the starter Grafana dashboards for CockroachDB: {% include_cached copy-clipboard.html %} ~~~ shell - curl -o runtime.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/runtime.json + curl -o runtime.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/runtime.json # runtime dashboard: node status, including uptime, memory, and cpu. ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o storage.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/storage.json + curl -o storage.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/storage.json # storage dashboard: storage availability. ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o sql.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/sql.json + curl -o sql.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/sql.json # sql dashboard: sql queries/transactions. 
~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o replication.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/replication.json + curl -o replication.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/replication.json # replicas dashboard: replica information and operations. ~~~ diff --git a/src/current/v25.4/monitoring-and-alerting.md b/src/current/v25.4/monitoring-and-alerting.md index cc29283cd40..a3353b745a3 100644 --- a/src/current/v25.4/monitoring-and-alerting.md +++ b/src/current/v25.4/monitoring-and-alerting.md @@ -1090,7 +1090,7 @@ Start Prometheus and Alertmanager to begin watching for events to alert on. You ### Events to alert on {{site.data.alerts.callout_info}} -Currently, not all events listed have corresponding alert rule definitions available from the `api/v2/rules/` endpoint. Many events not yet available in this manner are defined in the pre-defined alerting rules. For more details, see [Monitor CockroachDB with Prometheus]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}). +Currently, not all events listed have corresponding alert rule definitions available from the `api/v2/rules/` endpoint. Many events not yet available in this manner are defined in the pre-defined alerting rules. For more details, see [Monitor CockroachDB with Prometheus]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}). {{site.data.alerts.end}} #### Node is down @@ -1099,7 +1099,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** If a node is down, its Prometheus endpoint will return a `Connection refused` error. Otherwise, the `liveness_livenodes` metric will be the total number of live nodes in the cluster. -- **Rule definition:** Use the `InstanceDead` alert from our pre-defined alerting rules. 
+- **Rule definition:** Use the `InstanceDead` alert from our pre-defined alerting rules. #### Node is restarting too frequently @@ -1107,7 +1107,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the number of times the `sys_uptime` metric in the node's Prometheus endpoint output was reset back to zero. The `sys_uptime` metric gives you the length of time, in seconds, that the `cockroach` process has been running. -- **Rule definition:** Use the `InstanceFlapping` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `InstanceFlapping` alert from our pre-defined alerting rules. #### Node is running low on disk space @@ -1115,7 +1115,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Divide the `capacity` metric by the `capacity_available` metric in the node's Prometheus endpoint output. -- **Rule definition:** Use the `StoreDiskLow` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `StoreDiskLow` alert from our pre-defined alerting rules. {% include {{page.version.version}}/storage/free-up-disk-space.md %} @@ -1131,7 +1131,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the `security_certificate_expiration_ca` metric in the node's Prometheus endpoint output. -- **Rule definition:** Use the `CACertificateExpiresSoon` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `CACertificateExpiresSoon` alert from our pre-defined alerting rules. #### Node certificate expires soon @@ -1139,7 +1139,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the `security_certificate_expiration_node` metric in the node's Prometheus endpoint output. 
-- **Rule definition:** Use the `NodeCertificateExpiresSoon` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `NodeCertificateExpiresSoon` alert from our pre-defined alerting rules. #### Changefeed is experiencing high latency diff --git a/src/current/v25.4/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md b/src/current/v25.4/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md index 55f144f4d74..34fcca4fb4a 100644 --- a/src/current/v25.4/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md +++ b/src/current/v25.4/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md @@ -71,7 +71,7 @@ Instead of using this approach, you can now [enable global access](https://cloud Our multi-region deployment approach relies on pod IP addresses being routable across three distinct Kubernetes clusters and regions. Both the hosted Google Kubernetes Engine (GKE) and Amazon Elastic Kubernetes Service (EKS) satisfy this requirement. -If you want to run on another cloud or on-premises, use this [basic network test](https://github.com/cockroachdb/cockroach/tree/master/cloud/kubernetes/multiregion#pod-to-pod-connectivity) to see if it will work. +If you want to run on another cloud or on-premises, run a basic network test that verifies pod-to-pod connectivity across your Kubernetes clusters to see if it will work.
      1. Complete the **Before You Begin** steps described in the [Google Kubernetes Engine Quickstart](https://cloud.google.com/kubernetes-engine/docs/quickstart) documentation. @@ -322,21 +322,21 @@ This important rule enables node communication between Kubernetes clusters in di The Kubernetes cluster in each region needs to have a [Network Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/introduction.html) pointed at its CoreDNS service, which you will configure in the next step. -1. Upload our load balancer manifest [`dns-lb-eks.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml) to the Kubernetes clusters in all 3 regions: +1. Upload our load balancer manifest [`dns-lb-eks.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml) to the Kubernetes clusters in all 3 regions: {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context + kubectl apply -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context + kubectl apply -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context + kubectl apply -f 
https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context ~~~ You should see the load balancer appear in the Load Balancers section of the EC2 console in each region. This load balancer will route traffic to CoreDNS in the region. @@ -369,11 +369,11 @@ Each Kubernetes cluster has a [CoreDNS](https://coredns.io/) service that respon To enable traffic forwarding to CockroachDB pods in all 3 regions, you need to [modify the ConfigMap](https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configmap-options) for the CoreDNS Corefile in each region. -1. Download and open our ConfigMap template [`configmap.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/configmap.yaml): +1. Download and open our ConfigMap template [`configmap.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/configmap.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/configmap.yaml + curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/configmap.yaml ~~~ 1. After [obtaining the IP addresses of the Network Load Balancers](#set-up-load-balancing) in all 3 regions, you can use this information to define a **separate ConfigMap for each region**. Each unique ConfigMap lists the forwarding addresses for the pods in the 2 other regions. 
@@ -467,7 +467,7 @@ If you plan to run your instances exclusively on private subnets, set the follow {% include_cached copy-clipboard.html %} ~~~ shell $ curl -OOOOOOOOO \ - https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/{README.md,client-secure.yaml,cluster-init-secure.yaml,cockroachdb-statefulset-secure.yaml,dns-lb.yaml,example-app-secure.yaml,external-name-svc.yaml,setup.py,teardown.py} + https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/{README.md,client-secure.yaml,cluster-init-secure.yaml,cockroachdb-statefulset-secure.yaml,dns-lb.yaml,example-app-secure.yaml,external-name-svc.yaml,setup.py,teardown.py} ~~~ 1. Retrieve the `kubectl` "contexts" for your clusters: @@ -685,11 +685,11 @@ The below steps use [`cockroach cert` commands]({% link {{ page.version.version ### Create StatefulSets -1. Download and open our [multi-region StatefulSet configuration](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml). You'll save three versions of this file locally, one for each set of 3 CockroachDB nodes per region. +1. Download and open our [multi-region StatefulSet configuration](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml). You'll save three versions of this file locally, one for each set of 3 CockroachDB nodes per region. {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml ~~~ Look for **TODO** comments in the file. 
These highlight fields you need to define before deploying your StatefulSet. @@ -814,7 +814,7 @@ The pod uses the `root` client certificate created earlier by the `setup.py` scr {% include_cached copy-clipboard.html %} ~~~ shell - kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/client-secure.yaml --context --namespace + kubectl create -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/client-secure.yaml --context --namespace ~~~ ~~~ diff --git a/src/current/v25.4/postgresql-compatibility.md b/src/current/v25.4/postgresql-compatibility.md index 15eff6fdc35..24b3639b1de 100644 --- a/src/current/v25.4/postgresql-compatibility.md +++ b/src/current/v25.4/postgresql-compatibility.md @@ -34,11 +34,11 @@ When set to `true`, multiple portals can be open at the same time, with their ex This feature has the following limitations: - Only read-only [`SELECT` queries]({% link {{ page.version.version }}/selection-queries.md %}) without [subqueries]({% link {{ page.version.version }}/subqueries.md %}) are supported. 
-- Postqueries (which are how CockroachDB executes [foreign key checks]({% link {{ page.version.version }}/foreign-key.md %}), for example) are not supported - [cockroachdb/cockroach#96398](https://github.com/cockroachdb/cockroach/issues/96398) -- [Distributed SQL execution]({% link {{ page.version.version }}/architecture/sql-layer.md %}#distsql) is not supported for multiple active portals; instead queries execute on the [gateway node]({% link {{ page.version.version }}/architecture/life-of-a-distributed-transaction.md %}#gateway) only - [cockroachdb/cockroach#100822](https://github.com/cockroachdb/cockroach/issues/100822) -- Only the latest execution of a statement from a pausable portal is recorded by the [trace infrastructure]({% link {{ page.version.version }}/show-trace.md %}) - [cockroachdb/cockroach#99404](https://github.com/cockroachdb/cockroach/issues/99404) +- Postqueries (which are how CockroachDB executes [foreign key checks]({% link {{ page.version.version }}/foreign-key.md %}), for example) are not supported - cockroachdb/cockroach#96398 +- [Distributed SQL execution]({% link {{ page.version.version }}/architecture/sql-layer.md %}#distsql) is not supported for multiple active portals; instead queries execute on the [gateway node]({% link {{ page.version.version }}/architecture/life-of-a-distributed-transaction.md %}#gateway) only - cockroachdb/cockroach#100822 +- Only the latest execution of a statement from a pausable portal is recorded by the [trace infrastructure]({% link {{ page.version.version }}/show-trace.md %}) - cockroachdb/cockroach#99404 -In addition to the known issues, additional performance testing is needed. The current list of known issues can be viewed [on GitHub using the `A-pausable-portals` label](https://github.com/cockroachdb/cockroach/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc++label%3AA-pausable-portals+), and we welcome bug reports. +In addition to the known issues, additional performance testing is needed. 
The current list of known issues can be viewed on GitHub using the `A-pausable-portals` label, and we welcome bug reports. ## Features that differ from PostgreSQL @@ -181,7 +181,7 @@ An `x` value less than `1` would result in the following error: pq: check constraint violated ~~~ -[#35370](https://github.com/cockroachdb/cockroach/issues/35370) +#35370 ### Column name from an outer column inside a subquery @@ -207,7 +207,7 @@ PostgreSQL: 1 ~~~ -[#46563](https://github.com/cockroachdb/cockroach/issues/46563) +#46563 ### SQL Compatibility diff --git a/src/current/v25.4/query-spatial-data.md b/src/current/v25.4/query-spatial-data.md index 26710368ce2..60dc0f49704 100644 --- a/src/current/v25.4/query-spatial-data.md +++ b/src/current/v25.4/query-spatial-data.md @@ -24,7 +24,7 @@ Just as CockroachDB strives for [PostgreSQL compatibility]({% link {{ page.versi CockroachDB does not implement the full list of PostGIS built-in functions and operators. Also, [spatial indexing works differently]({% link {{ page.version.version }}/spatial-indexes.md %}) (see the [Performance](#performance) section below). For a list of the spatial functions CockroachDB supports, see [Geospatial functions]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions). -If your application needs support for functions that are not yet implemented, check the [meta-issue for built-in function support on GitHub](https://github.com/cockroachdb/cockroach/issues/49203), which describes how to find an issue for the built-in function(s) you need. +If your application needs support for functions that are not yet implemented, check the meta-issue for built-in function support on GitHub, which describes how to find an issue for the built-in function(s) you need. For a list of other known limitations, refer to [Known Limitations]({% link {{ page.version.version }}/spatial-data-overview.md %}#known-limitations). 
diff --git a/src/current/v25.4/read-committed.md b/src/current/v25.4/read-committed.md index 6b83f50d88d..0858d1e0244 100644 --- a/src/current/v25.4/read-committed.md +++ b/src/current/v25.4/read-committed.md @@ -20,7 +20,7 @@ Whereas `SERIALIZABLE` isolation guarantees data correctness by placing transact If your workload is already running well under `SERIALIZABLE` isolation, Cockroach Labs does not recommend changing to `READ COMMITTED` isolation unless there is a specific need. {{site.data.alerts.callout_info}} -`READ COMMITTED` on CockroachDB provides stronger isolation than `READ COMMITTED` on PostgreSQL. On CockroachDB, `READ COMMITTED` prevents anomalies within single statements. For complete details on how `READ COMMITTED` is implemented on CockroachDB, see the [Read Committed RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20230122_read_committed_isolation.md). +`READ COMMITTED` on CockroachDB provides stronger isolation than `READ COMMITTED` on PostgreSQL. On CockroachDB, `READ COMMITTED` prevents anomalies within single statements. For complete details on how `READ COMMITTED` is implemented on CockroachDB, see the [Read Committed RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20230122_read_committed_isolation.md). {{site.data.alerts.end}} ## Enable `READ COMMITTED` isolation @@ -918,5 +918,5 @@ SELECT * FROM schedules - [`SELECT ... 
FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) - [Serializable Transactions]({% link {{ page.version.version }}/demo-serializable.md %}) - [What Write Skew Looks Like](https://www.cockroachlabs.com/blog/what-write-skew-looks-like/) -- [Read Committed RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20230122_read_committed_isolation.md) +- [Read Committed RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20230122_read_committed_isolation.md) - [Migration Overview]({% link molt/migration-overview.md %}) diff --git a/src/current/v25.4/restore.md b/src/current/v25.4/restore.md index 794b3a338d2..8348ae9adb7 100644 --- a/src/current/v25.4/restore.md +++ b/src/current/v25.4/restore.md @@ -217,11 +217,11 @@ When restoring an individual table that references a user-defined type (e.g., [` - If there is an existing type in the cluster with the same name that is compatible with the type in the backup, CockroachDB will map the type in the backup to the type in the cluster. - If there is an existing type in the cluster with the same name but it is _not_ compatible with the type in the backup, the restore will not succeed and you will be asked to resolve the naming conflict. You can do this by either [dropping]({% link {{ page.version.version }}/drop-type.md %}) or [renaming]({% link {{ page.version.version }}/alter-type.md %}) the existing user-defined type. -In general, two types are compatible if they are the same kind (e.g., an enum is only compatible with other enums). Additionally, enums are only compatible if they have the same ordered set of elements that have also been [created in the same way](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20200331_enums.md#physical-layout). For example: +In general, two types are compatible if they are the same kind (e.g., an enum is only compatible with other enums). 
Additionally, enums are only compatible if they have the same ordered set of elements that have also been [created in the same way](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20200331_enums.md#physical-layout). For example: - `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes', 'no')` are compatible. - `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('no', 'yes')` are not compatible. -- `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes'); ALTER TYPE t2 ADD VALUE ('no')` are not compatible because they were not created in the same way. +- `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes'); ALTER TYPE t2 ADD VALUE ('no')` are not compatible because they were not [created in the same way](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20200331_enums.md#physical-layout). ### Object dependencies diff --git a/src/current/v25.4/schedule-cockroachdb-kubernetes.md b/src/current/v25.4/schedule-cockroachdb-kubernetes.md index dd67d369c5e..5972aa3012e 100644 --- a/src/current/v25.4/schedule-cockroachdb-kubernetes.md +++ b/src/current/v25.4/schedule-cockroachdb-kubernetes.md @@ -118,7 +118,7 @@ For more context on how these rules work, see the [Kubernetes documentation](htt Specify pod affinities and anti-affinities in `affinity.podAffinity` and `affinity.podAntiAffinity` in the {{ site.data.products.public-operator }}'s custom resource, which is used to [deploy the cluster]({% link {{ page.version.version }}/deploy-cockroachdb-with-kubernetes.md %}#initialize-the-cluster). If you specify multiple `matchExpressions` labels, the node must match all of them. If you specify multiple `values` for a label, the node can match any of the values. 
-The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. +The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. {% include_cached copy-clipboard.html %} ~~~ yaml diff --git a/src/current/v25.4/schedule-cockroachdb-operator.md b/src/current/v25.4/schedule-cockroachdb-operator.md index 4630ea6d9d1..25f040d1a91 100644 --- a/src/current/v25.4/schedule-cockroachdb-operator.md +++ b/src/current/v25.4/schedule-cockroachdb-operator.md @@ -88,7 +88,7 @@ Specify pod affinities and node anti-affinities in `cockroachdb.crdbCluster.podT The {{ site.data.products.cockroachdb-operator }} hard-codes the pod template to only allow one pod per Kubernetes node. If you need to override this value, you can [override the pod template]({% link {{ page.version.version }}/override-templates-cockroachdb-operator.md %}#override-the-default-pod). -The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. +The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. 
~~~ yaml cockroachdb: diff --git a/src/current/v25.4/spatial-tutorial.md b/src/current/v25.4/spatial-tutorial.md index c0038c1de81..c7ed7ad13e9 100644 --- a/src/current/v25.4/spatial-tutorial.md +++ b/src/current/v25.4/spatial-tutorial.md @@ -966,7 +966,7 @@ Time: 1.447s total (execution 1.446s / network 0.000s) Unfortunately, this query is a bit slower than you would like: about 1.5 seconds on a single-node [`cockroach demo`]({% link {{ page.version.version }}/cockroach-demo.md %}) cluster on a laptop. There are several reasons for this: 1. You haven't created any indexes at all yet. The query is likely to be doing full table scans, which you will need to hunt down with [`EXPLAIN`]({% link {{ page.version.version }}/explain.md %}). -1. CockroachDB does not yet have built-in support for index-based nearest neighbor queries. If this feature is important to you, please comment with some information about your use case on [cockroachdb/cockroach#55227](https://github.com/cockroachdb/cockroach/issues/55227). +1. CockroachDB does not yet have built-in support for index-based nearest neighbor queries. If this feature is important to you, please comment with some information about your use case on cockroachdb/cockroach#55227. Let's look at the `EXPLAIN` output to see if there is something that can be done to improve this query's performance: diff --git a/src/current/v25.4/sql-feature-support.md b/src/current/v25.4/sql-feature-support.md index df9fa0711d8..d51a2393894 100644 --- a/src/current/v25.4/sql-feature-support.md +++ b/src/current/v25.4/sql-feature-support.md @@ -194,4 +194,4 @@ XML | ✗ | Standard | XML data can be stored as `BYTES`, but we do not offer XM Triggers | Partial | Standard | [Triggers documentation]({% link {{ page.version.version }}/triggers.md %}) Row-level TTL | ✓ | Common Extension | Automatically delete expired rows. For more information, see [Batch-delete expired data with Row-Level TTL]({% link {{ page.version.version }}/row-level-ttl.md %}). 
User-defined functions | Partial | Standard | [User-Defined Functions documentation]({% link {{ page.version.version }}/user-defined-functions.md %}) - `CREATE EXTENSION "uuid-ossp"` | ✓ | Common Extension | Provides access to several additional [UUID generation functions]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions). Note that these UUID functions are available without typing `CREATE EXTENSION "uuid-ossp"`. CockroachDB does not have full support for `CREATE EXTENSION`. [GitHub issue tracking `CREATE EXTENSION` support](https://github.com/cockroachdb/cockroach/issues/74777). + `CREATE EXTENSION "uuid-ossp"` | ✓ | Common Extension | Provides access to several additional [UUID generation functions]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions). Note that these UUID functions are available without typing `CREATE EXTENSION "uuid-ossp"`. CockroachDB does not have full support for `CREATE EXTENSION`. A GitHub issue tracks `CREATE EXTENSION` support. diff --git a/src/current/v25.4/sql-name-resolution.md b/src/current/v25.4/sql-name-resolution.md index 554a9a65583..9600bfe1ef7 100644 --- a/src/current/v25.4/sql-name-resolution.md +++ b/src/current/v25.4/sql-name-resolution.md @@ -37,7 +37,7 @@ If you are upgrading to {{ page.version.version }}, take any combination of the - [Create new schemas]({% link {{ page.version.version }}/create-schema.md %}) in databases on your cluster. After the schemas are created, use [`ALTER TABLE ... RENAME`]({% link {{ page.version.version }}/alter-table.md %}#rename-to), [`ALTER SEQUENCE ... RENAME`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE ... RENAME`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW ... RENAME`]({% link {{ page.version.version }}/alter-view.md %}) statements to move objects between databases as needed. To move objects between schemas, use [`ALTER TABLE ...
SET SCHEMA`]({% link {{ page.version.version }}/alter-table.md %}#set-schema), [`ALTER SEQUENCE ... SET SCHEMA`]({% link {{ page.version.version }}/alter-sequence.md %}), or [`ALTER VIEW ... SET SCHEMA`]({% link {{ page.version.version }}/alter-view.md %}). -- If your cluster contains cross-database references (e.g., a cross-database foreign key reference, or a cross-database view reference), use the relevant [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}), [`ALTER SEQUENCE`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW `]({% link {{ page.version.version }}/alter-view.md %}) statements to move any cross-referencing objects to the same database, but different schemas. Cross-database object references were allowed in earlier versions of CockroachDB to make database-object naming hierarchies more flexible for users. In v20.2, creating cross-database references are disabled for [foreign keys](foreign-key.html), [views]({% link {{ page.version.version }}/views.md %}), and [sequence ownership]({% link {{ page.version.version }}/create-sequence.md %}). For details, see [tracking issue](https://github.com/cockroachdb/cockroach/issues/55791). +- If your cluster contains cross-database references (e.g., a cross-database foreign key reference, or a cross-database view reference), use the relevant [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}), [`ALTER SEQUENCE`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW `]({% link {{ page.version.version }}/alter-view.md %}) statements to move any cross-referencing objects to the same database, but different schemas. Cross-database object references were allowed in earlier versions of CockroachDB to make database-object naming hierarchies more flexible for users. 
In v20.2, creating cross-database references is disabled for [foreign keys](foreign-key.html), [views]({% link {{ page.version.version }}/views.md %}), and [sequence ownership]({% link {{ page.version.version }}/create-sequence.md %}). For details, see the tracking issue. ## How name resolution works diff --git a/src/current/v25.4/st_union.md b/src/current/v25.4/st_union.md index 5b4027bd0e0..e4681c47456 100644 --- a/src/current/v25.4/st_union.md +++ b/src/current/v25.4/st_union.md @@ -12,7 +12,7 @@ Given a set of shapes (e.g., from a [selection query]({% link {{ page.version.ve - [`GEOMETRY`]({% link {{ page.version.version }}/architecture/glossary.md %}#geometry) {{site.data.alerts.callout_info}} -The non-aggregate version of `ST_Union` is not yet implemented. For more information, see [cockroach#49064](https://github.com/cockroachdb/cockroach/issues/49064). +The non-aggregate version of `ST_Union` is not yet implemented. For more information, see cockroach#49064. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v25.4/temporary-tables.md b/src/current/v25.4/temporary-tables.md index 3a92fc64c97..21af0938cd2 100644 --- a/src/current/v25.4/temporary-tables.md +++ b/src/current/v25.4/temporary-tables.md @@ -10,7 +10,7 @@ docs_area: develop To create a temp table, add `TEMP`/`TEMPORARY` to a [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}) or [`CREATE TABLE AS`]({% link {{ page.version.version }}/create-table-as.md %}) statement. For full syntax details, see the [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}#synopsis) and [`CREATE TABLE AS`]({% link {{ page.version.version }}/create-table-as.md %}#synopsis) pages. For example usage, see [Examples](#examples). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260).
+{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v25.4/views.md b/src/current/v25.4/views.md index 7aaea1c54a9..d31a1314b5c 100644 --- a/src/current/v25.4/views.md +++ b/src/current/v25.4/views.md @@ -647,7 +647,7 @@ To speed up queries on materialized views, you can add an [index]({% link {{ pag CockroachDB supports session-scoped temporary views. Unlike persistent views, temporary views can only be accessed from the session in which they were created, and they are dropped at the end of the session. You can create temporary views on both persistent tables and [temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v26.1/admission-control.md b/src/current/v26.1/admission-control.md index e29f44ffc21..3c07c649c81 100644 --- a/src/current/v26.1/admission-control.md +++ b/src/current/v26.1/admission-control.md @@ -22,7 +22,7 @@ Admission control works on a per-[node]({% link {{ page.version.version }}/archi For more details about how the admission control system works, see: -- The [Admission Control tech note](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/admission_control.md). +- [The Admission Control tech note](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/tech-notes/admission_control.md). - The blog post [Here's how CockroachDB keeps your database from collapsing under load](https://www.cockroachlabs.com/blog/admission-control-in-cockroachdb/). 
## Use cases for admission control @@ -166,6 +166,6 @@ The [DB Console Overload dashboard]({% link {{ page.version.version }}/ui-overlo ## See also - The [Overload Dashboard]({% link {{ page.version.version }}/ui-overload-dashboard.md %}) in the [DB Console]({% link {{ page.version.version }}/ui-overview.md %}). -- The [technical note for admission control](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/admission_control.md) for details on the design of the admission control system. +- The technical note for admission control for details on the design of the admission control system. - The blog post [Here's how CockroachDB keeps your database from collapsing under load](https://www.cockroachlabs.com/blog/admission-control-in-cockroachdb/). - The blog post [Rubbing Control Theory on the Go scheduler](https://www.cockroachlabs.com/blog/rubbing-control-theory/). diff --git a/src/current/v26.1/architecture/sql-layer.md b/src/current/v26.1/architecture/sql-layer.md index bfb67a48c05..2605879b93d 100644 --- a/src/current/v26.1/architecture/sql-layer.md +++ b/src/current/v26.1/architecture/sql-layer.md @@ -117,7 +117,7 @@ It's also important––for indexed columns––that this byte encoding preser However, for non-indexed columns (e.g., non-`PRIMARY KEY` columns), CockroachDB instead uses an encoding (known as "value encoding") which consumes less space but does not preserve ordering. -You can find more exhaustive detail in the [Encoding Tech Note](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/encoding.md). +You can find more exhaustive detail in [the Encoding Tech Note](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/tech-notes/encoding.md). 
### DistSQL @@ -134,7 +134,7 @@ To run SQL statements in a distributed fashion, we introduce a couple of concept - **Logical plan**: Similar to the AST/`planNode` tree described above, it represents the abstract (non-distributed) data flow through computation stages. - **Physical plan**: A physical plan is conceptually a mapping of the logical plan nodes to physical machines running `cockroach`. Logical plan nodes are replicated and specialized depending on the cluster topology. Like `planNodes` above, these components of the physical plan are scheduled and run on the cluster. -You can find much greater detail in the [DistSQL RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20160421_distributed_sql.md). +You can find much greater detail in [the DistSQL RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20160421_distributed_sql.md). ## Schema changes diff --git a/src/current/v26.1/cluster-settings.md b/src/current/v26.1/cluster-settings.md index 63169bea6cf..775dca5445c 100644 --- a/src/current/v26.1/cluster-settings.md +++ b/src/current/v26.1/cluster-settings.md @@ -26,7 +26,7 @@ These cluster settings have a broad impact on CockroachDB internals and affect a {% include {{page.version.version}}/sql/sql-defaults-cluster-settings-deprecation-notice.md %} -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/settings/settings.html %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/settings/settings.html{% endcapture %}{% include {{ cockroach_include }} %} ## View current cluster settings diff --git a/src/current/v26.1/cluster-setup-troubleshooting.md b/src/current/v26.1/cluster-setup-troubleshooting.md index e7c16577c14..23a318fa212 100644 --- a/src/current/v26.1/cluster-setup-troubleshooting.md +++ b/src/current/v26.1/cluster-setup-troubleshooting.md @@ -104,7 +104,7 @@ If you are trying to 
run all nodes on the same machine, you might get the follow #### Store directory already exists ~~~ -ERROR: could not cleanup temporary directories from record file: could not lock temporary directory /Users/amruta/go/src/github.com/cockroachdb/cockroach/cockroach-data/cockroach-temp301343769, may still be in use: IO error: While lock file: /Users/amruta/go/src/github.com/cockroachdb/cockroach/cockroach-data/cockroach-temp301343769/TEMP_DIR.LOCK: Resource temporarily unavailable +ERROR: could not cleanup temporary directories from record file: could not lock temporary directory /cockroach-data/cockroach-temp301343769, may still be in use: IO error: While lock file: /cockroach-data/cockroach-temp301343769/TEMP_DIR.LOCK: Resource temporarily unavailable ~~~ **Explanation:** When starting a new node on the same machine, the directory you choose to store the data in also contains metadata identifying the cluster the data came from. This causes conflicts when you've already started a node on the server and then tried to start another cluster using the same directory. @@ -225,7 +225,7 @@ Again, firewalls or hostname issues can cause any of these steps to fail. #### TCP connection lingering -If there is no host at the target IP address, or if a firewall rule blocks traffic to the target address and port, a [TCP handshake can linger](https://github.com/cockroachdb/cockroach/issues/53410) while the client network stack waits for a TCP packet in response to network requests. +If there is no host at the target IP address, or if a firewall rule blocks traffic to the target address and port, a TCP handshake can linger while the client network stack waits for a TCP packet in response to network requests. **Explanation:** CockroachDB servers rely on the network to report when a TCP connection fails. In most scenarios when a connection fails, the network immediately reports a connection failure, resulting in a `Connection refused` error. 
However, the scenario described above can cause connections to hang instead of failing immediately. diff --git a/src/current/v26.1/cockroachdb-feature-availability.md b/src/current/v26.1/cockroachdb-feature-availability.md index 80782e931f8..e71911e865b 100644 --- a/src/current/v26.1/cockroachdb-feature-availability.md +++ b/src/current/v26.1/cockroachdb-feature-availability.md @@ -210,7 +210,7 @@ The [`SHOW RANGE ... FOR ROW`]({% link {{ page.version.version }}/show-range-for ### Temporary objects -[Temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}), [temporary views]({% link {{ page.version.version }}/views.md %}#temporary-views), and [temporary sequences]({% link {{ page.version.version }}/create-sequence.md %}#temporary-sequences) are in preview in CockroachDB. If you create too many temporary objects in a session, the performance of DDL operations will degrade. Dropping large numbers of temporary objects in rapid succession can also enqueue many [schema change GC jobs]({% link {{ page.version.version }}/show-jobs.md %}), which may further degrade cluster performance. This performance degradation could persist long after creating the temporary objects. For more details, see [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +[Temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}), [temporary views]({% link {{ page.version.version }}/views.md %}#temporary-views), and [temporary sequences]({% link {{ page.version.version }}/create-sequence.md %}#temporary-sequences) are in preview in CockroachDB. If you create too many temporary objects in a session, the performance of DDL operations will degrade. Dropping large numbers of temporary objects in rapid succession can also enqueue many [schema change GC jobs]({% link {{ page.version.version }}/show-jobs.md %}), which may further degrade cluster performance. 
This performance degradation could persist long after creating the temporary objects. For more details, see cockroachdb/cockroach#46260. To enable temporary objects, set the `experimental_enable_temp_tables` [session variable]({% link {{ page.version.version }}/show-vars.md %}) to `on`. diff --git a/src/current/v26.1/common-table-expressions.md b/src/current/v26.1/common-table-expressions.md index a76f13ff39d..bb43aa48b0c 100644 --- a/src/current/v26.1/common-table-expressions.md +++ b/src/current/v26.1/common-table-expressions.md @@ -446,7 +446,7 @@ SELECT COUNT(*) FROM temp; Because this pattern incurs the overhead of a new scan for each iteration, it is slower per row than a full scan. It is therefore faster than a full scan in cases (such as this one) where many rows are skipped, but is slower if they are not. {{site.data.alerts.callout_info}} -Some recursive CTEs are not not yet optimized. For details, see the [tracking issue](https://github.com/cockroachdb/cockroach/issues/89954). +Some recursive CTEs are not yet optimized. For details, see the tracking issue. {{site.data.alerts.end}} ## Correlated common table expressions diff --git a/src/current/v26.1/configure-replication-zones.md b/src/current/v26.1/configure-replication-zones.md index 05f564ed4c9..fa434a821d3 100644 --- a/src/current/v26.1/configure-replication-zones.md +++ b/src/current/v26.1/configure-replication-zones.md @@ -98,7 +98,7 @@ For more information, see the following subsections: The hierarchy of inheritance for zone configs can be visualized using the following outline-style diagram, in which each level of indentation denotes an inheritance relationship. -The only exception to this simple inheritance relationship is that due to a known limitation, sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table.
For more information, see [cockroachdb/cockroach#75862](https://github.com/cockroachdb/cockroach/issues/75862). +The only exception to this simple inheritance relationship is that due to a known limitation, sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information, see cockroachdb/cockroach#75862. ``` - default @@ -127,7 +127,7 @@ From the whole-system perspective, the hierarchy of schema object zone configs c The following diagram presents the same set of schema objects as the previous outline-style diagram, but using boxes and lines joined with arrows that represent the "top-down" view. -Each box represents a schema object in the zone configuration inheritance hierarchy. Each solid line ends in an arrow that points from a parent object to its child object, which will inherit the parent's values unless those values are changed at the child level. The dotted lines between partitions and sub-partitions represent the known limitation mentioned previously that sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information about this limitation, see [cockroachdb/cockroach#75862](https://github.com/cockroachdb/cockroach/issues/75862). +Each box represents a schema object in the zone configuration inheritance hierarchy. Each solid line ends in an arrow that points from a parent object to its child object, which will inherit the parent's values unless those values are changed at the child level. The dotted lines between partitions and sub-partitions represent the known limitation mentioned previously that sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information about this limitation, see cockroachdb/cockroach#75862. 
zone config inheritance diagram diff --git a/src/current/v26.1/create-sequence.md b/src/current/v26.1/create-sequence.md index 72bc62b717b..a0794958576 100644 --- a/src/current/v26.1/create-sequence.md +++ b/src/current/v26.1/create-sequence.md @@ -60,7 +60,7 @@ CockroachDB supports the following [SQL sequence functions]({% link {{ page.vers CockroachDB supports session-scoped temporary sequences. Unlike persistent sequences, temporary sequences can only be accessed from the session in which they were created, and they are dropped at the end of the session. You can create temporary sequences on both persistent tables and [temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v26.1/disaster-recovery-planning.md b/src/current/v26.1/disaster-recovery-planning.md index de593915cf0..762eb549920 100644 --- a/src/current/v26.1/disaster-recovery-planning.md +++ b/src/current/v26.1/disaster-recovery-planning.md @@ -322,7 +322,7 @@ If your cluster is running, you do not have a backup that encapsulates the time If you have corrupted data in a database or table, [restore]({% link {{ page.version.version }}/restore.md %}) the object from a prior [backup]({% link {{ page.version.version }}/backup.md %}). If revision history is in the backup, you can restore from a [point in time]({% link {{ page.version.version }}/take-backups-with-revision-history-and-restore-from-a-point-in-time.md %}). 
-Instead of dropping the corrupted table or database, we recommend [renaming the table]({% link {{ page.version.version }}/alter-table.md %}#rename-to) or [renaming the database]({% link {{ page.version.version }}/alter-database.md %}#rename-to) so you have historical data to compare to later. If you drop a database, the database cannot be referenced with `AS OF SYSTEM TIME` queries (see [#51380](https://github.com/cockroachdb/cockroach/issues/51380) for more information), and you will need to take a backup that is backdated to the system time when the database still existed. +Instead of dropping the corrupted table or database, we recommend [renaming the table]({% link {{ page.version.version }}/alter-table.md %}#rename-to) or [renaming the database]({% link {{ page.version.version }}/alter-database.md %}#rename-to) so you have historical data to compare to later. If you drop a database, the database cannot be referenced with `AS OF SYSTEM TIME` queries (see #51380 for more information), and you will need to take a backup that is backdated to the system time when the database still existed. {{site.data.alerts.callout_info}} If the table you are restoring has foreign keys, [careful consideration]({% link {{ page.version.version }}/restore.md %}#remove-the-foreign-key-before-restore) should be applied to make sure data integrity is maintained during the restore process. 
diff --git a/src/current/v26.1/eventlog.md b/src/current/v26.1/eventlog.md index 2dd3c072d82..e7b813ded9d 100644 --- a/src/current/v26.1/eventlog.md +++ b/src/current/v26.1/eventlog.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/eventlog.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/eventlog.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v26.1/file-an-issue.md b/src/current/v26.1/file-an-issue.md index 9255e2c08b2..760c5b181db 100644 --- a/src/current/v26.1/file-an-issue.md +++ b/src/current/v26.1/file-an-issue.md @@ -28,7 +28,7 @@ To file an issue in GitHub, we need the following information: ### Template -You can use this as a template for [filing an issue in GitHub](https://github.com/cockroachdb/cockroach/issues/new): +You can use this as a template for filing an issue in GitHub: ~~~ diff --git a/src/current/v26.1/fips.md b/src/current/v26.1/fips.md index f1d6cf8c51d..9b5b26f2576 100644 --- a/src/current/v26.1/fips.md +++ b/src/current/v26.1/fips.md @@ -84,7 +84,7 @@ The FIPS-ready CockroachDB Docker images are based on [Red Hat's Universal Base If you do not want to use the FIPS-ready CockroachDB Docker image directly, you can create a custom Docker image based on [Red Hat's Universal Base Image 10](https://catalog.redhat.com/software/containers/ubi10/ubi-minimal/): -- You can model your Dockerfile on the one that Cockroach Labs uses to produce the [FIPS-ready Docker image](https://github.com/cockroachdb/cockroach/blob/master/build/deploy/Dockerfile) for CockroachDB. +- You can model your Dockerfile on the one that Cockroach Labs uses to produce the FIPS-ready Docker image for CockroachDB. - The FIPS-ready binary includes the FIPS 140-3 Go Cryptographic Module and does not require additional system libraries to be installed. 
diff --git a/src/current/v26.1/foreign-key.md b/src/current/v26.1/foreign-key.md index b9e6cc50f5c..da7def88ff1 100644 --- a/src/current/v26.1/foreign-key.md +++ b/src/current/v26.1/foreign-key.md @@ -92,7 +92,7 @@ For matching purposes, composite foreign keys can be in one of three states: For examples showing how these key matching algorithms work, see [Match composite foreign keys with `MATCH SIMPLE` and `MATCH FULL`](#match-composite-foreign-keys-with-match-simple-and-match-full). {{site.data.alerts.callout_info}} -CockroachDB does not support `MATCH PARTIAL`. For more information, see issue [#20305](https://github.com/cockroachdb/cockroach/issues/20305). +CockroachDB does not support `MATCH PARTIAL`. For more information, see issue #20305. {{site.data.alerts.end}} ### Foreign key actions diff --git a/src/current/v26.1/functions-and-operators.md b/src/current/v26.1/functions-and-operators.md index 3da97a7f5fd..80c1485ede8 100644 --- a/src/current/v26.1/functions-and-operators.md +++ b/src/current/v26.1/functions-and-operators.md @@ -51,7 +51,7 @@ In addition to the built-in functions described in the following sections, Cockr ## Built-in functions -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/functions.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/functions.md{% endcapture %}{% include {{ cockroach_include }} %} ## Aggregate functions @@ -61,11 +61,11 @@ For examples showing how to use aggregate functions, see [the `SELECT` clause do Non-commutative aggregate functions are sensitive to the order in which the rows are processed in the surrounding [`SELECT` clause]({% link {{ page.version.version }}/select-clause.md %}#aggregate-functions). To specify the order in which input rows are processed, you can add an [`ORDER BY`]({% link {{ page.version.version }}/order-by.md %}) clause within the function argument list. 
For examples, see the [`SELECT` clause]({% link {{ page.version.version }}/select-clause.md %}#order-aggregate-function-input-rows-by-column) documentation. {{site.data.alerts.end}} -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/aggregates.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/aggregates.md{% endcapture %}{% include {{ cockroach_include }} %} ## Window functions -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/window_functions.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/window_functions.md{% endcapture %}{% include {{ cockroach_include }} %} ## Operators @@ -137,7 +137,7 @@ The following table lists all CockroachDB operators from highest to lowest prece ### Supported operations -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/operators.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/operators.md{% endcapture %}{% include {{ cockroach_include }} %} {% comment %} ## `CAST()` diff --git a/src/current/v26.1/install-client-drivers.md b/src/current/v26.1/install-client-drivers.md index 75b39685574..1a20d6e779f 100644 --- a/src/current/v26.1/install-client-drivers.md +++ b/src/current/v26.1/install-client-drivers.md @@ -8,7 +8,7 @@ docs_area: develop CockroachDB supports both native drivers and the PostgreSQL wire protocol, so most available PostgreSQL client drivers and ORM frameworks should work with CockroachDB. Choose a language for supported clients, and follow the installation steps. After you install a client library, you can [connect to the database]({% link {{ page.version.version }}/connect-to-the-database.md %}). 
{{site.data.alerts.callout_info}} -Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM framework with **partial** support. If you encounter problems, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward full support. +Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM framework with **partial** support. If you encounter problems, please open an issue with details to help us make progress toward full support. {{site.data.alerts.end}}
      diff --git a/src/current/v26.1/install-cockroachdb-mac.md b/src/current/v26.1/install-cockroachdb-mac.md index 116d9e9901e..40dff5896d1 100644 --- a/src/current/v26.1/install-cockroachdb-mac.md +++ b/src/current/v26.1/install-cockroachdb-mac.md @@ -201,4 +201,4 @@ CockroachDB runtimes built for the ARM architecture have the following limitatio {% comment %}v22.2.0+{% endcomment %} -On macOS ARM systems, [spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the GEOS libraries. Users needing spatial features on an ARM Mac may instead [run the Intel binary](#install-the-binary) or use the[Docker container image](#use-docker). Refer to [GitHub issue #93161](https://github.com/cockroachdb/cockroach/issues/93161) for more information. +On macOS ARM systems, [spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the GEOS libraries. Users needing spatial features on an ARM Mac may instead [run the Intel binary](#install-the-binary) or use the [Docker container image](#use-docker). Refer to GitHub issue #93161 for more information. 
diff --git a/src/current/v26.1/intellij-idea.md b/src/current/v26.1/intellij-idea.md index 1ce61a25e53..8f8928a0b69 100644 --- a/src/current/v26.1/intellij-idea.md +++ b/src/current/v26.1/intellij-idea.md @@ -81,7 +81,7 @@ You can now use IntelliJ's [database tool window](https://www.jetbrains.com/help ## Report issues with IntelliJ IDEA & CockroachDB -If you encounter issues other than those outlined above, please [file an issue on the `cockroachdb/cockroach` GitHub repo](https://github.com/cockroachdb/cockroach/issues/new?template=bug_report.md), including the following details about the environment where you encountered the issue: +If you encounter issues other than those outlined above, please file an issue on the `cockroachdb/cockroach` GitHub repo, including the following details about the environment where you encountered the issue: - CockroachDB version ([`cockroach version`]({% link {{ page.version.version }}/cockroach-version.md %})) - IntelliJ IDEA version diff --git a/src/current/v26.1/kubernetes-performance.md b/src/current/v26.1/kubernetes-performance.md index 6fa4d534442..cfe4aec6725 100644 --- a/src/current/v26.1/kubernetes-performance.md +++ b/src/current/v26.1/kubernetes-performance.md @@ -24,9 +24,9 @@ Before you focus on optimizing a Kubernetes-orchestrated CockroachDB cluster: A number of independent factors affect performance when running CockroachDB on Kubernetes. Most are easiest to change before you create your CockroachDB cluster. If you need to modify a CockroachDB cluster that is already running on Kubernetes, extra care and testing is strongly recommended. -The following sections show how to modify excerpts from our provided Kubernetes configuration YAML files. 
You can find the most up-to-date versions of these files on GitHub: [one for running CockroachDB in secure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset-secure.yaml) and one for [running CockroachDB in insecure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml). +The following sections show how to modify excerpts from our provided Kubernetes configuration YAML files. You can find the most up-to-date versions of these files on GitHub: [one for running CockroachDB in secure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset-secure.yaml) and one for [running CockroachDB in insecure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml). -You can also use a [performance-optimized configuration file for secure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml) or [insecure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml). Be sure to modify the file wherever there is a `TODO` comment. +You can also use a [performance-optimized configuration file for secure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml) or [insecure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml). Be sure to modify the file wherever there is a `TODO` comment. 
### Version of CockroachDB @@ -336,7 +336,7 @@ If for some reason setting appropriate resource requests still isn't getting you #### Client applications on the same machines as CockroachDB -Running client applications such as benchmarking applications on the same machines as CockroachDB can be even worse than just having Kubernetes system pods on the same machines. They are very likely to end up competing for resources, because when the applications get more loaded than usual, so will the CockroachDB processes. The best way to avoid this is to [set resource requests and limits](#resource-requests-and-limits), but if you are unwilling or unable to do that for some reason, you can also set [anti-affinity scheduling policies](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) on your client applications. Anti-affinity policies are placed in the pod spec, so if you wanted to change our provided example load generator app, you'd change [these lines](https://github.com/cockroachdb/cockroach/blob/98c506c48f3517d1ac1aadb6a09e1b23ad672c37/cloud/kubernetes/example-app.yaml#L11-L12): +Running client applications such as benchmarking applications on the same machines as CockroachDB can be even worse than just having Kubernetes system pods on the same machines. They are very likely to end up competing for resources, because when the applications get more loaded than usual, so will the CockroachDB processes. The best way to avoid this is to [set resource requests and limits](#resource-requests-and-limits), but if you are unwilling or unable to do that for some reason, you can also set [anti-affinity scheduling policies](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) on your client applications. 
Anti-affinity policies are placed in the pod spec, so if you wanted to change our provided example load generator app, you'd change [these lines](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml): ~~~ yaml spec: diff --git a/src/current/v26.1/log-formats.md b/src/current/v26.1/log-formats.md index 7c715f38c89..cfcf8512b19 100644 --- a/src/current/v26.1/log-formats.md +++ b/src/current/v26.1/log-formats.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/logformats.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/logformats.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v26.1/logging.md b/src/current/v26.1/logging.md index a9b22c4c05f..357817bc212 100644 --- a/src/current/v26.1/logging.md +++ b/src/current/v26.1/logging.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/logging.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/logging.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v26.1/migrate-from-avro.md b/src/current/v26.1/migrate-from-avro.md index de42d232917..eb46e391fce 100644 --- a/src/current/v26.1/migrate-from-avro.md +++ b/src/current/v26.1/migrate-from-avro.md @@ -161,7 +161,7 @@ There are additional import [options][option] you can use when importing binary - `records_terminated_by`, which specifies the unicode character used to indicate new lines in the input binary or JSON file (default: `\n`). 
{{site.data.alerts.callout_info}} -The following example uses sample data generated by [Avro tools](https://github.com/cockroachdb/cockroach/tree/master/pkg/sql/importer/testdata/avro). +The following example uses sample data generated by Avro tools. {{site.data.alerts.end}} For example, to import the data from `simple-schema.json` into a `simple` table, first [create the table]({% link {{ page.version.version }}/create-table.md %}) to import into. Then run `IMPORT INTO` with the following options: diff --git a/src/current/v26.1/monitor-cockroachdb-kubernetes.md b/src/current/v26.1/monitor-cockroachdb-kubernetes.md index 581d156b4fd..760facc2268 100644 --- a/src/current/v26.1/monitor-cockroachdb-kubernetes.md +++ b/src/current/v26.1/monitor-cockroachdb-kubernetes.md @@ -132,7 +132,7 @@ If you're on Hosted GKE, before starting, make sure the email address associated {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/prometheus.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/prometheus.yaml ~~~ {{site.data.alerts.callout_info}} @@ -181,14 +181,14 @@ If you're on Hosted GKE, before starting, make sure the email address associated ## Configure Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. +Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. 
This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. -1. Download our alertmanager-config.yaml configuration file: +1. Download our alertmanager-config.yaml configuration file: {% include_cached copy-clipboard.html %} ~~~ shell $ curl -O \ - https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager-config.yaml + https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager-config.yaml ~~~ 1. Edit the `alertmanager-config.yaml` file to [specify the desired receivers for notifications](https://prometheus.io/docs/alerting/configuration/#receiver). Initially, the file contains a placeholder web hook. @@ -218,12 +218,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen The name of the secret, `alertmanager-cockroachdb`, must match the name used in the `alertmanager.yaml` file. If they differ, the Alertmanager instance will start without configuration, and nothing will happen. {{site.data.alerts.end}} -1. Use our [`alertmanager.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: +1. 
Use our [`alertmanager.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml ~~~ ~~~ @@ -248,12 +248,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen Alertmanager -1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml): +1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml): {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alert-rules.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml ~~~ ~~~ diff --git a/src/current/v26.1/monitor-cockroachdb-operator.md b/src/current/v26.1/monitor-cockroachdb-operator.md index daf5e678e7b..8b3c613a40c 100644 --- a/src/current/v26.1/monitor-cockroachdb-operator.md +++ b/src/current/v26.1/monitor-cockroachdb-operator.md @@ -76,7 +76,7 @@ If you're on Hosted GKE, before starting, make sure the email address associated {% include_cached copy-clipboard.html %} ~~~ shell - curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/prometheus.yaml + curl -O 
https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/prometheus.yaml ~~~ 1. Apply the Prometheus manifest. This creates the various objects necessary to run a Prometheus instance: @@ -119,13 +119,13 @@ For more details on using the Prometheus UI, see their [official documentation]( ## Configure Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. +Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. -1. Download our [alertmanager-config.yaml](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager-config.yaml) configuration file: +1. Download our [alertmanager-config.yaml](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager-config.yaml) configuration file: {% include_cached copy-clipboard.html %} ~~~ shell - curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager-config.yaml + curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager-config.yaml ~~~ 1. 
Edit the `alertmanager-config.yaml` file to [specify the desired receivers for notifications](https://prometheus.io/docs/alerting/configuration/#receiver). Initially, the file contains a placeholder web hook. @@ -152,12 +152,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen The name of the secret, `alertmanager-cockroachdb`, must match the name used in the `alertmanager.yaml` file. If they differ, the Alertmanager instance will start without configuration, and nothing will happen. {{site.data.alerts.end}} -1. Use our [alertmanager.yaml](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: +1. Use our [alertmanager.yaml](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: {% include_cached copy-clipboard.html %} ~~~ shell kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml ~~~ ~~~ shell alertmanager.monitoring.coreos.com/cockroachdb created @@ -180,12 +180,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen Alertmanager -1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml): +1. 
Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml): {% include_cached copy-clipboard.html %} ~~~ shell kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alert-rules.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml ~~~ ~~~ shell prometheusrule.monitoring.coreos.com/prometheus-cockroachdb-rules created diff --git a/src/current/v26.1/monitor-cockroachdb-with-prometheus.md b/src/current/v26.1/monitor-cockroachdb-with-prometheus.md index 2a9ff6f04ae..f2ca3db97c8 100644 --- a/src/current/v26.1/monitor-cockroachdb-with-prometheus.md +++ b/src/current/v26.1/monitor-cockroachdb-with-prometheus.md @@ -15,7 +15,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration - Make sure you have already started a CockroachDB cluster, either [locally]({% link {{ page.version.version }}/start-a-local-cluster.md %}) or in a [production environment]({% link {{ page.version.version }}/manual-deployment.md %}). -- Note that all files used in this tutorial can be found in the [`monitoring`](https://github.com/cockroachdb/cockroach/tree/master/monitoring) directory of the CockroachDB repository. +- Note that all files used in this tutorial can be found in the `monitoring` directory of the CockroachDB repository. ## Step 1. Install Prometheus @@ -39,11 +39,11 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration ## Step 2. Configure Prometheus -1. Download the starter [Prometheus configuration file](https://github.com/cockroachdb/cockroach/blob/master/monitoring/prometheus.yml) for CockroachDB: +1. 
Download the starter [Prometheus configuration file](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/prometheus.yml) for CockroachDB: {% include_cached copy-clipboard.html %} ~~~ shell - curl -o prometheus.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/prometheus.yml + curl -o prometheus.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/prometheus.yml ~~~ When you examine the configuration file, you'll see that it is set up to scrape the time series metrics of a single, insecure local node every 10 seconds: @@ -60,7 +60,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration Production cluster | Change the `targets` field to include `':'` for each node in the cluster. Also, be sure your network configuration allows TCP communication on the specified ports. Secure cluster | Uncomment `scheme: 'https'` and comment out `scheme: 'http'`. -1. Create a `rules` directory and download the [aggregation rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/aggregation.rules.yml) and [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/alerts.rules.yml) for CockroachDB into it: +1. 
Create a `rules` directory and download the [aggregation rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/aggregation.rules.yml) and [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml) for CockroachDB into it: {% include_cached copy-clipboard.html %} ~~~ shell @@ -74,12 +74,12 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration {% include_cached copy-clipboard.html %} ~~~ shell - curl -o rules/aggregation.rules.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/rules/aggregation.rules.yml + curl -o rules/aggregation.rules.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/rules/aggregation.rules.yml ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o rules/alerts.rules.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/rules/alerts.rules.yml + curl -o rules/alerts.rules.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml ~~~ ## Step 3. Start Prometheus @@ -108,7 +108,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration ## Step 4. Send notifications with Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. In step 2, you already downloaded CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/alerts.rules.yml). Now, download, configure, and start [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). +Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. 
In step 2, you already downloaded CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml). Now, download, configure, and start [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). 1. Download the [latest Alertmanager tarball](https://prometheus.io/download/#alertmanager) for your OS. @@ -173,29 +173,29 @@ Although Prometheus lets you graph metrics, [Grafana](https://grafana.com/) is a Url | `http://:9090` Access | Direct -1. Download the starter [Grafana dashboards](https://github.com/cockroachdb/cockroach/tree/master/monitoring/grafana-dashboards/by-cluster) for CockroachDB: +1. Download the starter Grafana dashboards for CockroachDB: {% include_cached copy-clipboard.html %} ~~~ shell - curl -o runtime.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/runtime.json + curl -o runtime.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/runtime.json # runtime dashboard: node status, including uptime, memory, and cpu. ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o storage.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/storage.json + curl -o storage.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/storage.json # storage dashboard: storage availability. ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o sql.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/sql.json + curl -o sql.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/sql.json # sql dashboard: sql queries/transactions. 
~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o replication.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/replication.json + curl -o replication.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/replication.json # replicas dashboard: replica information and operations. ~~~ diff --git a/src/current/v26.1/monitoring-and-alerting.md b/src/current/v26.1/monitoring-and-alerting.md index 46dfaf780ea..3a285c2115b 100644 --- a/src/current/v26.1/monitoring-and-alerting.md +++ b/src/current/v26.1/monitoring-and-alerting.md @@ -1090,7 +1090,7 @@ Start Prometheus and Alertmanager to begin watching for events to alert on. You ### Events to alert on {{site.data.alerts.callout_info}} -Currently, not all events listed have corresponding alert rule definitions available from the `api/v2/rules/` endpoint. Many events not yet available in this manner are defined in the pre-defined alerting rules. For more details, see [Monitor CockroachDB with Prometheus]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}). +Currently, not all events listed have corresponding alert rule definitions available from the `api/v2/rules/` endpoint. Many events not yet available in this manner are defined in the pre-defined alerting rules. For more details, see [Monitor CockroachDB with Prometheus]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}). {{site.data.alerts.end}} #### Node is down @@ -1099,7 +1099,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** If a node is down, its Prometheus endpoint will return a `Connection refused` error. Otherwise, the `liveness_livenodes` metric will be the total number of live nodes in the cluster. -- **Rule definition:** Use the `InstanceDead` alert from our pre-defined alerting rules. 
+- **Rule definition:** Use the `InstanceDead` alert from our pre-defined alerting rules. #### Node is restarting too frequently @@ -1107,7 +1107,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the number of times the `sys_uptime` metric in the node's Prometheus endpoint output was reset back to zero. The `sys_uptime` metric gives you the length of time, in seconds, that the `cockroach` process has been running. -- **Rule definition:** Use the `InstanceFlapping` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `InstanceFlapping` alert from our pre-defined alerting rules. #### Node is running low on disk space @@ -1115,7 +1115,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Divide the `capacity` metric by the `capacity_available` metric in the node's Prometheus endpoint output. -- **Rule definition:** Use the `StoreDiskLow` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `StoreDiskLow` alert from our pre-defined alerting rules. {% include {{page.version.version}}/storage/free-up-disk-space.md %} @@ -1131,7 +1131,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the `security_certificate_expiration_ca` metric in the node's Prometheus endpoint output. -- **Rule definition:** Use the `CACertificateExpiresSoon` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `CACertificateExpiresSoon` alert from our pre-defined alerting rules. #### Node certificate expires soon @@ -1139,7 +1139,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the `security_certificate_expiration_node` metric in the node's Prometheus endpoint output. 
-- **Rule definition:** Use the `NodeCertificateExpiresSoon` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `NodeCertificateExpiresSoon` alert from our pre-defined alerting rules. #### Changefeed is experiencing high latency diff --git a/src/current/v26.1/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md b/src/current/v26.1/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md index 55f144f4d74..34fcca4fb4a 100644 --- a/src/current/v26.1/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md +++ b/src/current/v26.1/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md @@ -71,7 +71,7 @@ Instead of using this approach, you can now [enable global access](https://cloud Our multi-region deployment approach relies on pod IP addresses being routable across three distinct Kubernetes clusters and regions. Both the hosted Google Kubernetes Engine (GKE) and Amazon Elastic Kubernetes Service (EKS) satisfy this requirement. -If you want to run on another cloud or on-premises, use this [basic network test](https://github.com/cockroachdb/cockroach/tree/master/cloud/kubernetes/multiregion#pod-to-pod-connectivity) to see if it will work. +If you want to run on another cloud or on-premises, use the pod-to-pod connectivity test described in the `cloud/kubernetes/multiregion` directory of the CockroachDB repository to see if it will work.
      1. Complete the **Before You Begin** steps described in the [Google Kubernetes Engine Quickstart](https://cloud.google.com/kubernetes-engine/docs/quickstart) documentation. @@ -322,21 +322,21 @@ This important rule enables node communication between Kubernetes clusters in di The Kubernetes cluster in each region needs to have a [Network Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/introduction.html) pointed at its CoreDNS service, which you will configure in the next step. -1. Upload our load balancer manifest [`dns-lb-eks.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml) to the Kubernetes clusters in all 3 regions: +1. Upload our load balancer manifest [`dns-lb-eks.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml) to the Kubernetes clusters in all 3 regions: {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context + kubectl apply -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context + kubectl apply -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context + kubectl apply -f 
https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context ~~~ You should see the load balancer appear in the Load Balancers section of the EC2 console in each region. This load balancer will route traffic to CoreDNS in the region. @@ -369,11 +369,11 @@ Each Kubernetes cluster has a [CoreDNS](https://coredns.io/) service that respon To enable traffic forwarding to CockroachDB pods in all 3 regions, you need to [modify the ConfigMap](https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configmap-options) for the CoreDNS Corefile in each region. -1. Download and open our ConfigMap template [`configmap.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/configmap.yaml): +1. Download and open our ConfigMap template [`configmap.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/configmap.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/configmap.yaml + curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/configmap.yaml ~~~ 1. After [obtaining the IP addresses of the Network Load Balancers](#set-up-load-balancing) in all 3 regions, you can use this information to define a **separate ConfigMap for each region**. Each unique ConfigMap lists the forwarding addresses for the pods in the 2 other regions. 
@@ -467,7 +467,7 @@ If you plan to run your instances exclusively on private subnets, set the follow {% include_cached copy-clipboard.html %} ~~~ shell $ curl -OOOOOOOOO \ - https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/{README.md,client-secure.yaml,cluster-init-secure.yaml,cockroachdb-statefulset-secure.yaml,dns-lb.yaml,example-app-secure.yaml,external-name-svc.yaml,setup.py,teardown.py} + https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/{README.md,client-secure.yaml,cluster-init-secure.yaml,cockroachdb-statefulset-secure.yaml,dns-lb.yaml,example-app-secure.yaml,external-name-svc.yaml,setup.py,teardown.py} ~~~ 1. Retrieve the `kubectl` "contexts" for your clusters: @@ -685,11 +685,11 @@ The below steps use [`cockroach cert` commands]({% link {{ page.version.version ### Create StatefulSets -1. Download and open our [multi-region StatefulSet configuration](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml). You'll save three versions of this file locally, one for each set of 3 CockroachDB nodes per region. +1. Download and open our [multi-region StatefulSet configuration](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml). You'll save three versions of this file locally, one for each set of 3 CockroachDB nodes per region. {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml ~~~ Look for **TODO** comments in the file. 
These highlight fields you need to define before deploying your StatefulSet. @@ -814,7 +814,7 @@ The pod uses the `root` client certificate created earlier by the `setup.py` scr {% include_cached copy-clipboard.html %} ~~~ shell - kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/client-secure.yaml --context --namespace + kubectl create -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/client-secure.yaml --context --namespace ~~~ ~~~ diff --git a/src/current/v26.1/postgresql-compatibility.md b/src/current/v26.1/postgresql-compatibility.md index ad110c9e4ef..d6be88a52d4 100644 --- a/src/current/v26.1/postgresql-compatibility.md +++ b/src/current/v26.1/postgresql-compatibility.md @@ -34,11 +34,11 @@ When set to `true`, multiple portals can be open at the same time, with their ex This feature has the following limitations: - Only read-only [`SELECT` queries]({% link {{ page.version.version }}/selection-queries.md %}) without [subqueries]({% link {{ page.version.version }}/subqueries.md %}) are supported. 
-- Postqueries (which are how CockroachDB executes [foreign key checks]({% link {{ page.version.version }}/foreign-key.md %}), for example) are not supported - [cockroachdb/cockroach#96398](https://github.com/cockroachdb/cockroach/issues/96398) -- [Distributed SQL execution]({% link {{ page.version.version }}/architecture/sql-layer.md %}#distsql) is not supported for multiple active portals; instead queries execute on the [gateway node]({% link {{ page.version.version }}/architecture/life-of-a-distributed-transaction.md %}#gateway) only - [cockroachdb/cockroach#100822](https://github.com/cockroachdb/cockroach/issues/100822) -- Only the latest execution of a statement from a pausable portal is recorded by the [trace infrastructure]({% link {{ page.version.version }}/show-trace.md %}) - [cockroachdb/cockroach#99404](https://github.com/cockroachdb/cockroach/issues/99404) +- Postqueries (which are how CockroachDB executes [foreign key checks]({% link {{ page.version.version }}/foreign-key.md %}), for example) are not supported - cockroachdb/cockroach#96398 +- [Distributed SQL execution]({% link {{ page.version.version }}/architecture/sql-layer.md %}#distsql) is not supported for multiple active portals; instead queries execute on the [gateway node]({% link {{ page.version.version }}/architecture/life-of-a-distributed-transaction.md %}#gateway) only - cockroachdb/cockroach#100822 +- Only the latest execution of a statement from a pausable portal is recorded by the [trace infrastructure]({% link {{ page.version.version }}/show-trace.md %}) - cockroachdb/cockroach#99404 -In addition to the known issues, additional performance testing is needed. The current list of known issues can be viewed [on GitHub using the `A-pausable-portals` label](https://github.com/cockroachdb/cockroach/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc++label%3AA-pausable-portals+), and we welcome bug reports. +In addition to the known issues, additional performance testing is needed. 
The current list of known issues can be viewed on GitHub using the `A-pausable-portals` label, and we welcome bug reports. ## Features that differ from PostgreSQL @@ -181,7 +181,7 @@ An `x` value less than `1` would result in the following error: pq: check constraint violated ~~~ -[#35370](https://github.com/cockroachdb/cockroach/issues/35370) +#35370 ### Column name from an outer column inside a subquery @@ -207,7 +207,7 @@ PostgreSQL: 1 ~~~ -[#46563](https://github.com/cockroachdb/cockroach/issues/46563) +#46563 ### SQL Compatibility diff --git a/src/current/v26.1/query-spatial-data.md b/src/current/v26.1/query-spatial-data.md index a259dd87082..d45e8b26b6f 100644 --- a/src/current/v26.1/query-spatial-data.md +++ b/src/current/v26.1/query-spatial-data.md @@ -24,7 +24,7 @@ Just as CockroachDB strives for [PostgreSQL compatibility]({% link {{ page.versi CockroachDB does not implement the full list of PostGIS built-in functions and operators. Also, [spatial indexing works differently]({% link {{ page.version.version }}/spatial-indexes.md %}) (see the [Performance](#performance) section below). For a list of the spatial functions CockroachDB supports, see [Geospatial functions]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions). -If your application needs support for functions that are not yet implemented, check the [meta-issue for built-in function support on GitHub](https://github.com/cockroachdb/cockroach/issues/49203), which describes how to find an issue for the built-in function(s) you need. +If your application needs support for functions that are not yet implemented, check the meta-issue for built-in function support on GitHub, which describes how to find an issue for the built-in function(s) you need. For a list of other known limitations, refer to [Known Limitations]({% link {{ page.version.version }}/spatial-data-overview.md %}#known-limitations). 
diff --git a/src/current/v26.1/read-committed.md b/src/current/v26.1/read-committed.md index 6b83f50d88d..0858d1e0244 100644 --- a/src/current/v26.1/read-committed.md +++ b/src/current/v26.1/read-committed.md @@ -20,7 +20,7 @@ Whereas `SERIALIZABLE` isolation guarantees data correctness by placing transact If your workload is already running well under `SERIALIZABLE` isolation, Cockroach Labs does not recommend changing to `READ COMMITTED` isolation unless there is a specific need. {{site.data.alerts.callout_info}} -`READ COMMITTED` on CockroachDB provides stronger isolation than `READ COMMITTED` on PostgreSQL. On CockroachDB, `READ COMMITTED` prevents anomalies within single statements. For complete details on how `READ COMMITTED` is implemented on CockroachDB, see the [Read Committed RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20230122_read_committed_isolation.md). +`READ COMMITTED` on CockroachDB provides stronger isolation than `READ COMMITTED` on PostgreSQL. On CockroachDB, `READ COMMITTED` prevents anomalies within single statements. For complete details on how `READ COMMITTED` is implemented on CockroachDB, see the [Read Committed RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20230122_read_committed_isolation.md). {{site.data.alerts.end}} ## Enable `READ COMMITTED` isolation @@ -918,5 +918,5 @@ SELECT * FROM schedules - [`SELECT ... 
FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) - [Serializable Transactions]({% link {{ page.version.version }}/demo-serializable.md %}) - [What Write Skew Looks Like](https://www.cockroachlabs.com/blog/what-write-skew-looks-like/) -- [Read Committed RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20230122_read_committed_isolation.md) +- [Read Committed RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20230122_read_committed_isolation.md) - [Migration Overview]({% link molt/migration-overview.md %}) diff --git a/src/current/v26.1/restore.md b/src/current/v26.1/restore.md index 32943361078..b9ae28cea2a 100644 --- a/src/current/v26.1/restore.md +++ b/src/current/v26.1/restore.md @@ -215,11 +215,11 @@ When restoring an individual table that references a user-defined type (e.g., [` - If there is an existing type in the cluster with the same name that is compatible with the type in the backup, CockroachDB will map the type in the backup to the type in the cluster. - If there is an existing type in the cluster with the same name but it is _not_ compatible with the type in the backup, the restore will not succeed and you will be asked to resolve the naming conflict. You can do this by either [dropping]({% link {{ page.version.version }}/drop-type.md %}) or [renaming]({% link {{ page.version.version }}/alter-type.md %}) the existing user-defined type. -In general, two types are compatible if they are the same kind (e.g., an enum is only compatible with other enums). Additionally, enums are only compatible if they have the same ordered set of elements that have also been [created in the same way](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20200331_enums.md#physical-layout). For example: +In general, two types are compatible if they are the same kind (e.g., an enum is only compatible with other enums). 
Additionally, enums are only compatible if they have the same ordered set of elements that have also been [created in the same way](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20200331_enums.md#physical-layout). For example: - `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes', 'no')` are compatible. - `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('no', 'yes')` are not compatible. -- `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes'); ALTER TYPE t2 ADD VALUE ('no')` are not compatible because they were not created in the same way. +- `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes'); ALTER TYPE t2 ADD VALUE ('no')` are not compatible because they were not [created in the same way](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20200331_enums.md#physical-layout). ### Object dependencies diff --git a/src/current/v26.1/schedule-cockroachdb-kubernetes.md b/src/current/v26.1/schedule-cockroachdb-kubernetes.md index dd67d369c5e..5972aa3012e 100644 --- a/src/current/v26.1/schedule-cockroachdb-kubernetes.md +++ b/src/current/v26.1/schedule-cockroachdb-kubernetes.md @@ -118,7 +118,7 @@ For more context on how these rules work, see the [Kubernetes documentation](htt Specify pod affinities and anti-affinities in `affinity.podAffinity` and `affinity.podAntiAffinity` in the {{ site.data.products.public-operator }}'s custom resource, which is used to [deploy the cluster]({% link {{ page.version.version }}/deploy-cockroachdb-with-kubernetes.md %}#initialize-the-cluster). If you specify multiple `matchExpressions` labels, the node must match all of them. If you specify multiple `values` for a label, the node can match any of the values. 
-The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. +The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. {% include_cached copy-clipboard.html %} ~~~ yaml diff --git a/src/current/v26.1/schedule-cockroachdb-operator.md b/src/current/v26.1/schedule-cockroachdb-operator.md index 4630ea6d9d1..25f040d1a91 100644 --- a/src/current/v26.1/schedule-cockroachdb-operator.md +++ b/src/current/v26.1/schedule-cockroachdb-operator.md @@ -88,7 +88,7 @@ Specify pod affinities and node anti-affinities in `cockroachdb.crdbCluster.podT The {{ site.data.products.cockroachdb-operator }} hard-codes the pod template to only allow one pod per Kubernetes node. If you need to override this value, you can [override the pod template]({% link {{ page.version.version }}/override-templates-cockroachdb-operator.md %}#override-the-default-pod). -The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. +The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. 
~~~ yaml cockroachdb: diff --git a/src/current/v26.1/sql-feature-support.md b/src/current/v26.1/sql-feature-support.md index df9fa0711d8..d51a2393894 100644 --- a/src/current/v26.1/sql-feature-support.md +++ b/src/current/v26.1/sql-feature-support.md @@ -194,4 +194,4 @@ XML | ✗ | Standard | XML data can be stored as `BYTES`, but we do not offer XM Triggers | Partial | Standard | [Triggers documentation]({% link {{ page.version.version }}/triggers.md %}) Row-level TTL | ✓ | Common Extension | Automatically delete expired rows. For more information, see [Batch-delete expired data with Row-Level TTL]({% link {{ page.version.version }}/row-level-ttl.md %}). User-defined functions | Partial | Standard | [User-Defined Functions documentation]({% link {{ page.version.version }}/user-defined-functions.md %}) - `CREATE EXTENSION "uuid-ossp"` | ✓ | Common Extension | Provides access to several additional [UUID generation functions]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions). Note that these UUID functions are available without typing `CREATE EXTENSION "uuid-ossp"`. CockroachDB does not have full support for `CREATE EXTENSION`. [GitHub issue tracking `CREATE EXTENSION` support](https://github.com/cockroachdb/cockroach/issues/74777). + `CREATE EXTENSION "uuid-ossp"` | ✓ | Common Extension | Provides access to several additional [UUID generation functions]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions). Note that these UUID functions are available without typing `CREATE EXTENSION "uuid-ossp"`. CockroachDB does not have full support for `CREATE EXTENSION`. GitHub issue tracking `CREATE EXTENSION` support. 
diff --git a/src/current/v26.1/sql-name-resolution.md b/src/current/v26.1/sql-name-resolution.md index 554a9a65583..9600bfe1ef7 100644 --- a/src/current/v26.1/sql-name-resolution.md +++ b/src/current/v26.1/sql-name-resolution.md @@ -37,7 +37,7 @@ If you are upgrading to {{ page.version.version }}, take any combination of the - [Create new schemas]({% link {{ page.version.version }}/create-schema.md %}) in databases on your cluster. After the schemas are created, use [`ALTER TABLE ... RENAME`]({% link {{ page.version.version }}/alter-table.md %}#rename-to), [`ALTER SEQUENCE ... RENAME`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE ... RENAME`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW ... RENAME`]({% link {{ page.version.version }}/alter-view.md %}) statements to move objects between databases as needed. To move objects between schemas, use [`ALTER TABLE ... SET SCHEMA`]({% link {{ page.version.version }}/alter-table.md %}#set-schema), [`ALTER SEQUENCE ... SET SCHEMA`]({% link {{ page.version.version }}/alter-sequence.md %}), or [`ALTER VIEW ... SET SCHEMA`]({% link {{ page.version.version }}/alter-view.md %}). -- If your cluster contains cross-database references (e.g., a cross-database foreign key reference, or a cross-database view reference), use the relevant [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}), [`ALTER SEQUENCE`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW `]({% link {{ page.version.version }}/alter-view.md %}) statements to move any cross-referencing objects to the same database, but different schemas. Cross-database object references were allowed in earlier versions of CockroachDB to make database-object naming hierarchies more flexible for users. 
In v20.2, creating cross-database references are disabled for [foreign keys](foreign-key.html), [views]({% link {{ page.version.version }}/views.md %}), and [sequence ownership]({% link {{ page.version.version }}/create-sequence.md %}). For details, see [tracking issue](https://github.com/cockroachdb/cockroach/issues/55791). +- If your cluster contains cross-database references (e.g., a cross-database foreign key reference, or a cross-database view reference), use the relevant [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}), [`ALTER SEQUENCE`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW `]({% link {{ page.version.version }}/alter-view.md %}) statements to move any cross-referencing objects to the same database, but different schemas. Cross-database object references were allowed in earlier versions of CockroachDB to make database-object naming hierarchies more flexible for users. In v20.2, creating cross-database references are disabled for [foreign keys](foreign-key.html), [views]({% link {{ page.version.version }}/views.md %}), and [sequence ownership]({% link {{ page.version.version }}/create-sequence.md %}). For details, see tracking issue. ## How name resolution works diff --git a/src/current/v26.1/st_union.md b/src/current/v26.1/st_union.md index c49acc60e30..6f98d9201b6 100644 --- a/src/current/v26.1/st_union.md +++ b/src/current/v26.1/st_union.md @@ -12,7 +12,7 @@ Given a set of shapes (e.g., from a [selection query]({% link {{ page.version.ve - [`GEOMETRY`]({% link {{ page.version.version }}/architecture/glossary.md %}#geometry) {{site.data.alerts.callout_info}} -The non-aggregate version of `ST_Union` is not yet implemented. For more information, see [cockroach#49064](https://github.com/cockroachdb/cockroach/issues/49064). +The non-aggregate version of `ST_Union` is not yet implemented. For more information, see cockroach#49064. 
{{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v26.1/temporary-tables.md b/src/current/v26.1/temporary-tables.md index 3a92fc64c97..21af0938cd2 100644 --- a/src/current/v26.1/temporary-tables.md +++ b/src/current/v26.1/temporary-tables.md @@ -10,7 +10,7 @@ docs_area: develop To create a temp table, add `TEMP`/`TEMPORARY` to a [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}) or [`CREATE TABLE AS`]({% link {{ page.version.version }}/create-table-as.md %}) statement. For full syntax details, see the [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}#synopsis) and [`CREATE TABLE AS`]({% link {{ page.version.version }}/create-table-as.md %}#synopsis) pages. For example usage, see [Examples](#examples). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v26.1/views.md b/src/current/v26.1/views.md index 7aaea1c54a9..d31a1314b5c 100644 --- a/src/current/v26.1/views.md +++ b/src/current/v26.1/views.md @@ -647,7 +647,7 @@ To speed up queries on materialized views, you can add an [index]({% link {{ pag CockroachDB supports session-scoped temporary views. Unlike persistent views, temporary views can only be accessed from the session in which they were created, and they are dropped at the end of the session. You can create temporary views on both persistent tables and [temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). 
+{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v26.2/active-session-history.md b/src/current/v26.2/active-session-history.md index f2ca8113919..fa07f63a7bb 100644 --- a/src/current/v26.2/active-session-history.md +++ b/src/current/v26.2/active-session-history.md @@ -89,7 +89,7 @@ Each sample is attributed to a workload via the `workload_type` and `workload_id | `STATEMENT` | Hex-encoded [statement fingerprint]({% link {{ page.version.version }}/ui-statements-page.md %}#sql-statement-fingerprints) ID | | `JOB` | Decimal [job ID]({% link {{ page.version.version }}/show-jobs.md %}) | | `SYSTEM` | One of the following system task names:
      `LDR`, `RAFT`, `STORELIVENESS`, `RPC_HEARTBEAT`, `NODE_LIVENESS`, `SQL_LIVENESS`, `TIMESERIES`, `RAFT_LOG_TRUNCATION`, `TXN_HEARTBEAT`, `INTENT_RESOLUTION`, `LEASE_ACQUISITION`, `MERGE_QUEUE`, `CIRCUIT_BREAKER_PROBE`, `GC`, `RANGEFEED`, `REPLICATE_QUEUE`, `SPLIT_QUEUE`, `DESCRIPTOR_LEASE` | -| `UNKNOWN` | Unidentified. If you're seeing many unattributed samples for your workload, you may want to [file an issue](https://github.com/cockroachdb/cockroach/issues/new?template=bug_report.md). | +| `UNKNOWN` | Unidentified. If you're seeing many unattributed samples for your workload, you may want to file an issue. | ### `work_event` columns diff --git a/src/current/v26.2/admission-control.md b/src/current/v26.2/admission-control.md index 345df652f91..755cbd51f1c 100644 --- a/src/current/v26.2/admission-control.md +++ b/src/current/v26.2/admission-control.md @@ -22,7 +22,7 @@ Admission control works on a per-[node]({% link {{ page.version.version }}/archi For more details about how the admission control system works, see: -- The [Admission Control tech note](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/admission_control.md). +- [The Admission Control tech note](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/tech-notes/admission_control.md). - The blog post [Here's how CockroachDB keeps your database from collapsing under load](https://www.cockroachlabs.com/blog/admission-control-in-cockroachdb/). ## Use cases for admission control @@ -167,6 +167,6 @@ The [DB Console Overload dashboard]({% link {{ page.version.version }}/ui-overlo ## See also - The [Overload Dashboard]({% link {{ page.version.version }}/ui-overload-dashboard.md %}) in the [DB Console]({% link {{ page.version.version }}/ui-overview.md %}). -- The [technical note for admission control](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/admission_control.md) for details on the design of the admission control system. 
+- The [technical note for admission control](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/tech-notes/admission_control.md) for details on the design of the admission control system. - The blog post [Here's how CockroachDB keeps your database from collapsing under load](https://www.cockroachlabs.com/blog/admission-control-in-cockroachdb/). - The blog post [Rubbing Control Theory on the Go scheduler](https://www.cockroachlabs.com/blog/rubbing-control-theory/). diff --git a/src/current/v26.2/architecture/sql-layer.md b/src/current/v26.2/architecture/sql-layer.md index bfb67a48c05..2605879b93d 100644 --- a/src/current/v26.2/architecture/sql-layer.md +++ b/src/current/v26.2/architecture/sql-layer.md @@ -117,7 +117,7 @@ It's also important––for indexed columns––that this byte encoding preser However, for non-indexed columns (e.g., non-`PRIMARY KEY` columns), CockroachDB instead uses an encoding (known as "value encoding") which consumes less space but does not preserve ordering. -You can find more exhaustive detail in the [Encoding Tech Note](https://github.com/cockroachdb/cockroach/blob/master/docs/tech-notes/encoding.md). +You can find more exhaustive detail in [the Encoding Tech Note](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/tech-notes/encoding.md). ### DistSQL @@ -134,7 +134,7 @@ To run SQL statements in a distributed fashion, we introduce a couple of concept - **Logical plan**: Similar to the AST/`planNode` tree described above, it represents the abstract (non-distributed) data flow through computation stages. - **Physical plan**: A physical plan is conceptually a mapping of the logical plan nodes to physical machines running `cockroach`. Logical plan nodes are replicated and specialized depending on the cluster topology. Like `planNodes` above, these components of the physical plan are scheduled and run on the cluster. -You can find much greater detail in the [DistSQL RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20160421_distributed_sql.md).
+You can find much greater detail in [the DistSQL RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20160421_distributed_sql.md). ## Schema changes diff --git a/src/current/v26.2/cluster-settings.md b/src/current/v26.2/cluster-settings.md index 63169bea6cf..775dca5445c 100644 --- a/src/current/v26.2/cluster-settings.md +++ b/src/current/v26.2/cluster-settings.md @@ -26,7 +26,7 @@ These cluster settings have a broad impact on CockroachDB internals and affect a {% include {{page.version.version}}/sql/sql-defaults-cluster-settings-deprecation-notice.md %} -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/settings/settings.html %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/settings/settings.html{% endcapture %}{% include {{ cockroach_include }} %} ## View current cluster settings diff --git a/src/current/v26.2/cluster-setup-troubleshooting.md b/src/current/v26.2/cluster-setup-troubleshooting.md index e7c16577c14..23a318fa212 100644 --- a/src/current/v26.2/cluster-setup-troubleshooting.md +++ b/src/current/v26.2/cluster-setup-troubleshooting.md @@ -104,7 +104,7 @@ If you are trying to run all nodes on the same machine, you might get the follow #### Store directory already exists ~~~ -ERROR: could not cleanup temporary directories from record file: could not lock temporary directory /Users/amruta/go/src/github.com/cockroachdb/cockroach/cockroach-data/cockroach-temp301343769, may still be in use: IO error: While lock file: /Users/amruta/go/src/github.com/cockroachdb/cockroach/cockroach-data/cockroach-temp301343769/TEMP_DIR.LOCK: Resource temporarily unavailable +ERROR: could not cleanup temporary directories from record file: could not lock temporary directory /cockroach-data/cockroach-temp301343769, may still be in use: IO error: While lock file: /cockroach-data/cockroach-temp301343769/TEMP_DIR.LOCK: 
Resource temporarily unavailable ~~~ **Explanation:** When starting a new node on the same machine, the directory you choose to store the data in also contains metadata identifying the cluster the data came from. This causes conflicts when you've already started a node on the server and then tried to start another cluster using the same directory. @@ -225,7 +225,7 @@ Again, firewalls or hostname issues can cause any of these steps to fail. #### TCP connection lingering -If there is no host at the target IP address, or if a firewall rule blocks traffic to the target address and port, a [TCP handshake can linger](https://github.com/cockroachdb/cockroach/issues/53410) while the client network stack waits for a TCP packet in response to network requests. +If there is no host at the target IP address, or if a firewall rule blocks traffic to the target address and port, a TCP handshake can linger while the client network stack waits for a TCP packet in response to network requests. **Explanation:** CockroachDB servers rely on the network to report when a TCP connection fails. In most scenarios when a connection fails, the network immediately reports a connection failure, resulting in a `Connection refused` error. However, the scenario described above can cause connections to hang instead of failing immediately. diff --git a/src/current/v26.2/cockroachdb-feature-availability.md b/src/current/v26.2/cockroachdb-feature-availability.md index 3cc21c29e27..947250708c8 100644 --- a/src/current/v26.2/cockroachdb-feature-availability.md +++ b/src/current/v26.2/cockroachdb-feature-availability.md @@ -206,7 +206,7 @@ The [`SHOW RANGE ... 
FOR ROW`]({% link {{ page.version.version }}/show-range-for ### Temporary objects -[Temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}), [temporary views]({% link {{ page.version.version }}/views.md %}#temporary-views), and [temporary sequences]({% link {{ page.version.version }}/create-sequence.md %}#temporary-sequences) are in preview in CockroachDB. If you create too many temporary objects in a session, the performance of DDL operations will degrade. Dropping large numbers of temporary objects in rapid succession can also enqueue many [schema change GC jobs]({% link {{ page.version.version }}/show-jobs.md %}), which may further degrade cluster performance. This performance degradation could persist long after creating the temporary objects. For more details, see [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +[Temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}), [temporary views]({% link {{ page.version.version }}/views.md %}#temporary-views), and [temporary sequences]({% link {{ page.version.version }}/create-sequence.md %}#temporary-sequences) are in preview in CockroachDB. If you create too many temporary objects in a session, the performance of DDL operations will degrade. Dropping large numbers of temporary objects in rapid succession can also enqueue many [schema change GC jobs]({% link {{ page.version.version }}/show-jobs.md %}), which may further degrade cluster performance. This performance degradation could persist long after creating the temporary objects. For more details, see cockroachdb/cockroach#46260. To enable temporary objects, set the `experimental_enable_temp_tables` [session variable]({% link {{ page.version.version }}/show-vars.md %}) to `on`. 
diff --git a/src/current/v26.2/common-table-expressions.md b/src/current/v26.2/common-table-expressions.md index a76f13ff39d..bb43aa48b0c 100644 --- a/src/current/v26.2/common-table-expressions.md +++ b/src/current/v26.2/common-table-expressions.md @@ -446,7 +446,7 @@ SELECT COUNT(*) FROM temp; Because this pattern incurs the overhead of a new scan for each iteration, it is slower per row than a full scan. It is therefore faster than a full scan in cases (such as this one) where many rows are skipped, but is slower if they are not. {{site.data.alerts.callout_info}} -Some recursive CTEs are not not yet optimized. For details, see the [tracking issue](https://github.com/cockroachdb/cockroach/issues/89954). +Some recursive CTEs are not yet optimized. For details, see the tracking issue. {{site.data.alerts.end}} ## Correlated common table expressions diff --git a/src/current/v26.2/configure-replication-zones.md b/src/current/v26.2/configure-replication-zones.md index 77ca4c675ec..aa59a1802af 100644 --- a/src/current/v26.2/configure-replication-zones.md +++ b/src/current/v26.2/configure-replication-zones.md @@ -98,7 +98,7 @@ For more information, see the following subsections: The hierarchy of inheritance for zone configs can be visualized using the following outline-style diagram, in which each level of indentation denotes an inheritance relationship. -The only exception to this simple inheritance relationship is that due to a known limitation, sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information, see [cockroachdb/cockroach#75862](https://github.com/cockroachdb/cockroach/issues/75862). +The only exception to this simple inheritance relationship is that due to a known limitation, sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table.
For more information, see cockroachdb/cockroach#75862. ``` - default @@ -127,7 +127,7 @@ From the whole-system perspective, the hierarchy of schema object zone configs c The following diagram presents the same set of schema objects as the previous outline-style diagram, but using boxes and lines joined with arrows that represent the "top-down" view. -Each box represents a schema object in the zone configuration inheritance hierarchy. Each solid line ends in an arrow that points from a parent object to its child object, which will inherit the parent's values unless those values are changed at the child level. The dotted lines between partitions and sub-partitions represent the known limitation mentioned previously that sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information about this limitation, see [cockroachdb/cockroach#75862](https://github.com/cockroachdb/cockroach/issues/75862). +Each box represents a schema object in the zone configuration inheritance hierarchy. Each solid line ends in an arrow that points from a parent object to its child object, which will inherit the parent's values unless those values are changed at the child level. The dotted lines between partitions and sub-partitions represent the known limitation mentioned previously that sub-partitions do not inherit their values from their parent partitions. Instead, sub-partitions inherit their values from the parent table. For more information about this limitation, see cockroachdb/cockroach#75862. zone config inheritance diagram diff --git a/src/current/v26.2/create-sequence.md b/src/current/v26.2/create-sequence.md index 72bc62b717b..a0794958576 100644 --- a/src/current/v26.2/create-sequence.md +++ b/src/current/v26.2/create-sequence.md @@ -60,7 +60,7 @@ CockroachDB supports the following [SQL sequence functions]({% link {{ page.vers CockroachDB supports session-scoped temporary sequences. 
Unlike persistent sequences, temporary sequences can only be accessed from the session in which they were created, and they are dropped at the end of the session. You can create temporary sequences on both persistent tables and [temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v26.2/disaster-recovery-planning.md b/src/current/v26.2/disaster-recovery-planning.md index de593915cf0..762eb549920 100644 --- a/src/current/v26.2/disaster-recovery-planning.md +++ b/src/current/v26.2/disaster-recovery-planning.md @@ -322,7 +322,7 @@ If your cluster is running, you do not have a backup that encapsulates the time If you have corrupted data in a database or table, [restore]({% link {{ page.version.version }}/restore.md %}) the object from a prior [backup]({% link {{ page.version.version }}/backup.md %}). If revision history is in the backup, you can restore from a [point in time]({% link {{ page.version.version }}/take-backups-with-revision-history-and-restore-from-a-point-in-time.md %}). -Instead of dropping the corrupted table or database, we recommend [renaming the table]({% link {{ page.version.version }}/alter-table.md %}#rename-to) or [renaming the database]({% link {{ page.version.version }}/alter-database.md %}#rename-to) so you have historical data to compare to later. If you drop a database, the database cannot be referenced with `AS OF SYSTEM TIME` queries (see [#51380](https://github.com/cockroachdb/cockroach/issues/51380) for more information), and you will need to take a backup that is backdated to the system time when the database still existed. 
+Instead of dropping the corrupted table or database, we recommend [renaming the table]({% link {{ page.version.version }}/alter-table.md %}#rename-to) or [renaming the database]({% link {{ page.version.version }}/alter-database.md %}#rename-to) so you have historical data to compare to later. If you drop a database, the database cannot be referenced with `AS OF SYSTEM TIME` queries (see #51380 for more information), and you will need to take a backup that is backdated to the system time when the database still existed. {{site.data.alerts.callout_info}} If the table you are restoring has foreign keys, [careful consideration]({% link {{ page.version.version }}/restore.md %}#remove-the-foreign-key-before-restore) should be applied to make sure data integrity is maintained during the restore process. diff --git a/src/current/v26.2/eventlog.md b/src/current/v26.2/eventlog.md index 2dd3c072d82..e7b813ded9d 100644 --- a/src/current/v26.2/eventlog.md +++ b/src/current/v26.2/eventlog.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/eventlog.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/eventlog.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v26.2/file-an-issue.md b/src/current/v26.2/file-an-issue.md index 9255e2c08b2..760c5b181db 100644 --- a/src/current/v26.2/file-an-issue.md +++ b/src/current/v26.2/file-an-issue.md @@ -28,7 +28,7 @@ To file an issue in GitHub, we need the following information: ### Template -You can use this as a template for [filing an issue in GitHub](https://github.com/cockroachdb/cockroach/issues/new): +You can use this as a template for filing an issue in GitHub: ~~~ diff --git a/src/current/v26.2/fips.md b/src/current/v26.2/fips.md index f1d6cf8c51d..9b5b26f2576 100644 --- a/src/current/v26.2/fips.md +++ 
b/src/current/v26.2/fips.md @@ -84,7 +84,7 @@ The FIPS-ready CockroachDB Docker images are based on [Red Hat's Universal Base If you do not want to use the FIPS-ready CockroachDB Docker image directly, you can create a custom Docker image based on [Red Hat's Universal Base Image 10](https://catalog.redhat.com/software/containers/ubi10/ubi-minimal/): -- You can model your Dockerfile on the one that Cockroach Labs uses to produce the [FIPS-ready Docker image](https://github.com/cockroachdb/cockroach/blob/master/build/deploy/Dockerfile) for CockroachDB. +- You can model your Dockerfile on the one that Cockroach Labs uses to produce the FIPS-ready Docker image for CockroachDB. - The FIPS-ready binary includes the FIPS 140-3 Go Cryptographic Module and does not require additional system libraries to be installed. diff --git a/src/current/v26.2/foreign-key.md b/src/current/v26.2/foreign-key.md index b9e6cc50f5c..da7def88ff1 100644 --- a/src/current/v26.2/foreign-key.md +++ b/src/current/v26.2/foreign-key.md @@ -92,7 +92,7 @@ For matching purposes, composite foreign keys can be in one of three states: For examples showing how these key matching algorithms work, see [Match composite foreign keys with `MATCH SIMPLE` and `MATCH FULL`](#match-composite-foreign-keys-with-match-simple-and-match-full). {{site.data.alerts.callout_info}} -CockroachDB does not support `MATCH PARTIAL`. For more information, see issue [#20305](https://github.com/cockroachdb/cockroach/issues/20305). +CockroachDB does not support `MATCH PARTIAL`. For more information, see issue #20305. 
{{site.data.alerts.end}} ### Foreign key actions diff --git a/src/current/v26.2/functions-and-operators.md b/src/current/v26.2/functions-and-operators.md index 3da97a7f5fd..80c1485ede8 100644 --- a/src/current/v26.2/functions-and-operators.md +++ b/src/current/v26.2/functions-and-operators.md @@ -51,7 +51,7 @@ In addition to the built-in functions described in the following sections, Cockr ## Built-in functions -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/functions.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/functions.md{% endcapture %}{% include {{ cockroach_include }} %} ## Aggregate functions @@ -61,11 +61,11 @@ For examples showing how to use aggregate functions, see [the `SELECT` clause do Non-commutative aggregate functions are sensitive to the order in which the rows are processed in the surrounding [`SELECT` clause]({% link {{ page.version.version }}/select-clause.md %}#aggregate-functions). To specify the order in which input rows are processed, you can add an [`ORDER BY`]({% link {{ page.version.version }}/order-by.md %}) clause within the function argument list. For examples, see the [`SELECT` clause]({% link {{ page.version.version }}/select-clause.md %}#order-aggregate-function-input-rows-by-column) documentation. 
{{site.data.alerts.end}} -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/aggregates.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/aggregates.md{% endcapture %}{% include {{ cockroach_include }} %} ## Window functions -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/window_functions.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/window_functions.md{% endcapture %}{% include {{ cockroach_include }} %} ## Operators @@ -137,7 +137,7 @@ The following table lists all CockroachDB operators from highest to lowest prece ### Supported operations -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/sql/operators.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/sql/operators.md{% endcapture %}{% include {{ cockroach_include }} %} {% comment %} ## `CAST()` diff --git a/src/current/v26.2/install-client-drivers.md b/src/current/v26.2/install-client-drivers.md index 75b39685574..1a20d6e779f 100644 --- a/src/current/v26.2/install-client-drivers.md +++ b/src/current/v26.2/install-client-drivers.md @@ -8,7 +8,7 @@ docs_area: develop CockroachDB supports both native drivers and the PostgreSQL wire protocol, so most available PostgreSQL client drivers and ORM frameworks should work with CockroachDB. Choose a language for supported clients, and follow the installation steps. After you install a client library, you can [connect to the database]({% link {{ page.version.version }}/connect-to-the-database.md %}). {{site.data.alerts.callout_info}} -Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM framework with **partial** support. 
If you encounter problems, please [open an issue](https://github.com/cockroachdb/cockroach/issues/new) with details to help us make progress toward full support. +Applications may encounter incompatibilities when using advanced or obscure features of a driver or ORM framework with **partial** support. If you encounter problems, please open an issue with details to help us make progress toward full support. {{site.data.alerts.end}}
      diff --git a/src/current/v26.2/install-cockroachdb-mac.md b/src/current/v26.2/install-cockroachdb-mac.md index 116d9e9901e..40dff5896d1 100644 --- a/src/current/v26.2/install-cockroachdb-mac.md +++ b/src/current/v26.2/install-cockroachdb-mac.md @@ -201,4 +201,4 @@ CockroachDB runtimes built for the ARM architecture have the following limitatio {% comment %}v22.2.0+{% endcomment %} -On macOS ARM systems, [spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the GEOS libraries. Users needing spatial features on an ARM Mac may instead [run the Intel binary](#install-the-binary) or use the[Docker container image](#use-docker). Refer to [GitHub issue #93161](https://github.com/cockroachdb/cockroach/issues/93161) for more information. +On macOS ARM systems, [spatial features]({% link {{ page.version.version }}/spatial-data-overview.md %}) are disabled due to an issue with macOS code signing for the GEOS libraries. Users needing spatial features on an ARM Mac may instead [run the Intel binary](#install-the-binary) or use the [Docker container image](#use-docker). Refer to GitHub issue #93161 for more information.
diff --git a/src/current/v26.2/intellij-idea.md b/src/current/v26.2/intellij-idea.md index 7a5b5adcb6e..3e226a45cac 100644 --- a/src/current/v26.2/intellij-idea.md +++ b/src/current/v26.2/intellij-idea.md @@ -81,7 +81,7 @@ You can now use IntelliJ's [database tool window](https://www.jetbrains.com/help ## Report issues with IntelliJ IDEA & CockroachDB -If you encounter issues other than those outlined above, please [file an issue on the `cockroachdb/cockroach` GitHub repo](https://github.com/cockroachdb/cockroach/issues/new?template=bug_report.md), including the following details about the environment where you encountered the issue: +If you encounter issues other than those outlined above, please file an issue on the `cockroachdb/cockroach` GitHub repo, including the following details about the environment where you encountered the issue: - CockroachDB version ([`cockroach version`]({% link {{ page.version.version }}/cockroach-version.md %})) - IntelliJ IDEA version diff --git a/src/current/v26.2/kubernetes-performance.md b/src/current/v26.2/kubernetes-performance.md index 6fa4d534442..cfe4aec6725 100644 --- a/src/current/v26.2/kubernetes-performance.md +++ b/src/current/v26.2/kubernetes-performance.md @@ -24,9 +24,9 @@ Before you focus on optimizing a Kubernetes-orchestrated CockroachDB cluster: A number of independent factors affect performance when running CockroachDB on Kubernetes. Most are easiest to change before you create your CockroachDB cluster. If you need to modify a CockroachDB cluster that is already running on Kubernetes, extra care and testing is strongly recommended. -The following sections show how to modify excerpts from our provided Kubernetes configuration YAML files. 
You can find the most up-to-date versions of these files on GitHub: [one for running CockroachDB in secure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset-secure.yaml) and one for [running CockroachDB in insecure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/cockroachdb-statefulset.yaml). +The following sections show how to modify excerpts from our provided Kubernetes configuration YAML files. You can find the most up-to-date versions of these files on GitHub: [one for running CockroachDB in secure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset-secure.yaml) and one for [running CockroachDB in insecure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/cockroachdb-statefulset.yaml). -You can also use a [performance-optimized configuration file for secure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml) or [insecure mode](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml). Be sure to modify the file wherever there is a `TODO` comment. +You can also use a [performance-optimized configuration file for secure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-secure.yaml) or [insecure mode](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/performance/cockroachdb-statefulset-insecure.yaml). Be sure to modify the file wherever there is a `TODO` comment. 
### Version of CockroachDB @@ -336,7 +336,7 @@ If for some reason setting appropriate resource requests still isn't getting you #### Client applications on the same machines as CockroachDB -Running client applications such as benchmarking applications on the same machines as CockroachDB can be even worse than just having Kubernetes system pods on the same machines. They are very likely to end up competing for resources, because when the applications get more loaded than usual, so will the CockroachDB processes. The best way to avoid this is to [set resource requests and limits](#resource-requests-and-limits), but if you are unwilling or unable to do that for some reason, you can also set [anti-affinity scheduling policies](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) on your client applications. Anti-affinity policies are placed in the pod spec, so if you wanted to change our provided example load generator app, you'd change [these lines](https://github.com/cockroachdb/cockroach/blob/98c506c48f3517d1ac1aadb6a09e1b23ad672c37/cloud/kubernetes/example-app.yaml#L11-L12): +Running client applications such as benchmarking applications on the same machines as CockroachDB can be even worse than just having Kubernetes system pods on the same machines. They are very likely to end up competing for resources, because when the applications get more loaded than usual, so will the CockroachDB processes. The best way to avoid this is to [set resource requests and limits](#resource-requests-and-limits), but if you are unwilling or unable to do that for some reason, you can also set [anti-affinity scheduling policies](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) on your client applications. 
Anti-affinity policies are placed in the pod spec, so if you wanted to change our provided example load generator app, you'd change [these lines](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml): ~~~ yaml spec: diff --git a/src/current/v26.2/log-formats.md b/src/current/v26.2/log-formats.md index 7c715f38c89..cfcf8512b19 100644 --- a/src/current/v26.2/log-formats.md +++ b/src/current/v26.2/log-formats.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/logformats.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/logformats.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v26.2/logging.md b/src/current/v26.2/logging.md index a9b22c4c05f..357817bc212 100644 --- a/src/current/v26.2/logging.md +++ b/src/current/v26.2/logging.md @@ -5,4 +5,4 @@ toc: true docs_area: reference.logging --- -{% remote_include https://raw.githubusercontent.com/cockroachdb/cockroach/{{ page.release_info.crdb_branch_name }}/docs/generated/logging.md %} +{% capture cockroach_include %}cockroach-generated/{{ page.release_info.crdb_branch_name }}/logging.md{% endcapture %}{% include {{ cockroach_include }} %} diff --git a/src/current/v26.2/migrate-from-avro.md b/src/current/v26.2/migrate-from-avro.md index de42d232917..eb46e391fce 100644 --- a/src/current/v26.2/migrate-from-avro.md +++ b/src/current/v26.2/migrate-from-avro.md @@ -161,7 +161,7 @@ There are additional import [options][option] you can use when importing binary - `records_terminated_by`, which specifies the unicode character used to indicate new lines in the input binary or JSON file (default: `\n`). 
{{site.data.alerts.callout_info}} -The following example uses sample data generated by [Avro tools](https://github.com/cockroachdb/cockroach/tree/master/pkg/sql/importer/testdata/avro). +The following example uses sample data generated by Avro tools. {{site.data.alerts.end}} For example, to import the data from `simple-schema.json` into a `simple` table, first [create the table]({% link {{ page.version.version }}/create-table.md %}) to import into. Then run `IMPORT INTO` with the following options: diff --git a/src/current/v26.2/monitor-cockroachdb-kubernetes.md b/src/current/v26.2/monitor-cockroachdb-kubernetes.md index 0d4fe02ebc5..3af904b5d15 100644 --- a/src/current/v26.2/monitor-cockroachdb-kubernetes.md +++ b/src/current/v26.2/monitor-cockroachdb-kubernetes.md @@ -132,7 +132,7 @@ If you're on Hosted GKE, before starting, make sure the email address associated {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/prometheus.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/prometheus.yaml ~~~ {{site.data.alerts.callout_info}} @@ -181,14 +181,14 @@ If you're on Hosted GKE, before starting, make sure the email address associated ## Configure Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. +Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. 
This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. -1. Download our alertmanager-config.yaml configuration file: +1. Download our alertmanager-config.yaml configuration file: {% include_cached copy-clipboard.html %} ~~~ shell $ curl -O \ - https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager-config.yaml + https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager-config.yaml ~~~ 1. Edit the `alertmanager-config.yaml` file to [specify the desired receivers for notifications](https://prometheus.io/docs/alerting/configuration/#receiver). Initially, the file contains a placeholder web hook. @@ -218,12 +218,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen The name of the secret, `alertmanager-cockroachdb`, must match the name used in the `alertmanager.yaml` file. If they differ, the Alertmanager instance will start without configuration, and nothing will happen. {{site.data.alerts.end}} -1. Use our [`alertmanager.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: +1. 
Use our [`alertmanager.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml ~~~ ~~~ @@ -248,12 +248,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen Alertmanager -1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml): +1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml): {% include_cached copy-clipboard.html %} ~~~ shell $ kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alert-rules.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml ~~~ ~~~ diff --git a/src/current/v26.2/monitor-cockroachdb-operator.md b/src/current/v26.2/monitor-cockroachdb-operator.md index c47ec443b79..d49baf76d7e 100644 --- a/src/current/v26.2/monitor-cockroachdb-operator.md +++ b/src/current/v26.2/monitor-cockroachdb-operator.md @@ -76,7 +76,7 @@ If you're on Hosted GKE, before starting, make sure the email address associated {% include_cached copy-clipboard.html %} ~~~ shell - curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/prometheus.yaml + curl -O 
https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/prometheus.yaml ~~~ 1. Apply the Prometheus manifest. This creates the various objects necessary to run a Prometheus instance: @@ -119,13 +119,13 @@ For more details on using the Prometheus UI, see their [official documentation]( ## Configure Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. +Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. This section shows you how to use [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) and CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml) to do this. -1. Download our [alertmanager-config.yaml](https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager-config.yaml) configuration file: +1. Download our [alertmanager-config.yaml](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager-config.yaml) configuration file: {% include_cached copy-clipboard.html %} ~~~ shell - curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager-config.yaml + curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager-config.yaml ~~~ 1. 
Edit the `alertmanager-config.yaml` file to [specify the desired receivers for notifications](https://prometheus.io/docs/alerting/configuration/#receiver). Initially, the file contains a placeholder web hook. @@ -152,12 +152,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen The name of the secret, `alertmanager-cockroachdb`, must match the name used in the `alertmanager.yaml` file. If they differ, the Alertmanager instance will start without configuration, and nothing will happen. {{site.data.alerts.end}} -1. Use our [alertmanager.yaml](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: +1. Use our [alertmanager.yaml](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml) file to create the various objects necessary to run an Alertmanager instance, including a ClusterIP service so that Prometheus can forward alerts: {% include_cached copy-clipboard.html %} ~~~ shell kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alertmanager.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alertmanager.yaml ~~~ ~~~ shell alertmanager.monitoring.coreos.com/cockroachdb created @@ -180,12 +180,12 @@ Active monitoring helps you spot problems early, but it is also essential to sen Alertmanager -1. Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/prometheus/alert-rules.yaml): +1. 
Add CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml): {% include_cached copy-clipboard.html %} ~~~ shell kubectl apply \ - -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/prometheus/alert-rules.yaml + -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/prometheus/alert-rules.yaml ~~~ ~~~ shell prometheusrule.monitoring.coreos.com/prometheus-cockroachdb-rules created diff --git a/src/current/v26.2/monitor-cockroachdb-with-prometheus.md b/src/current/v26.2/monitor-cockroachdb-with-prometheus.md index 3d03a38c9d3..966168d0534 100644 --- a/src/current/v26.2/monitor-cockroachdb-with-prometheus.md +++ b/src/current/v26.2/monitor-cockroachdb-with-prometheus.md @@ -15,7 +15,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration - Make sure you have already started a CockroachDB cluster, either [locally]({% link {{ page.version.version }}/start-a-local-cluster.md %}) or in a [production environment]({% link {{ page.version.version }}/manual-deployment.md %}). -- Note that all files used in this tutorial can be found in the [`monitoring`](https://github.com/cockroachdb/cockroach/tree/master/monitoring) directory of the CockroachDB repository. +- Note that all files used in this tutorial can be found in the `monitoring` directory of the CockroachDB repository. ## Step 1. Install Prometheus @@ -39,11 +39,11 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration ## Step 2. Configure Prometheus -1. Download the starter [Prometheus configuration file](https://github.com/cockroachdb/cockroach/blob/master/monitoring/prometheus.yml) for CockroachDB: +1. 
Download the starter [Prometheus configuration file](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/prometheus.yml) for CockroachDB: {% include_cached copy-clipboard.html %} ~~~ shell - curl -o prometheus.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/prometheus.yml + curl -o prometheus.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/prometheus.yml ~~~ When you examine the configuration file, you'll see that it is set up to scrape the time series metrics of a single, insecure local node every 10 seconds: @@ -60,7 +60,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration Production cluster | Change the `targets` field to include `':'` for each node in the cluster. Also, be sure your network configuration allows TCP communication on the specified ports. Secure cluster | Uncomment `scheme: 'https'` and comment out `scheme: 'http'`. -1. Create a `rules` directory and download the [aggregation rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/aggregation.rules.yml) and [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/alerts.rules.yml) for CockroachDB into it: +1. 
Create a `rules` directory and download the [aggregation rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/aggregation.rules.yml) and [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml) for CockroachDB into it: {% include_cached copy-clipboard.html %} ~~~ shell @@ -74,12 +74,12 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration {% include_cached copy-clipboard.html %} ~~~ shell - curl -o rules/aggregation.rules.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/rules/aggregation.rules.yml + curl -o rules/aggregation.rules.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/rules/aggregation.rules.yml ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o rules/alerts.rules.yml https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/rules/alerts.rules.yml + curl -o rules/alerts.rules.yml https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml ~~~ ## Step 3. Start Prometheus @@ -108,7 +108,7 @@ This tutorial explores the CockroachDB {{ site.data.products.core }} integration ## Step 4. Send notifications with Alertmanager -Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. In step 2, you already downloaded CockroachDB's starter [alerting rules](https://github.com/cockroachdb/cockroach/blob/master/monitoring/rules/alerts.rules.yml). Now, download, configure, and start [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). +Active monitoring helps you spot problems early, but it is also essential to send notifications when there are events that require investigation or intervention. 
In step 2, you already downloaded CockroachDB's starter [alerting rules](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/monitoring/rules/alerts.rules.yml). Now, download, configure, and start [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). 1. Download the [latest Alertmanager tarball](https://prometheus.io/download/#alertmanager) for your OS. @@ -173,29 +173,29 @@ Although Prometheus lets you graph metrics, [Grafana](https://grafana.com/) is a Url | `http://<hostname>:9090` Access | Direct -1. Generate a standardized Grafana dashboard using [`cockroach gen dashboard --tool=grafana`]({% link {{ page.version.version }}/cockroach-gen.md %}#generate-a-dashboard). Alternatively, download one or more of the starter [Grafana dashboards](https://github.com/cockroachdb/cockroach/tree/master/monitoring/grafana-dashboards/by-cluster) for CockroachDB to focus on specific metrics: +1. Generate a standardized Grafana dashboard using [`cockroach gen dashboard --tool=grafana`]({% link {{ page.version.version }}/cockroach-gen.md %}#generate-a-dashboard). Alternatively, download one or more of the starter Grafana dashboards for CockroachDB to focus on specific metrics: {% include_cached copy-clipboard.html %} ~~~ shell - curl -o runtime.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/runtime.json + curl -o runtime.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/runtime.json # runtime dashboard: node status, including uptime, memory, and cpu. 
~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o storage.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/storage.json + curl -o storage.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/storage.json # storage dashboard: storage availability. ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o sql.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/sql.json + curl -o sql.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/sql.json # sql dashboard: sql queries/transactions. ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - curl -o replication.json https://raw.githubusercontent.com/cockroachdb/cockroach/master/monitoring/grafana-dashboards/by-cluster/replication.json + curl -o replication.json https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/monitoring/grafana-dashboards/by-cluster/replication.json # replicas dashboard: replica information and operations. ~~~ diff --git a/src/current/v26.2/monitoring-and-alerting.md b/src/current/v26.2/monitoring-and-alerting.md index d6bd689cb1a..2cb49e5e35a 100644 --- a/src/current/v26.2/monitoring-and-alerting.md +++ b/src/current/v26.2/monitoring-and-alerting.md @@ -1102,7 +1102,7 @@ Start Prometheus and Alertmanager to begin watching for events to alert on. You ### Events to alert on {{site.data.alerts.callout_info}} -Currently, not all events listed have corresponding alert rule definitions available from the `api/v2/rules/` endpoint. Many events not yet available in this manner are defined in the pre-defined alerting rules. For more details, see [Monitor CockroachDB with Prometheus]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}). 
+Currently, not all events listed have corresponding alert rule definitions available from the `api/v2/rules/` endpoint. Many events not yet available in this manner are defined in the pre-defined alerting rules. For more details, see [Monitor CockroachDB with Prometheus]({% link {{ page.version.version }}/monitor-cockroachdb-with-prometheus.md %}). {{site.data.alerts.end}} #### Node is down @@ -1111,7 +1111,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** If a node is down, its Prometheus endpoint will return a `Connection refused` error. Otherwise, the `liveness_livenodes` metric will be the total number of live nodes in the cluster. -- **Rule definition:** Use the `InstanceDead` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `InstanceDead` alert from our pre-defined alerting rules. #### Node is restarting too frequently @@ -1119,7 +1119,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the number of times the `sys_uptime` metric in the node's Prometheus endpoint output was reset back to zero. The `sys_uptime` metric gives you the length of time, in seconds, that the `cockroach` process has been running. -- **Rule definition:** Use the `InstanceFlapping` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `InstanceFlapping` alert from our pre-defined alerting rules. #### Node is running low on disk space @@ -1127,7 +1127,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Divide the `capacity` metric by the `capacity_available` metric in the node's Prometheus endpoint output. -- **Rule definition:** Use the `StoreDiskLow` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `StoreDiskLow` alert from our pre-defined alerting rules. 
{% include {{page.version.version}}/storage/free-up-disk-space.md %} @@ -1143,7 +1143,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the `security_certificate_expiration_ca` metric in the node's Prometheus endpoint output. -- **Rule definition:** Use the `CACertificateExpiresSoon` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `CACertificateExpiresSoon` alert from our pre-defined alerting rules. #### Node certificate expires soon @@ -1151,7 +1151,7 @@ Currently, not all events listed have corresponding alert rule definitions avail - **How to detect:** Calculate this using the `security_certificate_expiration_node` metric in the node's Prometheus endpoint output. -- **Rule definition:** Use the `NodeCertificateExpiresSoon` alert from our pre-defined alerting rules. +- **Rule definition:** Use the `NodeCertificateExpiresSoon` alert from our pre-defined alerting rules. #### Changefeed is experiencing high latency diff --git a/src/current/v26.2/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md b/src/current/v26.2/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md index 55f144f4d74..34fcca4fb4a 100644 --- a/src/current/v26.2/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md +++ b/src/current/v26.2/orchestrate-cockroachdb-with-kubernetes-multi-cluster.md @@ -71,7 +71,7 @@ Instead of using this approach, you can now [enable global access](https://cloud Our multi-region deployment approach relies on pod IP addresses being routable across three distinct Kubernetes clusters and regions. Both the hosted Google Kubernetes Engine (GKE) and Amazon Elastic Kubernetes Service (EKS) satisfy this requirement. -If you want to run on another cloud or on-premises, use this [basic network test](https://github.com/cockroachdb/cockroach/tree/master/cloud/kubernetes/multiregion#pod-to-pod-connectivity) to see if it will work. 
+If you want to run on another cloud or on-premises, use this basic network test to see if it will work.
      1. Complete the **Before You Begin** steps described in the [Google Kubernetes Engine Quickstart](https://cloud.google.com/kubernetes-engine/docs/quickstart) documentation. @@ -322,21 +322,21 @@ This important rule enables node communication between Kubernetes clusters in di The Kubernetes cluster in each region needs to have a [Network Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/introduction.html) pointed at its CoreDNS service, which you will configure in the next step. -1. Upload our load balancer manifest [`dns-lb-eks.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml) to the Kubernetes clusters in all 3 regions: +1. Upload our load balancer manifest [`dns-lb-eks.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml) to the Kubernetes clusters in all 3 regions: {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context + kubectl apply -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context + kubectl apply -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context ~~~ {% include_cached copy-clipboard.html %} ~~~ shell - kubectl apply -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context + kubectl apply -f 
https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/dns-lb-eks.yaml --context ~~~ You should see the load balancer appear in the Load Balancers section of the EC2 console in each region. This load balancer will route traffic to CoreDNS in the region. @@ -369,11 +369,11 @@ Each Kubernetes cluster has a [CoreDNS](https://coredns.io/) service that respon To enable traffic forwarding to CockroachDB pods in all 3 regions, you need to [modify the ConfigMap](https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configmap-options) for the CoreDNS Corefile in each region. -1. Download and open our ConfigMap template [`configmap.yaml`](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/configmap.yaml): +1. Download and open our ConfigMap template [`configmap.yaml`](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/configmap.yaml): {% include_cached copy-clipboard.html %} ~~~ shell - curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/configmap.yaml + curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/configmap.yaml ~~~ 1. After [obtaining the IP addresses of the Network Load Balancers](#set-up-load-balancing) in all 3 regions, you can use this information to define a **separate ConfigMap for each region**. Each unique ConfigMap lists the forwarding addresses for the pods in the 2 other regions. 
@@ -467,7 +467,7 @@ If you plan to run your instances exclusively on private subnets, set the follow {% include_cached copy-clipboard.html %} ~~~ shell $ curl -OOOOOOOOO \ - https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/{README.md,client-secure.yaml,cluster-init-secure.yaml,cockroachdb-statefulset-secure.yaml,dns-lb.yaml,example-app-secure.yaml,external-name-svc.yaml,setup.py,teardown.py} + https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/{README.md,client-secure.yaml,cluster-init-secure.yaml,cockroachdb-statefulset-secure.yaml,dns-lb.yaml,example-app-secure.yaml,external-name-svc.yaml,setup.py,teardown.py} ~~~ 1. Retrieve the `kubectl` "contexts" for your clusters: @@ -685,11 +685,11 @@ The below steps use [`cockroach cert` commands]({% link {{ page.version.version ### Create StatefulSets -1. Download and open our [multi-region StatefulSet configuration](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml). You'll save three versions of this file locally, one for each set of 3 CockroachDB nodes per region. +1. Download and open our [multi-region StatefulSet configuration](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml). You'll save three versions of this file locally, one for each set of 3 CockroachDB nodes per region. {% include_cached copy-clipboard.html %} ~~~ shell - $ curl -O https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml + $ curl -O https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/eks/cockroachdb-statefulset-secure-eks.yaml ~~~ Look for **TODO** comments in the file. 
These highlight fields you need to define before deploying your StatefulSet. @@ -814,7 +814,7 @@ The pod uses the `root` client certificate created earlier by the `setup.py` scr {% include_cached copy-clipboard.html %} ~~~ shell - kubectl create -f https://raw.githubusercontent.com/cockroachdb/cockroach/master/cloud/kubernetes/multiregion/client-secure.yaml --context --namespace + kubectl create -f https://raw.githubusercontent.com/cockroachdb/docs/main/src/current/files/cockroach/cloud/kubernetes/multiregion/client-secure.yaml --context --namespace ~~~ ~~~ diff --git a/src/current/v26.2/postgresql-compatibility.md b/src/current/v26.2/postgresql-compatibility.md index ad110c9e4ef..d6be88a52d4 100644 --- a/src/current/v26.2/postgresql-compatibility.md +++ b/src/current/v26.2/postgresql-compatibility.md @@ -34,11 +34,11 @@ When set to `true`, multiple portals can be open at the same time, with their ex This feature has the following limitations: - Only read-only [`SELECT` queries]({% link {{ page.version.version }}/selection-queries.md %}) without [subqueries]({% link {{ page.version.version }}/subqueries.md %}) are supported. 
-- Postqueries (which are how CockroachDB executes [foreign key checks]({% link {{ page.version.version }}/foreign-key.md %}), for example) are not supported - [cockroachdb/cockroach#96398](https://github.com/cockroachdb/cockroach/issues/96398) -- [Distributed SQL execution]({% link {{ page.version.version }}/architecture/sql-layer.md %}#distsql) is not supported for multiple active portals; instead queries execute on the [gateway node]({% link {{ page.version.version }}/architecture/life-of-a-distributed-transaction.md %}#gateway) only - [cockroachdb/cockroach#100822](https://github.com/cockroachdb/cockroach/issues/100822) -- Only the latest execution of a statement from a pausable portal is recorded by the [trace infrastructure]({% link {{ page.version.version }}/show-trace.md %}) - [cockroachdb/cockroach#99404](https://github.com/cockroachdb/cockroach/issues/99404) +- Postqueries (which are how CockroachDB executes [foreign key checks]({% link {{ page.version.version }}/foreign-key.md %}), for example) are not supported - cockroachdb/cockroach#96398 +- [Distributed SQL execution]({% link {{ page.version.version }}/architecture/sql-layer.md %}#distsql) is not supported for multiple active portals; instead queries execute on the [gateway node]({% link {{ page.version.version }}/architecture/life-of-a-distributed-transaction.md %}#gateway) only - cockroachdb/cockroach#100822 +- Only the latest execution of a statement from a pausable portal is recorded by the [trace infrastructure]({% link {{ page.version.version }}/show-trace.md %}) - cockroachdb/cockroach#99404 -In addition to the known issues, additional performance testing is needed. The current list of known issues can be viewed [on GitHub using the `A-pausable-portals` label](https://github.com/cockroachdb/cockroach/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc++label%3AA-pausable-portals+), and we welcome bug reports. +In addition to the known issues, additional performance testing is needed. 
The current list of known issues can be viewed on GitHub using the `A-pausable-portals` label, and we welcome bug reports. ## Features that differ from PostgreSQL @@ -181,7 +181,7 @@ An `x` value less than `1` would result in the following error: pq: check constraint violated ~~~ -[#35370](https://github.com/cockroachdb/cockroach/issues/35370) +#35370 ### Column name from an outer column inside a subquery @@ -207,7 +207,7 @@ PostgreSQL: 1 ~~~ -[#46563](https://github.com/cockroachdb/cockroach/issues/46563) +#46563 ### SQL Compatibility diff --git a/src/current/v26.2/query-spatial-data.md b/src/current/v26.2/query-spatial-data.md index 2684600938e..5b282aa423e 100644 --- a/src/current/v26.2/query-spatial-data.md +++ b/src/current/v26.2/query-spatial-data.md @@ -24,7 +24,7 @@ Just as CockroachDB strives for [PostgreSQL compatibility]({% link {{ page.versi CockroachDB does not implement the full list of PostGIS built-in functions and operators. Also, [spatial indexing works differently]({% link {{ page.version.version }}/spatial-indexes.md %}) (see the [Performance](#performance) section below). For a list of the spatial functions CockroachDB supports, see [Geospatial functions]({% link {{ page.version.version }}/functions-and-operators.md %}#spatial-functions). -If your application needs support for functions that are not yet implemented, check the [meta-issue for built-in function support on GitHub](https://github.com/cockroachdb/cockroach/issues/49203), which describes how to find an issue for the built-in function(s) you need. +If your application needs support for functions that are not yet implemented, check the meta-issue for built-in function support on GitHub, which describes how to find an issue for the built-in function(s) you need. For a list of other known limitations, refer to [Known Limitations]({% link {{ page.version.version }}/spatial-data-overview.md %}#known-limitations). 
diff --git a/src/current/v26.2/read-committed.md b/src/current/v26.2/read-committed.md index 6b83f50d88d..0858d1e0244 100644 --- a/src/current/v26.2/read-committed.md +++ b/src/current/v26.2/read-committed.md @@ -20,7 +20,7 @@ Whereas `SERIALIZABLE` isolation guarantees data correctness by placing transact If your workload is already running well under `SERIALIZABLE` isolation, Cockroach Labs does not recommend changing to `READ COMMITTED` isolation unless there is a specific need. {{site.data.alerts.callout_info}} -`READ COMMITTED` on CockroachDB provides stronger isolation than `READ COMMITTED` on PostgreSQL. On CockroachDB, `READ COMMITTED` prevents anomalies within single statements. For complete details on how `READ COMMITTED` is implemented on CockroachDB, see the [Read Committed RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20230122_read_committed_isolation.md). +`READ COMMITTED` on CockroachDB provides stronger isolation than `READ COMMITTED` on PostgreSQL. On CockroachDB, `READ COMMITTED` prevents anomalies within single statements. For complete details on how `READ COMMITTED` is implemented on CockroachDB, see the [Read Committed RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20230122_read_committed_isolation.md). {{site.data.alerts.end}} ## Enable `READ COMMITTED` isolation @@ -918,5 +918,5 @@ SELECT * FROM schedules - [`SELECT ... 
FOR UPDATE`]({% link {{ page.version.version }}/select-for-update.md %}) - [Serializable Transactions]({% link {{ page.version.version }}/demo-serializable.md %}) - [What Write Skew Looks Like](https://www.cockroachlabs.com/blog/what-write-skew-looks-like/) -- [Read Committed RFC](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20230122_read_committed_isolation.md) +- [Read Committed RFC](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20230122_read_committed_isolation.md) - [Migration Overview]({% link molt/migration-overview.md %}) diff --git a/src/current/v26.2/restore.md b/src/current/v26.2/restore.md index d1de15c5b86..a8176aa673e 100644 --- a/src/current/v26.2/restore.md +++ b/src/current/v26.2/restore.md @@ -216,11 +216,11 @@ When restoring an individual table that references a user-defined type (e.g., [` - If there is an existing type in the cluster with the same name that is compatible with the type in the backup, CockroachDB will map the type in the backup to the type in the cluster. - If there is an existing type in the cluster with the same name but it is _not_ compatible with the type in the backup, the restore will not succeed and you will be asked to resolve the naming conflict. You can do this by either [dropping]({% link {{ page.version.version }}/drop-type.md %}) or [renaming]({% link {{ page.version.version }}/alter-type.md %}) the existing user-defined type. -In general, two types are compatible if they are the same kind (e.g., an enum is only compatible with other enums). Additionally, enums are only compatible if they have the same ordered set of elements that have also been [created in the same way](https://github.com/cockroachdb/cockroach/blob/master/docs/RFCS/20200331_enums.md#physical-layout). For example: +In general, two types are compatible if they are the same kind (e.g., an enum is only compatible with other enums). 
Additionally, enums are only compatible if they have the same ordered set of elements that have also been [created in the same way](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20200331_enums.md#physical-layout). For example: - `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes', 'no')` are compatible. - `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('no', 'yes')` are not compatible. -- `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes'); ALTER TYPE t2 ADD VALUE ('no')` are not compatible because they were not created in the same way. +- `CREATE TYPE t1 AS ENUM ('yes', 'no')` and `CREATE TYPE t2 AS ENUM ('yes'); ALTER TYPE t2 ADD VALUE ('no')` are not compatible because they were not [created in the same way](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/docs/RFCS/20200331_enums.md#physical-layout). ### Object dependencies diff --git a/src/current/v26.2/schedule-cockroachdb-kubernetes.md b/src/current/v26.2/schedule-cockroachdb-kubernetes.md index dd67d369c5e..5972aa3012e 100644 --- a/src/current/v26.2/schedule-cockroachdb-kubernetes.md +++ b/src/current/v26.2/schedule-cockroachdb-kubernetes.md @@ -118,7 +118,7 @@ For more context on how these rules work, see the [Kubernetes documentation](htt Specify pod affinities and anti-affinities in `affinity.podAffinity` and `affinity.podAntiAffinity` in the {{ site.data.products.public-operator }}'s custom resource, which is used to [deploy the cluster]({% link {{ page.version.version }}/deploy-cockroachdb-with-kubernetes.md %}#initialize-the-cluster). If you specify multiple `matchExpressions` labels, the node must match all of them. If you specify multiple `values` for a label, the node can match any of the values. 
-The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. +The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. {% include_cached copy-clipboard.html %} ~~~ yaml diff --git a/src/current/v26.2/schedule-cockroachdb-operator.md b/src/current/v26.2/schedule-cockroachdb-operator.md index 4630ea6d9d1..25f040d1a91 100644 --- a/src/current/v26.2/schedule-cockroachdb-operator.md +++ b/src/current/v26.2/schedule-cockroachdb-operator.md @@ -88,7 +88,7 @@ Specify pod affinities and node anti-affinities in `cockroachdb.crdbCluster.podT The {{ site.data.products.cockroachdb-operator }} hard-codes the pod template to only allow one pod per Kubernetes node. If you need to override this value, you can [override the pod template]({% link {{ page.version.version }}/override-templates-cockroachdb-operator.md %}#override-the-default-pod). -The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/cockroach/blob/master/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. +The following configuration attempts to schedule CockroachDB pods in the same zones as the pods that run our example [load generator](https://github.com/cockroachdb/docs/blob/main/src/current/files/cockroach/cloud/kubernetes/example-app.yaml) app. It disallows CockroachDB pods from being co-located on the same worker node. 
~~~ yaml cockroachdb: diff --git a/src/current/v26.2/sql-feature-support.md b/src/current/v26.2/sql-feature-support.md index d0d4b8051d6..84a5102a931 100644 --- a/src/current/v26.2/sql-feature-support.md +++ b/src/current/v26.2/sql-feature-support.md @@ -194,5 +194,5 @@ XML | ✗ | Standard | XML data can be stored as `BYTES`, but we do not offer XM Triggers | Partial | Standard | [Triggers documentation]({% link {{ page.version.version }}/triggers.md %}) Row-level TTL | ✓ | Common Extension | Automatically delete expired rows. For more information, see [Batch-delete expired data with Row-Level TTL]({% link {{ page.version.version }}/row-level-ttl.md %}). User-defined functions | Partial | Standard | [User-Defined Functions documentation]({% link {{ page.version.version }}/user-defined-functions.md %}) - New in v26.2: `CREATE EXTENSION "fuzzystrmatch"` | ✓ | Common Extension | Provides PostgreSQL-compatible fuzzy string matching functions including `soundex()`, `difference()`, `levenshtein()`, `metaphone()`, [`dmetaphone()`]({% link {{ page.version.version }}/functions-and-operators.md %}#dmetaphone), [`dmetaphone_alt()`]({% link {{ page.version.version }}/functions-and-operators.md %}#dmetaphone_alt), and [`daitch_mokotoff()`]({% link {{ page.version.version }}/functions-and-operators.md %}#daitch_mokotoff). These functions are available without running `CREATE EXTENSION "fuzzystrmatch"`. CockroachDB does not have full support for `CREATE EXTENSION`. [GitHub issue tracking `CREATE EXTENSION` support](https://github.com/cockroachdb/cockroach/issues/74777). - `CREATE EXTENSION "uuid-ossp"` | ✓ | Common Extension | Provides access to several additional [UUID generation functions]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions). Note that these UUID functions are available without typing `CREATE EXTENSION "uuid-ossp"`. CockroachDB does not have full support for `CREATE EXTENSION`. 
[GitHub issue tracking `CREATE EXTENSION` support](https://github.com/cockroachdb/cockroach/issues/74777). + New in v26.2: `CREATE EXTENSION "fuzzystrmatch"` | ✓ | Common Extension | Provides PostgreSQL-compatible fuzzy string matching functions including `soundex()`, `difference()`, `levenshtein()`, `metaphone()`, [`dmetaphone()`]({% link {{ page.version.version }}/functions-and-operators.md %}#dmetaphone), [`dmetaphone_alt()`]({% link {{ page.version.version }}/functions-and-operators.md %}#dmetaphone_alt), and [`daitch_mokotoff()`]({% link {{ page.version.version }}/functions-and-operators.md %}#daitch_mokotoff). These functions are available without running `CREATE EXTENSION "fuzzystrmatch"`. CockroachDB does not have full support for `CREATE EXTENSION`. GitHub issue tracking `CREATE EXTENSION` support. + `CREATE EXTENSION "uuid-ossp"` | ✓ | Common Extension | Provides access to several additional [UUID generation functions]({% link {{ page.version.version }}/functions-and-operators.md %}#id-generation-functions). Note that these UUID functions are available without typing `CREATE EXTENSION "uuid-ossp"`. CockroachDB does not have full support for `CREATE EXTENSION`. GitHub issue tracking `CREATE EXTENSION` support. diff --git a/src/current/v26.2/sql-name-resolution.md b/src/current/v26.2/sql-name-resolution.md index 554a9a65583..9600bfe1ef7 100644 --- a/src/current/v26.2/sql-name-resolution.md +++ b/src/current/v26.2/sql-name-resolution.md @@ -37,7 +37,7 @@ If you are upgrading to {{ page.version.version }}, take any combination of the - [Create new schemas]({% link {{ page.version.version }}/create-schema.md %}) in databases on your cluster. After the schemas are created, use [`ALTER TABLE ... RENAME`]({% link {{ page.version.version }}/alter-table.md %}#rename-to), [`ALTER SEQUENCE ... RENAME`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE ... RENAME`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW ... 
RENAME`]({% link {{ page.version.version }}/alter-view.md %}) statements to move objects between databases as needed. To move objects between schemas, use [`ALTER TABLE ... SET SCHEMA`]({% link {{ page.version.version }}/alter-table.md %}#set-schema), [`ALTER SEQUENCE ... SET SCHEMA`]({% link {{ page.version.version }}/alter-sequence.md %}), or [`ALTER VIEW ... SET SCHEMA`]({% link {{ page.version.version }}/alter-view.md %}). -- If your cluster contains cross-database references (e.g., a cross-database foreign key reference, or a cross-database view reference), use the relevant [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}), [`ALTER SEQUENCE`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW `]({% link {{ page.version.version }}/alter-view.md %}) statements to move any cross-referencing objects to the same database, but different schemas. Cross-database object references were allowed in earlier versions of CockroachDB to make database-object naming hierarchies more flexible for users. In v20.2, creating cross-database references are disabled for [foreign keys](foreign-key.html), [views]({% link {{ page.version.version }}/views.md %}), and [sequence ownership]({% link {{ page.version.version }}/create-sequence.md %}). For details, see [tracking issue](https://github.com/cockroachdb/cockroach/issues/55791). +- If your cluster contains cross-database references (e.g., a cross-database foreign key reference, or a cross-database view reference), use the relevant [`ALTER TABLE`]({% link {{ page.version.version }}/alter-table.md %}), [`ALTER SEQUENCE`]({% link {{ page.version.version }}/alter-sequence.md %}), [`ALTER TYPE`]({% link {{ page.version.version }}/alter-type.md %}), or [`ALTER VIEW `]({% link {{ page.version.version }}/alter-view.md %}) statements to move any cross-referencing objects to the same database, but different schemas. 
Cross-database object references were allowed in earlier versions of CockroachDB to make database-object naming hierarchies more flexible for users. In v20.2, creating cross-database references are disabled for [foreign keys](foreign-key.html), [views]({% link {{ page.version.version }}/views.md %}), and [sequence ownership]({% link {{ page.version.version }}/create-sequence.md %}). For details, see tracking issue. ## How name resolution works diff --git a/src/current/v26.2/st_union.md b/src/current/v26.2/st_union.md index 3f5a33e6330..c33e609820a 100644 --- a/src/current/v26.2/st_union.md +++ b/src/current/v26.2/st_union.md @@ -12,7 +12,7 @@ Given a set of shapes (e.g., from a [selection query]({% link {{ page.version.ve - [`GEOMETRY`]({% link {{ page.version.version }}/architecture/glossary.md %}#geometry) {{site.data.alerts.callout_info}} -The non-aggregate version of `ST_Union` is not yet implemented. For more information, see [cockroach#49064](https://github.com/cockroachdb/cockroach/issues/49064). +The non-aggregate version of `ST_Union` is not yet implemented. For more information, see cockroach#49064. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v26.2/temporary-tables.md b/src/current/v26.2/temporary-tables.md index 3a92fc64c97..21af0938cd2 100644 --- a/src/current/v26.2/temporary-tables.md +++ b/src/current/v26.2/temporary-tables.md @@ -10,7 +10,7 @@ docs_area: develop To create a temp table, add `TEMP`/`TEMPORARY` to a [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}) or [`CREATE TABLE AS`]({% link {{ page.version.version }}/create-table-as.md %}) statement. For full syntax details, see the [`CREATE TABLE`]({% link {{ page.version.version }}/create-table.md %}#synopsis) and [`CREATE TABLE AS`]({% link {{ page.version.version }}/create-table-as.md %}#synopsis) pages. For example usage, see [Examples](#examples). 
{{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. {{site.data.alerts.end}} {{site.data.alerts.callout_info}} diff --git a/src/current/v26.2/views.md b/src/current/v26.2/views.md index d21bbf6bef1..53375109db0 100644 --- a/src/current/v26.2/views.md +++ b/src/current/v26.2/views.md @@ -665,7 +665,7 @@ To speed up queries on materialized views, you can add an [index]({% link {{ pag CockroachDB supports session-scoped temporary views. Unlike persistent views, temporary views can only be accessed from the session in which they were created, and they are dropped at the end of the session. You can create temporary views on both persistent tables and [temporary tables]({% link {{ page.version.version }}/temporary-tables.md %}). {{site.data.alerts.callout_info}} -{% include feature-phases/preview.md %} For details, see the tracking issue [cockroachdb/cockroach#46260](https://github.com/cockroachdb/cockroach/issues/46260). +{% include feature-phases/preview.md %} For details, see the tracking issue cockroachdb/cockroach#46260. {{site.data.alerts.end}} {{site.data.alerts.callout_info}}